// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─  Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the tree.
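 *
 * As a minimal sketch of that effect (using the public API from
 * <linux/maple_tree.h>; the values are arbitrary):
 *
 *	DEFINE_MTREE(mt);
 *
 *	mtree_store_range(&mt, 0, 5, xa_mk_value(1), GFP_KERNEL);
 *	mtree_store_range(&mt, 6, 20, xa_mk_value(2), GFP_KERNEL);
 *	mtree_store_range(&mt, 10, 15, xa_mk_value(3), GFP_KERNEL);
 *
 * The single store of 10-15 splits the 6-20 entry, leaving a leaf with
 * pivots 5, 9, 15 and 20: one operation replaced the middle of an existing
 * range rather than adding or deleting exactly one entry.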
 *
 */


#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE	- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense]		= 0,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit
 * a dead node and restart on updates.
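 *
 * For example, when a split replaces part of the tree, the new subtree is
 * built off to the side and only then published by switching a single parent
 * slot (or the root pointer); a concurrent RCU reader therefore sees either
 * the old nodes, which are then marked dead and cause a retry, or the
 * completed new ones, never a half-built state.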
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif

/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer in
 * use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	WARN_ON(node->parent != ma_parent_ptr(node));
	call_rcu(&node->rcu, mt_free_rcu);
}

static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}

static inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
}

static inline bool mas_is_ptr(const struct ma_state *mas)
{
	return mas->node == MAS_ROOT;
}

static inline bool mas_is_start(const struct ma_state *mas)
{
	return mas->node == MAS_START;
}

bool mas_is_err(struct ma_state *mas)
{
	return xa_is_err(mas->node);
}

static inline bool mas_searchable(struct ma_state *mas)
{
	if (mas_is_none(mas))
		return false;

	if (mas_is_ptr(mas))
		return false;

	return true;
}

static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}

static inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree
 * nodes.  When storing 32 or 64 bit values, the offset can fit into 5 bits.
 * The 16 bit values need an extra bit to store the offset.  This extra bit
 * comes from a reuse of the last bit in the node type.  This is possible by
 * using bit 1 to indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT		0x03
#define MAPLE_PARENT_SLOT_MASK		0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02

/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mas_parent_type() - Return the maple_type of the parent from the stored
 * parent type.
 * @mas: The maple state
 * @enode: The maple_enode to extract the parent's enum
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{
	unsigned long p_type;

	p_type = (unsigned long)mte_to_node(enode)->parent;
	if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
		return 0;

	p_type &= MAPLE_NODE_MASK;
	p_type &= ~mte_parent_slot_mask(p_type);
	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mas->tree))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}

/*
 * mas_set_parent() - Set the parent node and encode the slot
 * @mas: The maple state
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot that @enode resides in @parent.
 *
 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
 * parent type.
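 *
 * For example, assuming a (hypothetical, 256B aligned) parent node at
 * address 0x1000 and slot 4, the stored value is
 * 0x1000 | (4 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64 == 0x1026:
 * the node type sits in the low bits with the slot number above it, which is
 * exactly what mte_parent(), mte_parent_slot() and mas_parent_type() decode.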
 */
static inline
void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
		    const struct maple_enode *parent, unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	MAS_BUG_ON(mas, p_type == maple_dense);
	MAS_BUG_ON(mas, p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
	case maple_dense:
	case maple_leaf_64:
		shift = type = 0;
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	if (val & MA_ROOT_PARENT)
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent;

	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = mte_parent(enode);
	return (parent == node);
}

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0 is
 * set, then the alloc contains the number of requested nodes.  If there is an
 * allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
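 *
 * For example, with no node allocated yet, a request for 3 nodes is encoded
 * directly in @mas->alloc as ((3 << 1) | 1) == 0x7; the set bit 0 is what
 * distinguishes a request count from a pointer to a maple_alloc node.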
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
 * no allocated node.  Set the request either in the node or do the necessary
 * encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}

/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: the maple node
 * @type: the node type
 *
 * In the event of a dead node, this array may be %NULL
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * mas_pivot() - Get the pivot at @piv of the maple encoded node.
 * @mas: The maple state.
 * @piv: The pivot.
 *
 * Return: the pivot at @piv of @mas->node.
 */
static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
{
	struct maple_node *node = mas_mn(mas);
	enum maple_type type = mte_node_type(mas->node);

	if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) {
		mas_set_err(mas, -EIO);
		return 0;
	}

	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot[piv];
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot[piv];
	case maple_dense:
		return 0;
	}
	return 0;
}

/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
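 *
 * For example, a maple_range_64 node with 16 slots has only 15 stored
 * pivots; asking for the pivot of the last slot therefore returns @mas->max,
 * the implied maximum of the node, rather than reading past the array.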
 */
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	default:
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	default:
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}
}

static inline bool mt_write_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_write_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline void *mt_slot(const struct maple_tree *mt,
		void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
				   unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
				    unsigned char offset)
{
	return mt_slot_locked(mas->tree, slots, offset);
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
			     unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * mt_clear_meta() - clear the metadata information of a node, if it exists
 * @mt: The maple tree
 * @mn: The maple node
 * @type: The maple node type
 */
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
				 enum maple_type type)
{
	struct maple_metadata *meta;
	unsigned long *pivots;
	void __rcu **slots;
	void *next;

	switch (type) {
	case maple_range_64:
		pivots = mn->mr64.pivot;
		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
			slots = mn->mr64.slot;
			next = mt_slot_locked(mt, slots,
					      MAPLE_RANGE64_SLOTS - 1);
			if (unlikely((mte_to_node(next) &&
				      mte_node_type(next))))
				return; /* no metadata, could be node */
		}
		fallthrough;
	case maple_arange_64:
		meta = ma_meta(mn, type);
		break;
	default:
		return;
	}

	meta->gap = 0;
	meta->end = 0;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn,
					enum maple_type mt)
{
	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}

static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
static inline void mas_free(struct ma_state *mas, struct maple_enode *used);

/*
 * mas_mat_free() - Free all nodes in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Free walk a dead list.
 */
static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mas_free(mas, mat->head);
		mat->head = next;
	}
}

/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mte_destroy_walk(mat->head, mat->mtree);
		mat->head = next;
	}
}

/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas: the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.  This
 * may cause several levels of walking up to find the correct min and max.
 * May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;

	a_type = mas_parent_type(mas, mas->node);
	mas->offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	if (!mas->min)
		set_min = true;

	if (mas->max == ULONG_MAX)
		set_max = true;

	min = 0;
	max = ULONG_MAX;
	do {
		p_enode = a_enode;
		a_type = mas_parent_type(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		a_enode = mt_mk_node(a_node, a_type);
		pivots = ma_pivots(a_node, a_type);

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
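 *
 * For example, with three allocated nodes the head maple_alloc has
 * total == 3 and two more nodes in its slots: pops return the highest used
 * slot first, then, once only slot 0 remains, the head itself is returned
 * and the node in slot 0 becomes the new head.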
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}
	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}

/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated
 * and requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
		slots = (void **)&node->slot[node->node_count];
		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		if (node->node_count == 0) {
			node->slot[0]->node_count = 0;
			node->slot[0]->request_count = 0;
		}

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state allocations
 * otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}

/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}

/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->node == MAS_START, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree:     NULL & mas->node == MAS_NONE
 * - If it's a single entry:    The entry & mas->node == MAS_ROOT
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;

retry:
		mas->depth = 0;
		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			if (mte_dead_node(mas->node))
				goto retry;

			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->node = MAS_NONE;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;

		/* Single entry tree. */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}

/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
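 *
 * For example, in a 16-slot maple_range_64 node: a zero in the last pivot
 * means the node is not full and the end is read from the metadata; the last
 * pivot equalling @max means the data ends at slot 14; otherwise every slot
 * is in use and the end is the last slot, 15.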
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char ma_data_end(struct maple_node *node,
					enum maple_type type,
					unsigned long *pivots,
					unsigned long max)
{
	unsigned char offset;

	if (!pivots)
		return 0;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
		return 0;

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas: the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below, and
	 * slot 1 may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}

	return max_gap;
}

/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata data end to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}

/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * Return: The gap value.
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	MAS_BUG_ON(mas, mt != maple_arange_64);
	offset = ma_meta_gap(node, mt);
	gaps = ma_gaps(node, mt);
	return gaps[offset];
}

/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
				  unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_type(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	MAS_BUG_ON(mas, pmt != maple_arange_64);
	meta_offset = ma_meta_gap(pnode, pmt);
	meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_type(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}

/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas: the maple state.
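 *
 * For example, if a store shrinks the largest gap under a node of an
 * allocation tree from 100 to 10, the gap recorded in the parent's slot for
 * that child is rewritten; if that slot also held the parent's recorded
 * maximum, the update keeps propagating upwards until a level's maximum is
 * unaffected (see mas_parent_gap() above).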
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_type(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}

/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas: the maple state (for the tree)
 * @parent: the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
				      struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mas_mn(mas);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mas_set_parent(mas, child, parent, offset);
	} while (offset--);
}

/*
 * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
 * parent encoding to locate the maple node in the tree.
 * @mas: the ma_state to use for operations.
 * @advanced: boolean to adopt the child nodes and free the old node (false) or
 * leave the node (true) and handle the adoption and free elsewhere.
 */
static inline void mas_replace(struct ma_state *mas, bool advanced)
	__must_hold(mas->tree->ma_lock)
{
	struct maple_node *mn = mas_mn(mas);
	struct maple_enode *old_enode;
	unsigned char offset = 0;
	void __rcu **slots = NULL;

	if (ma_is_root(mn)) {
		old_enode = mas_root_locked(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_type(mas, mas->node));
		old_enode = mas_slot_locked(mas, slots, offset);
	}

	if (!advanced && !mte_is_leaf(mas->node))
		mas_adopt_children(mas, mas->node);

	if (mte_is_root(mas->node)) {
		mn->parent = ma_parent_ptr(
			((unsigned long)mas->tree | MA_ROOT_PARENT));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		rcu_assign_pointer(slots[offset], mas->node);
	}

	if (!advanced) {
		mte_set_node_dead(old_enode);
		mas_free(mas, old_enode);
	}
}

/*
 * mas_new_child() - Find the new child of a node.
 * @mas: the maple state
 * @child: the maple state to store the child.
 * Return: true if a new child is found, false otherwise.
 */
static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->ma_lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}
	return false;
}

/*
 * mab_shift_right() - Shift the data in mab right.  Note, does not clean out
 * the old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				   unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}

/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}

/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 *
 * Return: the split location.
 */
static inline int mab_no_null_split(struct maple_big_node *b_node,
				    unsigned char split, unsigned char slot_count)
{
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side will
		 * still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
			split++;
		else
			split--;
	}
	return split;
}

/*
 * mab_calc_split() - Calculate the split location and if there needs to be two
 * splits.
 * @mas: The maple state
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required.  0 otherwise.
 * @min: The minimum of the range covered by @bn.
 *
 * Return: The first split location.  The middle split is set in @mid_split.
 */
static inline int mab_calc_split(struct ma_state *mas,
	struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
{
	unsigned char b_end = bn->b_end;
	int split = b_end / 2; /* Assume equal split. */
	unsigned char slot_min, slot_count = mt_slots[bn->type];

	/*
	 * To support gap tracking, all NULL entries are kept together and a
	 * node cannot end on a NULL entry, with the exception of the left-most
	 * leaf.  The limitation means that the split of a node must be checked
	 * for this condition and be able to put more data in one direction or
	 * the other.
	 */
	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
		*mid_split = 0;
		split = b_end - mt_min_slots[bn->type];

		if (!ma_is_leaf(bn->type))
			return split;

		mas->mas_flags |= MA_STATE_REBALANCE;
		if (!bn->slot[split])
			split--;
		return split;
	}

	/*
	 * Although extremely rare, it is possible to enter what is known as
	 * the 3-way split scenario.  The 3-way split comes about by means of a
	 * store of a range that overwrites the end and beginning of two full
	 * nodes.  The result is a set of entries that cannot be stored in 2
	 * nodes.  Sometimes, these two nodes can also be located in different
	 * parent nodes which are also full.  This can carry upwards all the
	 * way to the root in the worst case.
	 */
	if (unlikely(mab_middle_node(bn, split, slot_count))) {
		split = b_end / 3;
		*mid_split = split * 2;
	} else {
		slot_min = mt_min_slots[bn->type];

		*mid_split = 0;
		/*
		 * Avoid having a range less than the slot count unless it
		 * causes one node to be deficient.
		 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
		 */
		while ((split < slot_count - 1) &&
		       ((bn->pivot[split] - min) < slot_count - 1) &&
		       (b_end - split > slot_min))
			split++;
	}

	/* Avoid ending a node on a NULL entry */
	split = mab_no_null_split(bn, split, slot_count);

	if (unlikely(*mid_split))
		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);

	return split;
}

/*
 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
 * and set @b_node->b_end to the next free slot.
 * @mas: The maple state
 * @mas_start: The starting slot to copy
 * @mas_end: The end slot to copy (inclusively)
 * @b_node: The maple_big_node to place the data
 * @mab_start: The starting location in maple_big_node to store the data.
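 *
 * For example, a write that must rebuild a leaf copies the data before the
 * new entry with mas_mab_cp(mas, 0, slot - 1, b_node, 0), inserts the entry,
 * then copies the remaining slots after it (see mas_store_b_node() below).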
 */
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
			      unsigned char mas_end, struct maple_big_node *b_node,
			      unsigned char mab_start)
{
	enum maple_type mt;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots, *gaps;
	int i = mas_start, j = mab_start;
	unsigned char piv_end;

	node = mas_mn(mas);
	mt = mte_node_type(mas->node);
	pivots = ma_pivots(node, mt);
	if (!i) {
		b_node->pivot[j] = pivots[i++];
		if (unlikely(i > mas_end))
			goto complete;
		j++;
	}

	piv_end = min(mas_end, mt_pivots[mt]);
	for (; i < piv_end; i++, j++) {
		b_node->pivot[j] = pivots[i];
		if (unlikely(!b_node->pivot[j]))
			break;

		if (unlikely(mas->max == b_node->pivot[j]))
			goto complete;
	}

	if (likely(i <= mas_end))
		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);

complete:
	b_node->b_end = ++j;
	j -= mab_start;
	slots = ma_slots(node, mt);
	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
		gaps = ma_gaps(node, mt);
		memcpy(b_node->gap + mab_start, gaps + mas_start,
		       sizeof(unsigned long) * j);
	}
}

/*
 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
 * @mas: The maple state
 * @node: The maple node
 * @pivots: pointer to the maple node pivots
 * @mt: The maple type
 * @end: The assumed end
 *
 * Note, end may be incremented within this function but not modified at the
 * source.  This is fine since the metadata is the last thing to be stored in a
 * node during a write.
 */
static inline void mas_leaf_set_meta(struct ma_state *mas,
		struct maple_node *node, unsigned long *pivots,
		enum maple_type mt, unsigned char end)
{
	/* There is no room for metadata already */
	if (mt_pivots[mt] <= end)
		return;

	if (pivots[end] && pivots[end] < mas->max)
		end++;

	if (end < mt_slots[mt] - 1)
		ma_set_meta(node, mt, 0, end);
}

/*
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 * @b_node: the maple_big_node that has the data
 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusively)
 * @mas: The maple state with the maple encoded node.
 * @new_max: Update @mas->max to the last pivot copied when true.
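 *
 * For instance, a node split can rebuild its two new nodes by copying the
 * big node ranges 0..split and split + 1..b_end into them; pivots, slots
 * and (for allocation trees) gaps plus the metadata are all rewritten in
 * one pass.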
2016 */ 2017 static inline void mab_mas_cp(struct maple_big_node *b_node, 2018 unsigned char mab_start, unsigned char mab_end, 2019 struct ma_state *mas, bool new_max) 2020 { 2021 int i, j = 0; 2022 enum maple_type mt = mte_node_type(mas->node); 2023 struct maple_node *node = mte_to_node(mas->node); 2024 void __rcu **slots = ma_slots(node, mt); 2025 unsigned long *pivots = ma_pivots(node, mt); 2026 unsigned long *gaps = NULL; 2027 unsigned char end; 2028 2029 if (mab_end - mab_start > mt_pivots[mt]) 2030 mab_end--; 2031 2032 if (!pivots[mt_pivots[mt] - 1]) 2033 slots[mt_pivots[mt]] = NULL; 2034 2035 i = mab_start; 2036 do { 2037 pivots[j++] = b_node->pivot[i++]; 2038 } while (i <= mab_end && likely(b_node->pivot[i])); 2039 2040 memcpy(slots, b_node->slot + mab_start, 2041 sizeof(void *) * (i - mab_start)); 2042 2043 if (new_max) 2044 mas->max = b_node->pivot[i - 1]; 2045 2046 end = j - 1; 2047 if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) { 2048 unsigned long max_gap = 0; 2049 unsigned char offset = 0; 2050 2051 gaps = ma_gaps(node, mt); 2052 do { 2053 gaps[--j] = b_node->gap[--i]; 2054 if (gaps[j] > max_gap) { 2055 offset = j; 2056 max_gap = gaps[j]; 2057 } 2058 } while (j); 2059 2060 ma_set_meta(node, mt, offset, end); 2061 } else { 2062 mas_leaf_set_meta(mas, node, pivots, mt, end); 2063 } 2064 } 2065 2066 /* 2067 * mas_descend_adopt() - Descend through a sub-tree and adopt children. 2068 * @mas: the maple state with the maple encoded node of the sub-tree. 2069 * 2070 * Descend through a sub-tree and adopt children who do not have the correct 2071 * parents set. Follow the parents which have the correct parents as they are 2072 * the new entries which need to be followed to find other incorrectly set 2073 * parents. 2074 */ 2075 static inline void mas_descend_adopt(struct ma_state *mas) 2076 { 2077 struct ma_state list[3], next[3]; 2078 int i, n; 2079 2080 /* 2081 * At each level there may be up to 3 correct parent pointers which indicates 2082 * the new nodes which need to be walked to find any new nodes at a lower level. 2083 */ 2084 2085 for (i = 0; i < 3; i++) { 2086 list[i] = *mas; 2087 list[i].offset = 0; 2088 next[i].offset = 0; 2089 } 2090 next[0] = *mas; 2091 2092 while (!mte_is_leaf(list[0].node)) { 2093 n = 0; 2094 for (i = 0; i < 3; i++) { 2095 if (mas_is_none(&list[i])) 2096 continue; 2097 2098 if (i && list[i-1].node == list[i].node) 2099 continue; 2100 2101 while ((n < 3) && (mas_new_child(&list[i], &next[n]))) 2102 n++; 2103 2104 mas_adopt_children(&list[i], list[i].node); 2105 } 2106 2107 while (n < 3) 2108 next[n++].node = MAS_NONE; 2109 2110 /* descend by setting the list to the children */ 2111 for (i = 0; i < 3; i++) 2112 list[i] = next[i]; 2113 } 2114 } 2115 2116 /* 2117 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert. 2118 * @mas: The maple state 2119 * @end: The maple node end 2120 * @mt: The maple node type 2121 */ 2122 static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end, 2123 enum maple_type mt) 2124 { 2125 if (!(mas->mas_flags & MA_STATE_BULK)) 2126 return; 2127 2128 if (mte_is_root(mas->node)) 2129 return; 2130 2131 if (end > mt_min_slots[mt]) { 2132 mas->mas_flags &= ~MA_STATE_REBALANCE; 2133 return; 2134 } 2135 } 2136 2137 /* 2138 * mas_store_b_node() - Store an @entry into the b_node while also copying the 2139 * data from a maple encoded node. 
 * @wr_mas: the maple write state
 * @b_node: the maple_big_node to fill with data
 * @offset_end: the offset to end copying
 *
 * Return: The actual end of the data stored in @b_node
 */
static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
		struct maple_big_node *b_node, unsigned char offset_end)
{
	unsigned char slot;
	unsigned char b_end;
	/* Possible underflow of piv will wrap back to 0 before use. */
	unsigned long piv;
	struct ma_state *mas = wr_mas->mas;

	b_node->type = wr_mas->type;
	b_end = 0;
	slot = mas->offset;
	if (slot) {
		/* Copy start data up to insert. */
		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
		b_end = b_node->b_end;
		piv = b_node->pivot[b_end - 1];
	} else
		piv = mas->min - 1;

	if (piv + 1 < mas->index) {
		/* Handle range starting after old range */
		b_node->slot[b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = mas->index - 1 - piv;
		b_node->pivot[b_end++] = mas->index - 1;
	}

	/* Store the new entry. */
	mas->offset = b_end;
	b_node->slot[b_end] = wr_mas->entry;
	b_node->pivot[b_end] = mas->last;

	/* Appended. */
	if (mas->last >= mas->max)
		goto b_end;

	/* Handle new range ending before old range ends */
	piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
	if (piv > mas->last) {
		if (piv == ULONG_MAX)
			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);

		if (offset_end != slot)
			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
							  offset_end);

		b_node->slot[++b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = piv - mas->last + 1;
		b_node->pivot[b_end] = piv;
	}

	slot = offset_end + 1;
	if (slot > wr_mas->node_end)
		goto b_end;

	/* Copy end data to the end of the node. */
	mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
	b_node->b_end--;
	return;

b_end:
	b_node->b_end = b_end;
}

/*
 * mas_prev_sibling() - Find the previous node with the same parent.
 * @mas: the maple state
 *
 * Return: True if there is a previous sibling, false otherwise.
 */
static inline bool mas_prev_sibling(struct ma_state *mas)
{
	unsigned int p_slot = mte_parent_slot(mas->node);

	if (mte_is_root(mas->node))
		return false;

	if (!p_slot)
		return false;

	mas_ascend(mas);
	mas->offset = p_slot - 1;
	mas_descend(mas);
	return true;
}

/*
 * mas_next_sibling() - Find the next node with the same parent.
 * @mas: the maple state
 *
 * Return: true if there is a next sibling, false otherwise.
 */
static inline bool mas_next_sibling(struct ma_state *mas)
{
	MA_STATE(parent, mas->tree, mas->index, mas->last);

	if (mte_is_root(mas->node))
		return false;

	parent = *mas;
	mas_ascend(&parent);
	parent.offset = mte_parent_slot(mas->node) + 1;
	if (parent.offset > mas_data_end(&parent))
		return false;

	*mas = parent;
	mas_descend(mas);
	return true;
}

/*
 * mte_node_or_none() - Return the encoded node or MAS_NONE.
 * @enode: The encoded maple node.
 *
 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
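 *
 * Example (a sketch; mast_cp_to_nodes() below uses it this way):
 *
 *	mast->m->node = mte_node_or_none(middle);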
2263 * 2264 * Return: @enode or MAS_NONE 2265 */ 2266 static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode) 2267 { 2268 if (enode) 2269 return enode; 2270 2271 return ma_enode_ptr(MAS_NONE); 2272 } 2273 2274 /* 2275 * mas_wr_node_walk() - Find the correct offset for the index in the @mas. 2276 * @wr_mas: The maple write state 2277 * 2278 * Uses mas_slot_locked() and does not need to worry about dead nodes. 2279 */ 2280 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas) 2281 { 2282 struct ma_state *mas = wr_mas->mas; 2283 unsigned char count, offset; 2284 2285 if (unlikely(ma_is_dense(wr_mas->type))) { 2286 wr_mas->r_max = wr_mas->r_min = mas->index; 2287 mas->offset = mas->index = mas->min; 2288 return; 2289 } 2290 2291 wr_mas->node = mas_mn(wr_mas->mas); 2292 wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type); 2293 count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type, 2294 wr_mas->pivots, mas->max); 2295 offset = mas->offset; 2296 2297 while (offset < count && mas->index > wr_mas->pivots[offset]) 2298 offset++; 2299 2300 wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max; 2301 wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset); 2302 wr_mas->offset_end = mas->offset = offset; 2303 } 2304 2305 /* 2306 * mas_topiary_range() - Add a range of slots to the topiary. 2307 * @mas: The maple state 2308 * @destroy: The topiary to add the slots (usually destroy) 2309 * @start: The starting slot inclusively 2310 * @end: The end slot inclusively 2311 */ 2312 static inline void mas_topiary_range(struct ma_state *mas, 2313 struct ma_topiary *destroy, unsigned char start, unsigned char end) 2314 { 2315 void __rcu **slots; 2316 unsigned char offset; 2317 2318 MAS_BUG_ON(mas, mte_is_leaf(mas->node)); 2319 2320 slots = ma_slots(mas_mn(mas), mte_node_type(mas->node)); 2321 for (offset = start; offset <= end; offset++) { 2322 struct maple_enode *enode = mas_slot_locked(mas, slots, offset); 2323 2324 if (mte_dead_node(enode)) 2325 continue; 2326 2327 mat_add(destroy, enode); 2328 } 2329 } 2330 2331 /* 2332 * mast_topiary() - Add the portions of the tree to the removal list; either to 2333 * be freed or discarded (destroy walk). 2334 * @mast: The maple_subtree_state. 
2335 */ 2336 static inline void mast_topiary(struct maple_subtree_state *mast) 2337 { 2338 MA_WR_STATE(wr_mas, mast->orig_l, NULL); 2339 unsigned char r_start, r_end; 2340 unsigned char l_start, l_end; 2341 void __rcu **l_slots, **r_slots; 2342 2343 wr_mas.type = mte_node_type(mast->orig_l->node); 2344 mast->orig_l->index = mast->orig_l->last; 2345 mas_wr_node_walk(&wr_mas); 2346 l_start = mast->orig_l->offset + 1; 2347 l_end = mas_data_end(mast->orig_l); 2348 r_start = 0; 2349 r_end = mast->orig_r->offset; 2350 2351 if (r_end) 2352 r_end--; 2353 2354 l_slots = ma_slots(mas_mn(mast->orig_l), 2355 mte_node_type(mast->orig_l->node)); 2356 2357 r_slots = ma_slots(mas_mn(mast->orig_r), 2358 mte_node_type(mast->orig_r->node)); 2359 2360 if ((l_start < l_end) && 2361 mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) { 2362 l_start++; 2363 } 2364 2365 if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) { 2366 if (r_end) 2367 r_end--; 2368 } 2369 2370 if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node)) 2371 return; 2372 2373 /* At the node where left and right sides meet, add the parts between */ 2374 if (mast->orig_l->node == mast->orig_r->node) { 2375 return mas_topiary_range(mast->orig_l, mast->destroy, 2376 l_start, r_end); 2377 } 2378 2379 /* mast->orig_r is different and consumed. */ 2380 if (mte_is_leaf(mast->orig_r->node)) 2381 return; 2382 2383 if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end))) 2384 l_end--; 2385 2386 2387 if (l_start <= l_end) 2388 mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end); 2389 2390 if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start))) 2391 r_start++; 2392 2393 if (r_start <= r_end) 2394 mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end); 2395 } 2396 2397 /* 2398 * mast_rebalance_next() - Rebalance against the next node 2399 * @mast: The maple subtree state 2400 * @old_r: The encoded maple node to the right (next node). 2401 */ 2402 static inline void mast_rebalance_next(struct maple_subtree_state *mast) 2403 { 2404 unsigned char b_end = mast->bn->b_end; 2405 2406 mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node), 2407 mast->bn, b_end); 2408 mast->orig_r->last = mast->orig_r->max; 2409 } 2410 2411 /* 2412 * mast_rebalance_prev() - Rebalance against the previous node 2413 * @mast: The maple subtree state 2414 * @old_l: The encoded maple node to the left (previous node) 2415 */ 2416 static inline void mast_rebalance_prev(struct maple_subtree_state *mast) 2417 { 2418 unsigned char end = mas_data_end(mast->orig_l) + 1; 2419 unsigned char b_end = mast->bn->b_end; 2420 2421 mab_shift_right(mast->bn, end); 2422 mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0); 2423 mast->l->min = mast->orig_l->min; 2424 mast->orig_l->index = mast->orig_l->min; 2425 mast->bn->b_end = end + b_end; 2426 mast->l->offset += end; 2427 } 2428 2429 /* 2430 * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring 2431 * the node to the right. Checking the nodes to the right then the left at each 2432 * level upwards until root is reached. Free and destroy as needed. 2433 * Data is copied into the @mast->bn. 2434 * @mast: The maple_subtree_state. 
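 *
 * Return: true if data was borrowed from a neighbour and copied into
 * @mast->bn, false if the root was reached without finding any.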
2435 */ 2436 static inline 2437 bool mast_spanning_rebalance(struct maple_subtree_state *mast) 2438 { 2439 struct ma_state r_tmp = *mast->orig_r; 2440 struct ma_state l_tmp = *mast->orig_l; 2441 struct maple_enode *ancestor = NULL; 2442 unsigned char start, end; 2443 unsigned char depth = 0; 2444 2445 r_tmp = *mast->orig_r; 2446 l_tmp = *mast->orig_l; 2447 do { 2448 mas_ascend(mast->orig_r); 2449 mas_ascend(mast->orig_l); 2450 depth++; 2451 if (!ancestor && 2452 (mast->orig_r->node == mast->orig_l->node)) { 2453 ancestor = mast->orig_r->node; 2454 end = mast->orig_r->offset - 1; 2455 start = mast->orig_l->offset + 1; 2456 } 2457 2458 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) { 2459 if (!ancestor) { 2460 ancestor = mast->orig_r->node; 2461 start = 0; 2462 } 2463 2464 mast->orig_r->offset++; 2465 do { 2466 mas_descend(mast->orig_r); 2467 mast->orig_r->offset = 0; 2468 depth--; 2469 } while (depth); 2470 2471 mast_rebalance_next(mast); 2472 do { 2473 unsigned char l_off = 0; 2474 struct maple_enode *child = r_tmp.node; 2475 2476 mas_ascend(&r_tmp); 2477 if (ancestor == r_tmp.node) 2478 l_off = start; 2479 2480 if (r_tmp.offset) 2481 r_tmp.offset--; 2482 2483 if (l_off < r_tmp.offset) 2484 mas_topiary_range(&r_tmp, mast->destroy, 2485 l_off, r_tmp.offset); 2486 2487 if (l_tmp.node != child) 2488 mat_add(mast->free, child); 2489 2490 } while (r_tmp.node != ancestor); 2491 2492 *mast->orig_l = l_tmp; 2493 return true; 2494 2495 } else if (mast->orig_l->offset != 0) { 2496 if (!ancestor) { 2497 ancestor = mast->orig_l->node; 2498 end = mas_data_end(mast->orig_l); 2499 } 2500 2501 mast->orig_l->offset--; 2502 do { 2503 mas_descend(mast->orig_l); 2504 mast->orig_l->offset = 2505 mas_data_end(mast->orig_l); 2506 depth--; 2507 } while (depth); 2508 2509 mast_rebalance_prev(mast); 2510 do { 2511 unsigned char r_off; 2512 struct maple_enode *child = l_tmp.node; 2513 2514 mas_ascend(&l_tmp); 2515 if (ancestor == l_tmp.node) 2516 r_off = end; 2517 else 2518 r_off = mas_data_end(&l_tmp); 2519 2520 if (l_tmp.offset < r_off) 2521 l_tmp.offset++; 2522 2523 if (l_tmp.offset < r_off) 2524 mas_topiary_range(&l_tmp, mast->destroy, 2525 l_tmp.offset, r_off); 2526 2527 if (r_tmp.node != child) 2528 mat_add(mast->free, child); 2529 2530 } while (l_tmp.node != ancestor); 2531 2532 *mast->orig_r = r_tmp; 2533 return true; 2534 } 2535 } while (!mte_is_root(mast->orig_r->node)); 2536 2537 *mast->orig_r = r_tmp; 2538 *mast->orig_l = l_tmp; 2539 return false; 2540 } 2541 2542 /* 2543 * mast_ascend_free() - Add current original maple state nodes to the free list 2544 * and ascend. 2545 * @mast: the maple subtree state. 2546 * 2547 * Ascend the original left and right sides and add the previous nodes to the 2548 * free list. Set the slots to point to the correct location in the new nodes. 
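 *
 * (Both original states then re-walk their new parents with
 * mas_wr_node_walk(), so later topiary passes see correct offsets.)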
2549 */ 2550 static inline void 2551 mast_ascend_free(struct maple_subtree_state *mast) 2552 { 2553 MA_WR_STATE(wr_mas, mast->orig_r, NULL); 2554 struct maple_enode *left = mast->orig_l->node; 2555 struct maple_enode *right = mast->orig_r->node; 2556 2557 mas_ascend(mast->orig_l); 2558 mas_ascend(mast->orig_r); 2559 mat_add(mast->free, left); 2560 2561 if (left != right) 2562 mat_add(mast->free, right); 2563 2564 mast->orig_r->offset = 0; 2565 mast->orig_r->index = mast->r->max; 2566 /* last should be larger than or equal to index */ 2567 if (mast->orig_r->last < mast->orig_r->index) 2568 mast->orig_r->last = mast->orig_r->index; 2569 /* 2570 * The node may not contain the value so set slot to ensure all 2571 * of the nodes contents are freed or destroyed. 2572 */ 2573 wr_mas.type = mte_node_type(mast->orig_r->node); 2574 mas_wr_node_walk(&wr_mas); 2575 /* Set up the left side of things */ 2576 mast->orig_l->offset = 0; 2577 mast->orig_l->index = mast->l->min; 2578 wr_mas.mas = mast->orig_l; 2579 wr_mas.type = mte_node_type(mast->orig_l->node); 2580 mas_wr_node_walk(&wr_mas); 2581 2582 mast->bn->type = wr_mas.type; 2583 } 2584 2585 /* 2586 * mas_new_ma_node() - Create and return a new maple node. Helper function. 2587 * @mas: the maple state with the allocations. 2588 * @b_node: the maple_big_node with the type encoding. 2589 * 2590 * Use the node type from the maple_big_node to allocate a new node from the 2591 * ma_state. This function exists mainly for code readability. 2592 * 2593 * Return: A new maple encoded node 2594 */ 2595 static inline struct maple_enode 2596 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node) 2597 { 2598 return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type); 2599 } 2600 2601 /* 2602 * mas_mab_to_node() - Set up right and middle nodes 2603 * 2604 * @mas: the maple state that contains the allocations. 2605 * @b_node: the node which contains the data. 2606 * @left: The pointer which will have the left node 2607 * @right: The pointer which may have the right node 2608 * @middle: the pointer which may have the middle node (rare) 2609 * @mid_split: the split location for the middle node 2610 * 2611 * Return: the split of left. 2612 */ 2613 static inline unsigned char mas_mab_to_node(struct ma_state *mas, 2614 struct maple_big_node *b_node, struct maple_enode **left, 2615 struct maple_enode **right, struct maple_enode **middle, 2616 unsigned char *mid_split, unsigned long min) 2617 { 2618 unsigned char split = 0; 2619 unsigned char slot_count = mt_slots[b_node->type]; 2620 2621 *left = mas_new_ma_node(mas, b_node); 2622 *right = NULL; 2623 *middle = NULL; 2624 *mid_split = 0; 2625 2626 if (b_node->b_end < slot_count) { 2627 split = b_node->b_end; 2628 } else { 2629 split = mab_calc_split(mas, b_node, mid_split, min); 2630 *right = mas_new_ma_node(mas, b_node); 2631 } 2632 2633 if (*mid_split) 2634 *middle = mas_new_ma_node(mas, b_node); 2635 2636 return split; 2637 2638 } 2639 2640 /* 2641 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end 2642 * pointer. 2643 * @b_node - the big node to add the entry 2644 * @mas - the maple state to get the pivot (mas->max) 2645 * @entry - the entry to add, if NULL nothing happens. 
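 *
 * Example (a sketch; matches the use in mas_spanning_rebalance() below):
 *
 *	l_mas.offset = mast->bn->b_end;
 *	mab_set_b_end(mast->bn, &l_mas, left);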
2646 */ 2647 static inline void mab_set_b_end(struct maple_big_node *b_node, 2648 struct ma_state *mas, 2649 void *entry) 2650 { 2651 if (!entry) 2652 return; 2653 2654 b_node->slot[b_node->b_end] = entry; 2655 if (mt_is_alloc(mas->tree)) 2656 b_node->gap[b_node->b_end] = mas_max_gap(mas); 2657 b_node->pivot[b_node->b_end++] = mas->max; 2658 } 2659 2660 /* 2661 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent 2662 * of @mas->node to either @left or @right, depending on @slot and @split 2663 * 2664 * @mas - the maple state with the node that needs a parent 2665 * @left - possible parent 1 2666 * @right - possible parent 2 2667 * @slot - the slot the mas->node was placed 2668 * @split - the split location between @left and @right 2669 */ 2670 static inline void mas_set_split_parent(struct ma_state *mas, 2671 struct maple_enode *left, 2672 struct maple_enode *right, 2673 unsigned char *slot, unsigned char split) 2674 { 2675 if (mas_is_none(mas)) 2676 return; 2677 2678 if ((*slot) <= split) 2679 mas_set_parent(mas, mas->node, left, *slot); 2680 else if (right) 2681 mas_set_parent(mas, mas->node, right, (*slot) - split - 1); 2682 2683 (*slot)++; 2684 } 2685 2686 /* 2687 * mte_mid_split_check() - Check if the next node passes the mid-split 2688 * @**l: Pointer to left encoded maple node. 2689 * @**m: Pointer to middle encoded maple node. 2690 * @**r: Pointer to right encoded maple node. 2691 * @slot: The offset 2692 * @*split: The split location. 2693 * @mid_split: The middle split. 2694 */ 2695 static inline void mte_mid_split_check(struct maple_enode **l, 2696 struct maple_enode **r, 2697 struct maple_enode *right, 2698 unsigned char slot, 2699 unsigned char *split, 2700 unsigned char mid_split) 2701 { 2702 if (*r == right) 2703 return; 2704 2705 if (slot < mid_split) 2706 return; 2707 2708 *l = *r; 2709 *r = right; 2710 *split = mid_split; 2711 } 2712 2713 /* 2714 * mast_set_split_parents() - Helper function to set three nodes parents. Slot 2715 * is taken from @mast->l. 2716 * @mast - the maple subtree state 2717 * @left - the left node 2718 * @right - the right node 2719 * @split - the split location. 2720 */ 2721 static inline void mast_set_split_parents(struct maple_subtree_state *mast, 2722 struct maple_enode *left, 2723 struct maple_enode *middle, 2724 struct maple_enode *right, 2725 unsigned char split, 2726 unsigned char mid_split) 2727 { 2728 unsigned char slot; 2729 struct maple_enode *l = left; 2730 struct maple_enode *r = right; 2731 2732 if (mas_is_none(mast->l)) 2733 return; 2734 2735 if (middle) 2736 r = middle; 2737 2738 slot = mast->l->offset; 2739 2740 mte_mid_split_check(&l, &r, right, slot, &split, mid_split); 2741 mas_set_split_parent(mast->l, l, r, &slot, split); 2742 2743 mte_mid_split_check(&l, &r, right, slot, &split, mid_split); 2744 mas_set_split_parent(mast->m, l, r, &slot, split); 2745 2746 mte_mid_split_check(&l, &r, right, slot, &split, mid_split); 2747 mas_set_split_parent(mast->r, l, r, &slot, split); 2748 } 2749 2750 /* 2751 * mas_wmb_replace() - Write memory barrier and replace 2752 * @mas: The maple state 2753 * @free: the maple topiary list of nodes to free 2754 * @destroy: The maple topiary list of nodes to destroy (walk and free) 2755 * 2756 * Updates gap as necessary. 
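 *
 * (The barrier orders the dead-node marks against publication of the new
 * nodes; only after the replacement are @free and @destroy processed.)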
2757 */ 2758 static inline void mas_wmb_replace(struct ma_state *mas, 2759 struct ma_topiary *free, 2760 struct ma_topiary *destroy) 2761 { 2762 /* All nodes must see old data as dead prior to replacing that data */ 2763 smp_wmb(); /* Needed for RCU */ 2764 2765 /* Insert the new data in the tree */ 2766 mas_replace(mas, true); 2767 2768 if (!mte_is_leaf(mas->node)) 2769 mas_descend_adopt(mas); 2770 2771 mas_mat_free(mas, free); 2772 2773 if (destroy) 2774 mas_mat_destroy(mas, destroy); 2775 2776 if (mte_is_leaf(mas->node)) 2777 return; 2778 2779 mas_update_gap(mas); 2780 } 2781 2782 /* 2783 * mast_new_root() - Set a new tree root during subtree creation 2784 * @mast: The maple subtree state 2785 * @mas: The maple state 2786 */ 2787 static inline void mast_new_root(struct maple_subtree_state *mast, 2788 struct ma_state *mas) 2789 { 2790 mas_mn(mast->l)->parent = 2791 ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT)); 2792 if (!mte_dead_node(mast->orig_l->node) && 2793 !mte_is_root(mast->orig_l->node)) { 2794 do { 2795 mast_ascend_free(mast); 2796 mast_topiary(mast); 2797 } while (!mte_is_root(mast->orig_l->node)); 2798 } 2799 if ((mast->orig_l->node != mas->node) && 2800 (mast->l->depth > mas_mt_height(mas))) { 2801 mat_add(mast->free, mas->node); 2802 } 2803 } 2804 2805 /* 2806 * mast_cp_to_nodes() - Copy data out to nodes. 2807 * @mast: The maple subtree state 2808 * @left: The left encoded maple node 2809 * @middle: The middle encoded maple node 2810 * @right: The right encoded maple node 2811 * @split: The location to split between left and (middle ? middle : right) 2812 * @mid_split: The location to split between middle and right. 2813 */ 2814 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast, 2815 struct maple_enode *left, struct maple_enode *middle, 2816 struct maple_enode *right, unsigned char split, unsigned char mid_split) 2817 { 2818 bool new_lmax = true; 2819 2820 mast->l->node = mte_node_or_none(left); 2821 mast->m->node = mte_node_or_none(middle); 2822 mast->r->node = mte_node_or_none(right); 2823 2824 mast->l->min = mast->orig_l->min; 2825 if (split == mast->bn->b_end) { 2826 mast->l->max = mast->orig_r->max; 2827 new_lmax = false; 2828 } 2829 2830 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax); 2831 2832 if (middle) { 2833 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true); 2834 mast->m->min = mast->bn->pivot[split] + 1; 2835 split = mid_split; 2836 } 2837 2838 mast->r->max = mast->orig_r->max; 2839 if (right) { 2840 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false); 2841 mast->r->min = mast->bn->pivot[split] + 1; 2842 } 2843 } 2844 2845 /* 2846 * mast_combine_cp_left - Copy in the original left side of the tree into the 2847 * combined data set in the maple subtree state big node. 2848 * @mast: The maple subtree state 2849 */ 2850 static inline void mast_combine_cp_left(struct maple_subtree_state *mast) 2851 { 2852 unsigned char l_slot = mast->orig_l->offset; 2853 2854 if (!l_slot) 2855 return; 2856 2857 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0); 2858 } 2859 2860 /* 2861 * mast_combine_cp_right: Copy in the original right side of the tree into the 2862 * combined data set in the maple subtree state big node. 
2863 * @mast: The maple subtree state 2864 */ 2865 static inline void mast_combine_cp_right(struct maple_subtree_state *mast) 2866 { 2867 if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max) 2868 return; 2869 2870 mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1, 2871 mt_slot_count(mast->orig_r->node), mast->bn, 2872 mast->bn->b_end); 2873 mast->orig_r->last = mast->orig_r->max; 2874 } 2875 2876 /* 2877 * mast_sufficient: Check if the maple subtree state has enough data in the big 2878 * node to create at least one sufficient node 2879 * @mast: the maple subtree state 2880 */ 2881 static inline bool mast_sufficient(struct maple_subtree_state *mast) 2882 { 2883 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node)) 2884 return true; 2885 2886 return false; 2887 } 2888 2889 /* 2890 * mast_overflow: Check if there is too much data in the subtree state for a 2891 * single node. 2892 * @mast: The maple subtree state 2893 */ 2894 static inline bool mast_overflow(struct maple_subtree_state *mast) 2895 { 2896 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node)) 2897 return true; 2898 2899 return false; 2900 } 2901 2902 static inline void *mtree_range_walk(struct ma_state *mas) 2903 { 2904 unsigned long *pivots; 2905 unsigned char offset; 2906 struct maple_node *node; 2907 struct maple_enode *next, *last; 2908 enum maple_type type; 2909 void __rcu **slots; 2910 unsigned char end; 2911 unsigned long max, min; 2912 unsigned long prev_max, prev_min; 2913 2914 next = mas->node; 2915 min = mas->min; 2916 max = mas->max; 2917 do { 2918 offset = 0; 2919 last = next; 2920 node = mte_to_node(next); 2921 type = mte_node_type(next); 2922 pivots = ma_pivots(node, type); 2923 end = ma_data_end(node, type, pivots, max); 2924 if (unlikely(ma_dead_node(node))) 2925 goto dead_node; 2926 2927 if (pivots[offset] >= mas->index) { 2928 prev_max = max; 2929 prev_min = min; 2930 max = pivots[offset]; 2931 goto next; 2932 } 2933 2934 do { 2935 offset++; 2936 } while ((offset < end) && (pivots[offset] < mas->index)); 2937 2938 prev_min = min; 2939 min = pivots[offset - 1] + 1; 2940 prev_max = max; 2941 if (likely(offset < end && pivots[offset])) 2942 max = pivots[offset]; 2943 2944 next: 2945 slots = ma_slots(node, type); 2946 next = mt_slot(mas->tree, slots, offset); 2947 if (unlikely(ma_dead_node(node))) 2948 goto dead_node; 2949 } while (!ma_is_leaf(type)); 2950 2951 mas->offset = offset; 2952 mas->index = min; 2953 mas->last = max; 2954 mas->min = prev_min; 2955 mas->max = prev_max; 2956 mas->node = last; 2957 return (void *)next; 2958 2959 dead_node: 2960 mas_reset(mas); 2961 return NULL; 2962 } 2963 2964 /* 2965 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers. 2966 * @mas: The starting maple state 2967 * @mast: The maple_subtree_state, keeps track of 4 maple states. 2968 * @count: The estimated count of iterations needed. 2969 * 2970 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root 2971 * is hit. First @b_node is split into two entries which are inserted into the 2972 * next iteration of the loop. @b_node is returned populated with the final 2973 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the 2974 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last 2975 * to account of what has been copied into the new sub-tree. The update of 2976 * orig_l_mas->last is used in mas_consume to find the slots that will need to 2977 * be either freed or destroyed. 
orig_l_mas->depth keeps track of the height of 2978 * the new sub-tree in case the sub-tree becomes the full tree. 2979 * 2980 * Return: the number of elements in b_node during the last loop. 2981 */ 2982 static int mas_spanning_rebalance(struct ma_state *mas, 2983 struct maple_subtree_state *mast, unsigned char count) 2984 { 2985 unsigned char split, mid_split; 2986 unsigned char slot = 0; 2987 struct maple_enode *left = NULL, *middle = NULL, *right = NULL; 2988 2989 MA_STATE(l_mas, mas->tree, mas->index, mas->index); 2990 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 2991 MA_STATE(m_mas, mas->tree, mas->index, mas->index); 2992 MA_TOPIARY(free, mas->tree); 2993 MA_TOPIARY(destroy, mas->tree); 2994 2995 /* 2996 * The tree needs to be rebalanced and leaves need to be kept at the same level. 2997 * Rebalancing is done by use of the ``struct maple_topiary``. 2998 */ 2999 mast->l = &l_mas; 3000 mast->m = &m_mas; 3001 mast->r = &r_mas; 3002 mast->free = &free; 3003 mast->destroy = &destroy; 3004 l_mas.node = r_mas.node = m_mas.node = MAS_NONE; 3005 3006 /* Check if this is not root and has sufficient data. */ 3007 if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) && 3008 unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type])) 3009 mast_spanning_rebalance(mast); 3010 3011 mast->orig_l->depth = 0; 3012 3013 /* 3014 * Each level of the tree is examined and balanced, pushing data to the left or 3015 * right, or rebalancing against left or right nodes is employed to avoid 3016 * rippling up the tree to limit the amount of churn. Once a new sub-section of 3017 * the tree is created, there may be a mix of new and old nodes. The old nodes 3018 * will have the incorrect parent pointers and currently be in two trees: the 3019 * original tree and the partially new tree. To remedy the parent pointers in 3020 * the old tree, the new data is swapped into the active tree and a walk down 3021 * the tree is performed and the parent pointers are updated. 3022 * See mas_descend_adopt() for more information.. 3023 */ 3024 while (count--) { 3025 mast->bn->b_end--; 3026 mast->bn->type = mte_node_type(mast->orig_l->node); 3027 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle, 3028 &mid_split, mast->orig_l->min); 3029 mast_set_split_parents(mast, left, middle, right, split, 3030 mid_split); 3031 mast_cp_to_nodes(mast, left, middle, right, split, mid_split); 3032 3033 /* 3034 * Copy data from next level in the tree to mast->bn from next 3035 * iteration 3036 */ 3037 memset(mast->bn, 0, sizeof(struct maple_big_node)); 3038 mast->bn->type = mte_node_type(left); 3039 mast->orig_l->depth++; 3040 3041 /* Root already stored in l->node. */ 3042 if (mas_is_root_limits(mast->l)) 3043 goto new_root; 3044 3045 mast_ascend_free(mast); 3046 mast_combine_cp_left(mast); 3047 l_mas.offset = mast->bn->b_end; 3048 mab_set_b_end(mast->bn, &l_mas, left); 3049 mab_set_b_end(mast->bn, &m_mas, middle); 3050 mab_set_b_end(mast->bn, &r_mas, right); 3051 3052 /* Copy anything necessary out of the right node. */ 3053 mast_combine_cp_right(mast); 3054 mast_topiary(mast); 3055 mast->orig_l->last = mast->orig_l->max; 3056 3057 if (mast_sufficient(mast)) 3058 continue; 3059 3060 if (mast_overflow(mast)) 3061 continue; 3062 3063 /* May be a new root stored in mast->bn */ 3064 if (mas_is_root_limits(mast->orig_l)) 3065 break; 3066 3067 mast_spanning_rebalance(mast); 3068 3069 /* rebalancing from other nodes may require another loop. 
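 * If the remaining count was consumed by this pass, one more iteration is
 * forced below so the rebalanced data is split and written out.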
*/ 3070 if (!count) 3071 count++; 3072 } 3073 3074 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), 3075 mte_node_type(mast->orig_l->node)); 3076 mast->orig_l->depth++; 3077 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true); 3078 mas_set_parent(mas, left, l_mas.node, slot); 3079 if (middle) 3080 mas_set_parent(mas, middle, l_mas.node, ++slot); 3081 3082 if (right) 3083 mas_set_parent(mas, right, l_mas.node, ++slot); 3084 3085 if (mas_is_root_limits(mast->l)) { 3086 new_root: 3087 mast_new_root(mast, mas); 3088 } else { 3089 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent; 3090 } 3091 3092 if (!mte_dead_node(mast->orig_l->node)) 3093 mat_add(&free, mast->orig_l->node); 3094 3095 mas->depth = mast->orig_l->depth; 3096 *mast->orig_l = l_mas; 3097 mte_set_node_dead(mas->node); 3098 3099 /* Set up mas for insertion. */ 3100 mast->orig_l->depth = mas->depth; 3101 mast->orig_l->alloc = mas->alloc; 3102 *mas = *mast->orig_l; 3103 mas_wmb_replace(mas, &free, &destroy); 3104 mtree_range_walk(mas); 3105 return mast->bn->b_end; 3106 } 3107 3108 /* 3109 * mas_rebalance() - Rebalance a given node. 3110 * @mas: The maple state 3111 * @b_node: The big maple node. 3112 * 3113 * Rebalance two nodes into a single node or two new nodes that are sufficient. 3114 * Continue upwards until tree is sufficient. 3115 * 3116 * Return: the number of elements in b_node during the last loop. 3117 */ 3118 static inline int mas_rebalance(struct ma_state *mas, 3119 struct maple_big_node *b_node) 3120 { 3121 char empty_count = mas_mt_height(mas); 3122 struct maple_subtree_state mast; 3123 unsigned char shift, b_end = ++b_node->b_end; 3124 3125 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3126 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 3127 3128 trace_ma_op(__func__, mas); 3129 3130 /* 3131 * Rebalancing occurs if a node is insufficient. Data is rebalanced 3132 * against the node to the right if it exists, otherwise the node to the 3133 * left of this node is rebalanced against this node. If rebalancing 3134 * causes just one node to be produced instead of two, then the parent 3135 * is also examined and rebalanced if it is insufficient. Every level 3136 * tries to combine the data in the same way. If one node contains the 3137 * entire range of the tree, then that node is used as a new root node. 3138 */ 3139 mas_node_count(mas, 1 + empty_count * 3); 3140 if (mas_is_err(mas)) 3141 return 0; 3142 3143 mast.orig_l = &l_mas; 3144 mast.orig_r = &r_mas; 3145 mast.bn = b_node; 3146 mast.bn->type = mte_node_type(mas->node); 3147 3148 l_mas = r_mas = *mas; 3149 3150 if (mas_next_sibling(&r_mas)) { 3151 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end); 3152 r_mas.last = r_mas.index = r_mas.max; 3153 } else { 3154 mas_prev_sibling(&l_mas); 3155 shift = mas_data_end(&l_mas) + 1; 3156 mab_shift_right(b_node, shift); 3157 mas->offset += shift; 3158 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0); 3159 b_node->b_end = shift + b_end; 3160 l_mas.index = l_mas.last = l_mas.min; 3161 } 3162 3163 return mas_spanning_rebalance(mas, &mast, empty_count); 3164 } 3165 3166 /* 3167 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple 3168 * state. 3169 * @mas: The maple state 3170 * @end: The end of the left-most node. 3171 * 3172 * During a mass-insert event (such as forking), it may be necessary to 3173 * rebalance the left-most node when it is not sufficient. 
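 *
 * (A sketch of the trigger: bulk mode makes mab_calc_split() set
 * MA_STATE_REBALANCE, and the under-filled left-most leaf then takes
 * entries from its previous sibling before the state is torn down.)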
3174 */ 3175 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end) 3176 { 3177 enum maple_type mt = mte_node_type(mas->node); 3178 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node; 3179 struct maple_enode *eparent; 3180 unsigned char offset, tmp, split = mt_slots[mt] / 2; 3181 void __rcu **l_slots, **slots; 3182 unsigned long *l_pivs, *pivs, gap; 3183 bool in_rcu = mt_in_rcu(mas->tree); 3184 3185 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3186 3187 l_mas = *mas; 3188 mas_prev_sibling(&l_mas); 3189 3190 /* set up node. */ 3191 if (in_rcu) { 3192 /* Allocate for both left and right as well as parent. */ 3193 mas_node_count(mas, 3); 3194 if (mas_is_err(mas)) 3195 return; 3196 3197 newnode = mas_pop_node(mas); 3198 } else { 3199 newnode = &reuse; 3200 } 3201 3202 node = mas_mn(mas); 3203 newnode->parent = node->parent; 3204 slots = ma_slots(newnode, mt); 3205 pivs = ma_pivots(newnode, mt); 3206 left = mas_mn(&l_mas); 3207 l_slots = ma_slots(left, mt); 3208 l_pivs = ma_pivots(left, mt); 3209 if (!l_slots[split]) 3210 split++; 3211 tmp = mas_data_end(&l_mas) - split; 3212 3213 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp); 3214 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp); 3215 pivs[tmp] = l_mas.max; 3216 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end); 3217 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end); 3218 3219 l_mas.max = l_pivs[split]; 3220 mas->min = l_mas.max + 1; 3221 eparent = mt_mk_node(mte_parent(l_mas.node), 3222 mas_parent_type(&l_mas, l_mas.node)); 3223 tmp += end; 3224 if (!in_rcu) { 3225 unsigned char max_p = mt_pivots[mt]; 3226 unsigned char max_s = mt_slots[mt]; 3227 3228 if (tmp < max_p) 3229 memset(pivs + tmp, 0, 3230 sizeof(unsigned long) * (max_p - tmp)); 3231 3232 if (tmp < mt_slots[mt]) 3233 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp)); 3234 3235 memcpy(node, newnode, sizeof(struct maple_node)); 3236 ma_set_meta(node, mt, 0, tmp - 1); 3237 mte_set_pivot(eparent, mte_parent_slot(l_mas.node), 3238 l_pivs[split]); 3239 3240 /* Remove data from l_pivs. */ 3241 tmp = split + 1; 3242 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp)); 3243 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp)); 3244 ma_set_meta(left, mt, 0, split); 3245 3246 goto done; 3247 } 3248 3249 /* RCU requires replacing both l_mas, mas, and parent. */ 3250 mas->node = mt_mk_node(newnode, mt); 3251 ma_set_meta(newnode, mt, 0, tmp); 3252 3253 new_left = mas_pop_node(mas); 3254 new_left->parent = left->parent; 3255 mt = mte_node_type(l_mas.node); 3256 slots = ma_slots(new_left, mt); 3257 pivs = ma_pivots(new_left, mt); 3258 memcpy(slots, l_slots, sizeof(void *) * split); 3259 memcpy(pivs, l_pivs, sizeof(unsigned long) * split); 3260 ma_set_meta(new_left, mt, 0, split); 3261 l_mas.node = mt_mk_node(new_left, mt); 3262 3263 /* replace parent. 
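 * A fresh parent node is populated from the old one and both new children
 * are published with rcu_assign_pointer(), so concurrent readers never see
 * a half-updated parent.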
*/ 3264 offset = mte_parent_slot(mas->node); 3265 mt = mas_parent_type(&l_mas, l_mas.node); 3266 parent = mas_pop_node(mas); 3267 slots = ma_slots(parent, mt); 3268 pivs = ma_pivots(parent, mt); 3269 memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node)); 3270 rcu_assign_pointer(slots[offset], mas->node); 3271 rcu_assign_pointer(slots[offset - 1], l_mas.node); 3272 pivs[offset - 1] = l_mas.max; 3273 eparent = mt_mk_node(parent, mt); 3274 done: 3275 gap = mas_leaf_max_gap(mas); 3276 mte_set_gap(eparent, mte_parent_slot(mas->node), gap); 3277 gap = mas_leaf_max_gap(&l_mas); 3278 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap); 3279 mas_ascend(mas); 3280 3281 if (in_rcu) 3282 mas_replace(mas, false); 3283 3284 mas_update_gap(mas); 3285 } 3286 3287 /* 3288 * mas_split_final_node() - Split the final node in a subtree operation. 3289 * @mast: the maple subtree state 3290 * @mas: The maple state 3291 * @height: The height of the tree in case it's a new root. 3292 */ 3293 static inline bool mas_split_final_node(struct maple_subtree_state *mast, 3294 struct ma_state *mas, int height) 3295 { 3296 struct maple_enode *ancestor; 3297 3298 if (mte_is_root(mas->node)) { 3299 if (mt_is_alloc(mas->tree)) 3300 mast->bn->type = maple_arange_64; 3301 else 3302 mast->bn->type = maple_range_64; 3303 mas->depth = height; 3304 } 3305 /* 3306 * Only a single node is used here, could be root. 3307 * The Big_node data should just fit in a single node. 3308 */ 3309 ancestor = mas_new_ma_node(mas, mast->bn); 3310 mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset); 3311 mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset); 3312 mte_to_node(ancestor)->parent = mas_mn(mas)->parent; 3313 3314 mast->l->node = ancestor; 3315 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true); 3316 mas->offset = mast->bn->b_end - 1; 3317 return true; 3318 } 3319 3320 /* 3321 * mast_fill_bnode() - Copy data into the big node in the subtree state 3322 * @mast: The maple subtree state 3323 * @mas: the maple state 3324 * @skip: The number of entries to skip for new nodes insertion. 3325 */ 3326 static inline void mast_fill_bnode(struct maple_subtree_state *mast, 3327 struct ma_state *mas, 3328 unsigned char skip) 3329 { 3330 bool cp = true; 3331 struct maple_enode *old = mas->node; 3332 unsigned char split; 3333 3334 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap)); 3335 memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot)); 3336 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot)); 3337 mast->bn->b_end = 0; 3338 3339 if (mte_is_root(mas->node)) { 3340 cp = false; 3341 } else { 3342 mas_ascend(mas); 3343 mat_add(mast->free, old); 3344 mas->offset = mte_parent_slot(mas->node); 3345 } 3346 3347 if (cp && mast->l->offset) 3348 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0); 3349 3350 split = mast->bn->b_end; 3351 mab_set_b_end(mast->bn, mast->l, mast->l->node); 3352 mast->r->offset = mast->bn->b_end; 3353 mab_set_b_end(mast->bn, mast->r, mast->r->node); 3354 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max) 3355 cp = false; 3356 3357 if (cp) 3358 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1, 3359 mast->bn, mast->bn->b_end); 3360 3361 mast->bn->b_end--; 3362 mast->bn->type = mte_node_type(mas->node); 3363 } 3364 3365 /* 3366 * mast_split_data() - Split the data in the subtree state big node into regular 3367 * nodes. 
3368 * @mast: The maple subtree state 3369 * @mas: The maple state 3370 * @split: The location to split the big node 3371 */ 3372 static inline void mast_split_data(struct maple_subtree_state *mast, 3373 struct ma_state *mas, unsigned char split) 3374 { 3375 unsigned char p_slot; 3376 3377 mab_mas_cp(mast->bn, 0, split, mast->l, true); 3378 mte_set_pivot(mast->r->node, 0, mast->r->max); 3379 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false); 3380 mast->l->offset = mte_parent_slot(mas->node); 3381 mast->l->max = mast->bn->pivot[split]; 3382 mast->r->min = mast->l->max + 1; 3383 if (mte_is_leaf(mas->node)) 3384 return; 3385 3386 p_slot = mast->orig_l->offset; 3387 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node, 3388 &p_slot, split); 3389 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node, 3390 &p_slot, split); 3391 } 3392 3393 /* 3394 * mas_push_data() - Instead of splitting a node, it is beneficial to push the 3395 * data to the right or left node if there is room. 3396 * @mas: The maple state 3397 * @height: The current height of the maple state 3398 * @mast: The maple subtree state 3399 * @left: Push left or not. 3400 * 3401 * Keeping the height of the tree low means faster lookups. 3402 * 3403 * Return: True if pushed, false otherwise. 3404 */ 3405 static inline bool mas_push_data(struct ma_state *mas, int height, 3406 struct maple_subtree_state *mast, bool left) 3407 { 3408 unsigned char slot_total = mast->bn->b_end; 3409 unsigned char end, space, split; 3410 3411 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last); 3412 tmp_mas = *mas; 3413 tmp_mas.depth = mast->l->depth; 3414 3415 if (left && !mas_prev_sibling(&tmp_mas)) 3416 return false; 3417 else if (!left && !mas_next_sibling(&tmp_mas)) 3418 return false; 3419 3420 end = mas_data_end(&tmp_mas); 3421 slot_total += end; 3422 space = 2 * mt_slot_count(mas->node) - 2; 3423 /* -2 instead of -1 to ensure there isn't a triple split */ 3424 if (ma_is_leaf(mast->bn->type)) 3425 space--; 3426 3427 if (mas->max == ULONG_MAX) 3428 space--; 3429 3430 if (slot_total >= space) 3431 return false; 3432 3433 /* Get the data; Fill mast->bn */ 3434 mast->bn->b_end++; 3435 if (left) { 3436 mab_shift_right(mast->bn, end + 1); 3437 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0); 3438 mast->bn->b_end = slot_total + 1; 3439 } else { 3440 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end); 3441 } 3442 3443 /* Configure mast for splitting of mast->bn */ 3444 split = mt_slots[mast->bn->type] - 2; 3445 if (left) { 3446 /* Switch mas to prev node */ 3447 mat_add(mast->free, mas->node); 3448 *mas = tmp_mas; 3449 /* Start using mast->l for the left side. */ 3450 tmp_mas.node = mast->l->node; 3451 *mast->l = tmp_mas; 3452 } else { 3453 mat_add(mast->free, tmp_mas.node); 3454 tmp_mas.node = mast->r->node; 3455 *mast->r = tmp_mas; 3456 split = slot_total - split; 3457 } 3458 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]); 3459 /* Update parent slot for split calculation. */ 3460 if (left) 3461 mast->orig_l->offset += end + 1; 3462 3463 mast_split_data(mast, mas, split); 3464 mast_fill_bnode(mast, mas, 2); 3465 mas_split_final_node(mast, mas, height + 1); 3466 return true; 3467 } 3468 3469 /* 3470 * mas_split() - Split data that is too big for one node into two. 3471 * @mas: The maple state 3472 * @b_node: The maple big node 3473 * Return: 1 on success, 0 on failure. 
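 *
 * Reached from mas_commit_b_node() when the big node no longer fits the
 * node type (a sketch of that guard):
 *
 *	if (b_end >= mt_slots[b_type])
 *		return mas_split(wr_mas->mas, b_node);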
3474 */ 3475 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) 3476 { 3477 struct maple_subtree_state mast; 3478 int height = 0; 3479 unsigned char mid_split, split = 0; 3480 3481 /* 3482 * Splitting is handled differently from any other B-tree; the Maple 3483 * Tree splits upwards. Splitting up means that the split operation 3484 * occurs when the walk of the tree hits the leaves and not on the way 3485 * down. The reason for splitting up is that it is impossible to know 3486 * how much space will be needed until the leaf is (or leaves are) 3487 * reached. Since overwriting data is allowed and a range could 3488 * overwrite more than one range or result in changing one entry into 3 3489 * entries, it is impossible to know if a split is required until the 3490 * data is examined. 3491 * 3492 * Splitting is a balancing act between keeping allocations to a minimum 3493 * and avoiding a 'jitter' event where a tree is expanded to make room 3494 * for an entry followed by a contraction when the entry is removed. To 3495 * accomplish the balance, there are empty slots remaining in both left 3496 * and right nodes after a split. 3497 */ 3498 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3499 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 3500 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last); 3501 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); 3502 MA_TOPIARY(mat, mas->tree); 3503 3504 trace_ma_op(__func__, mas); 3505 mas->depth = mas_mt_height(mas); 3506 /* Allocation failures will happen early. */ 3507 mas_node_count(mas, 1 + mas->depth * 2); 3508 if (mas_is_err(mas)) 3509 return 0; 3510 3511 mast.l = &l_mas; 3512 mast.r = &r_mas; 3513 mast.orig_l = &prev_l_mas; 3514 mast.orig_r = &prev_r_mas; 3515 mast.free = &mat; 3516 mast.bn = b_node; 3517 3518 while (height++ <= mas->depth) { 3519 if (mt_slots[b_node->type] > b_node->b_end) { 3520 mas_split_final_node(&mast, mas, height); 3521 break; 3522 } 3523 3524 l_mas = r_mas = *mas; 3525 l_mas.node = mas_new_ma_node(mas, b_node); 3526 r_mas.node = mas_new_ma_node(mas, b_node); 3527 /* 3528 * Another way that 'jitter' is avoided is to terminate a split up early if the 3529 * left or right node has space to spare. This is referred to as "pushing left" 3530 * or "pushing right" and is similar to the B* tree, except the nodes left or 3531 * right can rarely be reused due to RCU, but the ripple upwards is halted which 3532 * is a significant savings. 3533 */ 3534 /* Try to push left. */ 3535 if (mas_push_data(mas, height, &mast, true)) 3536 break; 3537 3538 /* Try to push right. */ 3539 if (mas_push_data(mas, height, &mast, false)) 3540 break; 3541 3542 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min); 3543 mast_split_data(&mast, mas, split); 3544 /* 3545 * Usually correct, mab_mas_cp in the above call overwrites 3546 * r->max. 3547 */ 3548 mast.r->max = mas->max; 3549 mast_fill_bnode(&mast, mas, 1); 3550 prev_l_mas = *mast.l; 3551 prev_r_mas = *mast.r; 3552 } 3553 3554 /* Set the original node as dead */ 3555 mat_add(mast.free, mas->node); 3556 mas->node = l_mas.node; 3557 mas_wmb_replace(mas, mast.free, NULL); 3558 mtree_range_walk(mas); 3559 return 1; 3560 } 3561 3562 /* 3563 * mas_reuse_node() - Reuse the node to store the data. 3564 * @wr_mas: The maple write state 3565 * @bn: The maple big node 3566 * @end: The end of the data. 3567 * 3568 * Will always return false in RCU mode. 3569 * 3570 * Return: True if node was reused, false otherwise. 
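 *
 * (Reusing a node in place would be visible to concurrent readers, hence
 * the unconditional refusal under RCU; the caller allocates instead.)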
3571 */ 3572 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas, 3573 struct maple_big_node *bn, unsigned char end) 3574 { 3575 /* Need to be rcu safe. */ 3576 if (mt_in_rcu(wr_mas->mas->tree)) 3577 return false; 3578 3579 if (end > bn->b_end) { 3580 int clear = mt_slots[wr_mas->type] - bn->b_end; 3581 3582 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--); 3583 memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear); 3584 } 3585 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false); 3586 return true; 3587 } 3588 3589 /* 3590 * mas_commit_b_node() - Commit the big node into the tree. 3591 * @wr_mas: The maple write state 3592 * @b_node: The maple big node 3593 * @end: The end of the data. 3594 */ 3595 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas, 3596 struct maple_big_node *b_node, unsigned char end) 3597 { 3598 struct maple_node *node; 3599 unsigned char b_end = b_node->b_end; 3600 enum maple_type b_type = b_node->type; 3601 3602 if ((b_end < mt_min_slots[b_type]) && 3603 (!mte_is_root(wr_mas->mas->node)) && 3604 (mas_mt_height(wr_mas->mas) > 1)) 3605 return mas_rebalance(wr_mas->mas, b_node); 3606 3607 if (b_end >= mt_slots[b_type]) 3608 return mas_split(wr_mas->mas, b_node); 3609 3610 if (mas_reuse_node(wr_mas, b_node, end)) 3611 goto reuse_node; 3612 3613 mas_node_count(wr_mas->mas, 1); 3614 if (mas_is_err(wr_mas->mas)) 3615 return 0; 3616 3617 node = mas_pop_node(wr_mas->mas); 3618 node->parent = mas_mn(wr_mas->mas)->parent; 3619 wr_mas->mas->node = mt_mk_node(node, b_type); 3620 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false); 3621 mas_replace(wr_mas->mas, false); 3622 reuse_node: 3623 mas_update_gap(wr_mas->mas); 3624 return 1; 3625 } 3626 3627 /* 3628 * mas_root_expand() - Expand a root to a node 3629 * @mas: The maple state 3630 * @entry: The entry to store into the tree 3631 */ 3632 static inline int mas_root_expand(struct ma_state *mas, void *entry) 3633 { 3634 void *contents = mas_root_locked(mas); 3635 enum maple_type type = maple_leaf_64; 3636 struct maple_node *node; 3637 void __rcu **slots; 3638 unsigned long *pivots; 3639 int slot = 0; 3640 3641 mas_node_count(mas, 1); 3642 if (unlikely(mas_is_err(mas))) 3643 return 0; 3644 3645 node = mas_pop_node(mas); 3646 pivots = ma_pivots(node, type); 3647 slots = ma_slots(node, type); 3648 node->parent = ma_parent_ptr( 3649 ((unsigned long)mas->tree | MA_ROOT_PARENT)); 3650 mas->node = mt_mk_node(node, type); 3651 3652 if (mas->index) { 3653 if (contents) { 3654 rcu_assign_pointer(slots[slot], contents); 3655 if (likely(mas->index > 1)) 3656 slot++; 3657 } 3658 pivots[slot++] = mas->index - 1; 3659 } 3660 3661 rcu_assign_pointer(slots[slot], entry); 3662 mas->offset = slot; 3663 pivots[slot] = mas->last; 3664 if (mas->last != ULONG_MAX) 3665 pivots[++slot] = ULONG_MAX; 3666 3667 mas->depth = 1; 3668 mas_set_height(mas); 3669 ma_set_meta(node, maple_leaf_64, 0, slot); 3670 /* swap the new root into the tree */ 3671 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 3672 return slot; 3673 } 3674 3675 static inline void mas_store_root(struct ma_state *mas, void *entry) 3676 { 3677 if (likely((mas->last != 0) || (mas->index != 0))) 3678 mas_root_expand(mas, entry); 3679 else if (((unsigned long) (entry) & 3) == 2) 3680 mas_root_expand(mas, entry); 3681 else { 3682 rcu_assign_pointer(mas->tree->ma_root, entry); 3683 mas->node = MAS_START; 3684 } 3685 } 3686 3687 /* 3688 * mas_is_span_wr() - Check if the write needs to be treated as a write that 3689 * spans the node. 
 * @wr_mas: The maple write state
 *
 * Spanning writes are writes that start in one node and end in another OR if
 * the write of a %NULL will cause the node to end with a %NULL.
 *
 * Return: True if this is a spanning write, false otherwise.
 */
static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
{
	unsigned long max = wr_mas->r_max;
	unsigned long last = wr_mas->mas->last;
	enum maple_type type = wr_mas->type;
	void *entry = wr_mas->entry;

	/* Contained in this pivot, fast path */
	if (last < max)
		return false;

	if (ma_is_leaf(type)) {
		max = wr_mas->mas->max;
		if (last < max)
			return false;
	}

	if (last == max) {
		/*
		 * The last entry of leaf node cannot be NULL unless it is the
		 * rightmost node (writing ULONG_MAX), otherwise it spans slots.
		 */
		if (entry || last == ULONG_MAX)
			return false;
	}

	trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
	return true;
}

static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
{
	wr_mas->type = mte_node_type(wr_mas->mas->node);
	mas_wr_node_walk(wr_mas);
	wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
}

static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
{
	wr_mas->mas->max = wr_mas->r_max;
	wr_mas->mas->min = wr_mas->r_min;
	wr_mas->mas->node = wr_mas->content;
	wr_mas->mas->offset = 0;
	wr_mas->mas->depth++;
}

/*
 * mas_wr_walk() - Walk the tree for a write.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 *
 * Return: True if it's contained in a node, false on spanning write.
 */
static bool mas_wr_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		if (unlikely(mas_is_span_wr(wr_mas)))
			return false;

		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;

		mas_wr_walk_traverse(wr_mas);
	}

	return true;
}

static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;
		mas_wr_walk_traverse(wr_mas);
	}
	return true;
}

/*
 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
 * @l_wr_mas: The left maple write state
 * @r_wr_mas: The right maple write state
 */
static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
					    struct ma_wr_state *r_wr_mas)
{
	struct ma_state *r_mas = r_wr_mas->mas;
	struct ma_state *l_mas = l_wr_mas->mas;
	unsigned char l_slot;

	l_slot = l_mas->offset;
	if (!l_wr_mas->content)
		l_mas->index = l_wr_mas->r_min;

	if ((l_mas->index == l_wr_mas->r_min) &&
	    (l_slot &&
	     !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
		if (l_slot > 1)
			l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
		else
			l_mas->index = l_mas->min;

		l_mas->offset = l_slot - 1;
	}

	if (!r_wr_mas->content) {
		if (r_mas->last < r_wr_mas->r_max)
			r_mas->last = r_wr_mas->r_max;
		r_mas->offset++;
	} else if ((r_mas->last == r_wr_mas->r_max) &&
		   (r_mas->last < r_mas->max) &&
		   !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
		r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
					     r_mas->offset + 1, r_wr_mas->type);
		r_mas->offset++;
	}
}

static inline void *mas_state_walk(struct ma_state *mas)
{
	void *entry;

	entry = mas_start(mas);
	if (mas_is_none(mas))
		return NULL;

	if (mas_is_ptr(mas))
		return entry;

	return mtree_range_walk(mas);
}

/*
 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
 * to date.
 * @mas: The maple state.
 *
 * Note: Leaves mas in undesirable state.
 * Return: The entry for @mas->index or %NULL on dead node.
 */
static inline void *mtree_lookup_walk(struct ma_state *mas)
{
	unsigned long *pivots;
	unsigned char offset;
	struct maple_node *node;
	struct maple_enode *next;
	enum maple_type type;
	void __rcu **slots;
	unsigned char end;
	unsigned long max;

	next = mas->node;
	max = ULONG_MAX;
	do {
		offset = 0;
		node = mte_to_node(next);
		type = mte_node_type(next);
		pivots = ma_pivots(node, type);
		end = ma_data_end(node, type, pivots, max);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;
		do {
			if (pivots[offset] >= mas->index) {
				max = pivots[offset];
				break;
			}
		} while (++offset < end);

		slots = ma_slots(node, type);
		next = mt_slot(mas->tree, slots, offset);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;
	} while (!ma_is_leaf(type));

	return (void *)next;

dead_node:
	mas_reset(mas);
	return NULL;
}

/*
 * mas_new_root() - Create a new root node that only contains the entry passed
 * in.
 * @mas: The maple state
 * @entry: The entry to store.
 *
 * Only valid when the index == 0 and the last == ULONG_MAX
 *
 * Return: 0 on error, 1 on success.
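 *
 * For example, mas_wr_spanning_store() below takes this path once expanded
 * NULLs cover everything (a sketch):
 *
 *	mas_set_range(mas, 0, ULONG_MAX);
 *	return mas_new_root(mas, wr_mas->entry);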
3901 */ 3902 static inline int mas_new_root(struct ma_state *mas, void *entry) 3903 { 3904 struct maple_enode *root = mas_root_locked(mas); 3905 enum maple_type type = maple_leaf_64; 3906 struct maple_node *node; 3907 void __rcu **slots; 3908 unsigned long *pivots; 3909 3910 if (!entry && !mas->index && mas->last == ULONG_MAX) { 3911 mas->depth = 0; 3912 mas_set_height(mas); 3913 rcu_assign_pointer(mas->tree->ma_root, entry); 3914 mas->node = MAS_START; 3915 goto done; 3916 } 3917 3918 mas_node_count(mas, 1); 3919 if (mas_is_err(mas)) 3920 return 0; 3921 3922 node = mas_pop_node(mas); 3923 pivots = ma_pivots(node, type); 3924 slots = ma_slots(node, type); 3925 node->parent = ma_parent_ptr( 3926 ((unsigned long)mas->tree | MA_ROOT_PARENT)); 3927 mas->node = mt_mk_node(node, type); 3928 rcu_assign_pointer(slots[0], entry); 3929 pivots[0] = mas->last; 3930 mas->depth = 1; 3931 mas_set_height(mas); 3932 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 3933 3934 done: 3935 if (xa_is_node(root)) 3936 mte_destroy_walk(root, mas->tree); 3937 3938 return 1; 3939 } 3940 /* 3941 * mas_wr_spanning_store() - Create a subtree with the store operation completed 3942 * and new nodes where necessary, then place the sub-tree in the actual tree. 3943 * Note that mas is expected to point to the node which caused the store to 3944 * span. 3945 * @wr_mas: The maple write state 3946 * 3947 * Return: 0 on error, positive on success. 3948 */ 3949 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) 3950 { 3951 struct maple_subtree_state mast; 3952 struct maple_big_node b_node; 3953 struct ma_state *mas; 3954 unsigned char height; 3955 3956 /* Left and Right side of spanning store */ 3957 MA_STATE(l_mas, NULL, 0, 0); 3958 MA_STATE(r_mas, NULL, 0, 0); 3959 3960 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry); 3961 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry); 3962 3963 /* 3964 * A store operation that spans multiple nodes is called a spanning 3965 * store and is handled early in the store call stack by the function 3966 * mas_is_span_wr(). When a spanning store is identified, the maple 3967 * state is duplicated. The first maple state walks the left tree path 3968 * to ``index``, the duplicate walks the right tree path to ``last``. 3969 * The data in the two nodes are combined into a single node, two nodes, 3970 * or possibly three nodes (see the 3-way split above). A ``NULL`` 3971 * written to the last entry of a node is considered a spanning store as 3972 * a rebalance is required for the operation to complete and an overflow 3973 * of data may happen. 3974 */ 3975 mas = wr_mas->mas; 3976 trace_ma_op(__func__, mas); 3977 3978 if (unlikely(!mas->index && mas->last == ULONG_MAX)) 3979 return mas_new_root(mas, wr_mas->entry); 3980 /* 3981 * Node rebalancing may occur due to this store, so there may be three new 3982 * entries per level plus a new root. 3983 */ 3984 height = mas_mt_height(mas); 3985 mas_node_count(mas, 1 + height * 3); 3986 if (mas_is_err(mas)) 3987 return 0; 3988 3989 /* 3990 * Set up right side. Need to get to the next offset after the spanning 3991 * store to ensure it's not NULL and to combine both the next node and 3992 * the node with the start together. 3993 */ 3994 r_mas = *mas; 3995 /* Avoid overflow, walk to next slot in the tree. */ 3996 if (r_mas.last + 1) 3997 r_mas.last++; 3998 3999 r_mas.index = r_mas.last; 4000 mas_wr_walk_index(&r_wr_mas); 4001 r_mas.last = r_mas.index = mas->last; 4002 4003 /* Set up left side. 
	 */
4004 	l_mas = *mas;
4005 	mas_wr_walk_index(&l_wr_mas);
4006 
4007 	if (!wr_mas->entry) {
4008 		mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
4009 		mas->offset = l_mas.offset;
4010 		mas->index = l_mas.index;
4011 		mas->last = l_mas.last = r_mas.last;
4012 	}
4013 
4014 	/* Expanding %NULLs may make this cover the entire range. */
4015 	if (!l_mas.index && r_mas.last == ULONG_MAX) {
4016 		mas_set_range(mas, 0, ULONG_MAX);
4017 		return mas_new_root(mas, wr_mas->entry);
4018 	}
4019 
4020 	memset(&b_node, 0, sizeof(struct maple_big_node));
4021 	/* Copy l_mas and store the value in b_node. */
4022 	mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
4023 	/* Copy r_mas into b_node. */
4024 	if (r_mas.offset <= r_wr_mas.node_end)
4025 		mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
4026 			   &b_node, b_node.b_end + 1);
4027 	else
4028 		b_node.b_end++;
4029 
4030 	/* Stop spanning searches by searching for just index. */
4031 	l_mas.index = l_mas.last = mas->index;
4032 
4033 	mast.bn = &b_node;
4034 	mast.orig_l = &l_mas;
4035 	mast.orig_r = &r_mas;
4036 	/* Combine l_mas and r_mas and split them up evenly again. */
4037 	return mas_spanning_rebalance(mas, &mast, height + 1);
4038 }
4039 
4040 /*
4041 * mas_wr_node_store() - Attempt to store the value in a node
4042 * @wr_mas: The maple write state
4043 *
4044 * Attempts to reuse the node, but may allocate.
4045 *
4046 * Return: True if stored, false otherwise
4047 */
4048 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
4049 				     unsigned char new_end)
4050 {
4051 	struct ma_state *mas = wr_mas->mas;
4052 	void __rcu **dst_slots;
4053 	unsigned long *dst_pivots;
4054 	unsigned char dst_offset, offset_end = wr_mas->offset_end;
4055 	struct maple_node reuse, *newnode;
4056 	unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
4057 	bool in_rcu = mt_in_rcu(mas->tree);
4058 
4059 	/* The node would be below minimum occupancy after the store; use the slow path. */
4060 	if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
4061 	    !(mas->mas_flags & MA_STATE_BULK))
4062 		return false;
4063 
4064 	if (mas->last == wr_mas->end_piv)
4065 		offset_end++; /* don't copy this offset */
4066 	else if (unlikely(wr_mas->r_max == ULONG_MAX))
4067 		mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
4068 
4069 	/* Set up the new node. */
4070 	if (in_rcu) {
4071 		mas_node_count(mas, 1);
4072 		if (mas_is_err(mas))
4073 			return false;
4074 
4075 		newnode = mas_pop_node(mas);
4076 	} else {
4077 		memset(&reuse, 0, sizeof(struct maple_node));
4078 		newnode = &reuse;
4079 	}
4080 
4081 	newnode->parent = mas_mn(mas)->parent;
4082 	dst_pivots = ma_pivots(newnode, wr_mas->type);
4083 	dst_slots = ma_slots(newnode, wr_mas->type);
4084 	/* Copy from start to insert point */
4085 	memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
4086 	memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
4087 
4088 	/* Handle insert of new range starting after old range */
4089 	if (wr_mas->r_min < mas->index) {
4090 		rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
4091 		dst_pivots[mas->offset++] = mas->index - 1;
4092 	}
4093 
4094 	/* Store the new entry and range end. */
4095 	if (mas->offset < node_pivots)
4096 		dst_pivots[mas->offset] = mas->last;
4097 	rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
4098 
4099 	/*
4100 	 * This range wrote to the end of the node or it overwrote the rest of
4101 	 * the data.
4102 	 */
4103 	if (offset_end > wr_mas->node_end)
4104 		goto done;
4105 
4106 	dst_offset = mas->offset + 1;
4107 	/* Copy to the end of node if necessary.
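	 * Only copy_size - 1 pivots are copied here; the pivot for the final
	 * slot is either written as mas->max below (when room remains) or is
	 * implied by the node maximum.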
*/ 4108 copy_size = wr_mas->node_end - offset_end + 1; 4109 memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end, 4110 sizeof(void *) * copy_size); 4111 memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end, 4112 sizeof(unsigned long) * (copy_size - 1)); 4113 4114 if (new_end < node_pivots) 4115 dst_pivots[new_end] = mas->max; 4116 4117 done: 4118 mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end); 4119 if (in_rcu) { 4120 mte_set_node_dead(mas->node); 4121 mas->node = mt_mk_node(newnode, wr_mas->type); 4122 mas_replace(mas, false); 4123 } else { 4124 memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); 4125 } 4126 trace_ma_write(__func__, mas, 0, wr_mas->entry); 4127 mas_update_gap(mas); 4128 return true; 4129 } 4130 4131 /* 4132 * mas_wr_slot_store: Attempt to store a value in a slot. 4133 * @wr_mas: the maple write state 4134 * 4135 * Return: True if stored, false otherwise 4136 */ 4137 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas) 4138 { 4139 struct ma_state *mas = wr_mas->mas; 4140 unsigned char offset = mas->offset; 4141 void __rcu **slots = wr_mas->slots; 4142 bool gap = false; 4143 4144 gap |= !mt_slot_locked(mas->tree, slots, offset); 4145 gap |= !mt_slot_locked(mas->tree, slots, offset + 1); 4146 4147 if (wr_mas->offset_end - offset == 1) { 4148 if (mas->index == wr_mas->r_min) { 4149 /* Overwriting the range and a part of the next one */ 4150 rcu_assign_pointer(slots[offset], wr_mas->entry); 4151 wr_mas->pivots[offset] = mas->last; 4152 } else { 4153 /* Overwriting a part of the range and the next one */ 4154 rcu_assign_pointer(slots[offset + 1], wr_mas->entry); 4155 wr_mas->pivots[offset] = mas->index - 1; 4156 mas->offset++; /* Keep mas accurate. */ 4157 } 4158 } else if (!mt_in_rcu(mas->tree)) { 4159 /* 4160 * Expand the range, only partially overwriting the previous and 4161 * next ranges 4162 */ 4163 gap |= !mt_slot_locked(mas->tree, slots, offset + 2); 4164 rcu_assign_pointer(slots[offset + 1], wr_mas->entry); 4165 wr_mas->pivots[offset] = mas->index - 1; 4166 wr_mas->pivots[offset + 1] = mas->last; 4167 mas->offset++; /* Keep mas accurate. */ 4168 } else { 4169 return false; 4170 } 4171 4172 trace_ma_write(__func__, mas, 0, wr_mas->entry); 4173 /* 4174 * Only update gap when the new entry is empty or there is an empty 4175 * entry in the original two ranges. 
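	 * A non-%NULL entry written over non-%NULL slots cannot change the
	 * size of any gap, so the gap update can safely be skipped.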
4176 */ 4177 if (!wr_mas->entry || gap) 4178 mas_update_gap(mas); 4179 4180 return true; 4181 } 4182 4183 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) 4184 { 4185 while ((wr_mas->offset_end < wr_mas->node_end) && 4186 (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end])) 4187 wr_mas->offset_end++; 4188 4189 if (wr_mas->offset_end < wr_mas->node_end) 4190 wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end]; 4191 else 4192 wr_mas->end_piv = wr_mas->mas->max; 4193 } 4194 4195 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) 4196 { 4197 struct ma_state *mas = wr_mas->mas; 4198 4199 if (!wr_mas->slots[wr_mas->offset_end]) { 4200 /* If this one is null, the next and prev are not */ 4201 mas->last = wr_mas->end_piv; 4202 } else { 4203 /* Check next slot(s) if we are overwriting the end */ 4204 if ((mas->last == wr_mas->end_piv) && 4205 (wr_mas->node_end != wr_mas->offset_end) && 4206 !wr_mas->slots[wr_mas->offset_end + 1]) { 4207 wr_mas->offset_end++; 4208 if (wr_mas->offset_end == wr_mas->node_end) 4209 mas->last = mas->max; 4210 else 4211 mas->last = wr_mas->pivots[wr_mas->offset_end]; 4212 wr_mas->end_piv = mas->last; 4213 } 4214 } 4215 4216 if (!wr_mas->content) { 4217 /* If this one is null, the next and prev are not */ 4218 mas->index = wr_mas->r_min; 4219 } else { 4220 /* Check prev slot if we are overwriting the start */ 4221 if (mas->index == wr_mas->r_min && mas->offset && 4222 !wr_mas->slots[mas->offset - 1]) { 4223 mas->offset--; 4224 wr_mas->r_min = mas->index = 4225 mas_safe_min(mas, wr_mas->pivots, mas->offset); 4226 wr_mas->r_max = wr_mas->pivots[mas->offset]; 4227 } 4228 } 4229 } 4230 4231 static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas) 4232 { 4233 struct ma_state *mas = wr_mas->mas; 4234 unsigned char new_end = wr_mas->node_end + 2; 4235 4236 new_end -= wr_mas->offset_end - mas->offset; 4237 if (wr_mas->r_min == mas->index) 4238 new_end--; 4239 4240 if (wr_mas->end_piv == mas->last) 4241 new_end--; 4242 4243 return new_end; 4244 } 4245 4246 /* 4247 * mas_wr_append: Attempt to append 4248 * @wr_mas: the maple write state 4249 * 4250 * Return: True if appended, false otherwise 4251 */ 4252 static inline bool mas_wr_append(struct ma_wr_state *wr_mas, 4253 unsigned char new_end) 4254 { 4255 unsigned char end = wr_mas->node_end; 4256 struct ma_state *mas = wr_mas->mas; 4257 unsigned char node_pivots = mt_pivots[wr_mas->type]; 4258 4259 if (mas->offset != wr_mas->node_end) 4260 return false; 4261 4262 if (new_end < node_pivots) { 4263 wr_mas->pivots[new_end] = wr_mas->pivots[end]; 4264 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end); 4265 } 4266 4267 if (new_end == wr_mas->node_end + 1) { 4268 if (mas->last == wr_mas->r_max) { 4269 /* Append to end of range */ 4270 rcu_assign_pointer(wr_mas->slots[new_end], 4271 wr_mas->entry); 4272 wr_mas->pivots[end] = mas->index - 1; 4273 mas->offset = new_end; 4274 } else { 4275 /* Append to start of range */ 4276 rcu_assign_pointer(wr_mas->slots[new_end], 4277 wr_mas->content); 4278 wr_mas->pivots[end] = mas->last; 4279 rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry); 4280 } 4281 } else { 4282 /* Append to the range without touching any boundaries. 
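		 * The old entry keeps [r_min, index - 1], the new entry takes
		 * [index, last], and the old entry is written again to cover
		 * [last + 1, old maximum].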
*/ 4283 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content); 4284 wr_mas->pivots[end + 1] = mas->last; 4285 rcu_assign_pointer(wr_mas->slots[end + 1], wr_mas->entry); 4286 wr_mas->pivots[end] = mas->index - 1; 4287 mas->offset = end + 1; 4288 } 4289 4290 if (!wr_mas->content || !wr_mas->entry) 4291 mas_update_gap(mas); 4292 4293 return true; 4294 } 4295 4296 /* 4297 * mas_wr_bnode() - Slow path for a modification. 4298 * @wr_mas: The write maple state 4299 * 4300 * This is where split, rebalance end up. 4301 */ 4302 static void mas_wr_bnode(struct ma_wr_state *wr_mas) 4303 { 4304 struct maple_big_node b_node; 4305 4306 trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry); 4307 memset(&b_node, 0, sizeof(struct maple_big_node)); 4308 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end); 4309 mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end); 4310 } 4311 4312 static inline void mas_wr_modify(struct ma_wr_state *wr_mas) 4313 { 4314 struct ma_state *mas = wr_mas->mas; 4315 unsigned char new_end; 4316 4317 /* Direct replacement */ 4318 if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) { 4319 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry); 4320 if (!!wr_mas->entry ^ !!wr_mas->content) 4321 mas_update_gap(mas); 4322 return; 4323 } 4324 4325 /* 4326 * new_end exceeds the size of the maple node and cannot enter the fast 4327 * path. 4328 */ 4329 new_end = mas_wr_new_end(wr_mas); 4330 if (new_end >= mt_slots[wr_mas->type]) 4331 goto slow_path; 4332 4333 /* Attempt to append */ 4334 if (mas_wr_append(wr_mas, new_end)) 4335 return; 4336 4337 if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas)) 4338 return; 4339 4340 if (mas_wr_node_store(wr_mas, new_end)) 4341 return; 4342 4343 if (mas_is_err(mas)) 4344 return; 4345 4346 slow_path: 4347 mas_wr_bnode(wr_mas); 4348 } 4349 4350 /* 4351 * mas_wr_store_entry() - Internal call to store a value 4352 * @mas: The maple state 4353 * @entry: The entry to store. 4354 * 4355 * Return: The contents that was stored at the index. 4356 */ 4357 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas) 4358 { 4359 struct ma_state *mas = wr_mas->mas; 4360 4361 wr_mas->content = mas_start(mas); 4362 if (mas_is_none(mas) || mas_is_ptr(mas)) { 4363 mas_store_root(mas, wr_mas->entry); 4364 return wr_mas->content; 4365 } 4366 4367 if (unlikely(!mas_wr_walk(wr_mas))) { 4368 mas_wr_spanning_store(wr_mas); 4369 return wr_mas->content; 4370 } 4371 4372 /* At this point, we are at the leaf node that needs to be altered. */ 4373 mas_wr_end_piv(wr_mas); 4374 4375 if (!wr_mas->entry) 4376 mas_wr_extend_null(wr_mas); 4377 4378 /* New root for a single pointer */ 4379 if (unlikely(!mas->index && mas->last == ULONG_MAX)) { 4380 mas_new_root(mas, wr_mas->entry); 4381 return wr_mas->content; 4382 } 4383 4384 mas_wr_modify(wr_mas); 4385 return wr_mas->content; 4386 } 4387 4388 /** 4389 * mas_insert() - Internal call to insert a value 4390 * @mas: The maple state 4391 * @entry: The entry to store 4392 * 4393 * Return: %NULL or the contents that already exists at the requested index 4394 * otherwise. The maple state needs to be checked for error conditions. 4395 */ 4396 static inline void *mas_insert(struct ma_state *mas, void *entry) 4397 { 4398 MA_WR_STATE(wr_mas, mas, entry); 4399 4400 /* 4401 * Inserting a new range inserts either 0, 1, or 2 pivots within the 4402 * tree. If the insert fits exactly into an existing gap with a value 4403 * of NULL, then the slot only needs to be written with the new value. 
* If the range being inserted is adjacent to another range, then only a
4405 	 * single pivot needs to be inserted (as well as writing the entry). If
4406 	 * the new range is within a gap but does not touch any other ranges,
4407 	 * then two pivots need to be inserted: the start - 1, and the end. As
4408 	 * usual, the entry must be written. Most operations require a new node
4409 	 * to be allocated and replace an existing node to ensure RCU safety
4410 	 * when in RCU mode. The exception to requiring a newly allocated node
4411 	 * is when inserting at the end of a node (appending). When done
4412 	 * carefully, appending can reuse the node in place.
4413 	 */
4414 	wr_mas.content = mas_start(mas);
4415 	if (wr_mas.content)
4416 		goto exists;
4417 
4418 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
4419 		mas_store_root(mas, entry);
4420 		return NULL;
4421 	}
4422 
4423 	/* Spanning writes always overwrite something. */
4424 	if (!mas_wr_walk(&wr_mas))
4425 		goto exists;
4426 
4427 	/* At this point, we are at the leaf node that needs to be altered. */
4428 	wr_mas.offset_end = mas->offset;
4429 	wr_mas.end_piv = wr_mas.r_max;
4430 
4431 	if (wr_mas.content || (mas->last > wr_mas.r_max))
4432 		goto exists;
4433 
4434 	if (!entry)
4435 		return NULL;
4436 
4437 	mas_wr_modify(&wr_mas);
4438 	return wr_mas.content;
4439 
4440 exists:
4441 	mas_set_err(mas, -EEXIST);
4442 	return wr_mas.content;
4443 
4444 }
4445 
4446 static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4447 {
4448 retry:
4449 	mas_set(mas, index);
4450 	mas_state_walk(mas);
4451 	if (mas_is_start(mas))
4452 		goto retry;
4453 }
4454 
4455 static inline bool mas_rewalk_if_dead(struct ma_state *mas,
4456 		struct maple_node *node, const unsigned long index)
4457 {
4458 	if (unlikely(ma_dead_node(node))) {
4459 		mas_rewalk(mas, index);
4460 		return true;
4461 	}
4462 	return false;
4463 }
4464 
4465 /*
4466 * mas_prev_node() - Find the previous non-null entry at the same level in
4467 * the tree.
4468 * @mas: The maple state
4469 * @min: The lower limit to search
4470 *
4471 * The prev node value will be mas->node[mas->offset] or MAS_NONE.
4472 * Return: 1 if the node is dead, 0 otherwise.
4473 */
4474 static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
4475 {
4476 	enum maple_type mt;
4477 	int offset, level;
4478 	void __rcu **slots;
4479 	struct maple_node *node;
4480 	unsigned long *pivots;
4481 	unsigned long max;
4482 
4483 	node = mas_mn(mas);
4484 	if (!mas->min)
4485 		goto no_entry;
4486 
4487 	max = mas->min - 1;
4488 	if (max < min)
4489 		goto no_entry;
4490 
4491 	level = 0;
4492 	do {
4493 		if (ma_is_root(node))
4494 			goto no_entry;
4495 
4496 		/* Walk up.
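		 * Stop at the root or at the first ancestor with a slot to the
		 * left of the descent path (offset != 0).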
		 */
4497 		if (unlikely(mas_ascend(mas)))
4498 			return 1;
4499 		offset = mas->offset;
4500 		level++;
4501 		node = mas_mn(mas);
4502 	} while (!offset);
4503 
4504 	offset--;
4505 	mt = mte_node_type(mas->node);
4506 	while (level > 1) {
4507 		level--;
4508 		slots = ma_slots(node, mt);
4509 		mas->node = mas_slot(mas, slots, offset);
4510 		if (unlikely(ma_dead_node(node)))
4511 			return 1;
4512 
4513 		mt = mte_node_type(mas->node);
4514 		node = mas_mn(mas);
4515 		pivots = ma_pivots(node, mt);
4516 		offset = ma_data_end(node, mt, pivots, max);
4517 		if (unlikely(ma_dead_node(node)))
4518 			return 1;
4519 	}
4520 
4521 	slots = ma_slots(node, mt);
4522 	mas->node = mas_slot(mas, slots, offset);
4523 	pivots = ma_pivots(node, mt);
4524 	if (unlikely(ma_dead_node(node)))
4525 		return 1;
4526 
4527 	if (likely(offset))
4528 		mas->min = pivots[offset - 1] + 1;
4529 	mas->max = max;
4530 	mas->offset = mas_data_end(mas);
4531 	if (unlikely(mte_dead_node(mas->node)))
4532 		return 1;
4533 
4534 	return 0;
4535 
4536 no_entry:
4537 	if (unlikely(ma_dead_node(node)))
4538 		return 1;
4539 
4540 	mas->node = MAS_NONE;
4541 	return 0;
4542 }
4543 
4544 /*
4545 * mas_prev_slot() - Get the entry in the previous slot
4546 *
4547 * @mas: The maple state
4548 * @min: The minimum starting range
4549 * @empty: Can be empty
4550 * Return: The entry in the previous slot, which is possibly %NULL
4551 */
4552 static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
4553 {
4554 	void *entry;
4555 	void __rcu **slots;
4556 	unsigned long pivot;
4557 	enum maple_type type;
4558 	unsigned long *pivots;
4559 	struct maple_node *node;
4560 	unsigned long save_point = mas->index;
4561 
4562 retry:
4563 	node = mas_mn(mas);
4564 	type = mte_node_type(mas->node);
4565 	pivots = ma_pivots(node, type);
4566 	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4567 		goto retry;
4568 
4569 again:
4570 	if (mas->min <= min) {
4571 		pivot = mas_safe_min(mas, pivots, mas->offset);
4572 
4573 		if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4574 			goto retry;
4575 
4576 		if (pivot <= min)
4577 			return NULL;
4578 	}
4579 
4580 	if (likely(mas->offset)) {
4581 		mas->offset--;
4582 		mas->last = mas->index - 1;
4583 		mas->index = mas_safe_min(mas, pivots, mas->offset);
4584 	} else {
4585 		if (mas_prev_node(mas, min)) {
4586 			mas_rewalk(mas, save_point);
4587 			goto retry;
4588 		}
4589 
4590 		if (mas_is_none(mas))
4591 			return NULL;
4592 
4593 		mas->last = mas->max;
4594 		node = mas_mn(mas);
4595 		type = mte_node_type(mas->node);
4596 		pivots = ma_pivots(node, type);
4597 		mas->index = pivots[mas->offset - 1] + 1;
4598 	}
4599 
4600 	slots = ma_slots(node, type);
4601 	entry = mas_slot(mas, slots, mas->offset);
4602 	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4603 		goto retry;
4604 
4605 	if (likely(entry))
4606 		return entry;
4607 
4608 	if (!empty)
4609 		goto again;
4610 
4611 	return entry;
4612 }
4613 
4614 /*
4615 * mas_next_node() - Get the next node at the same level in the tree.
4616 * @mas: The maple state
4617 * @node: The starting maple node
4618 * @max: The maximum pivot value to check.
4619 * The next value will be mas->node[mas->offset] or MAS_NONE.
4620 * Return: 1 on dead node, 0 otherwise.
4621 */ 4622 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node, 4623 unsigned long max) 4624 { 4625 unsigned long min; 4626 unsigned long *pivots; 4627 struct maple_enode *enode; 4628 int level = 0; 4629 unsigned char node_end; 4630 enum maple_type mt; 4631 void __rcu **slots; 4632 4633 if (mas->max >= max) 4634 goto no_entry; 4635 4636 min = mas->max + 1; 4637 level = 0; 4638 do { 4639 if (ma_is_root(node)) 4640 goto no_entry; 4641 4642 /* Walk up. */ 4643 if (unlikely(mas_ascend(mas))) 4644 return 1; 4645 4646 level++; 4647 node = mas_mn(mas); 4648 mt = mte_node_type(mas->node); 4649 pivots = ma_pivots(node, mt); 4650 node_end = ma_data_end(node, mt, pivots, mas->max); 4651 if (unlikely(ma_dead_node(node))) 4652 return 1; 4653 4654 } while (unlikely(mas->offset == node_end)); 4655 4656 slots = ma_slots(node, mt); 4657 mas->offset++; 4658 enode = mas_slot(mas, slots, mas->offset); 4659 if (unlikely(ma_dead_node(node))) 4660 return 1; 4661 4662 if (level > 1) 4663 mas->offset = 0; 4664 4665 while (unlikely(level > 1)) { 4666 level--; 4667 mas->node = enode; 4668 node = mas_mn(mas); 4669 mt = mte_node_type(mas->node); 4670 slots = ma_slots(node, mt); 4671 enode = mas_slot(mas, slots, 0); 4672 if (unlikely(ma_dead_node(node))) 4673 return 1; 4674 } 4675 4676 if (!mas->offset) 4677 pivots = ma_pivots(node, mt); 4678 4679 mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt); 4680 if (unlikely(ma_dead_node(node))) 4681 return 1; 4682 4683 mas->node = enode; 4684 mas->min = min; 4685 return 0; 4686 4687 no_entry: 4688 if (unlikely(ma_dead_node(node))) 4689 return 1; 4690 4691 mas->node = MAS_NONE; 4692 return 0; 4693 } 4694 4695 /* 4696 * mas_next_slot() - Get the entry in the next slot 4697 * 4698 * @mas: The maple state 4699 * @max: The maximum starting range 4700 * @empty: Can be empty 4701 * 4702 * Return: The entry in the next slot which is possibly NULL 4703 */ 4704 static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty) 4705 { 4706 void __rcu **slots; 4707 unsigned long *pivots; 4708 unsigned long pivot; 4709 enum maple_type type; 4710 struct maple_node *node; 4711 unsigned char data_end; 4712 unsigned long save_point = mas->last; 4713 void *entry; 4714 4715 retry: 4716 node = mas_mn(mas); 4717 type = mte_node_type(mas->node); 4718 pivots = ma_pivots(node, type); 4719 data_end = ma_data_end(node, type, pivots, mas->max); 4720 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4721 goto retry; 4722 4723 again: 4724 if (mas->max >= max) { 4725 if (likely(mas->offset < data_end)) 4726 pivot = pivots[mas->offset]; 4727 else 4728 return NULL; /* must be mas->max */ 4729 4730 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4731 goto retry; 4732 4733 if (pivot >= max) 4734 return NULL; 4735 } 4736 4737 if (likely(mas->offset < data_end)) { 4738 mas->index = pivots[mas->offset] + 1; 4739 mas->offset++; 4740 if (likely(mas->offset < data_end)) 4741 mas->last = pivots[mas->offset]; 4742 else 4743 mas->last = mas->max; 4744 } else { 4745 if (mas_next_node(mas, node, max)) { 4746 mas_rewalk(mas, save_point); 4747 goto retry; 4748 } 4749 4750 if (mas_is_none(mas)) 4751 return NULL; 4752 4753 mas->offset = 0; 4754 mas->index = mas->min; 4755 node = mas_mn(mas); 4756 type = mte_node_type(mas->node); 4757 pivots = ma_pivots(node, type); 4758 mas->last = pivots[0]; 4759 } 4760 4761 slots = ma_slots(node, type); 4762 entry = mt_slot(mas->tree, slots, mas->offset); 4763 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4764 goto 
retry; 4765 4766 if (entry) 4767 return entry; 4768 4769 if (!empty) { 4770 if (!mas->offset) 4771 data_end = 2; 4772 goto again; 4773 } 4774 4775 return entry; 4776 } 4777 4778 /* 4779 * mas_next_entry() - Internal function to get the next entry. 4780 * @mas: The maple state 4781 * @limit: The maximum range start. 4782 * 4783 * Set the @mas->node to the next entry and the range_start to 4784 * the beginning value for the entry. Does not check beyond @limit. 4785 * Sets @mas->index and @mas->last to the limit if it is hit. 4786 * Restarts on dead nodes. 4787 * 4788 * Return: the next entry or %NULL. 4789 */ 4790 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) 4791 { 4792 if (mas->last >= limit) 4793 return NULL; 4794 4795 return mas_next_slot(mas, limit, false); 4796 } 4797 4798 /* 4799 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the 4800 * highest gap address of a given size in a given node and descend. 4801 * @mas: The maple state 4802 * @size: The needed size. 4803 * 4804 * Return: True if found in a leaf, false otherwise. 4805 * 4806 */ 4807 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size, 4808 unsigned long *gap_min, unsigned long *gap_max) 4809 { 4810 enum maple_type type = mte_node_type(mas->node); 4811 struct maple_node *node = mas_mn(mas); 4812 unsigned long *pivots, *gaps; 4813 void __rcu **slots; 4814 unsigned long gap = 0; 4815 unsigned long max, min; 4816 unsigned char offset; 4817 4818 if (unlikely(mas_is_err(mas))) 4819 return true; 4820 4821 if (ma_is_dense(type)) { 4822 /* dense nodes. */ 4823 mas->offset = (unsigned char)(mas->index - mas->min); 4824 return true; 4825 } 4826 4827 pivots = ma_pivots(node, type); 4828 slots = ma_slots(node, type); 4829 gaps = ma_gaps(node, type); 4830 offset = mas->offset; 4831 min = mas_safe_min(mas, pivots, offset); 4832 /* Skip out of bounds. */ 4833 while (mas->last < min) 4834 min = mas_safe_min(mas, pivots, --offset); 4835 4836 max = mas_safe_pivot(mas, pivots, offset, type); 4837 while (mas->index <= max) { 4838 gap = 0; 4839 if (gaps) 4840 gap = gaps[offset]; 4841 else if (!mas_slot(mas, slots, offset)) 4842 gap = max - min + 1; 4843 4844 if (gap) { 4845 if ((size <= gap) && (size <= mas->last - min + 1)) 4846 break; 4847 4848 if (!gaps) { 4849 /* Skip the next slot, it cannot be a gap. */ 4850 if (offset < 2) 4851 goto ascend; 4852 4853 offset -= 2; 4854 max = pivots[offset]; 4855 min = mas_safe_min(mas, pivots, offset); 4856 continue; 4857 } 4858 } 4859 4860 if (!offset) 4861 goto ascend; 4862 4863 offset--; 4864 max = min - 1; 4865 min = mas_safe_min(mas, pivots, offset); 4866 } 4867 4868 if (unlikely((mas->index > max) || (size - 1 > max - mas->index))) 4869 goto no_space; 4870 4871 if (unlikely(ma_is_leaf(type))) { 4872 mas->offset = offset; 4873 *gap_min = min; 4874 *gap_max = min + gap - 1; 4875 return true; 4876 } 4877 4878 /* descend, only happens under lock. 
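	 * The gap lies somewhere in the subtree at this offset, so step into
	 * that child and resume the search from its last slot.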
*/ 4879 mas->node = mas_slot(mas, slots, offset); 4880 mas->min = min; 4881 mas->max = max; 4882 mas->offset = mas_data_end(mas); 4883 return false; 4884 4885 ascend: 4886 if (!mte_is_root(mas->node)) 4887 return false; 4888 4889 no_space: 4890 mas_set_err(mas, -EBUSY); 4891 return false; 4892 } 4893 4894 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) 4895 { 4896 enum maple_type type = mte_node_type(mas->node); 4897 unsigned long pivot, min, gap = 0; 4898 unsigned char offset, data_end; 4899 unsigned long *gaps, *pivots; 4900 void __rcu **slots; 4901 struct maple_node *node; 4902 bool found = false; 4903 4904 if (ma_is_dense(type)) { 4905 mas->offset = (unsigned char)(mas->index - mas->min); 4906 return true; 4907 } 4908 4909 node = mas_mn(mas); 4910 pivots = ma_pivots(node, type); 4911 slots = ma_slots(node, type); 4912 gaps = ma_gaps(node, type); 4913 offset = mas->offset; 4914 min = mas_safe_min(mas, pivots, offset); 4915 data_end = ma_data_end(node, type, pivots, mas->max); 4916 for (; offset <= data_end; offset++) { 4917 pivot = mas_safe_pivot(mas, pivots, offset, type); 4918 4919 /* Not within lower bounds */ 4920 if (mas->index > pivot) 4921 goto next_slot; 4922 4923 if (gaps) 4924 gap = gaps[offset]; 4925 else if (!mas_slot(mas, slots, offset)) 4926 gap = min(pivot, mas->last) - max(mas->index, min) + 1; 4927 else 4928 goto next_slot; 4929 4930 if (gap >= size) { 4931 if (ma_is_leaf(type)) { 4932 found = true; 4933 goto done; 4934 } 4935 if (mas->index <= pivot) { 4936 mas->node = mas_slot(mas, slots, offset); 4937 mas->min = min; 4938 mas->max = pivot; 4939 offset = 0; 4940 break; 4941 } 4942 } 4943 next_slot: 4944 min = pivot + 1; 4945 if (mas->last <= pivot) { 4946 mas_set_err(mas, -EBUSY); 4947 return true; 4948 } 4949 } 4950 4951 if (mte_is_root(mas->node)) 4952 found = true; 4953 done: 4954 mas->offset = offset; 4955 return found; 4956 } 4957 4958 /** 4959 * mas_walk() - Search for @mas->index in the tree. 4960 * @mas: The maple state. 4961 * 4962 * mas->index and mas->last will be set to the range if there is a value. If 4963 * mas->node is MAS_NONE, reset to MAS_START. 4964 * 4965 * Return: the entry at the location or %NULL. 4966 */ 4967 void *mas_walk(struct ma_state *mas) 4968 { 4969 void *entry; 4970 4971 if (mas_is_none(mas) || mas_is_paused(mas) || mas_is_ptr(mas)) 4972 mas->node = MAS_START; 4973 retry: 4974 entry = mas_state_walk(mas); 4975 if (mas_is_start(mas)) { 4976 goto retry; 4977 } else if (mas_is_none(mas)) { 4978 mas->index = 0; 4979 mas->last = ULONG_MAX; 4980 } else if (mas_is_ptr(mas)) { 4981 if (!mas->index) { 4982 mas->last = 0; 4983 return entry; 4984 } 4985 4986 mas->index = 1; 4987 mas->last = ULONG_MAX; 4988 mas->node = MAS_NONE; 4989 return NULL; 4990 } 4991 4992 return entry; 4993 } 4994 EXPORT_SYMBOL_GPL(mas_walk); 4995 4996 static inline bool mas_rewind_node(struct ma_state *mas) 4997 { 4998 unsigned char slot; 4999 5000 do { 5001 if (mte_is_root(mas->node)) { 5002 slot = mas->offset; 5003 if (!slot) 5004 return false; 5005 } else { 5006 mas_ascend(mas); 5007 slot = mas->offset; 5008 } 5009 } while (!slot); 5010 5011 mas->offset = --slot; 5012 return true; 5013 } 5014 5015 /* 5016 * mas_skip_node() - Internal function. Skip over a node. 5017 * @mas: The maple state. 5018 * 5019 * Return: true if there is another node, false otherwise. 
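 *
 * Used by the allocation walk (mas_awalk()) to step past a subtree in which
 * no suitable gap was found.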
5020 */
5021 static inline bool mas_skip_node(struct ma_state *mas)
5022 {
5023 	if (mas_is_err(mas))
5024 		return false;
5025 
5026 	do {
5027 		if (mte_is_root(mas->node)) {
5028 			if (mas->offset >= mas_data_end(mas)) {
5029 				mas_set_err(mas, -EBUSY);
5030 				return false;
5031 			}
5032 		} else {
5033 			mas_ascend(mas);
5034 		}
5035 	} while (mas->offset >= mas_data_end(mas));
5036 
5037 	mas->offset++;
5038 	return true;
5039 }
5040 
5041 /*
5042 * mas_awalk() - Allocation walk. Search from low address to high for a gap
5043 * of @size.
5044 * @mas: The maple state
5045 * @size: The size of the gap required
5046 *
5047 * Search between @mas->index and @mas->last for a gap of @size.
5048 */
5049 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
5050 {
5051 	struct maple_enode *last = NULL;
5052 
5053 	/*
5054 	 * There are 4 options:
5055 	 * go to child (descend)
5056 	 * go back to parent (ascend)
5057 	 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
5058 	 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
5059 	 */
5060 	while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
5061 		if (last == mas->node)
5062 			mas_skip_node(mas);
5063 		else
5064 			last = mas->node;
5065 	}
5066 }
5067 
5068 /*
5069 * mas_sparse_area() - Internal function. Return upper or lower limit when
5070 * searching for a gap in an empty tree.
5071 * @mas: The maple state
5072 * @min: The minimum range
5073 * @max: The maximum range
5074 * @size: The size of the gap
5075 * @fwd: Searching forward or back
5076 */
5077 static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
5078 		unsigned long max, unsigned long size, bool fwd)
5079 {
5080 	if (!unlikely(mas_is_none(mas)) && min == 0) {
5081 		min++;
5082 		/*
5083 		 * min has been increased, so recheck that a gap of @size
5084 		 * still fits between min and max.
5085 		 */
5086 		if (min > max || max - min + 1 < size)
5087 			return -EBUSY;
5088 	}
5089 	/* mas_is_ptr */
5090 
5091 	if (fwd) {
5092 		mas->index = min;
5093 		mas->last = min + size - 1;
5094 	} else {
5095 		mas->last = max;
5096 		mas->index = max - size + 1;
5097 	}
5098 	return 0;
5099 }
5100 
5101 /*
5102 * mas_empty_area() - Get the lowest address within the range that is
5103 * sufficient for the size requested.
5104 * @mas: The maple state
5105 * @min: The lowest value of the range
5106 * @max: The highest value of the range
5107 * @size: The size needed
5108 */
5109 int mas_empty_area(struct ma_state *mas, unsigned long min,
5110 		unsigned long max, unsigned long size)
5111 {
5112 	unsigned char offset;
5113 	unsigned long *pivots;
5114 	enum maple_type mt;
5115 
5116 	if (min > max)
5117 		return -EINVAL;
5118 
5119 	if (size == 0 || max - min < size - 1)
5120 		return -EINVAL;
5121 
5122 	if (mas_is_start(mas))
5123 		mas_start(mas);
5124 	else if (mas->offset >= 2)
5125 		mas->offset -= 2;
5126 	else if (!mas_skip_node(mas))
5127 		return -EBUSY;
5128 
5129 	/* Empty set */
5130 	if (mas_is_none(mas) || mas_is_ptr(mas))
5131 		return mas_sparse_area(mas, min, max, size, true);
5132 
5133 	/* The start of the window can only be within these values */
5134 	mas->index = min;
5135 	mas->last = max;
5136 	mas_awalk(mas, size);
5137 
5138 	if (unlikely(mas_is_err(mas)))
5139 		return xa_err(mas->node);
5140 
5141 	offset = mas->offset;
5142 	if (unlikely(offset == MAPLE_NODE_SLOTS))
5143 		return -EBUSY;
5144 
5145 	mt = mte_node_type(mas->node);
5146 	pivots = ma_pivots(mas_mn(mas), mt);
5147 	min = mas_safe_min(mas, pivots, offset);
5148 	if (mas->index < min)
5149 		mas->index = min;
5150 	mas->last = mas->index + size - 1;
5151 	return 0;
5152 }
5153 EXPORT_SYMBOL_GPL(mas_empty_area);
5154 
5155 /*
5156 * mas_empty_area_rev() - Get the highest address within the range that is
5157 * sufficient for the size requested.
5158 * @mas: The maple state
5159 * @min: The lowest value of the range
5160 * @max: The highest value of the range
5161 * @size: The size needed
5162 */
5163 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5164 		unsigned long max, unsigned long size)
5165 {
5166 	struct maple_enode *last = mas->node;
5167 
5168 	if (min > max)
5169 		return -EINVAL;
5170 
5171 	if (size == 0 || max - min < size - 1)
5172 		return -EINVAL;
5173 
5174 	if (mas_is_start(mas)) {
5175 		mas_start(mas);
5176 		mas->offset = mas_data_end(mas);
5177 	} else if (mas->offset >= 2) {
5178 		mas->offset -= 2;
5179 	} else if (!mas_rewind_node(mas)) {
5180 		return -EBUSY;
5181 	}
5182 
5183 	/* Empty set. */
5184 	if (mas_is_none(mas) || mas_is_ptr(mas))
5185 		return mas_sparse_area(mas, min, max, size, false);
5186 
5187 	/* The start of the window can only be within these values. */
5188 	mas->index = min;
5189 	mas->last = max;
5190 
5191 	while (!mas_rev_awalk(mas, size, &min, &max)) {
5192 		if (last == mas->node) {
5193 			if (!mas_rewind_node(mas))
5194 				return -EBUSY;
5195 		} else {
5196 			last = mas->node;
5197 		}
5198 	}
5199 
5200 	if (mas_is_err(mas))
5201 		return xa_err(mas->node);
5202 
5203 	if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5204 		return -EBUSY;
5205 
5206 	/* Trim the upper limit to the max. */
5207 	if (max < mas->last)
5208 		mas->last = max;
5209 
5210 	mas->index = mas->last - size + 1;
5211 	return 0;
5212 }
5213 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
5214 
5215 /*
5216 * mte_dead_leaves() - Mark all leaves of a node as dead.
5217 * @enode: The encoded maple node
5218 * @mt: The maple tree
5219 * @slots: Pointer to the slot array
5220 *
5221 * Must hold the write lock.
5222 *
5223 * Return: The number of leaves marked as dead.
5224 */ 5225 static inline 5226 unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt, 5227 void __rcu **slots) 5228 { 5229 struct maple_node *node; 5230 enum maple_type type; 5231 void *entry; 5232 int offset; 5233 5234 for (offset = 0; offset < mt_slot_count(enode); offset++) { 5235 entry = mt_slot(mt, slots, offset); 5236 type = mte_node_type(entry); 5237 node = mte_to_node(entry); 5238 /* Use both node and type to catch LE & BE metadata */ 5239 if (!node || !type) 5240 break; 5241 5242 mte_set_node_dead(entry); 5243 node->type = type; 5244 rcu_assign_pointer(slots[offset], node); 5245 } 5246 5247 return offset; 5248 } 5249 5250 /** 5251 * mte_dead_walk() - Walk down a dead tree to just before the leaves 5252 * @enode: The maple encoded node 5253 * @offset: The starting offset 5254 * 5255 * Note: This can only be used from the RCU callback context. 5256 */ 5257 static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset) 5258 { 5259 struct maple_node *node, *next; 5260 void __rcu **slots = NULL; 5261 5262 next = mte_to_node(*enode); 5263 do { 5264 *enode = ma_enode_ptr(next); 5265 node = mte_to_node(*enode); 5266 slots = ma_slots(node, node->type); 5267 next = rcu_dereference_protected(slots[offset], 5268 lock_is_held(&rcu_callback_map)); 5269 offset = 0; 5270 } while (!ma_is_leaf(next->type)); 5271 5272 return slots; 5273 } 5274 5275 /** 5276 * mt_free_walk() - Walk & free a tree in the RCU callback context 5277 * @head: The RCU head that's within the node. 5278 * 5279 * Note: This can only be used from the RCU callback context. 5280 */ 5281 static void mt_free_walk(struct rcu_head *head) 5282 { 5283 void __rcu **slots; 5284 struct maple_node *node, *start; 5285 struct maple_enode *enode; 5286 unsigned char offset; 5287 enum maple_type type; 5288 5289 node = container_of(head, struct maple_node, rcu); 5290 5291 if (ma_is_leaf(node->type)) 5292 goto free_leaf; 5293 5294 start = node; 5295 enode = mt_mk_node(node, node->type); 5296 slots = mte_dead_walk(&enode, 0); 5297 node = mte_to_node(enode); 5298 do { 5299 mt_free_bulk(node->slot_len, slots); 5300 offset = node->parent_slot + 1; 5301 enode = node->piv_parent; 5302 if (mte_to_node(enode) == node) 5303 goto free_leaf; 5304 5305 type = mte_node_type(enode); 5306 slots = ma_slots(mte_to_node(enode), type); 5307 if ((offset < mt_slots[type]) && 5308 rcu_dereference_protected(slots[offset], 5309 lock_is_held(&rcu_callback_map))) 5310 slots = mte_dead_walk(&enode, offset); 5311 node = mte_to_node(enode); 5312 } while ((node != start) || (node->slot_len < offset)); 5313 5314 slots = ma_slots(node, node->type); 5315 mt_free_bulk(node->slot_len, slots); 5316 5317 free_leaf: 5318 mt_free_rcu(&node->rcu); 5319 } 5320 5321 static inline void __rcu **mte_destroy_descend(struct maple_enode **enode, 5322 struct maple_tree *mt, struct maple_enode *prev, unsigned char offset) 5323 { 5324 struct maple_node *node; 5325 struct maple_enode *next = *enode; 5326 void __rcu **slots = NULL; 5327 enum maple_type type; 5328 unsigned char next_offset = 0; 5329 5330 do { 5331 *enode = next; 5332 node = mte_to_node(*enode); 5333 type = mte_node_type(*enode); 5334 slots = ma_slots(node, type); 5335 next = mt_slot_locked(mt, slots, next_offset); 5336 if ((mte_dead_node(next))) 5337 next = mt_slot_locked(mt, slots, ++next_offset); 5338 5339 mte_set_node_dead(*enode); 5340 node->type = type; 5341 node->piv_parent = prev; 5342 node->parent_slot = offset; 5343 offset = next_offset; 5344 next_offset = 0; 5345 prev = 
*enode;
5346 	} while (!mte_is_leaf(next));
5347 
5348 	return slots;
5349 }
5350 
5351 static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
5352 		bool free)
5353 {
5354 	void __rcu **slots;
5355 	struct maple_node *node = mte_to_node(enode);
5356 	struct maple_enode *start;
5357 
5358 	if (mte_is_leaf(enode)) {
5359 		node->type = mte_node_type(enode);
5360 		goto free_leaf;
5361 	}
5362 
5363 	start = enode;
5364 	slots = mte_destroy_descend(&enode, mt, start, 0);
5365 	node = mte_to_node(enode); /* Updated in the above call. */
5366 	do {
5367 		enum maple_type type;
5368 		unsigned char offset;
5369 		struct maple_enode *parent, *tmp;
5370 
5371 		node->slot_len = mte_dead_leaves(enode, mt, slots);
5372 		if (free)
5373 			mt_free_bulk(node->slot_len, slots);
5374 		offset = node->parent_slot + 1;
5375 		enode = node->piv_parent;
5376 		if (mte_to_node(enode) == node)
5377 			goto free_leaf;
5378 
5379 		type = mte_node_type(enode);
5380 		slots = ma_slots(mte_to_node(enode), type);
5381 		if (offset >= mt_slots[type])
5382 			goto next;
5383 
5384 		tmp = mt_slot_locked(mt, slots, offset);
5385 		if (mte_node_type(tmp) && mte_to_node(tmp)) {
5386 			parent = enode;
5387 			enode = tmp;
5388 			slots = mte_destroy_descend(&enode, mt, parent, offset);
5389 		}
5390 next:
5391 		node = mte_to_node(enode);
5392 	} while (start != enode);
5393 
5394 	node = mte_to_node(enode);
5395 	node->slot_len = mte_dead_leaves(enode, mt, slots);
5396 	if (free)
5397 		mt_free_bulk(node->slot_len, slots);
5398 
5399 free_leaf:
5400 	if (free)
5401 		mt_free_rcu(&node->rcu);
5402 	else
5403 		mt_clear_meta(mt, node, node->type);
5404 }
5405 
5406 /*
5407 * mte_destroy_walk() - Free a tree or sub-tree.
5408 * @enode: The encoded maple node (maple_enode) to start
5409 * @mt: The tree to free - needed for node types.
5410 *
5411 * Must hold the write lock.
5412 */
5413 static inline void mte_destroy_walk(struct maple_enode *enode,
5414 		struct maple_tree *mt)
5415 {
5416 	struct maple_node *node = mte_to_node(enode);
5417 
5418 	if (mt_in_rcu(mt)) {
5419 		mt_destroy_walk(enode, mt, false);
5420 		call_rcu(&node->rcu, mt_free_walk);
5421 	} else {
5422 		mt_destroy_walk(enode, mt, true);
5423 	}
5424 }
5425 
5426 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5427 {
5428 	if (unlikely(mas_is_paused(wr_mas->mas)))
5429 		mas_reset(wr_mas->mas);
5430 
5431 	if (!mas_is_start(wr_mas->mas)) {
5432 		if (mas_is_none(wr_mas->mas)) {
5433 			mas_reset(wr_mas->mas);
5434 		} else {
5435 			wr_mas->r_max = wr_mas->mas->max;
5436 			wr_mas->type = mte_node_type(wr_mas->mas->node);
5437 			if (mas_is_span_wr(wr_mas))
5438 				mas_reset(wr_mas->mas);
5439 		}
5440 	}
5441 }
5442 
5443 /* Interface */
5444 
5445 /**
5446 * mas_store() - Store an @entry.
5447 * @mas: The maple state.
5448 * @entry: The entry to store.
5449 *
5450 * The @mas->index and @mas->last are used to set the range for the @entry.
5451 * Note: The @mas should have pre-allocated entries to ensure there is memory to
5452 * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
5453 *
5454 * Return: the first entry between mas->index and mas->last or %NULL.
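 *
 * Usage sketch (illustrative only; assumes a maple tree named tree and
 * nodes preallocated with mas_preallocate() beforehand):
 *
 *	MA_STATE(mas, &tree, 5, 15);
 *
 *	mas_lock(&mas);
 *	old = mas_store(&mas, entry);	(writes entry over the range [5, 15])
 *	mas_unlock(&mas);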
5455 */ 5456 void *mas_store(struct ma_state *mas, void *entry) 5457 { 5458 MA_WR_STATE(wr_mas, mas, entry); 5459 5460 trace_ma_write(__func__, mas, 0, entry); 5461 #ifdef CONFIG_DEBUG_MAPLE_TREE 5462 if (MAS_WARN_ON(mas, mas->index > mas->last)) 5463 pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry); 5464 5465 if (mas->index > mas->last) { 5466 mas_set_err(mas, -EINVAL); 5467 return NULL; 5468 } 5469 5470 #endif 5471 5472 /* 5473 * Storing is the same operation as insert with the added caveat that it 5474 * can overwrite entries. Although this seems simple enough, one may 5475 * want to examine what happens if a single store operation was to 5476 * overwrite multiple entries within a self-balancing B-Tree. 5477 */ 5478 mas_wr_store_setup(&wr_mas); 5479 mas_wr_store_entry(&wr_mas); 5480 return wr_mas.content; 5481 } 5482 EXPORT_SYMBOL_GPL(mas_store); 5483 5484 /** 5485 * mas_store_gfp() - Store a value into the tree. 5486 * @mas: The maple state 5487 * @entry: The entry to store 5488 * @gfp: The GFP_FLAGS to use for allocations if necessary. 5489 * 5490 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not 5491 * be allocated. 5492 */ 5493 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp) 5494 { 5495 MA_WR_STATE(wr_mas, mas, entry); 5496 5497 mas_wr_store_setup(&wr_mas); 5498 trace_ma_write(__func__, mas, 0, entry); 5499 retry: 5500 mas_wr_store_entry(&wr_mas); 5501 if (unlikely(mas_nomem(mas, gfp))) 5502 goto retry; 5503 5504 if (unlikely(mas_is_err(mas))) 5505 return xa_err(mas->node); 5506 5507 return 0; 5508 } 5509 EXPORT_SYMBOL_GPL(mas_store_gfp); 5510 5511 /** 5512 * mas_store_prealloc() - Store a value into the tree using memory 5513 * preallocated in the maple state. 5514 * @mas: The maple state 5515 * @entry: The entry to store. 5516 */ 5517 void mas_store_prealloc(struct ma_state *mas, void *entry) 5518 { 5519 MA_WR_STATE(wr_mas, mas, entry); 5520 5521 mas_wr_store_setup(&wr_mas); 5522 trace_ma_write(__func__, mas, 0, entry); 5523 mas_wr_store_entry(&wr_mas); 5524 MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas)); 5525 mas_destroy(mas); 5526 } 5527 EXPORT_SYMBOL_GPL(mas_store_prealloc); 5528 5529 /** 5530 * mas_preallocate() - Preallocate enough nodes for a store operation 5531 * @mas: The maple state 5532 * @gfp: The GFP_FLAGS to use for allocations. 5533 * 5534 * Return: 0 on success, -ENOMEM if memory could not be allocated. 5535 */ 5536 int mas_preallocate(struct ma_state *mas, gfp_t gfp) 5537 { 5538 int ret; 5539 5540 mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp); 5541 mas->mas_flags |= MA_STATE_PREALLOC; 5542 if (likely(!mas_is_err(mas))) 5543 return 0; 5544 5545 mas_set_alloc_req(mas, 0); 5546 ret = xa_err(mas->node); 5547 mas_reset(mas); 5548 mas_destroy(mas); 5549 mas_reset(mas); 5550 return ret; 5551 } 5552 EXPORT_SYMBOL_GPL(mas_preallocate); 5553 5554 /* 5555 * mas_destroy() - destroy a maple state. 5556 * @mas: The maple state 5557 * 5558 * Upon completion, check the left-most node and rebalance against the node to 5559 * the right if necessary. Frees any allocated nodes associated with this maple 5560 * state. 5561 */ 5562 void mas_destroy(struct ma_state *mas) 5563 { 5564 struct maple_alloc *node; 5565 unsigned long total; 5566 5567 /* 5568 * When using mas_for_each() to insert an expected number of elements, 5569 * it is possible that the number inserted is less than the expected 5570 * number. To fix an invalid final node, a check is performed here to 5571 * rebalance the previous node with the final node. 
5572 */ 5573 if (mas->mas_flags & MA_STATE_REBALANCE) { 5574 unsigned char end; 5575 5576 mas_start(mas); 5577 mtree_range_walk(mas); 5578 end = mas_data_end(mas) + 1; 5579 if (end < mt_min_slot_count(mas->node) - 1) 5580 mas_destroy_rebalance(mas, end); 5581 5582 mas->mas_flags &= ~MA_STATE_REBALANCE; 5583 } 5584 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC); 5585 5586 total = mas_allocated(mas); 5587 while (total) { 5588 node = mas->alloc; 5589 mas->alloc = node->slot[0]; 5590 if (node->node_count > 1) { 5591 size_t count = node->node_count - 1; 5592 5593 mt_free_bulk(count, (void __rcu **)&node->slot[1]); 5594 total -= count; 5595 } 5596 kmem_cache_free(maple_node_cache, node); 5597 total--; 5598 } 5599 5600 mas->alloc = NULL; 5601 } 5602 EXPORT_SYMBOL_GPL(mas_destroy); 5603 5604 /* 5605 * mas_expected_entries() - Set the expected number of entries that will be inserted. 5606 * @mas: The maple state 5607 * @nr_entries: The number of expected entries. 5608 * 5609 * This will attempt to pre-allocate enough nodes to store the expected number 5610 * of entries. The allocations will occur using the bulk allocator interface 5611 * for speed. Please call mas_destroy() on the @mas after inserting the entries 5612 * to ensure any unused nodes are freed. 5613 * 5614 * Return: 0 on success, -ENOMEM if memory could not be allocated. 5615 */ 5616 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries) 5617 { 5618 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2; 5619 struct maple_enode *enode = mas->node; 5620 int nr_nodes; 5621 int ret; 5622 5623 /* 5624 * Sometimes it is necessary to duplicate a tree to a new tree, such as 5625 * forking a process and duplicating the VMAs from one tree to a new 5626 * tree. When such a situation arises, it is known that the new tree is 5627 * not going to be used until the entire tree is populated. For 5628 * performance reasons, it is best to use a bulk load with RCU disabled. 5629 * This allows for optimistic splitting that favours the left and reuse 5630 * of nodes during the operation. 5631 */ 5632 5633 /* Optimize splitting for bulk insert in-order */ 5634 mas->mas_flags |= MA_STATE_BULK; 5635 5636 /* 5637 * Avoid overflow, assume a gap between each entry and a trailing null. 5638 * If this is wrong, it just means allocation can happen during 5639 * insertion of entries. 
5640 */ 5641 nr_nodes = max(nr_entries, nr_entries * 2 + 1); 5642 if (!mt_is_alloc(mas->tree)) 5643 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2; 5644 5645 /* Leaves; reduce slots to keep space for expansion */ 5646 nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2); 5647 /* Internal nodes */ 5648 nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap); 5649 /* Add working room for split (2 nodes) + new parents */ 5650 mas_node_count(mas, nr_nodes + 3); 5651 5652 /* Detect if allocations run out */ 5653 mas->mas_flags |= MA_STATE_PREALLOC; 5654 5655 if (!mas_is_err(mas)) 5656 return 0; 5657 5658 ret = xa_err(mas->node); 5659 mas->node = enode; 5660 mas_destroy(mas); 5661 return ret; 5662 5663 } 5664 EXPORT_SYMBOL_GPL(mas_expected_entries); 5665 5666 static inline bool mas_next_setup(struct ma_state *mas, unsigned long max, 5667 void **entry) 5668 { 5669 bool was_none = mas_is_none(mas); 5670 5671 if (mas_is_none(mas) || mas_is_paused(mas)) 5672 mas->node = MAS_START; 5673 5674 if (mas_is_start(mas)) 5675 *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ 5676 5677 if (mas_is_ptr(mas)) { 5678 *entry = NULL; 5679 if (was_none && mas->index == 0) { 5680 mas->index = mas->last = 0; 5681 return true; 5682 } 5683 mas->index = 1; 5684 mas->last = ULONG_MAX; 5685 mas->node = MAS_NONE; 5686 return true; 5687 } 5688 5689 if (mas_is_none(mas)) 5690 return true; 5691 return false; 5692 } 5693 5694 /** 5695 * mas_next() - Get the next entry. 5696 * @mas: The maple state 5697 * @max: The maximum index to check. 5698 * 5699 * Returns the next entry after @mas->index. 5700 * Must hold rcu_read_lock or the write lock. 5701 * Can return the zero entry. 5702 * 5703 * Return: The next entry or %NULL 5704 */ 5705 void *mas_next(struct ma_state *mas, unsigned long max) 5706 { 5707 void *entry = NULL; 5708 5709 if (mas_next_setup(mas, max, &entry)) 5710 return entry; 5711 5712 /* Retries on dead nodes handled by mas_next_slot */ 5713 return mas_next_slot(mas, max, false); 5714 } 5715 EXPORT_SYMBOL_GPL(mas_next); 5716 5717 /** 5718 * mas_next_range() - Advance the maple state to the next range 5719 * @mas: The maple state 5720 * @max: The maximum index to check. 5721 * 5722 * Sets @mas->index and @mas->last to the range. 5723 * Must hold rcu_read_lock or the write lock. 5724 * Can return the zero entry. 5725 * 5726 * Return: The next entry or %NULL 5727 */ 5728 void *mas_next_range(struct ma_state *mas, unsigned long max) 5729 { 5730 void *entry = NULL; 5731 5732 if (mas_next_setup(mas, max, &entry)) 5733 return entry; 5734 5735 /* Retries on dead nodes handled by mas_next_slot */ 5736 return mas_next_slot(mas, max, true); 5737 } 5738 EXPORT_SYMBOL_GPL(mas_next_range); 5739 5740 /** 5741 * mt_next() - get the next value in the maple tree 5742 * @mt: The maple tree 5743 * @index: The start index 5744 * @max: The maximum index to check 5745 * 5746 * Takes RCU read lock internally to protect the search, which does not 5747 * protect the returned pointer after dropping RCU read lock. 5748 * See also: Documentation/core-api/maple_tree.rst 5749 * 5750 * Return: The entry higher than @index or %NULL if nothing is found. 
5751 */ 5752 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max) 5753 { 5754 void *entry = NULL; 5755 MA_STATE(mas, mt, index, index); 5756 5757 rcu_read_lock(); 5758 entry = mas_next(&mas, max); 5759 rcu_read_unlock(); 5760 return entry; 5761 } 5762 EXPORT_SYMBOL_GPL(mt_next); 5763 5764 static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min, 5765 void **entry) 5766 { 5767 if (mas->index <= min) 5768 goto none; 5769 5770 if (mas_is_none(mas) || mas_is_paused(mas)) 5771 mas->node = MAS_START; 5772 5773 if (mas_is_start(mas)) { 5774 mas_walk(mas); 5775 if (!mas->index) 5776 goto none; 5777 } 5778 5779 if (unlikely(mas_is_ptr(mas))) { 5780 if (!mas->index) 5781 goto none; 5782 mas->index = mas->last = 0; 5783 *entry = mas_root(mas); 5784 return true; 5785 } 5786 5787 if (mas_is_none(mas)) { 5788 if (mas->index) { 5789 /* Walked to out-of-range pointer? */ 5790 mas->index = mas->last = 0; 5791 mas->node = MAS_ROOT; 5792 *entry = mas_root(mas); 5793 return true; 5794 } 5795 return true; 5796 } 5797 5798 return false; 5799 5800 none: 5801 mas->node = MAS_NONE; 5802 return true; 5803 } 5804 5805 /** 5806 * mas_prev() - Get the previous entry 5807 * @mas: The maple state 5808 * @min: The minimum value to check. 5809 * 5810 * Must hold rcu_read_lock or the write lock. 5811 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not 5812 * searchable nodes. 5813 * 5814 * Return: the previous value or %NULL. 5815 */ 5816 void *mas_prev(struct ma_state *mas, unsigned long min) 5817 { 5818 void *entry = NULL; 5819 5820 if (mas_prev_setup(mas, min, &entry)) 5821 return entry; 5822 5823 return mas_prev_slot(mas, min, false); 5824 } 5825 EXPORT_SYMBOL_GPL(mas_prev); 5826 5827 /** 5828 * mas_prev_range() - Advance to the previous range 5829 * @mas: The maple state 5830 * @min: The minimum value to check. 5831 * 5832 * Sets @mas->index and @mas->last to the range. 5833 * Must hold rcu_read_lock or the write lock. 5834 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not 5835 * searchable nodes. 5836 * 5837 * Return: the previous value or %NULL. 5838 */ 5839 void *mas_prev_range(struct ma_state *mas, unsigned long min) 5840 { 5841 void *entry = NULL; 5842 5843 if (mas_prev_setup(mas, min, &entry)) 5844 return entry; 5845 5846 return mas_prev_slot(mas, min, true); 5847 } 5848 EXPORT_SYMBOL_GPL(mas_prev_range); 5849 5850 /** 5851 * mt_prev() - get the previous value in the maple tree 5852 * @mt: The maple tree 5853 * @index: The start index 5854 * @min: The minimum index to check 5855 * 5856 * Takes RCU read lock internally to protect the search, which does not 5857 * protect the returned pointer after dropping RCU read lock. 5858 * See also: Documentation/core-api/maple_tree.rst 5859 * 5860 * Return: The entry before @index or %NULL if nothing is found. 5861 */ 5862 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min) 5863 { 5864 void *entry = NULL; 5865 MA_STATE(mas, mt, index, index); 5866 5867 rcu_read_lock(); 5868 entry = mas_prev(&mas, min); 5869 rcu_read_unlock(); 5870 return entry; 5871 } 5872 EXPORT_SYMBOL_GPL(mt_prev); 5873 5874 /** 5875 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock. 5876 * @mas: The maple state to pause 5877 * 5878 * Some users need to pause a walk and drop the lock they're holding in 5879 * order to yield to a higher priority thread or carry out an operation 5880 * on an entry. Those users should call this function before they drop 5881 * the lock. 
It resets the @mas to be suitable for the next iteration 5882 * of the loop after the user has reacquired the lock. If most entries 5883 * found during a walk require you to call mas_pause(), the mt_for_each() 5884 * iterator may be more appropriate. 5885 * 5886 */ 5887 void mas_pause(struct ma_state *mas) 5888 { 5889 mas->node = MAS_PAUSE; 5890 } 5891 EXPORT_SYMBOL_GPL(mas_pause); 5892 5893 /** 5894 * mas_find_setup() - Internal function to set up mas_find*(). 5895 * @mas: The maple state 5896 * @max: The maximum index 5897 * @entry: Pointer to the entry 5898 * 5899 * Returns: True if entry is the answer, false otherwise. 5900 */ 5901 static inline bool mas_find_setup(struct ma_state *mas, unsigned long max, 5902 void **entry) 5903 { 5904 *entry = NULL; 5905 5906 if (unlikely(mas_is_none(mas))) { 5907 if (unlikely(mas->last >= max)) 5908 return true; 5909 5910 mas->index = mas->last; 5911 mas->node = MAS_START; 5912 } else if (unlikely(mas_is_paused(mas))) { 5913 if (unlikely(mas->last >= max)) 5914 return true; 5915 5916 mas->node = MAS_START; 5917 mas->index = ++mas->last; 5918 } else if (unlikely(mas_is_ptr(mas))) 5919 goto ptr_out_of_range; 5920 5921 if (unlikely(mas_is_start(mas))) { 5922 /* First run or continue */ 5923 if (mas->index > max) 5924 return true; 5925 5926 *entry = mas_walk(mas); 5927 if (*entry) 5928 return true; 5929 5930 } 5931 5932 if (unlikely(!mas_searchable(mas))) { 5933 if (unlikely(mas_is_ptr(mas))) 5934 goto ptr_out_of_range; 5935 5936 return true; 5937 } 5938 5939 if (mas->index == max) 5940 return true; 5941 5942 return false; 5943 5944 ptr_out_of_range: 5945 mas->node = MAS_NONE; 5946 mas->index = 1; 5947 mas->last = ULONG_MAX; 5948 return true; 5949 } 5950 5951 /** 5952 * mas_find() - On the first call, find the entry at or after mas->index up to 5953 * %max. Otherwise, find the entry after mas->index. 5954 * @mas: The maple state 5955 * @max: The maximum value to check. 5956 * 5957 * Must hold rcu_read_lock or the write lock. 5958 * If an entry exists, last and index are updated accordingly. 5959 * May set @mas->node to MAS_NONE. 5960 * 5961 * Return: The entry or %NULL. 5962 */ 5963 void *mas_find(struct ma_state *mas, unsigned long max) 5964 { 5965 void *entry = NULL; 5966 5967 if (mas_find_setup(mas, max, &entry)) 5968 return entry; 5969 5970 /* Retries on dead nodes handled by mas_next_slot */ 5971 return mas_next_slot(mas, max, false); 5972 } 5973 EXPORT_SYMBOL_GPL(mas_find); 5974 5975 /** 5976 * mas_find_range() - On the first call, find the entry at or after 5977 * mas->index up to %max. Otherwise, advance to the next slot mas->index. 5978 * @mas: The maple state 5979 * @max: The maximum value to check. 5980 * 5981 * Must hold rcu_read_lock or the write lock. 5982 * If an entry exists, last and index are updated accordingly. 5983 * May set @mas->node to MAS_NONE. 5984 * 5985 * Return: The entry or %NULL. 5986 */ 5987 void *mas_find_range(struct ma_state *mas, unsigned long max) 5988 { 5989 void *entry; 5990 5991 if (mas_find_setup(mas, max, &entry)) 5992 return entry; 5993 5994 /* Retries on dead nodes handled by mas_next_slot */ 5995 return mas_next_slot(mas, max, true); 5996 } 5997 EXPORT_SYMBOL_GPL(mas_find_range); 5998 5999 /** 6000 * mas_find_rev_setup() - Internal function to set up mas_find_*_rev() 6001 * @mas: The maple state 6002 * @min: The minimum index 6003 * @entry: Pointer to the entry 6004 * 6005 * Returns: True if entry is the answer, false otherwise. 
6006 */ 6007 static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min, 6008 void **entry) 6009 { 6010 *entry = NULL; 6011 6012 if (unlikely(mas_is_none(mas))) { 6013 if (mas->index <= min) 6014 goto none; 6015 6016 mas->last = mas->index; 6017 mas->node = MAS_START; 6018 } 6019 6020 if (unlikely(mas_is_paused(mas))) { 6021 if (unlikely(mas->index <= min)) { 6022 mas->node = MAS_NONE; 6023 return true; 6024 } 6025 mas->node = MAS_START; 6026 mas->last = --mas->index; 6027 } 6028 6029 if (unlikely(mas_is_start(mas))) { 6030 /* First run or continue */ 6031 if (mas->index < min) 6032 return true; 6033 6034 *entry = mas_walk(mas); 6035 if (*entry) 6036 return true; 6037 } 6038 6039 if (unlikely(!mas_searchable(mas))) { 6040 if (mas_is_ptr(mas)) 6041 goto none; 6042 6043 if (mas_is_none(mas)) { 6044 /* 6045 * Walked to the location, and there was nothing so the 6046 * previous location is 0. 6047 */ 6048 mas->last = mas->index = 0; 6049 mas->node = MAS_ROOT; 6050 *entry = mas_root(mas); 6051 return true; 6052 } 6053 } 6054 6055 if (mas->index < min) 6056 return true; 6057 6058 return false; 6059 6060 none: 6061 mas->node = MAS_NONE; 6062 return true; 6063 } 6064 6065 /** 6066 * mas_find_rev: On the first call, find the first non-null entry at or below 6067 * mas->index down to %min. Otherwise find the first non-null entry below 6068 * mas->index down to %min. 6069 * @mas: The maple state 6070 * @min: The minimum value to check. 6071 * 6072 * Must hold rcu_read_lock or the write lock. 6073 * If an entry exists, last and index are updated accordingly. 6074 * May set @mas->node to MAS_NONE. 6075 * 6076 * Return: The entry or %NULL. 6077 */ 6078 void *mas_find_rev(struct ma_state *mas, unsigned long min) 6079 { 6080 void *entry; 6081 6082 if (mas_find_rev_setup(mas, min, &entry)) 6083 return entry; 6084 6085 /* Retries on dead nodes handled by mas_prev_slot */ 6086 return mas_prev_slot(mas, min, false); 6087 6088 } 6089 EXPORT_SYMBOL_GPL(mas_find_rev); 6090 6091 /** 6092 * mas_find_range_rev: On the first call, find the first non-null entry at or 6093 * below mas->index down to %min. Otherwise advance to the previous slot after 6094 * mas->index down to %min. 6095 * @mas: The maple state 6096 * @min: The minimum value to check. 6097 * 6098 * Must hold rcu_read_lock or the write lock. 6099 * If an entry exists, last and index are updated accordingly. 6100 * May set @mas->node to MAS_NONE. 6101 * 6102 * Return: The entry or %NULL. 6103 */ 6104 void *mas_find_range_rev(struct ma_state *mas, unsigned long min) 6105 { 6106 void *entry; 6107 6108 if (mas_find_rev_setup(mas, min, &entry)) 6109 return entry; 6110 6111 /* Retries on dead nodes handled by mas_prev_slot */ 6112 return mas_prev_slot(mas, min, true); 6113 } 6114 EXPORT_SYMBOL_GPL(mas_find_range_rev); 6115 6116 /** 6117 * mas_erase() - Find the range in which index resides and erase the entire 6118 * range. 6119 * @mas: The maple state 6120 * 6121 * Must hold the write lock. 6122 * Searches for @mas->index, sets @mas->index and @mas->last to the range and 6123 * erases that range. 6124 * 6125 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated. 6126 */ 6127 void *mas_erase(struct ma_state *mas) 6128 { 6129 void *entry; 6130 MA_WR_STATE(wr_mas, mas, NULL); 6131 6132 if (mas_is_none(mas) || mas_is_paused(mas)) 6133 mas->node = MAS_START; 6134 6135 /* Retry unnecessary when holding the write lock. 
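 * Nodes cannot be marked dead by a concurrent writer while the write lock
 * is held, so the walk below cannot land on a dead node.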
 */
	entry = mas_state_walk(mas);
	if (!entry)
		return NULL;

write_retry:
	/* Must reset to ensure spanning writes of last slot are detected */
	mas_reset(mas);
	mas_wr_store_setup(&wr_mas);
	mas_wr_store_entry(&wr_mas);
	if (mas_nomem(mas, GFP_KERNEL))
		goto write_retry;

	return entry;
}
EXPORT_SYMBOL_GPL(mas_erase);

/*
 * mas_nomem() - Check if there was an error allocating and do the allocation
 * if necessary.  If no allocation is needed, free any nodes that remain
 * allocated in the maple state.
 * @mas: The maple state
 * @gfp: The GFP_FLAGS to use for allocations
 * Return: true on allocation, false otherwise.
 */
bool mas_nomem(struct ma_state *mas, gfp_t gfp)
	__must_hold(mas->tree->ma_lock)
{
	if (likely(mas->node != MA_ERROR(-ENOMEM))) {
		mas_destroy(mas);
		return false;
	}

	if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
		mtree_unlock(mas->tree);
		mas_alloc_nodes(mas, gfp);
		mtree_lock(mas->tree);
	} else {
		mas_alloc_nodes(mas, gfp);
	}

	if (!mas_allocated(mas))
		return false;

	mas->node = MAS_START;
	return true;
}

void __init maple_tree_init(void)
{
	maple_node_cache = kmem_cache_create("maple_node",
			sizeof(struct maple_node), sizeof(struct maple_node),
			SLAB_PANIC, NULL);
}

/**
 * mtree_load() - Load a value stored in a maple tree
 * @mt: The maple tree
 * @index: The index to load
 *
 * Return: the entry or %NULL
 */
void *mtree_load(struct maple_tree *mt, unsigned long index)
{
	MA_STATE(mas, mt, index, index);
	void *entry;

	trace_ma_read(__func__, &mas);
	rcu_read_lock();
retry:
	entry = mas_start(&mas);
	if (unlikely(mas_is_none(&mas)))
		goto unlock;

	if (unlikely(mas_is_ptr(&mas))) {
		if (index)
			entry = NULL;

		goto unlock;
	}

	entry = mtree_lookup_walk(&mas);
	if (!entry && unlikely(mas_is_start(&mas)))
		goto retry;
unlock:
	rcu_read_unlock();
	if (xa_is_zero(entry))
		return NULL;

	return entry;
}
EXPORT_SYMBOL(mtree_load);

/**
 * mtree_store_range() - Store an entry at a given range.
 * @mt: The maple tree
 * @index: The start of the range
 * @last: The end of the range
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
 * be allocated.
 */
int mtree_store_range(struct maple_tree *mt, unsigned long index,
		unsigned long last, void *entry, gfp_t gfp)
{
	MA_STATE(mas, mt, index, last);
	MA_WR_STATE(wr_mas, &mas, entry);

	trace_ma_write(__func__, &mas, 0, entry);
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;

	if (index > last)
		return -EINVAL;

	mtree_lock(mt);
retry:
	mas_wr_store_entry(&wr_mas);
	if (mas_nomem(&mas, gfp))
		goto retry;

	mtree_unlock(mt);
	if (mas_is_err(&mas))
		return xa_err(mas.node);

	return 0;
}
EXPORT_SYMBOL(mtree_store_range);
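
/*
 * Example: storing a range with the basic API and reading it back.  An
 * illustrative sketch only; the tree name is arbitrary and xa_mk_value()
 * wraps an integer into a valid entry.
 *
 *	DEFINE_MTREE(tree);
 *	void *entry;
 *
 *	if (!mtree_store_range(&tree, 10, 19, xa_mk_value(42), GFP_KERNEL))
 *		entry = mtree_load(&tree, 15);
 *	mtree_destroy(&tree);
 *
 * mtree_load() returns xa_mk_value(42) for any index in 10-19 and NULL
 * outside of that range.
 */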

/**
 * mtree_store() - Store an entry at a given index.
 * @mt: The maple tree
 * @index: The index to store the value
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
 * be allocated.
 */
int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
		gfp_t gfp)
{
	return mtree_store_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_store);

/**
 * mtree_insert_range() - Insert an entry at a given range if there is no value.
 * @mt: The maple tree
 * @first: The start of the range
 * @last: The end of the range
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
 * request, -ENOMEM if memory could not be allocated.
 */
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp)
{
	MA_STATE(ms, mt, first, last);

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;

	if (first > last)
		return -EINVAL;

	mtree_lock(mt);
retry:
	mas_insert(&ms, entry);
	if (mas_nomem(&ms, gfp))
		goto retry;

	mtree_unlock(mt);
	if (mas_is_err(&ms))
		return xa_err(ms.node);

	return 0;
}
EXPORT_SYMBOL(mtree_insert_range);

/**
 * mtree_insert() - Insert an entry at a given index if there is no value.
 * @mt: The maple tree
 * @index: The index to store the value
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
 * request, -ENOMEM if memory could not be allocated.
 */
int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
		gfp_t gfp)
{
	return mtree_insert_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_insert);
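
/*
 * Example contrasting insert and store (an illustrative sketch; the tree
 * name is arbitrary):
 *
 *	DEFINE_MTREE(tree);
 *
 *	mtree_insert(&tree, 5, xa_mk_value(1), GFP_KERNEL);	returns 0
 *	mtree_insert(&tree, 5, xa_mk_value(2), GFP_KERNEL);	returns -EEXIST
 *	mtree_store(&tree, 5, xa_mk_value(2), GFP_KERNEL);	returns 0
 *
 * mtree_insert() refuses to overwrite an occupied range, while mtree_store()
 * replaces whatever is there.
 */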
6358 */ 6359 if (mas_nomem(&mas, gfp)) 6360 goto retry; 6361 6362 if (mas_is_err(&mas)) 6363 ret = xa_err(mas.node); 6364 else 6365 *startp = mas.index; 6366 6367 unlock: 6368 mtree_unlock(mt); 6369 return ret; 6370 } 6371 EXPORT_SYMBOL(mtree_alloc_range); 6372 6373 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, 6374 void *entry, unsigned long size, unsigned long min, 6375 unsigned long max, gfp_t gfp) 6376 { 6377 int ret = 0; 6378 6379 MA_STATE(mas, mt, 0, 0); 6380 if (!mt_is_alloc(mt)) 6381 return -EINVAL; 6382 6383 if (WARN_ON_ONCE(mt_is_reserved(entry))) 6384 return -EINVAL; 6385 6386 mtree_lock(mt); 6387 retry: 6388 ret = mas_empty_area_rev(&mas, min, max, size); 6389 if (ret) 6390 goto unlock; 6391 6392 mas_insert(&mas, entry); 6393 /* 6394 * mas_nomem() may release the lock, causing the allocated area 6395 * to be unavailable, so try to allocate a free area again. 6396 */ 6397 if (mas_nomem(&mas, gfp)) 6398 goto retry; 6399 6400 if (mas_is_err(&mas)) 6401 ret = xa_err(mas.node); 6402 else 6403 *startp = mas.index; 6404 6405 unlock: 6406 mtree_unlock(mt); 6407 return ret; 6408 } 6409 EXPORT_SYMBOL(mtree_alloc_rrange); 6410 6411 /** 6412 * mtree_erase() - Find an index and erase the entire range. 6413 * @mt: The maple tree 6414 * @index: The index to erase 6415 * 6416 * Erasing is the same as a walk to an entry then a store of a NULL to that 6417 * ENTIRE range. In fact, it is implemented as such using the advanced API. 6418 * 6419 * Return: The entry stored at the @index or %NULL 6420 */ 6421 void *mtree_erase(struct maple_tree *mt, unsigned long index) 6422 { 6423 void *entry = NULL; 6424 6425 MA_STATE(mas, mt, index, index); 6426 trace_ma_op(__func__, &mas); 6427 6428 mtree_lock(mt); 6429 entry = mas_erase(&mas); 6430 mtree_unlock(mt); 6431 6432 return entry; 6433 } 6434 EXPORT_SYMBOL(mtree_erase); 6435 6436 /** 6437 * __mt_destroy() - Walk and free all nodes of a locked maple tree. 6438 * @mt: The maple tree 6439 * 6440 * Note: Does not handle locking. 6441 */ 6442 void __mt_destroy(struct maple_tree *mt) 6443 { 6444 void *root = mt_root_locked(mt); 6445 6446 rcu_assign_pointer(mt->ma_root, NULL); 6447 if (xa_is_node(root)) 6448 mte_destroy_walk(root, mt); 6449 6450 mt->ma_flags = 0; 6451 } 6452 EXPORT_SYMBOL_GPL(__mt_destroy); 6453 6454 /** 6455 * mtree_destroy() - Destroy a maple tree 6456 * @mt: The maple tree 6457 * 6458 * Frees all resources used by the tree. Handles locking. 6459 */ 6460 void mtree_destroy(struct maple_tree *mt) 6461 { 6462 mtree_lock(mt); 6463 __mt_destroy(mt); 6464 mtree_unlock(mt); 6465 } 6466 EXPORT_SYMBOL(mtree_destroy); 6467 6468 /** 6469 * mt_find() - Search from the start up until an entry is found. 6470 * @mt: The maple tree 6471 * @index: Pointer which contains the start location of the search 6472 * @max: The maximum value of the search range 6473 * 6474 * Takes RCU read lock internally to protect the search, which does not 6475 * protect the returned pointer after dropping RCU read lock. 6476 * See also: Documentation/core-api/maple_tree.rst 6477 * 6478 * In case that an entry is found @index is updated to point to the next 6479 * possible entry independent whether the found entry is occupying a 6480 * single index or a range if indices. 
6481 * 6482 * Return: The entry at or after the @index or %NULL 6483 */ 6484 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max) 6485 { 6486 MA_STATE(mas, mt, *index, *index); 6487 void *entry; 6488 #ifdef CONFIG_DEBUG_MAPLE_TREE 6489 unsigned long copy = *index; 6490 #endif 6491 6492 trace_ma_read(__func__, &mas); 6493 6494 if ((*index) > max) 6495 return NULL; 6496 6497 rcu_read_lock(); 6498 retry: 6499 entry = mas_state_walk(&mas); 6500 if (mas_is_start(&mas)) 6501 goto retry; 6502 6503 if (unlikely(xa_is_zero(entry))) 6504 entry = NULL; 6505 6506 if (entry) 6507 goto unlock; 6508 6509 while (mas_searchable(&mas) && (mas.last < max)) { 6510 entry = mas_next_entry(&mas, max); 6511 if (likely(entry && !xa_is_zero(entry))) 6512 break; 6513 } 6514 6515 if (unlikely(xa_is_zero(entry))) 6516 entry = NULL; 6517 unlock: 6518 rcu_read_unlock(); 6519 if (likely(entry)) { 6520 *index = mas.last + 1; 6521 #ifdef CONFIG_DEBUG_MAPLE_TREE 6522 if (MT_WARN_ON(mt, (*index) && ((*index) <= copy))) 6523 pr_err("index not increased! %lx <= %lx\n", 6524 *index, copy); 6525 #endif 6526 } 6527 6528 return entry; 6529 } 6530 EXPORT_SYMBOL(mt_find); 6531 6532 /** 6533 * mt_find_after() - Search from the start up until an entry is found. 6534 * @mt: The maple tree 6535 * @index: Pointer which contains the start location of the search 6536 * @max: The maximum value to check 6537 * 6538 * Same as mt_find() except that it checks @index for 0 before 6539 * searching. If @index == 0, the search is aborted. This covers a wrap 6540 * around of @index to 0 in an iterator loop. 6541 * 6542 * Return: The entry at or after the @index or %NULL 6543 */ 6544 void *mt_find_after(struct maple_tree *mt, unsigned long *index, 6545 unsigned long max) 6546 { 6547 if (!(*index)) 6548 return NULL; 6549 6550 return mt_find(mt, index, max); 6551 } 6552 EXPORT_SYMBOL(mt_find_after); 6553 6554 #ifdef CONFIG_DEBUG_MAPLE_TREE 6555 atomic_t maple_tree_tests_run; 6556 EXPORT_SYMBOL_GPL(maple_tree_tests_run); 6557 atomic_t maple_tree_tests_passed; 6558 EXPORT_SYMBOL_GPL(maple_tree_tests_passed); 6559 6560 #ifndef __KERNEL__ 6561 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int); 6562 void mt_set_non_kernel(unsigned int val) 6563 { 6564 kmem_cache_set_non_kernel(maple_node_cache, val); 6565 } 6566 6567 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *); 6568 unsigned long mt_get_alloc_size(void) 6569 { 6570 return kmem_cache_get_alloc(maple_node_cache); 6571 } 6572 6573 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *); 6574 void mt_zero_nr_tallocated(void) 6575 { 6576 kmem_cache_zero_nr_tallocated(maple_node_cache); 6577 } 6578 6579 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *); 6580 unsigned int mt_nr_tallocated(void) 6581 { 6582 return kmem_cache_nr_tallocated(maple_node_cache); 6583 } 6584 6585 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *); 6586 unsigned int mt_nr_allocated(void) 6587 { 6588 return kmem_cache_nr_allocated(maple_node_cache); 6589 } 6590 6591 /* 6592 * mas_dead_node() - Check if the maple state is pointing to a dead node. 6593 * @mas: The maple state 6594 * @index: The index to restore in @mas. 6595 * 6596 * Used in test code. 6597 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise. 
6598 */ 6599 static inline int mas_dead_node(struct ma_state *mas, unsigned long index) 6600 { 6601 if (unlikely(!mas_searchable(mas) || mas_is_start(mas))) 6602 return 0; 6603 6604 if (likely(!mte_dead_node(mas->node))) 6605 return 0; 6606 6607 mas_rewalk(mas, index); 6608 return 1; 6609 } 6610 6611 void mt_cache_shrink(void) 6612 { 6613 } 6614 #else 6615 /* 6616 * mt_cache_shrink() - For testing, don't use this. 6617 * 6618 * Certain testcases can trigger an OOM when combined with other memory 6619 * debugging configuration options. This function is used to reduce the 6620 * possibility of an out of memory even due to kmem_cache objects remaining 6621 * around for longer than usual. 6622 */ 6623 void mt_cache_shrink(void) 6624 { 6625 kmem_cache_shrink(maple_node_cache); 6626 6627 } 6628 EXPORT_SYMBOL_GPL(mt_cache_shrink); 6629 6630 #endif /* not defined __KERNEL__ */ 6631 /* 6632 * mas_get_slot() - Get the entry in the maple state node stored at @offset. 6633 * @mas: The maple state 6634 * @offset: The offset into the slot array to fetch. 6635 * 6636 * Return: The entry stored at @offset. 6637 */ 6638 static inline struct maple_enode *mas_get_slot(struct ma_state *mas, 6639 unsigned char offset) 6640 { 6641 return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)), 6642 offset); 6643 } 6644 6645 /* Depth first search, post-order */ 6646 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) 6647 { 6648 6649 struct maple_enode *p = MAS_NONE, *mn = mas->node; 6650 unsigned long p_min, p_max; 6651 6652 mas_next_node(mas, mas_mn(mas), max); 6653 if (!mas_is_none(mas)) 6654 return; 6655 6656 if (mte_is_root(mn)) 6657 return; 6658 6659 mas->node = mn; 6660 mas_ascend(mas); 6661 do { 6662 p = mas->node; 6663 p_min = mas->min; 6664 p_max = mas->max; 6665 mas_prev_node(mas, 0); 6666 } while (!mas_is_none(mas)); 6667 6668 mas->node = p; 6669 mas->max = p_max; 6670 mas->min = p_min; 6671 } 6672 6673 /* Tree validations */ 6674 static void mt_dump_node(const struct maple_tree *mt, void *entry, 6675 unsigned long min, unsigned long max, unsigned int depth, 6676 enum mt_dump_format format); 6677 static void mt_dump_range(unsigned long min, unsigned long max, 6678 unsigned int depth, enum mt_dump_format format) 6679 { 6680 static const char spaces[] = " "; 6681 6682 switch(format) { 6683 case mt_dump_hex: 6684 if (min == max) 6685 pr_info("%.*s%lx: ", depth * 2, spaces, min); 6686 else 6687 pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max); 6688 break; 6689 default: 6690 case mt_dump_dec: 6691 if (min == max) 6692 pr_info("%.*s%lu: ", depth * 2, spaces, min); 6693 else 6694 pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max); 6695 } 6696 } 6697 6698 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, 6699 unsigned int depth, enum mt_dump_format format) 6700 { 6701 mt_dump_range(min, max, depth, format); 6702 6703 if (xa_is_value(entry)) 6704 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry), 6705 xa_to_value(entry), entry); 6706 else if (xa_is_zero(entry)) 6707 pr_cont("zero (%ld)\n", xa_to_internal(entry)); 6708 else if (mt_is_reserved(entry)) 6709 pr_cont("UNKNOWN ENTRY (%p)\n", entry); 6710 else 6711 pr_cont("%p\n", entry); 6712 } 6713 6714 static void mt_dump_range64(const struct maple_tree *mt, void *entry, 6715 unsigned long min, unsigned long max, unsigned int depth, 6716 enum mt_dump_format format) 6717 { 6718 struct maple_range_64 *node = &mte_to_node(entry)->mr64; 6719 bool leaf = mte_is_leaf(entry); 6720 unsigned long first = 
min; 6721 int i; 6722 6723 pr_cont(" contents: "); 6724 for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) { 6725 switch(format) { 6726 case mt_dump_hex: 6727 pr_cont("%p %lX ", node->slot[i], node->pivot[i]); 6728 break; 6729 default: 6730 case mt_dump_dec: 6731 pr_cont("%p %lu ", node->slot[i], node->pivot[i]); 6732 } 6733 } 6734 pr_cont("%p\n", node->slot[i]); 6735 for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) { 6736 unsigned long last = max; 6737 6738 if (i < (MAPLE_RANGE64_SLOTS - 1)) 6739 last = node->pivot[i]; 6740 else if (!node->slot[i] && max != mt_node_max(entry)) 6741 break; 6742 if (last == 0 && i > 0) 6743 break; 6744 if (leaf) 6745 mt_dump_entry(mt_slot(mt, node->slot, i), 6746 first, last, depth + 1, format); 6747 else if (node->slot[i]) 6748 mt_dump_node(mt, mt_slot(mt, node->slot, i), 6749 first, last, depth + 1, format); 6750 6751 if (last == max) 6752 break; 6753 if (last > max) { 6754 switch(format) { 6755 case mt_dump_hex: 6756 pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n", 6757 node, last, max, i); 6758 break; 6759 default: 6760 case mt_dump_dec: 6761 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", 6762 node, last, max, i); 6763 } 6764 } 6765 first = last + 1; 6766 } 6767 } 6768 6769 static void mt_dump_arange64(const struct maple_tree *mt, void *entry, 6770 unsigned long min, unsigned long max, unsigned int depth, 6771 enum mt_dump_format format) 6772 { 6773 struct maple_arange_64 *node = &mte_to_node(entry)->ma64; 6774 bool leaf = mte_is_leaf(entry); 6775 unsigned long first = min; 6776 int i; 6777 6778 pr_cont(" contents: "); 6779 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) 6780 pr_cont("%lu ", node->gap[i]); 6781 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap); 6782 for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) 6783 pr_cont("%p %lu ", node->slot[i], node->pivot[i]); 6784 pr_cont("%p\n", node->slot[i]); 6785 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) { 6786 unsigned long last = max; 6787 6788 if (i < (MAPLE_ARANGE64_SLOTS - 1)) 6789 last = node->pivot[i]; 6790 else if (!node->slot[i]) 6791 break; 6792 if (last == 0 && i > 0) 6793 break; 6794 if (leaf) 6795 mt_dump_entry(mt_slot(mt, node->slot, i), 6796 first, last, depth + 1, format); 6797 else if (node->slot[i]) 6798 mt_dump_node(mt, mt_slot(mt, node->slot, i), 6799 first, last, depth + 1, format); 6800 6801 if (last == max) 6802 break; 6803 if (last > max) { 6804 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", 6805 node, last, max, i); 6806 break; 6807 } 6808 first = last + 1; 6809 } 6810 } 6811 6812 static void mt_dump_node(const struct maple_tree *mt, void *entry, 6813 unsigned long min, unsigned long max, unsigned int depth, 6814 enum mt_dump_format format) 6815 { 6816 struct maple_node *node = mte_to_node(entry); 6817 unsigned int type = mte_node_type(entry); 6818 unsigned int i; 6819 6820 mt_dump_range(min, max, depth, format); 6821 6822 pr_cont("node %p depth %d type %d parent %p", node, depth, type, 6823 node ? 
node->parent : NULL); 6824 switch (type) { 6825 case maple_dense: 6826 pr_cont("\n"); 6827 for (i = 0; i < MAPLE_NODE_SLOTS; i++) { 6828 if (min + i > max) 6829 pr_cont("OUT OF RANGE: "); 6830 mt_dump_entry(mt_slot(mt, node->slot, i), 6831 min + i, min + i, depth, format); 6832 } 6833 break; 6834 case maple_leaf_64: 6835 case maple_range_64: 6836 mt_dump_range64(mt, entry, min, max, depth, format); 6837 break; 6838 case maple_arange_64: 6839 mt_dump_arange64(mt, entry, min, max, depth, format); 6840 break; 6841 6842 default: 6843 pr_cont(" UNKNOWN TYPE\n"); 6844 } 6845 } 6846 6847 void mt_dump(const struct maple_tree *mt, enum mt_dump_format format) 6848 { 6849 void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt)); 6850 6851 pr_info("maple_tree(%p) flags %X, height %u root %p\n", 6852 mt, mt->ma_flags, mt_height(mt), entry); 6853 if (!xa_is_node(entry)) 6854 mt_dump_entry(entry, 0, 0, 0, format); 6855 else if (entry) 6856 mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format); 6857 } 6858 EXPORT_SYMBOL_GPL(mt_dump); 6859 6860 /* 6861 * Calculate the maximum gap in a node and check if that's what is reported in 6862 * the parent (unless root). 6863 */ 6864 static void mas_validate_gaps(struct ma_state *mas) 6865 { 6866 struct maple_enode *mte = mas->node; 6867 struct maple_node *p_mn, *node = mte_to_node(mte); 6868 enum maple_type mt = mte_node_type(mas->node); 6869 unsigned long gap = 0, max_gap = 0; 6870 unsigned long p_end, p_start = mas->min; 6871 unsigned char p_slot, offset; 6872 unsigned long *gaps = NULL; 6873 unsigned long *pivots = ma_pivots(node, mt); 6874 unsigned int i; 6875 6876 if (ma_is_dense(mt)) { 6877 for (i = 0; i < mt_slot_count(mte); i++) { 6878 if (mas_get_slot(mas, i)) { 6879 if (gap > max_gap) 6880 max_gap = gap; 6881 gap = 0; 6882 continue; 6883 } 6884 gap++; 6885 } 6886 goto counted; 6887 } 6888 6889 gaps = ma_gaps(node, mt); 6890 for (i = 0; i < mt_slot_count(mte); i++) { 6891 p_end = mas_safe_pivot(mas, pivots, i, mt); 6892 6893 if (!gaps) { 6894 if (!mas_get_slot(mas, i)) 6895 gap = p_end - p_start + 1; 6896 } else { 6897 void *entry = mas_get_slot(mas, i); 6898 6899 gap = gaps[i]; 6900 MT_BUG_ON(mas->tree, !entry); 6901 6902 if (gap > p_end - p_start + 1) { 6903 pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n", 6904 mas_mn(mas), i, gap, p_end, p_start, 6905 p_end - p_start + 1); 6906 MT_BUG_ON(mas->tree, gap > p_end - p_start + 1); 6907 } 6908 } 6909 6910 if (gap > max_gap) 6911 max_gap = gap; 6912 6913 p_start = p_end + 1; 6914 if (p_end >= mas->max) 6915 break; 6916 } 6917 6918 counted: 6919 if (mt == maple_arange_64) { 6920 offset = ma_meta_gap(node, mt); 6921 if (offset > i) { 6922 pr_err("gap offset %p[%u] is invalid\n", node, offset); 6923 MT_BUG_ON(mas->tree, 1); 6924 } 6925 6926 if (gaps[offset] != max_gap) { 6927 pr_err("gap %p[%u] is not the largest gap %lu\n", 6928 node, offset, max_gap); 6929 MT_BUG_ON(mas->tree, 1); 6930 } 6931 6932 MT_BUG_ON(mas->tree, !gaps); 6933 for (i++ ; i < mt_slot_count(mte); i++) { 6934 if (gaps[i] != 0) { 6935 pr_err("gap %p[%u] beyond node limit != 0\n", 6936 node, i); 6937 MT_BUG_ON(mas->tree, 1); 6938 } 6939 } 6940 } 6941 6942 if (mte_is_root(mte)) 6943 return; 6944 6945 p_slot = mte_parent_slot(mas->node); 6946 p_mn = mte_parent(mte); 6947 MT_BUG_ON(mas->tree, max_gap > mas->max); 6948 if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) { 6949 pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap); 6950 mt_dump(mas->tree, mt_dump_hex); 6951 MT_BUG_ON(mas->tree, 1); 6952 } 6953 } 6954 6955 static 
void mas_validate_parent_slot(struct ma_state *mas)
{
	struct maple_node *parent;
	struct maple_enode *node;
	enum maple_type p_type;
	unsigned char p_slot;
	void __rcu **slots;
	int i;

	if (mte_is_root(mas->node))
		return;

	p_slot = mte_parent_slot(mas->node);
	p_type = mas_parent_type(mas, mas->node);
	parent = mte_parent(mas->node);
	slots = ma_slots(parent, p_type);
	MT_BUG_ON(mas->tree, mas_mn(mas) == parent);

	/* Check prev/next parent slot for duplicate node entry */

	for (i = 0; i < mt_slots[p_type]; i++) {
		node = mas_slot(mas, slots, i);
		if (i == p_slot) {
			if (node != mas->node)
				pr_err("parent %p[%u] does not have %p\n",
					parent, i, mas_mn(mas));
			MT_BUG_ON(mas->tree, node != mas->node);
		} else if (node == mas->node) {
			pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
			       mas_mn(mas), parent, i, p_slot);
			MT_BUG_ON(mas->tree, node == mas->node);
		}
	}
}

static void mas_validate_child_slot(struct ma_state *mas)
{
	enum maple_type type = mte_node_type(mas->node);
	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
	unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
	struct maple_enode *child;
	unsigned char i;

	if (mte_is_leaf(mas->node))
		return;

	for (i = 0; i < mt_slots[type]; i++) {
		child = mas_slot(mas, slots, i);

		if (!child) {
			pr_err("Non-leaf node lacks child at %p[%u]\n",
			       mas_mn(mas), i);
			MT_BUG_ON(mas->tree, 1);
		}

		if (mte_parent_slot(child) != i) {
			pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
			       mas_mn(mas), i, mte_to_node(child),
			       mte_parent_slot(child));
			MT_BUG_ON(mas->tree, 1);
		}

		if (mte_parent(child) != mte_to_node(mas->node)) {
			pr_err("child %p has parent %p not %p\n",
			       mte_to_node(child), mte_parent(child),
			       mte_to_node(mas->node));
			MT_BUG_ON(mas->tree, 1);
		}

		if (i < mt_pivots[type] && pivots[i] == mas->max)
			break;
	}
}

/*
 * Validate that all pivots are within mas->min and mas->max, check that the
 * metadata end matches where the maximum ends, and ensure there are no slots
 * or pivots set outside of the end of the data.
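 *
 * Slots and pivots past the end of data must be clear; the final slot is
 * exempt, as it may overlap the node metadata.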
7033 */ 7034 static void mas_validate_limits(struct ma_state *mas) 7035 { 7036 int i; 7037 unsigned long prev_piv = 0; 7038 enum maple_type type = mte_node_type(mas->node); 7039 void __rcu **slots = ma_slots(mte_to_node(mas->node), type); 7040 unsigned long *pivots = ma_pivots(mas_mn(mas), type); 7041 7042 for (i = 0; i < mt_slots[type]; i++) { 7043 unsigned long piv; 7044 7045 piv = mas_safe_pivot(mas, pivots, i, type); 7046 7047 if (!piv && (i != 0)) { 7048 pr_err("Missing node limit pivot at %p[%u]", 7049 mas_mn(mas), i); 7050 MAS_WARN_ON(mas, 1); 7051 } 7052 7053 if (prev_piv > piv) { 7054 pr_err("%p[%u] piv %lu < prev_piv %lu\n", 7055 mas_mn(mas), i, piv, prev_piv); 7056 MAS_WARN_ON(mas, piv < prev_piv); 7057 } 7058 7059 if (piv < mas->min) { 7060 pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i, 7061 piv, mas->min); 7062 MAS_WARN_ON(mas, piv < mas->min); 7063 } 7064 if (piv > mas->max) { 7065 pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i, 7066 piv, mas->max); 7067 MAS_WARN_ON(mas, piv > mas->max); 7068 } 7069 prev_piv = piv; 7070 if (piv == mas->max) 7071 break; 7072 } 7073 7074 if (mas_data_end(mas) != i) { 7075 pr_err("node%p: data_end %u != the last slot offset %u\n", 7076 mas_mn(mas), mas_data_end(mas), i); 7077 MT_BUG_ON(mas->tree, 1); 7078 } 7079 7080 for (i += 1; i < mt_slots[type]; i++) { 7081 void *entry = mas_slot(mas, slots, i); 7082 7083 if (entry && (i != mt_slots[type] - 1)) { 7084 pr_err("%p[%u] should not have entry %p\n", mas_mn(mas), 7085 i, entry); 7086 MT_BUG_ON(mas->tree, entry != NULL); 7087 } 7088 7089 if (i < mt_pivots[type]) { 7090 unsigned long piv = pivots[i]; 7091 7092 if (!piv) 7093 continue; 7094 7095 pr_err("%p[%u] should not have piv %lu\n", 7096 mas_mn(mas), i, piv); 7097 MAS_WARN_ON(mas, i < mt_pivots[type] - 1); 7098 } 7099 } 7100 } 7101 7102 static void mt_validate_nulls(struct maple_tree *mt) 7103 { 7104 void *entry, *last = (void *)1; 7105 unsigned char offset = 0; 7106 void __rcu **slots; 7107 MA_STATE(mas, mt, 0, 0); 7108 7109 mas_start(&mas); 7110 if (mas_is_none(&mas) || (mas.node == MAS_ROOT)) 7111 return; 7112 7113 while (!mte_is_leaf(mas.node)) 7114 mas_descend(&mas); 7115 7116 slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node)); 7117 do { 7118 entry = mas_slot(&mas, slots, offset); 7119 if (!last && !entry) { 7120 pr_err("Sequential nulls end at %p[%u]\n", 7121 mas_mn(&mas), offset); 7122 } 7123 MT_BUG_ON(mt, !last && !entry); 7124 last = entry; 7125 if (offset == mas_data_end(&mas)) { 7126 mas_next_node(&mas, mas_mn(&mas), ULONG_MAX); 7127 if (mas_is_none(&mas)) 7128 return; 7129 offset = 0; 7130 slots = ma_slots(mte_to_node(mas.node), 7131 mte_node_type(mas.node)); 7132 } else { 7133 offset++; 7134 } 7135 7136 } while (!mas_is_none(&mas)); 7137 } 7138 7139 /* 7140 * validate a maple tree by checking: 7141 * 1. The limits (pivots are within mas->min to mas->max) 7142 * 2. 
The gap is correctly set in the parents 7143 */ 7144 void mt_validate(struct maple_tree *mt) 7145 { 7146 unsigned char end; 7147 7148 MA_STATE(mas, mt, 0, 0); 7149 rcu_read_lock(); 7150 mas_start(&mas); 7151 if (!mas_searchable(&mas)) 7152 goto done; 7153 7154 while (!mte_is_leaf(mas.node)) 7155 mas_descend(&mas); 7156 7157 while (!mas_is_none(&mas)) { 7158 MAS_WARN_ON(&mas, mte_dead_node(mas.node)); 7159 end = mas_data_end(&mas); 7160 if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) && 7161 (mas.max != ULONG_MAX))) { 7162 pr_err("Invalid size %u of %p\n", end, mas_mn(&mas)); 7163 } 7164 7165 mas_validate_parent_slot(&mas); 7166 mas_validate_limits(&mas); 7167 mas_validate_child_slot(&mas); 7168 if (mt_is_alloc(mt)) 7169 mas_validate_gaps(&mas); 7170 mas_dfs_postorder(&mas, ULONG_MAX); 7171 } 7172 mt_validate_nulls(mt); 7173 done: 7174 rcu_read_unlock(); 7175 7176 } 7177 EXPORT_SYMBOL_GPL(mt_validate); 7178 7179 void mas_dump(const struct ma_state *mas) 7180 { 7181 pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node); 7182 if (mas_is_none(mas)) 7183 pr_err("(MAS_NONE) "); 7184 else if (mas_is_ptr(mas)) 7185 pr_err("(MAS_ROOT) "); 7186 else if (mas_is_start(mas)) 7187 pr_err("(MAS_START) "); 7188 else if (mas_is_paused(mas)) 7189 pr_err("(MAS_PAUSED) "); 7190 7191 pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last); 7192 pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n", 7193 mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags); 7194 if (mas->index > mas->last) 7195 pr_err("Check index & last\n"); 7196 } 7197 EXPORT_SYMBOL_GPL(mas_dump); 7198 7199 void mas_wr_dump(const struct ma_wr_state *wr_mas) 7200 { 7201 pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n", 7202 wr_mas->node, wr_mas->r_min, wr_mas->r_max); 7203 pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n", 7204 wr_mas->type, wr_mas->offset_end, wr_mas->node_end, 7205 wr_mas->end_piv); 7206 } 7207 EXPORT_SYMBOL_GPL(mas_wr_dump); 7208 7209 #endif /* CONFIG_DEBUG_MAPLE_TREE */ 7210
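
/*
 * Example of the advanced API (an illustrative sketch, not built as part of
 * this file; process() stands in for caller code): iterate entries under
 * RCU, pausing to reschedule as described in
 * Documentation/core-api/maple_tree.rst.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		process(entry);
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * After mas_pause(), the next mas_find() restarts the walk and continues
 * from the index following the last entry returned.
 */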