Lines matching defs:xas (Linux kernel, lib/xarray.c)
21 * @xas is the 'xarray operation state'. It may be either a pointer to
38 static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
41 xas_lock_irq(xas);
43 xas_lock_bh(xas);
45 xas_lock(xas);
48 static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
51 xas_unlock_irq(xas);
53 xas_unlock_bh(xas);
55 xas_unlock(xas);
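
These helpers pick the plain, _bh or _irq lock variant to match how the array was initialised. A minimal sketch of how xarray.c pairs them internally, assuming an xa_state pointer xas about to modify the array (xa_lock_type() is the file's own helper for reading the flags):

    unsigned int lock_type = xa_lock_type(xas->xa);

    xas_lock_type(xas, lock_type);
    /* ... modify the tree ... */
    xas_unlock_type(xas, lock_type);
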
121 * @xas: Array operation state.
126 static void xas_squash_marks(const struct xa_state *xas)
129 unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
131 if (!xas->xa_sibs)
135 unsigned long *marks = xas->xa_node->marks[mark];
136 if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
138 __set_bit(xas->xa_offset, marks);
139 bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
149 static void xas_set_offset(struct xa_state *xas)
151 xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
155 static void xas_move_index(struct xa_state *xas, unsigned long offset)
157 unsigned int shift = xas->xa_node->shift;
158 xas->xa_index &= ~XA_CHUNK_MASK << shift;
159 xas->xa_index += offset << shift;
162 static void xas_next_offset(struct xa_state *xas)
164 xas->xa_offset++;
165 xas_move_index(xas, xas->xa_offset);
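
A worked example of this index arithmetic, not from the source: with the default XA_CHUNK_SHIFT of 6, a node at shift 6 covers index bits 6..11, so xas_move_index(xas, 3) amounts to:

    /* shift == 6, XA_CHUNK_MASK == 0x3f */
    xas->xa_index &= ~(0x3fUL << 6);   /* clear bits 6..11 */
    xas->xa_index += 3UL << 6;         /* select slot 3 at this level */
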
168 static void *set_bounds(struct xa_state *xas)
170 xas->xa_node = XAS_BOUNDS;
175 * Starts a walk. If the @xas is already valid, we assume that it's on
178 * of the xarray, return NULL without changing @xas->xa_node. Otherwise
179 * set @xas->xa_node to NULL and return the current head of the array.
181 static void *xas_start(struct xa_state *xas)
185 if (xas_valid(xas))
186 return xas_reload(xas);
187 if (xas_error(xas))
190 entry = xa_head(xas->xa);
192 if (xas->xa_index)
193 return set_bounds(xas);
195 if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
196 return set_bounds(xas);
199 xas->xa_node = NULL;
203 static __always_inline void *xas_descend(struct xa_state *xas,
206 unsigned int offset = get_offset(xas->xa_index, node);
207 void *entry = xa_entry(xas->xa, node, offset);
209 xas->xa_node = node;
212 entry = xa_entry(xas->xa, node, offset);
217 xas->xa_offset = offset;
223 * @xas: XArray operation state.
225 * Usually walks the @xas to the appropriate state to load the entry
227 * @xas is in an error state. xas_load() will never expand the tree.
231 * present within the range specified by @xas.
236 void *xas_load(struct xa_state *xas)
238 void *entry = xas_start(xas);
243 if (xas->xa_shift > node->shift)
245 entry = xas_descend(xas, node);
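
A minimal lookup sketch built on xas_load(), mirroring what xa_load() itself does further down; array and index are caller-supplied:

    XA_STATE(xas, &array, index);
    void *entry;

    rcu_read_lock();
    do {
        entry = xas_load(&xas);
    } while (xas_retry(&xas, entry));
    rcu_read_unlock();
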
264 * @xas: XArray operation state.
269 void xas_destroy(struct xa_state *xas)
271 struct xa_node *next, *node = xas->xa_alloc;
277 xas->xa_alloc = node = next;
283 * @xas: XArray operation state.
288 * If it fails, @xas is flagged as needing memory to continue. The caller
299 bool xas_nomem(struct xa_state *xas, gfp_t gfp)
301 if (xas->xa_node != XA_ERROR(-ENOMEM)) {
302 xas_destroy(xas);
305 if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
307 xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
308 if (!xas->xa_alloc)
310 xas->xa_alloc->parent = NULL;
311 XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
312 xas->xa_node = XAS_RESTART;
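
The usual calling convention, per the XArray documentation: run the operation under the lock, then let xas_nomem() decide whether to allocate outside the lock and retry. item is a caller-supplied pointer:

    XA_STATE(xas, &array, index);

    do {
        xas_lock(&xas);
        xas_store(&xas, item);
        xas_unlock(&xas);
    } while (xas_nomem(&xas, GFP_KERNEL));

    return xas_error(&xas);
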
319 * @xas: XArray operation state.
326 static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
327 __must_hold(xas->xa->xa_lock)
329 unsigned int lock_type = xa_lock_type(xas->xa);
331 if (xas->xa_node != XA_ERROR(-ENOMEM)) {
332 xas_destroy(xas);
335 if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
338 xas_unlock_type(xas, lock_type);
339 xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
340 xas_lock_type(xas, lock_type);
342 xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
344 if (!xas->xa_alloc)
346 xas->xa_alloc->parent = NULL;
347 XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
348 xas->xa_node = XAS_RESTART;
352 static void xas_update(struct xa_state *xas, struct xa_node *node)
354 if (xas->xa_update)
355 xas->xa_update(node);
360 static void *xas_alloc(struct xa_state *xas, unsigned int shift)
362 struct xa_node *parent = xas->xa_node;
363 struct xa_node *node = xas->xa_alloc;
365 if (xas_invalid(xas))
369 xas->xa_alloc = NULL;
373 if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
376 node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
378 xas_set_err(xas, -ENOMEM);
384 node->offset = xas->xa_offset;
387 xas_update(xas, parent);
394 RCU_INIT_POINTER(node->parent, xas->xa_node);
395 node->array = xas->xa;
402 static unsigned long xas_size(const struct xa_state *xas)
404 return (xas->xa_sibs + 1UL) << xas->xa_shift;
410 * in order to add the entry described by @xas. Because we cannot store a
414 static unsigned long xas_max(struct xa_state *xas)
416 unsigned long max = xas->xa_index;
419 if (xas->xa_shift || xas->xa_sibs) {
420 unsigned long mask = xas_size(xas) - 1;
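
A worked example, not from the source: an order-4 entry covering indices 16..31 has xa_index == 16, xa_shift == 0 and xa_sibs == 15, so:

    xas_size(xas);     /* (15 + 1UL) << 0 == 16 slots */
    mask = 16 - 1;     /* 15 */
    max = 16 | 15;     /* 31, the highest index the entry spans */
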
438 static void xas_shrink(struct xa_state *xas)
440 struct xarray *xa = xas->xa;
441 struct xa_node *node = xas->xa_node;
456 xas->xa_node = XAS_BOUNDS;
466 xas_update(xas, node);
477 * @xas: Array operation state.
479 * Attempts to delete the @xas->xa_node. This will fail if xa->node has
482 static void xas_delete_node(struct xa_state *xas)
484 struct xa_node *node = xas->xa_node;
493 parent = xa_parent_locked(xas->xa, node);
494 xas->xa_node = parent;
495 xas->xa_offset = node->offset;
499 xas->xa->xa_head = NULL;
500 xas->xa_node = XAS_BOUNDS;
504 parent->slots[xas->xa_offset] = NULL;
508 xas_update(xas, node);
512 xas_shrink(xas);
517 * @xas: Array operation state.
524 static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
530 void *entry = xa_entry_locked(xas->xa, node, offset);
543 parent = xa_parent_locked(xas->xa, node);
547 xas_update(xas, node);
558 * sufficient height to be able to contain @xas->xa_index
560 static int xas_expand(struct xa_state *xas, void *head)
562 struct xarray *xa = xas->xa;
565 unsigned long max = xas_max(xas);
577 xas->xa_node = NULL;
583 node = xas_alloc(xas, shift);
618 xas_update(xas, node);
623 xas->xa_node = node;
629 * @xas: XArray operation state.
638 * slot, returns %NULL and indicates the error in @xas.
640 static void *xas_create(struct xa_state *xas, bool allow_root)
642 struct xarray *xa = xas->xa;
645 struct xa_node *node = xas->xa_node;
647 unsigned int order = xas->xa_shift;
651 xas->xa_node = NULL;
654 shift = xas_expand(xas, entry);
661 } else if (xas_error(xas)) {
664 unsigned int offset = xas->xa_offset;
678 node = xas_alloc(xas, shift);
689 entry = xas_descend(xas, node);
690 slot = &node->slots[xas->xa_offset];
698 * @xas: XArray operation state.
700 * Creates all of the slots in the range covered by @xas. Sets @xas to
705 void xas_create_range(struct xa_state *xas)
707 unsigned long index = xas->xa_index;
708 unsigned char shift = xas->xa_shift;
709 unsigned char sibs = xas->xa_sibs;
711 xas->xa_index |= ((sibs + 1UL) << shift) - 1;
712 if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
713 xas->xa_offset |= sibs;
714 xas->xa_shift = 0;
715 xas->xa_sibs = 0;
718 xas_create(xas, true);
719 if (xas_error(xas))
721 if (xas->xa_index <= (index | XA_CHUNK_MASK))
723 xas->xa_index -= XA_CHUNK_SIZE;
726 struct xa_node *node = xas->xa_node;
729 xas->xa_node = xa_parent_locked(xas->xa, node);
730 xas->xa_offset = node->offset - 1;
737 xas->xa_shift = shift;
738 xas->xa_sibs = sibs;
739 xas->xa_index = index;
742 xas->xa_index = index;
743 if (xas->xa_node)
744 xas_set_offset(xas);
748 static void update_node(struct xa_state *xas, struct xa_node *node,
758 xas_update(xas, node);
760 xas_delete_node(xas);
765 * @xas: XArray operation state.
768 * If @xas is operating on a multi-index entry, the entry returned by this
776 void *xas_store(struct xa_state *xas, void *entry)
779 void __rcu **slot = &xas->xa->xa_head;
788 first = xas_create(xas, allow_root);
790 first = xas_load(xas);
793 if (xas_invalid(xas))
795 node = xas->xa_node;
796 if (node && (xas->xa_shift < node->shift))
797 xas->xa_sibs = 0;
798 if ((first == entry) && !xas->xa_sibs)
802 offset = xas->xa_offset;
803 max = xas->xa_offset + xas->xa_sibs;
806 if (xas->xa_sibs)
807 xas_squash_marks(xas);
810 xas_init_marks(xas);
822 xas_free_nodes(xas, xa_to_node(next));
831 entry = xa_mk_sibling(xas->xa_offset);
836 next = xa_entry_locked(xas->xa, node, ++offset);
845 update_node(xas, node, count, values);
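
A deletion sketch using xas_store(): storing NULL removes the entry and returns whatever was there before. No xas_nomem() loop is needed because storing NULL never allocates:

    XA_STATE(xas, &array, index);
    void *old;

    xas_lock(&xas);
    old = xas_store(&xas, NULL);
    xas_unlock(&xas);
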
852 * @xas: XArray operation state.
855 * Return: true if the mark is set, false if the mark is clear or @xas
858 bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
860 if (xas_invalid(xas))
862 if (!xas->xa_node)
863 return xa_marked(xas->xa, mark);
864 return node_get_mark(xas->xa_node, xas->xa_offset, mark);
870 * @xas: XArray operation state.
874 * on all the ancestor entries. Does nothing if @xas has not been walked to
877 void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
879 struct xa_node *node = xas->xa_node;
880 unsigned int offset = xas->xa_offset;
882 if (xas_invalid(xas))
889 node = xa_parent_locked(xas->xa, node);
892 if (!xa_marked(xas->xa, mark))
893 xa_mark_set(xas->xa, mark);
899 * @xas: XArray operation state.
904 * @xas has not been walked to an entry, or is in an error state.
906 void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
908 struct xa_node *node = xas->xa_node;
909 unsigned int offset = xas->xa_offset;
911 if (xas_invalid(xas))
921 node = xa_parent_locked(xas->xa, node);
924 if (xa_marked(xas->xa, mark))
925 xa_mark_clear(xas->xa, mark);
931 * @xas: Array operations state.
933 * Initialise all marks for the entry specified by @xas. If we're tracking
940 void xas_init_marks(const struct xa_state *xas)
945 if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
946 xas_set_mark(xas, mark);
948 xas_clear_mark(xas, mark);
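
A sketch of marking an existing entry; xas_set_mark() only acts once the state has been walked to an entry (here via xas_load()) and the lock is held:

    XA_STATE(xas, &array, index);

    xas_lock(&xas);
    if (xas_load(&xas))
        xas_set_mark(&xas, XA_MARK_0);
    xas_unlock(&xas);
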
1006 * @xas: XArray operation state.
1014 * entries of the order stored in the @xas.
1018 void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
1022 unsigned int mask = xas->xa_sibs;
1025 if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
1027 if (xas->xa_shift + XA_CHUNK_SHIFT > order)
1035 node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
1038 node->array = xas->xa;
1047 RCU_INIT_POINTER(node->parent, xas->xa_alloc);
1048 xas->xa_alloc = node;
1053 xas_destroy(xas);
1054 xas_set_err(xas, -ENOMEM);
1060 * @xas: XArray operation state.
1064 * The size of the new entries is set in @xas. The value in @entry is
1069 void xas_split(struct xa_state *xas, void *entry, unsigned int order)
1074 void *curr = xas_load(xas);
1077 node = xas->xa_node;
1081 marks = node_get_marks(node, xas->xa_offset);
1083 offset = xas->xa_offset + sibs;
1085 if (xas->xa_shift < node->shift) {
1086 struct xa_node *child = xas->xa_alloc;
1088 xas->xa_alloc = rcu_dereference_raw(child->parent);
1095 node_set_marks(node, offset, child, xas->xa_sibs,
1101 xas_update(xas, child);
1103 unsigned int canon = offset - xas->xa_sibs;
1111 (xas->xa_sibs + 1);
1113 } while (offset-- > xas->xa_offset);
1116 xas_update(xas, node);
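
A hedged sketch of the two-phase split, modelled on how the page cache uses it: allocate outside the lock, split inside it. entry is the existing multi-order entry of order old_order; new_order is the smaller order it is being re-described as:

    XA_STATE_ORDER(xas, &array, index, new_order);

    xas_split_alloc(&xas, entry, old_order, GFP_KERNEL);
    if (xas_error(&xas))
        return xas_error(&xas);

    xas_lock(&xas);
    xas_split(&xas, entry, old_order);
    xas_unlock(&xas);
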
1123 * @xas: XArray operation state.
1128 * the lock. It resets the @xas to be suitable for the next iteration
1136 void xas_pause(struct xa_state *xas)
1138 struct xa_node *node = xas->xa_node;
1140 if (xas_invalid(xas))
1143 xas->xa_node = XAS_RESTART;
1145 unsigned long offset = xas->xa_offset;
1147 if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
1150 xas->xa_index += (offset - xas->xa_offset) << node->shift;
1151 if (xas->xa_index == 0)
1152 xas->xa_node = XAS_BOUNDS;
1154 xas->xa_index++;
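
The canonical use, per the XArray documentation, is dropping the lock partway through a long iteration without losing the position:

    XA_STATE(xas, &array, 0);
    void *entry;

    xas_lock(&xas);
    xas_for_each(&xas, entry, ULONG_MAX) {
        /* ... process entry ... */
        if (need_resched()) {
            xas_pause(&xas);
            xas_unlock(&xas);
            cond_resched();
            xas_lock(&xas);
        }
    }
    xas_unlock(&xas);
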
1161 * @xas: XArray operation state.
1166 void *__xas_prev(struct xa_state *xas)
1170 if (!xas_frozen(xas->xa_node))
1171 xas->xa_index--;
1172 if (!xas->xa_node)
1173 return set_bounds(xas);
1174 if (xas_not_node(xas->xa_node))
1175 return xas_load(xas);
1177 if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
1178 xas->xa_offset--;
1180 while (xas->xa_offset == 255) {
1181 xas->xa_offset = xas->xa_node->offset - 1;
1182 xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1183 if (!xas->xa_node)
1184 return set_bounds(xas);
1188 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1192 xas->xa_node = xa_to_node(entry);
1193 xas_set_offset(xas);
1200 * @xas: XArray operation state.
1205 void *__xas_next(struct xa_state *xas)
1209 if (!xas_frozen(xas->xa_node))
1210 xas->xa_index++;
1211 if (!xas->xa_node)
1212 return set_bounds(xas);
1213 if (xas_not_node(xas->xa_node))
1214 return xas_load(xas);
1216 if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
1217 xas->xa_offset++;
1219 while (xas->xa_offset == XA_CHUNK_SIZE) {
1220 xas->xa_offset = xas->xa_node->offset + 1;
1221 xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1222 if (!xas->xa_node)
1223 return set_bounds(xas);
1227 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1231 xas->xa_node = xa_to_node(entry);
1232 xas_set_offset(xas);
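
Note that xa_offset is an unsigned char, so the == 255 test in __xas_prev() catches underflow past slot 0. Both functions are the slow paths behind the xas_prev()/xas_next() inlines in <linux/xarray.h>, which handle movement within one node themselves. A minimal stepping sketch:

    XA_STATE(xas, &array, index);
    void *entry;

    rcu_read_lock();
    entry = xas_load(&xas);    /* establish a position first */
    entry = xas_next(&xas);    /* entry at index + 1 */
    entry = xas_prev(&xas);    /* back to index */
    rcu_read_unlock();
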
1239 * @xas: XArray operation state.
1242 * If the @xas has not yet been walked to an entry, return the entry
1243 * which has an index >= xas.xa_index. If it has been walked, the entry
1248 * is set to the smallest index not yet in the array. This allows @xas
1253 void *xas_find(struct xa_state *xas, unsigned long max)
1257 if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
1259 if (xas->xa_index > max)
1260 return set_bounds(xas);
1262 if (!xas->xa_node) {
1263 xas->xa_index = 1;
1264 return set_bounds(xas);
1265 } else if (xas->xa_node == XAS_RESTART) {
1266 entry = xas_load(xas);
1267 if (entry || xas_not_node(xas->xa_node))
1269 } else if (!xas->xa_node->shift &&
1270 xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
1271 xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
1274 xas_next_offset(xas);
1276 while (xas->xa_node && (xas->xa_index <= max)) {
1277 if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
1278 xas->xa_offset = xas->xa_node->offset + 1;
1279 xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1283 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1285 xas->xa_node = xa_to_node(entry);
1286 xas->xa_offset = 0;
1292 xas_next_offset(xas);
1295 if (!xas->xa_node)
1296 xas->xa_node = XAS_BOUNDS;
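
xas_find() is the engine behind xas_for_each(). A typical RCU-side iteration, skipping retry entries:

    XA_STATE(xas, &array, 0);
    void *entry;

    rcu_read_lock();
    xas_for_each(&xas, entry, ULONG_MAX) {
        if (xas_retry(&xas, entry))
            continue;
        /* ... use entry ... */
    }
    rcu_read_unlock();
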
1303 * @xas: XArray operation state.
1307 * If the @xas has not yet been walked to an entry, return the marked entry
1308 * which has an index >= xas.xa_index. If it has been walked, the entry
1310 * first marked entry with an index > xas.xa_index.
1312 * If no marked entry is found and the array is smaller than @max, @xas is
1313 * set to the bounds state and xas->xa_index is set to the smallest index
1314 * not yet in the array. This allows @xas to be immediately passed to
1317 * If no entry is found before @max is reached, @xas is set to the restart
1322 void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
1328 if (xas_error(xas))
1330 if (xas->xa_index > max)
1333 if (!xas->xa_node) {
1334 xas->xa_index = 1;
1336 } else if (xas_top(xas->xa_node)) {
1338 entry = xa_head(xas->xa);
1339 xas->xa_node = NULL;
1340 if (xas->xa_index > max_index(entry))
1343 if (xa_marked(xas->xa, mark))
1345 xas->xa_index = 1;
1348 xas->xa_node = xa_to_node(entry);
1349 xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
1352 while (xas->xa_index <= max) {
1353 if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
1354 xas->xa_offset = xas->xa_node->offset + 1;
1355 xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1356 if (!xas->xa_node)
1363 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1365 xas->xa_offset = xa_to_sibling(entry);
1366 xas_move_index(xas, xas->xa_offset);
1370 offset = xas_find_chunk(xas, advance, mark);
1371 if (offset > xas->xa_offset) {
1373 xas_move_index(xas, offset);
1375 if ((xas->xa_index - 1) >= max)
1377 xas->xa_offset = offset;
1382 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1383 if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
1387 xas->xa_node = xa_to_node(entry);
1388 xas_set_offset(xas);
1392 if (xas->xa_index > max)
1394 return set_bounds(xas);
1396 xas->xa_node = XAS_RESTART;
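
The marked-entry counterpart drives xas_for_each_marked():

    XA_STATE(xas, &array, 0);
    void *entry;

    rcu_read_lock();
    xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
        /* only entries with XA_MARK_0 set are visited */
    }
    rcu_read_unlock();
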
1403 * @xas: XArray operation state.
1405 * The @xas describes both a range and a position within that range.
1408 * Return: The next entry in the range covered by @xas or %NULL.
1410 void *xas_find_conflict(struct xa_state *xas)
1414 if (xas_error(xas))
1417 if (!xas->xa_node)
1420 if (xas_top(xas->xa_node)) {
1421 curr = xas_start(xas);
1426 curr = xas_descend(xas, node);
1432 if (xas->xa_node->shift > xas->xa_shift)
1436 if (xas->xa_node->shift == xas->xa_shift) {
1437 if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
1439 } else if (xas->xa_offset == XA_CHUNK_MASK) {
1440 xas->xa_offset = xas->xa_node->offset;
1441 xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
1442 if (!xas->xa_node)
1446 curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
1450 xas->xa_node = xa_to_node(curr);
1451 xas->xa_offset = 0;
1452 curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
1457 xas->xa_offset -= xas->xa_sibs;
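
A sketch of the reserve-a-range pattern this supports, similar to its use in the page cache: confirm the range described by the xa_state is empty before storing a multi-order entry. The -EEXIST choice here is illustrative, not from the source:

    XA_STATE_ORDER(xas, &array, index, order);

    xas_lock(&xas);
    if (!xas_find_conflict(&xas))
        xas_store(&xas, entry);
    else
        xas_set_err(&xas, -EEXIST);
    xas_unlock(&xas);
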
1472 XA_STATE(xas, xa, index);
1477 entry = xas_load(&xas);
1480 } while (xas_retry(&xas, entry));
1487 static void *xas_result(struct xa_state *xas, void *curr)
1491 if (xas_error(xas))
1492 curr = xas->xa_node;
1510 XA_STATE(xas, xa, index);
1511 return xas_result(&xas, xas_store(&xas, NULL));
1556 XA_STATE(xas, xa, index);
1565 curr = xas_store(&xas, entry);
1567 xas_clear_mark(&xas, XA_FREE_MARK);
1568 } while (__xas_nomem(&xas, gfp));
1570 return xas_result(&xas, curr);
1622 XA_STATE(xas, xa, index);
1629 curr = xas_load(&xas);
1631 xas_store(&xas, entry);
1633 xas_clear_mark(&xas, XA_FREE_MARK);
1635 } while (__xas_nomem(&xas, gfp));
1637 return xas_result(&xas, curr);
1659 XA_STATE(xas, xa, index);
1668 curr = xas_load(&xas);
1670 xas_store(&xas, entry);
1672 xas_clear_mark(&xas, XA_FREE_MARK);
1674 xas_set_err(&xas, -EBUSY);
1676 } while (__xas_nomem(&xas, gfp));
1678 return xas_error(&xas);
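
These three locked variants (__xa_store, __xa_cmpxchg, __xa_insert) share the same __xas_nomem() retry shape and differ only in their precondition. From the caller's side, via the lock-taking xa_* wrappers:

    old = xa_store(&array, index, item, GFP_KERNEL);          /* unconditional */
    old = xa_cmpxchg(&array, index, NULL, item, GFP_KERNEL);  /* only if slot held NULL */
    err = xa_insert(&array, index, item, GFP_KERNEL);         /* -EBUSY if occupied */
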
1683 static void xas_set_range(struct xa_state *xas, unsigned long first,
1690 xas_set(xas, first);
1710 xas->xa_shift = shift;
1711 xas->xa_sibs = sibs;
1735 XA_STATE(xas, xa, 0);
1743 xas_lock(&xas);
1748 xas_set_order(&xas, last, order);
1749 xas_create(&xas, true);
1750 if (xas_error(&xas))
1754 xas_set_range(&xas, first, last);
1755 xas_store(&xas, entry);
1756 if (xas_error(&xas))
1758 first += xas_size(&xas);
1761 xas_unlock(&xas);
1762 } while (xas_nomem(&xas, gfp));
1764 return xas_result(&xas, NULL);
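
A usage sketch (requires CONFIG_XARRAY_MULTI): tie a whole range of indices to a single entry:

    /* afterwards, lookups at indices 16..31 all return item */
    xa_store_range(&array, 16, 31, item, GFP_KERNEL);
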
1770 * @xas: XArray operation state.
1772 * Called after xas_load, the xas should not be in an error state.
1776 int xas_get_order(struct xa_state *xas)
1780 if (!xas->xa_node)
1784 unsigned int slot = xas->xa_offset + (1 << order);
1788 if (!xa_is_sibling(xa_entry(xas->xa, xas->xa_node, slot)))
1793 order += xas->xa_node->shift;
1807 XA_STATE(xas, xa, index);
1812 entry = xas_load(&xas);
1814 order = xas_get_order(&xas);
1845 XA_STATE(xas, xa, 0);
1856 xas.xa_index = limit.min;
1857 xas_find_marked(&xas, limit.max, XA_FREE_MARK);
1858 if (xas.xa_node == XAS_RESTART)
1859 xas_set_err(&xas, -EBUSY);
1861 *id = xas.xa_index;
1862 xas_store(&xas, entry);
1863 xas_clear_mark(&xas, XA_FREE_MARK);
1864 } while (__xas_nomem(&xas, gfp));
1866 return xas_error(&xas);
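
From the caller's side, via the xa_alloc() wrapper; the array must track free entries (DEFINE_XARRAY_ALLOC() or XA_FLAGS_ALLOC):

    u32 id;
    int err = xa_alloc(&array, &id, item, XA_LIMIT(0, 63), GFP_KERNEL);

    /* on success, item is stored at the chosen index 0..63, returned in id */
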
1935 XA_STATE(xas, xa, index);
1936 void *entry = xas_load(&xas);
1939 xas_set_mark(&xas, mark);
1953 XA_STATE(xas, xa, index);
1954 void *entry = xas_load(&xas);
1957 xas_clear_mark(&xas, mark);
1975 XA_STATE(xas, xa, index);
1979 entry = xas_start(&xas);
1980 while (xas_get_mark(&xas, mark)) {
1983 entry = xas_descend(&xas, xa_to_node(entry));
2049 XA_STATE(xas, xa, *indexp);
2055 entry = xas_find_marked(&xas, max, filter);
2057 entry = xas_find(&xas, max);
2058 } while (xas_retry(&xas, entry));
2062 *indexp = xas.xa_index;
2067 static bool xas_sibling(struct xa_state *xas)
2069 struct xa_node *node = xas->xa_node;
2075 return (xas->xa_index & mask) >
2076 ((unsigned long)xas->xa_offset << node->shift);
2099 XA_STATE(xas, xa, *indexp + 1);
2102 if (xas.xa_index == 0)
2108 entry = xas_find_marked(&xas, max, filter);
2110 entry = xas_find(&xas, max);
2112 if (xas_invalid(&xas))
2114 if (xas_sibling(&xas))
2116 if (!xas_retry(&xas, entry))
2122 *indexp = xas.xa_index;
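
xa_find() and xa_find_after() are the building blocks of xa_for_each(); the usual caller-facing form:

    unsigned long index;
    void *entry;

    xa_for_each(&array, index, entry) {
        /* entries are visited in increasing index order */
    }
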
2127 static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
2134 xas_for_each(xas, entry, max) {
2135 if (xas_retry(xas, entry))
2146 static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
2153 xas_for_each_marked(xas, entry, max, mark) {
2154 if (xas_retry(xas, entry))
2196 XA_STATE(xas, xa, start);
2202 return xas_extract_marked(&xas, dst, max, n, filter);
2203 return xas_extract_present(&xas, dst, max, n);
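
A usage sketch of xa_extract(): copy up to a fixed batch of present entries into a destination array (it takes the RCU read lock itself):

    void *batch[16];
    unsigned int count;

    count = xa_extract(&array, batch, 0, ULONG_MAX, 16, XA_PRESENT);
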
2216 struct xa_state xas = {
2226 xas_store(&xas, NULL);
2242 XA_STATE(xas, xa, 0);
2246 xas.xa_node = NULL;
2247 xas_lock_irqsave(&xas, flags);
2250 xas_init_marks(&xas);
2255 xas_free_nodes(&xas, xa_to_node(entry));
2256 xas_unlock_irqrestore(&xas, flags);
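
xa_destroy() frees the internal nodes only, never the entries themselves; a caller that stored kmalloc'd objects frees them first:

    unsigned long index;
    void *entry;

    xa_for_each(&array, index, entry)
        kfree(entry);
    xa_destroy(&array);
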