Searched refs:xas (Results 1 – 25 of 37) sorted by relevance

/linux/lib/
xarray.c
38 static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type) in xas_lock_type() argument
41 xas_lock_irq(xas); in xas_lock_type()
43 xas_lock_bh(xas); in xas_lock_type()
45 xas_lock(xas); in xas_lock_type()
48 static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type) in xas_unlock_type() argument
51 xas_unlock_irq(xas); in xas_unlock_type()
53 xas_unlock_bh(xas); in xas_unlock_type()
55 xas_unlock(xas); in xas_unlock_type()
126 static void xas_squash_marks(const struct xa_state *xas) in xas_squash_marks() argument
129 unsigned int limit = xas->xa_offset + xas->xa_sibs + 1; in xas_squash_marks()
[all …]
test_xarray.c
74 XA_STATE_ORDER(xas, xa, index, order); in xa_store_order()
78 xas_lock(&xas); in xa_store_order()
79 curr = xas_store(&xas, entry); in xa_store_order()
80 xas_unlock(&xas); in xa_store_order()
81 } while (xas_nomem(&xas, gfp)); in xa_store_order()
104 XA_STATE(xas, xa, 0); in check_xas_retry()
111 XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0)); in check_xas_retry()
113 XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas))); in check_xas_retry()
114 XA_BUG_ON(xa, xas_retry(&xas, NULL)); in check_xas_retry()
115 XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0))); in check_xas_retry()
[all …]
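
The xa_store_order() hit above is the canonical advanced-API store loop: attempt the store under the xa_lock and, if the tree needed a node it could not allocate, let xas_nomem() allocate one outside the lock and retry. A minimal sketch of that pattern, with the xarray, index, entry and GFP flags as placeholders:

#include <linux/xarray.h>

/*
 * Store @entry at @index, retrying if xas_store() hit -ENOMEM under the
 * lock; xas_nomem() allocates a node with @gfp outside the lock and
 * returns true when the operation should be retried.
 */
static int example_store(struct xarray *xa, unsigned long index,
                         void *entry, gfp_t gfp)
{
        XA_STATE(xas, xa, index);

        do {
                xas_lock(&xas);
                xas_store(&xas, entry);
                xas_unlock(&xas);
        } while (xas_nomem(&xas, gfp));

        return xas_error(&xas);
}

The normal-API xa_store() wraps essentially this loop; the advanced API is only needed when more work has to happen under the same lock hold, as in the callers below.
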
idr.c
385 XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS); in ida_alloc_range()
397 xas_lock_irqsave(&xas, flags); in ida_alloc_range()
399 bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK); in ida_alloc_range()
400 if (xas.xa_index > min / IDA_BITMAP_BITS) in ida_alloc_range()
402 if (xas.xa_index * IDA_BITMAP_BITS + bit > max) in ida_alloc_range()
410 if (xas.xa_index * IDA_BITMAP_BITS + bit > max) in ida_alloc_range()
414 xas_store(&xas, xa_mk_value(tmp)); in ida_alloc_range()
424 xas_store(&xas, bitmap); in ida_alloc_range()
425 if (xas_error(&xas)) { in ida_alloc_range()
433 if (xas.xa_index * IDA_BITMAP_BITS + bit > max) in ida_alloc_range()
[all …]
/linux/include/linux/
xarray.h
1416 #define xas_marked(xas, mark) xa_marked((xas)->xa, (mark)) argument
1417 #define xas_trylock(xas) xa_trylock((xas)->xa) argument
1418 #define xas_lock(xas) xa_lock((xas)->xa) argument
1419 #define xas_unlock(xas) xa_unlock((xas)->xa) argument
1420 #define xas_lock_bh(xas) xa_lock_bh((xas)->xa) argument
1421 #define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa) argument
1422 #define xas_lock_irq(xas) xa_lock_irq((xas)->xa) argument
1423 #define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa) argument
1424 #define xas_lock_irqsave(xas, flags) \ argument
1425 xa_lock_irqsave((xas)->xa, flags)
[all …]
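
As the definitions show, the xas_lock*() family simply takes the spinlock embedded in the underlying struct xarray ((xas)->xa), in plain, _bh, _irq and _irqsave flavours; the caller picks the variant that matches how the array is otherwise locked. A hypothetical example (names are placeholders) for an array that is also modified from interrupt context:

#include <linux/xarray.h>

/* Hypothetical store into an xarray that is also touched from IRQ context. */
static int example_store_irq(struct xarray *xa, unsigned long index, void *entry)
{
        XA_STATE(xas, xa, index);
        unsigned long flags;

        xas_lock_irqsave(&xas, flags);
        xas_store(&xas, entry);         /* may record -ENOMEM in the xa_state */
        xas_unlock_irqrestore(&xas, flags);

        return xas_error(&xas);
}
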
iov_iter.h
211 XA_STATE(xas, iter->xarray, index); in iterate_xarray()
214 xas_for_each(&xas, folio, ULONG_MAX) { in iterate_xarray()
217 if (xas_retry(&xas, folio)) in iterate_xarray()
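
iterate_xarray() walks the backing store with xas_for_each(), redoing a step whenever xas_retry() sees a retry entry left behind by a concurrent tree reshape. The same read-side walk, sketched with placeholder names (the real iterator also handles folio sizes and offsets):

#include <linux/rcupdate.h>
#include <linux/xarray.h>

/* Count the present entries up to @max under the RCU read lock. */
static unsigned long example_count_entries(struct xarray *xa, unsigned long max)
{
        XA_STATE(xas, xa, 0);
        unsigned long count = 0;
        void *entry;

        rcu_read_lock();
        xas_for_each(&xas, entry, max) {
                if (xas_retry(&xas, entry)) /* concurrent reshape: redo this step */
                        continue;
                count++;
        }
        rcu_read_unlock();

        return count;
}
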
/linux/fs/
dax.c
147 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas, in dax_entry_waitqueue() argument
151 unsigned long index = xas->xa_index; in dax_entry_waitqueue()
160 key->xa = xas->xa; in dax_entry_waitqueue()
163 hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS); in dax_entry_waitqueue()
185 static void dax_wake_entry(struct xa_state *xas, void *entry, in dax_wake_entry() argument
191 wq = dax_entry_waitqueue(xas, entry, &key); in dax_wake_entry()
213 static void *get_next_unlocked_entry(struct xa_state *xas, unsigned int order) in get_next_unlocked_entry() argument
223 entry = xas_find_conflict(xas); in get_next_unlocked_entry()
231 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in get_next_unlocked_entry()
234 xas_unlock_irq(xas); in get_next_unlocked_entry()
[all …]
/linux/tools/testing/radix-tree/
iteration_check.c
23 XA_STATE(xas, xa, index); in my_item_insert()
28 xas_lock(&xas); in my_item_insert()
30 xas_set_order(&xas, index, order); in my_item_insert()
32 if (xas_find_conflict(&xas)) in my_item_insert()
34 xas_store(&xas, item); in my_item_insert()
35 xas_set_mark(&xas, TAG); in my_item_insert()
38 xas_unlock(&xas); in my_item_insert()
39 if (xas_nomem(&xas, GFP_KERNEL)) in my_item_insert()
69 XA_STATE(xas, &array, 0); in tagged_iteration_fn()
75 xas_set(&xas, 0); in tagged_iteration_fn()
[all …]
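
my_item_insert() combines the retry loop with a multi-order store: the state covers 2^order slots, xas_find_conflict() rejects the insert if anything already occupies that range, and xas_set_mark() tags the new entry before the lock is dropped. A condensed sketch of the same pattern, with placeholder names:

#include <linux/xarray.h>

/* Insert @item over 2^@order slots at @index, tagging it with XA_MARK_0. */
static int example_insert_order(struct xarray *xa, unsigned long index,
                                unsigned int order, void *item)
{
        XA_STATE_ORDER(xas, xa, index, order);

        do {
                xas_lock(&xas);
                if (xas_find_conflict(&xas)) {
                        xas_set_err(&xas, -EEXIST); /* range already occupied */
                } else {
                        xas_store(&xas, item);
                        xas_set_mark(&xas, XA_MARK_0);
                }
                xas_unlock(&xas);
        } while (xas_nomem(&xas, GFP_KERNEL));

        return xas_error(&xas);
}
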
test.c
176 XA_STATE(xas, xa, start); in tag_tagged_items()
183 xas_lock_irq(&xas); in tag_tagged_items()
184 xas_for_each_marked(&xas, item, end, iftag) { in tag_tagged_items()
185 xas_set_mark(&xas, thentag); in tag_tagged_items()
189 xas_pause(&xas); in tag_tagged_items()
190 xas_unlock_irq(&xas); in tag_tagged_items()
192 xas_lock_irq(&xas); in tag_tagged_items()
194 xas_unlock_irq(&xas); in tag_tagged_items()
257 XA_STATE(xas, xa, 0); in item_kill_tree()
260 xas_for_each(&xas, entry, ULONG_MAX) { in item_kill_tree()
[all …]
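
tag_tagged_items() is the standard long-walk idiom: iterate the entries carrying one mark under the lock, apply another mark, and periodically xas_pause() so the lock can be dropped (and the CPU rescheduled) without losing the iteration position. Sketched with assumed names and an arbitrary batch size:

#include <linux/sched.h>
#include <linux/xarray.h>

/* Copy mark @from to mark @to on every marked entry, dropping the lock in batches. */
static void example_retag(struct xarray *xa, xa_mark_t from, xa_mark_t to)
{
        XA_STATE(xas, xa, 0);
        unsigned int batch = 0;
        void *entry;

        xas_lock_irq(&xas);
        xas_for_each_marked(&xas, entry, ULONG_MAX, from) {
                xas_set_mark(&xas, to);
                if (++batch % 128)              /* drop the lock every 128 entries */
                        continue;
                xas_pause(&xas);                /* keep the position across the unlock */
                xas_unlock_irq(&xas);
                cond_resched();
                xas_lock_irq(&xas);
        }
        xas_unlock_irq(&xas);
}

The memfd.c, truncate.c and page-writeback.c hits below use the same xas_pause()-and-relock structure.
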
iteration_check_2.c
15 XA_STATE(xas, arg, 0); in iterator()
21 xas_set(&xas, 0); in iterator()
23 xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) in iterator()
26 assert(xas.xa_index >= 100); in iterator()
regression1.c
82 XA_STATE(xas, &mt_tree, start); in find_get_pages()
87 xas_for_each(&xas, page, ULONG_MAX) { in find_get_pages()
88 if (xas_retry(&xas, page)) in find_get_pages()
99 if (unlikely(page != xas_reload(&xas))) in find_get_pages()
108 xas_reset(&xas); in find_get_pages()
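
find_get_pages() shows the lockless-lookup recheck: after taking a reference on the object found under RCU, xas_reload() re-reads the slot, and if the entry changed in the meantime the reference is dropped and the walk restarts. A sketch of that recheck against a hypothetical refcounted item type; example_item, example_item_put() and the freeing policy are assumptions, not part of regression1.c:

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct example_item {
        refcount_t      ref;
        struct rcu_head rcu;
};

static void example_item_put(struct example_item *item)
{
        if (refcount_dec_and_test(&item->ref))
                kfree_rcu(item, rcu);
}

/*
 * Look up @index and take a reference, re-checking the slot after the grab.
 * Assumes items are removed from the array before their last reference is
 * dropped, as the page cache does.
 */
static struct example_item *example_item_get(struct xarray *xa, unsigned long index)
{
        XA_STATE(xas, xa, index);
        struct example_item *item;

        rcu_read_lock();
repeat:
        xas_reset(&xas);
        item = xas_load(&xas);
        if (xas_retry(&xas, item))
                goto repeat;
        if (!item)
                goto out;
        if (!refcount_inc_not_zero(&item->ref))
                goto repeat;                    /* being freed; the slot will change */
        if (item != xas_reload(&xas)) {
                example_item_put(item);         /* raced with removal or replacement */
                goto repeat;
        }
out:
        rcu_read_unlock();
        return item;
}
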
/linux/fs/cachefiles/
ondemand.c
25 XA_STATE(xas, NULL, 0); in cachefiles_ondemand_fd_release()
32 xas.xa = &cache->reqs; in cachefiles_ondemand_fd_release()
42 xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) { in cachefiles_ondemand_fd_release()
46 xas_store(&xas, NULL); in cachefiles_ondemand_fd_release()
122 XA_STATE(xas, &cache->reqs, id); in cachefiles_ondemand_fd_ioctl()
131 req = xas_load(&xas); in cachefiles_ondemand_fd_ioctl()
137 xas_store(&xas, NULL); in cachefiles_ondemand_fd_ioctl()
167 XA_STATE(xas, &cache->reqs, 0); in cachefiles_ondemand_copen()
192 xas.xa_index = id; in cachefiles_ondemand_copen()
193 req = xas_load(&xas); in cachefiles_ondemand_copen()
[all …]
daemon.c
361 XA_STATE(xas, &cache->reqs, 0); in cachefiles_daemon_poll()
370 xas_lock(&xas); in cachefiles_daemon_poll()
371 xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) { in cachefiles_daemon_poll()
377 xas_unlock(&xas); in cachefiles_daemon_poll()
/linux/mm/
memfd.c
38 static void memfd_tag_pins(struct xa_state *xas) in memfd_tag_pins() argument
45 xas_lock_irq(xas); in memfd_tag_pins()
46 xas_for_each(xas, folio, ULONG_MAX) { in memfd_tag_pins()
48 xas_set_mark(xas, MEMFD_TAG_PINNED); in memfd_tag_pins()
54 xas_pause(xas); in memfd_tag_pins()
55 xas_unlock_irq(xas); in memfd_tag_pins()
57 xas_lock_irq(xas); in memfd_tag_pins()
59 xas_unlock_irq(xas); in memfd_tag_pins()
158 XA_STATE(xas, &mapping->i_pages, 0); in memfd_wait_for_pins()
162 memfd_tag_pins(&xas); in memfd_wait_for_pins()
[all …]
filemap.c
132 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
135 mapping_set_update(&xas, mapping); in page_cache_delete()
137 xas_set_order(&xas, folio->index, folio_order(folio)); in page_cache_delete()
142 xas_store(&xas, shadow); in page_cache_delete()
143 xas_init_marks(&xas); in page_cache_delete()
282 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); in page_cache_delete_batch()
287 mapping_set_update(&xas, mapping); in page_cache_delete_batch()
288 xas_for_each(&xas, folio, ULONG_MAX) { in page_cache_delete_batch()
314 xas_store(&xas, NULL); in page_cache_delete_batch()
483 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_page()
[all …]
khugepaged.c
1866 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); in collapse_file()
1877 mapping_set_update(&xas, mapping); in collapse_file()
1890 xas_lock_irq(&xas); in collapse_file()
1891 xas_create_range(&xas); in collapse_file()
1892 if (!xas_error(&xas)) in collapse_file()
1894 xas_unlock_irq(&xas); in collapse_file()
1895 if (!xas_nomem(&xas, GFP_KERNEL)) { in collapse_file()
1902 xas_set(&xas, index); in collapse_file()
1903 folio = xas_load(&xas); in collapse_file()
1905 VM_BUG_ON(index != xas.xa_index); in collapse_file()
[all …]
truncate.c
29 XA_STATE(xas, &mapping->i_pages, start); in clear_shadow_entries()
36 xas_set_update(&xas, workingset_update_node); in clear_shadow_entries()
39 xas_lock_irq(&xas); in clear_shadow_entries()
42 xas_for_each(&xas, folio, max) { in clear_shadow_entries()
44 xas_store(&xas, NULL); in clear_shadow_entries()
47 xas_unlock_irq(&xas); in clear_shadow_entries()
63 XA_STATE(xas, &mapping->i_pages, indices[0]); in truncate_folio_batch_exceptionals()
101 xas_set(&xas, indices[j]); in truncate_folio_batch_exceptionals()
102 xas_set_update(&xas, workingset_update_node); in truncate_folio_batch_exceptionals()
105 xas_lock_irq(&xas); in truncate_folio_batch_exceptionals()
[all …]
shmem.c
500 XA_STATE(xas, &mapping->i_pages, index); in shmem_replace_entry()
505 item = xas_load(&xas); in shmem_replace_entry()
508 xas_store(&xas, replacement); in shmem_replace_entry()
523 XA_STATE(xas, &mapping->i_pages, index); in shmem_confirm_swap()
529 entry = xas_load(&xas); in shmem_confirm_swap()
531 ret = xas_get_order(&xas); in shmem_confirm_swap()
532 } while (xas_retry(&xas, entry)); in shmem_confirm_swap()
885 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); in shmem_add_to_page_cache()
904 xas_lock_irq(&xas); in shmem_add_to_page_cache()
905 xas_for_each_conflict(&xas, entry) { in shmem_add_to_page_cache()
[all …]
page-writeback.c
2386 XA_STATE(xas, &mapping->i_pages, start); in tag_pages_for_writeback()
2390 xas_lock_irq(&xas); in tag_pages_for_writeback()
2391 xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) { in tag_pages_for_writeback()
2392 xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE); in tag_pages_for_writeback()
2396 xas_pause(&xas); in tag_pages_for_writeback()
2397 xas_unlock_irq(&xas); in tag_pages_for_writeback()
2399 xas_lock_irq(&xas); in tag_pages_for_writeback()
2401 xas_unlock_irq(&xas); in tag_pages_for_writeback()
3026 XA_STATE(xas, &mapping->i_pages, folio->index); in __folio_start_writeback()
3032 xas_lock_irqsave(&xas, flags); in __folio_start_writeback()
[all …]
/linux/drivers/infiniband/sw/rxe/
rxe_mr.c
96 XA_STATE(xas, &mr->page_list, 0); in rxe_mr_fill_pages_from_sgt()
106 xas_lock(&xas); in rxe_mr_fill_pages_from_sgt()
112 xas_set_err(&xas, -EINVAL); in rxe_mr_fill_pages_from_sgt()
116 xas_store(&xas, page); in rxe_mr_fill_pages_from_sgt()
117 if (xas_error(&xas)) in rxe_mr_fill_pages_from_sgt()
119 xas_next(&xas); in rxe_mr_fill_pages_from_sgt()
123 xas_unlock(&xas); in rxe_mr_fill_pages_from_sgt()
124 } while (xas_nomem(&xas, GFP_KERNEL)); in rxe_mr_fill_pages_from_sgt()
126 return xas_error(&xas); in rxe_mr_fill_pages_from_sgt()
161 XA_STATE(xas, &mr->page_list, 0); in rxe_mr_alloc()
[all …]
/linux/drivers/infiniband/core/
ib_core_uverbs.c
268 XA_STATE(xas, &ucontext->mmap_xa, min_pgoff); in rdma_user_mmap_entry_insert_range()
294 xas_find_marked(&xas, max_pgoff, XA_FREE_MARK); in rdma_user_mmap_entry_insert_range()
295 if (xas.xa_node == XAS_RESTART) in rdma_user_mmap_entry_insert_range()
298 xa_first = xas.xa_index; in rdma_user_mmap_entry_insert_range()
308 xas_next_entry(&xas, xa_last - 1); in rdma_user_mmap_entry_insert_range()
309 if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last) in rdma_user_mmap_entry_insert_range()
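
rdma_user_mmap_entry_insert_range() uses the free-tracking mark to hunt for a gap: xas_find_marked() with XA_FREE_MARK jumps to the next unallocated index, and xas_next_entry() then checks whether any allocated entry falls inside the candidate window. A condensed sketch of that gap search, assuming an xarray initialised with XA_FLAGS_ALLOC; the names and error handling are placeholders:

#include <linux/overflow.h>
#include <linux/xarray.h>

/* Find @count consecutive free indices in [@min, @max]; return the first one or -ENOSPC. */
static long example_find_gap(struct xarray *xa, unsigned long min,
                             unsigned long max, unsigned long count)
{
        XA_STATE(xas, xa, min);
        unsigned long first, last;
        long ret = -ENOSPC;

        xa_lock(xa);
        do {
                xas_find_marked(&xas, max, XA_FREE_MARK);
                if (xas.xa_node == XAS_RESTART)         /* no free index left below @max */
                        break;

                first = xas.xa_index;
                if (check_add_overflow(first, count, &last))
                        break;
                if (last - 1 > max)                     /* window would run past @max */
                        break;

                /* Does any allocated entry sit inside [first, last)? */
                xas_next_entry(&xas, last - 1);
                if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= last) {
                        ret = first;                    /* the whole window is free */
                        break;
                }
        } while (true);
        xa_unlock(xa);

        return ret;
}

In the driver itself the store into the chosen window happens under the same lock hold; a caller of a sketch like this would have to do the same, or a racing allocator could claim the gap first.
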
/linux/drivers/iommu/iommufd/
pages.c
560 XA_STATE(xas, xa, start_index); in batch_from_xarray()
565 entry = xas_next(&xas); in batch_from_xarray()
566 if (xas_retry(&xas, entry)) in batch_from_xarray()
581 XA_STATE(xas, xa, start_index); in batch_from_xarray_clear()
584 xas_lock(&xas); in batch_from_xarray_clear()
586 entry = xas_next(&xas); in batch_from_xarray_clear()
587 if (xas_retry(&xas, entry)) in batch_from_xarray_clear()
592 xas_store(&xas, NULL); in batch_from_xarray_clear()
597 xas_unlock(&xas); in batch_from_xarray_clear()
603 XA_STATE(xas, xa, start_index); in clear_xarray()
[all …]
main.c
105 XA_STATE(xas, &ictx->objects, obj->id); in iommufd_object_finalize()
109 old = xas_store(&xas, obj); in iommufd_object_finalize()
118 XA_STATE(xas, &ictx->objects, obj->id); in iommufd_object_abort()
122 old = xas_store(&xas, NULL); in iommufd_object_abort()
211 XA_STATE(xas, &ictx->objects, id); in iommufd_object_remove()
236 obj = xas_load(&xas); in iommufd_object_remove()
258 xas_store(&xas, (flags & REMOVE_OBJ_TOMBSTONE) ? XA_ZERO_ENTRY : NULL); in iommufd_object_remove()
/linux/drivers/net/ethernet/mellanox/mlx5/core/
fs_counters.c
115 XA_STATE(xas, &fc_stats->counters, 0); in mlx5_fc_stats_query_all_counters()
123 xas_lock(&xas); in mlx5_fc_stats_query_all_counters()
124 xas_for_each(&xas, counter, U32_MAX) { in mlx5_fc_stats_query_all_counters()
125 if (xas_retry(&xas, counter)) in mlx5_fc_stats_query_all_counters()
133 xas_unlock(&xas); in mlx5_fc_stats_query_all_counters()
135 xas_reset(&xas); in mlx5_fc_stats_query_all_counters()
142 xas_lock(&xas); in mlx5_fc_stats_query_all_counters()
150 xas_unlock(&xas); in mlx5_fc_stats_query_all_counters()
/linux/fs/btrfs/
extent_io.c
1924 XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits); in lock_extent_buffer_for_io()
1930 xas_lock_irqsave(&xas, flags); in lock_extent_buffer_for_io()
1931 xas_load(&xas); in lock_extent_buffer_for_io()
1932 xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK); in lock_extent_buffer_for_io()
1933 xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY); in lock_extent_buffer_for_io()
1934 xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE); in lock_extent_buffer_for_io()
1935 xas_unlock_irqrestore(&xas, flags); in lock_extent_buffer_for_io()
2025 XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits); in buffer_tree_set_mark()
2028 xas_lock_irqsave(&xas, flags); in buffer_tree_set_mark()
2029 xas_load(&xas); in buffer_tree_set_mark()
[all …]
/linux/fs/netfs/
iterator.c
160 XA_STATE(xas, iter->xarray, index); in netfs_limit_xarray()
169 xas_for_each(&xas, folio, ULONG_MAX) { in netfs_limit_xarray()
171 if (xas_retry(&xas, folio)) in netfs_limit_xarray()
