| /linux/lib/ |
| H A D | xarray.c |
|    152  xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);         in xas_set_offset()
|    159  xas->xa_index &= ~XA_CHUNK_MASK << shift;                         in xas_move_index()
|    160  xas->xa_index += offset << shift;                                 in xas_move_index()
|    193  if (xas->xa_index)                                                in xas_start()
|    196  if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)  in xas_start()
|    207  unsigned int offset = get_offset(xas->xa_index, node);            in xas_descend()
|    418  unsigned long max = xas->xa_index;                                in xas_max()
|    714  unsigned long index = xas->xa_index;                              in xas_create_range()
|    718  xas->xa_index |= ((sibs + 1UL) << shift) - 1;                     in xas_create_range()
|    728  if (xas->xa_index <= (index | XA_CHUNK_MASK))                     in xas_create_range()
|    [all …]
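
The arithmetic in these hits is easier to see in isolation. Below is a minimal sketch, not the kernel's actual helper, of how xas_descend() and xas_set_offset() derive a per-node slot offset from xa_index.

    /*
     * Sketch only: mirrors the (index >> shift) & XA_CHUNK_MASK pattern above.
     * XA_CHUNK_SHIFT is 6 on typical configurations (64 slots per node), so the
     * mask is 0x3f.
     */
    static unsigned int sketch_get_offset(unsigned long index,
                                          unsigned int node_shift)
    {
            /* Pick the 6-bit slice of the index that this tree level consumes. */
            return (index >> node_shift) & 0x3f;
    }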
|
| H A D | test_xarray.c |
|    139  xas_store(&xas, xa_mk_index(xas.xa_index));                in check_xas_retry()
|    284  XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));  in check_xa_mark_2()
|    617  XA_BUG_ON(xa, xas.xa_index != index);                      in check_multi_store_2()
|   1395  return entry ? xas.xa_index : -1;                          in xa_find_entry()
|   1558  XA_BUG_ON(xa, xas.xa_index != i);                          in check_move_small()
|   1565  XA_BUG_ON(xa, xas.xa_index != i);                          in check_move_small()
|   1572  XA_BUG_ON(xa, xas.xa_index != i);                          in check_move_small()
|   1581  XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);                  in check_move_small()
|   1583  XA_BUG_ON(xa, xas.xa_index != 0);                          in check_move_small()
|   1585  XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);                  in check_move_small()
|   [all …]
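
The check_move_small() assertions above pin down how the cursor wraps at the ends of the index space. A minimal sketch of that behaviour, assuming a hypothetical xarray with an entry stored at index 0:

    #include <linux/xarray.h>

    static void cursor_wrap_demo(struct xarray *xa)
    {
            XA_STATE(xas, xa, 0);

            rcu_read_lock();
            xas_load(&xas);
            xas_prev(&xas);                         /* step below index 0 ...   */
            WARN_ON(xas.xa_index != ULONG_MAX);     /* ... wraps to ULONG_MAX   */
            xas_next(&xas);                         /* step forward again ...   */
            WARN_ON(xas.xa_index != 0);             /* ... wraps back to 0      */
            rcu_read_unlock();
    }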
|
| /linux/include/linux/ |
| H A D | xarray.h |
|   1356  unsigned long xa_index;                                                  member
|   1377  .xa_index = index,                                                       \
|   1623  offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;                 in xas_reload()
|   1645  xas->xa_index = index;                                                   in xas_set()
|   1663  xas->xa_index = index;                                                   in xas_advance()
|   1677  xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;   in xas_set_order()
|   1723  xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))                      in xas_next_entry()
|   1727  if (unlikely(xas->xa_index >= max))                                      in xas_next_entry()
|   1735  xas->xa_index++;                                                         in xas_next_entry()
|   1785  xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;               in xas_next_marked()
|   [all …]
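
Taken together, these helpers define the cursor: xa_index is the target index, XA_STATE() seeds it, xas_set() repositions it, and the iteration helpers keep it in step with the entry just returned. A minimal sketch of that usage, assuming a hypothetical xarray that stores small integers as xa_mk_value() entries:

    #include <linux/xarray.h>

    static void walk_values(struct xarray *xa, unsigned long start)
    {
            XA_STATE(xas, xa, start);       /* xas.xa_index == start */
            void *entry;

            rcu_read_lock();
            xas_for_each(&xas, entry, ULONG_MAX) {
                    if (xas_retry(&xas, entry))
                            continue;
                    /* xa_index now names the slot this entry came from. */
                    pr_debug("index %lu -> value %lu\n",
                             xas.xa_index, xa_to_value(entry));
            }
            rcu_read_unlock();

            xas_set(&xas, start);           /* rewind the cursor for a re-walk */
    }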
|
| /linux/fs/cachefiles/ |
| H A D | ondemand.c |
|    192  xas.xa_index = id;                                              in cachefiles_ondemand_copen()
|    405  if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)    in cachefiles_ondemand_finish_req()
|    448  cache->req_id_next = xas.xa_index + 1;                          in cachefiles_ondemand_daemon_read()
|    459  msg->msg_id = xas.xa_index;                                     in cachefiles_ondemand_daemon_read()
|    559  xas.xa_index = cache->msg_id_next;                              in cachefiles_ondemand_send_req()
|    562  xas.xa_index = 0;                                               in cachefiles_ondemand_send_req()
|    570  cache->msg_id_next = xas.xa_index + 1;                          in cachefiles_ondemand_send_req()
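
cachefiles stashes xa_index across calls so the next scan resumes where the previous one stopped, wrapping to index 0 when it runs off the end. A rough sketch of that pattern, with invented names (req_xa, next_id) and XA_MARK_1 standing in for the driver's own mark:

    #include <linux/xarray.h>

    static void *next_marked_req(struct xarray *req_xa, unsigned long *next_id)
    {
            XA_STATE(xas, req_xa, *next_id);
            void *req;

            xa_lock(req_xa);
            req = xas_find_marked(&xas, ULONG_MAX, XA_MARK_1);
            if (!req) {
                    /* Nothing past the saved position; wrap and retry from 0. */
                    xas_set(&xas, 0);
                    req = xas_find_marked(&xas, ULONG_MAX, XA_MARK_1);
            }
            if (req) {
                    xas_clear_mark(&xas, XA_MARK_1);
                    *next_id = xas.xa_index + 1;    /* resume point for the next call */
            }
            xa_unlock(req_xa);
            return req;
    }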
|
| /linux/fs/ |
| H A D | dax.c |
|    151  unsigned long index = xas->xa_index;                             in dax_entry_waitqueue()
|    653  unsigned long index = xas->xa_index;                             in grab_mapping_entry()
|    694  xas->xa_index & ~PG_PMD_COLOUR,                                  in grab_mapping_entry()
|   1056  unsigned long index = xas->xa_index;                             in dax_insert_entry()
|   1162  index = xas->xa_index & ~(count - 1);                            in dax_writeback_one()
|   1215  trace_dax_writeback_range(inode, xas.xa_index, end_index);       in dax_writeback_mapping_range()
|   1217  tag_pages_for_writeback(mapping, xas.xa_index, end_index);       in dax_writeback_mapping_range()
|   1235  trace_dax_writeback_range_done(inode, xas.xa_index, end_index);  in dax_writeback_mapping_range()
|   1812  loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;                in dax_fault_iter()
|   1966  if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)                in dax_fault_check_fallback()
|   [all …]
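
In dax.c the mapping's XArray is indexed by page offset, so xa_index doubles as the file pgoff. A rough sketch of the two conversions visible above (byte position, and aligning down to the first index a multi-page entry covers); the function names are illustrative, not dax.c's own:

    #include <linux/mm.h>
    #include <linux/xarray.h>

    /* Byte position in the file of the entry the cursor points at. */
    static loff_t sketch_entry_pos(const struct xa_state *xas)
    {
            return (loff_t)xas->xa_index << PAGE_SHIFT;
    }

    /* First index covered by an entry spanning "count" pages (count a power of two). */
    static unsigned long sketch_entry_start(const struct xa_state *xas,
                                            unsigned long count)
    {
            return xas->xa_index & ~(count - 1);
    }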
|
| /linux/mm/ |
| H A D | filemap.c |
|    869  folio->index = xas.xa_index;                                  in __filemap_add_folio()
|   1810  return xas.xa_index;                                          in page_cache_next_miss()
|   1811  if (xas.xa_index == 0)                                        in page_cache_next_miss()
|   1847  if (xas.xa_index == ULONG_MAX)                                in page_cache_prev_miss()
|   1851  return xas.xa_index;                                          in page_cache_prev_miss()
|   2118  indices[fbatch->nr] = xas.xa_index;                           in find_get_entries()
|   2184  VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),         in find_lock_entries()
|   2188  base = xas.xa_index & ~(nr - 1);                              in find_lock_entries()
|   2199  indices[fbatch->nr] = xas.xa_index;                           in find_lock_entries()
|   2258  for (folio = xas_load(&xas); folio && xas.xa_index <= end;    in filemap_get_folios_contig()
|   [all …]
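
__filemap_add_folio() can take the folio's index straight from xas.xa_index because xas_set_order() has already rounded the cursor down to the first slot the large entry occupies. A small sketch of that rounding, assuming CONFIG_XARRAY_MULTI and illustrative values:

    #include <linux/xarray.h>

    static void order_rounding_demo(struct xarray *xa)
    {
            XA_STATE(xas, xa, 0);

            /* Position the cursor for an order-4 (16-slot) entry containing index 37. */
            xas_set_order(&xas, 37, 4);
            WARN_ON(xas.xa_index != 32);    /* (37 >> 4) << 4 */
    }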
|
| H A D | madvise.c | 256 ((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT); in shmem_swapin_range()
|
| H A D | khugepaged.c | 1905 VM_BUG_ON(index != xas.xa_index); in collapse_file()
|
| H A D | shmem.c |
|    999  if (xas.xa_index == max)                              in shmem_partial_swap_usage()
|   1434  indices[folio_batch_count(fbatch)] = xas.xa_index;    in shmem_find_swap_entries()
|
| /linux/drivers/infiniband/core/ |
| H A D | ib_core_uverbs.c |
|    298  xa_first = xas.xa_index;                                    in rdma_user_mmap_entry_insert_range()
|    309  if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last)   in rdma_user_mmap_entry_insert_range()
|
| H A D | device.c | 197 *indexp = xas.xa_index; in xan_find_marked()
|
| /linux/tools/testing/radix-tree/ |
| H A D | iteration_check_2.c | 26 assert(xas.xa_index >= 100); in iterator()
|
| H A D | test.c | 262 item_free(entry, xas.xa_index); in item_kill_tree()
|
| /linux/arch/arm64/kernel/ |
| H A D | hibernate.c | 305 unsigned long pfn = xa_state.xa_index; in swsusp_mte_restore_tags()
|
| /linux/Documentation/translations/zh_CN/core-api/ |
| H A D | xarray.rst | 355 If xas_load() encounters a multi-index entry, the xa_index in the xa_state will not be changed. When iterating over an XArray or calling xas_find()
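
A minimal sketch of that rule, assuming a hypothetical xarray that already holds an order-2 entry covering indices 4-7:

    #include <linux/xarray.h>

    static void multi_index_load_demo(struct xarray *xa)
    {
            XA_STATE(xas, xa, 6);
            void *entry;

            rcu_read_lock();
            entry = xas_load(&xas);                 /* finds the entry covering 4-7 */
            WARN_ON(entry && xas.xa_index != 6);    /* xa_index is left untouched   */
            rcu_read_unlock();
    }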
|
| /linux/drivers/iommu/iommufd/ |
| H A D | pages.c |
|    645  if (xas.xa_index != start_index)                    in pages_to_xarray()
|    646  clear_xarray(xa, start_index, xas.xa_index - 1);    in pages_to_xarray()
|
| /linux/virt/kvm/ |
| H A D | kvm_main.c | 2480 if (xas.xa_index != index || in kvm_handle_gfn_range()
|