/linux/fs/nilfs2/
  btnode.c
    234  xa_lock_irq(&btnc->i_pages);  in nilfs_btnode_prepare_change_key()
    235  err = __xa_insert(&btnc->i_pages, newkey, ofolio, GFP_NOFS);  in nilfs_btnode_prepare_change_key()
    236  xa_unlock_irq(&btnc->i_pages);  in nilfs_btnode_prepare_change_key()
    304  xa_lock_irq(&btnc->i_pages);  in nilfs_btnode_commit_change_key()
    305  __xa_erase(&btnc->i_pages, oldkey);  in nilfs_btnode_commit_change_key()
    306  __xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);  in nilfs_btnode_commit_change_key()
    307  xa_unlock_irq(&btnc->i_pages);  in nilfs_btnode_commit_change_key()
    346  xa_erase_irq(&btnc->i_pages, newkey);  in nilfs_btnode_abort_change_key()
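The btnode.c hits above are the three stages of moving a b-tree node folio to a new page-cache key: stage under the new key, commit by erasing the old key and re-tagging the new one dirty, or abort. A minimal sketch of that pattern, assuming kernel context (<linux/xarray.h>, <linux/pagemap.h>) and reusing the names from the snippets (btnc, ofolio, oldkey, newkey); this is illustrative, not the full nilfs2 code:

/* Stage the folio under the new key; -EBUSY means the slot is occupied. */
static int change_key_prepare(struct address_space *btnc,
                              struct folio *ofolio, unsigned long newkey)
{
        int err;

        xa_lock_irq(&btnc->i_pages);
        err = __xa_insert(&btnc->i_pages, newkey, ofolio, GFP_NOFS);
        xa_unlock_irq(&btnc->i_pages);
        return err;
}

/* Commit: drop the old key and tag the new one dirty, atomically with
 * respect to other i_pages users. */
static void change_key_commit(struct address_space *btnc,
                              unsigned long oldkey, unsigned long newkey)
{
        xa_lock_irq(&btnc->i_pages);
        __xa_erase(&btnc->i_pages, oldkey);
        __xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);
        xa_unlock_irq(&btnc->i_pages);
}

/* Abort: just remove the staged entry again. */
static void change_key_abort(struct address_space *btnc, unsigned long newkey)
{
        xa_erase_irq(&btnc->i_pages, newkey);
}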
  page.c
    329  xa_lock_irq(&smap->i_pages);  in nilfs_copy_back_pages()
    330  f = __xa_erase(&smap->i_pages, index);  in nilfs_copy_back_pages()
    333  xa_unlock_irq(&smap->i_pages);  in nilfs_copy_back_pages()
    335  xa_lock_irq(&dmap->i_pages);  in nilfs_copy_back_pages()
    336  f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);  in nilfs_copy_back_pages()
    345  __xa_set_mark(&dmap->i_pages, index,  in nilfs_copy_back_pages()
    348  xa_unlock_irq(&dmap->i_pages);  in nilfs_copy_back_pages()
    457  xa_lock_irq(&mapping->i_pages);  in __nilfs_clear_folio_dirty()
    459  __xa_clear_mark(&mapping->i_pages, folio->index,  in __nilfs_clear_folio_dirty()
    461  xa_unlock_irq(&mapping->i_pages);  in __nilfs_clear_folio_dirty()
    [all …]
/linux/arch/nios2/include/asm/
  cacheflush.h
    54  #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
    55  #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
    57  xa_lock_irqsave(&mapping->i_pages, flags)
    59  xa_unlock_irqrestore(&mapping->i_pages, flags)
/linux/mm/
  truncate.c
    34  XA_STATE(xas, &mapping->i_pages, index);  in __clear_shadow_entry()
    52  xa_lock_irq(&mapping->i_pages);  in clear_shadow_entries()
    61  xa_unlock_irq(&mapping->i_pages);  in clear_shadow_entries()
    92  xa_lock_irq(&mapping->i_pages);  in truncate_folio_batch_exceptionals()
    113  xa_unlock_irq(&mapping->i_pages);  in truncate_folio_batch_exceptionals()
    454  xa_lock_irq(&mapping->i_pages);  in truncate_inode_pages_final()
    455  xa_unlock_irq(&mapping->i_pages);  in truncate_inode_pages_final()
    559  xa_lock_irq(&mapping->i_pages);  in invalidate_complete_folio2()
    565  xa_unlock_irq(&mapping->i_pages);  in invalidate_complete_folio2()
    573  xa_unlock_irq(&mapping->i_pages);  in invalidate_complete_folio2()
  swap_state.c
    80  shadow = xa_load(&address_space->i_pages, idx);  in get_shadow_from_swap_cache()
    95  XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));  in add_to_swap_cache()
    150  XA_STATE(xas, &address_space->i_pages, idx);  in __delete_from_swap_cache()
    242  xa_lock_irq(&address_space->i_pages);  in delete_from_swap_cache()
    244  xa_unlock_irq(&address_space->i_pages);  in delete_from_swap_cache()
    260  XA_STATE(xas, &address_space->i_pages, index);  in clear_shadow_from_swap_cache()
    264  xa_lock_irq(&address_space->i_pages);  in clear_shadow_from_swap_cache()
    270  xa_unlock_irq(&address_space->i_pages);  in clear_shadow_from_swap_cache()
    721  xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);  in init_swap_address_space()
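The hit at line 80 is the lockless read side: xa_load() on i_pages is RCU-safe and returns either a folio pointer or an xarray value entry carrying shadow/swap information. A small sketch of that check, assuming kernel context; the helper name is illustrative:

/* Return the shadow value stored at @index, or NULL if a real folio
 * (or nothing) is there.  Safe without taking the i_pages lock. */
static void *lookup_shadow(struct address_space *mapping, pgoff_t index)
{
        void *entry = xa_load(&mapping->i_pages, index);

        if (xa_is_value(entry))
                return entry;
        return NULL;
}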
  workingset.c
    639  mapping = container_of(node->array, struct address_space, i_pages);  in workingset_update_node()
    640  lockdep_assert_held(&mapping->i_pages.xa_lock);  in workingset_update_node()
    734  mapping = container_of(node->array, struct address_space, i_pages);  in shadow_lru_isolate()
    737  if (!xa_trylock(&mapping->i_pages)) {  in shadow_lru_isolate()
    746  xa_unlock(&mapping->i_pages);  in shadow_lru_isolate()
    771  xa_unlock_irq(&mapping->i_pages);  in shadow_lru_isolate()
  filemap.c
    140  XA_STATE(xas, &mapping->i_pages, folio->index);  in page_cache_delete()
    264  xa_lock_irq(&mapping->i_pages);  in filemap_remove_folio()
    266  xa_unlock_irq(&mapping->i_pages);  in filemap_remove_folio()
    290  XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);  in page_cache_delete_batch()
    337  xa_lock_irq(&mapping->i_pages);  in delete_from_page_cache_batch()
    345  xa_unlock_irq(&mapping->i_pages);  in delete_from_page_cache_batch()
    484  XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);  in filemap_range_has_page()
    641  XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);  in filemap_range_has_writeback()
    819  XA_STATE(xas, &mapping->i_pages, offset);  in replace_page_cache_folio()
    854  XA_STATE(xas, &mapping->i_pages, index);  in __filemap_add_folio()
    [all …]
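The pairing of XA_STATE() in page_cache_delete() with the xa_lock_irq()/xa_unlock_irq() in filemap_remove_folio() shows the usual split: the caller takes the i_pages lock, the helper walks and stores. A condensed sketch of that shape, assuming kernel context; it is not the real page_cache_delete(), which also handles shadow accounting and hugepage statistics:

/* Replace @folio's slots with @shadow (may be NULL) and clear its marks. */
static void cache_delete_folio(struct address_space *mapping,
                               struct folio *folio, void *shadow)
{
        XA_STATE(xas, &mapping->i_pages, folio->index);

        xas_set_order(&xas, folio->index, folio_order(folio));

        xa_lock_irq(&mapping->i_pages);
        xas_store(&xas, shadow);        /* folio slots now hold the shadow */
        xas_init_marks(&xas);           /* drop dirty/writeback marks */
        folio->mapping = NULL;
        mapping->nrpages -= folio_nr_pages(folio);
        xa_unlock_irq(&mapping->i_pages);
}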
  readahead.c
    245  struct folio *folio = xa_load(&mapping->i_pages, index + i);  in page_cache_ra_unbounded()
    751  struct folio *folio = xa_load(&mapping->i_pages, index);  in readahead_expand()
    780  struct folio *folio = xa_load(&mapping->i_pages, index);  in readahead_expand()
  shmem.c
    467  xa_lock_irq(&mapping->i_pages);  in shmem_charge()
    469  xa_unlock_irq(&mapping->i_pages);  in shmem_charge()
    489  XA_STATE(xas, &mapping->i_pages, index);  in shmem_replace_entry()
    511  return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);  in shmem_confirm_swap()
    789  XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));  in shmem_add_to_page_cache()
    843  xa_lock_irq(&mapping->i_pages);  in shmem_delete_from_page_cache()
    849  xa_unlock_irq(&mapping->i_pages);  in shmem_delete_from_page_cache()
    862  int order = xa_get_order(&mapping->i_pages, index);  in shmem_free_swap()
    865  old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);  in shmem_free_swap()
    883  XA_STATE(xas, &mapping->i_pages, start);  in shmem_partial_swap_usage()
    [all …]
  page-writeback.c
    2414  XA_STATE(xas, &mapping->i_pages, start);  in tag_pages_for_writeback()
    2794  xa_lock_irqsave(&mapping->i_pages, flags);  in __folio_mark_dirty()
    2798  __xa_set_mark(&mapping->i_pages, folio_index(folio),  in __folio_mark_dirty()
    2801  xa_unlock_irqrestore(&mapping->i_pages, flags);  in __folio_mark_dirty()
    3083  xa_lock_irqsave(&mapping->i_pages, flags);  in __folio_end_writeback()
    3085  __xa_clear_mark(&mapping->i_pages, folio_index(folio),  in __folio_end_writeback()
    3100  xa_unlock_irqrestore(&mapping->i_pages, flags);  in __folio_end_writeback()
    3123  XA_STATE(xas, &mapping->i_pages, folio_index(folio));  in __folio_start_writeback()
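These hits show the i_pages marks doubling as the page cache's dirty and writeback tags. A sketch of the two tag operations, assuming kernel context and that the caller already holds a reference on the folio; function names are illustrative:

static void mark_dirty_tag(struct address_space *mapping, struct folio *folio)
{
        unsigned long flags;

        xa_lock_irqsave(&mapping->i_pages, flags);
        __xa_set_mark(&mapping->i_pages, folio->index, PAGECACHE_TAG_DIRTY);
        xa_unlock_irqrestore(&mapping->i_pages, flags);
}

static void clear_writeback_tag(struct address_space *mapping, struct folio *folio)
{
        unsigned long flags;

        xa_lock_irqsave(&mapping->i_pages, flags);
        __xa_clear_mark(&mapping->i_pages, folio->index, PAGECACHE_TAG_WRITEBACK);
        xa_unlock_irqrestore(&mapping->i_pages, flags);
}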
  memfd.c
    120  XA_STATE(xas, &mapping->i_pages, 0);  in memfd_wait_for_pins()
  huge_memory.c
    3195  xa_lock(&swap_cache->i_pages);  in __split_huge_page()
    3217  __xa_store(&folio->mapping->i_pages, head[i].index,  in __split_huge_page()
    3220  __xa_store(&swap_cache->i_pages, offset + i,  in __split_huge_page()
    3243  xa_unlock(&swap_cache->i_pages);  in __split_huge_page()
    3250  xa_unlock(&folio->mapping->i_pages);  in __split_huge_page()
    3354  XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);  in split_huge_page_to_list_to_order()
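In __split_huge_page() the large folio's i_pages slots are repointed at the individual tail pages while the lock is held (interrupts are already disabled further up the call chain, hence the plain xa_lock()). A stripped-down sketch of just that replacement loop, with head/nr taken from the snippets; this omits the swap-cache case and all the rest of the split path:

/* Point each tail page's slot at the tail page itself so post-split
 * lookups no longer land on the old large folio. */
static void install_split_tails(struct address_space *mapping,
                                struct page *head, unsigned int nr)
{
        unsigned int i;

        xa_lock(&mapping->i_pages);     /* caller already disabled interrupts */
        for (i = 1; i < nr; i++)
                __xa_store(&mapping->i_pages, head[i].index, head + i, 0);
        xa_unlock(&mapping->i_pages);
}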
/linux/arch/csky/abiv1/inc/abi/
  cacheflush.h
    19  #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
    20  #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
/linux/include/linux/
  backing-dev.h
    253  !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&  in inode_to_wb()
    297  xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);  in unlocked_inode_to_wb_begin()
    315  xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);  in unlocked_inode_to_wb_end()
  pagemap.h
    145  return xa_empty(&mapping->i_pages);  in mapping_empty()
    182  head = rcu_access_pointer(mapping->i_pages.xa_head);  in mapping_shrinkable()
    1432  folio = xa_load(&ractl->mapping->i_pages, ractl->_index);  in __readahead_folio()
    1476  XA_STATE(xas, &rac->mapping->i_pages, 0);  in __readahead_batch()
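Line 145 is the entire body of mapping_empty(): an address_space is empty exactly when its i_pages xarray is. For context, a sketch of that wrapper plus an illustrative lockless presence check (the second helper is not a real pagemap.h API, just an example of the same pattern):

static inline bool cache_is_empty(struct address_space *mapping)
{
        return xa_empty(&mapping->i_pages);
}

/* Illustrative only: does any entry (folio or shadow) sit at @index? */
static inline bool cache_has_entry(struct address_space *mapping, pgoff_t index)
{
        return xa_load(&mapping->i_pages, index) != NULL;
}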
/linux/Documentation/translations/zh_CN/mm/
  page_migration.rst
    84  5. The i_pages lock is taken. This causes every process trying to reach the page through the mapping to block on the spinlock.
    99  11. The i_pages lock is dropped. Lookups in the mapping become possible again; processes go from spinning on the lock to …
/linux/fs/erofs/
  fscache.c
    62  XA_STATE(xas, &req->mapping->i_pages, start_page);  in erofs_fscache_req_complete()
    243  iov_iter_xarray(&io->iter, ITER_DEST, &folio->mapping->i_pages,  in erofs_fscache_meta_read_folio()
    283  iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE);  in erofs_fscache_data_read_slice()
    298  iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count);  in erofs_fscache_data_read_slice()
    318  iov_iter_xarray(&io->iter, ITER_DEST, &mapping->i_pages, pos, count);  in erofs_fscache_data_read_slice()
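These read paths build an iov_iter directly over i_pages, so data coming back from the cache backend is copied straight into folios already sitting in the page cache. A minimal sketch of that setup, assuming <linux/uio.h> and that the folios for the range are already present; the function name is illustrative:

static void setup_cache_iter(struct iov_iter *iter,
                             struct address_space *mapping,
                             loff_t pos, size_t count)
{
        /* ITER_DEST: the backend will write into these folios. */
        iov_iter_xarray(iter, ITER_DEST, &mapping->i_pages, pos, count);
}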
/linux/arch/arm/include/asm/
  cacheflush.h
    321  #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
    322  #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
/linux/fs/
  dax.c
    448  xas.xa = &mapping->i_pages;  in dax_lock_folio()
    473  XA_STATE(xas, &mapping->i_pages, folio->index);  in dax_unlock_folio()
    502  xas.xa = &mapping->i_pages;  in dax_lock_mapping_entry()
    536  XA_STATE(xas, &mapping->i_pages, index);  in dax_unlock_mapping_entry()
    685  XA_STATE(xas, &mapping->i_pages, start_idx);  in dax_layout_busy_page_range()
    748  XA_STATE(xas, &mapping->i_pages, index);  in __dax_invalidate_entry()
    773  XA_STATE(xas, &mapping->i_pages, start);  in __dax_clear_dirty_range()
    1028  XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);  in dax_writeback_mapping_range()
    1716  XA_STATE(xas, &mapping->i_pages, vmf->pgoff);  in dax_iomap_pte_fault()
    1826  XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);  in dax_iomap_pmd_fault()
    [all …]
  fs-writeback.c
    401  XA_STATE(xas, &mapping->i_pages, 0);  in inode_do_switch_wbs()
    406  xa_lock_irq(&mapping->i_pages);  in inode_do_switch_wbs()
    484  xa_unlock_irq(&mapping->i_pages);  in inode_do_switch_wbs()
  inode.c
    403  xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);  in __address_space_init_once()
    667  xa_lock_irq(&inode->i_data.i_pages);  in clear_inode()
    677  xa_unlock_irq(&inode->i_data.i_pages);  in clear_inode()
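Line 403 is where the page-cache xarray gets its locking discipline: XA_FLAGS_LOCK_IRQ tells the xarray code its internal lock is taken with interrupts disabled (matching every xa_lock_irq() hit in this listing), and XA_FLAGS_ACCOUNT makes node allocations memcg-accounted. A sketch of an equivalent initialisation, assuming kernel context; the helper name is illustrative:

static void init_page_cache_xarray(struct address_space *mapping)
{
        /* Interrupt-safe lock class + memcg-charged internal nodes. */
        xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
}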
/linux/fs/orangefs/
  inode.c
    244  struct xarray *i_pages;  in orangefs_readahead() (local)
    262  i_pages = &rac->mapping->i_pages;  in orangefs_readahead()
    264  iov_iter_xarray(&iter, ITER_DEST, i_pages, offset, readahead_length(rac));  in orangefs_readahead()
/linux/Documentation/mm/
  page_migration.rst
    107  5. The i_pages lock is taken. This will cause all processes trying
    125  11. The i_pages lock is dropped. With that lookups in the mapping
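Steps 5 and 11 quoted above bracket the window in which the mapping entry is switched from the old folio to the new one (cf. folio_migrate_mapping() in mm/migrate.c). A stripped-down sketch of that window, assuming kernel context; refcount checks and error handling are omitted:

static void switch_mapping_entry(struct address_space *mapping,
                                 struct folio *old, struct folio *new)
{
        XA_STATE(xas, &mapping->i_pages, folio_index(old));

        xas_lock_irq(&xas);        /* step 5: lookups now spin on the lock */
        xas_store(&xas, new);      /* repoint the slot at the new folio */
        xas_unlock_irq(&xas);      /* step 11: lookups resume and find @new */
}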
/linux/fs/nfs/
  fscache.c
    315  xa_for_each_range(&sreq->rreq->mapping->i_pages, idx, page, start, last) {  in nfs_netfs_issue_read()
/linux/fs/afs/
  dir.c
    118  XA_STATE(xas, &mapping->i_pages, 0);  in afs_dir_read_cleanup()
    193  XA_STATE(xas, &mapping->i_pages, 0);  in afs_dir_dump()
    227  XA_STATE(xas, &mapping->i_pages, 0);  in afs_dir_check()
    313  iov_iter_xarray(&req->def_iter, ITER_DEST, &dvnode->netfs.inode.i_mapping->i_pages,  in afs_read_dir()