/linux/mm/
filemap.c
    page_cache_delete():
        struct folio *folio, void *shadow)
        XA_STATE(xas, &mapping->i_pages, folio->index);
        ...
        xas_set_order(&xas, folio->index, folio_order(folio));
        nr = folio_nr_pages(folio);
        ...
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        ...
        folio->mapping = NULL;
    filemap_unaccount_folio():
        struct folio *folio)
        VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
        if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
                current->comm, folio_pfn(folio));
    ...
truncate.c
    clear_shadow_entries():
        struct folio *folio;
        ...
        xas_for_each(&xas, folio, max) {
            if (xa_is_value(folio))
    truncate_folio_batch_exceptionals():
        struct folio *folio;
        ...
        xas_for_each(&xas, folio, indices[nr-1]) {
            if (xa_is_value(folio))
    folio_invalidate():
        void folio_invalidate(struct folio *folio, size_t offset, size_t length)
        const struct address_space_operations *aops = folio->mapping->a_ops;
        ...
        aops->invalidate_folio(folio, offset, length);
    truncate_cleanup_folio():
        static void truncate_cleanup_folio(struct folio *folio)
    ...
mlock.c
    __mlock_folio():
        static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)
        if (!folio_test_clear_lru(folio))
        ...
        lruvec = folio_lruvec_relock_irq(folio, lruvec);
        ...
        if (unlikely(folio_evictable(folio))) {
        ...
        if (folio_test_unevictable(folio)) {
            lruvec_del_folio(lruvec, folio);
            folio_clear_unevictable(folio);
            lruvec_add_folio(lruvec, folio);
        ...
                folio_nr_pages(folio));
        ...
        if (folio_test_unevictable(folio)) {
    ...
migrate.c
    isolate_movable_page():
        struct folio *folio = folio_get_nontail_page(page);
        ...
        if (!folio)
        ...
        if (unlikely(!__folio_test_movable(folio)))
        ...
        if (unlikely(!folio_trylock(folio)))
        ...
        if (!folio_test_movable(folio) || folio_test_isolated(folio))
        ...
        mops = folio_movable_ops(folio);
        VM_BUG_ON_FOLIO(!mops, folio);
        ...
        if (!mops->isolate_page(&folio->page, mode))
        ...
        WARN_ON_ONCE(folio_test_isolated(folio));
        folio_set_isolated(folio);
    ...
page_idle.c
    page_idle_get_folio():
        static struct folio *page_idle_get_folio(unsigned long pfn)
        struct folio *folio;
        ...
        folio = page_folio(page);
        if (!folio_test_lru(folio) || !folio_try_get(folio))
        ...
        if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
            folio_put(folio);
            folio = NULL;
    page_idle_clear_pte_refs_one(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, void *arg)
    page_idle_clear_pte_refs(struct folio *folio)
    page_idle_bitmap_read():
        struct folio *folio;
    page_idle_bitmap_write():
        struct folio *folio;
    ...
memory-failure.c
    hwpoison_filter_dev():
        struct folio *folio = page_folio(p);
        ...
        mapping = folio_mapping(folio);
    shake_folio():
        void shake_folio(struct folio *folio)
        if (folio_test_hugetlb(folio))
        ...
        if (folio_test_slab(folio))
    collect_procs_anon():
        static void collect_procs_anon(const struct folio *folio,
        ...
        av = folio_lock_anon_vma_read(folio, NULL);
        ...
        pgoff = page_pgoff(folio, page);
    collect_procs_file():
        static void collect_procs_file(const struct folio *folio,
        ...
        struct address_space *mapping = folio->mapping;
    ...
swap.h
    void swap_read_folio(struct folio *folio, struct swap_iocb **plug);
    ...
    void __swap_writepage(struct folio *folio, struct writeback_control *wbc);
    ...
    bool add_to_swap(struct folio *folio);
    int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
    void __delete_from_swap_cache(struct folio *folio,
    void delete_from_swap_cache(struct folio *folio);
    ...
    struct folio *swap_cache_get_folio(swp_entry_t entry,
    struct folio *filemap_get_incore_folio(struct address_space *mapping,
    struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
    struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
    ...
internal.h
    folio_nr_pages_mapped():
        static inline int folio_nr_pages_mapped(const struct folio *folio)
        return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
    folio_swap():
        const struct folio *folio)
        .val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
    folio_raw_mapping():
        static inline void *folio_raw_mapping(const struct folio *folio)
        unsigned long mapping = (unsigned long)folio->mapping;
    folio_pte_batch():
        static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
        unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
        ...
        VM_WARN_ON_FOLIO(!pte_present(pte), folio);
        VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
    ...
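The folio_swap() fragment above rounds a swap entry value down to the boundary of its (possibly large) folio. A minimal userspace sketch of that arithmetic, assuming the usual power-of-two mask form of ALIGN_DOWN (illustrative values only, not kernel code):

    #include <stdio.h>

    /* Power-of-two form; the kernel's ALIGN_DOWN in linux/align.h is equivalent
     * for power-of-two 'a', which folio_nr_pages() always is. */
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

    int main(void)
    {
        unsigned long entry = 0x1237;   /* hypothetical swap entry value */
        unsigned long nr_pages = 16;    /* folio_nr_pages() of an order-4 folio */

        /* Prints: entry 0x1237 -> folio-aligned entry 0x1230 */
        printf("entry %#lx -> folio-aligned entry %#lx\n",
               entry, ALIGN_DOWN(entry, nr_pages));
        return 0;
    }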
/linux/fs/btrfs/
subpage.c
    btrfs_attach_subpage():
        struct folio *folio, enum btrfs_subpage_type type)
        ...
        if (folio->mapping)
            ASSERT(folio_test_locked(folio));
        ...
        if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
        ...
        folio_attach_private(folio, subpage);
    btrfs_detach_subpage():
        void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
        if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
        ...
        subpage = folio_detach_private(folio);
    btrfs_folio_inc_eb_refs():
        void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
        if (!btrfs_is_subpage(fs_info, folio->mapping))
    ...
subpage.h
    struct folio;
    ...
    struct folio *folio, enum btrfs_subpage_type type);
    void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio);
    ...
    void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
    void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
    ...
    struct folio *folio, u64 start, u32 len);
    struct folio *folio, u64 start, u32 len);
    struct folio *folio, unsigned long bitmap);
    ...
    struct folio *folio, u64 start, u32 len); \
    struct folio *folio, u64 start, u32 len); \
    ...
/linux/fs/jfs/
jfs_metapage.c
    __lock_metapage():
        folio_unlock(mp->folio);
        ...
        folio_lock(mp->folio);
    folio_to_mp():
        static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
        struct meta_anchor *anchor = folio->private;
    insert_metapage():
        static inline int insert_metapage(struct folio *folio, struct metapage *mp)
        ...
        a = folio->private;
        ...
        folio_attach_private(folio, a);
        kmap(&folio->page);
        ...
        l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
    remove_metapage():
        static inline void remove_metapage(struct folio *folio, struct metapage *mp)
    ...
/linux/include/linux/
hugetlb_cgroup.h
    __hugetlb_cgroup_from_folio():
        __hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
        VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
        ...
        return folio->_hugetlb_cgroup_rsvd;
        ...
        return folio->_hugetlb_cgroup;
    hugetlb_cgroup_from_folio():
        static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
        return __hugetlb_cgroup_from_folio(folio, false);
    hugetlb_cgroup_from_folio_rsvd():
        hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
        return __hugetlb_cgroup_from_folio(folio, true);
    __set_hugetlb_cgroup():
        static inline void __set_hugetlb_cgroup(struct folio *folio,
        VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
    ...
page_ref.h
    folio_ref_count():
        static inline int folio_ref_count(const struct folio *folio)
        return page_ref_count(&folio->page);
    folio_set_count():
        static inline void folio_set_count(struct folio *folio, int v)
        set_page_count(&folio->page, v);
    folio_ref_add():
        static inline void folio_ref_add(struct folio *folio, int nr)
        page_ref_add(&folio->page, nr);
    folio_ref_sub():
        static inline void folio_ref_sub(struct folio *folio, int nr)
        page_ref_sub(&folio->page, nr);
    folio_ref_sub_return():
        static inline int folio_ref_sub_return(struct folio *folio, int nr)
        int ret = atomic_sub_return(nr, &folio->_refcount);
    ...
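These wrappers forward folio refcounting onto the embedded struct page's _refcount. A minimal userspace model of the add/sub_return semantics using C11 atomics (mock_folio is a hypothetical stand-in; the kernel uses its own atomic_t API, not stdatomic):

    #include <stdatomic.h>
    #include <stdio.h>

    struct mock_folio { atomic_int refcount; };   /* models folio->_refcount */

    static void ref_add(struct mock_folio *f, int nr)
    {
        atomic_fetch_add(&f->refcount, nr);
    }

    /* Like atomic_sub_return(): returns the value *after* the subtraction. */
    static int ref_sub_return(struct mock_folio *f, int nr)
    {
        return atomic_fetch_sub(&f->refcount, nr) - nr;
    }

    int main(void)
    {
        struct mock_folio f = { .refcount = 1 };
        ref_add(&f, 2);                                      /* refcount: 3 */
        printf("after sub: %d\n", ref_sub_return(&f, 3));    /* prints 0 */
        return 0;
    }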
migrate.h
    typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
    typedef void free_folio_t(struct folio *folio, unsigned long private);
    ...
    int migrate_folio(struct address_space *mapping, struct folio *dst,
            struct folio *src, enum migrate_mode mode);
    ...
    struct folio *alloc_migration_target(struct folio *src, unsigned long private);
    bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
    ...
    isolate_folio_to_list(struct folio *folio, struct list_head *list)
    folio_test_movable(struct folio *folio)
    folio_movable_ops(struct folio *folio)
    migrate_misplaced_folio_prepare(struct folio *folio, struct vm_area_struct *vma, int node)
    migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node)
    ...
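new_folio_t and free_folio_t are the allocate/release callback pair a migration caller supplies: one produces a target folio, the other backs it out if migration fails. A small userspace sketch of that callback contract (mock struct folio, hypothetical helper names; not the kernel implementation):

    #include <stdio.h>
    #include <stdlib.h>

    struct folio { int id; };   /* mock folio, for illustration only */

    /* Callback pair mirroring new_folio_t / free_folio_t. */
    typedef struct folio *new_folio_t(struct folio *src, unsigned long private);
    typedef void free_folio_t(struct folio *folio, unsigned long private);

    static struct folio *alloc_target(struct folio *src, unsigned long private)
    {
        struct folio *dst = malloc(sizeof(*dst));
        (void)private;
        if (dst)
            dst->id = src->id + 1000;   /* arbitrary: mark it as the copy */
        return dst;
    }

    static void put_target(struct folio *folio, unsigned long private)
    {
        (void)private;
        free(folio);   /* undo alloc_target() when migration fails */
    }

    /* A migration loop takes both callbacks so it can back out cleanly. */
    static int migrate_one(struct folio *src, new_folio_t *get_new,
                           free_folio_t *put_new, unsigned long private)
    {
        struct folio *dst = get_new(src, private);
        if (!dst)
            return -1;
        if (src->id % 2) {             /* simulate a migration failure */
            put_new(dst, private);     /* release the unused target */
            return -1;
        }
        printf("migrated folio %d -> %d\n", src->id, dst->id);
        free(dst);                     /* sketch only: drop the copy */
        return 0;
    }

    int main(void)
    {
        struct folio src = { .id = 42 };
        return migrate_one(&src, alloc_target, put_target, 0);
    }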
/linux/mm/damon/
paddr.c
    damon_folio_mkold_one():
        static bool damon_folio_mkold_one(struct folio *folio,
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
    damon_folio_mkold():
        static void damon_folio_mkold(struct folio *folio)
        ...
        if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
            folio_set_idle(folio);
        ...
        need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
    damon_pa_mkold():
        struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
    damon_folio_young_one(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, void *arg)
    damon_folio_young(struct folio *folio)
    damon_pa_young():
        struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
    __damos_pa_filter_out(struct damos_filter *filter, struct folio *folio)
    damos_pa_filter_out(struct damos *scheme, struct folio *folio)
    damon_pa_pageout():
        struct folio *folio = damon_get_folio(PHYS_PFN(addr));
    damon_pa_mark_accessed_or_deactivate():
        struct folio *folio = damon_get_folio(PHYS_PFN(addr));
    damon_pa_migrate_folio_list():
        struct folio *folio;
    damon_pa_migrate_pages():
        struct folio *folio = lru_to_folio(folio_list);
    damon_pa_migrate():
        struct folio *folio = damon_get_folio(PHYS_PFN(addr));
    ...
/linux/fs/iomap/
buffered-io.c
    ifs_is_fully_uptodate():
        static inline bool ifs_is_fully_uptodate(struct folio *folio,
        struct inode *inode = folio->mapping->host;
        return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
    ifs_set_range_uptodate():
        static bool ifs_set_range_uptodate(struct folio *folio,
        struct inode *inode = folio->mapping->host;
        ...
        return ifs_is_fully_uptodate(folio, ifs);
    iomap_set_range_uptodate():
        static void iomap_set_range_uptodate(struct folio *folio, size_t off,
        struct iomap_folio_state *ifs = folio->private;
        ...
        uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
        ...
        folio_mark_uptodate(folio);
    ...
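iomap tracks per-block uptodate state in a bitmap and marks the whole folio uptodate only once every block is. A userspace sketch of that bookkeeping (mock types, hypothetical block count; not the iomap implementation, which uses real bitmaps and a spinlock):

    #include <stdbool.h>
    #include <stdio.h>

    #define BLOCKS_PER_FOLIO 8   /* hypothetical: e.g. 32K folio / 4K blocks */

    struct folio_state {
        bool block_uptodate[BLOCKS_PER_FOLIO];   /* models ifs->state */
        bool folio_uptodate;                     /* models the folio flag */
    };

    /* Mark blocks [first, last] uptodate; return true once all blocks are. */
    static bool set_range_uptodate(struct folio_state *fs, int first, int last)
    {
        for (int i = first; i <= last; i++)
            fs->block_uptodate[i] = true;
        for (int i = 0; i < BLOCKS_PER_FOLIO; i++)
            if (!fs->block_uptodate[i])
                return false;
        return true;   /* analog of bitmap_full() */
    }

    int main(void)
    {
        struct folio_state fs = { 0 };
        set_range_uptodate(&fs, 0, 3);           /* half the folio: not yet */
        if (set_range_uptodate(&fs, 4, 7))
            fs.folio_uptodate = true;            /* folio_mark_uptodate() analog */
        printf("folio uptodate: %d\n", fs.folio_uptodate);   /* prints 1 */
        return 0;
    }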
/linux/fs/bcachefs/
fs-io-pagecache.h
    typedef DARRAY(struct folio *) folios;
    ...
    folio_end_pos():
        static inline u64 folio_end_pos(struct folio *folio)
        return folio_pos(folio) + folio_size(folio);
    folio_sectors():
        static inline size_t folio_sectors(struct folio *folio)
        return PAGE_SECTORS << folio_order(folio);
    folio_sector():
        static inline loff_t folio_sector(struct folio *folio)
        return folio_pos(folio) >> 9;
    folio_end_sector():
        static inline u64 folio_end_sector(struct folio *folio)
        return folio_end_pos(folio) >> 9;
    ...
    bch2_folio_sector_set():
        static inline void bch2_folio_sector_set(struct folio *folio,
    ...
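These helpers convert a folio's byte position and size into 512-byte sectors; shifting right by 9 divides by 512. A standalone sketch of the same arithmetic, assuming 4K pages so PAGE_SECTORS is 8 (sample values, not bcachefs code):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE    4096ULL
    #define PAGE_SECTORS (PAGE_SIZE >> 9)   /* 8 sectors of 512 bytes per page */

    int main(void)
    {
        unsigned order = 2;                   /* hypothetical order-2 folio */
        uint64_t pos   = 5 * PAGE_SIZE;       /* folio_pos(): byte offset in file */
        uint64_t size  = PAGE_SIZE << order;  /* folio_size(): 16K */

        printf("end_pos    = %llu\n", (unsigned long long)(pos + size));          /* 36864 */
        printf("sectors    = %llu\n", (unsigned long long)(PAGE_SECTORS << order)); /* 32 */
        printf("sector     = %llu\n", (unsigned long long)(pos >> 9));            /* 40 */
        printf("end_sector = %llu\n", (unsigned long long)((pos + size) >> 9));   /* 72 */
        return 0;
    }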
fs-io-pagecache.c
    bch2_filemap_get_contig_folios_d():
        struct folio *f;
    __bch2_folio_create():
        struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
        ...
                    folio_sectors(folio), gfp);
        ...
        folio_attach_private(folio, s);
    bch2_folio_create():
        struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp)
        return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp);
    __bch2_folio_set():
        static void __bch2_folio_set(struct folio *folio,
        struct bch_folio *s = bch2_folio(folio);
        unsigned i, sectors = folio_sectors(folio);
        ...
        bch2_folio_sector_set(folio, s, i, state);
    ...
/linux/fs/ubifs/
file.c
    do_readpage():
        static int do_readpage(struct folio *folio)
        ...
        struct inode *inode = folio->mapping->host;
        ...
                inode->i_ino, folio->index, i_size, folio->flags);
        ubifs_assert(c, !folio_test_checked(folio));
        ubifs_assert(c, !folio->private);
        addr = kmap_local_folio(folio, 0);
        ...
        block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        ...
        folio_set_checked(folio);
        addr = folio_zero_tail(folio, 0, addr);
        ...
        if (++i >= (UBIFS_BLOCKS_PER_PAGE << folio_order(folio)))
    ...
/linux/fs/sysv/
dir.c
    dir_commit_chunk():
        static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
        struct address_space *mapping = folio->mapping;
        ...
        block_write_end(NULL, mapping, pos, len, len, folio, NULL);
        ...
        folio_unlock(folio);
    dir_get_folio():
        struct folio **foliop)
        struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);
        if (IS_ERR(folio))
            return ERR_CAST(folio);
        *foliop = folio;
        return kmap_local_folio(folio, 0);
    ...
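dir_get_folio() relies on the kernel's ERR_PTR convention: an error code travels inside the returned pointer itself, and ERR_CAST() re-types it when propagating. A simplified userspace sketch of that convention (the real macros live in linux/err.h; the lookup() function here is hypothetical):

    #include <errno.h>
    #include <stdio.h>

    /* Small negative errnos map to the top 4095 addresses, which are
     * never valid pointers. */
    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long err)      { return (void *)err; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *lookup(int key)
    {
        static int value = 42;
        if (key < 0)
            return ERR_PTR(-EINVAL);   /* error rides in the pointer */
        return &value;
    }

    int main(void)
    {
        void *p = lookup(-1);
        if (IS_ERR(p))
            printf("error: %ld\n", PTR_ERR(p));   /* prints -22 */
        return 0;
    }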
/linux/fs/ecryptfs/
mmap.c
    ecryptfs_writepages():
        struct folio *folio = NULL;
        ...
        while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
            error = ecryptfs_encrypt_page(folio);
        ...
                    folio->index);
                folio_clear_uptodate(folio);
        ...
            folio_unlock(folio);
    ecryptfs_copy_up_encrypted_with_header():
        ecryptfs_copy_up_encrypted_with_header(struct folio *folio,
        ...
        loff_t view_extent_num = ((loff_t)folio->index
        ...
        page_virt = kmap_local_folio(folio, 0);
        ...
                    page_virt, folio->mapping->host);
    ...
/linux/fs/netfs/
buffered_write.c
    __netfs_set_group():
        static void __netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
        folio_attach_private(folio, netfs_get_group(netfs_group));
    netfs_set_group():
        static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
        void *priv = folio_get_private(folio);
        ...
        folio_attach_private(folio, netfs_get_group(netfs_group));
        ...
        folio_detach_private(folio);
    netfs_grab_folio_for_write():
        static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
    netfs_perform_write():
        struct folio *folio = NULL, *writethrough = NULL;
        ...
        folio = netfs_grab_folio_for_write(mapping, pos, part);
        if (IS_ERR(folio)) {
    ...
/linux/include/trace/events/
pagemap.h
    #define trace_pagemap_flags(folio) ( \
        (folio_test_anon(folio)         ? PAGEMAP_ANONYMOUS  : PAGEMAP_FILE) | \
        (folio_mapped(folio)            ? PAGEMAP_MAPPED     : 0) | \
        (folio_test_swapcache(folio)    ? PAGEMAP_SWAPCACHE  : 0) | \
        (folio_test_swapbacked(folio)   ? PAGEMAP_SWAPBACKED : 0) | \
        (folio_test_mappedtodisk(folio) ? PAGEMAP_MAPPEDDISK : 0) | \
        (folio_test_private(folio)      ? PAGEMAP_BUFFERS    : 0))
    ...
    TP_PROTO(struct folio *folio),
    TP_ARGS(folio),
    ...
    __field(struct folio *, folio)
    ...
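The macro packs several folio predicates into one flag word that the tracepoint records. A userspace model of that bit packing (the flag values and mock_folio struct here are placeholders; the real PAGEMAP_* constants are defined earlier in pagemap.h):

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder bit values, one per folio predicate. */
    enum {
        PAGEMAP_ANONYMOUS = 1 << 0,
        PAGEMAP_FILE      = 1 << 1,
        PAGEMAP_MAPPED    = 1 << 2,
        PAGEMAP_SWAPCACHE = 1 << 3,
    };

    struct mock_folio { bool anon, mapped, swapcache; };

    static unsigned trace_flags(const struct mock_folio *f)
    {
        /* anon vs. file is a two-way choice; the rest are optional bits. */
        return (f->anon      ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) |
               (f->mapped    ? PAGEMAP_MAPPED    : 0) |
               (f->swapcache ? PAGEMAP_SWAPCACHE : 0);
    }

    int main(void)
    {
        struct mock_folio f = { .anon = true, .mapped = true };
        printf("flags = %#x\n", trace_flags(&f));   /* 0x5: ANONYMOUS|MAPPED */
        return 0;
    }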
/linux/fs/nilfs2/
dir.c
    nilfs_prepare_chunk():
        static int nilfs_prepare_chunk(struct folio *folio, unsigned int from,
        loff_t pos = folio_pos(folio) + from;
        return __block_write_begin(folio, pos, to - from, nilfs_get_block);
    nilfs_commit_chunk():
        static void nilfs_commit_chunk(struct folio *folio,
        loff_t pos = folio_pos(folio) + from;
        ...
        nr_dirty = nilfs_page_count_clean_buffers(folio, from, to);
        copied = block_write_end(NULL, mapping, pos, len, len, folio, NULL);
        ...
        folio_unlock(folio);
    nilfs_check_folio():
        static bool nilfs_check_folio(struct folio *folio, char *kaddr)
        struct inode *dir = folio->mapping->host;
    ...
/linux/fs/gfs2/
aops.c
    gfs2_trans_add_databufs():
        void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
        struct buffer_head *head = folio_buffers(folio);
    gfs2_write_jdata_folio():
        static int gfs2_write_jdata_folio(struct folio *folio,
        struct inode * const inode = folio->mapping->host;
        ...
        if (folio_pos(folio) < i_size &&
            i_size < folio_pos(folio) + folio_size(folio))
                folio_zero_segment(folio, offset_in_folio(folio, i_size),
                                   folio_size(folio));
        return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
    __gfs2_jdata_write_folio():
        static int __gfs2_jdata_write_folio(struct folio *folio,
    ...
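gfs2_write_jdata_folio() zeroes the slice of the folio that lies past EOF before writeback: when i_size falls inside the folio, everything from offset_in_folio(folio, i_size) to the folio's end is cleared. A standalone sketch of that range computation (assumed 16K folio and sample offsets; not gfs2 code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t folio_pos  = 32768;   /* byte offset of the folio in the file */
        uint64_t folio_size = 16384;   /* hypothetical order-2 folio */
        uint64_t i_size     = 40000;   /* EOF lands inside this folio */

        if (folio_pos < i_size && i_size < folio_pos + folio_size) {
            uint64_t start = i_size - folio_pos;   /* offset_in_folio() analog */
            /* Prints: zero bytes [7232, 16384) within the folio */
            printf("zero bytes [%llu, %llu) within the folio\n",
                   (unsigned long long)start, (unsigned long long)folio_size);
        }
        return 0;
    }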