| /linux/include/linux/ |
| rmap.h |
    175: struct anon_vma *folio_get_anon_vma(const struct folio *folio);
    178: static __always_inline void folio_lock_large_mapcount(struct folio *folio)
    180:     bit_spin_lock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids);
    183: static __always_inline void folio_unlock_large_mapcount(struct folio *folio)
    185:     __bit_spin_unlock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids);
    188: static inline unsigned int folio_mm_id(const struct folio *folio, int idx)
    191:     return folio->_mm_id[idx] & MM_ID_MASK;
    194: static inline void folio_set_mm_id(struct folio *folio, int idx, mm_id_t id)
    197:     folio->_mm_id[idx] &= ~MM_ID_MASK;
    198:     folio->_mm_id[idx] |= id;
    [all …]
|
| pagemap.h |
    551: struct address_space *folio_mapping(const struct folio *folio);
    565: static inline struct address_space *folio_flush_mapping(struct folio *folio)
    567:     if (unlikely(folio_test_swapcache(folio)))
    570:     return folio_mapping(folio);
    582: static inline struct inode *folio_inode(struct folio *folio)
    584:     return folio->mapping->host;
    595: static inline void folio_attach_private(struct folio *folio, void *data)
    597:     folio_get(folio);
    598:     folio->private = data;
    599:     folio_set_private(folio);
    [all …]
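The folio_attach_private()/folio_detach_private() helpers declared here are how filesystems hang per-folio state off `folio->private`: attaching takes an extra folio reference and sets the private flag, detaching undoes both. A minimal sketch of that pattern, assuming a hypothetical `my_fs_state` structure and helpers that are not part of the header above:

```c
#include <linux/pagemap.h>
#include <linux/slab.h>

/* Hypothetical per-folio state an example filesystem might track. */
struct my_fs_state {
	unsigned long dirty_bits;
};

static int my_fs_attach_state(struct folio *folio)
{
	struct my_fs_state *state;

	if (folio_get_private(folio))	/* state already attached */
		return 0;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	/* Takes a folio reference and sets the private flag for us. */
	folio_attach_private(folio, state);
	return 0;
}

static void my_fs_detach_state(struct folio *folio)
{
	/* Drops the reference folio_attach_private() took; kfree(NULL) is fine. */
	kfree(folio_detach_private(folio));
}
```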
|
| hugetlb_cgroup.h |
    61: __hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
    63:     VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
    65:         return folio->_hugetlb_cgroup_rsvd;
    67:     return folio->_hugetlb_cgroup;
    70: static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
    72:     return __hugetlb_cgroup_from_folio(folio, false);
    76: hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
    78:     return __hugetlb_cgroup_from_folio(folio, true);
    81: static inline void __set_hugetlb_cgroup(struct folio *folio,
    84:     VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
    [all …]
|
| mm_inline.h |
    28: static inline int folio_is_file_lru(const struct folio *folio)
    30:     return !folio_test_swapbacked(folio);
    66: static __always_inline void __folio_clear_lru_flags(struct folio *folio)
    68:     VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);
    70:     __folio_clear_lru(folio);
    73:     if (folio_test_active(folio) && folio_test_unevictable(folio))
    76:     __folio_clear_active(folio);
    77:     __folio_clear_unevictable(folio);
    87: static __always_inline enum lru_list folio_lru_list(const struct folio *folio)
    91:     VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);
    [all …]
|
| page_ref.h |
    87: static inline int folio_ref_count(const struct folio *folio)
    89:     return page_ref_count(&folio->page);
    104: static inline void folio_set_count(struct folio *folio, int v)
    106:     set_page_count(&folio->page, v);
    125: static inline void folio_ref_add(struct folio *folio, int nr)
    127:     page_ref_add(&folio->page, nr);
    137: static inline void folio_ref_sub(struct folio *folio, int nr)
    139:     page_ref_sub(&folio->page, nr);
    142: static inline int folio_ref_sub_return(struct folio *folio, int nr)
    144:     int ret = atomic_sub_return(nr, &folio->_refcount);
    [all …]
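These wrappers manipulate the folio's `_refcount` directly; most callers instead pair folio_get()/folio_put(), or use folio_try_get() when the folio may already be on its way to being freed. A hedged sketch of that common usage (the `inspect_folio()` helper is invented for illustration):

```c
#include <linux/mm.h>
#include <linux/printk.h>

/* Illustrative only: take a temporary reference while peeking at a folio. */
static bool inspect_folio(struct folio *folio)
{
	bool uptodate;

	/* Fails if the refcount already hit zero (folio being freed). */
	if (!folio_try_get(folio))
		return false;

	uptodate = folio_test_uptodate(folio);
	pr_debug("folio has %d references\n", folio_ref_count(folio));

	folio_put(folio);	/* drop the reference taken above */
	return uptodate;
}
```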
|
| swap.h |
    309:     struct folio *folio = page_folio(page);
    310:     swp_entry_t entry = folio->swap;
    312:     entry.val += folio_page_idx(folio, page);
    320: void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
    321: void workingset_refault(struct folio *folio, void *shadow);
    322: void workingset_activation(struct folio *folio);
    335: void lru_note_cost_refault(struct folio *);
    336: void folio_add_lru(struct folio *);
    337: void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
    339: void folio_mark_accessed(struct folio *);
    [all …]
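The LRU hooks declared here are the usual way callers feed folios to reclaim: folio_add_lru() for a folio that has just been set up, folio_mark_accessed() for a folio found again in the cache. A small illustrative sketch (the wrapper function is hypothetical):

```c
#include <linux/mm.h>
#include <linux/swap.h>

/*
 * Illustrative sketch: how a caller typically hands folios to the LRU
 * machinery declared above.  Error handling is elided.
 */
static void example_lru_usage(struct folio *new_folio, struct folio *cached_folio)
{
	/* A freshly allocated, fully initialised folio goes on the LRU. */
	folio_add_lru(new_folio);

	/*
	 * A folio found again in the cache is marked accessed so reclaim
	 * treats it as recently used.
	 */
	folio_mark_accessed(cached_folio);
}
```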
|
| memremap.h |
    83:     void (*folio_free)(struct folio *folio);
    108:     void (*folio_split)(struct folio *head, struct folio *tail);
    166: static inline bool folio_is_device_private(const struct folio *folio)
    169:         folio_is_zone_device(folio) &&
    170:         folio->pgmap->type == MEMORY_DEVICE_PRIVATE;
    179: static inline bool folio_is_pci_p2pdma(const struct folio *folio)
    182:         folio_is_zone_device(folio) &&
    183:         folio->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
    186: static inline void *folio_zone_device_data(const struct folio *folio)
    188:     VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio);
    [all …]
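folio_is_device_private() distinguishes ZONE_DEVICE folios backing device-private memory, whose contents are not CPU-addressable, from ordinary folios. A trivial hedged sketch of the kind of guard a caller might place around direct access (the helper name is made up):

```c
#include <linux/memremap.h>

/*
 * Illustrative sketch: device-private folios must not be touched through
 * the normal kernel mapping, so check before mapping one.
 */
static bool can_map_folio_directly(const struct folio *folio)
{
	/* ZONE_DEVICE folios of type MEMORY_DEVICE_PRIVATE have no
	 * CPU-addressable contents. */
	if (folio_is_device_private(folio))
		return false;

	return true;
}
```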
|
| /linux/mm/ |
| swap.c |
    73: static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
    76:     if (folio_test_lru(folio)) {
    77:         folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
    78:         lruvec_del_folio(*lruvecp, folio);
    79:         __folio_clear_lru_flags(folio);
    87: static void page_cache_release(struct folio *folio)
    92:     __page_cache_release(folio, &lruvec, &flags);
    97: void __folio_put(struct folio *folio)
    99:     if (unlikely(folio_is_zone_device(folio))) {
    100:         free_zone_device_folio(folio);
    [all …]
|
| truncate.c |
    30:     struct folio *folio;
    42:     xas_for_each(&xas, folio, max) {
    43:         if (xa_is_value(folio))
    65:     struct folio *folio;
    107:     xas_for_each(&xas, folio, indices[nr-1]) {
    108:         if (xa_is_value(folio))
    135: void folio_invalidate(struct folio *folio, size_t offset, size_t length)
    137:     const struct address_space_operations *aops = folio->mapping->a_ops;
    140:         aops->invalidate_folio(folio, offset, length);
    154: static void truncate_cleanup_folio(struct folio *folio)
    [all …]
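Both clear_shadow_entries() and truncate_folio_batch_exceptionals() use the xas_for_each() walk shown above, where xa_is_value() tells shadow (value) entries apart from real folios. A hedged sketch of the same walk, assuming a purely read-side caller; the counting helper is invented:

```c
#include <linux/pagemap.h>
#include <linux/xarray.h>

/*
 * Illustrative sketch of the xas_for_each() pattern above: walk the page
 * cache and count real folios, skipping shadow (value) entries.
 */
static unsigned long count_cached_folios(struct address_space *mapping,
					 pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct folio *folio;
	unsigned long count = 0;

	rcu_read_lock();
	xas_for_each(&xas, folio, end) {
		if (xas_retry(&xas, folio))	/* raced with a store; retry */
			continue;
		if (xa_is_value(folio))		/* shadow entry, not a folio */
			continue;
		count++;
	}
	rcu_read_unlock();

	return count;
}
```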
|
| page_io.c |
    32:     struct folio *folio = bio_first_folio_all(bio);
    43:         folio_mark_dirty(folio);
    47:         folio_clear_reclaim(folio);
    49:     folio_end_writeback(folio);
    60:     struct folio *folio = bio_first_folio_all(bio);
    67:     folio_mark_uptodate(folio);
    69:     folio_unlock(folio);
    174: static bool is_folio_zero_filled(struct folio *folio)
    181:     for (i = 0; i < folio_nr_pages(folio); i++) {
    182:         data = kmap_local_folio(folio, i * PAGE_SIZE);
    [all …]
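is_folio_zero_filled() maps each page of the folio in turn with kmap_local_folio(). The following is an illustrative reconstruction of that pattern, not the exact kernel code: scan every page and bail out at the first non-zero byte.

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative reconstruction of the zero-detection loop hinted at above. */
static bool folio_is_all_zeroes(struct folio *folio)
{
	long i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		/* Map one page of the folio at a time. */
		void *data = kmap_local_folio(folio, i * PAGE_SIZE);
		bool nonzero = memchr_inv(data, 0, PAGE_SIZE);

		kunmap_local(data);
		if (nonzero)
			return false;
	}
	return true;
}
```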
|
| filemap.c |
    130:         struct folio *folio, void *shadow)
    132:     XA_STATE(xas, &mapping->i_pages, folio->index);
    137:     xas_set_order(&xas, folio->index, folio_order(folio));
    138:     nr = folio_nr_pages(folio);
    140:     VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
    145:     folio->mapping = NULL;
    151:         struct folio *folio)
    155:     VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
    156:     if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
    158:             current->comm, folio_pfn(folio));
    [all …]
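The filemap code above maintains the page cache that lookups run against. A hedged sketch of the usual caller-side pattern, assuming a hypothetical helper: look up a folio with filemap_get_folio(), lock it while inspecting it, then drop the lock and the reference.

```c
#include <linux/err.h>
#include <linux/pagemap.h>

/* Illustrative only: check whether a cached folio at @index is uptodate. */
static bool cached_folio_is_uptodate(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = filemap_get_folio(mapping, index);
	bool ret;

	if (IS_ERR(folio))	/* not present in the cache */
		return false;

	folio_lock(folio);
	ret = folio_test_uptodate(folio);
	folio_unlock(folio);

	folio_put(folio);	/* drop the reference filemap_get_folio() took */
	return ret;
}
```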
|
| rmap.c |
    494: struct anon_vma *folio_get_anon_vma(const struct folio *folio)
    499:     VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
    502:     anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
    505:     if (!folio_mapped(folio))
    521:     if (!folio_mapped(folio)) {
    540: struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
    547:     VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
    550:     anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
    553:     if (!folio_mapped(folio))
    564:     if (!folio_mapped(folio)) {
    [all …]
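folio_get_anon_vma() expects the folio to be locked (note the VM_WARN_ON_FOLIO above) and returns a referenced anon_vma, or NULL when the folio is not a mapped anonymous folio; the caller releases the reference with put_anon_vma(). A hedged sketch of that contract (the wrapper is invented):

```c
#include <linux/pagemap.h>
#include <linux/rmap.h>

/* Illustrative only: grab a reference on the folio's anon_vma. */
static void peek_anon_mapping(struct folio *folio)
{
	struct anon_vma *anon_vma;

	folio_lock(folio);
	anon_vma = folio_get_anon_vma(folio);	/* NULL for file/unmapped folios */
	folio_unlock(folio);

	if (!anon_vma)
		return;

	/* ... walk reverse mappings under anon_vma locking here ... */

	put_anon_vma(anon_vma);	/* drop the reference taken above */
}
```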
|
| swap.h |
    135:         const struct folio *folio, bool irq)
    137:     VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
    138:     VM_WARN_ON_ONCE_FOLIO(!folio_test_swapcache(folio), folio);
    139:     return __swap_cluster_lock(__swap_entry_to_info(folio->swap),
    140:                    swp_offset(folio->swap), irq);
    156:         const struct folio *folio)
    158:     return __swap_cluster_get_and_lock(folio, false);
    171:         const struct folio *folio)
    173:     return __swap_cluster_get_and_lock(folio, true);
    189: void swap_read_folio(struct folio *folio, struct swap_iocb **plug);
    [all …]
|
| mlock.c |
    61: static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)
    64:     if (!folio_test_clear_lru(folio))
    67:     lruvec = folio_lruvec_relock_irq(folio, lruvec);
    69:     if (unlikely(folio_evictable(folio))) {
    75:         if (folio_test_unevictable(folio)) {
    76:             lruvec_del_folio(lruvec, folio);
    77:             folio_clear_unevictable(folio);
    78:             lruvec_add_folio(lruvec, folio);
    81:                       folio_nr_pages(folio));
    86:     if (folio_test_unevictable(folio)) {
    [all …]
|
| swap_state.c |
    88: struct folio *swap_cache_get_folio(swp_entry_t entry)
    91:     struct folio *folio;
    98:         folio = swp_tb_to_folio(swp_tb);
    99:         if (likely(folio_try_get(folio)))
    100:             return folio;
    137: void swap_cache_add_folio(struct folio *folio, swp_entry_t entry, void **shadowp)
    143:     unsigned long nr_pages = folio_nr_pages(folio);
    145:     VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
    146:     VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
    147:     VM_WARN_ON_ONCE_FOLIO(!folio_test_swapbacked(folio), folio);
    [all …]
|
| migrate.c |
    119:     struct folio *folio = folio_get_nontail_page(page);
    131:     if (!folio)
    156:     if (unlikely(!folio_trylock(folio)))
    173:     folio_unlock(folio);
    178:     folio_unlock(folio);
    180:     folio_put(folio);
    199:     struct folio *folio = page_folio(page);
    203:     folio_lock(folio);
    206:     folio_unlock(folio);
    207:     folio_put(folio);
    [all …]
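isolate_movable_ops_page() uses folio_trylock() because it cannot afford to sleep, while putback_movable_ops_page() simply takes folio_lock(). A small hedged sketch of that trylock-versus-lock pattern (the helper and its `can_sleep` flag are invented):

```c
#include <linux/pagemap.h>

/*
 * Illustrative sketch: opportunistic paths use folio_trylock() and back
 * off when the folio is busy, while paths that may sleep take folio_lock().
 */
static bool try_process_folio(struct folio *folio, bool can_sleep)
{
	if (can_sleep) {
		folio_lock(folio);	/* may sleep until the lock is free */
	} else if (!folio_trylock(folio)) {
		return false;		/* busy; caller will retry later */
	}

	/* ... operate on the locked folio ... */

	folio_unlock(folio);
	return true;
}
```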
|
| /linux/fs/iomap/ |
| buffered-io.c |
    33: static inline bool ifs_is_fully_uptodate(struct folio *folio,
    36:     struct inode *inode = folio->mapping->host;
    38:     return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
    45: static unsigned ifs_next_uptodate_block(struct folio *folio,
    48:     struct iomap_folio_state *ifs = folio->private;
    57: static unsigned ifs_next_nonuptodate_block(struct folio *folio,
    60:     struct iomap_folio_state *ifs = folio->private;
    65: static bool ifs_set_range_uptodate(struct folio *folio,
    68:     struct inode *inode = folio->mapping->host;
    74:     return ifs_is_fully_uptodate(folio, ifs);
    [all …]
|
| /linux/fs/jfs/ |
| jfs_metapage.c |
    51:         folio_unlock(mp->folio);
    53:         folio_lock(mp->folio);
    84: static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
    86:     struct meta_anchor *anchor = folio->private;
    93: static inline int insert_metapage(struct folio *folio, struct metapage *mp)
    99:     a = folio->private;
    104:         folio_attach_private(folio, a);
    105:         kmap(&folio->page);
    109:     l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
    118: static inline void remove_metapage(struct folio *folio, struct metapage *mp)
    [all …]
|
| /linux/mm/damon/ |
| ops-common.c |
    26: struct folio *damon_get_folio(unsigned long pfn)
    29:     struct folio *folio;
    34:     folio = page_folio(page);
    35:     if (!folio_test_lru(folio) || !folio_try_get(folio))
    37:     if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
    38:         folio_put(folio);
    39:         folio = NULL;
    41:     return folio;
    47:     struct folio *folio;
    56:     folio = damon_get_folio(pfn);
    [all …]
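damon_get_folio() turns a PFN into a safely referenced folio: take the reference only while the folio looks like an LRU folio, then re-check in case it was freed or split in the meantime. An illustrative reconstruction of that pattern, not the exact DAMON code:

```c
#include <linux/memory_hotplug.h>
#include <linux/mm.h>

/* Illustrative reconstruction of the pfn-to-folio pattern above. */
static struct folio *get_lru_folio(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	struct folio *folio;

	if (!page)
		return NULL;

	folio = page_folio(page);
	/* Only take a reference while the folio still looks like an LRU folio. */
	if (!folio_test_lru(folio) || !folio_try_get(folio))
		return NULL;
	/* Re-check: the folio may have been freed or split under us. */
	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
		folio_put(folio);
		return NULL;
	}
	return folio;
}
```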
|
| paddr.c |
    42:     struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
    44:     if (!folio)
    47:     damon_folio_mkold(folio);
    48:     folio_put(folio);
    72:     struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
    75:     if (!folio)
    78:     accessed = damon_folio_young(folio);
    79:     *folio_sz = folio_size(folio);
    80:     folio_put(folio);
    126: static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
    [all …]
|
| /linux/fs/nilfs2/ |
| page.c |
    28: static struct buffer_head *__nilfs_get_folio_block(struct folio *folio,
    34:     struct buffer_head *bh = folio_buffers(folio);
    37:         bh = create_empty_buffers(folio, 1 << blkbits, b_state);
    53:     struct folio *folio;
    56:     folio = filemap_grab_folio(mapping, index);
    57:     if (IS_ERR(folio))
    60:     bh = __nilfs_get_folio_block(folio, blkoff, index, blkbits, b_state);
    62:         folio_unlock(folio);
    63:         folio_put(folio);
    76:     struct folio *folio = bh->b_folio;
    [all …]
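nilfs_grab_buffer() pins a locked folio with filemap_grab_folio() and then makes sure it carries buffer_heads via folio_buffers()/create_empty_buffers(). A hedged sketch of the same grab-and-attach pattern (the helper name is invented, error handling is simplified):

```c
#include <linux/buffer_head.h>
#include <linux/pagemap.h>

/*
 * Illustrative sketch: pin (and lock) a folio in the page cache, then
 * make sure it has buffer_heads of the right block size attached.
 */
static struct buffer_head *grab_folio_buffers(struct address_space *mapping,
					      pgoff_t index, unsigned int blkbits)
{
	struct folio *folio = filemap_grab_folio(mapping, index);
	struct buffer_head *bh;

	if (IS_ERR(folio))
		return NULL;

	bh = folio_buffers(folio);
	if (!bh)
		bh = create_empty_buffers(folio, 1 << blkbits, 0);

	/* Caller is expected to folio_unlock()/folio_put() when done. */
	return bh;
}
```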
|
| /linux/fs/ubifs/ |
| file.c |
    45: static int read_block(struct inode *inode, struct folio *folio, size_t offset,
    58:         folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
    77:     err = ubifs_decompress_folio(c, &dn->data, dlen, folio, offset,
    88:         folio_zero_range(folio, offset + len, UBIFS_BLOCK_SIZE - len);
    99: static int do_readpage(struct folio *folio)
    104:     struct inode *inode = folio->mapping->host;
    110:         inode->i_ino, folio->index, i_size, folio->flags.f);
    111:     ubifs_assert(c, !folio_test_checked(folio));
    112:     ubifs_assert(c, !folio->private);
    114:     block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
    [all …]
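read_block() uses folio_zero_range() to clear the part of each block that was not covered by data read from flash, so stale bytes never leak to user space. A minimal hedged sketch of that tail-zeroing step (the helper is invented):

```c
#include <linux/highmem.h>
#include <linux/pagemap.h>

/*
 * Illustrative sketch: after reading 'len' bytes into a block-sized
 * region of the folio, zero the remainder of that region.
 */
static void zero_block_tail(struct folio *folio, size_t offset,
			    size_t block_size, size_t len)
{
	if (len < block_size)
		folio_zero_range(folio, offset + len, block_size - len);
}
```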
|
| /linux/fs/ecryptfs/ |
| mmap.c |
    33:     struct folio *folio = NULL;
    36:     while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
    37:         error = ecryptfs_encrypt_page(folio);
    41:                 folio->index);
    42:             folio_clear_uptodate(folio);
    45:         folio_unlock(folio);
    91: ecryptfs_copy_up_encrypted_with_header(struct folio *folio,
    100:     loff_t view_extent_num = ((loff_t)folio->index
    110:         page_virt = kmap_local_folio(folio, 0);
    117:             page_virt, folio->mapping->host);
    [all …]
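ecryptfs_writepages() is built on the writeback_iter() loop shown above: the iterator hands back each dirty folio in turn, and the loop body must unlock the folio before asking for the next one. A hedged sketch of that loop shape (the write-out step is left as a comment):

```c
#include <linux/pagemap.h>
#include <linux/writeback.h>

/*
 * Illustrative sketch of a writeback_iter()-based ->writepages loop:
 * the iterator tracks progress and picks up the error we report back.
 */
static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error = 0;

	while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
		/* ... write out the folio's data here; set error on failure ... */
		folio_unlock(folio);	/* must unlock before the next iteration */
	}
	return error;
}
```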
|
| /linux/fs/netfs/ |
| buffered_write.c |
    16: static void __netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
    19:         folio_attach_private(folio, netfs_get_group(netfs_group));
    22: static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
    24:     void *priv = folio_get_private(folio);
    28:         folio_attach_private(folio, netfs_get_group(netfs_group));
    30:         folio_detach_private(folio);
    38: static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
    121:     struct folio *folio = NULL, *writethrough = NULL;
    179:         folio = netfs_grab_folio_for_write(mapping, pos, part);
    180:         if (IS_ERR(folio)) {
    [all …]
|
| /linux/include/trace/events/ |
| pagemap.h |
    19: #define trace_pagemap_flags(folio) ( \
    20:     (folio_test_anon(folio) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \
    21:     (folio_mapped(folio) ? PAGEMAP_MAPPED : 0) | \
    22:     (folio_test_swapcache(folio) ? PAGEMAP_SWAPCACHE : 0) | \
    23:     (folio_test_swapbacked(folio) ? PAGEMAP_SWAPBACKED : 0) | \
    24:     (folio_test_mappedtodisk(folio) ? PAGEMAP_MAPPEDDISK : 0) | \
    25:     (folio_test_private(folio) ? PAGEMAP_BUFFERS : 0) \
    30:     TP_PROTO(struct folio *folio),
    32:     TP_ARGS(folio),
    35:         __field(struct folio *, folio )
    [all …]
|