| /linux/include/linux/ |
| rmap.h | Reverse-mapping helpers: folio_lock_large_mapcount()/folio_unlock_large_mapcount() (a bit spinlock on folio->_mm_ids), folio_mm_id()/folio_set_mm_id() and the folio_set/add/sub/inc/dec_*_large_mapcount() family for per-MM mapcount tracking of large folios, __folio_large_mapcount_sanity_checks(), __folio_rmap_sanity_checks(), the folio_add_anon_rmap_pte()/folio_add_file_rmap_pte()/folio_remove_rmap_pte() wrappers, hugetlb rmap helpers (hugetlb_try_dup_anon_rmap(), hugetlb_try_share_anon_rmap(), hugetlb_add_file_rmap(), hugetlb_remove_rmap()), the __folio_dup_file_rmap(), __folio_try_dup_anon_rmap() and __folio_try_share_anon_rmap() internals with their pte/ptes/pmd variants, and inline definitions of folio_referenced(), try_to_unmap() and folio_mkclean(). |
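The large-mapcount helpers at the top of this hit are nearly complete in the excerpt; the following is a cleaned-up reading of those fragments, shown as a sketch rather than a verbatim copy of the header (the surrounding config guards and the definitions of FOLIO_MM_IDS_LOCK_BITNUM and MM_ID_MASK are not part of the hit and are assumed to live nearby in the same header).

```c
#include <linux/bit_spinlock.h>

/*
 * Serialise updates to a large folio's per-MM mapcount state. The lock is
 * a single bit inside folio->_mm_ids, so no separate spinlock is needed.
 */
static __always_inline void folio_lock_large_mapcount(struct folio *folio)
{
	bit_spin_lock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids);
}

static __always_inline void folio_unlock_large_mapcount(struct folio *folio)
{
	__bit_spin_unlock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids);
}

/* Each slot stores an MM id in the low bits; mask off any flag bits. */
static inline unsigned int folio_mm_id(const struct folio *folio, int idx)
{
	return folio->_mm_id[idx] & MM_ID_MASK;
}
```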
| hugetlb_cgroup.h | __hugetlb_cgroup_from_folio() asserts folio_test_hugetlb() and returns folio->_hugetlb_cgroup or folio->_hugetlb_cgroup_rsvd; hugetlb_cgroup_from_folio() and hugetlb_cgroup_from_folio_rsvd() are the non-reserved/reserved wrappers, and __set_hugetlb_cgroup() stores the cgroup pointer back into the folio. |
| pagemap.h | Page-cache helpers: the folio_mapping() declaration, folio_flush_mapping() (special-cases swap-cache folios before falling back to folio_mapping()), folio_inode() (folio->mapping->host), and folio_attach_private(), which takes a folio reference, stores the pointer in folio->private and sets the private flag. |
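folio_attach_private() shown in this hit takes its own folio reference, so the matching teardown has to drop it. A minimal usage sketch, assuming the standard folio_detach_private() counterpart from the same header; the per-folio state structure and the two helpers around it are hypothetical.

```c
#include <linux/pagemap.h>
#include <linux/slab.h>

struct my_folio_state {		/* hypothetical per-folio bookkeeping */
	unsigned long dirty_bits;
};

static int my_init_folio(struct folio *folio)
{
	struct my_folio_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;
	/* Takes a folio reference, stores the pointer, sets the private flag. */
	folio_attach_private(folio, state);
	return 0;
}

static void my_teardown_folio(struct folio *folio)
{
	/* Clears the flag, drops the reference and hands the data back. */
	struct my_folio_state *state = folio_detach_private(folio);

	kfree(state);
}
```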
| mm_inline.h | LRU helpers: folio_is_file_lru() (!folio_test_swapbacked()), __folio_clear_lru_flags() (asserts folio_test_lru(), then clears the lru, active and unevictable flags), and folio_lru_list(), which asserts that a folio is never both active and unevictable. |
| page_ref.h | Folio reference-count primitives layered on the page refcount: folio_ref_count() (page_ref_count(&folio->page)), folio_set_count(), folio_ref_add(), folio_ref_sub() and folio_ref_sub_return() (atomic_sub_return() on folio->_refcount). |
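These helpers are thin wrappers over the shared page refcount; a cleaned-up reading of the fragments above, with the kernel-doc comments and any tracing hooks around them omitted.

```c
static inline int folio_ref_count(const struct folio *folio)
{
	return page_ref_count(&folio->page);
}

static inline void folio_ref_add(struct folio *folio, int nr)
{
	page_ref_add(&folio->page, nr);
}

static inline void folio_ref_sub(struct folio *folio, int nr)
{
	page_ref_sub(&folio->page, nr);
}

static inline int folio_ref_sub_return(struct folio *folio, int nr)
{
	/* Returns the refcount left after the subtraction. */
	return atomic_sub_return(nr, &folio->_refcount);
}
```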
| memremap.h | ZONE_DEVICE hooks: the folio_free() and folio_split() callbacks in the dev_pagemap ops, folio_is_device_private() and folio_is_pci_p2pdma() (folio_is_zone_device() plus a check of folio->pgmap->type), and folio_zone_device_data(), which warns if the folio is not device-private. |
| swap.h | Swap interfaces: page_swap_entry() derives a page's swap entry from folio->swap plus folio_page_idx(), declarations of workingset_eviction(), workingset_refault() and workingset_activation(), folio_may_be_lru_cached(), and inline definitions of free_folio_and_swap_cache(), free_swap_cache(), put_swap_folio(), folio_alloc_swap(), folio_free_swap(), folio_throttle_swaprate(), mem_cgroup_try_charge_swap() and mem_cgroup_swap_full(). |
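page_swap_entry() is visible almost in full in this hit; reassembled, it reads as below. It relies on a folio holding one contiguous swap allocation in folio->swap, so the entry for an individual page is the folio's entry plus that page's index within the folio.

```c
static inline swp_entry_t page_swap_entry(struct page *page)
{
	struct folio *folio = page_folio(page);
	swp_entry_t entry = folio->swap;

	/*
	 * Swap entries of a large folio are contiguous: offset by the
	 * page's position inside the folio.
	 */
	entry.val += folio_page_idx(folio, page);
	return entry;
}
```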
| /linux/mm/ |
| swap.c | Per-CPU folio batches and LRU handling: __page_cache_release()/page_cache_release() and __folio_put(), lru_add(), folio_batch_move_lru() and __folio_batch_add_and_move(), lru_move_tail()/folio_rotate_reclaimable(), lru_note_cost_refault(), lru_activate()/folio_activate()/__lru_cache_activate_folio(), the MGLRU helpers lru_gen_inc_refs()/lru_gen_clear_refs(), folio_mark_accessed(), folio_add_lru()/folio_add_lru_vma(), the deactivation paths (lru_deactivate_file(), lru_deactivate(), lru_lazyfree(), deactivate_file_folio(), folio_deactivate(), folio_mark_lazyfree()), and the bulk-release paths folios_put_refs(), release_pages() and folio_batch_remove_exceptionals(). |
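__page_cache_release() appears nearly intact in the hit; a cleaned-up reading follows. The lruvec/flags parameter types are assumptions inferred from how the parameters are used.

```c
static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
				 unsigned long *flagsp)
{
	if (folio_test_lru(folio)) {
		/* Take (or switch to) the lruvec lock this folio belongs to. */
		folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
		lruvec_del_folio(*lruvecp, folio);
		__folio_clear_lru_flags(folio);
	}
}
```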
| truncate.c | Truncation: clear_shadow_entries() and truncate_folio_batch_exceptionals() walk the mapping's XArray and pick out value (shadow) entries, folio_invalidate() calls the mapping's ->invalidate_folio() operation, and truncate_cleanup_folio() prepares a folio for removal from the page cache. |
| page_io.c | Swap I/O: __end_swap_bio_write() (on error redirties the folio and clears its reclaim flag, then ends writeback), __end_swap_bio_read() (marks the folio uptodate on success and unlocks it), is_folio_zero_filled() with swap_zeromap_folio_set()/swap_zeromap_folio_clear(), swap_writeout() and the swap_writepage_fs()/swap_writepage_bdev_sync()/swap_writepage_bdev_async() backends behind __swap_writepage(), count_swpout_vm_event(), bio_associate_blkg_from_page(), and the read side: sio_read_complete(), swap_read_folio_zeromap(), swap_read_folio_fs(), swap_read_folio_bdev_sync()/async() and swap_read_folio(). |
| filemap.c | Page-cache removal: page_cache_delete() positions an XA_STATE at folio->index (xas_set_order() with folio_order() for large folios), asserts the folio is locked and clears folio->mapping; filemap_unaccount_folio() asserts the folio is no longer mapped and prints the offending PFN if it still is. |
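The deletion path above works on folios that are already in the cache; for completeness, a minimal caller-side sketch of the lookup side, assuming the current filemap_get_folio() convention of returning an ERR_PTR() when nothing is found. The helper name is hypothetical.

```c
#include <linux/pagemap.h>

static int peek_at_index(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = filemap_get_folio(mapping, index);

	if (IS_ERR(folio))
		return PTR_ERR(folio);	/* typically -ENOENT */

	folio_lock(folio);
	/* ... inspect or modify the folio here ... */
	folio_unlock(folio);
	folio_put(folio);		/* drop the lookup reference */
	return 0;
}
```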
| rmap.c | Reverse-map implementation: folio_get_anon_vma() and folio_lock_anon_vma_read() look up a folio's anon_vma under RCU, re-checking folio_mapped() so a racing unmap cannot hand back a stale anon_vma; page_address_in_vma(), folio_referenced_one()/folio_referenced(), page_mkclean_one()/folio_mkclean(), mapping_wrprotect_range_one(), __folio_mod_stat(), the __folio_add_rmap()/__folio_add_anon_rmap()/__folio_add_file_rmap()/__folio_remove_rmap() internals with their PTE/PMD/PUD wrappers, folio_move_anon_rmap(), __folio_set_anon() and folio_add_new_anon_rmap(), folio_unmap_pte_batch(), try_to_unmap_one()/try_to_unmap(), try_to_migrate_one()/try_to_migrate(), make_device_exclusive(), the rmap_walk_anon()/rmap_walk_file()/rmap_walk()/rmap_walk_locked() walkers, and hugetlb_add_anon_rmap()/hugetlb_add_new_anon_rmap(). |
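The folio_get_anon_vma() fragments show the lockless anon_vma lookup pattern: read folio->mapping under RCU, confirm it is an anon mapping and that the folio is still mapped, then pin the anon_vma. A condensed sketch of that pattern under a hypothetical name; the refcount-pinning step and the exact mapping-flag test are assumptions, since only parts of them appear in the hit.

```c
struct anon_vma *anon_vma_of(const struct folio *folio)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *)(anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * A mapped folio keeps its anon_vma alive; if it was unmapped in
	 * the meantime, the reference just taken may be the last one and
	 * must not be handed to the caller.
	 */
	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();
	return anon_vma;
}
```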
| swap.h | Internal swap helpers (mm/swap.h): __swap_cluster_get_and_lock() asserts the folio is locked and in the swap cache before locking the cluster holding folio->swap, with swap_cluster_get_and_lock()/swap_cluster_get_and_lock_irq() wrappers, folio_matches_swap_entry(), folio_swap_flags(), and a second set of stub definitions (swap_read_folio(), swap_update_readahead(), swap_writeout(), swap_cache_add_folio(), swap_cache_del_folio(), __swap_cache_del_folio()) for builds without swap support. |
| mlock.c | __mlock_folio() clears the folio's LRU flag, relocks the lruvec and, if the folio has meanwhile become evictable, moves it off the unevictable list back onto a regular LRU list, adjusting the counters by folio_nr_pages(); the remaining branches handle folios that stay (or become) unevictable. |
| swap_state.c | Swap-cache implementation: swap_cache_get_folio() looks up the folio for a swap entry and returns it unlocked with an elevated refcount (the caller must lock it and re-check that it still matches the entry), swap_cache_add_folio(), __swap_cache_del_folio()/swap_cache_del_folio(), free_swap_cache()/free_folio_and_swap_cache()/free_pages_and_swap_cache(), swap_update_readahead(), and the swap-in paths __read_swap_cache_async(), read_swap_cache_async(), swap_cluster_readahead(), swap_vma_readahead() and swapin_readahead(). |
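The kernel-doc for swap_cache_get_folio() spells out the calling convention: the folio comes back unlocked with an extra reference, and the caller must lock it and re-validate it against the entry. A minimal caller sketch following that contract; folio_matches_swap_entry() is taken from the mm/swap.h hit above, and the wrapper name is hypothetical.

```c
static struct folio *lookup_swap_folio(swp_entry_t entry)
{
	struct folio *folio = swap_cache_get_folio(entry);

	if (!folio)
		return NULL;

	folio_lock(folio);
	/* The folio may have been freed or reused since the lookup. */
	if (!folio_matches_swap_entry(folio, entry)) {
		folio_unlock(folio);
		folio_put(folio);
		return NULL;
	}
	return folio;		/* locked, lookup reference still held */
}
```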
| page_idle.c | page_idle_get_folio() converts a PFN to a folio and only keeps a reference if the folio is on the LRU, re-checking after folio_try_get() that the page still belongs to that folio; page_idle_clear_pte_refs_one() uses DEFINE_FOLIO_VMA_WALK to clear the referenced bits of the folio's mappings. |
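page_idle_get_folio() is the usual lockless PFN-to-folio pattern (the same shape appears in damon_get_folio() further down): take a speculative reference with folio_try_get(), then re-check that the page still belongs to that folio. A cleaned-up reading; the initial pfn_to_online_page() filtering is an assumption, since that part is truncated in the hit.

```c
static struct folio *page_idle_get_folio(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	struct folio *folio;

	if (!page)
		return NULL;

	folio = page_folio(page);
	if (!folio_test_lru(folio) || !folio_try_get(folio))
		return NULL;

	/*
	 * The folio may have been split or freed before the reference
	 * landed; only keep it if the page still maps to the same folio.
	 */
	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
		folio_put(folio);
		folio = NULL;
	}
	return folio;
}
```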
| migrate.c | Migration: isolate_movable_ops_page()/putback_movable_ops_page() (taking and dropping a folio reference around folio_trylock()), putback_movable_pages(), isolate_folio_to_list(), remove_migration_pte() and try_to_map_unused_to_zeropage(), __folio_migrate_mapping()/folio_migrate_mapping()/folio_migrate_flags(), try_split_folio(), the migrate_hugetlbs()/migrate_folios_move()/migrate_folios_undo()/migrate_pages_batch()/migrate_pages() machinery, the move_pages() helpers __add_folio_for_migration(), add_folio_for_migration() and do_pages_stat_array(), and migrate_misplaced_folio_prepare()/migrate_misplaced_folio() for NUMA hinting faults. |
| /linux/fs/iomap/ |
| buffered-io.c | iomap buffered I/O: struct iomap_folio_state is allocated per folio to track per-block uptodate and dirty state (ifs_alloc()/ifs_free(), ifs_is_fully_uptodate(), the ifs_next_*_block() scanners, ifs_find_dirty_range(), and the iomap_set_range_uptodate()/iomap_set_range_dirty()/iomap_clear_range_dirty()/iomap_find_dirty_range() wrappers); the read path (iomap_adjust_read_range(), iomap_read_inline_data(), iomap_finish_folio_read(), iomap_read_folio()); the folio aops iomap_is_partially_uptodate(), iomap_release_folio(), iomap_invalidate_folio() and iomap_dirty_folio(); the write path (iomap_write_begin()/iomap_write_end() and helpers, iomap_write_iter(), the delalloc punch helpers, iomap_unshare_iter(), iomap_zero_iter(), iomap_page_mkwrite()); and writeback (iomap_writeback_folio(), iomap_writeback_range(), iomap_writeback_handle_eof(), iomap_writepages()). |
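The comment at the top of this hit explains that struct iomap_folio_state carries one uptodate bit and one dirty bit per filesystem block of the folio; ifs_is_fully_uptodate() is visible almost in full and reads as below. The struct layout beyond the state bitmap is not part of the hit and is only sketched.

```c
struct iomap_folio_state {
	spinlock_t	state_lock;	/* other fields elided for the sketch */
	unsigned long	state[];	/* per-block uptodate, then dirty, bits */
};

static inline bool ifs_is_fully_uptodate(struct folio *folio,
					 struct iomap_folio_state *ifs)
{
	struct inode *inode = folio->mapping->host;

	/* The folio is uptodate once every block-sized chunk of it is. */
	return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}
```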
| /linux/fs/jfs/ |
| jfs_metapage.c | JFS metapage handling: __lock_metapage() drops and re-takes the lock on mp->folio around waiting, folio_to_mp() finds the metapage through the meta_anchor stored in folio->private, insert_metapage() attaches that anchor with folio_attach_private() and kmaps the page, and remove_metapage() undoes it. |
| /linux/mm/damon/ |
| ops-common.c | damon_get_folio() converts a PFN to a folio, taking a reference only if the folio is on the LRU and re-checking the page-to-folio association after folio_try_get(); damon_ptep_mkold() uses it to find the folio behind a PTE before marking the entry old. |
| paddr.c | DAMON physical-address operations: damon_pa_mkold() and damon_pa_young() translate a physical address with damon_get_folio(PHYS_PFN(paddr)) and age or test the folio (reporting folio_size() as the access unit), damos_pa_filter_out() and damon_pa_invalid_damos_folio() filter scheme targets, and damon_pa_pageout(), damon_pa_mark_accessed_or_deactivate(), damon_pa_migrate() and damon_pa_stat() implement the DAMOS actions. |
| /linux/fs/nilfs2/ |
| page.c | NILFS2 page helpers: __nilfs_get_folio_block() returns the buffer_head for a block inside a folio, creating empty buffers with create_empty_buffers() when the folio has none; nilfs_grab_buffer() gets the folio with filemap_grab_folio() and unlocks/puts it on failure; nilfs_forget_buffer() works from bh->b_folio. |
| /linux/fs/ubifs/ |
| file.c | UBIFS file I/O: read_block()/do_readpage() fill a folio block by block, zeroing missing ranges and decompressing data with ubifs_decompress_folio(); write_begin_slow(), allocate_budget()/cancel_budget() and ubifs_write_begin()/ubifs_write_end() implement budgeted buffered writes; populate_page(), ubifs_do_bulk_read(), ubifs_bulk_read() and ubifs_read_folio() handle reads; do_writepage(), ubifs_writepage(), ubifs_writepages() and do_truncation() handle writeback and truncation; plus the folio aops ubifs_invalidate_folio(), ubifs_dirty_folio(), ubifs_release_folio() and ubifs_vm_page_mkwrite(). |
| /linux/include/trace/events/ |
| pagemap.h | Tracepoints: the trace_pagemap_flags() macro packs folio state (anon vs. file, mapped, swap cache, swap backed, mapped to disk, private buffers) into the PAGEMAP_* bits using folio_test_*() helpers, and the trace event takes a struct folio * and records the folio pointer. |
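trace_pagemap_flags() is visible in full in the hit; reassembled, the macro condenses the folio's state into a small bitmask so the tracepoint only has to record one value (the PAGEMAP_* constants are defined earlier in the same header).

```c
#define trace_pagemap_flags(folio) ( \
	(folio_test_anon(folio)          ? PAGEMAP_ANONYMOUS  : PAGEMAP_FILE) | \
	(folio_mapped(folio)             ? PAGEMAP_MAPPED     : 0) | \
	(folio_test_swapcache(folio)     ? PAGEMAP_SWAPCACHE  : 0) | \
	(folio_test_swapbacked(folio)    ? PAGEMAP_SWAPBACKED : 0) | \
	(folio_test_mappedtodisk(folio)  ? PAGEMAP_MAPPEDDISK : 0) | \
	(folio_test_private(folio)       ? PAGEMAP_BUFFERS    : 0) \
	)
```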
| /linux/fs/ecryptfs/ |
| mmap.c | eCryptfs mmap support: ecryptfs_writepages() iterates dirty folios with writeback_iter(), encrypting each via ecryptfs_encrypt_page() and clearing the uptodate flag (and logging the index) on failure before unlocking; ecryptfs_copy_up_encrypted_with_header() maps the folio with kmap_local_folio() to build the view of an encrypted file that includes its header. |
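The ecryptfs_writepages() fragments show the writeback_iter() loop shape: the iterator hands back one locked dirty folio at a time and takes the previous iteration's status as input. A condensed sketch of that loop; the function name and the per-folio helper are hypothetical, and the error handling is simplified relative to the real filesystem code.

```c
#include <linux/pagemap.h>
#include <linux/writeback.h>

static int my_writepages(struct address_space *mapping,
			 struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error = 0;

	/* writeback_iter() returns each dirty folio locked, or NULL when done. */
	while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
		error = encrypt_and_write_folio(folio);	/* hypothetical helper */
		if (error) {
			folio_clear_uptodate(folio);
			mapping_set_error(mapping, error);
		}
		folio_unlock(folio);
	}
	return error;
}
```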