Lines matching refs: folio (a hedged rmap_walk() usage sketch follows the listing)

494 struct anon_vma *folio_get_anon_vma(const struct folio *folio)  in folio_get_anon_vma()  argument
499 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); in folio_get_anon_vma()
502 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); in folio_get_anon_vma()
505 if (!folio_mapped(folio)) in folio_get_anon_vma()
521 if (!folio_mapped(folio)) { in folio_get_anon_vma()
540 struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio, in folio_lock_anon_vma_read() argument
547 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); in folio_lock_anon_vma_read()
550 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); in folio_lock_anon_vma_read()
553 if (!folio_mapped(folio)) in folio_lock_anon_vma_read()
564 if (!folio_mapped(folio)) { in folio_lock_anon_vma_read()
583 if (!folio_mapped(folio)) { in folio_lock_anon_vma_read()
763 unsigned long page_address_in_vma(const struct folio *folio, in page_address_in_vma() argument
766 if (folio_test_anon(folio)) { in page_address_in_vma()
767 struct anon_vma *anon_vma = folio_anon_vma(folio); in page_address_in_vma()
777 } else if (vma->vm_file->f_mapping != folio->mapping) { in page_address_in_vma()
782 return vma_address(vma, page_pgoff(folio, page), 1); in page_address_in_vma()
824 static bool folio_referenced_one(struct folio *folio, in folio_referenced_one() argument
828 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in folio_referenced_one()
853 mlock_vma_folio(folio, vma); in folio_referenced_one()
866 folio_test_anon(folio) && folio_test_swapbacked(folio) && in folio_referenced_one()
867 !folio_maybe_mapped_shared(folio)) { in folio_referenced_one()
893 folio_clear_idle(folio); in folio_referenced_one()
894 if (folio_test_clear_young(folio)) in folio_referenced_one()
944 int folio_referenced(struct folio *folio, int is_locked, in folio_referenced() argument
949 .mapcount = folio_mapcount(folio), in folio_referenced()
964 if (!folio_raw_mapping(folio)) in folio_referenced()
968 we_locked = folio_trylock(folio); in folio_referenced()
973 rmap_walk(folio, &rwc); in folio_referenced()
977 folio_unlock(folio); in folio_referenced()
1059 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, in page_mkclean_one() argument
1062 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); in page_mkclean_one()
1078 int folio_mkclean(struct folio *folio) in folio_mkclean() argument
1088 BUG_ON(!folio_test_locked(folio)); in folio_mkclean()
1090 if (!folio_mapped(folio)) in folio_mkclean()
1093 mapping = folio_mapping(folio); in folio_mkclean()
1097 rmap_walk(folio, &rwc); in folio_mkclean()
1110 static bool mapping_wrprotect_range_one(struct folio *folio, in mapping_wrprotect_range_one() argument
1128 static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
1209 static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped) in __folio_mod_stat() argument
1214 idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; in __folio_mod_stat()
1215 lruvec_stat_mod_folio(folio, idx, nr); in __folio_mod_stat()
1218 if (folio_test_anon(folio)) { in __folio_mod_stat()
1220 lruvec_stat_mod_folio(folio, idx, nr_pmdmapped); in __folio_mod_stat()
1223 idx = folio_test_swapbacked(folio) ? in __folio_mod_stat()
1225 __mod_node_page_state(folio_pgdat(folio), idx, in __folio_mod_stat()
1231 static __always_inline void __folio_add_rmap(struct folio *folio, in __folio_add_rmap() argument
1235 atomic_t *mapped = &folio->_nr_pages_mapped; in __folio_add_rmap()
1239 __folio_rmap_sanity_checks(folio, page, nr_pages, level); in __folio_add_rmap()
1243 if (!folio_test_large(folio)) { in __folio_add_rmap()
1244 nr = atomic_inc_and_test(&folio->_mapcount); in __folio_add_rmap()
1249 nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma); in __folio_add_rmap()
1252 nr = folio_large_nr_pages(folio); in __folio_add_rmap()
1266 folio_add_large_mapcount(folio, orig_nr_pages, vma); in __folio_add_rmap()
1270 first = atomic_inc_and_test(&folio->_entire_mapcount); in __folio_add_rmap()
1273 nr_pmdmapped = folio_large_nr_pages(folio); in __folio_add_rmap()
1274 nr = folio_inc_return_large_mapcount(folio, vma); in __folio_add_rmap()
1277 nr = folio_large_nr_pages(folio); in __folio_add_rmap()
1286 nr_pages = folio_large_nr_pages(folio); in __folio_add_rmap()
1302 folio_inc_large_mapcount(folio, vma); in __folio_add_rmap()
1307 __folio_mod_stat(folio, nr, nr_pmdmapped); in __folio_add_rmap()
1319 void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) in folio_move_anon_rmap() argument
1323 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in folio_move_anon_rmap()
1332 WRITE_ONCE(folio->mapping, anon_vma); in folio_move_anon_rmap()
1342 static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, in __folio_set_anon() argument
1363 WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); in __folio_set_anon()
1364 folio->index = linear_page_index(vma, address); in __folio_set_anon()
1374 static void __page_check_anon_rmap(const struct folio *folio, in __page_check_anon_rmap() argument
1389 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, in __page_check_anon_rmap()
1390 folio); in __page_check_anon_rmap()
1391 VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address), in __page_check_anon_rmap()
1395 static __always_inline void __folio_add_anon_rmap(struct folio *folio, in __folio_add_anon_rmap() argument
1401 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); in __folio_add_anon_rmap()
1403 __folio_add_rmap(folio, page, nr_pages, vma, level); in __folio_add_anon_rmap()
1405 if (likely(!folio_test_ksm(folio))) in __folio_add_anon_rmap()
1406 __page_check_anon_rmap(folio, page, vma, address); in __folio_add_anon_rmap()
1429 VM_WARN_ON_FOLIO(!folio_test_large(folio) && PageAnonExclusive(page) && in __folio_add_anon_rmap()
1430 atomic_read(&folio->_mapcount) > 0, folio); in __folio_add_anon_rmap()
1434 VM_WARN_ON_FOLIO(folio_test_large(folio) && in __folio_add_anon_rmap()
1435 folio_entire_mapcount(folio) > 1 && in __folio_add_anon_rmap()
1436 PageAnonExclusive(cur_page), folio); in __folio_add_anon_rmap()
1445 PageAnonExclusive(cur_page), folio); in __folio_add_anon_rmap()
1454 if (folio_nr_pages(folio) == nr_pages) in __folio_add_anon_rmap()
1455 mlock_vma_folio(folio, vma); in __folio_add_anon_rmap()
1474 void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page, in folio_add_anon_rmap_ptes() argument
1478 __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags, in folio_add_anon_rmap_ptes()
1495 void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, in folio_add_anon_rmap_pmd() argument
1499 __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, in folio_add_anon_rmap_pmd()
1521 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, in folio_add_new_anon_rmap() argument
1527 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); in folio_add_new_anon_rmap()
1528 VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio); in folio_add_new_anon_rmap()
1534 if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE)) in folio_add_new_anon_rmap()
1535 __folio_set_swapbacked(folio); in folio_add_new_anon_rmap()
1536 __folio_set_anon(folio, vma, address, exclusive); in folio_add_new_anon_rmap()
1538 if (likely(!folio_test_large(folio))) { in folio_add_new_anon_rmap()
1540 atomic_set(&folio->_mapcount, 0); in folio_add_new_anon_rmap()
1542 SetPageAnonExclusive(&folio->page); in folio_add_new_anon_rmap()
1543 } else if (!folio_test_pmd_mappable(folio)) { in folio_add_new_anon_rmap()
1546 nr = folio_large_nr_pages(folio); in folio_add_new_anon_rmap()
1548 struct page *page = folio_page(folio, i); in folio_add_new_anon_rmap()
1557 folio_set_large_mapcount(folio, nr, vma); in folio_add_new_anon_rmap()
1559 atomic_set(&folio->_nr_pages_mapped, nr); in folio_add_new_anon_rmap()
1561 nr = folio_large_nr_pages(folio); in folio_add_new_anon_rmap()
1563 atomic_set(&folio->_entire_mapcount, 0); in folio_add_new_anon_rmap()
1564 folio_set_large_mapcount(folio, 1, vma); in folio_add_new_anon_rmap()
1566 atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); in folio_add_new_anon_rmap()
1568 SetPageAnonExclusive(&folio->page); in folio_add_new_anon_rmap()
1575 __folio_mod_stat(folio, nr, nr_pmdmapped); in folio_add_new_anon_rmap()
1576 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); in folio_add_new_anon_rmap()
1579 static __always_inline void __folio_add_file_rmap(struct folio *folio, in __folio_add_file_rmap() argument
1583 VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); in __folio_add_file_rmap()
1585 __folio_add_rmap(folio, page, nr_pages, vma, level); in __folio_add_file_rmap()
1593 if (folio_nr_pages(folio) == nr_pages) in __folio_add_file_rmap()
1594 mlock_vma_folio(folio, vma); in __folio_add_file_rmap()
1608 void folio_add_file_rmap_ptes(struct folio *folio, struct page *page, in folio_add_file_rmap_ptes() argument
1611 __folio_add_file_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE); in folio_add_file_rmap_ptes()
1624 void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, in folio_add_file_rmap_pmd() argument
1628 __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD); in folio_add_file_rmap_pmd()
1644 void folio_add_file_rmap_pud(struct folio *folio, struct page *page, in folio_add_file_rmap_pud() argument
1649 __folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD); in folio_add_file_rmap_pud()
1655 static __always_inline void __folio_remove_rmap(struct folio *folio, in __folio_remove_rmap() argument
1659 atomic_t *mapped = &folio->_nr_pages_mapped; in __folio_remove_rmap()
1663 __folio_rmap_sanity_checks(folio, page, nr_pages, level); in __folio_remove_rmap()
1667 if (!folio_test_large(folio)) { in __folio_remove_rmap()
1668 nr = atomic_add_negative(-1, &folio->_mapcount); in __folio_remove_rmap()
1673 nr = folio_sub_return_large_mapcount(folio, nr_pages, vma); in __folio_remove_rmap()
1676 nr = folio_large_nr_pages(folio); in __folio_remove_rmap()
1678 partially_mapped = nr < folio_large_nr_pages(folio) && in __folio_remove_rmap()
1679 !folio_entire_mapcount(folio); in __folio_remove_rmap()
1685 folio_sub_large_mapcount(folio, nr_pages, vma); in __folio_remove_rmap()
1699 last = atomic_add_negative(-1, &folio->_entire_mapcount); in __folio_remove_rmap()
1701 nr_pmdmapped = folio_large_nr_pages(folio); in __folio_remove_rmap()
1702 nr = folio_dec_return_large_mapcount(folio, vma); in __folio_remove_rmap()
1705 nr = folio_large_nr_pages(folio); in __folio_remove_rmap()
1708 nr < folio_large_nr_pages(folio); in __folio_remove_rmap()
1714 folio_dec_large_mapcount(folio, vma); in __folio_remove_rmap()
1715 last = atomic_add_negative(-1, &folio->_entire_mapcount); in __folio_remove_rmap()
1719 nr_pages = folio_large_nr_pages(folio); in __folio_remove_rmap()
1747 if (partially_mapped && folio_test_anon(folio) && in __folio_remove_rmap()
1748 !folio_test_partially_mapped(folio) && in __folio_remove_rmap()
1749 !folio_is_device_private(folio)) in __folio_remove_rmap()
1750 deferred_split_folio(folio, true); in __folio_remove_rmap()
1752 __folio_mod_stat(folio, -nr, -nr_pmdmapped); in __folio_remove_rmap()
1762 munlock_vma_folio(folio, vma); in __folio_remove_rmap()
1776 void folio_remove_rmap_ptes(struct folio *folio, struct page *page, in folio_remove_rmap_ptes() argument
1779 __folio_remove_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE); in folio_remove_rmap_ptes()
1792 void folio_remove_rmap_pmd(struct folio *folio, struct page *page, in folio_remove_rmap_pmd() argument
1796 __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD); in folio_remove_rmap_pmd()
1812 void folio_remove_rmap_pud(struct folio *folio, struct page *page, in folio_remove_rmap_pud() argument
1817 __folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD); in folio_remove_rmap_pud()
1823 static inline unsigned int folio_unmap_pte_batch(struct folio *folio, in folio_unmap_pte_batch() argument
1833 if (!folio_test_large(folio)) in folio_unmap_pte_batch()
1841 if (!folio_test_anon(folio) || folio_test_swapbacked(folio)) in folio_unmap_pte_batch()
1846 return folio_pte_batch(folio, pvmw->pte, pte, max_nr); in folio_unmap_pte_batch()
1852 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, in try_to_unmap_one() argument
1856 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in try_to_unmap_one()
1887 if (folio_test_hugetlb(folio)) { in try_to_unmap_one()
1932 mlock_vma_folio(folio, vma); in try_to_unmap_one()
1937 if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) { in try_to_unmap_one()
1938 if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio)) in try_to_unmap_one()
1962 VM_BUG_ON_FOLIO(!pvmw.pte, folio); in try_to_unmap_one()
1975 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); in try_to_unmap_one()
1978 subpage = folio_page(folio, pfn - folio_pfn(folio)); in try_to_unmap_one()
1980 anon_exclusive = folio_test_anon(folio) && in try_to_unmap_one()
1983 if (folio_test_hugetlb(folio)) { in try_to_unmap_one()
1984 bool anon = folio_test_anon(folio); in try_to_unmap_one()
2034 folio_mark_dirty(folio); in try_to_unmap_one()
2036 nr_pages = folio_unmap_pte_batch(folio, &pvmw, flags, pteval); in try_to_unmap_one()
2055 folio_mark_dirty(folio); in try_to_unmap_one()
2072 if (folio_test_hugetlb(folio)) { in try_to_unmap_one()
2073 hugetlb_count_sub(folio_nr_pages(folio), mm); in try_to_unmap_one()
2077 dec_mm_counter(mm, mm_counter(folio)); in try_to_unmap_one()
2092 dec_mm_counter(mm, mm_counter(folio)); in try_to_unmap_one()
2093 } else if (folio_test_anon(folio)) { in try_to_unmap_one()
2100 if (unlikely(folio_test_swapbacked(folio) != in try_to_unmap_one()
2101 folio_test_swapcache(folio))) { in try_to_unmap_one()
2107 if (!folio_test_swapbacked(folio)) { in try_to_unmap_one()
2117 ref_count = folio_ref_count(folio); in try_to_unmap_one()
2118 map_count = folio_mapcount(folio); in try_to_unmap_one()
2126 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { in try_to_unmap_one()
2132 folio_set_swapbacked(folio); in try_to_unmap_one()
2169 folio_try_share_anon_rmap_pte(folio, subpage)) { in try_to_unmap_one()
2209 dec_mm_counter(mm, mm_counter_file(folio)); in try_to_unmap_one()
2212 if (unlikely(folio_test_hugetlb(folio))) { in try_to_unmap_one()
2213 hugetlb_remove_rmap(folio); in try_to_unmap_one()
2215 folio_remove_rmap_ptes(folio, subpage, nr_pages, vma); in try_to_unmap_one()
2219 folio_put_refs(folio, nr_pages); in try_to_unmap_one()
2225 if (nr_pages == folio_nr_pages(folio)) in try_to_unmap_one()
2245 static int folio_not_mapped(struct folio *folio) in folio_not_mapped() argument
2247 return !folio_mapped(folio); in folio_not_mapped()
2261 void try_to_unmap(struct folio *folio, enum ttu_flags flags) in try_to_unmap() argument
2271 rmap_walk_locked(folio, &rwc); in try_to_unmap()
2273 rmap_walk(folio, &rwc); in try_to_unmap()
2282 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, in try_to_migrate_one() argument
2286 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in try_to_migrate_one()
2315 if (folio_test_hugetlb(folio)) { in try_to_migrate_one()
2348 subpage = folio_page(folio, pfn - folio_pfn(folio)); in try_to_migrate_one()
2350 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || in try_to_migrate_one()
2351 !folio_test_pmd_mappable(folio), folio); in try_to_migrate_one()
2363 VM_BUG_ON_FOLIO(!pvmw.pte, folio); in try_to_migrate_one()
2376 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); in try_to_migrate_one()
2379 subpage = folio_page(folio, pfn - folio_pfn(folio)); in try_to_migrate_one()
2381 anon_exclusive = folio_test_anon(folio) && in try_to_migrate_one()
2384 if (folio_test_hugetlb(folio)) { in try_to_migrate_one()
2385 bool anon = folio_test_anon(folio); in try_to_migrate_one()
2436 folio_mark_dirty(folio); in try_to_migrate_one()
2457 folio_mark_dirty(folio); in try_to_migrate_one()
2467 VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) && in try_to_migrate_one()
2468 !anon_exclusive, folio); in try_to_migrate_one()
2474 VM_WARN_ON_FOLIO(folio_is_device_private(folio), folio); in try_to_migrate_one()
2477 if (folio_test_hugetlb(folio)) { in try_to_migrate_one()
2478 hugetlb_count_sub(folio_nr_pages(folio), mm); in try_to_migrate_one()
2482 dec_mm_counter(mm, mm_counter(folio)); in try_to_migrate_one()
2497 dec_mm_counter(mm, mm_counter(folio)); in try_to_migrate_one()
2508 if (folio_test_hugetlb(folio)) in try_to_migrate_one()
2519 if (folio_test_hugetlb(folio)) { in try_to_migrate_one()
2521 hugetlb_try_share_anon_rmap(folio)) { in try_to_migrate_one()
2529 folio_try_share_anon_rmap_pte(folio, subpage)) { in try_to_migrate_one()
2567 if (folio_test_hugetlb(folio)) in try_to_migrate_one()
2573 folio_order(folio)); in try_to_migrate_one()
2580 if (unlikely(folio_test_hugetlb(folio))) in try_to_migrate_one()
2581 hugetlb_remove_rmap(folio); in try_to_migrate_one()
2583 folio_remove_rmap_pte(folio, subpage, vma); in try_to_migrate_one()
2586 folio_put(folio); in try_to_migrate_one()
2602 void try_to_migrate(struct folio *folio, enum ttu_flags flags) in try_to_migrate() argument
2619 if (folio_is_zone_device(folio) && in try_to_migrate()
2620 (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) in try_to_migrate()
2631 if (!folio_test_ksm(folio) && folio_test_anon(folio)) in try_to_migrate()
2635 rmap_walk_locked(folio, &rwc); in try_to_migrate()
2637 rmap_walk(folio, &rwc); in try_to_migrate()
2680 void *owner, struct folio **foliop) in make_device_exclusive()
2683 struct folio *folio, *fw_folio; in make_device_exclusive() local
2709 folio = page_folio(page); in make_device_exclusive()
2711 if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) { in make_device_exclusive()
2712 folio_put(folio); in make_device_exclusive()
2716 ret = folio_lock_killable(folio); in make_device_exclusive()
2718 folio_put(folio); in make_device_exclusive()
2738 if (fw_folio != folio || fw.page != page || in make_device_exclusive()
2743 folio_unlock(folio); in make_device_exclusive()
2744 folio_put(folio); in make_device_exclusive()
2754 folio_mark_dirty(folio); in make_device_exclusive()
2770 *foliop = folio; in make_device_exclusive()
2785 static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio, in rmap_walk_anon_lock() argument
2791 return rwc->anon_lock(folio, rwc); in rmap_walk_anon_lock()
2799 anon_vma = folio_anon_vma(folio); in rmap_walk_anon_lock()
2827 static void rmap_walk_anon(struct folio *folio, in rmap_walk_anon() argument
2838 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); in rmap_walk_anon()
2841 anon_vma = folio_anon_vma(folio); in rmap_walk_anon()
2843 VM_BUG_ON_FOLIO(!anon_vma, folio); in rmap_walk_anon()
2845 anon_vma = rmap_walk_anon_lock(folio, rwc); in rmap_walk_anon()
2850 pgoff_start = folio_pgoff(folio); in rmap_walk_anon()
2851 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; in rmap_walk_anon()
2856 folio_nr_pages(folio)); in rmap_walk_anon()
2864 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in rmap_walk_anon()
2866 if (rwc->done && rwc->done(folio)) in rmap_walk_anon()
2894 static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, in __rmap_walk_file() argument
2901 VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio); in __rmap_walk_file()
2902 VM_WARN_ON_FOLIO(folio && pgoff_start != folio_pgoff(folio), folio); in __rmap_walk_file()
2903 VM_WARN_ON_FOLIO(folio && nr_pages != folio_nr_pages(folio), folio); in __rmap_walk_file()
2927 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in __rmap_walk_file()
2929 if (rwc->done && rwc->done(folio)) in __rmap_walk_file()
2946 static void rmap_walk_file(struct folio *folio, in rmap_walk_file() argument
2955 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in rmap_walk_file()
2957 if (!folio->mapping) in rmap_walk_file()
2960 __rmap_walk_file(folio, folio->mapping, folio->index, in rmap_walk_file()
2961 folio_nr_pages(folio), rwc, locked); in rmap_walk_file()
2964 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) in rmap_walk() argument
2966 if (unlikely(folio_test_ksm(folio))) in rmap_walk()
2967 rmap_walk_ksm(folio, rwc); in rmap_walk()
2968 else if (folio_test_anon(folio)) in rmap_walk()
2969 rmap_walk_anon(folio, rwc, false); in rmap_walk()
2971 rmap_walk_file(folio, rwc, false); in rmap_walk()
2975 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) in rmap_walk_locked() argument
2978 VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); in rmap_walk_locked()
2979 if (folio_test_anon(folio)) in rmap_walk_locked()
2980 rmap_walk_anon(folio, rwc, true); in rmap_walk_locked()
2982 rmap_walk_file(folio, rwc, true); in rmap_walk_locked()
2991 void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, in hugetlb_add_anon_rmap() argument
2994 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); in hugetlb_add_anon_rmap()
2995 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); in hugetlb_add_anon_rmap()
2997 atomic_inc(&folio->_entire_mapcount); in hugetlb_add_anon_rmap()
2998 atomic_inc(&folio->_large_mapcount); in hugetlb_add_anon_rmap()
3000 SetPageAnonExclusive(&folio->page); in hugetlb_add_anon_rmap()
3001 VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 && in hugetlb_add_anon_rmap()
3002 PageAnonExclusive(&folio->page), folio); in hugetlb_add_anon_rmap()
3005 void hugetlb_add_new_anon_rmap(struct folio *folio, in hugetlb_add_new_anon_rmap() argument
3008 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); in hugetlb_add_new_anon_rmap()
3012 atomic_set(&folio->_entire_mapcount, 0); in hugetlb_add_new_anon_rmap()
3013 atomic_set(&folio->_large_mapcount, 0); in hugetlb_add_new_anon_rmap()
3014 folio_clear_hugetlb_restore_reserve(folio); in hugetlb_add_new_anon_rmap()
3015 __folio_set_anon(folio, vma, address, true); in hugetlb_add_new_anon_rmap()
3016 SetPageAnonExclusive(&folio->page); in hugetlb_add_new_anon_rmap()
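
The matches above cluster around the reverse-mapping walk entry points (rmap_walk(), rmap_walk_locked(), folio_lock_anon_vma_read()) and the callers that drive them (folio_referenced(), folio_mkclean(), try_to_unmap(), try_to_migrate()). The sketch below shows how such a caller is typically put together, mirroring the folio_trylock()/rmap_walk()/folio_unlock() pattern visible in folio_referenced(). It is a minimal, hypothetical example, not code from the file: count_mapping_vmas(), count_one(), count_done() and struct count_arg are invented names, and the rmap_walk_control field names (.rmap_one, .done, .arg, .anon_lock) are assumed to correspond to the rwc->rmap_one(), rwc->done() and rwc->anon_lock() uses shown in the listing.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>

/* Walk-private state handed to each callback via rwc->arg (hypothetical). */
struct count_arg {
	unsigned int nr_vmas;
};

/* Called once per VMA mapping the folio; return true to keep walking. */
static bool count_one(struct folio *folio, struct vm_area_struct *vma,
		      unsigned long address, void *arg)
{
	struct count_arg *ca = arg;

	ca->nr_vmas++;
	return true;
}

/* A non-zero return stops the walk early, like folio_not_mapped() above. */
static int count_done(struct folio *folio)
{
	return !folio_mapped(folio);
}

/* Count the VMAs that currently map @folio (illustrative only). */
static unsigned int count_mapping_vmas(struct folio *folio)
{
	struct count_arg ca = { 0 };
	struct rmap_walk_control rwc = {
		.rmap_one = count_one,
		.done = count_done,
		.arg = &ca,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool we_locked = false;

	/*
	 * rmap_walk_anon()/rmap_walk_file() warn or BUG if the folio is
	 * unlocked, so take the lock the way folio_referenced() does.
	 */
	if (!folio_test_locked(folio)) {
		we_locked = folio_trylock(folio);
		if (!we_locked)
			return 0;
	}

	rmap_walk(folio, &rwc);

	if (we_locked)
		folio_unlock(folio);
	return ca.nr_vmas;
}

Returning true from .rmap_one continues the walk over further VMAs, while a non-zero .done return ends it early, matching the rwc->rmap_one() and rwc->done() checks in rmap_walk_anon() and __rmap_walk_file() above.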