/linux/include/linux/vmstat.h
    431  __mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));   in __zone_stat_add_folio()
    437  __mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));  in __zone_stat_sub_folio()
    449  mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));     in zone_stat_add_folio()
    455  mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));    in zone_stat_sub_folio()
    467  __mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));  in __node_stat_add_folio()
    473  __mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio)); in __node_stat_sub_folio()
    485  mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));    in node_stat_add_folio()
    491  mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));   in node_stat_sub_folio()
    603  __lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));              in __lruvec_stat_add_folio()
    609  __lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));             in __lruvec_stat_sub_folio()
    [all …]

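For orientation, a minimal sketch of the wrapper pattern these vmstat.h hits show, assuming only the documented folio and vmstat APIs: every folio-based stat helper scales the underlying zone counter by folio_nr_pages(), so statistics stay in units of base pages even for large folios. The *_sketch names are placeholders, not kernel symbols.

    #include <linux/mm.h>
    #include <linux/vmstat.h>

    /* Sketch only: mirrors the add/sub pair shown above. */
    static inline void zone_stat_add_folio_sketch(struct folio *folio,
                                                  enum zone_stat_item item)
    {
            mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
    }

    static inline void zone_stat_sub_folio_sketch(struct folio *folio,
                                                  enum zone_stat_item item)
    {
            mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
    }

The node-level hits at lines 467-491 follow the same shape, substituting folio_pgdat() and mod_node_page_state().
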
/linux/arch/arm/mm/flush.c
    212  for (i = 0; i < folio_nr_pages(folio); i++) {       in __flush_dcache_folio()
    219  for (i = 0; i < folio_nr_pages(folio); i++) {       in __flush_dcache_folio()
    251  pgoff_end = pgoff + folio_nr_pages(folio) - 1;      in __flush_dcache_aliases()
    268  nr = folio_nr_pages(folio);                         in __flush_dcache_aliases()

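The flush.c hits show the common per-page loop over a large folio. Below is a hedged, self-contained illustration of that loop shape; it substitutes clear_highpage() for the arch-specific cache maintenance (which differs per architecture), so the helper name and the zeroing operation are illustrative, not arm code.

    #include <linux/mm.h>
    #include <linux/highmem.h>

    /* Walk every base page of a (possibly large) folio, one page at a time. */
    static void for_each_base_page_sketch(struct folio *folio)
    {
            long i, nr = folio_nr_pages(folio);

            for (i = 0; i < nr; i++)
                    clear_highpage(folio_page(folio, i)); /* stand-in for a per-page flush */
    }
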
/linux/mm/mlock.c
     81  folio_nr_pages(folio));                                          in __mlock_folio()
     97  __count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));  in __mlock_folio()
    115  __count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));  in __mlock_new_folio()
    124  int nr_pages = folio_nr_pages(folio);                            in __munlock_folio()
    250  int nr_pages = folio_nr_pages(folio);                            in mlock_folio()
    270  int nr_pages = folio_nr_pages(folio);                            in mlock_new_folio()
    346  if (step != folio_nr_pages(folio))                               in allow_mlock_munlock()

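The mlock.c hits bump statistics by the folio's page count rather than by one. A minimal sketch of that accounting shape, assuming only the standard vm-event API; the function is illustrative, not mm/mlock.c itself.

    #include <linux/mm.h>
    #include <linux/vmstat.h>

    /* Account an entire folio's worth of pages in one event update. */
    static void count_culled_folio_sketch(struct folio *folio)
    {
            int nr_pages = folio_nr_pages(folio);

            /* __count_vm_events() expects preemption already disabled,
             * as is the case for the real callers in mlock.c. */
            __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
    }
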
/linux/mm/migrate.c
    160   folio_is_file_lru(folio), -folio_nr_pages(folio));            in putback_movable_pages()
    455   refs += folio_nr_pages(folio);                                in folio_expected_refs()
    476   long nr = folio_nr_pages(folio);                              in __folio_migrate_mapping()
    639   folio_ref_add(dst, folio_nr_pages(dst));                      in migrate_huge_page_move_mapping()
    643   folio_ref_unfreeze(src, expected_count - folio_nr_pages(src)); in migrate_huge_page_move_mapping()
    1174  folio_is_file_lru(src), -folio_nr_pages(src));                in migrate_folio_done()
    1620  nr_pages = folio_nr_pages(folio);                             in migrate_hugetlbs()
    1706  nr_pages = folio_nr_pages(folio);                             in migrate_folios_move()
    1802  nr_pages = folio_nr_pages(folio);                             in migrate_pages_batch()
    2082  nr_pages += folio_nr_pages(folio);                            in migrate_pages()
    [all …]

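The folio_expected_refs() hit shows how migration derives an expected reference count: a mapping holds one reference per cached subpage, so folio_nr_pages() is added on top of any caller and private references. A simplified sketch of that reasoning, not the exact kernel logic; the helper name is mine.

    #include <linux/fs.h>
    #include <linux/mm.h>

    static int expected_refs_sketch(struct address_space *mapping,
                                    struct folio *folio)
    {
            int refs = 1;   /* reference held by the caller */

            if (mapping) {
                    refs += folio_nr_pages(folio);  /* one ref per cached subpage */
                    if (folio_test_private(folio))
                            refs++;                 /* fs private data / buffers */
            }
            return refs;
    }
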
/linux/mm/util.c
    883   long nr = folio_nr_pages(src);         in folio_copy()
    896   long nr = folio_nr_pages(src);         in folio_mc_copy()
    1238  long i, nr = folio_nr_pages(folio);    in flush_dcache_folio()

/linux/mm/khugepaged.c
    520   -folio_nr_pages(folio));                                       in release_pte_folio()
    556   expected_refcount += folio_nr_pages(folio);                    in is_refcount_suitable()
    672   folio_nr_pages(folio));                                        in __collapse_huge_page_isolate()
    2036  if (folio_ref_count(folio) != 2 + folio_nr_pages(folio)) {     in collapse_file()
    2047  index += folio_nr_pages(folio);                                in collapse_file()
    2095  int i, nr_pages = folio_nr_pages(folio);                       in collapse_file()
    2225  folio_put_refs(folio, 2 + folio_nr_pages(folio));              in collapse_file()
    2337  present += folio_nr_pages(folio);                              in hpage_collapse_scan_file()

/linux/mm/compaction.c
    1025  low_pfn += folio_nr_pages(folio) - 1;         in isolate_migratepages_block()
    1224  low_pfn += folio_nr_pages(folio) - 1;         in isolate_migratepages_block()
    1225  nr_scanned += folio_nr_pages(folio) - 1;      in isolate_migratepages_block()
    1233  low_pfn += folio_nr_pages(folio) - 1;         in isolate_migratepages_block()
    1239  folio_nr_pages(folio));                       in isolate_migratepages_block()
    1244  cc->nr_migratepages += folio_nr_pages(folio); in isolate_migratepages_block()
    1245  nr_isolated += folio_nr_pages(folio);         in isolate_migratepages_block()
    1246  nr_scanned += folio_nr_pages(folio) - 1;      in isolate_migratepages_block()

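The isolate_migratepages_block() hits all advance the scan cursor and counters by the folio size. A small sketch of that bookkeeping, under the assumption (as in the snippets) that the enclosing loop increments the pfn by one on its own, hence the "- 1"; the helper is illustrative, not kernel code.

    #include <linux/mm.h>

    /* After handling a large folio at 'pfn', skip its tail pages; the
     * surrounding loop's own increment accounts for the head page. */
    static unsigned long skip_folio_tail_pages_sketch(struct folio *folio,
                                                      unsigned long pfn,
                                                      unsigned long *nr_scanned)
    {
            *nr_scanned += folio_nr_pages(folio) - 1;
            return pfn + folio_nr_pages(folio) - 1;
    }
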
/linux/mm/swap_cgroup.c
    67  unsigned int nr_ents = folio_nr_pages(folio);  in swap_cgroup_record()

/linux/mm/page-writeback.c
    2570  wbc->nr_to_write -= folio_nr_pages(folio);  in writeback_iter()
    2742  long nr = folio_nr_pages(folio);            in folio_account_dirtied()
    2766  long nr = folio_nr_pages(folio);            in folio_account_cleaned()
    2854  long nr = folio_nr_pages(folio);            in folio_redirty_for_writepage()
    3030  long nr = folio_nr_pages(folio);            in folio_clear_dirty_for_io()
    3067  long nr = folio_nr_pages(folio);            in __folio_end_writeback()
    3107  long nr = folio_nr_pages(folio);            in __folio_start_writeback()

/linux/mm/filemap.c
    136   nr = folio_nr_pages(folio);                                    in page_cache_delete()
    181   nr = folio_nr_pages(folio);                                    in filemap_unaccount_folio()
    236   refs = folio_nr_pages(folio);                                  in filemap_free_folio()
    313   total_pages += folio_nr_pages(folio);                          in page_cache_delete_batch()
    874   VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);   in __filemap_add_folio()
    877   nr = folio_nr_pages(folio);                                    in __filemap_add_folio()
    2092  nr = folio_nr_pages(folio);                                    in find_get_entries()
    2134  nr = folio_nr_pages(folio);                                    in find_lock_entries()
    2243  nr = folio_nr_pages(folio);                                    in filemap_get_folios_contig()
    2303  unsigned long nr = folio_nr_pages(folio);                      in filemap_get_folios_tag()

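The filemap.c hits show two related rules: a large folio must sit at an index aligned to its size, and page-cache accounting moves by folio_nr_pages(). A hedged sketch using plain node-level counters (the real code additionally goes through lruvec/memcg accounting); the helper name is mine.

    #include <linux/mm.h>
    #include <linux/pagemap.h>
    #include <linux/vmstat.h>

    static void account_added_cache_folio_sketch(struct folio *folio, pgoff_t index)
    {
            long nr = folio_nr_pages(folio);

            /* A large folio occupies nr consecutive, naturally aligned indices. */
            VM_BUG_ON_FOLIO(index & (nr - 1), folio);

            mod_node_page_state(folio_pgdat(folio), NR_FILE_PAGES, nr);
    }
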
/linux/mm/huge_memory.c
    1902  1 + folio_test_swapcache(folio) * folio_nr_pages(folio))  in do_huge_pmd_wp_page()
    3139  i += folio_nr_pages(folio);                                in remap_page()
    3251  folio_nr_pages(new_folio) : 0));                           in __split_huge_page_tail()
    3307  folio_put_refs(tail, folio_nr_pages(tail));                in __split_huge_page()
    3384  folio_nr_pages(folio) : 0;                                 in can_split_folio()
    3386  extra_pins = folio_nr_pages(folio);                        in can_split_folio()
    3612  int nr = folio_nr_pages(folio);                            in split_huge_page_to_list_to_order()
    3640  remap_page(folio, folio_nr_pages(folio), 0);               in split_huge_page_to_list_to_order()
    3799  for (i = 0; i < folio_nr_pages(folio); i++) {              in thp_underused()
    3957  nr_pages = folio_nr_pages(folio);                          in split_huge_pages_all()
    [all …]

/linux/mm/madvise.c
    486  if (nr < folio_nr_pages(folio)) {                     in madvise_cold_or_pageout_pte_range()
    520  folio_mapcount(folio) != folio_nr_pages(folio))       in madvise_cold_or_pageout_pte_range()
    721  if (nr < folio_nr_pages(folio)) {                     in madvise_free_pte_range()
    759  if (folio_mapcount(folio) != folio_nr_pages(folio)) { in madvise_free_pte_range()

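Both madvise paths above compare folio_mapcount() with folio_nr_pages() before acting. A one-line sketch of that heuristic; the helper name is mine and the check is a simplification of how madvise.c uses the result.

    #include <linux/mm.h>

    /* True if every base page of the folio is mapped exactly once overall. */
    static bool folio_fully_and_solely_mapped_sketch(struct folio *folio)
    {
            return folio_mapcount(folio) == folio_nr_pages(folio);
    }
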
/linux/fs/freevxfs/vxfs_immed.c
    37  for (i = 0; i < folio_nr_pages(folio); i++) {  in vxfs_immed_read_folio()

/linux/arch/openrisc/mm/cache.c
    55  unsigned int nr = folio_nr_pages(folio);  in update_cache()

/linux/arch/arm64/mm/copypage.c
    46  nr_pages = folio_nr_pages(src);  in copy_highpage()

/linux/arch/nios2/mm/cacheflush.c
    80   unsigned long nr = folio_nr_pages(folio);  in flush_aliases()
    238  folio_nr_pages(folio));                    in update_mmu_cache_range()

/linux/arch/parisc/kernel/cache.c
    123  nr = folio_nr_pages(folio);        in __update_cache()
    488  nr = folio_nr_pages(folio);        in flush_dcache_folio()
    511  nr = folio_nr_pages(folio);        in flush_dcache_folio()
    536  if (nr == folio_nr_pages(folio))   in flush_dcache_folio()

/linux/arch/csky/abiv2/cacheflush.c
    28  for (i = 0; i < folio_nr_pages(folio); i++) {  in update_mmu_cache_range()

/linux/arch/mips/include/asm/cacheflush.h
    59  __flush_dcache_pages(&folio->page, folio_nr_pages(folio));  in flush_dcache_folio()

/linux/virt/kvm/guest_memfd.c
    25   return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1));  in folio_file_pfn()
    63   nr_pages = folio_nr_pages(folio);                                 in kvm_gmem_prepare_folio()
    343  end = start + folio_nr_pages(folio);                              in kvm_gmem_error_folio()

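The folio_file_pfn() hit relies on a folio's base pages being physically contiguous and its page-cache index being naturally aligned, so the PFN for a file index is the folio's head PFN plus the offset inside the folio. A sketch mirroring that line; the wrapper name and the plain unsigned long return type are mine.

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    static unsigned long pfn_for_index_sketch(struct folio *folio, pgoff_t index)
    {
            /* The mask keeps only the offset of 'index' within this folio. */
            return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1));
    }
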
/linux/arch/powerpc/mm/cacheflush.c
    153  unsigned int i, nr = folio_nr_pages(folio);  in flush_dcache_icache_folio()

/linux/arch/sh/mm/cache-sh7705.c
    144  unsigned int i, nr = folio_nr_pages(folio);  in sh7705_flush_dcache_folio()

/linux/arch/m68k/include/asm/cacheflush_mm.h
    260  __flush_pages_to_ram(folio_address(folio), folio_nr_pages(folio))

/linux/arch/mips/mm/cache.c
    162  for (i = 0; i < folio_nr_pages(folio); i++) {  in __update_cache()

/linux/fs/ramfs/file-nommu.c
    244  nr_pages += folio_nr_pages(fbatch.folios[loop]);  in ramfs_nommu_get_unmapped_area()