/linux/virt/kvm/
  guest_memfd.c
      34  int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));  in __kvm_gmem_prepare_folio()
      80  WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, 1 << folio_order(folio)));  in kvm_gmem_prepare_folio()
      82  index = ALIGN_DOWN(index, 1 << folio_order(folio));  in kvm_gmem_prepare_folio()
     370  int order = folio_order(folio);

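A note on the kvm_gmem_prepare_folio() hits above: when a large folio backs the range, the index is rounded down to the folio's natural boundary with ALIGN_DOWN(index, 1 << folio_order(folio)). Below is a minimal userspace sketch of that rounding, with ALIGN_DOWN written out by hand; the helper name is illustrative, not a kernel macro.

#include <stdio.h>

/*
 * Round a page-cache index down to the first index covered by a folio of
 * the given order. 1 << order is the folio's page count, a power of two,
 * so masking off the low bits performs the ALIGN_DOWN.
 */
static unsigned long folio_aligned_index(unsigned long index, unsigned int order)
{
	unsigned long nr_pages = 1ul << order;

	return index & ~(nr_pages - 1);
}

int main(void)
{
	/* Index 37 inside an order-4 (16-page) folio maps to folio start 32. */
	printf("%lu\n", folio_aligned_index(37, 4));
	return 0;
}
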
/linux/mm/
  huge_memory.c
    3278  int order = folio_order(folio);  in __split_huge_page()
    3450  int order = folio_order(folio);  in split_huge_page_to_list_to_order()
    3458  if (new_order >= folio_order(folio))  in split_huge_page_to_list_to_order()
    3546  xas_split_alloc(&xas, folio, folio_order(folio), gfp);  in split_huge_page_to_list_to_order()
    3594  if (folio_order(folio) > 1 &&  in split_huge_page_to_list_to_order()
    3599  mod_mthp_stat(folio_order(folio),  in split_huge_page_to_list_to_order()
    3614  xas_split(&xas, folio, folio_order(folio));  in split_huge_page_to_list_to_order()
    3711  mod_mthp_stat(folio_order(folio),  in __folio_unqueue_deferred_split()
    3735  if (folio_order(folio) <= 1)  in deferred_split_folio()
    3757  count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);  in deferred_split_folio()
          [all …]
  migrate.c
     493  mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);  in __folio_migrate_mapping()
     519  mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);  in __folio_migrate_mapping()
    2181  order = folio_order(src);  in alloc_migration_target()
    2627  int order = folio_order(src);  in alloc_misplaced_dst_folio()
    2691  folio_order(folio), ZONE_MOVABLE);  in migrate_misplaced_folio_prepare()
  debug.c
      83  folio_order(folio),  in __dump_folio()
  readahead.c
     632  unsigned int order = folio_order(folio);  in page_cache_async_ra()
  shmem.c
     873  XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));  in shmem_add_to_page_cache()
    1922  count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);  in shmem_alloc_and_add_folio()
    1923  count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);  in shmem_alloc_and_add_folio()
    2071  new = shmem_alloc_folio(gfp, folio_order(old), info, index);  in shmem_replace_folio()
    2341  } else if (order != folio_order(folio)) {  in shmem_swapin_folio()
    2372  xa_get_order(&mapping->i_pages, index) != folio_order(folio)) {  in shmem_swapin_folio()
    2538  count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);  in shmem_get_folio_gfp()
  khugepaged.c
    1548  if (folio_order(folio) != HPAGE_PMD_ORDER) {  in collapse_pte_mapped_thp()
    1983  if (folio_order(folio) == HPAGE_PMD_ORDER &&  in collapse_file()
    2300  if (folio_order(folio) == HPAGE_PMD_ORDER &&  in hpage_collapse_scan_file()
  compaction.c
    1221  if (unlikely(skip_isolation_on_order(folio_order(folio),  in isolate_migratepages_block()
    1837  int order = folio_order(src);  in compaction_alloc_noprof()
    1894  int order = folio_order(dst);  in compaction_free()
  filemap.c
     135  xas_set_order(&xas, folio->index, folio_order(folio));  in page_cache_delete()
     870  VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),  in __filemap_add_folio()
     875  xas_set_order(&xas, index, folio_order(folio));  in __filemap_add_folio()
     910  if (order > 0 && order > folio_order(folio)) {  in __filemap_add_folio()
  mempolicy.c
    1224  order = folio_order(src);  in alloc_migration_target_by_mpol()
    1381  order = folio_order(folio);  in do_mbind()
    2778  pol = get_vma_policy(vma, addr, folio_order(folio), &ilx);  in mpol_misplaced()
  internal.h
     693  if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))  in folio_unqueue_deferred_split()
  swapfile.c
    1570  int size = 1 << swap_entry_order(folio_order(folio));  in put_swap_folio()
    1717  return swap_page_trans_huge_swapped(si, entry, folio_order(folio));  in folio_swapped()
  slub.c
    2198  folio_order(folio)))  in memcg_slab_post_charge()
    2647  int order = folio_order(folio);  in __free_slab()
    4717  unsigned int order = folio_order(folio);  in free_large_kmalloc()
  memory.c
    4936  count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);  in do_anonymous_page()
    5053  if (folio_order(folio) != HPAGE_PMD_ORDER)  in do_set_pmd()

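Several of the mm/ hits above gate work on the current order: split_huge_page_to_list_to_order() bails out when the requested new_order is not strictly smaller than folio_order(folio), and deferred_split_folio() skips folios of order 0 or 1. The following is a hedged userspace sketch of those checks; the helper names are illustrative, not the kernel functions.

#include <stdbool.h>
#include <stdio.h>

/*
 * Modeled on the checks visible in the hits above: a folio can only be
 * split to a strictly smaller order, and order-0/1 folios are never
 * queued for deferred splitting. Illustrative only.
 */
static bool can_split_to_order(unsigned int folio_order, unsigned int new_order)
{
	return new_order < folio_order;
}

static bool wants_deferred_split(unsigned int folio_order)
{
	return folio_order > 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       can_split_to_order(9, 0),   /* PMD-order folio -> order 0: allowed */
	       can_split_to_order(4, 4),   /* same order: rejected */
	       wants_deferred_split(1));   /* order-1 folio: not queued */
	return 0;
}
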
/linux/fs/bcachefs/
  fs-io-pagecache.h
      26  return PAGE_SECTORS << folio_order(folio);  in folio_sectors()

/linux/fs/netfs/
  rolling_buffer.c
     137  unsigned int order = folio_order(folio);  in rolling_buffer_load_from_ra()

/linux/include/trace/events/
  filemap.h
      38  __entry->order = folio_order(folio);

/linux/fs/btrfs/
  extent_io.h
     300  if (folio_order(eb->folios[0]))  in num_extent_folios()
  subpage.c
     197  ASSERT(folio_order(folio) == 0);  in btrfs_subpage_assert()
  bio.c
     180  ASSERT(folio_order(page_folio(bv->bv_page)) == 0);  in btrfs_end_repair_bio()
  file.c
     879  ASSERT(folio_order(folio) == 0);  in prepare_one_folio()
    1794  ASSERT(folio_order(folio) == 0);  in btrfs_page_mkwrite()
  disk-io.c
    3858  ASSERT(folio_order(folio) == 0);  in write_dev_supers()
    3931  ASSERT(folio_order(folio) == 0);  in wait_dev_supers()

/linux/include/linux/
  mm.h
    1126  static inline unsigned int folio_order(const struct folio *folio)  in folio_order() (function definition)
    2135  return PAGE_SHIFT + folio_order(folio);  in folio_shift()
    2148  return PAGE_SIZE << folio_order(folio);  in folio_size()

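For reference, the three mm.h hits above show how the helpers relate: folio_order() returns the log2 page count of the folio, folio_shift() adds PAGE_SHIFT to that to give the log2 byte size, and folio_size() shifts PAGE_SIZE by the order to give the size in bytes. Below is a small userspace model of that arithmetic, assuming 4 KiB base pages; it is a sketch, not the kernel implementation.

#include <stdio.h>

/* Userspace model only; the real helpers live in include/linux/mm.h. */
#define PAGE_SHIFT 12u                  /* assumes 4 KiB base pages */
#define PAGE_SIZE  (1ul << PAGE_SHIFT)

/* folio_shift(): PAGE_SHIFT + folio_order(), i.e. log2 of the byte size. */
static unsigned int model_folio_shift(unsigned int order)
{
	return PAGE_SHIFT + order;
}

/* folio_size(): PAGE_SIZE << folio_order(), i.e. the size in bytes. */
static unsigned long model_folio_size(unsigned int order)
{
	return PAGE_SIZE << order;
}

int main(void)
{
	/* Order 0 is one 4 KiB page; order 9 is a 2 MiB PMD-sized folio. */
	for (unsigned int order = 0; order <= 9; order++)
		printf("order %u: shift %u, size %lu bytes\n",
		       order, model_folio_shift(order), model_folio_size(order));
	return 0;
}
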
/linux/fs/ubifs/
  file.c
     153  if (++i >= (UBIFS_BLOCKS_PER_PAGE << folio_order(folio)))  in do_readpage()

/linux/drivers/md/
  dm-crypt.c
    1784  1 << folio_order(fi.folio));  in crypt_free_buffer_pages()