
Searched refs: folio_order (Results 1 – 25 of 30) sorted by relevance


/linux/virt/kvm/
guest_memfd.c
34 int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio)); in __kvm_gmem_prepare_folio()
80 WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, 1 << folio_order(folio))); in kvm_gmem_prepare_folio()
82 index = ALIGN_DOWN(index, 1 << folio_order(folio)); in kvm_gmem_prepare_folio()
370 int order = folio_order(folio);
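The guest_memfd.c hits above all treat 1 << folio_order(folio) as the folio's size in pages: the WARN_ON checks that the slot offset is aligned to it, and ALIGN_DOWN rounds an arbitrary page-cache index down to the folio's first index. A minimal userspace sketch of that arithmetic (the order and index values are hypothetical, and this ALIGN_DOWN is a simplified power-of-two-only stand-in for the kernel macro):

#include <stdio.h>

/* power-of-two-only stand-in for the kernel's ALIGN_DOWN() */
#define ALIGN_DOWN(x, a) ((x) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned int order = 2;      /* hypothetical order-2 folio: 4 pages */
	unsigned long index = 1027;  /* hypothetical page-cache index */

	/* round down to the first index covered by the folio */
	unsigned long first = ALIGN_DOWN(index, 1UL << order);

	printf("index %lu lies in the order-%u folio starting at %lu\n",
	       index, order, first);  /* prints 1024 */
	return 0;
}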
/linux/mm/
huge_memory.c
3278 int order = folio_order(folio); in __split_huge_page()
3450 int order = folio_order(folio); in split_huge_page_to_list_to_order()
3458 if (new_order >= folio_order(folio)) in split_huge_page_to_list_to_order()
3546 xas_split_alloc(&xas, folio, folio_order(folio), gfp); in split_huge_page_to_list_to_order()
3594 if (folio_order(folio) > 1 && in split_huge_page_to_list_to_order()
3599 mod_mthp_stat(folio_order(folio), in split_huge_page_to_list_to_order()
3614 xas_split(&xas, folio, folio_order(folio)); in split_huge_page_to_list_to_order()
3711 mod_mthp_stat(folio_order(folio), in __folio_unqueue_deferred_split()
3735 if (folio_order(folio) <= 1) in deferred_split_folio()
3757 count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED); in deferred_split_folio()
[all …]
migrate.c
493 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); in __folio_migrate_mapping()
519 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); in __folio_migrate_mapping()
2181 order = folio_order(src); in alloc_migration_target()
2627 int order = folio_order(src); in alloc_misplaced_dst_folio()
2691 folio_order(folio), ZONE_MOVABLE); in migrate_misplaced_folio_prepare()
debug.c
83 folio_order(folio), in __dump_folio()
readahead.c
632 unsigned int order = folio_order(folio); in page_cache_async_ra()
shmem.c
873 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); in shmem_add_to_page_cache()
1922 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK); in shmem_alloc_and_add_folio()
1923 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE); in shmem_alloc_and_add_folio()
2071 new = shmem_alloc_folio(gfp, folio_order(old), info, index); in shmem_replace_folio()
2341 } else if (order != folio_order(folio)) { in shmem_swapin_folio()
2372 xa_get_order(&mapping->i_pages, index) != folio_order(folio)) { in shmem_swapin_folio()
2538 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC); in shmem_get_folio_gfp()
khugepaged.c
1548 if (folio_order(folio) != HPAGE_PMD_ORDER) { in collapse_pte_mapped_thp()
1983 if (folio_order(folio) == HPAGE_PMD_ORDER && in collapse_file()
2300 if (folio_order(folio) == HPAGE_PMD_ORDER && in hpage_collapse_scan_file()
compaction.c
1221 if (unlikely(skip_isolation_on_order(folio_order(folio), in isolate_migratepages_block()
1837 int order = folio_order(src); in compaction_alloc_noprof()
1894 int order = folio_order(dst); in compaction_free()
filemap.c
135 xas_set_order(&xas, folio->index, folio_order(folio)); in page_cache_delete()
870 VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping), in __filemap_add_folio()
875 xas_set_order(&xas, index, folio_order(folio)); in __filemap_add_folio()
910 if (order > 0 && order > folio_order(folio)) { in __filemap_add_folio()
mempolicy.c
1224 order = folio_order(src); in alloc_migration_target_by_mpol()
1381 order = folio_order(folio); in do_mbind()
2778 pol = get_vma_policy(vma, addr, folio_order(folio), &ilx); in mpol_misplaced()
internal.h
693 if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio)) in folio_unqueue_deferred_split()
swapfile.c
1570 int size = 1 << swap_entry_order(folio_order(folio)); in put_swap_folio()
1717 return swap_page_trans_huge_swapped(si, entry, folio_order(folio)); in folio_swapped()
slub.c
2198 folio_order(folio))) in memcg_slab_post_charge()
2647 int order = folio_order(folio); in __free_slab()
4717 unsigned int order = folio_order(folio); in free_large_kmalloc()
memory.c
4936 count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC); in do_anonymous_page()
5053 if (folio_order(folio) != HPAGE_PMD_ORDER) in do_set_pmd()
/linux/fs/bcachefs/
fs-io-pagecache.h
26 return PAGE_SECTORS << folio_order(folio); in folio_sectors()
/linux/fs/netfs/
rolling_buffer.c
137 unsigned int order = folio_order(folio); in rolling_buffer_load_from_ra()
/linux/include/trace/events/
filemap.h
38 __entry->order = folio_order(folio);
/linux/fs/btrfs/
extent_io.h
300 if (folio_order(eb->folios[0])) in num_extent_folios()
subpage.c
197 ASSERT(folio_order(folio) == 0); in btrfs_subpage_assert()
bio.c
180 ASSERT(folio_order(page_folio(bv->bv_page)) == 0); in btrfs_end_repair_bio()
file.c
879 ASSERT(folio_order(folio) == 0); in prepare_one_folio()
1794 ASSERT(folio_order(folio) == 0); in btrfs_page_mkwrite()
disk-io.c
3858 ASSERT(folio_order(folio) == 0); in write_dev_supers()
3931 ASSERT(folio_order(folio) == 0); in wait_dev_supers()
/linux/include/linux/
mm.h
1126 static inline unsigned int folio_order(const struct folio *folio) in folio_order() function
2135 return PAGE_SHIFT + folio_order(folio); in folio_shift()
2148 return PAGE_SIZE << folio_order(folio); in folio_size()
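The three mm.h hits above are the definition site and the two derived helpers that appear throughout these results: folio_order() is the log2 of the folio's page count, folio_shift() adds it to PAGE_SHIFT, and folio_size() shifts PAGE_SIZE by it. A small userspace sketch of those relationships (struct folio here is an illustrative stand-in, not the kernel structure):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* illustrative stand-in for the kernel's struct folio */
struct folio { unsigned int order; };

static unsigned int folio_order(const struct folio *folio)
{
	return folio->order;
}

int main(void)
{
	struct folio folio = { .order = 4 };  /* hypothetical order-4 folio */

	printf("pages=%lu shift=%u size=%lu\n",
	       1UL << folio_order(&folio),         /* 16 pages */
	       PAGE_SHIFT + folio_order(&folio),   /* folio_shift(): 16 */
	       PAGE_SIZE << folio_order(&folio));  /* folio_size(): 65536 bytes */
	return 0;
}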
/linux/fs/ubifs/
file.c
153 if (++i >= (UBIFS_BLOCKS_PER_PAGE << folio_order(folio))) in do_readpage()
/linux/drivers/md/
dm-crypt.c
1784 1 << folio_order(fi.folio)); in crypt_free_buffer_pages()
