
Searched refs: folio_order (Results 1 – 25 of 40), sorted by relevance


/linux/mm/
page_io.c 276 count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPOUT); in swap_writeout()
299 count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT); in count_swpout_vm_event()
491 count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN); in sio_read_complete()
586 count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN); in swap_read_folio_bdev_sync()
603 count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN); in swap_read_folio_bdev_async()
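Every page_io.c hit above follows the same pattern: the swap-in/swap-out event is counted with count_mthp_stat(), keyed by folio_order(folio), so each mTHP statistic is bucketed per folio size. A minimal userspace sketch of that per-order bucketing (demo_count_mthp_stat, the DEMO_* names and MAX_ORDER_BUCKETS are illustrative, not the kernel's own):

#include <stdio.h>

#define MAX_ORDER_BUCKETS 11                    /* assumed: orders 0..10 */

enum demo_mthp_stat { DEMO_SWPOUT, DEMO_SWPIN, DEMO_NR_STATS };

/* One counter per (order, event) pair, mirroring the per-order keying above. */
static unsigned long demo_stats[MAX_ORDER_BUCKETS][DEMO_NR_STATS];

static void demo_count_mthp_stat(unsigned int order, enum demo_mthp_stat item)
{
        if (order < MAX_ORDER_BUCKETS)
                demo_stats[order][item]++;
}

int main(void)
{
        demo_count_mthp_stat(9, DEMO_SWPOUT);   /* e.g. an order-9 (2 MiB) folio swapped out */
        printf("order-9 swapouts: %lu\n", demo_stats[9][DEMO_SWPOUT]);
        return 0;
}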
huge_memory.c 1643 if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER)) in vmf_insert_folio_pmd()
1759 if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER)) in vmf_insert_folio_pud()
3633 int old_order = folio_order(folio); in __split_unmapped_folio()
3787 int old_order = folio_order(folio); in __folio_freeze_and_split_unmapped()
3953 int old_order = folio_order(folio); in __folio_split()
4303 mod_mthp_stat(folio_order(folio), in __folio_unqueue_deferred_split()
4324 if (folio_order(folio) <= 1) in deferred_split_folio()
4346 count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED); in deferred_split_folio()
4347 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1); in deferred_split_folio()
4428 mod_mthp_stat(folio_order(folio), in deferred_split_scan()
[all …]
swap_state.c 346 folio_order(old) != folio_order(new)) { in free_pages_and_swap_cache()
migrate.c 594 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); in __folio_migrate_mapping()
627 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); in __folio_migrate_mapping()
2199 order = folio_order(src); in alloc_migration_target()
2640 int order = folio_order(src); in alloc_misplaced_dst_folio()
2703 folio_order(folio), ZONE_MOVABLE); in migrate_misplaced_folio_prepare()
debug.c 90 folio_order(folio), in __dump_folio()
swapfile.c 886 order = folio_order(folio); in cluster_alloc_range()
921 unsigned int order = likely(folio) ? folio_order(folio) : 0; in alloc_swap_scan_cluster()
1045 unsigned int order = likely(folio) ? folio_order(folio) : 0; in cluster_alloc_swap_entry()
1320 unsigned int order = folio_order(folio); in swap_alloc_fast()
1495 unsigned int order = folio_order(folio); in folio_alloc_swap()
1834 return swap_page_trans_huge_swapped(si, entry, folio_order(folio)); in folio_swapped()
slab.h 177 return folio_order(slab_folio(slab)); in slab_order()
shmem.c 885 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); in shmem_add_to_page_cache()
1627 order = folio_order(folio); in shmem_writeout()
1992 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK); in shmem_alloc_and_add_folio()
1993 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE); in shmem_alloc_and_add_folio()
2151 new = shmem_alloc_folio(gfp, folio_order(old), info, index); in shmem_replace_folio()
2364 if (order > folio_order(folio)) { in shmem_swapin_folio()
2560 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC); in shmem_get_folio_gfp()
compaction.c 1185 if (unlikely(skip_isolation_on_order(folio_order(folio), in isolate_migratepages_block()
1801 int order = folio_order(src); in compaction_alloc_noprof()
1858 int order = folio_order(dst); in compaction_free()
khugepaged.c 1511 if (folio_order(folio) != HPAGE_PMD_ORDER) { in try_collapse_pte_mapped_thp()
1994 if (folio_order(folio) == HPAGE_PMD_ORDER && in collapse_file()
2323 if (folio_order(folio) == HPAGE_PMD_ORDER && in hpage_collapse_scan_file()
swap.c 112 free_frozen_pages(&folio->page, folio_order(folio)); in __folio_put()
/linux/drivers/gpu/drm/
drm_pagemap.c 244 order = folio_order(folio); in drm_pagemap_migrate_map_pages()
561 unsigned long nr_pages = src_page ? NR_PAGES(folio_order(page_folio(src_page))) : 1; in drm_pagemap_migrate_to_devmem()
653 unsigned long nr_pages = page ? NR_PAGES(folio_order(page_folio(page))) : 1; in drm_pagemap_migrate_to_devmem()
723 order = folio_order(page_folio(src_page)); in drm_pagemap_migrate_populate_ram_pfn()
755 order = folio_order(page_folio(page)); in drm_pagemap_migrate_populate_ram_pfn()
775 order = folio_order(page_folio(page)); in drm_pagemap_migrate_populate_ram_pfn()
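The drm_pagemap.c hits above convert a folio's order into a page count via NR_PAGES(folio_order(...)). An order-n folio spans 2^n base pages, so the conversion is a single shift; the sketch below uses an illustrative helper and assumes NR_PAGES() performs this same 1UL << order conversion:

#include <stdio.h>

/* Illustrative helper: an order-n folio covers 2^n base pages. */
static unsigned long nr_pages_from_order(unsigned int order)
{
        return 1UL << order;
}

int main(void)
{
        printf("order 0 -> %lu page(s)\n", nr_pages_from_order(0));  /* single page */
        printf("order 9 -> %lu page(s)\n", nr_pages_from_order(9));  /* 512 pages, 2 MiB with 4 KiB pages */
        return 0;
}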
/linux/lib/
test_kho.c 73 unsigned int order = folio_order(folio); in kho_test_preserve_data()
271 if (folio_order(folio) != order) in kho_test_restore_data()
test_hmm.c 969 order = folio_order(page_folio(spage)); in dmirror_devmem_fault_alloc_and_copy()
1405 order = folio_order(page_folio(spage)); in dmirror_device_evict_chunk()
1591 unsigned int order = folio_order(rfolio); in dmirror_devmem_free()
1635 order = folio_order(page_folio(vmf->page)); in dmirror_devmem_fault()
/linux/drivers/gpu/drm/nouveau/
nouveau_dmem.c 125 if (folio_order(folio)) { in nouveau_dmem_folio_free()
206 order = folio_order(sfolio); in nouveau_dmem_migrate_to_ram()
494 unsigned int order = folio_order(folio); in nouveau_dmem_evict_chunk()
770 if (folio_order(page_folio(dpage))) in nouveau_dmem_migrate_copy_one()
804 order = folio_order(folio); in nouveau_dmem_migrate_chunk()
/linux/drivers/iommu/
iommu-pages.c 28 return 1UL << (folio_order(ioptdesc_folio(desc)) + PAGE_SHIFT); in ioptdesc_mem_size()
/linux/virt/kvm/
guest_memfd.c 68 int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio)); in __kvm_gmem_prepare_folio()
147 WARN_ON_ONCE(!IS_ERR(folio) && folio_order(folio)); in kvm_gmem_get_folio()
530 int order = folio_order(folio); in kvm_gmem_free_folio()
/linux/fs/netfs/
rolling_buffer.c 137 unsigned int order = folio_order(folio); in rolling_buffer_load_from_ra()
/linux/include/trace/events/
filemap.h 38 __entry->order = folio_order(folio);
/linux/fs/btrfs/
extent_io.h 296 if (folio_order(eb->folios[0])) in num_extent_folios()
/linux/drivers/hv/
mshv_regions.c 49 page_order = folio_order(page_folio(page)); in mshv_chunk_stride()
/linux/include/linux/
mm.h 1449 static inline unsigned int folio_order(const struct folio *folio) in folio_order() (function definition)
2327 return folio_order(folio) > 1; in folio_has_pincount()
2559 return PAGE_SHIFT + folio_order(folio); in folio_shift()
2572 return PAGE_SIZE << folio_order(folio); in folio_size()
2674 const int order = folio_order(folio); in folio_expected_ref_count()
huge_mm.h 518 return folio_order(folio) >= HPAGE_PMD_ORDER; in folio_test_pmd_mappable()
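The include/linux hits tie the listing together: mm.h 1449 is the definition of folio_order(), and the other hits are its direct consumers, with folio_shift() = PAGE_SHIFT + order, folio_size() = PAGE_SIZE << order, and folio_test_pmd_mappable() testing order >= HPAGE_PMD_ORDER. A standalone sketch of that arithmetic (PAGE_SHIFT = 12 and HPAGE_PMD_ORDER = 9 are assumed example values for a 4 KiB-page configuration; the real values come from the architecture headers):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT      12                      /* assumed: 4 KiB base pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define HPAGE_PMD_ORDER 9                       /* assumed: 2 MiB PMD huge pages */

/* folio_shift(): log2 of the folio size in bytes. */
static unsigned int order_to_shift(unsigned int order)
{
        return PAGE_SHIFT + order;
}

/* folio_size(): size of the folio in bytes. */
static unsigned long order_to_bytes(unsigned int order)
{
        return PAGE_SIZE << order;
}

/* folio_test_pmd_mappable(): large enough to be mapped by a PMD entry. */
static bool order_is_pmd_mappable(unsigned int order)
{
        return order >= HPAGE_PMD_ORDER;
}

int main(void)
{
        for (unsigned int order = 0; order <= 9; order++)
                printf("order %u: shift %u, %lu bytes, pmd-mappable %d\n",
                       order, order_to_shift(order), order_to_bytes(order),
                       order_is_pmd_mappable(order));
        return 0;
}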
/linux/kernel/liveupdate/
kexec_handover.c 819 const unsigned int order = folio_order(folio); in kho_preserve_folio()
840 const unsigned int order = folio_order(folio); in kho_unpreserve_folio()
/linux/fs/
dax.c 395 order = folio_order(folio); in dax_folio_put()
431 WARN_ON_ONCE(folio_order(folio)); in dax_folio_init()
457 WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio)); in dax_associate_entry()
