/linux/mm/
page_io.c
    276  count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPOUT);  in swap_writeout()
    299  count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);  in count_swpout_vm_event()
    491  count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);  in sio_read_complete()
    586  count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);  in swap_read_folio_bdev_sync()
    603  count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);  in swap_read_folio_bdev_async()
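Every page_io.c hit follows one pattern: when a folio finishes swapping in or out, the per-order mTHP counter matching the folio's size is bumped. A minimal sketch of that pattern (example_count_swpout() is a hypothetical wrapper for illustration, not a kernel function):

```c
#include <linux/mm.h>		/* folio_order() */
#include <linux/huge_mm.h>	/* count_mthp_stat(), MTHP_STAT_* */

/* Hypothetical helper condensing the pattern above: one swap-out event,
 * attributed to the stats bucket for this folio's order. */
static void example_count_swpout(struct folio *folio)
{
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
}
```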
|
huge_memory.c
    1643  if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER))  in vmf_insert_folio_pmd()
    1759  if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER))  in vmf_insert_folio_pud()
    3633  int old_order = folio_order(folio);  in __split_unmapped_folio()
    3787  int old_order = folio_order(folio);  in __folio_freeze_and_split_unmapped()
    3953  int old_order = folio_order(folio);  in __folio_split()
    4303  mod_mthp_stat(folio_order(folio),  in __folio_unqueue_deferred_split()
    4324  if (folio_order(folio) <= 1)  in deferred_split_folio()
    4346  count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);  in deferred_split_folio()
    4347  mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);  in deferred_split_folio()
    4428  mod_mthp_stat(folio_order(folio),  in deferred_split_scan()
    [all …]
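Three of the deferred_split_folio() hits combine into one accounting step. A hedged sketch of just that step (example_queue_deferred_split() is hypothetical; the real function also manipulates the split queue under a lock):

```c
/* Hypothetical condensation of the deferred_split_folio() hits: folios of
 * order 0 or 1 are never queued for deferred split, and queueing updates
 * the per-order counters shown above. */
static void example_queue_deferred_split(struct folio *folio)
{
	if (folio_order(folio) <= 1)
		return;	/* too small to benefit from a deferred split */
	count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
	mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
}
```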
|
swap_state.c
    346  folio_order(old) != folio_order(new)) {  in free_pages_and_swap_cache()
|
migrate.c
    594  mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);  in __folio_migrate_mapping()
    627  mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);  in __folio_migrate_mapping()
    2199  order = folio_order(src);  in alloc_migration_target()
    2640  int order = folio_order(src);  in alloc_misplaced_dst_folio()
    2703  folio_order(folio), ZONE_MOVABLE);  in migrate_misplaced_folio_prepare()
|
debug.c
    90  folio_order(folio),  in __dump_folio()
|
swapfile.c
    886  order = folio_order(folio);  in cluster_alloc_range()
    921  unsigned int order = likely(folio) ? folio_order(folio) : 0;  in alloc_swap_scan_cluster()
    1045  unsigned int order = likely(folio) ? folio_order(folio) : 0;  in cluster_alloc_swap_entry()
    1320  unsigned int order = folio_order(folio);  in swap_alloc_fast()
    1495  unsigned int order = folio_order(folio);  in folio_alloc_swap()
    1834  return swap_page_trans_huge_swapped(si, entry, folio_order(folio));  in folio_swapped()
|
slab.h
    177  return folio_order(slab_folio(slab));  in slab_order()
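The slab allocator derives per-slab sizing from the backing folio's order. A minimal sketch of that wrapper, with a byte-size helper assumed to pair with it as PAGE_SIZE << order (slab_size() here is my reconstruction, not taken from the hits):

```c
/* Sketch of the mm/slab.h pattern: a slab's order is its folio's order,
 * and its byte size follows directly from that order. */
static inline unsigned int slab_order(const struct slab *slab)
{
	return folio_order(slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);	/* assumed companion helper */
}
```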
|
shmem.c
    885  XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));  in shmem_add_to_page_cache()
    1627  order = folio_order(folio);  in shmem_writeout()
    1992  count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);  in shmem_alloc_and_add_folio()
    1993  count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);  in shmem_alloc_and_add_folio()
    2151  new = shmem_alloc_folio(gfp, folio_order(old), info, index);  in shmem_replace_folio()
    2364  if (order > folio_order(folio)) {  in shmem_swapin_folio()
    2560  count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);  in shmem_get_folio_gfp()
|
compaction.c
    1185  if (unlikely(skip_isolation_on_order(folio_order(folio),  in isolate_migratepages_block()
    1801  int order = folio_order(src);  in compaction_alloc_noprof()
    1858  int order = folio_order(dst);  in compaction_free()
|
khugepaged.c
    1511  if (folio_order(folio) != HPAGE_PMD_ORDER) {  in try_collapse_pte_mapped_thp()
    1994  if (folio_order(folio) == HPAGE_PMD_ORDER &&  in collapse_file()
    2323  if (folio_order(folio) == HPAGE_PMD_ORDER &&  in hpage_collapse_scan_file()
|
swap.c
    112  free_frozen_pages(&folio->page, folio_order(folio));  in __folio_put()
|
/linux/drivers/gpu/drm/
drm_pagemap.c
    244  order = folio_order(folio);  in drm_pagemap_migrate_map_pages()
    561  unsigned long nr_pages = src_page ? NR_PAGES(folio_order(page_folio(src_page))) : 1;  in drm_pagemap_migrate_to_devmem()
    653  unsigned long nr_pages = page ? NR_PAGES(folio_order(page_folio(page))) : 1;  in drm_pagemap_migrate_to_devmem()
    723  order = folio_order(page_folio(src_page));  in drm_pagemap_migrate_populate_ram_pfn()
    755  order = folio_order(page_folio(page));  in drm_pagemap_migrate_populate_ram_pfn()
    775  order = folio_order(page_folio(page));  in drm_pagemap_migrate_populate_ram_pfn()
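These hits convert a folio's order back into a page count before migrating. Assuming NR_PAGES() is a local convenience macro expanding to 1UL << order (the usage above suggests as much, but that definition is an assumption), the pattern reduces to:

```c
#define NR_PAGES(order)	(1UL << (order))	/* assumed local definition */

/* Page count for a possibly-NULL migration source page, as in the
 * drm_pagemap_migrate_to_devmem() hits: a missing page counts as one. */
static unsigned long example_migration_nr_pages(struct page *src_page)
{
	return src_page ? NR_PAGES(folio_order(page_folio(src_page))) : 1;
}
```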
|
/linux/lib/
test_kho.c
    73  unsigned int order = folio_order(folio);  in kho_test_preserve_data()
    271  if (folio_order(folio) != order)  in kho_test_restore_data()
|
test_hmm.c
    969  order = folio_order(page_folio(spage));  in dmirror_devmem_fault_alloc_and_copy()
    1405  order = folio_order(page_folio(spage));  in dmirror_device_evict_chunk()
    1591  unsigned int order = folio_order(rfolio);  in dmirror_devmem_free()
    1635  order = folio_order(page_folio(vmf->page));  in dmirror_devmem_fault()
|
/linux/drivers/gpu/drm/nouveau/
nouveau_dmem.c
    125  if (folio_order(folio)) {  in nouveau_dmem_folio_free()
    206  order = folio_order(sfolio);  in nouveau_dmem_migrate_to_ram()
    494  unsigned int order = folio_order(folio);  in nouveau_dmem_evict_chunk()
    770  if (folio_order(page_folio(dpage)))  in nouveau_dmem_migrate_copy_one()
    804  order = folio_order(folio);  in nouveau_dmem_migrate_chunk()
|
/linux/drivers/iommu/
iommu-pages.c
    28  return 1UL << (folio_order(ioptdesc_folio(desc)) + PAGE_SHIFT);  in ioptdesc_mem_size()
|
/linux/virt/kvm/
guest_memfd.c
    68  int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));  in __kvm_gmem_prepare_folio()
    147  WARN_ON_ONCE(!IS_ERR(folio) && folio_order(folio));  in kvm_gmem_get_folio()
    530  int order = folio_order(folio);  in kvm_gmem_free_folio()
|
/linux/fs/netfs/
rolling_buffer.c
    137  unsigned int order = folio_order(folio);  in rolling_buffer_load_from_ra()
|
/linux/include/trace/events/
filemap.h
    38  __entry->order = folio_order(folio);
|
/linux/fs/btrfs/
extent_io.h
    296  if (folio_order(eb->folios[0]))  in num_extent_folios()
|
/linux/drivers/hv/
mshv_regions.c
    49  page_order = folio_order(page_folio(page));  in mshv_chunk_stride()
|
/linux/include/linux/
mm.h
    1449  static inline unsigned int folio_order(const struct folio *folio)  in folio_order() (definition)
    2327  return folio_order(folio) > 1;  in folio_has_pincount()
    2559  return PAGE_SHIFT + folio_order(folio);  in folio_shift()
    2572  return PAGE_SIZE << folio_order(folio);  in folio_size()
    2674  const int order = folio_order(folio);  in folio_expected_ref_count()
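mm.h holds both the definition and the sizing helpers built on it. A minimal sketch of the contract (the order-storage detail inside folio_order() is simplified; the folio_shift() and folio_size() bodies match the hits above):

```c
/* Sketch: a folio's order is log2 of the number of pages it spans.
 * Single-page folios are order 0; where exactly a large folio stores
 * its order is simplified here. */
static inline unsigned int folio_order(const struct folio *folio)
{
	if (!folio_test_large(folio))
		return 0;
	return folio->_flags_1 & 0xff;	/* low byte of the second page's flags */
}

/* Derived helpers, as shown at mm.h:2559 and mm.h:2572 */
static inline unsigned int folio_shift(const struct folio *folio)
{
	return PAGE_SHIFT + folio_order(folio);	/* log2 of the folio size */
}

static inline size_t folio_size(const struct folio *folio)
{
	return PAGE_SIZE << folio_order(folio);	/* bytes spanned by the folio */
}
```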
|
huge_mm.h
    518  return folio_order(folio) >= HPAGE_PMD_ORDER;  in folio_test_pmd_mappable()
|
/linux/kernel/liveupdate/
kexec_handover.c
    819  const unsigned int order = folio_order(folio);  in kho_preserve_folio()
    840  const unsigned int order = folio_order(folio);  in kho_unpreserve_folio()
|
/linux/fs/
dax.c
    395  order = folio_order(folio);  in dax_folio_put()
    431  WARN_ON_ONCE(folio_order(folio));  in dax_folio_init()
    457  WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio));  in dax_associate_entry()
|