Cross-reference matches for folio_order() in the Linux source tree, grouped by directory and file:

/linux/drivers/gpu/drm/
  drm_pagemap.c
     190  order = folio_order(folio);  in drm_pagemap_migration_unlock_put_pages()
     255  order = folio_order(folio);  in drm_pagemap_migrate_map_pages()
     480  order = folio_order(folio);  in drm_pagemap_cpages()
     608  unsigned long nr_pages = src_page ? NR_PAGES(folio_order(page_folio(src_page))) : 1;  in drm_pagemap_migrate_to_devmem()
     671  folio_order(page_folio(src_page)) != HPAGE_PMD_ORDER,  in drm_pagemap_migrate_to_devmem()
     720  unsigned long nr_pages = page ? NR_PAGES(folio_order(page_folio(page))) : 1;  in drm_pagemap_migrate_to_devmem()
     790  order = folio_order(page_folio(src_page));  in drm_pagemap_migrate_populate_ram_pfn()
     824  order = folio_order(page_folio(page));  in drm_pagemap_migrate_populate_ram_pfn()
     844  order = folio_order(page_folio(page));  in drm_pagemap_migrate_populate_ram_pfn()
    1066  order = folio_order(page_folio(pages[i]));  in drm_pagemap_evict_to_ram()
    [all …]
|
/linux/drivers/gpu/drm/nouveau/
  nouveau_dmem.c
     125  if (folio_order(folio)) {  in nouveau_dmem_folio_free()
     206  order = folio_order(sfolio);  in nouveau_dmem_migrate_to_ram()
     494  unsigned int order = folio_order(folio);  in nouveau_dmem_evict_chunk()
     770  if (folio_order(page_folio(dpage)))  in nouveau_dmem_migrate_copy_one()
     804  order = folio_order(folio);  in nouveau_dmem_migrate_chunk()
|
/linux/drivers/iommu/
  iommu-pages.c
      28  return 1UL << (folio_order(ioptdesc_folio(desc)) + PAGE_SHIFT);  in ioptdesc_mem_size()
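
The single ioptdesc_mem_size() hit is the order-to-bytes conversion in its plainest form: an order-N folio spans 1UL << (N + PAGE_SHIFT) bytes. With 4 KiB base pages (PAGE_SHIFT = 12) and an order-3 folio, for example, that is 1 << 15 = 32768 bytes, i.e. eight pages.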
|
/linux/virt/kvm/
  guest_memfd.c
      69  int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));  in __kvm_gmem_prepare_folio()
     147  WARN_ON_ONCE(!IS_ERR(folio) && folio_order(folio));  in kvm_gmem_get_folio()
     530  int order = folio_order(folio);  in kvm_gmem_free_folio()
|
/linux/fs/netfs/
  rolling_buffer.c
     137  unsigned int order = folio_order(folio);  in rolling_buffer_load_from_ra()
|
/linux/mm/
  migrate.c
     594  mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);  in __folio_migrate_mapping()
     627  mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);  in __folio_migrate_mapping()
    1378  if (folio_order(src) > 1 &&  in migrate_folio_move()
    2218  order = folio_order(src);  in alloc_migration_target()
    2659  int order = folio_order(src);  in alloc_misplaced_dst_folio()
    2722  folio_order(folio), ZONE_MOVABLE);  in migrate_misplaced_folio_prepare()
|
  huge_memory.c
    1720  if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER))  in vmf_insert_folio_pmd()
    1836  if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER))  in vmf_insert_folio_pud()
    3735  int old_order = folio_order(folio);  in __split_unmapped_folio()
    3889  int old_order = folio_order(folio);  in __folio_freeze_and_split_unmapped()
    4055  int old_order = folio_order(folio);  in __folio_split()
    4405  mod_mthp_stat(folio_order(folio),  in __folio_unqueue_deferred_split()
    4426  if (folio_order(folio) <= 1)  in deferred_split_folio()
    4448  count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);  in deferred_split_folio()
    4449  mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);  in deferred_split_folio()
    4530  mod_mthp_stat(folio_order(folio),  in deferred_split_scan()
    [all …]
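
Several of the hits above (mod_mthp_stat(), count_mthp_stat()) use folio_order() as an index into per-order mTHP statistics. A minimal sketch of that bucketing pattern, with a made-up stat enum and MAX_ORDER bound rather than the kernel's actual definitions:

    #include <stdio.h>

    #define MAX_ORDER 10    /* assumed upper bound, not the kernel's */

    enum mthp_stat_item { MTHP_STAT_NR_ANON, MTHP_STAT_SPLIT_DEFERRED, NR_ITEMS };

    static long mthp_stats[MAX_ORDER + 1][NR_ITEMS];

    /* Bucket a counter update by folio order, as the calls above do. */
    static void mod_mthp_stat(unsigned int order, enum mthp_stat_item item,
                              long delta)
    {
            if (order <= MAX_ORDER)
                    mthp_stats[order][item] += delta;
    }

    int main(void)
    {
            mod_mthp_stat(9, MTHP_STAT_NR_ANON, 1);  /* e.g. a PMD-order folio on x86-64 */
            printf("order-9 anon folios: %ld\n",
                   mthp_stats[9][MTHP_STAT_NR_ANON]);
            return 0;
    }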
|
  swap_state.c
     345  folio_order(old) != folio_order(new)) {  in __swap_cache_replace_folio()
|
  debug.c
      90  folio_order(folio),  in __dump_folio()
|
  slab.h
     173  return folio_order(slab_folio(slab));  in slab_order()
|
  filemap.c
     137  xas_set_order(&xas, folio->index, folio_order(folio));  in page_cache_delete()
     852  XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));  in __filemap_add_folio()
     855  unsigned int forder = folio_order(folio);  in __filemap_add_folio()
     859  VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),  in __filemap_add_folio()
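
The xas_set_order() and XA_STATE_ORDER hits reflect how the page cache stores large folios: an order-N folio occupies a single multi-index XArray entry spanning 2^N consecutive page indices, so the XArray state must be told the order before the store or delete.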
|
  mempolicy.c
    1444  order = folio_order(src);  in alloc_migration_target_by_mpol()
    1601  order = folio_order(folio);  in do_mbind()
    3002  pol = get_vma_policy(vma, addr, folio_order(folio), &ilx);  in mpol_misplaced()
|
  rmap.c
    1697  mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);  in folio_add_new_anon_rmap()
    2708  folio_order(folio));  in try_to_migrate_one()
|
  migrate_device.c
    1179  nr = 1 << folio_order(folio);  in __migrate_device_pages()
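
Here 1 << folio_order(folio) is simply the folio's page count, the same value the folio_nr_pages() helper computes.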
|
  hugetlb.c
    1528  free_frozen_pages(&folio->page, folio_order(folio));  in __update_and_free_hugetlb_folio()
    2757  if (order_is_gigantic(folio_order(folio)))  in isolate_or_dissolve_huge_folio()
    2810  if (order_is_gigantic(folio_order(folio))) {  in replace_free_hugepage_folios()
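
The order_is_gigantic() checks presumably single out hugetlb orders too large for the buddy allocator, which cannot be dissolved or replaced through the ordinary per-order paths; the __update_and_free_hugetlb_folio() hit likewise hands the folio back to the allocator by its order.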
|
  memory.c
    5309  const unsigned int order = folio_order(folio);  in map_anon_folio_pte_pf()
    5523  if (!is_pmd_order(folio_order(folio)))  in do_set_pmd()
|
/linux/fs/btrfs/
  extent_io.h
     320  if (folio_order(eb->folios[0]))  in num_extent_folios()
|
  compression.c
     211  if (folio_order(folio))  in btrfs_free_compr_folio()
|
  disk-io.c
     190  ASSERT(folio_order(folio) == 0);  in btrfs_repair_eb_io_failure()
|
/linux/drivers/hv/
  mshv_regions.c
      49  page_order = folio_order(page_folio(page));  in mshv_chunk_stride()
|
/linux/kernel/liveupdate/
  kexec_handover.c
     836  const unsigned int order = folio_order(folio);  in kho_preserve_folio()
     857  const unsigned int order = folio_order(folio);  in kho_unpreserve_folio()
|
/linux/include/linux/
  mm.h
    1722  static inline unsigned int folio_order(const struct folio *folio)  in folio_order() (definition)
    2600  return folio_order(folio) > 1;  in folio_has_pincount()
    2802  return PAGE_SHIFT + folio_order(folio);  in folio_shift()
    2815  return PAGE_SIZE << folio_order(folio);  in folio_size()
    2917  const int order = folio_order(folio);  in folio_expected_ref_count()
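
These mm.h hits include the definition of folio_order() itself, the base-2 log of the folio's page count, plus the helpers built directly on it: folio_shift() is PAGE_SHIFT + order and folio_size() is PAGE_SIZE << order, exactly as the indexed lines show. A self-contained userspace model of that arithmetic (struct folio reduced to a bare order field; these are not the kernel definitions):

    #include <stdio.h>

    #define PAGE_SHIFT 12                    /* assume 4 KiB base pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    struct folio { unsigned int order; };    /* stand-in for the real struct */

    static unsigned int folio_order(const struct folio *folio)
    {
            return folio->order;             /* 0 means a single page */
    }

    static unsigned int folio_shift(const struct folio *folio)
    {
            return PAGE_SHIFT + folio_order(folio);  /* as at mm.h:2802 */
    }

    static unsigned long folio_size(const struct folio *folio)
    {
            return PAGE_SIZE << folio_order(folio);  /* as at mm.h:2815 */
    }

    int main(void)
    {
            struct folio f = { .order = 2 }; /* a 4-page folio */

            printf("order=%u shift=%u size=%lu\n",
                   folio_order(&f), folio_shift(&f), folio_size(&f));
            return 0;                        /* prints: order=2 shift=14 size=16384 */
    }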
|
/linux/fs/nfs/
  internal.h
     877  pgoff_t index = folio->index >> folio_order(folio);  in nfs_folio_length()
|
  write.c
     197  end_index = ((i_size - 1) >> folio_shift(folio)) << folio_order(folio);  in nfs_grow_file()
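
Both NFS hits are folio-index arithmetic. The internal.h line shifts a page index right by folio_order() to get the containing folio's index; nfs_grow_file() composes folio_shift() and folio_order() to round i_size down to the index of the first page of the folio holding the last byte. A worked example, assuming 4 KiB pages and an order-2 (16 KiB) folio: for i_size = 100000, folio_shift() is 14, so (i_size - 1) >> 14 = 6 (the folio index), and 6 << 2 = 24, the page index at which that folio starts.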
|
/linux/arch/s390/kvm/
  gmap.c
     711  order = folio_order(page_folio(f->page));  in gmap_link()
|