Home
last modified time | relevance | path

Searched refs:PMD_ORDER (Results 1 – 15 of 15) sorted by relevance

/linux/tools/mm/
H A Dthpmaps30 PMD_ORDER = int(math.log2(PMD_SIZE / PAGE_SIZE))
237 'aligned': [0] * (PMD_ORDER + 1),
238 'unaligned': [0] * (PMD_ORDER + 1),
242 'aligned': [0] * (PMD_ORDER + 1),
243 'unaligned': [0] * (PMD_ORDER + 1),
288 stats['anon']['aligned'][PMD_ORDER] = max(0, stats['anon']['aligned'][PMD_ORDER] - kbnr(anon_pmd_mapped))
289 stats['file']['aligned'][PMD_ORDER] = max(0, stats['file']['aligned'][PMD_ORDER] - kbnr(file_pmd_mapped))
522 if order > PMD_ORDER:
/linux/include/linux/
H A Dhuge_mm.h79 #define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
87 (BIT(PMD_ORDER) | BIT(PUD_ORDER))
157 if (order <= 0 || order > PMD_ORDER) in mod_mthp_stat()
H A Dswap.h245 #define SWAP_NR_ORDERS (PMD_ORDER + 1)
H A Dpgtable.h8 #define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT) macro
/linux/mm/
H A Dkhugepaged.c413 if (test_bit(PMD_ORDER, &huge_anon_orders_always)) in hugepage_pmd_enabled()
415 if (test_bit(PMD_ORDER, &huge_anon_orders_madvise)) in hugepage_pmd_enabled()
417 if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) && in hugepage_pmd_enabled()
459 if (thp_vma_allowable_order(vma, vm_flags, TVA_KHUGEPAGED, PMD_ORDER)) in khugepaged_enter_vma()
905 if (!thp_vma_suitable_order(vma, address, PMD_ORDER)) in hugepage_vma_revalidate()
907 if (!thp_vma_allowable_order(vma, vma->vm_flags, type, PMD_ORDER)) in hugepage_vma_revalidate()
1499 if (!thp_vma_allowable_order(vma, vma->vm_flags, TVA_FORCED_COLLAPSE, PMD_ORDER)) in try_collapse_pte_mapped_thp()
2435 if (!thp_vma_allowable_order(vma, vma->vm_flags, TVA_KHUGEPAGED, PMD_ORDER)) { in khugepaged_scan_mm_slot()
2769 if (!thp_vma_allowable_order(vma, vma->vm_flags, TVA_FORCED_COLLAPSE, PMD_ORDER)) in madvise_collapse()
H A Dpage_vma_mapped.c286 PMD_ORDER) && in page_vma_mapped_walk()
H A Dmemory.c4634 BIT(PMD_ORDER) - 1); in alloc_swap_folio()
5151 BIT(PMD_ORDER) - 1); in alloc_anon_folio()
5426 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) in do_set_pmd()
6145 return vma->vm_ops->huge_fault(vmf, PMD_ORDER); in create_huge_pmd()
6168 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); in wp_huge_pmd()
6416 thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PMD_ORDER)) { in __handle_mm_fault()
H A Dhuge_memory.c791 huge_anon_orders_inherit = BIT(PMD_ORDER); in hugepage_init_sysfs()
1467 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) in do_huge_pmd_anonymous_page()
1643 if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER)) in vmf_insert_folio_pmd()
H A Dswapfile.c1086 if (order < PMD_ORDER) { in cluster_alloc_swap_entry()
1102 if (order < PMD_ORDER) { in cluster_alloc_swap_entry()
/linux/fs/
H A Ddax.c91 return PMD_ORDER; in dax_entry_order()
1976 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
2012 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
2077 else if (order == PMD_ORDER) in dax_iomap_fault()
2121 else if (order == PMD_ORDER) in dax_insert_pfn_mkwrite()
/linux/Documentation/admin-guide/mm/
H A Dtranshuge.rst366 ``thp_anon`` is not specified, PMD_ORDER THP will default to ``inherit``.
368 PMD_ORDER THP policy will be overridden. If the policy for PMD_ORDER
400 ``thp_shmem`` is not specified, PMD_ORDER hugepage will default to
402 user, the PMD_ORDER hugepage policy will be overridden. If the policy for
403 PMD_ORDER is not defined within a valid ``thp_shmem``, its policy will
/linux/drivers/hv/
H A Dmshv_regions.c51 if (page_order != PMD_ORDER) in mshv_chunk_stride()
H A Dmshv_vtl_main.c1241 case PMD_ORDER: in mshv_vtl_low_huge_fault()
/linux/drivers/dax/
H A Ddevice.c254 else if (order == PMD_ORDER) in dev_dax_huge_fault()
/linux/drivers/vfio/pci/
H A Dvfio_pci_core.c1677 case PMD_ORDER: in vfio_pci_vmf_insert_pfn()