/linux/mm/
pgtable-generic.c
    119  flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);  in pmdp_set_access_flags()
    133  flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);  in pmdp_clear_flush_young()
    146  flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);  in pmdp_huge_clear_flush()
    204  flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);  in pmdp_invalidate()
    233  flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);  in pmdp_collapse_flush()
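Every hit above follows the same shape: mutate one huge PMD entry, then flush exactly the PMD-sized virtual range it mapped. A condensed sketch of that pattern in kernel-style C (simplified; the real helpers in mm/pgtable-generic.c carry extra debug checks and per-arch overrides):

	/* Clear a huge PMD and flush the single PMD-sized range it mapped. */
	pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
				    unsigned long address, pmd_t *pmdp)
	{
		pmd_t pmd;

		VM_BUG_ON(address & ~HPAGE_PMD_MASK); /* caller passes a PMD-aligned address */
		pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
		return pmd;
	}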
|
khugepaged.c
    684   unsigned long end = address + HPAGE_PMD_SIZE;  in __collapse_huge_page_copy_succeeded()
    1166  address + HPAGE_PMD_SIZE);  in collapse_huge_page()
    1498  unsigned long end = haddr + HPAGE_PMD_SIZE;  in collapse_pte_mapped_thp()
    1510  !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))  in collapse_pte_mapped_thp()
    1590  haddr, haddr + HPAGE_PMD_SIZE);  in collapse_pte_mapped_thp()
    1765  vma->vm_end < addr + HPAGE_PMD_SIZE)  in retract_page_tables()
    1780  addr, addr + HPAGE_PMD_SIZE);  in retract_page_tables()
    2447  hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);  in khugepaged_scan_mm_slot()
    2448  hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);  in khugepaged_scan_mm_slot()
    2463  khugepaged_scan.address + HPAGE_PMD_SIZE >  in khugepaged_scan_mm_slot()
    [all …]
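khugepaged can only collapse regions covering a whole, aligned huge page, hence the round_up()/round_down() of the VMA bounds at lines 2447-2448. A minimal userspace sketch of that arithmetic, assuming a 2 MiB HPAGE_PMD_SIZE (the x86-64 value with 4 KiB base pages):

	#include <stdio.h>

	#define HPAGE_PMD_SIZE	(1UL << 21)	/* assumed: 2 MiB */
	#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
	#define ROUND_DOWN(x, a)	((x) & ~((a) - 1))

	int main(void)
	{
		unsigned long vm_start = 0x7f0000123000UL;
		unsigned long vm_end   = 0x7f0000a00000UL;
		unsigned long hstart = ROUND_UP(vm_start, HPAGE_PMD_SIZE);
		unsigned long hend   = ROUND_DOWN(vm_end, HPAGE_PMD_SIZE);

		if (hstart < hend)	/* room for at least one huge page */
			printf("scan [%#lx, %#lx): %lu huge page(s)\n", hstart,
			       hend, (hend - hstart) / HPAGE_PMD_SIZE);
		else
			printf("VMA cannot hold an aligned PMD-sized page\n");
		return 0;
	}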
|
huge_memory.c
    450   return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);  in hpage_pmd_size_show()
    2038  haddr + HPAGE_PMD_SIZE);  in do_huge_zero_wp_pmd()
    2271  tlb_change_page_size(tlb, HPAGE_PMD_SIZE);  in madvise_free_huge_pmd()
    2302  if (next - addr != HPAGE_PMD_SIZE) {  in madvise_free_huge_pmd()
    2347  tlb_change_page_size(tlb, HPAGE_PMD_SIZE);  in zap_huge_pmd()
    2417  tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);  in zap_huge_pmd()
    2567  tlb_change_page_size(tlb, HPAGE_PMD_SIZE);  in change_huge_pmd()
    2642  tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);  in change_huge_pmd()
    2753  flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);  in move_pages_huge_pmd()
    2755  src_addr + HPAGE_PMD_SIZE);  in move_pages_huge_pmd()
    [all …]
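hpage_pmd_size_show() at line 450 is how userspace learns HPAGE_PMD_SIZE without hardcoding it: the value is published in sysfs under /sys/kernel/mm/transparent_hugepage/hpage_pmd_size. A small C reader of that file:

	#include <stdio.h>

	int main(void)
	{
		unsigned long sz = 0;
		FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", "r");

		if (!f) {
			perror("hpage_pmd_size");
			return 1;
		}
		if (fscanf(f, "%lu", &sz) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);
		printf("THP PMD size: %lu bytes (%lu MiB)\n", sz, sz >> 20);
		return 0;
	}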
|
migrate_device.c
    50   (IS_ALIGNED(start, HPAGE_PMD_SIZE) &&  in migrate_vma_collect_hole()
    51   IS_ALIGNED(end, HPAGE_PMD_SIZE))) {  in migrate_vma_collect_hole()
    200  (IS_ALIGNED(start, HPAGE_PMD_SIZE) &&  in migrate_vma_collect_huge_pmd()
    201  IS_ALIGNED(end, HPAGE_PMD_SIZE))) {  in migrate_vma_collect_huge_pmd()
    883  flush_cache_page(vma, addr, addr + HPAGE_PMD_SIZE);  in migrate_vma_insert_huge_pmd_page()
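The IS_ALIGNED() pairs above gate whether a migration range can be treated as whole huge pages: both endpoints must sit on HPAGE_PMD_SIZE boundaries. A one-file userspace equivalent of that check, with the 2 MiB size again assumed:

	#include <stdio.h>

	#define HPAGE_PMD_SIZE	(1UL << 21)	/* assumed: 2 MiB */
	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

	int main(void)
	{
		unsigned long start = 0x40000000UL, end = 0x40200000UL;

		printf("migrate as one huge PMD: %s\n",
		       IS_ALIGNED(start, HPAGE_PMD_SIZE) &&
		       IS_ALIGNED(end, HPAGE_PMD_SIZE) ? "yes" : "no");
		return 0;
	}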
|
madvise.c
    382  tlb_change_page_size(tlb, HPAGE_PMD_SIZE);  in madvise_cold_or_pageout_pte_range()
    406  if (next - addr != HPAGE_PMD_SIZE) {  in madvise_cold_or_pageout_pte_range()
|
userfaultfd.c
    1522  src_end - src_addr < HPAGE_PMD_SIZE;  in move_splits_huge_pmd()
    1885  step_size = HPAGE_PMD_SIZE;  in move_pages()
|
mprotect.c
    478  if ((next - addr != HPAGE_PMD_SIZE) ||  in change_pmd_range()
|
mremap.c
    855  if (extent == HPAGE_PMD_SIZE &&  in move_page_tables()
|
memory.c
    1381  VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);  in copy_pmd_range()
    1928  if (next - addr != HPAGE_PMD_SIZE)  in zap_pmd_range()
    1937  next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {  in zap_pmd_range()
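The recurring next - addr != HPAGE_PMD_SIZE test (also in madvise.c, mprotect.c, and huge_memory.c above) asks whether the current walk step spans an entire PMD region; only then may the entry be handled as one huge page, otherwise it must be split or walked PTE by PTE. A simplified userspace model of the boundary computation (the kernel's pmd_addr_end() uses PMD_SIZE/PMD_MASK; 2 MiB assumed here):

	#include <stdio.h>

	#define HPAGE_PMD_SIZE	(1UL << 21)
	#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

	/* End of the current PMD region, capped at the walk's end. */
	static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
	{
		unsigned long boundary = (addr + HPAGE_PMD_SIZE) & HPAGE_PMD_MASK;

		return boundary < end ? boundary : end;
	}

	int main(void)
	{
		unsigned long addr = 0x200000, end = 0x300000; /* half a PMD */
		unsigned long next = pmd_addr_end(addr, end);

		if (next - addr == HPAGE_PMD_SIZE)
			printf("full PMD span: handle as one huge page\n");
		else
			printf("partial span of %#lx bytes: fall back\n", next - addr);
		return 0;
	}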
|
rmap.c
    1038  address + HPAGE_PMD_SIZE);  in page_vma_mkclean_one()
|
shmem.c
    1300  stat->blksize = HPAGE_PMD_SIZE;  in shmem_getattr()
    2829  hpage_size = HPAGE_PMD_SIZE;  in shmem_get_unmapped_area()
|
/linux/arch/powerpc/mm/book3s64/
pgtable.c
    186  flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);  in pmdp_invalidate()
    215  flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);  in pmdp_huge_get_and_clear_full()
|
radix_tlb.c
    1331  end = addr + HPAGE_PMD_SIZE;  in radix__flush_tlb_collapsed_pmd()
|
hash_utils.c
    2251  max_hpte_count = HPAGE_PMD_SIZE >> shift;  in flush_hash_hugepage()
|
/linux/arch/arc/mm/
tlb.c
    650  mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))  in arc_mmu_init()
    652  (unsigned long)TO_MB(HPAGE_PMD_SIZE));  in arc_mmu_init()
|
/linux/include/linux/
huge_mm.h
    119  #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
    120  #define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)  (macro definition)
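These two lines are the whole definition chain: HPAGE_PMD_SIZE is derived from HPAGE_PMD_SHIFT (which the kernel takes from PMD_SHIFT), and HPAGE_PMD_MASK is derived from the size. Reproduced standalone, with the shift assumed to be 21 as on x86-64 with 4 KiB base pages:

	#include <stdio.h>

	#define HPAGE_PMD_SHIFT	21	/* assumption for this demo; PMD_SHIFT in the kernel */
	#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
	#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

	int main(void)
	{
		unsigned long addr = 0x7f1234567890UL;
		unsigned long haddr = addr & HPAGE_PMD_MASK;	/* round down */

		printf("size  = %lu bytes\n", HPAGE_PMD_SIZE);	/* 2097152 */
		printf("haddr = %#lx\n", haddr);
		printf("end   = %#lx\n", haddr + HPAGE_PMD_SIZE);
		return 0;
	}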
|
/linux/mm/damon/
ops-common.c
    94  young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + HPAGE_PMD_SIZE);  in damon_pmdp_mkold()
|
vaddr.c
    454  *priv->folio_sz = HPAGE_PMD_SIZE;  in damon_young_pmd_entry()
|
/linux/include/asm-generic/
tlb.h
    706  tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \
|
/linux/drivers/nvdimm/
pfn_devs.c
    98   alignments[1] = HPAGE_PMD_SIZE;  in nd_pfn_supported_alignments()
    113  return HPAGE_PMD_SIZE;  in nd_pfn_default_alignment()
|
/linux/arch/x86/mm/
pgtable.c
    515  flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);  in pmdp_clear_flush_young()
|
/linux/fs/proc/
task_mmu.c
    1077  mss->anonymous_thp += HPAGE_PMD_SIZE;  in smaps_pmd_entry()
    1079  mss->shmem_thp += HPAGE_PMD_SIZE;  in smaps_pmd_entry()
    1083  mss->file_thp += HPAGE_PMD_SIZE;  in smaps_pmd_entry()
    3223  HPAGE_PMD_SIZE/PAGE_SIZE);  in gather_pte_stats()
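smaps_pmd_entry() credits one full HPAGE_PMD_SIZE per PMD-mapped huge page to the anonymous/shmem/file THP counters, which surface as AnonHugePages:, ShmemPmdMapped:, and FilePmdMapped: in /proc/<pid>/smaps. A short userspace reader of those fields for the current process:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/self/smaps_rollup", "r");

		if (!f) {
			perror("smaps_rollup");
			return 1;
		}
		while (fgets(line, sizeof(line), f)) {
			if (!strncmp(line, "AnonHugePages:", 14) ||
			    !strncmp(line, "ShmemPmdMapped:", 15) ||
			    !strncmp(line, "FilePmdMapped:", 14))
				fputs(line, stdout);
		}
		fclose(f);
		return 0;
	}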
|