
Searched refs:PMD_SIZE (Results 1 – 25 of 79) sorted by relevance


/linux/arch/m68k/mm/
kmap.c
50 #define IO_SIZE PMD_SIZE
85 virtaddr += PMD_SIZE; in __free_io_area()
86 size -= PMD_SIZE; in __free_io_area()
254 if (!(virtaddr & (PMD_SIZE-1))) in __ioremap()
269 physaddr += PMD_SIZE; in __ioremap()
270 virtaddr += PMD_SIZE; in __ioremap()
271 size -= PMD_SIZE; in __ioremap()
379 virtaddr += PMD_SIZE; in kernel_set_cachemode()
380 size -= PMD_SIZE; in kernel_set_cachemode()
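
The kmap.c hits show __ioremap() and __free_io_area() consuming a region one PMD at a time: act on a chunk, advance both addresses, shrink the remaining size. Below is a minimal sketch of that stride; the 2 MiB PMD_SIZE and the map_one_pmd() helper are assumptions for illustration, not m68k's actual definitions.

    #define PMD_SIZE (1UL << 21) /* assumed; m68k derives it from PMD_SHIFT */

    void map_one_pmd(unsigned long virt, unsigned long phys); /* hypothetical */

    void map_region(unsigned long virtaddr, unsigned long physaddr,
                    unsigned long size)
    {
        /* size is assumed to be a multiple of PMD_SIZE for this sketch */
        while (size > 0) {
            map_one_pmd(virtaddr, physaddr);
            virtaddr += PMD_SIZE;   /* mirrors lines 269-271 above */
            physaddr += PMD_SIZE;
            size     -= PMD_SIZE;
        }
    }
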
/linux/arch/s390/mm/
vmem.c
105 ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start); in vmemmap_flush_unused_sub_pmd()
128 if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE))) in vmemmap_use_sub_pmd()
138 unsigned long page = ALIGN_DOWN(start, PMD_SIZE); in vmemmap_use_new_sub_pmd()
146 if (!IS_ALIGNED(start, PMD_SIZE)) in vmemmap_use_new_sub_pmd()
153 if (!IS_ALIGNED(end, PMD_SIZE)) in vmemmap_use_new_sub_pmd()
160 unsigned long page = ALIGN_DOWN(start, PMD_SIZE); in vmemmap_unuse_sub_pmd()
164 return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE); in vmemmap_unuse_sub_pmd()
240 if (IS_ALIGNED(addr, PMD_SIZE) && in modify_pmd_table()
241 IS_ALIGNED(next, PMD_SIZE)) { in modify_pmd_table()
243 vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE), altmap); in modify_pmd_table()
[all …]
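
The s390 vmem.c hits lean on the kernel's power-of-two alignment helpers from include/linux/align.h. A userspace restatement with an assumed 2 MiB PMD_SIZE, showing what each evaluates to:

    #include <stdio.h>

    /* 'a' must be a power of two, as PMD_SIZE always is. */
    #define ALIGN(x, a)      (((x) + ((a) - 1)) & ~((a) - 1)) /* round up   */
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))               /* round down */
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    #define PMD_SIZE (1UL << 21) /* assumed */

    int main(void)
    {
        unsigned long start = 0x1234567UL;

        printf("ALIGN      = %#lx\n", ALIGN(start, PMD_SIZE));      /* 0x1400000 */
        printf("ALIGN_DOWN = %#lx\n", ALIGN_DOWN(start, PMD_SIZE)); /* 0x1200000 */
        printf("IS_ALIGNED = %d\n",  IS_ALIGNED(start, PMD_SIZE));  /* 0 */
        return 0;
    }
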
/linux/arch/riscv/kvm/
mmu.c
258 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); in kvm_age_gfn()
278 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); in kvm_test_age_gfn()
324 if ((gpa_start & (PMD_SIZE - 1)) != (uaddr_start & (PMD_SIZE - 1))) in fault_supports_gstage_huge_mapping()
339 return (hva >= ALIGN(uaddr_start, PMD_SIZE)) && (hva < ALIGN_DOWN(uaddr_end, PMD_SIZE)); in fault_supports_gstage_huge_mapping()
388 size = PMD_SIZE; in get_hva_mapping_size()
411 if (sz < PMD_SIZE) in transparent_hugepage_adjust()
418 return PMD_SIZE; in transparent_hugepage_adjust()
471 if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) in kvm_riscv_mmu_map()
486 vma_pagesize != PMD_SIZE && in kvm_riscv_mmu_map()
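
fault_supports_gstage_huge_mapping() (lines 324 and 339 above) encodes the two preconditions for mapping a fault with a PMD-sized block. A compilable sketch under an assumed 2 MiB PMD_SIZE:

    #include <stdbool.h>

    #define PMD_SIZE         (1UL << 21) /* assumed */
    #define ALIGN(x, a)      (((x) + ((a) - 1)) & ~((a) - 1))
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

    bool supports_pmd_mapping(unsigned long gpa_start,
                              unsigned long uaddr_start,
                              unsigned long uaddr_end,
                              unsigned long hva)
    {
        /* 1) Guest PA and host VA must share the same offset within a
           PMD; otherwise no single block can map one onto the other. */
        if ((gpa_start & (PMD_SIZE - 1)) != (uaddr_start & (PMD_SIZE - 1)))
            return false;

        /* 2) The faulting hva must fall in the PMD-aligned interior of
           the slot, so a whole block fits without spilling past an end. */
        return hva >= ALIGN(uaddr_start, PMD_SIZE) &&
               hva <  ALIGN_DOWN(uaddr_end, PMD_SIZE);
    }
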
/linux/arch/sh/include/asm/
pgtable-3level.h
23 #define PMD_SIZE (1UL << PMD_SHIFT) macro
24 #define PMD_MASK (~(PMD_SIZE-1))
26 #define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)
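
This header shows the standard derivation chain: PMD_SIZE comes from PMD_SHIFT, PMD_MASK clears the in-PMD offset bits, and PTRS_PER_PMD is the next level's span divided by PMD_SIZE. Worked through with assumed shift values (PMD_SHIFT 21, PGDIR_SHIFT 30; sh's real values differ):

    #include <stdio.h>

    /* Assumed, illustrative shifts; each architecture picks its own. */
    #define PMD_SHIFT    21
    #define PGDIR_SHIFT  30
    #define PMD_SIZE     (1UL << PMD_SHIFT)                /* 2 MiB */
    #define PMD_MASK     (~(PMD_SIZE - 1))
    #define PTRS_PER_PMD ((1UL << PGDIR_SHIFT) / PMD_SIZE) /* 512 */

    int main(void)
    {
        unsigned long addr = 0x12345678UL;

        printf("PMD_SIZE     = %#lx\n", PMD_SIZE);              /* 0x200000   */
        printf("PTRS_PER_PMD = %lu\n", PTRS_PER_PMD);           /* 512        */
        printf("pmd base     = %#lx\n", addr & PMD_MASK);       /* 0x12200000 */
        printf("pmd offset   = %#lx\n", addr & (PMD_SIZE - 1)); /* 0x145678   */
        return 0;
    }
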
/linux/arch/riscv/mm/
init.c
223 vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK; in setup_bootmem()
499 if (sz == PMD_SIZE) { in create_pmd_mapping()
717 !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE) in best_map_size()
718 return PMD_SIZE; in best_map_size()
830 PMD_SIZE, PAGE_KERNEL_EXEC); in set_satp_mode()
832 set_satp_mode_pmd + PMD_SIZE, in set_satp_mode()
833 set_satp_mode_pmd + PMD_SIZE, in set_satp_mode()
834 PMD_SIZE, PAGE_KERNEL_EXEC); in set_satp_mode()
888 for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE) in create_kernel_page_table()
891 PMD_SIZE, in create_kernel_page_table()
[all …]
tlbflush.c
192 else if (stride_size >= PMD_SIZE) in flush_tlb_range()
193 stride_size = PMD_SIZE; in flush_tlb_range()
214 start, end - start, PMD_SIZE); in flush_pmd_tlb_range()
kasan_init.c
66 if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) && in kasan_populate_pmd()
67 (next - vaddr) >= PMD_SIZE) { in kasan_populate_pmd()
68 phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE); in kasan_populate_pmd()
71 memset(__va(phys_addr), KASAN_SHADOW_INIT, PMD_SIZE); in kasan_populate_pmd()
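
init.c's best_map_size() (lines 717-718) and kasan_init.c's kasan_populate_pmd() apply the same rule: a PMD-sized leaf is safe only when the addresses are PMD-aligned and at least one full PMD remains. A sketch of just the two-level decision visible above; the kernel's version may consider further sizes hidden behind the truncation:

    #define PAGE_SIZE (1UL << 12) /* assumed 4 KiB */
    #define PMD_SIZE  (1UL << 21) /* assumed 2 MiB */

    unsigned long best_map_size(unsigned long pa, unsigned long va,
                                unsigned long size)
    {
        /* A PMD leaf maps [va, va + PMD_SIZE) to [pa, pa + PMD_SIZE),
           so both must sit on a PMD boundary and a full PMD must still
           remain; otherwise fall back to base pages. */
        if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) &&
            size >= PMD_SIZE)
            return PMD_SIZE;
        return PAGE_SIZE;
    }
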
/linux/arch/parisc/kernel/
pci-dma.c
85 if (end > PMD_SIZE) in map_pte_uncached()
86 end = PMD_SIZE; in map_pte_uncached()
120 vaddr = (vaddr + PMD_SIZE) & PMD_MASK; in map_pmd_uncached()
121 orig_vaddr += PMD_SIZE; in map_pmd_uncached()
170 if (end > PMD_SIZE) in unmap_uncached_pte()
171 end = PMD_SIZE; in unmap_uncached_pte()
210 vaddr = (vaddr + PMD_SIZE) & PMD_MASK; in unmap_uncached_pmd()
211 orig_vaddr += PMD_SIZE; in unmap_uncached_pmd()
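
The parisc walkers advance with (vaddr + PMD_SIZE) & PMD_MASK (lines 120 and 210), which always lands on the start of the next PMD, even when vaddr is already aligned; ALIGN() would leave an aligned address in place. A short sketch, 2 MiB PMD_SIZE assumed:

    #define PMD_SIZE (1UL << 21) /* assumed */
    #define PMD_MASK (~(PMD_SIZE - 1))

    /* Next PMD boundary strictly after vaddr: add one full PMD, then
       clear the offset bits. For an aligned vaddr this still advances
       by PMD_SIZE, which is what a per-PMD walker wants. */
    unsigned long next_pmd_boundary(unsigned long vaddr)
    {
        return (vaddr + PMD_SIZE) & PMD_MASK;
    }
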
/linux/arch/arm64/kvm/hyp/nvhe/
mm.c
335 phys = ALIGN(hyp_memory[i].base, PMD_SIZE); in create_fixblock()
336 if (phys + PMD_SIZE < (hyp_memory[i].base + hyp_memory[i].size)) in create_fixblock()
344 addr = ALIGN(__io_map_base, PMD_SIZE); in create_fixblock()
345 ret = __pkvm_alloc_private_va_range(addr, PMD_SIZE); in create_fixblock()
349 ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PMD_SIZE, phys, PAGE_HYP); in create_fixblock()
353 ret = kvm_pgtable_walk(&pkvm_pgtable, addr, PMD_SIZE, &walker); in create_fixblock()
367 *size = PMD_SIZE; in hyp_fixblock_map()
/linux/arch/x86/include/asm/
pgtable_32_types.h
12 # define PMD_SIZE (1UL << PMD_SHIFT) macro
13 # define PMD_MASK (~(PMD_SIZE - 1))
pgtable_64_types.h
82 #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) macro
83 #define PMD_MASK (~(PMD_SIZE - 1))
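
Unlike the 32-bit header above it, the 64-bit variant wraps the constant in _AC() so the same header works from both C and assembly. The underlying macro, paraphrased from include/uapi/linux/const.h:

    /* _AC() glues a C suffix such as UL onto a constant only when
       compiling C, because the same headers are also included from
       assembly, which rejects "1UL". */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)  X
    #else
    #define __AC(X, Y) (X##Y)
    #define _AC(X, Y)  __AC(X, Y)
    #endif

    /* PMD_SHIFT of 21 is assumed here for illustration. */
    #define PMD_SHIFT 21
    #define PMD_SIZE  (_AC(1, UL) << PMD_SHIFT) /* 1UL << 21 in C, 1 << 21 in asm */
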
/linux/arch/loongarch/kvm/
mmu.c
399 if (IS_ALIGNED(size, PMD_SIZE) && IS_ALIGNED(gpa_start, PMD_SIZE) in kvm_arch_prepare_memory_region()
400 && IS_ALIGNED(hva_start, PMD_SIZE)) in kvm_arch_prepare_memory_region()
427 gpa_offset = gpa_start & (PMD_SIZE - 1); in kvm_arch_prepare_memory_region()
428 hva_offset = hva_start & (PMD_SIZE - 1); in kvm_arch_prepare_memory_region()
433 gpa_offset = PMD_SIZE; in kvm_arch_prepare_memory_region()
434 if ((size + gpa_offset) < (PMD_SIZE * 2)) in kvm_arch_prepare_memory_region()
638 return (hva >= ALIGN(start, PMD_SIZE)) && (hva < ALIGN_DOWN(end, PMD_SIZE)); in fault_supports_huge_mapping()
/linux/arch/nios2/mm/
ioremap.c
33 if (end > PMD_SIZE) in remap_area_pte()
34 end = PMD_SIZE; in remap_area_pte()
70 address = (address + PMD_SIZE) & PMD_MASK; in remap_area_pmd()
/linux/arch/x86/boot/startup/
map_kernel.c
53 for (; paddr < paddr_end; paddr += PMD_SIZE) { in sme_postprocess_startup()
179 for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) { in __startup_64()
182 pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE; in __startup_64()
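
__startup_64() (lines 179-182) maps the kernel text with consecutive 2 MiB entries by adding i * PMD_SIZE to a template entry; since the flag bits all sit below bit 21 and the frame is PMD-aligned, the addition only moves the physical-address field. A simplified sketch; the index derivation and the entry format are reduced to plain unsigned longs:

    #define PMD_SHIFT    21  /* assumed x86-64 values */
    #define PMD_SIZE     (1UL << PMD_SHIFT)
    #define PTRS_PER_PMD 512

    void fill_text_pmds(unsigned long *pmd, unsigned long va_text,
                        unsigned long va_end, unsigned long pmd_entry)
    {
        unsigned long n = (va_end - va_text + PMD_SIZE - 1) / PMD_SIZE;

        for (unsigned long i = 0; i < n; i++) {
            unsigned long idx = (va_text >> PMD_SHIFT) + i; /* simplified */

            /* Consecutive 2 MiB frames: bump the entry by PMD_SIZE. */
            pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
        }
    }
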
/linux/include/asm-generic/
pgtable-nopmd.h
22 #define PMD_SIZE (1UL << PMD_SHIFT) macro
23 #define PMD_MASK (~(PMD_SIZE-1))
/linux/arch/loongarch/include/asm/
pgtable.h
34 #define PMD_SIZE (1UL << PMD_SHIFT) macro
35 #define PMD_MASK (~(PMD_SIZE-1))
39 #define PMD_SIZE (1UL << PMD_SHIFT) macro
40 #define PMD_MASK (~(PMD_SIZE-1))
112 …R_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - …
116 …D * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - …
119 #define VMEMMAP_ALIGN max(PMD_SIZE, MAX_FOLIO_VMEMMAP_ALIGN)
/linux/arch/x86/kernel/
vmlinux.lds.S
68 #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
69 #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE);
91 . = ALIGN(PMD_SIZE); \
97 . = ALIGN(PMD_SIZE); \
/linux/arch/arm64/include/asm/
kernel-pgtable.h
21 #if defined(PMD_SIZE) && PMD_SIZE <= MIN_KIMG_ALIGN
/linux/arch/powerpc/include/asm/nohash/64/
pgtable-4k.h
31 #define PMD_SIZE (1UL << PMD_SHIFT) macro
32 #define PMD_MASK (~(PMD_SIZE-1))
/linux/arch/x86/mm/
mem_encrypt_amd.c
169 vaddr += PMD_SIZE; in __sme_early_map_unmap_mem()
170 paddr += PMD_SIZE; in __sme_early_map_unmap_mem()
171 size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE; in __sme_early_map_unmap_mem()
/linux/arch/um/include/asm/
pgtable-4level.h
31 #define PMD_SIZE (1UL << PMD_SHIFT) macro
32 #define PMD_MASK (~(PMD_SIZE-1))
/linux/arch/x86/boot/compressed/
ident_map_64.c
98 start = round_down(start, PMD_SIZE); in kernel_add_identity_map()
99 end = round_up(end, PMD_SIZE); in kernel_add_identity_map()
370 end = address + PMD_SIZE; in do_boot_page_fault()
/linux/mm/
page_table_check.c
166 page_table_check_clear(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT); in __page_table_check_pmd_clear()
231 unsigned long stride = PMD_SIZE >> PAGE_SHIFT; in __page_table_check_pmds_set()
240 __page_table_check_pmd_clear(mm, addr + PMD_SIZE * i, *(pmdp + i)); in __page_table_check_pmds_set()
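
page_table_check.c converts a PMD-mapped range into a base-page count with PMD_SIZE >> PAGE_SHIFT. A worked check with assumed 4 KiB pages and 2 MiB PMDs:

    #include <stdio.h>

    /* Assumed layout: 4 KiB base pages, 2 MiB PMD leaves. */
    #define PAGE_SHIFT 12
    #define PMD_SHIFT  21
    #define PMD_SIZE   (1UL << PMD_SHIFT)

    int main(void)
    {
        /* The stride used at lines 166 and 231 above: one PMD entry
           accounts for PMD_SIZE >> PAGE_SHIFT base pages. */
        printf("%lu\n", PMD_SIZE >> PAGE_SHIFT); /* 512 = 2^(21-12) */
        return 0;
    }
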
/linux/arch/s390/boot/
vmem.c
174 IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) { in kasan_pmd_populate_zero_shadow()
323 !IS_ALIGNED(addr, PMD_SIZE) || (size < PMD_SIZE)) in try_get_large_pmd_pa()
327 if (!IS_ALIGNED(pa, PMD_SIZE)) in try_get_large_pmd_pa()
/linux/arch/x86/mm/pat/
set_memory.c
261 return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT; in highmap_end_pfn()
430 for (addr = start; within(addr, start, end); addr += PMD_SIZE) in cpa_collapse_large_pages()
1167 pfninc = PMD_SIZE >> PAGE_SHIFT; in __split_large_page()
1169 lpinc = PMD_SIZE; in __split_large_page()
1467 if (start & (PMD_SIZE - 1)) { in unmap_pmd_range()
1468 unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; in unmap_pmd_range()
1480 while (end - start >= PMD_SIZE) { in unmap_pmd_range()
1484 __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE); in unmap_pmd_range()
1486 start += PMD_SIZE; in unmap_pmd_range()
1601 if (start & (PMD_SIZE - 1)) { in populate_pmd()
[all …]
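
unmap_pmd_range() (lines 1467-1486) shows the usual head/body/tail split for a range walker: peel an unaligned head off up to the next boundary, consume whole PMDs, and leave any unaligned tail for base-page handling. A structural sketch with hypothetical helper names:

    #define PMD_SIZE (1UL << 21) /* assumed */
    #define PMD_MASK (~(PMD_SIZE - 1))

    void do_partial(unsigned long s, unsigned long e); /* hypothetical */
    void do_whole_pmd(unsigned long s);                /* hypothetical */

    void walk_range(unsigned long start, unsigned long end)
    {
        /* Head: unaligned start, stop at the next boundary (or end). */
        if (start & (PMD_SIZE - 1)) {
            unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
            unsigned long pre_end = next_page < end ? next_page : end;

            do_partial(start, pre_end);
            start = pre_end;
        }

        /* Body: whole PMDs. */
        while (end - start >= PMD_SIZE) {
            do_whole_pmd(start);
            start += PMD_SIZE;
        }

        /* Tail: anything left is smaller than a PMD. */
        if (start < end)
            do_partial(start, end);
    }
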
