
Searched refs:PMD_SIZE (Results 1 – 25 of 123) sorted by relevance


/linux/arch/m68k/mm/
kmap.c
50 #define IO_SIZE PMD_SIZE
85 virtaddr += PMD_SIZE; in __free_io_area()
86 size -= PMD_SIZE; in __free_io_area()
254 if (!(virtaddr & (PMD_SIZE-1))) in __ioremap()
269 physaddr += PMD_SIZE; in __ioremap()
270 virtaddr += PMD_SIZE; in __ioremap()
271 size -= PMD_SIZE; in __ioremap()
379 virtaddr += PMD_SIZE; in kernel_set_cachemode()
380 size -= PMD_SIZE; in kernel_set_cachemode()
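Several of the kmap.c hits above (__free_io_area(), __ioremap(), kernel_set_cachemode()) share one idiom: walk a virtual range one PMD-sized chunk at a time by advancing the address and shrinking the size in PMD_SIZE steps. Below is a minimal standalone sketch of that idiom, not the kernel code itself: PMD_SHIFT is hard-coded to an assumed 21 (2 MiB PMDs, as on x86-64 with 4 KiB pages) and handle_chunk() is a hypothetical stand-in for the real per-chunk map/unmap work.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative value only; the real PMD_SHIFT is per-architecture. */
	#define PMD_SHIFT 21
	#define PMD_SIZE  (1UL << PMD_SHIFT)

	/* Hypothetical per-chunk hook standing in for the real map/unmap work. */
	static void handle_chunk(uintptr_t virtaddr)
	{
		printf("chunk at %#lx\n", (unsigned long)virtaddr);
	}

	/*
	 * Walk [virtaddr, virtaddr + size) in PMD_SIZE steps, as the kmap.c
	 * hits do.  Assumes size is a multiple of PMD_SIZE, which the kernel
	 * callers guarantee.
	 */
	static void walk_pmd_chunks(uintptr_t virtaddr, unsigned long size)
	{
		while (size) {
			handle_chunk(virtaddr);
			virtaddr += PMD_SIZE;
			size -= PMD_SIZE;
		}
	}

	int main(void)
	{
		walk_pmd_chunks(0, 4 * PMD_SIZE);
		return 0;
	}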
/linux/arch/x86/mm/
init.c
355 unsigned long start = round_down(mr[i].start, PMD_SIZE); in adjust_range_page_size_mask()
356 unsigned long end = round_up(mr[i].end, PMD_SIZE); in adjust_range_page_size_mask()
422 end_pfn = PFN_DOWN(PMD_SIZE); in split_mem_range()
424 end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
426 end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
436 start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
438 end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
441 if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE))) in split_mem_range()
442 end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
463 start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range()
[all …]
init_64.c
396 for (; size; phys += PMD_SIZE, size -= PMD_SIZE) { in __init_extra_mapping()
448 unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; in cleanup_highmap()
459 for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { in cleanup_highmap()
536 paddr_next = (paddr & PMD_MASK) + PMD_SIZE; in phys_pmd_init()
866 ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start); in vmemmap_flush_unused_pmd()
874 unsigned long start = ALIGN_DOWN(addr, PMD_SIZE); in vmemmap_pmd_is_unused()
883 return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE); in vmemmap_pmd_is_unused()
906 if (likely(IS_ALIGNED(end, PMD_SIZE))) in vmemmap_use_sub_pmd()
924 const unsigned long page = ALIGN_DOWN(start, PMD_SIZE); in vmemmap_use_new_sub_pmd()
937 if (!IS_ALIGNED(start, PMD_SIZE)) in vmemmap_use_new_sub_pmd()
[all …]
/linux/arch/s390/mm/
vmem.c
105 ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start); in vmemmap_flush_unused_sub_pmd()
128 if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE))) in vmemmap_use_sub_pmd()
138 unsigned long page = ALIGN_DOWN(start, PMD_SIZE); in vmemmap_use_new_sub_pmd()
146 if (!IS_ALIGNED(start, PMD_SIZE)) in vmemmap_use_new_sub_pmd()
153 if (!IS_ALIGNED(end, PMD_SIZE)) in vmemmap_use_new_sub_pmd()
160 unsigned long page = ALIGN_DOWN(start, PMD_SIZE); in vmemmap_unuse_sub_pmd()
164 return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE); in vmemmap_unuse_sub_pmd()
240 if (IS_ALIGNED(addr, PMD_SIZE) && in modify_pmd_table()
241 IS_ALIGNED(next, PMD_SIZE)) { in modify_pmd_table()
243 vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE), altmap); in modify_pmd_table()
[all …]
/linux/arch/riscv/mm/
init.c
239 vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK; in setup_bootmem()
544 if (sz == PMD_SIZE) { in create_pmd_mapping()
762 !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE) in best_map_size()
763 return PMD_SIZE; in best_map_size()
890 PMD_SIZE, PAGE_KERNEL_EXEC); in set_satp_mode()
892 set_satp_mode_pmd + PMD_SIZE, in set_satp_mode()
893 set_satp_mode_pmd + PMD_SIZE, in set_satp_mode()
894 PMD_SIZE, PAGE_KERNEL_EXEC); in set_satp_mode()
951 for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE) in create_kernel_page_table()
954 PMD_SIZE, PAGE_KERNEL_EXEC); in create_kernel_page_table()
[all …]
hugetlbpage.c
56 if (sz == PMD_SIZE) { in huge_pte_alloc()
112 if (sz == PMD_SIZE) in huge_pte_offset()
137 case PMD_SIZE: in hugetlb_mask_last_page()
138 return PUD_SIZE - PMD_SIZE; in hugetlb_mask_last_page()
140 return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER); in hugetlb_mask_last_page()
228 else if (sz >= PMD_SIZE) in num_contig_ptes_from_size()
tlbflush.c
192 else if (stride_size >= PMD_SIZE) in flush_tlb_range()
193 stride_size = PMD_SIZE; in flush_tlb_range()
214 start, end - start, PMD_SIZE); in flush_pmd_tlb_range()
/linux/arch/sh/include/asm/
pgtable-3level.h
23 #define PMD_SIZE (1UL << PMD_SHIFT) macro
24 #define PMD_MASK (~(PMD_SIZE-1))
26 #define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)
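The header hits here and further down (pgtable_32_types.h, pgtable-nopmd.h, pgtable-4k.h, pgtable.h, pgtable_mm.h) all derive PMD_SIZE and PMD_MASK the same way from PMD_SHIFT. The standalone sketch below shows the resulting alignment arithmetic that most of the other hits rely on (round an address down with `& PMD_MASK`, round up with `(addr + PMD_SIZE - 1) & PMD_MASK`). PMD_SHIFT is an assumed 21 for illustration; the real value is architecture-specific.

	#include <stdio.h>

	/* Assumed value for illustration; the real PMD_SHIFT is per-architecture. */
	#define PMD_SHIFT 21
	#define PMD_SIZE  (1UL << PMD_SHIFT)   /* bytes covered by one PMD entry   */
	#define PMD_MASK  (~(PMD_SIZE - 1))    /* clears the offset within a PMD   */

	int main(void)
	{
		unsigned long addr = 0x40123456UL;

		unsigned long down = addr & PMD_MASK;                   /* round down */
		unsigned long up   = (addr + PMD_SIZE - 1) & PMD_MASK;  /* round up   */

		printf("PMD_SIZE = %lu bytes\n", PMD_SIZE);
		printf("%#lx rounds down to %#lx, up to %#lx\n", addr, down, up);
		return 0;
	}

With the assumed shift, PMD_SIZE is 2 MiB, so 0x40123456 rounds down to 0x40000000 and up to 0x40200000, which is exactly the shape of the setup_bootmem() and fill_pmd_gaps() hits in this listing.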
/linux/arch/parisc/kernel/
pci-dma.c
85 if (end > PMD_SIZE) in map_pte_uncached()
86 end = PMD_SIZE; in map_pte_uncached()
120 vaddr = (vaddr + PMD_SIZE) & PMD_MASK; in map_pmd_uncached()
121 orig_vaddr += PMD_SIZE; in map_pmd_uncached()
170 if (end > PMD_SIZE) in unmap_uncached_pte()
171 end = PMD_SIZE; in unmap_uncached_pte()
210 vaddr = (vaddr + PMD_SIZE) & PMD_MASK; in unmap_uncached_pmd()
211 orig_vaddr += PMD_SIZE; in unmap_uncached_pmd()
/linux/arch/arm64/mm/
hugetlbpage.c
60 case PMD_SIZE: in __hugetlb_valid_size()
95 *pgsize = PMD_SIZE; in find_num_contig()
109 *pgsize = PMD_SIZE; in num_contig_ptes()
267 } else if (sz == PMD_SIZE) { in huge_pte_alloc()
311 if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) && in huge_pte_offset()
336 case PMD_SIZE: in hugetlb_mask_last_page()
337 return PUD_SIZE - PMD_SIZE; in hugetlb_mask_last_page()
339 return PMD_SIZE - CONT_PTE_SIZE; in hugetlb_mask_last_page()
360 case PMD_SIZE: in arch_make_huge_pte()
/linux/arch/arm64/kvm/hyp/nvhe/
mm.c
335 phys = ALIGN(hyp_memory[i].base, PMD_SIZE); in create_fixblock()
336 if (phys + PMD_SIZE < (hyp_memory[i].base + hyp_memory[i].size)) in create_fixblock()
344 addr = ALIGN(__io_map_base, PMD_SIZE); in create_fixblock()
345 ret = __pkvm_alloc_private_va_range(addr, PMD_SIZE); in create_fixblock()
349 ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PMD_SIZE, phys, PAGE_HYP); in create_fixblock()
353 ret = kvm_pgtable_walk(&pkvm_pgtable, addr, PMD_SIZE, &walker); in create_fixblock()
367 *size = PMD_SIZE; in hyp_fixblock_map()
/linux/arch/x86/include/asm/
pgtable_32_types.h
12 # define PMD_SIZE (1UL << PMD_SHIFT) macro
13 # define PMD_MASK (~(PMD_SIZE - 1))
/linux/arch/x86/boot/startup/
sme.c
92 static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
193 ppd->vaddr += PMD_SIZE; in __sme_map_range_pmd()
194 ppd->paddr += PMD_SIZE; in __sme_map_range_pmd()
220 ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE); in __sme_map_range()
319 kernel_end = ALIGN((unsigned long)rip_rel_ptr(_end), PMD_SIZE); in sme_encrypt_kernel()
346 execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE; in sme_encrypt_kernel()
369 workarea_end = ALIGN(workarea_start + workarea_len, PMD_SIZE); in sme_encrypt_kernel()
map_kernel.c
53 for (; paddr < paddr_end; paddr += PMD_SIZE) { in sme_postprocess_startup()
179 for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) { in __startup_64()
182 pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE; in __startup_64()
/linux/arch/powerpc/mm/book3s64/
radix_pgtable.c
102 if (map_page_size == PMD_SIZE) { in early_map_kernel_page()
165 if (map_page_size == PMD_SIZE) { in __map_kernel_page()
329 } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE && in create_physical_mapping()
331 mapping_size = PMD_SIZE; in create_physical_mapping()
740 unsigned long start = ALIGN_DOWN(addr, PMD_SIZE); in vmemmap_pmd_is_unused()
742 return !vmemmap_populated(start, PMD_SIZE); in vmemmap_pmd_is_unused()
835 if (IS_ALIGNED(addr, PMD_SIZE) && in remove_pmd_table()
836 IS_ALIGNED(next, PMD_SIZE)) { in remove_pmd_table()
838 free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE)); in remove_pmd_table()
844 free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE)); in remove_pmd_table()
[all …]
/linux/arch/loongarch/kvm/
mmu.c
399 if (IS_ALIGNED(size, PMD_SIZE) && IS_ALIGNED(gpa_start, PMD_SIZE) in kvm_arch_prepare_memory_region()
400 && IS_ALIGNED(hva_start, PMD_SIZE)) in kvm_arch_prepare_memory_region()
427 gpa_offset = gpa_start & (PMD_SIZE - 1); in kvm_arch_prepare_memory_region()
428 hva_offset = hva_start & (PMD_SIZE - 1); in kvm_arch_prepare_memory_region()
433 gpa_offset = PMD_SIZE; in kvm_arch_prepare_memory_region()
434 if ((size + gpa_offset) < (PMD_SIZE * 2)) in kvm_arch_prepare_memory_region()
638 return (hva >= ALIGN(start, PMD_SIZE)) && (hva < ALIGN_DOWN(end, PMD_SIZE)); in fault_supports_huge_mapping()
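The IS_ALIGNED()/ALIGN()/ALIGN_DOWN() hits in this mmu.c block (and in the s390, powerpc and x86 results above) reduce to one question: does an address fall inside the part of a range that is covered by whole, PMD-aligned blocks? The sketch below mirrors the shape of the fault_supports_huge_mapping() hit, not its exact kernel logic; the alignment helpers and PMD_SHIFT value are local illustrative stand-ins, not the kernel macros.

	#include <stdbool.h>
	#include <stdint.h>

	/* Illustrative stand-ins for the kernel's alignment helpers. */
	#define PMD_SHIFT 21                 /* assumed: 2 MiB PMDs */
	#define PMD_SIZE  (1UL << PMD_SHIFT)
	#define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))
	#define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))

	/*
	 * True if addr lies between the first and last PMD boundary inside
	 * [start, end), i.e. in the window that whole PMD-sized mappings
	 * can cover.
	 */
	static bool addr_in_pmd_aligned_window(uintptr_t addr, uintptr_t start,
					       uintptr_t end)
	{
		return addr >= ALIGN_UP(start, PMD_SIZE) &&
		       addr <  ALIGN_DOWN(end, PMD_SIZE);
	}

	int main(void)
	{
		uintptr_t start = 0x40010000, end = 0x40810000;

		/* Aligned window is [0x40200000, 0x40800000); 0x40300000 is inside. */
		return addr_in_pmd_aligned_window(0x40300000, start, end) ? 0 : 1;
	}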
/linux/arch/loongarch/include/asm/
pgtable.h
30 #define PMD_SIZE (1UL << PMD_SHIFT) macro
31 #define PMD_MASK (~(PMD_SIZE-1))
35 #define PMD_SIZE (1UL << PMD_SHIFT) macro
36 #define PMD_MASK (~(PMD_SIZE-1))
109 …R_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - …
113 …D * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - …
116 #define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
/linux/arch/nios2/mm/
ioremap.c
33 if (end > PMD_SIZE) in remap_area_pte()
34 end = PMD_SIZE; in remap_area_pte()
70 address = (address + PMD_SIZE) & PMD_MASK; in remap_area_pmd()
/linux/include/asm-generic/
pgtable-nopmd.h
22 #define PMD_SIZE (1UL << PMD_SHIFT) macro
23 #define PMD_MASK (~(PMD_SIZE-1))
/linux/arch/x86/kernel/
vmlinux.lds.S
68 #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
69 #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE);
80 . = ALIGN(PMD_SIZE); \
86 . = ALIGN(PMD_SIZE); \
/linux/arch/arm64/include/asm/
kernel-pgtable.h
21 #if defined(PMD_SIZE) && PMD_SIZE <= MIN_KIMG_ALIGN
/linux/arch/powerpc/include/asm/nohash/64/
pgtable-4k.h
31 #define PMD_SIZE (1UL << PMD_SHIFT) macro
32 #define PMD_MASK (~(PMD_SIZE-1))
/linux/arch/x86/virt/svm/
sev.c
178 if (IS_ALIGNED(pa, PMD_SIZE)) in __snp_fixup_e820_tables()
197 pa = ALIGN_DOWN(pa, PMD_SIZE); in __snp_fixup_e820_tables()
198 if (e820__mapped_any(pa, pa + PMD_SIZE, E820_TYPE_RAM)) { in __snp_fixup_e820_tables()
200 e820__range_update(pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED); in __snp_fixup_e820_tables()
201 e820__range_update_table(e820_table_kexec, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED); in __snp_fixup_e820_tables()
202 if (!memblock_is_region_reserved(pa, PMD_SIZE)) in __snp_fixup_e820_tables()
203 memblock_reserve(pa, PMD_SIZE); in __snp_fixup_e820_tables()
/linux/arch/arm/mm/
mmu.c
1114 next = (addr + PMD_SIZE - 1) & PMD_MASK; in fill_pmd_gaps()
1206 if (!IS_ALIGNED(block_start, PMD_SIZE)) { in adjust_lowmem_bounds()
1209 len = round_up(block_start, PMD_SIZE) - block_start; in adjust_lowmem_bounds()
1242 if (!IS_ALIGNED(block_start, PMD_SIZE)) in adjust_lowmem_bounds()
1244 else if (!IS_ALIGNED(block_end, PMD_SIZE)) in adjust_lowmem_bounds()
1263 memblock_limit = round_down(memblock_limit, PMD_SIZE); in adjust_lowmem_bounds()
1293 for (addr = 0; addr < KASAN_SHADOW_START; addr += PMD_SIZE) in prepare_page_table()
1301 for (addr = KASAN_SHADOW_END; addr < MODULES_VADDR; addr += PMD_SIZE) in prepare_page_table()
1304 for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE) in prepare_page_table()
1310 addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK; in prepare_page_table()
[all …]
/linux/arch/m68k/include/asm/
pgtable_mm.h
39 #define PMD_SIZE (1UL << PMD_SHIFT) macro
40 #define PMD_MASK (~(PMD_SIZE-1))
