Lines Matching full:pmd
75 DEFINE_POPULATE(pud_populate, pud, pmd, init) in DEFINE_POPULATE()
76 DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init) in DEFINE_POPULATE()
90 DEFINE_ENTRY(pmd, pmd, init)
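The *_init helpers that appear throughout this listing (set_pmd_init at 546, pmd_populate_kernel_init at 596, pud_populate_init at 683) are generated by these two macros. A hedged reconstruction of the DEFINE_POPULATE generator, recalled from init_64.c and possibly differing in detail across kernel versions: when init is true the _safe variant is used, which BUGs if the entry is already present; otherwise the plain setter is used and may legitimately overwrite.

    #define DEFINE_POPULATE(fname, type1, type2, init)              \
    static inline void fname##_init(struct mm_struct *mm,           \
                    type1##_t *arg1, type2##_t *arg2, bool init)    \
    {                                                               \
            if (init)                                               \
                    fname##_safe(mm, arg1, arg2);                   \
            else                                                    \
                    fname(mm, arg1, arg2);                          \
    }

DEFINE_ENTRY at 90 does the same for the set_pmd()/set_pmd_safe() pair.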
294 pmd_t *pmd = (pmd_t *) spp_getpage(); in fill_pmd() local
295 pud_populate(&init_mm, pud, pmd); in fill_pmd()
296 if (pmd != pmd_offset(pud, 0)) in fill_pmd()
298 pmd, pmd_offset(pud, 0)); in fill_pmd()
303 static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr) in fill_pte() argument
305 if (pmd_none(*pmd)) { in fill_pte()
307 pmd_populate_kernel(&init_mm, pmd, pte); in fill_pte()
308 if (pte != pte_offset_kernel(pmd, 0)) in fill_pte()
311 return pte_offset_kernel(pmd, vaddr); in fill_pte()
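The two fill helpers implement the same allocate-on-demand pattern at adjacent levels; the matches at 294-298 and 305-311 are its core. A hedged reconstruction of fill_pmd (the unmatched lines, including the printk text, are filled in from context and may differ between versions):

    static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
    {
            if (pud_none(*pud)) {
                    /* No PMD table yet: take a page and hook it under the PUD. */
                    pmd_t *pmd = (pmd_t *) spp_getpage();
                    pud_populate(&init_mm, pud, pmd);
                    /* The table we installed must be the one we read back. */
                    if (pmd != pmd_offset(pud, 0))
                            printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
                                   pmd, pmd_offset(pud, 0));
            }
            return pmd_offset(pud, vaddr);
    }

fill_pte at 303-311 repeats this one level down, with pmd_populate_kernel() and pte_offset_kernel() in place of the PUD-level calls.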
316 pmd_t *pmd = fill_pmd(pud, vaddr); in __set_pte_vaddr() local
317 pte_t *pte = fill_pte(pmd, vaddr); in __set_pte_vaddr()
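__set_pte_vaddr then simply chains the two fill helpers and installs the leaf entry. A hedged sketch; the exact TLB-flush spelling has changed across versions (older trees use __flush_tlb_one_kernel()):

    static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
    {
            pmd_t *pmd = fill_pmd(pud, vaddr);
            pte_t *pte = fill_pte(pmd, vaddr);

            set_pte(pte, new_pte);

            /*
             * Flushing this one kernel mapping suffices; global (PGE)
             * mappings can only be flushed by reloading CR3 anyway.
             */
            flush_tlb_one_kernel(vaddr);
    }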
375 pmd_t *pmd; in populate_extra_pte() local
377 pmd = populate_extra_pmd(vaddr); in populate_extra_pte()
378 return fill_pte(pmd, vaddr); in populate_extra_pte()
390 pmd_t *pmd; in __init_extra_mapping() local
411 pmd = (pmd_t *) spp_getpage(); in __init_extra_mapping()
412 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | in __init_extra_mapping()
415 pmd = pmd_offset(pud, phys); in __init_extra_mapping()
416 BUG_ON(!pmd_none(*pmd)); in __init_extra_mapping()
417 set_pmd(pmd, __pmd(phys | pgprot_val(prot))); in __init_extra_mapping()
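__init_extra_mapping builds 2 MiB kernel mappings directly, one PMD per iteration; the matches at 411-417 are the bottom of its walk. A hedged sketch of that tail, with the pgd/p4d levels (handled the same way above line 411) omitted:

    /* Make sure a PMD table hangs under this PUD, then write a 2 MiB leaf. */
    if (pud_none(*pud)) {
            pmd = (pmd_t *) spp_getpage();
            set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
    }
    pmd = pmd_offset(pud, phys);
    BUG_ON(!pmd_none(*pmd));        /* may only populate non-present entries */
    set_pmd(pmd, __pmd(phys | pgprot_val(prot)));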
449 pmd_t *pmd = level2_kernel_pgt; in cleanup_highmap() local
459 for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { in cleanup_highmap()
460 if (pmd_none(*pmd)) in cleanup_highmap()
463 set_pmd(pmd, __pmd(0)); in cleanup_highmap()
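cleanup_highmap walks level2_kernel_pgt in PMD_SIZE steps and zaps every PMD outside the range the kernel image actually needs. A hedged sketch of the loop around 459-463, where end is _brk_end rounded up to a PMD boundary per the function's setup:

    for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
            if (pmd_none(*pmd))
                    continue;
            /* Keep only the PMDs covering [_text, end]; clear the rest. */
            if (vaddr < (unsigned long) _text || vaddr > end)
                    set_pmd(pmd, __pmd(0));
    }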
521 * Create PMD level page table mapping for physical addresses. The virtual
535 pmd_t *pmd = pmd_page + pmd_index(paddr); in phys_pmd_init() local
546 set_pmd_init(pmd, __pmd(0), init); in phys_pmd_init()
550 if (!pmd_none(*pmd)) { in phys_pmd_init()
551 if (!pmd_leaf(*pmd)) { in phys_pmd_init()
553 pte = (pte_t *)pmd_page_vaddr(*pmd); in phys_pmd_init()
578 new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); in phys_pmd_init()
584 set_pmd_init(pmd, in phys_pmd_init()
596 pmd_populate_kernel_init(&init_mm, pmd, pte, init); in phys_pmd_init()
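The matches at 535-596 are the interesting decisions in phys_pmd_init. A condensed, hedged sketch of the per-entry logic; loop bookkeeping, page accounting, the e820 checks, and init_mm.page_table_lock locking are omitted:

    pmd_t *pmd = pmd_page + pmd_index(paddr);

    if (paddr >= paddr_end) {
            /* Past the requested range: clear the slot (boot-time only). */
            set_pmd_init(pmd, __pmd(0), init);
    } else if (!pmd_none(*pmd) && !pmd_leaf(*pmd)) {
            /* A PTE table already hangs here: descend and fill it in place. */
            pte = (pte_t *)pmd_page_vaddr(*pmd);
            paddr_last = phys_pte_init(pte, paddr, paddr_end, prot, init);
    } else if (page_size_mask & (1 << PG_LEVEL_2M)) {
            /* 2 MiB pages permitted: reuse an existing leaf or write a new one. */
            if (pmd_none(*pmd))
                    set_pmd_init(pmd,
                                 pfn_pmd((paddr & PMD_MASK) >> PAGE_SHIFT,
                                         __pgprot(pgprot_val(prot) | _PAGE_PSE)),
                                 init);
    } else {
            /* 4 KiB fallback: keep the old leaf's protections, build a PTE table. */
            if (!pmd_none(*pmd))
                    new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
            pte = alloc_low_page();
            paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);
            pmd_populate_kernel_init(&init_mm, pmd, pte, init);
    }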
620 pmd_t *pmd; in phys_pud_init() local
639 pmd = pmd_offset(pud, 0); in phys_pud_init()
640 paddr_last = phys_pmd_init(pmd, paddr, in phys_pud_init()
678 pmd = alloc_low_page(); in phys_pud_init()
679 paddr_last = phys_pmd_init(pmd, paddr, paddr_end, in phys_pud_init()
683 pud_populate_init(&init_mm, pud, pmd, init); in phys_pud_init()
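phys_pud_init repeats the same scheme one level up: when no 1 GiB mapping is used, each slot is delegated to phys_pmd_init and the resulting table installed, as at 678-683. A hedged sketch of that tail:

    pmd = alloc_low_page();
    paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
                               page_size_mask, prot, init);

    spin_lock(&init_mm.page_table_lock);
    pud_populate_init(&init_mm, pud, pmd, init);
    spin_unlock(&init_mm.page_table_lock);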
797 * The virtual and physical addresses have to be aligned on PMD level
811 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
874 /* Returns true if the PMD is completely unused and thus it can be freed */
945 * consecutive sections. Remember for the last added PMD where the in vmemmap_use_new_sub_pmd()
1047 static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) in free_pte_table() argument
1059 free_pagetable(pmd_page(*pmd), 0); in free_pte_table()
1061 pmd_clear(pmd); in free_pte_table()
1067 pmd_t *pmd; in free_pmd_table() local
1071 pmd = pmd_start + i; in free_pmd_table()
1072 if (!pmd_none(*pmd)) in free_pmd_table()
1076 /* free a pmd table */ in free_pmd_table()
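free_pmd_table frees a PMD table only once every one of its slots is empty, then detaches it from the PUD. A hedged reconstruction around the matches at 1067-1076:

    static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
    {
            pmd_t *pmd;
            int i;

            /* Bail out if even one of the PTRS_PER_PMD entries is still live. */
            for (i = 0; i < PTRS_PER_PMD; i++) {
                    pmd = pmd_start + i;
                    if (!pmd_none(*pmd))
                            return;
            }

            /* free a pmd table */
            free_pagetable(pud_page(*pud), 0);
            spin_lock(&init_mm.page_table_lock);
            pud_clear(pud);
            spin_unlock(&init_mm.page_table_lock);
    }

free_pte_table at 1047-1061 is the same pattern one level down.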
1150 pmd_t *pmd; in remove_pmd_table() local
1152 pmd = pmd_start + pmd_index(addr); in remove_pmd_table()
1153 for (; addr < end; addr = next, pmd++) { in remove_pmd_table()
1156 if (!pmd_present(*pmd)) in remove_pmd_table()
1159 if (pmd_leaf(*pmd)) { in remove_pmd_table()
1163 free_hugepage_table(pmd_page(*pmd), in remove_pmd_table()
1167 pmd_clear(pmd); in remove_pmd_table()
1171 free_hugepage_table(pmd_page(*pmd), in remove_pmd_table()
1174 pmd_clear(pmd); in remove_pmd_table()
1180 pte_base = (pte_t *)pmd_page_vaddr(*pmd); in remove_pmd_table()
1182 free_pte_table(pte_base, pmd); in remove_pmd_table()
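remove_pmd_table distinguishes 2 MiB leaves (freed and cleared in place, 1159-1174) from PMDs that point at PTE tables (descended into and then possibly freed, 1180-1182). A condensed, hedged sketch; the real loop also special-cases partially used vmemmap PMDs (see 945) and alignment:

    for (; addr < end; addr = next, pmd++) {
            next = pmd_addr_end(addr, end);

            if (!pmd_present(*pmd))
                    continue;

            if (pmd_leaf(*pmd)) {
                    /* 2 MiB leaf: release the huge page, then the entry. */
                    free_hugepage_table(pmd_page(*pmd), altmap);
                    spin_lock(&init_mm.page_table_lock);
                    pmd_clear(pmd);
                    spin_unlock(&init_mm.page_table_lock);
                    continue;
            }

            /* Table case: empty the PTE table, then free it if fully unused. */
            pte_base = (pte_t *)pmd_page_vaddr(*pmd);
            remove_pte_table(pte_base, addr, next, direct);
            free_pte_table(pte_base, pmd);
    }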
1419 * is a full PMD. If we aligned _brk_end to PAGE_SIZE we would in mark_rodata_ro()
1420 * split the PMD and the remainder between _brk_end and the end in mark_rodata_ro()
1421 * of the PMD will remain mapped executable. in mark_rodata_ro()
1423 * Any PMD which was set up after the one which covers _brk_end in mark_rodata_ro()
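The comment's point in code: _brk_end is rounded up to a full PMD so that set_memory_nx() covers the whole 2 MiB mapping instead of splitting it. A hedged excerpt of the two lines that follow in mark_rodata_ro():

    /* Cover the entire PMD so nothing past _brk_end stays executable. */
    all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
    set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);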
1519 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
1525 void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node, in vmemmap_set_pmd() argument
1532 set_pmd(pmd, __pmd(pte_val(entry))); in vmemmap_set_pmd()
1537 pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n", in vmemmap_set_pmd()
1552 int __meminit vmemmap_check_pmd(pmd_t *pmd, int node, in vmemmap_check_pmd() argument
1555 int large = pmd_leaf(*pmd); in vmemmap_check_pmd()
1557 if (pmd_leaf(*pmd)) { in vmemmap_check_pmd()
1558 vmemmap_verify((pte_t *)pmd, node, addr, next); in vmemmap_check_pmd()
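vmemmap_set_pmd writes one 2 MiB leaf for a vmemmap chunk; line 1532 installs the entry built from the freshly allocated block p. A hedged sketch of the essential two lines (the contiguity bookkeeping behind the pr_debug at 1537 is omitted):

    /* Turn the block at p into a 2 MiB kernel mapping and plant it in the PMD. */
    pte_t entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL_LARGE);
    set_pmd(pmd, __pmd(pte_val(entry)));

vmemmap_check_pmd (1552-1558) then reports whether an existing PMD is already such a leaf, so the generic vmemmap code knows that range is populated.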
1598 pmd_t *pmd; in register_page_bootmem_memmap() local
1626 pmd = pmd_offset(pud, addr); in register_page_bootmem_memmap()
1627 if (pmd_none(*pmd)) { in register_page_bootmem_memmap()
1632 if (!boot_cpu_has(X86_FEATURE_PSE) || !pmd_leaf(*pmd)) { in register_page_bootmem_memmap()
1634 get_page_bootmem(section_nr, pmd_page(*pmd), in register_page_bootmem_memmap()
1637 pte = pte_offset_kernel(pmd, addr); in register_page_bootmem_memmap()
1645 page = pmd_page(*pmd); in register_page_bootmem_memmap()
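register_page_bootmem_memmap accounts the memmap's page-table pages according to how the range is mapped: with 4 KiB mappings it tags the PMD page as MIX_SECTION_INFO and each mapped PTE page as SECTION_INFO; with a 2 MiB leaf every page under the PMD belongs to the section. A condensed, hedged sketch of the branch around 1626-1645:

    if (!boot_cpu_has(X86_FEATURE_PSE) || !pmd_leaf(*pmd)) {
            /* 4 KiB case: the PMD page is shared bookkeeping ... */
            get_page_bootmem(section_nr, pmd_page(*pmd), MIX_SECTION_INFO);

            /* ... and each present PTE page belongs to the section. */
            pte = pte_offset_kernel(pmd, addr);
            if (!pte_none(*pte))
                    get_page_bootmem(section_nr, pte_page(*pte), SECTION_INFO);
    } else {
            /* 2 MiB leaf: every one of its pages belongs to this section. */
            nr_pmd_pages = 1 << get_order(PMD_SIZE);
            page = pmd_page(*pmd);
            while (nr_pmd_pages--)
                    get_page_bootmem(section_nr, page++, SECTION_INFO);
    }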
1657 pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n", in vmemmap_populate_print_last()