Lines Matching full:pmd (identifier search results; each match shows the source line number, the matched line, the containing function, and whether pmd is an argument or a local there)
28 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) in ___pmd_free_tlb() argument
30 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); in ___pmd_free_tlb()
38 tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd)); in ___pmd_free_tlb()
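
The ___pmd_free_tlb() matches above do not free the pmd page directly; they hand it to the mmu_gather batch so it is released only after the TLB flush. Below is a minimal user-space sketch of that defer-until-flush pattern; every model_* name is an illustrative stand-in, not a kernel API.

/*
 * User-space analogy (not the mmu_gather implementation) of the deferred
 * free performed by tlb_remove_ptdesc(): the pmd page is queued on a batch
 * and only released when the batch is "flushed", which in the kernel
 * corresponds to the TLB shootdown.
 */
#include <stdlib.h>

#define MODEL_BATCH_MAX 8

struct model_gather {
	void *pages[MODEL_BATCH_MAX];
	int nr;
};

/* Queue a page-table page for freeing; it stays valid until the flush. */
static void model_tlb_remove_page(struct model_gather *tlb, void *page)
{
	if (tlb->nr < MODEL_BATCH_MAX)
		tlb->pages[tlb->nr++] = page;
}

/* "Flush": the point after which the queued pages are actually freed. */
static void model_tlb_flush(struct model_gather *tlb)
{
	for (int i = 0; i < tlb->nr; i++)
		free(tlb->pages[i]);
	tlb->nr = 0;
}

int main(void)
{
	struct model_gather tlb = { .nr = 0 };
	void *pmd_page = malloc(4096);     /* stand-in for a pmd page */

	model_tlb_remove_page(&tlb, pmd_page);
	model_tlb_flush(&tlb);
	return 0;
}
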
105 * kernel pmd is shared. If PAE were not to share the pmd a similar
137 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) in pud_populate() argument
139 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); in pud_populate()
142 reserved at the pmd (PDPT) level. */ in pud_populate()
143 set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT)); in pud_populate()
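
pud_populate() on 32-bit PAE builds the pud entry directly from the pmd page's physical address or'd with _PAGE_PRESENT, as line 143 above shows. The stand-alone model below replays that encoding with assumed x86-like constants; fake_pa and make_pud_entry() are hypothetical names used only to keep the example runnable.

/*
 * Stand-alone model (not kernel code) of the pud entry built by
 * pud_populate(): the pmd page's physical address plus _PAGE_PRESENT.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT    12
#define _PAGE_PRESENT 0x001ULL

typedef uint64_t pudval_t;

/* Build the entry exactly like __pud(__pa(pmd) | _PAGE_PRESENT). */
static pudval_t make_pud_entry(uint64_t pmd_phys)
{
	return pmd_phys | _PAGE_PRESENT;
}

int main(void)
{
	uint64_t fake_pa = 0x12345000ULL;   /* hypothetical pmd page address */
	pudval_t pud = make_pud_entry(fake_pa);

	printf("pud entry = %#llx (pfn %#llx, present=%llu)\n",
	       (unsigned long long)pud,
	       (unsigned long long)(pud >> PAGE_SHIFT),
	       (unsigned long long)(pud & _PAGE_PRESENT));
	return 0;
}
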
187 pmd_t *pmd = NULL; in preallocate_pmds() local
199 pmd = ptdesc_address(ptdesc); in preallocate_pmds()
202 pmds[i] = pmd; in preallocate_pmds()
214 * Mop up any pmd pages which may still be attached to the pgd.
215 * Normally they will be freed by munmap/exit_mmap, but any pmd we
224 pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); in mop_up_one_pmd() local
229 pmd_free(mm, pmd); in mop_up_one_pmd()
263 pmd_t *pmd = pmds[i]; in pgd_prepopulate_pmd() local
266 memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]), in pgd_prepopulate_pmd()
269 pud_populate(mm, pud, pmd); in pgd_prepopulate_pmd()
290 pmd_t *pmd = pmds[i]; in pgd_prepopulate_user_pmd() local
292 memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd), in pgd_prepopulate_user_pmd()
295 pud_populate(mm, u_pud, pmd); in pgd_prepopulate_user_pmd()
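
The preallocate_pmds()/pgd_prepopulate_pmd() matches show that each new pgd receives pmd pages whose entries are copied from the kernel's, so the shared kernel mappings stay in sync. A user-space sketch of that copy step follows, assuming a PAE-like PTRS_PER_PMD; prepopulate_one() is a made-up helper, not a kernel function.

/*
 * User-space sketch of the prepopulate step: each preallocated pmd page
 * gets a copy of the corresponding kernel pmd page before being installed
 * into the new pgd via pud_populate().
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define PTRS_PER_PMD 512
typedef uint64_t pmdval_t;

/* Copy one kernel pmd page into a freshly allocated pmd page. */
static pmdval_t *prepopulate_one(const pmdval_t *kernel_pmd)
{
	pmdval_t *pmd = calloc(PTRS_PER_PMD, sizeof(*pmd));

	if (pmd)
		memcpy(pmd, kernel_pmd, PTRS_PER_PMD * sizeof(*pmd));
	return pmd;
}

int main(void)
{
	pmdval_t kernel_pmd[PTRS_PER_PMD] = { [0] = 0x1000 | 0x067 }; /* toy entry */
	pmdval_t *pmd = prepopulate_one(kernel_pmd);
	int ok = pmd && pmd[0] == kernel_pmd[0];

	free(pmd);
	return ok ? 0 : 1;
}
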
415 * We had a write-protection fault here and changed the pmd in pmdp_set_access_flags()
655 * pmd_set_huge - Set up kernel PMD mapping
656 * @pmd: Pointer to the PMD entry
657 * @addr: Virtual address associated with the PMD entry
664 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) in pmd_set_huge() argument
676 if (pmd_present(*pmd) && !pmd_leaf(*pmd)) in pmd_set_huge()
679 set_pte((pte_t *)pmd, pfn_pte( in pmd_set_huge()
703 * pmd_clear_huge - Clear kernel PMD mapping when it is set
704 * @pmd: Pointer to the PMD entry to clear.
706 * Returns 1 on success and 0 on failure (no PMD map is found).
708 int pmd_clear_huge(pmd_t *pmd) in pmd_clear_huge() argument
710 if (pmd_leaf(*pmd)) { in pmd_clear_huge()
711 pmd_clear(pmd); in pmd_clear_huge()
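
pmd_set_huge() installs a leaf (PSE) entry mapping a whole 2MiB range at once, refusing when a non-leaf table is already present, and pmd_clear_huge() only clears entries that are leaves. The simplified, runnable model below mirrors that behaviour on a single entry; the flag values are assumed x86-style constants and the alignment check is added for the demo.

/*
 * Simplified model (an illustration, not the kernel code) of
 * pmd_set_huge()/pmd_clear_huge() acting on one entry.
 */
#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE      (1ULL << 21)      /* 2MiB */
#define _PAGE_PRESENT 0x001ULL
#define _PAGE_RW      0x002ULL
#define _PAGE_PSE     0x080ULL          /* leaf (huge) bit */

typedef uint64_t pmdval_t;

static int model_pmd_set_huge(pmdval_t *pmd, uint64_t phys)
{
	if (phys & (PMD_SIZE - 1))
		return 0;                       /* not 2MiB aligned: refuse */
	*pmd = phys | _PAGE_PRESENT | _PAGE_RW | _PAGE_PSE;
	return 1;
}

static int model_pmd_clear_huge(pmdval_t *pmd)
{
	if (*pmd & _PAGE_PSE) {                 /* only clear leaf mappings */
		*pmd = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	pmdval_t pmd = 0;

	model_pmd_set_huge(&pmd, 0x40000000ULL);   /* hypothetical 2MiB-aligned pa */
	printf("huge pmd = %#llx\n", (unsigned long long)pmd);
	printf("cleared  = %d\n", model_pmd_clear_huge(&pmd));
	return 0;
}
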
720 * pud_free_pmd_page - Clear PUD entry and free PMD page
731 pmd_t *pmd, *pmd_sv; in pud_free_pmd_page() local
735 pmd = pud_pgtable(*pud); in pud_free_pmd_page()
741 pmd_sv[i] = pmd[i]; in pud_free_pmd_page()
742 if (!pmd_none(pmd[i])) in pud_free_pmd_page()
743 pmd_clear(&pmd[i]); in pud_free_pmd_page()
760 pmd_free(&init_mm, pmd); in pud_free_pmd_page()
766 * pmd_free_pte_page - Clear PMD entry and free PTE page.
767 * @pmd: Pointer to the PMD
768 * @addr: Virtual address associated with PMD
770 * Context: The PMD range has been unmapped and TLB purged.
773 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) in pmd_free_pte_page() argument
777 pte = (pte_t *)pmd_page_vaddr(*pmd); in pmd_free_pte_page()
778 pmd_clear(pmd); in pmd_free_pte_page()
792 * does not update sync'd PMD entries. See vmalloc_sync_one().
794 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) in pmd_free_pte_page() argument
796 return pmd_none(*pmd); in pmd_free_pte_page()
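
Both pud_free_pmd_page() and pmd_free_pte_page() follow the same teardown order: clear the upper-level entry, flush the TLB, then free the now-unreferenced lower-level table. Here is a rough user-space analogue of that ordering; struct model_pmd and its field are invented for the illustration, and the flush is only a placeholder comment.

/*
 * User-space analogue (not the kernel path) of the clear-then-free order:
 * drop the upper-level entry first, then release the table it pointed to.
 * Real code must purge the TLB between the two steps.
 */
#include <stdlib.h>

#define PTRS_PER_PTE 512
typedef unsigned long pteval_t;

struct model_pmd {
	pteval_t *pte_page;   /* lower-level table, or NULL if none */
};

/* Mirror of pmd_free_pte_page(); returns 1 if a page was freed. */
static int model_pmd_free_pte_page(struct model_pmd *pmd)
{
	pteval_t *pte = pmd->pte_page;

	if (!pte)
		return 0;
	pmd->pte_page = NULL;      /* pmd_clear() */
	/* ... TLB flush would happen here in the kernel ... */
	free(pte);                 /* pte_free_kernel() */
	return 1;
}

int main(void)
{
	struct model_pmd pmd = { .pte_page = calloc(PTRS_PER_PTE, sizeof(pteval_t)) };

	return model_pmd_free_pte_page(&pmd) ? 0 : 1;
}
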
812 pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in pmd_mkwrite() argument
815 return pmd_mkwrite_shstk(pmd); in pmd_mkwrite()
817 pmd = pmd_mkwrite_novma(pmd); in pmd_mkwrite()
819 return pmd_clear_saveddirty(pmd); in pmd_mkwrite()
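
pmd_mkwrite() chooses between the shadow-stack encoding (Write=0, Dirty=1) and an ordinary writable entry, and on the ordinary path folds the software SavedDirty bit back into Dirty. The bit-level sketch below models that decision; the flag values and the VM_SHADOW_STACK constant are placeholders, not the kernel's definitions.

/*
 * Bit-level sketch (illustrative values, not the kernel's pmd helpers) of
 * the pmd_mkwrite() logic: shadow-stack VMAs get Write=0/Dirty=1, normal
 * VMAs get Write=1 with SavedDirty moved back into Dirty.
 */
#include <stdint.h>
#include <stdio.h>

#define _PAGE_RW          0x002ULL
#define _PAGE_DIRTY       0x040ULL
#define _PAGE_SAVED_DIRTY 0x800ULL   /* assumed software bit for the demo */
#define VM_SHADOW_STACK   0x1UL      /* assumed vma flag for the demo */

typedef uint64_t pmdval_t;

static pmdval_t model_pmd_mkwrite(pmdval_t pmd, unsigned long vm_flags)
{
	if (vm_flags & VM_SHADOW_STACK)
		return (pmd & ~_PAGE_RW) | _PAGE_DIRTY;   /* shstk: Write=0, Dirty=1 */

	pmd |= _PAGE_RW;                                  /* normal writable mapping */
	if (pmd & _PAGE_SAVED_DIRTY)                      /* SavedDirty -> Dirty */
		pmd = (pmd & ~_PAGE_SAVED_DIRTY) | _PAGE_DIRTY;
	return pmd;
}

int main(void)
{
	printf("shstk pmd  = %#llx\n",
	       (unsigned long long)model_pmd_mkwrite(0, VM_SHADOW_STACK));
	printf("normal pmd = %#llx\n",
	       (unsigned long long)model_pmd_mkwrite(_PAGE_SAVED_DIRTY, 0));
	return 0;
}
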
835 void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd) in arch_check_zapped_pmd() argument
839 pmd_shstk(pmd)); in arch_check_zapped_pmd()