1 // SPDX-License-Identifier: GPL-2.0
6 #include <asm/tlb.h>
11 phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
24 void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
29 tlb_remove_page(tlb, ptdesc_page(ptdesc));
33 void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
35 tlb_remove_table(tlb, table);
50 return -EINVAL;
59 return -EINVAL;
64 void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
67 paravirt_tlb_remove_table(tlb, page_ptdesc(pte));
71 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
75 * NOTE! For PAE, any changes to the top page-directory-pointer-table
79 tlb->need_flush_all = 1;
81 paravirt_tlb_remove_table(tlb, virt_to_ptdesc(pmd));
85 void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
88 paravirt_tlb_remove_table(tlb, virt_to_ptdesc(pud));
92 void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
95 paravirt_tlb_remove_table(tlb, virt_to_ptdesc(p4d));
105 list_add(&ptdesc->pt_list, &pgd_list);
112 list_del(&ptdesc->pt_list);
123 virt_to_ptdesc(pgd)->pt_mm = mm;
128 return page_ptdesc(page)->pt_mm;
134 ptes in non-PAE, or shared PMD in PAE), then just copy the
162 * List of all pgd's needed for non-PAE so it can invalidate entries
165 * tactic would be needed. This is essentially codepath-based locking
169 * -- nyc
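A condensed sketch of the constructor behaviour the two comment fragments above describe: when the pgd shares a kernel pagetable level it simply clones the kernel references from swapper_pg_dir, and when the kernel pmd is not shared it puts the pgd on pgd_list so later kernel mapping updates can be propagated to every copy. The config checks are elided and the function name is illustrative; clone_pgd_range(), KERNEL_PGD_BOUNDARY, KERNEL_PGD_PTRS and the pgd_list helpers shown above are existing symbols.

static void pgd_ctor_sketch(struct mm_struct *mm, pgd_t *pgd)
{
	/* Shared kernel level: just copy the references from swapper_pg_dir. */
	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	/* Kernel pmd not shared: track the pgd so kernel mapping updates
	 * can be replayed into every pgd on the list. */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}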
174 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
175 * updating the top-level pagetable entries to guarantee the
177 * all 4 top-level entries are used almost immediately in a
178 * new process's life, we just pre-populate them here.
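A minimal sketch of that pre-population, assuming PAE (where the four PDPT slots are the top-level entries); the real pgd_alloc() path preallocates into an array and handles failures, which is elided here. get_zeroed_page(), p4d_offset()/pud_offset() and pud_populate() are existing kernel APIs.

static void prepopulate_pmds_sketch(struct mm_struct *mm, pgd_t *pgd)
{
	p4d_t *p4d = p4d_offset(pgd, 0);
	pud_t *pud = pud_offset(p4d, 0);
	int i;

	for (i = 0; i < PTRS_PER_PGD; i++, pud++) {
		pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);

		if (pmd)	/* error handling elided */
			pud_populate(mm, pud, pmd);
	}
}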
188 * We allocate separate PMDs for the kernel part of the user page-table
189 * when PTI is enabled. We need them to map the per-process LDT into the
190 * user-space page-table.
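Concretely, the number of extra user-pagetable PMDs is non-zero only when PTI is actually enabled on the CPU; a hedged sketch of the sizing (the macro name is illustrative, boot_cpu_has() and KERNEL_PGD_PTRS are existing symbols):

/* Sketch only: with PTI, one user-pagetable PMD per kernel PGD slot
 * (enough to hold the LDT mapping); without PTI, none at all. */
#define PREALLOCATED_USER_PMDS_SKETCH \
	(boot_cpu_has(X86_FEATURE_PTI) ? KERNEL_PGD_PTRS : 0)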
205 * According to Intel App note "TLBs, Paging-Structure Caches,
206 * and Their Invalidation", April 2007, document 317080-001,
208 * TLB via cr3 if the top-level pgd is changed...
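A sketch of the flush the app note requires, in the shape of the 32-bit PAE pud_populate() path (the paravirt hook and reserved-bit details are elided; set_pud(), __pud(), __pa() and flush_tlb_mm() are existing APIs):

static void pae_pud_populate_sketch(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	/* At the PDPT level only _PAGE_PRESENT is meaningful. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/* Per the app note: the CPU may cache PDPT entries when cr3 is
	 * loaded, so force a cr3 reload (TLB flush) for this mm. */
	flush_tlb_mm(mm);
}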
214 /* No need to prepopulate any pagetable entries in non-PAE modes. */
267 return -ENOMEM;
393 * page for pgd. We can just allocate 32 bytes for the pgd.
394 * During boot time, we create a 32-byte slab for pgd table allocation.
411 * a 32-byte slab for pgd to save memory space.
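A sketch of that boot-time slab setup, with the sizes written out literally (a PAE pgd is just the 4-entry PDPT: 4 x 8 = 32 bytes, and the PDPT must be 32-byte aligned for cr3); kmem_cache_create() is the standard slab API and the function name is illustrative:

static struct kmem_cache *pgd_cache_sketch;

void __init pgd_cache_init_sketch(void)
{
	/* 32-byte objects, 32-byte aligned, instead of a full page. */
	pgd_cache_sketch = kmem_cache_create("pgd_cache", 32, 32,
					     SLAB_PANIC, NULL);
}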
447 mm->pgd = pgd;
461 * Make sure that pre-populating the pmds is atomic with
529 * We had a write-protection fault here and changed the pmd
530 * to be more permissive. No need to flush the TLB for that,
532 * worst-case we'll generate a spurious fault.
549 * We had a write-protection fault here and changed the pud
550 * to be more permissive. No need to flush the TLB for that,
552 * worst-case we'll generate a spurious fault.
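Both comment blocks (pmd and pud) describe the same pattern; a condensed sketch in the shape of the pmd variant, with the vma/address arguments and VM_BUG_ON checks trimmed (pmd_same() and set_pmd() are existing helpers):

static int set_access_flags_sketch(pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	if (changed && dirty) {
		set_pmd(pmdp, entry);
		/* Deliberately no TLB flush: a stale, more restrictive
		 * entry can at worst cause one spurious fault. */
	}

	return changed;
}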
567 (unsigned long *) &ptep->pte);
604 * On x86 CPUs, clearing the accessed bit without a TLB flush
609 * So as a performance optimization don't flush the TLB when
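A sketch of the resulting ptep_clear_flush_young() behaviour: the Accessed bit is cleared with the ordinary test-and-clear helper and the flush is simply omitted (ptep_test_and_clear_young() is an existing helper):

int ptep_clear_flush_young_sketch(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep)
{
	/* Clear Accessed but skip flush_tlb_page(): a later context
	 * switch or TLB flush will drop the stale cached copy anyway. */
	return ptep_test_and_clear_young(vma, address, ptep);
}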
660 * reserve_top_address - reserves a hole in the top of kernel address space
661 * @reserve: size of hole to reserve
670 __FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
672 -reserve, __FIXADDR_TOP + PAGE_SIZE);
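As a worked example of the computation above, assuming a 32-bit PAE kernel (PMD_SHIFT == 21) and 4 KiB pages: for reserve = 1 MiB, -reserve is 0xfff00000, round_down() to the 2 MiB boundary gives 0xffe00000, and subtracting one page leaves __FIXADDR_TOP = 0xffdff000, so the fixmap now ends below the reserved hole.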
711 * p4d_set_huge - setup kernel P4D mapping
713 * No 512GB pages yet -- always return 0
721 * p4d_clear_huge - clear kernel P4D mapping when it is set
723 * No 512GB pages yet -- always return 0
731 * pud_set_huge - setup kernel PUD mapping
734 * function sets up a huge page only if the complete range has the same MTRR
737 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
750 /* Bail out if we are on a populated non-leaf entry: */
762 * pmd_set_huge - setup kernel PMD mapping
774 pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
779 /* Bail out if we are on a populated non-leaf entry: */
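A sketch of the checks those comments describe, in the shape of pmd_set_huge(); the PAT-bit conversion the real code applies to the prot value is elided, and mtrr_type_lookup(), pmd_present(), pmd_leaf(), pfn_pte() and _PAGE_PSE are existing symbols:

int pmd_set_huge_sketch(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 uniform;

	/* Refuse a 2MB leaf if MTRRs give the range mixed cache types. */
	mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
	if (!uniform)
		return 0;

	/* Bail out if we are on a populated non-leaf entry: */
	if (pmd_present(*pmd) && !pmd_leaf(*pmd))
		return 0;

	/* Install the 2MB leaf; _PAGE_PSE marks it as a large page. */
	set_pte((pte_t *)pmd, pfn_pte((u64)addr >> PAGE_SHIFT,
				      __pgprot(pgprot_val(prot) | _PAGE_PSE)));
	return 1;
}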
791 * pud_clear_huge - clear kernel PUD mapping when it is set
806 * pmd_clear_huge - clear kernel PMD mapping when it is set
822 * pud_free_pmd_page - Clear pud entry and free pmd page.
826 * Context: The pud range has been unmapped and TLB purged.
850 /* INVLPG to clear all paging-structure caches */
851 flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
869 * pmd_free_pte_page - Clear pmd entry and free pte page.
873 * Context: The pmd range has been unmapped and TLB purged.
883 /* INVLPG to clear all paging-structure caches */
884 flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
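A sketch of the x86-64 flow around that flush: detach the pte page, clear the pmd, do the one-page kernel-range flush shown above (INVLPG also drops paging-structure-cache entries), and only then free the page (pmd_page_vaddr(), pmd_clear() and free_page() are existing APIs):

int pmd_free_pte_page_sketch(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);

	pmd_clear(pmd);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE - 1);

	free_page((unsigned long)pte);

	return 1;
}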
894 * Disable free page handling on x86-PAE. This ensures that ioremap()
895 * does not update sync'd pmd entries. See vmalloc_sync_one().
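On x86-PAE the function therefore never frees anything; a sketch of the resulting stub, which only reports whether the entry was already empty (so a huge mapping is attempted there only if the pmd was already clear):

int pmd_free_pte_page_pae_sketch(pmd_t *pmd, unsigned long addr)
{
	/* Never free: synced pmd copies elsewhere may still point at the
	 * pte page (see vmalloc_sync_one()). */
	return pmd_none(*pmd);
}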
907 if (vma->vm_flags & VM_SHADOW_STACK)
917 if (vma->vm_flags & VM_SHADOW_STACK)
934 VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
941 VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
948 VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && pud_shstk(pud));
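The VM_WARN_ON_ONCE() checks above assert, at zap time, that a shadow-stack-encoded entry never appears in a VMA without VM_SHADOW_STACK. The make-writable side of the same distinction looks roughly like the sketch below; the helper names follow the x86 shadow-stack series (Write=0, Dirty=1 is the shadow-stack PTE encoding) but should be treated as an assumption here.

pte_t pte_mkwrite_sketch(pte_t pte, struct vm_area_struct *vma)
{
	/* Shadow-stack VMAs get the special Write=0, Dirty=1 encoding. */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return pte_mkwrite_shstk(pte);

	/* Ordinary VMAs become truly writable; drop any SavedDirty bit so
	 * the result cannot alias the shadow-stack encoding. */
	pte = pte_mkwrite_novma(pte);
	return pte_clear_saveddirty(pte);
}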