Lines Matching +full:page +full:- +full:level

1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
37 * Jolitz uses a recursive map [a pde points to the page directory] to
38 * map the page tables using the pagetables themselves. This is done to
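To make the recursive trick concrete, here is a small userland sketch. It is illustrative only, not this header's PTmap/vtopte machinery: with one top-level slot pointing back at the PML4 page itself, the PTE that maps a given virtual address becomes visible at an address formed by shifting the VA down one level and substituting the recursive index on top. The slot value and helper names below are assumptions for the example; the header's actual recursive index is the PML4PML4I define.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PG_SHIFT        12                      /* 4KB pages */
#define IDX_BITS        9                       /* 512 entries per table page */
#define RECURSIVE_SLOT  ((uint64_t)0x100)       /* assumed slot for illustration */

/* Canonicalize a 48-bit VA: bits 63..48 must equal bit 47. */
static uint64_t
sign_extend48(uint64_t va)
{
        return ((va & ((uint64_t)1 << 47)) ? (va | ((uint64_t)-1 << 48)) : va);
}

/*
 * With PML4 entry RECURSIVE_SLOT pointing back at the PML4 page, the PTE
 * that maps 'va' is itself mapped at this address: the recursive slot
 * becomes the top-level index and va's own indices shift down one level.
 */
static uint64_t
recursive_pte_va(uint64_t va)
{
        uint64_t base, idx;

        base = sign_extend48(RECURSIVE_SLOT << (PG_SHIFT + 3 * IDX_BITS));
        idx = (va >> PG_SHIFT) & (((uint64_t)1 << (4 * IDX_BITS)) - 1);
        return (base + idx * sizeof(uint64_t)); /* 8-byte entries */
}

int
main(void)
{
        uint64_t va = 0x00007f1234567000ULL;

        printf("PTE for %#" PRIx64 " is mapped at %#" PRIx64 "\n",
            va, recursive_pte_va(va));
        return (0);
}

The payoff is that no extra KVA or bookkeeping is needed to reach a page-table page: the MMU's own walk, redirected through the recursive slot, performs the lookup.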
81 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
82 * (PTE) page mappings have identical settings for the following fields:
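The rule stated above can be read as an invariant over the 512 PTEs that one PDE would replace; the sketch below checks it in the obvious all-or-nothing way. The mask names and values are placeholders, not the header's actual promote mask, and the real promotion path is more forgiving about accessed/modified bits than this check.

#include <stdbool.h>
#include <stdint.h>

#define NPTE_PER_PDE    512                     /* 4KB PTEs per 2MB PDE */
#define FRAME_MASK      0x000ffffffffff000ULL   /* physical frame, bits 51..12 */
#define ATTR_MASK       0x8000000000000fffULL   /* placeholder attribute set */

/*
 * Sketch of the promotion precondition: every PTE backing the candidate
 * 2MB region maps the next 4KB frame of one contiguous, 2MB-aligned run
 * and carries identical attribute bits.
 */
static bool
can_promote(const uint64_t pte[NPTE_PER_PDE])
{
        uint64_t frame = pte[0] & FRAME_MASK;
        uint64_t attrs = pte[0] & ATTR_MASK;

        if ((frame & (0x200000ULL - 1)) != 0)   /* must be 2MB aligned */
                return (false);
        for (int i = 0; i < NPTE_PER_PDE; i++) {
                if ((pte[i] & ATTR_MASK) != attrs)
                        return (false);
                if ((pte[i] & FRAME_MASK) != frame + (uint64_t)i * 4096)
                        return (false);
        }
        return (true);
}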
114 ((unsigned long)-1 << 47) | \
120 ((unsigned long)-1 << 56) | \
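These two terms are the sign-extension parts of kernel virtual addresses built from per-level table indices: with 4-level paging (48-bit VAs) the hardware requires bits 63..48 to equal bit 47, and with 5-level paging (57-bit VAs) bits 63..57 must equal bit 56, so OR-ing in -1 shifted left by 47 (or 56) forces a canonical kernel-half address. A minimal sketch of the 4-level case, with illustrative names rather than the header's macros:

#include <assert.h>
#include <stdint.h>

/* Illustrative: build a canonical kernel VA from 4-level table indices. */
static uint64_t
kernel_va_4level(uint64_t l4, uint64_t l3, uint64_t l2, uint64_t l1)
{
        return (((uint64_t)-1 << 47) |          /* force the kernel half */
            (l4 << 39) | (l3 << 30) | (l2 << 21) | (l1 << 12));
}

int
main(void)
{
        /* Top PML4 slot, second-to-last PDP slot: the -2GB kernel base. */
        uint64_t va = kernel_va_4level(511, 510, 0, 0);

        assert(va == 0xffffffff80000000ULL);
        assert(va >> 48 == 0xffffULL);          /* bits 63..48 mirror bit 47 */
        return (0);
}

Plugging in the top slot (511) and the second-to-last PDP slot (510) lands exactly at 0xffffffff80000000, which is the "-2GB" placement noted for KPDPI further down.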
150 * one-to-one with the kernel map.
156 * We use the same numbering of the page table pages for 5-level and
157 * 4-level paging structures.
174 * the recursive page table map.
183 * Note: KPML4I is the index of the (single) level 4 page that maps
185 * first level 4 page that maps VM_MIN_KERNEL_ADDRESS. If NKPML4E
187 * level 4 PDEs are needed to map from VM_MIN_KERNEL_ADDRESS up to
197 #define KPML4BASE (NPML4EPG-NKPML4E) /* KVM at highest addresses */
198 #define DMPML4I rounddown(KPML4BASE-NDMPML4E, NDMPML4E) /* Below KVM */
200 #define KPML4I (NPML4EPG-1)
201 #define KPDPI (NPDPEPG-2) /* kernbase at -2GB */
203 #define KASANPML4I (DMPML4I - NKASANPML4E) /* Below the direct map */
205 #define KMSANSHADPML4I (KPML4BASE - NKMSANSHADPML4E)
206 #define KMSANORIGPML4I (DMPML4I - NKMSANORIGPML4E)
210 #define LMEPML4I (KASANPML4I - 1)
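Taken together, these indices carve the upper half of the PML4 into regions: the kernel map occupies the topmost NKPML4E slots ending at KPML4I, the direct map sits in NDMPML4E aligned slots below it, and the KASAN/KMSAN shadows and the large map are stacked below that. A sketch that plugs in assumed slot counts (the NKPML4E and NDMPML4E values here are placeholders, not the configured ones) and prints where the regions land:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NPML4EPG        512     /* 4096-byte page / 8-byte entries */
#define NKPML4E         4       /* assumed count of kernel-map slots */
#define NDMPML4E        8       /* assumed count of direct-map slots */
#define rounddown(x, y) (((x) / (y)) * (y))

/* Sign-extended base VA of PML4 slot 'i'; each slot spans 512GB. */
static uint64_t
slot_base(uint64_t i)
{
        return (((uint64_t)-1 << 47) | (i << 39));
}

int
main(void)
{
        unsigned kpml4base = NPML4EPG - NKPML4E;
        unsigned dmpml4i = rounddown(kpml4base - NDMPML4E, NDMPML4E);
        unsigned kpml4i = NPML4EPG - 1;

        printf("KVM:        slots %u..%u, base %#" PRIx64 "\n",
            kpml4base, kpml4i, slot_base(kpml4base));
        printf("direct map: slots %u..%u, base %#" PRIx64 "\n",
            dmpml4i, dmpml4i + NDMPML4E - 1, slot_base(dmpml4i));
        return (0);
}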
216 #define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)
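With the conventional ISA_HOLE_START of 0xa0000, this works out to 0x60000 bytes (384KB), i.e. the legacy VGA and option-ROM window between 640KB and 1MB.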
249 * Address of current address space page table maps and directories.
270 extern int nkpt; /* Initial number of kernel page tables */
271 extern u_int64_t KPML4phys; /* physical address of kernel level 4 */
272 extern u_int64_t KPML5phys; /* physical address of kernel level 5 */
275 * virtual address to page table entry and
311 PT_X86, /* regular x86 page tables */
312 PT_EPT, /* Intel's nested page tables */
313 PT_RVI, /* AMD's nested page tables */
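Because one pmap implementation drives all three formats, code that touches raw entries has to pick bit layouts based on pm_type; EPT entries, for instance, have no x86-style NX bit and instead carry an execute-permission bit. A simplified sketch of that per-type dispatch (the in-tree code also has to deal with EPT's optional accessed/dirty-bit emulation, which this omits):

#include <stdbool.h>
#include <stdint.h>

enum pmap_type { PT_X86, PT_EPT, PT_RVI };      /* as in the header */

#define X86_PG_NX       ((uint64_t)1 << 63)     /* x86/RVI: no-execute */
#define EPT_PG_EXECUTE  ((uint64_t)1 << 2)      /* EPT: execute permission */

/*
 * Per-type dispatch: RVI reuses the native x86 entry layout, so execute is
 * the default unless NX is set, while EPT grants execute only when its
 * permission bit is present.
 */
static inline bool
pte_executable(enum pmap_type type, uint64_t pte)
{
        switch (type) {
        case PT_X86:
        case PT_RVI:
                return ((pte & X86_PG_NX) == 0);
        case PT_EPT:
                return ((pte & EPT_PG_EXECUTE) != 0);
        }
        return (false);
}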
317 * The kernel virtual address (KVA) of the level 4 page table page is always
322 pml4_entry_t *pm_pmltop; /* KVA of top level page table */
323 pml4_entry_t *pm_pmltopu; /* KVA of user top page table */
330 struct vm_radix pm_root; /* spare page table pages */
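If, as the truncated comment above suggests, the top-level table's KVA lives in the direct map, then converting between the table's physical address (what gets loaded into %cr3) and pm_pmltop (its kernel-visible alias) is just the fixed DMAP offset. A sketch of that relationship; the base value below is illustrative, and FreeBSD supplies PHYS_TO_DMAP()/DMAP_TO_PHYS() for the real conversion:

#include <stdint.h>

#define DMAP_BASE_VA    0xfffff80000000000ULL   /* illustrative DMAP base */

/*
 * The direct map aliases physical memory at a fixed offset, so a page-table
 * page's physical address and its kernel virtual address differ only by
 * that offset.
 */
static inline uint64_t
phys_to_dmap(uint64_t pa)
{
        return (DMAP_BASE_VA + pa);
}

static inline uint64_t
dmap_to_phys(uint64_t va)
{
        return (va - DMAP_BASE_VA);
}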
350 #define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
352 mtx_assert(&(pmap)->pm_mtx, (type))
353 #define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
354 #define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \
356 #define PMAP_LOCKED(pmap) mtx_owned(&(pmap)->pm_mtx)
357 #define PMAP_MTX(pmap) (&(pmap)->pm_mtx)
358 #define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
359 #define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
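Usage of these macros follows the usual FreeBSD mutex pattern: the caller takes the pmap lock around an update, and internal helpers assert ownership. A kernel-context sketch; pmap_frob is a placeholder, not a real function, and the include list is abbreviated to the headers this pattern typically needs:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/pmap.h>

/* Placeholder helper that requires the pmap lock to be held on entry. */
static void
pmap_frob(pmap_t pmap)
{
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        /* ... inspect or modify state protected by pm_mtx ... */
}

static void
pmap_frob_caller(pmap_t pmap)
{
        PMAP_LOCK(pmap);
        pmap_frob(pmap);
        PMAP_UNLOCK(pmap);
}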
376 #define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode)
377 #define pmap_page_is_write_mapped(m) (((m)->a.flags & PGA_WRITEABLE) != 0)
381 KASSERT(m->phys_addr < kernphys || \
382 m->phys_addr >= kernphys + (vm_offset_t)&_end - KERNSTART, \
383 ("allocating kernel page %p pa %#lx kernphys %#lx end %p", \
384 m, m->phys_addr, kernphys, &_end));
455 return (&pmap->pm_active); in pmap_invalidate_cpu_mask()
486 pcidp = zpcpu_get(pmap->pm_pcidp); in pmap_get_pcid()
487 return (pcidp->pm_pcid); in pmap_get_pcid()
525 return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1)); in pmap_pte_index()
532 return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1)); in pmap_pde_index()
539 return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1)); in pmap_pdpe_index()
546 return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1)); in pmap_pml4e_index()
553 return ((va >> PML5SHIFT) & ((1ul << NPML5EPGSHIFT) - 1)); in pmap_pml5e_index()
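All five helpers are the same shift-and-mask idiom, one per paging level. A standalone illustration using the standard x86-64 shift amounts (12/21/30/39/48) and the 9-bit index width that NPTEPGSHIFT and friends expand to:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Shift the VA down to the level's granularity and keep the low 9 bits. */
static unsigned
level_index(uint64_t va, unsigned shift)
{
        return ((va >> shift) & 0x1ff);         /* 512 entries per table page */
}

int
main(void)
{
        uint64_t va = 0xffffffff80201234ULL;    /* an arbitrary kernel VA */

        printf("va %#" PRIx64 ": pml4=%u pdp=%u pd=%u pt=%u offset=%#" PRIx64 "\n",
            va,
            level_index(va, 39),                /* PML4SHIFT */
            level_index(va, 30),                /* PDPSHIFT */
            level_index(va, 21),                /* PDRSHIFT */
            level_index(va, 12),                /* PAGE_SHIFT */
            va & 0xfff);
        return (0);
}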