/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields not present here and there, depending on a lot of things.
 */
/* ---- Intel Nomenclature ---- */
#define	X86_PG_V	0x001	/* P	Valid			*/
#define	X86_PG_RW	0x002	/* R/W	Read/Write		*/
#define	X86_PG_U	0x004	/* U/S	User/Supervisor		*/
#define	X86_PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	X86_PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	X86_PG_A	0x020	/* A	Accessed		*/
#define	X86_PG_M	0x040	/* D	Dirty			*/
#define	X86_PG_PS	0x080	/* PS	Page size (0=4k,1=2M)	*/
#define	X86_PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	X86_PG_G	0x100	/* G	Global			*/
#define	X86_PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	X86_PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	X86_PG_AVAIL3	0x800	/*    \				*/
#define	X86_PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#define	X86_PG_NX	(1ul<<63)	/* No-execute		*/
#define	X86_PG_AVAIL(x)	(1ul << (x))

/* Page level cache control fields used to determine the PAT type */
#define	X86_PG_PDE_CACHE (X86_PG_PDE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)
#define	X86_PG_PTE_CACHE (X86_PG_PTE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)

/*
 * Intel extended page table (EPT) bit definitions.
 */
#define	EPT_PG_READ		0x001	/* R	Read		*/
#define	EPT_PG_WRITE		0x002	/* W	Write		*/
#define	EPT_PG_EXECUTE		0x004	/* X	Execute		*/
#define	EPT_PG_IGNORE_PAT	0x040	/* IPAT	Ignore PAT	*/
#define	EPT_PG_PS		0x080	/* PS	Page size	*/
#define	EPT_PG_A		0x100	/* A	Accessed	*/
#define	EPT_PG_M		0x200	/* D	Dirty		*/
#define	EPT_PG_MEMORY_TYPE(x)	((x) << 3)	/* MT Memory Type */

/*
 * Define the PG_xx macros in terms of the bits on x86 PTEs.
 */
#define	PG_V		X86_PG_V
#define	PG_RW		X86_PG_RW
#define	PG_U		X86_PG_U
#define	PG_NC_PWT	X86_PG_NC_PWT
#define	PG_NC_PCD	X86_PG_NC_PCD
#define	PG_A		X86_PG_A
#define	PG_M		X86_PG_M
#define	PG_PS		X86_PG_PS
#define	PG_PTE_PAT	X86_PG_PTE_PAT
#define	PG_G		X86_PG_G
#define	PG_AVAIL1	X86_PG_AVAIL1
#define	PG_AVAIL2	X86_PG_AVAIL2
#define	PG_AVAIL3	X86_PG_AVAIL3
#define	PG_PDE_PAT	X86_PG_PDE_PAT
#define	PG_NX		X86_PG_NX
#define	PG_PDE_CACHE	X86_PG_PDE_CACHE
#define	PG_PTE_CACHE	X86_PG_PTE_CACHE

/* Our various interpretations of the above */
#define	PG_W		X86_PG_AVAIL3	/* "Wired" pseudoflag */
#define	PG_MANAGED	X86_PG_AVAIL2
#define	EPT_PG_EMUL_V	X86_PG_AVAIL(52)
#define	EPT_PG_EMUL_RW	X86_PG_AVAIL(53)
#define	PG_PROMOTED	X86_PG_AVAIL(54)	/* PDE only */
#define	PG_FRAME	(0x000ffffffffff000ul)
#define	PG_PS_FRAME	(0x000fffffffe00000ul)

/*
 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
 * (PTE) page mappings have identical settings for the following fields:
 */
#define	PG_PTE_PROMOTE	(PG_NX | PG_MANAGED | PG_W | PG_G | PG_PTE_CACHE | \
	    PG_M | PG_A | PG_U | PG_RW | PG_V)

/*
 * Page Protection Exception bits
 */
#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */
#define	PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define	PGEX_I		0x10	/* during an instruction fetch */

/*
 * undef the PG_xx macros that define bits in the regular x86 PTEs that
 * have a different position in nested PTEs.  This is done when compiling
 * code that needs to be aware of the differences between regular x86 and
 * nested PTEs.
 *
 * The appropriate bitmask will be calculated at runtime based on the pmap
 * type.
 */
#ifdef AMD64_NPT_AWARE
#undef PG_AVAIL1		/* X86_PG_AVAIL1 aliases with EPT_PG_M */
#undef PG_G
#undef PG_A
#undef PG_M
#undef PG_PDE_PAT
#undef PG_PDE_CACHE
#undef PG_PTE_PAT
#undef PG_PTE_CACHE
#undef PG_RW
#undef PG_V
#endif
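
/*
 * For illustration, a minimal sketch of the kind of runtime selection that
 * NPT-aware code performs once the macros above have been #undef'd; pmap.c
 * keeps helpers along these lines, but the exact shape shown here is a
 * sketch, not the definitive implementation:
 *
 *	static __inline pt_entry_t
 *	pmap_valid_bit(pmap_t pmap)
 *	{
 *
 *		switch (pmap->pm_type) {
 *		case PT_X86:
 *		case PT_RVI:
 *			return (X86_PG_V);
 *		case PT_EPT:
 *			return (EPT_PG_READ);
 *		default:
 *			panic("pmap_valid_bit: invalid pm_type %d",
 *			    pmap->pm_type);
 *		}
 *	}
 *
 * (The real helper must also account for A/D bit emulation, in which case
 * the "valid" bit for an EPT pmap is EPT_PG_EMUL_V instead.)
 */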
/*
 * Pte related macros.  This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define	KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define	UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

/*
 * Number of kernel PML4 slots.  Can be anywhere from 1 to 64 or so,
 * but setting it larger than NDMPML4E makes no sense.
 *
 * Each slot provides .5 TB of kernel virtual space.
 */
#define	NKPML4E		4

#define	NUPML4E		(NPML4EPG/2)	/* number of userland PML4 pages */
#define	NUPDPE		(NUPML4E*NPDPEPG)/* number of userland PDP pages */
#define	NUPDE		(NUPDPE*NPDEPG)	/* number of userland PD entries */

/*
 * NDMPML4E is the maximum number of PML4 entries that will be
 * used to implement the direct map.  It must be a power of two,
 * and should generally exceed NKPML4E.  The maximum possible
 * value is 64; using 128 will make the direct map intrude into
 * the recursive page table map.
 */
#define	NDMPML4E	8

/*
 * These values control the layout of virtual memory.  The starting address
 * of the direct map, which is controlled by DMPML4I, must be a multiple of
 * its size.  (See the PHYS_TO_DMAP() and DMAP_TO_PHYS() macros.)
 *
 * Note: KPML4I is the index of the (single) level 4 page that maps
 * the KVA that holds KERNBASE, while KPML4BASE is the index of the
 * first level 4 page that maps VM_MIN_KERNEL_ADDRESS.  If NKPML4E
 * is 1, these are the same, otherwise KPML4BASE < KPML4I and extra
 * level 4 PDEs are needed to map from VM_MIN_KERNEL_ADDRESS up to
 * KERNBASE.
 *
 * (KPML4I combines with KPDPI to choose where KERNBASE starts.
 * Or, in other words, KPML4I provides bits 39..47 of KERNBASE,
 * and KPDPI provides bits 30..38.)
 */
#define	PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */

#define	KPML4BASE	(NPML4EPG-NKPML4E) /* KVM at highest addresses */
#define	DMPML4I		rounddown(KPML4BASE-NDMPML4E, NDMPML4E) /* Below KVM */

#define	KPML4I		(NPML4EPG-1)
#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */
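
/*
 * As a worked example of the layout above, with KPML4I = 511, KPDPI = 510,
 * PML4SHIFT = 39, and PDPSHIFT = 30:
 *
 *	KVADDR(KPML4I, KPDPI, 0, 0)
 *	    = 0xffff800000000000	(sign extension of bit 47)
 *	    | 0x0000ff8000000000	(511ul << PML4SHIFT)
 *	    | 0x0000007f80000000	(510ul << PDPSHIFT)
 *	    = 0xffffffff80000000
 *
 * which is KERNBASE, i.e. -2GB.
 */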
/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START	0xa0000
#define	ISA_HOLE_LENGTH	(0x100000-ISA_HOLE_START)

#define	PMAP_PCID_NONE		0xffffffff
#define	PMAP_PCID_KERN		0
#define	PMAP_PCID_OVERMAX	0x1000

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#include <vm/_vm_radix.h>

typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
typedef u_int64_t pdp_entry_t;
typedef u_int64_t pml4_entry_t;

/*
 * Address of current address space page table maps and directories.
 */
#ifdef _KERNEL
#define	addr_PTmap	(KVADDR(PML4PML4I, 0, 0, 0))
#define	addr_PDmap	(KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_PDPmap	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_PML4map	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	PTmap		((pt_entry_t *)(addr_PTmap))
#define	PDmap		((pd_entry_t *)(addr_PDmap))
#define	PDPmap		((pd_entry_t *)(addr_PDPmap))
#define	PML4map		((pd_entry_t *)(addr_PML4map))
#define	PML4pml4e	((pd_entry_t *)(addr_PML4pml4e))

extern int nkpt;		/* Initial number of kernel page tables */
extern u_int64_t KPDPphys;	/* physical address of kernel level 3 */
extern u_int64_t KPML4phys;	/* physical address of kernel level 4 */

/*
 * virtual address to page table entry and
 * to physical address.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
pt_entry_t *vtopte(vm_offset_t);
#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
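
/*
 * To illustrate the recursion, vtopte() is implemented in pmap.c roughly
 * as follows (a sketch under the definitions above, not the definitive
 * implementation): the low 36 bits of the page number select one of the
 * 2^36 PTE slots reachable through the recursive PML4 entry, so a single
 * array index into PTmap replaces the four-level walk.
 *
 *	pt_entry_t *
 *	vtopte(vm_offset_t va)
 *	{
 *		u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
 *		    NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
 *
 *		return (PTmap + ((va >> PAGE_SHIFT) & mask));
 *	}
 */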
#define	pte_load_store(ptep, pte)	atomic_swap_long(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_long(ptep, 0)
#define	pte_store(ptep, pte) do { \
	*(u_long *)(ptep) = (u_long)(pte); \
} while (0)
#define	pte_clear(ptep)			pte_store(ptep, 0)

#define	pde_store(pdep, pde)		pte_store(pdep, pde)

extern pt_entry_t pg_nx;

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct pv_entry;
struct pv_chunk;

/*
 * Locks
 * (p) PV list lock
 */
struct md_page {
	TAILQ_HEAD(, pv_entry)	pv_list;	/* (p) */
	int			pv_gen;		/* (p) */
	int			pat_mode;
};

enum pmap_type {
	PT_X86,			/* regular x86 page tables */
	PT_EPT,			/* Intel's nested page tables */
	PT_RVI,			/* AMD's nested page tables */
};

struct pmap_pcids {
	uint32_t	pm_pcid;
	uint32_t	pm_gen;
};

/*
 * The kernel virtual address (KVA) of the level 4 page table page is always
 * within the direct map (DMAP) region.
 */
struct pmap {
	struct mtx		pm_mtx;
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	pml4_entry_t		*pm_pml4u;	/* KVA of user l4 page table */
	uint64_t		pm_cr3;
	uint64_t		pm_ucr3;
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	enum pmap_type		pm_type;	/* regular or nested tables */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_radix		pm_root;	/* spare page table pages */
	long			pm_eptgen;	/* EPT pmap generation id */
	int			pm_flags;
	struct pmap_pcids	pm_pcids[MAXCPU];
};

/* flags */
#define	PMAP_NESTED_IPIMASK	0xff
#define	PMAP_PDE_SUPERPAGE	(1 << 8)	/* supports 2MB superpages */
#define	PMAP_EMULATE_AD_BITS	(1 << 9)	/* needs A/D bits emulation */
#define	PMAP_SUPPORTS_EXEC_ONLY	(1 << 10)	/* execute only mappings ok */

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)

int	pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags);
int	pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype);
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;
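
/*
 * A sketch of how the pieces above fit together: all mappings of a page
 * can be visited by walking its pv_list (with the page's PV list lock
 * held) and taking each owning pmap's lock around the inspection.  The
 * loop below is a hypothetical consumer, not an existing function;
 * PV_PMAP() and pmap_pte() are helpers that live in pmap.c and are
 * assumed here.
 *
 *	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
 *		pmap = PV_PMAP(pv);
 *		PMAP_LOCK(pmap);
 *		pte = pmap_pte(pmap, pv->pv_va);
 *		... inspect or update *pte ...
 *		PMAP_UNLOCK(pmap);
 *	}
 */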
/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	3
#define	_NPCPV	168
#define	PV_CHUNK_HEADER							\
	pmap_t			pc_pmap;				\
	TAILQ_ENTRY(pv_chunk)	pc_list;				\
	uint64_t		pc_map[_NPCM];	/* bitmap; 1 = free */	\
	TAILQ_ENTRY(pv_chunk)	pc_lru;

struct pv_chunk_header {
	PV_CHUNK_HEADER
};

struct pv_chunk {
	PV_CHUNK_HEADER
	struct pv_entry		pc_pventry[_NPCPV];
};

#ifdef _KERNEL

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
extern vm_paddr_t dmaplimit;
extern int pmap_pcid_enabled;
extern int invpcid_works;

#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
#define	pmap_unmapbios(va, sz)		pmap_unmapdev((va), (sz))

struct thread;

void	pmap_activate_sw(struct thread *);
void	pmap_bootstrap(vm_paddr_t *);
int	pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
vm_paddr_t pmap_kextract(vm_offset_t);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_pinit_pml4(vm_page_t);
bool	pmap_ps_enabled(pmap_t pmap);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
	    boolean_t force);
void	pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
void	pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec);
void	pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva);
#endif /* _KERNEL */

/* Return various clipped indexes for a given VA */
static __inline vm_pindex_t
pmap_pte_index(vm_offset_t va)
{

	return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
}

static __inline vm_pindex_t
pmap_pde_index(vm_offset_t va)
{

	return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
}

static __inline vm_pindex_t
pmap_pdpe_index(vm_offset_t va)
{

	return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
}

static __inline vm_pindex_t
pmap_pml4e_index(vm_offset_t va)
{

	return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
}

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */
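
/*
 * As a worked example of the index functions above: for
 * va = 0xffffffff80201000 (KERNBASE + 2MB + 4KB),
 *
 *	pmap_pml4e_index(va) = (va >> 39) & 511 = 511	(KPML4I)
 *	pmap_pdpe_index(va)  = (va >> 30) & 511 = 510	(KPDPI)
 *	pmap_pde_index(va)   = (va >> 21) & 511 = 1
 *	pmap_pte_index(va)   = (va >> 12) & 511 = 1
 */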