1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2003 Peter Wemm. 5 * Copyright (c) 1991 Regents of the University of California. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * the Systems Programming Group of the University of Utah Computer 10 * Science Department and William Jolitz of UUNET Technologies Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * Derived from hp300 version by Mike Hibler, this version by William 37 * Jolitz uses a recursive map [a pde points to the page directory] to 38 * map the page tables using the pagetables themselves. This is done to 39 * reduce the impact on kernel virtual memory for lots of sparse address 40 * space, and to reduce the cost of memory to each process. 41 */ 42 43 #ifdef __i386__ 44 #include <i386/pmap.h> 45 #else /* !__i386__ */ 46 47 #ifndef _MACHINE_PMAP_H_ 48 #define _MACHINE_PMAP_H_ 49 50 /* 51 * Page-directory and page-table entries follow this format, with a few 52 * of the fields not present here and there, depending on a lot of things. 53 */ 54 /* ---- Intel Nomenclature ---- */ 55 #define X86_PG_V 0x001 /* P Valid */ 56 #define X86_PG_RW 0x002 /* R/W Read/Write */ 57 #define X86_PG_U 0x004 /* U/S User/Supervisor */ 58 #define X86_PG_NC_PWT 0x008 /* PWT Write through */ 59 #define X86_PG_NC_PCD 0x010 /* PCD Cache disable */ 60 #define X86_PG_A 0x020 /* A Accessed */ 61 #define X86_PG_M 0x040 /* D Dirty */ 62 #define X86_PG_PS 0x080 /* PS Page size (0=4k,1=2M) */ 63 #define X86_PG_PTE_PAT 0x080 /* PAT PAT index */ 64 #define X86_PG_G 0x100 /* G Global */ 65 #define X86_PG_AVAIL1 0x200 /* / Available for system */ 66 #define X86_PG_AVAIL2 0x400 /* < programmers use */ 67 #define X86_PG_AVAIL3 0x800 /* \ */ 68 #define X86_PG_PDE_PAT 0x1000 /* PAT PAT index */ 69 #define X86_PG_PKU(idx) ((pt_entry_t)idx << 59) 70 #define X86_PG_NX (1ul<<63) /* No-execute */ 71 #define X86_PG_AVAIL(x) (1ul << (x)) 72 73 /* Page level cache control fields used to determine the PAT type */ 74 #define X86_PG_PDE_CACHE (X86_PG_PDE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD) 75 #define X86_PG_PTE_CACHE (X86_PG_PTE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD) 76 77 /* Protection keys indexes */ 78 #define PMAP_MAX_PKRU_IDX 0xf 79 #define X86_PG_PKU_MASK X86_PG_PKU(PMAP_MAX_PKRU_IDX) 80 81 /* 82 * Intel extended page table (EPT) bit definitions. 83 */ 84 #define EPT_PG_READ 0x001 /* R Read */ 85 #define EPT_PG_WRITE 0x002 /* W Write */ 86 #define EPT_PG_EXECUTE 0x004 /* X Execute */ 87 #define EPT_PG_IGNORE_PAT 0x040 /* IPAT Ignore PAT */ 88 #define EPT_PG_PS 0x080 /* PS Page size */ 89 #define EPT_PG_A 0x100 /* A Accessed */ 90 #define EPT_PG_M 0x200 /* D Dirty */ 91 #define EPT_PG_MEMORY_TYPE(x) ((x) << 3) /* MT Memory Type */ 92 93 /* 94 * Define the PG_xx macros in terms of the bits on x86 PTEs. 95 */ 96 #define PG_V X86_PG_V 97 #define PG_RW X86_PG_RW 98 #define PG_U X86_PG_U 99 #define PG_NC_PWT X86_PG_NC_PWT 100 #define PG_NC_PCD X86_PG_NC_PCD 101 #define PG_A X86_PG_A 102 #define PG_M X86_PG_M 103 #define PG_PS X86_PG_PS 104 #define PG_PTE_PAT X86_PG_PTE_PAT 105 #define PG_G X86_PG_G 106 #define PG_AVAIL1 X86_PG_AVAIL1 107 #define PG_AVAIL2 X86_PG_AVAIL2 108 #define PG_AVAIL3 X86_PG_AVAIL3 109 #define PG_PDE_PAT X86_PG_PDE_PAT 110 #define PG_NX X86_PG_NX 111 #define PG_PDE_CACHE X86_PG_PDE_CACHE 112 #define PG_PTE_CACHE X86_PG_PTE_CACHE 113 114 /* Our various interpretations of the above */ 115 #define PG_W X86_PG_AVAIL3 /* "Wired" pseudoflag */ 116 #define PG_MANAGED X86_PG_AVAIL2 117 #define EPT_PG_EMUL_V X86_PG_AVAIL(52) 118 #define EPT_PG_EMUL_RW X86_PG_AVAIL(53) 119 #define PG_PROMOTED X86_PG_AVAIL(54) /* PDE only */ 120 #define PG_FRAME (0x000ffffffffff000ul) 121 #define PG_PS_FRAME (0x000fffffffe00000ul) 122 #define PG_PS_PDP_FRAME (0x000fffffc0000000ul) 123 124 /* 125 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB 126 * (PTE) page mappings have identical settings for the following fields: 127 */ 128 #define PG_PTE_PROMOTE (PG_NX | PG_MANAGED | PG_W | PG_G | PG_PTE_CACHE | \ 129 PG_M | PG_U | PG_RW | PG_V | PG_PKU_MASK) 130 131 /* 132 * Page Protection Exception bits 133 */ 134 135 #define PGEX_P 0x01 /* Protection violation vs. not present */ 136 #define PGEX_W 0x02 /* during a Write cycle */ 137 #define PGEX_U 0x04 /* access from User mode (UPL) */ 138 #define PGEX_RSV 0x08 /* reserved PTE field is non-zero */ 139 #define PGEX_I 0x10 /* during an instruction fetch */ 140 #define PGEX_PK 0x20 /* protection key violation */ 141 #define PGEX_SGX 0x8000 /* SGX-related */ 142 143 /* 144 * undef the PG_xx macros that define bits in the regular x86 PTEs that 145 * have a different position in nested PTEs. This is done when compiling 146 * code that needs to be aware of the differences between regular x86 and 147 * nested PTEs. 148 * 149 * The appropriate bitmask will be calculated at runtime based on the pmap 150 * type. 151 */ 152 #ifdef AMD64_NPT_AWARE 153 #undef PG_AVAIL1 /* X86_PG_AVAIL1 aliases with EPT_PG_M */ 154 #undef PG_G 155 #undef PG_A 156 #undef PG_M 157 #undef PG_PDE_PAT 158 #undef PG_PDE_CACHE 159 #undef PG_PTE_PAT 160 #undef PG_PTE_CACHE 161 #undef PG_RW 162 #undef PG_V 163 #endif 164 165 /* 166 * Pte related macros. This is complicated by having to deal with 167 * the sign extension of the 48th bit. 168 */ 169 #define KV4ADDR(l4, l3, l2, l1) ( \ 170 ((unsigned long)-1 << 47) | \ 171 ((unsigned long)(l4) << PML4SHIFT) | \ 172 ((unsigned long)(l3) << PDPSHIFT) | \ 173 ((unsigned long)(l2) << PDRSHIFT) | \ 174 ((unsigned long)(l1) << PAGE_SHIFT)) 175 #define KV5ADDR(l5, l4, l3, l2, l1) ( \ 176 ((unsigned long)-1 << 56) | \ 177 ((unsigned long)(l5) << PML5SHIFT) | \ 178 ((unsigned long)(l4) << PML4SHIFT) | \ 179 ((unsigned long)(l3) << PDPSHIFT) | \ 180 ((unsigned long)(l2) << PDRSHIFT) | \ 181 ((unsigned long)(l1) << PAGE_SHIFT)) 182 183 #define UVADDR(l5, l4, l3, l2, l1) ( \ 184 ((unsigned long)(l5) << PML5SHIFT) | \ 185 ((unsigned long)(l4) << PML4SHIFT) | \ 186 ((unsigned long)(l3) << PDPSHIFT) | \ 187 ((unsigned long)(l2) << PDRSHIFT) | \ 188 ((unsigned long)(l1) << PAGE_SHIFT)) 189 190 /* 191 * Number of kernel PML4 slots. Can be anywhere from 1 to 64 or so, 192 * but setting it larger than NDMPML4E makes no sense. 193 * 194 * Each slot provides .5 TB of kernel virtual space. 195 */ 196 #define NKPML4E 4 197 198 /* 199 * Number of PML4 slots for the KASAN shadow map. It requires 1 byte of memory 200 * for every 8 bytes of the kernel address space. 201 */ 202 #define NKASANPML4E ((NKPML4E + 7) / 8) 203 204 /* 205 * Number of PML4 slots for the KMSAN shadow and origin maps. These are 206 * one-to-one with the kernel map. 207 */ 208 #define NKMSANSHADPML4E NKPML4E 209 #define NKMSANORIGPML4E NKPML4E 210 211 /* 212 * We use the same numbering of the page table pages for 5-level and 213 * 4-level paging structures. 214 */ 215 #define NUPML5E (NPML5EPG / 2) /* number of userland PML5 216 pages */ 217 #define NUPML4E (NUPML5E * NPML4EPG) /* number of userland PML4 218 pages */ 219 #define NUPDPE (NUPML4E * NPDPEPG) /* number of userland PDP 220 pages */ 221 #define NUPDE (NUPDPE * NPDEPG) /* number of userland PD 222 entries */ 223 #define NUP4ML4E (NPML4EPG / 2) 224 225 /* 226 * NDMPML4E is the maximum number of PML4 entries that will be 227 * used to implement the direct map. It must be a power of two, 228 * and should generally exceed NKPML4E. The maximum possible 229 * value is 64; using 128 will make the direct map intrude into 230 * the recursive page table map. 231 */ 232 #define NDMPML4E 8 233 234 /* 235 * These values control the layout of virtual memory. The starting address 236 * of the direct map, which is controlled by DMPML4I, must be a multiple of 237 * its size. (See the PHYS_TO_DMAP() and DMAP_TO_PHYS() macros.) 238 * 239 * Note: KPML4I is the index of the (single) level 4 page that maps 240 * the KVA that holds KERNBASE, while KPML4BASE is the index of the 241 * first level 4 page that maps VM_MIN_KERNEL_ADDRESS. If NKPML4E 242 * is 1, these are the same, otherwise KPML4BASE < KPML4I and extra 243 * level 4 PDEs are needed to map from VM_MIN_KERNEL_ADDRESS up to 244 * KERNBASE. 245 * 246 * (KPML4I combines with KPDPI to choose where KERNBASE starts. 247 * Or, in other words, KPML4I provides bits 39..47 of KERNBASE, 248 * and KPDPI provides bits 30..38.) 249 */ 250 #define PML4PML4I (NPML4EPG / 2) /* Index of recursive pml4 mapping */ 251 #define PML5PML5I (NPML5EPG / 2) /* Index of recursive pml5 mapping */ 252 253 #define KPML4BASE (NPML4EPG-NKPML4E) /* KVM at highest addresses */ 254 #define DMPML4I rounddown(KPML4BASE-NDMPML4E, NDMPML4E) /* Below KVM */ 255 256 #define KPML4I (NPML4EPG-1) 257 #define KPDPI (NPDPEPG-2) /* kernbase at -2GB */ 258 259 #define KASANPML4I (DMPML4I - NKASANPML4E) /* Below the direct map */ 260 261 #define KMSANSHADPML4I (KPML4BASE - NKMSANSHADPML4E) 262 #define KMSANORIGPML4I (DMPML4I - NKMSANORIGPML4E) 263 264 /* Large map: index of the first and max last pml4 entry */ 265 #define LMSPML4I (PML4PML4I + 1) 266 #define LMEPML4I (KASANPML4I - 1) 267 268 /* 269 * XXX doesn't really belong here I guess... 270 */ 271 #define ISA_HOLE_START 0xa0000 272 #define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START) 273 274 #define PMAP_PCID_NONE 0xffffffff 275 #define PMAP_PCID_KERN 0 276 #define PMAP_PCID_OVERMAX 0x1000 277 #define PMAP_PCID_OVERMAX_KERN 0x800 278 #define PMAP_PCID_USER_PT 0x800 279 280 #define PMAP_NO_CR3 0xffffffffffffffff 281 #define PMAP_UCR3_NOMASK 0xffffffffffffffff 282 283 #ifndef LOCORE 284 285 #include <sys/kassert.h> 286 #include <sys/queue.h> 287 #include <sys/_cpuset.h> 288 #include <sys/_lock.h> 289 #include <sys/_mutex.h> 290 #include <sys/_pctrie.h> 291 #include <machine/_pmap.h> 292 #include <sys/_pv_entry.h> 293 #include <sys/_rangeset.h> 294 #include <sys/_smr.h> 295 296 #include <vm/_vm_radix.h> 297 298 typedef u_int64_t pd_entry_t; 299 typedef u_int64_t pt_entry_t; 300 typedef u_int64_t pdp_entry_t; 301 typedef u_int64_t pml4_entry_t; 302 typedef u_int64_t pml5_entry_t; 303 304 /* 305 * Address of current address space page table maps and directories. 306 */ 307 #ifdef _KERNEL 308 #define addr_P4Tmap (KV4ADDR(PML4PML4I, 0, 0, 0)) 309 #define addr_P4Dmap (KV4ADDR(PML4PML4I, PML4PML4I, 0, 0)) 310 #define addr_P4DPmap (KV4ADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0)) 311 #define addr_P4ML4map (KV4ADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I)) 312 #define addr_P4ML4pml4e (addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t))) 313 #define P4Tmap ((pt_entry_t *)(addr_P4Tmap)) 314 #define P4Dmap ((pd_entry_t *)(addr_P4Dmap)) 315 316 #define addr_P5Tmap (KV5ADDR(PML5PML5I, 0, 0, 0, 0)) 317 #define addr_P5Dmap (KV5ADDR(PML5PML5I, PML5PML5I, 0, 0, 0)) 318 #define addr_P5DPmap (KV5ADDR(PML5PML5I, PML5PML5I, PML5PML5I, 0, 0)) 319 #define addr_P5ML4map (KV5ADDR(PML5PML5I, PML5PML5I, PML5PML5I, PML5PML5I, 0)) 320 #define addr_P5ML5map \ 321 (KVADDR(PML5PML5I, PML5PML5I, PML5PML5I, PML5PML5I, PML5PML5I)) 322 #define addr_P5ML5pml5e (addr_P5ML5map + (PML5PML5I * sizeof(pml5_entry_t))) 323 #define P5Tmap ((pt_entry_t *)(addr_P5Tmap)) 324 #define P5Dmap ((pd_entry_t *)(addr_P5Dmap)) 325 326 extern int nkpt; /* Initial number of kernel page tables */ 327 extern u_int64_t KPML4phys; /* physical address of kernel level 4 */ 328 extern u_int64_t KPML5phys; /* physical address of kernel level 5 */ 329 330 /* 331 * virtual address to page table entry and 332 * to physical address. 333 * Note: these work recursively, thus vtopte of a pte will give 334 * the corresponding pde that in turn maps it. 335 */ 336 pt_entry_t *vtopte(vm_offset_t); 337 #define vtophys(va) pmap_kextract(((vm_offset_t) (va))) 338 339 #define pte_load_store(ptep, pte) atomic_swap_long(ptep, pte) 340 #define pte_load_clear(ptep) atomic_swap_long(ptep, 0) 341 #define pte_store(ptep, pte) do { \ 342 *(u_long *)(ptep) = (u_long)(pte); \ 343 } while (0) 344 #define pte_clear(ptep) pte_store(ptep, 0) 345 346 #define pde_store(pdep, pde) pte_store(pdep, pde) 347 348 extern pt_entry_t pg_nx; 349 350 #endif /* _KERNEL */ 351 352 /* 353 * Pmap stuff 354 */ 355 356 /* 357 * Locks 358 * (p) PV list lock 359 */ 360 struct md_page { 361 TAILQ_HEAD(, pv_entry) pv_list; /* (p) */ 362 int pv_gen; /* (p) */ 363 int pat_mode; 364 }; 365 366 enum pmap_type { 367 PT_X86, /* regular x86 page tables */ 368 PT_EPT, /* Intel's nested page tables */ 369 PT_RVI, /* AMD's nested page tables */ 370 }; 371 372 /* 373 * The kernel virtual address (KVA) of the level 4 page table page is always 374 * within the direct map (DMAP) region. 375 */ 376 struct pmap { 377 struct mtx pm_mtx; 378 pml4_entry_t *pm_pmltop; /* KVA of top level page table */ 379 pml4_entry_t *pm_pmltopu; /* KVA of user top page table */ 380 uint64_t pm_cr3; 381 uint64_t pm_ucr3; 382 TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */ 383 cpuset_t pm_active; /* active on cpus */ 384 enum pmap_type pm_type; /* regular or nested tables */ 385 struct pmap_statistics pm_stats; /* pmap statistics */ 386 struct vm_radix pm_root; /* spare page table pages */ 387 long pm_eptgen; /* EPT pmap generation id */ 388 smr_t pm_eptsmr; 389 int pm_flags; 390 struct pmap_pcid *pm_pcidp; 391 struct rangeset pm_pkru; 392 }; 393 394 /* flags */ 395 #define PMAP_NESTED_IPIMASK 0xff 396 #define PMAP_PDE_SUPERPAGE (1 << 8) /* supports 2MB superpages */ 397 #define PMAP_EMULATE_AD_BITS (1 << 9) /* needs A/D bits emulation */ 398 #define PMAP_SUPPORTS_EXEC_ONLY (1 << 10) /* execute only mappings ok */ 399 400 typedef struct pmap *pmap_t; 401 402 #ifdef _KERNEL 403 extern struct pmap kernel_pmap_store; 404 #define kernel_pmap (&kernel_pmap_store) 405 406 #define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx) 407 #define PMAP_LOCK_ASSERT(pmap, type) \ 408 mtx_assert(&(pmap)->pm_mtx, (type)) 409 #define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx) 410 #define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \ 411 NULL, MTX_DEF | MTX_DUPOK) 412 #define PMAP_LOCKED(pmap) mtx_owned(&(pmap)->pm_mtx) 413 #define PMAP_MTX(pmap) (&(pmap)->pm_mtx) 414 #define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx) 415 #define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) 416 417 int pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags); 418 int pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype); 419 420 extern caddr_t CADDR1; 421 extern pt_entry_t *CMAP1; 422 extern vm_offset_t virtual_avail; 423 extern vm_offset_t virtual_end; 424 extern vm_paddr_t dmaplimit; 425 extern int pmap_pcid_enabled; 426 extern int invpcid_works; 427 extern int pmap_pcid_invlpg_workaround; 428 extern int pmap_pcid_invlpg_workaround_uena; 429 430 #define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode) 431 #define pmap_page_is_write_mapped(m) (((m)->a.flags & PGA_WRITEABLE) != 0) 432 #define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz)) 433 434 #define pmap_vm_page_alloc_check(m) \ 435 KASSERT(m->phys_addr < kernphys || \ 436 m->phys_addr >= kernphys + (vm_offset_t)&_end - KERNSTART, \ 437 ("allocating kernel page %p pa %#lx kernphys %#lx end %p", \ 438 m, m->phys_addr, kernphys, &_end)); 439 440 struct thread; 441 442 void pmap_activate_boot(pmap_t pmap); 443 void pmap_activate_sw(struct thread *); 444 void pmap_allow_2m_x_ept_recalculate(void); 445 void pmap_bootstrap(vm_paddr_t *); 446 int pmap_cache_bits(pmap_t pmap, int mode, bool is_pde); 447 int pmap_change_attr(vm_offset_t, vm_size_t, int); 448 int pmap_change_prot(vm_offset_t, vm_size_t, vm_prot_t); 449 void pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, bool invalidate); 450 void pmap_flush_cache_range(vm_offset_t, vm_offset_t); 451 void pmap_flush_cache_phys_range(vm_paddr_t, vm_paddr_t, vm_memattr_t); 452 void pmap_init_pat(void); 453 void pmap_kenter(vm_offset_t va, vm_paddr_t pa); 454 void *pmap_kenter_temporary(vm_paddr_t pa, int i); 455 vm_paddr_t pmap_kextract(vm_offset_t); 456 void pmap_kremove(vm_offset_t); 457 int pmap_large_map(vm_paddr_t, vm_size_t, void **, vm_memattr_t); 458 void pmap_large_map_wb(void *sva, vm_size_t len); 459 void pmap_large_unmap(void *sva, vm_size_t len); 460 void *pmap_mapbios(vm_paddr_t, vm_size_t); 461 void *pmap_mapdev(vm_paddr_t, vm_size_t); 462 void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int); 463 void *pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size); 464 bool pmap_not_in_di(void); 465 bool pmap_page_is_mapped(vm_page_t m); 466 void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma); 467 void pmap_page_set_memattr_noflush(vm_page_t m, vm_memattr_t ma); 468 void pmap_pinit_pml4(vm_page_t); 469 void pmap_pinit_pml5(vm_page_t); 470 bool pmap_ps_enabled(pmap_t pmap); 471 void pmap_unmapdev(void *, vm_size_t); 472 void pmap_invalidate_page(pmap_t, vm_offset_t); 473 void pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t); 474 void pmap_invalidate_all(pmap_t); 475 void pmap_invalidate_cache(void); 476 void pmap_invalidate_cache_pages(vm_page_t *pages, int count); 477 void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva); 478 void pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva); 479 void pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num); 480 bool pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool); 481 void pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool); 482 void pmap_map_delete(pmap_t, vm_offset_t, vm_offset_t); 483 void pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec); 484 void pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva); 485 void pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3); 486 void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va); 487 void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva, 488 vm_offset_t eva); 489 int pmap_pkru_clear(pmap_t pmap, vm_offset_t sva, vm_offset_t eva); 490 int pmap_pkru_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 491 u_int keyidx, int flags); 492 void pmap_thread_init_invl_gen(struct thread *td); 493 int pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap); 494 void pmap_page_array_startup(long count); 495 vm_page_t pmap_page_alloc_below_4g(bool zeroed); 496 497 #if defined(KASAN) || defined(KMSAN) 498 void pmap_san_enter(vm_offset_t); 499 #endif 500 501 /* 502 * Returns a pointer to a set of CPUs on which the pmap is currently active. 503 * Note that the set can be modified without any mutual exclusion, so a copy 504 * must be made if a stable value is required. 505 */ 506 static __inline volatile cpuset_t * 507 pmap_invalidate_cpu_mask(pmap_t pmap) 508 { 509 return (&pmap->pm_active); 510 } 511 512 #if defined(_SYS_PCPU_H_) && defined(_MACHINE_CPUFUNC_H_) 513 /* 514 * It seems that AlderLake+ small cores have some microarchitectural 515 * bug, which results in the INVLPG instruction failing to flush all 516 * global TLB entries when PCID is enabled. Work around it for now, 517 * by doing global invalidation on small cores instead of INVLPG. 518 */ 519 static __inline void 520 pmap_invlpg(pmap_t pmap, vm_offset_t va) 521 { 522 if (pmap == kernel_pmap && PCPU_GET(pcid_invlpg_workaround)) { 523 struct invpcid_descr d = { 0 }; 524 525 invpcid(&d, INVPCID_CTXGLOB); 526 } else { 527 invlpg(va); 528 } 529 } 530 #endif /* sys/pcpu.h && machine/cpufunc.h */ 531 532 #if defined(_SYS_PCPU_H_) 533 /* Return pcid for the pmap pmap on current cpu */ 534 static __inline uint32_t 535 pmap_get_pcid(pmap_t pmap) 536 { 537 struct pmap_pcid *pcidp; 538 539 MPASS(pmap_pcid_enabled); 540 pcidp = zpcpu_get(pmap->pm_pcidp); 541 return (pcidp->pm_pcid); 542 } 543 #endif /* sys/pcpu.h */ 544 545 /* 546 * Invalidation request. PCPU pc_smp_tlb_op uses u_int instead of the 547 * enum to avoid both namespace and ABI issues (with enums). 548 */ 549 enum invl_op_codes { 550 INVL_OP_TLB = 1, 551 INVL_OP_TLB_INVPCID = 2, 552 INVL_OP_TLB_INVPCID_PTI = 3, 553 INVL_OP_TLB_PCID = 4, 554 INVL_OP_PGRNG = 5, 555 INVL_OP_PGRNG_INVPCID = 6, 556 INVL_OP_PGRNG_PCID = 7, 557 INVL_OP_PG = 8, 558 INVL_OP_PG_INVPCID = 9, 559 INVL_OP_PG_PCID = 10, 560 INVL_OP_CACHE = 11, 561 }; 562 563 typedef void (*smp_invl_local_cb_t)(struct pmap *, vm_offset_t addr1, 564 vm_offset_t addr2); 565 typedef void (*smp_targeted_tlb_shootdown_t)(pmap_t, vm_offset_t, vm_offset_t, 566 smp_invl_local_cb_t, enum invl_op_codes); 567 568 void smp_targeted_tlb_shootdown_native(pmap_t, vm_offset_t, vm_offset_t, 569 smp_invl_local_cb_t, enum invl_op_codes); 570 extern smp_targeted_tlb_shootdown_t smp_targeted_tlb_shootdown; 571 572 #endif /* _KERNEL */ 573 574 /* Return various clipped indexes for a given VA */ 575 static __inline vm_pindex_t 576 pmap_pte_index(vm_offset_t va) 577 { 578 579 return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1)); 580 } 581 582 static __inline vm_pindex_t 583 pmap_pde_index(vm_offset_t va) 584 { 585 586 return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1)); 587 } 588 589 static __inline vm_pindex_t 590 pmap_pdpe_index(vm_offset_t va) 591 { 592 593 return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1)); 594 } 595 596 static __inline vm_pindex_t 597 pmap_pml4e_index(vm_offset_t va) 598 { 599 600 return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1)); 601 } 602 603 static __inline vm_pindex_t 604 pmap_pml5e_index(vm_offset_t va) 605 { 606 607 return ((va >> PML5SHIFT) & ((1ul << NPML5EPGSHIFT) - 1)); 608 } 609 610 #endif /* !LOCORE */ 611 612 #endif /* !_MACHINE_PMAP_H_ */ 613 614 #endif /* __i386__ */ 615