/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler; this version by William
 * Jolitz uses a recursive map [a PDE points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory of large, sparse address
 * spaces, and to reduce the memory cost to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format.  Some of the
 * fields are unused in a given entry; which ones depend on the entry's
 * level and on the paging mode in effect.
 */
/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define	PG_RW		0x002	/* R/W	Read/Write		*/
#define	PG_U		0x004	/* U/S	User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=4M)	*/
#define	PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/
#define	PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#if defined(PAE) || defined(PAE_TABLES)
#define	PG_NX		(1ull<<63) /* No-execute */
#endif
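
/*
 * As an illustration only (a sketch, not an interface provided by this
 * header): a valid, writable, supervisor-only 4KB mapping of the physical
 * page at "pa" could be composed from the bits above as
 *
 *	pt_entry_t pte = (pa & PG_FRAME) | PG_RW | PG_V;
 *
 * where PG_FRAME (defined below) masks "pa" down to its page frame.
 */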

/* Our various interpretations of the above */
#define	PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#define	PG_PROMOTED	PG_AVAIL3	/* PDE only */
#if defined(PAE) || defined(PAE_TABLES)
#define	PG_FRAME	(0x000ffffffffff000ull)
#define	PG_PS_FRAME	(0x000fffffffe00000ull)
#else
#define	PG_FRAME	(~PAGE_MASK)
#define	PG_PS_FRAME	(0xffc00000)
#endif
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits */
#define	PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */

/* Page level cache control fields used to determine the PAT type */
#define	PG_PDE_CACHE	(PG_PDE_PAT | PG_NC_PWT | PG_NC_PCD)
#define	PG_PTE_CACHE	(PG_PTE_PAT | PG_NC_PWT | PG_NC_PCD)

/*
 * Promotion to a 2 or 4MB (PDE) page mapping requires that the corresponding
 * 4KB (PTE) page mappings have identical settings for the following fields:
 */
#define	PG_PTE_PROMOTE	(PG_MANAGED | PG_W | PG_G | PG_PTE_PAT | \
	    PG_M | PG_A | PG_NC_PCD | PG_NC_PWT | PG_U | PG_RW | PG_V)

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */
#define	PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define	PGEX_I		0x10	/* during an instruction fetch */

/*
 * Size of the kernel address space.  This is the number of page table pages
 * (4MB each) to use for the kernel.  256 pages == 1 gigabyte.
 * This **MUST** be a multiple of 4 (e.g., 252, 256, 260).
 * For PAE, the page table page unit size is 2MB, so 512 pages is 1 gigabyte:
 * double everything.  It must be a multiple of 8 for PAE.
 */
#if defined(PAE) || defined(PAE_TABLES)
#define	KVA_PAGES	(512*4)
#else
#define	KVA_PAGES	(256*4)
#endif

/*
 * Pte related macros
 */
#define	VADDR(pdi, pti)	((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
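
/*
 * A worked example (hypothetical values, not part of this header): assuming
 * the non-PAE constants PDRSHIFT == 22 and PAGE_SHIFT == 12 from
 * <machine/param.h>,
 *
 *	VADDR(1, 16) == (1 << 22) | (16 << 12) == 0x00410000
 *
 * i.e. page 16 within the 4MB region mapped by page directory entry 1.
 */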

/*
 * The initial number of kernel page table pages that are constructed
 * by locore must be sufficient to map vm_page_array.  That number can
 * be calculated as follows:
 *	max_phys / PAGE_SIZE * sizeof(struct vm_page) / NBPDR
 * PAE: max_phys 16G, sizeof(vm_page) 76, NBPDR 2M, 152 page table pages.
 * PAE_TABLES: max_phys 4G, sizeof(vm_page) 68, NBPDR 2M, 36 page table pages.
 * Non-PAE: max_phys 4G, sizeof(vm_page) 68, NBPDR 4M, 18 page table pages.
 */
#ifndef NKPT
#if defined(PAE)
#define	NKPT		240
#elif defined(PAE_TABLES)
#define	NKPT		60
#else
#define	NKPT		30
#endif
#endif

#ifndef NKPDE
#define	NKPDE		(KVA_PAGES)	/* number of page tables/pde's */
#endif

/*
 * The *PTDI values control the layout of virtual memory
 */
#define	KPTDI		0	/* start of kernel virtual pde's */
#define	LOWPTDI		1	/* low memory map pde */
#define	KERNPTDI	2	/* start of kernel text pde */
#define	PTDPTDI		(NPDEPTD - 1 - NPGPTD)	/* ptd entry that points
						   to ptd! */
#define	TRPTDI		(NPDEPTD - 1)	/* u/k trampoline ptd */

/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START		0xa0000
#define	ISA_HOLE_LENGTH		(0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#include <vm/_vm_radix.h>

#if defined(PAE) || defined(PAE_TABLES)

typedef uint64_t pdpt_entry_t;
typedef uint64_t pd_entry_t;
typedef uint64_t pt_entry_t;

#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

#else

typedef uint32_t pd_entry_t;
typedef uint32_t pt_entry_t;

#define	PTESHIFT	(2)
#define	PDESHIFT	(2)

#endif

/*
 * Address of current address space page table maps and directories.
 */
#ifdef _KERNEL
extern pt_entry_t PTmap[];
extern pd_entry_t PTD[];
extern pd_entry_t PTDpde[];

#if defined(PAE) || defined(PAE_TABLES)
extern pdpt_entry_t *IdlePDPT;
#endif
extern pd_entry_t *IdlePTD;	/* physical address of "Idle" state directory */

/*
 * Translate a virtual address to the kernel virtual address of its page table
 * entry (PTE).  This can be used recursively.  If the address of a PTE as
 * previously returned by this macro is itself given as the argument, then the
 * address of the page directory entry (PDE) that maps the PTE will be
 * returned.
 *
 * This macro may be used before pmap_bootstrap() is called.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))

/*
 * Translate a virtual address to its physical address.
 *
 * This macro may be used before pmap_bootstrap() is called.
 */
#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))

/*
 * KPTmap is a linear mapping of the kernel page table.  It differs from the
 * recursive mapping in two ways: (1) it only provides access to kernel page
 * table pages, and not user page table pages, and (2) it provides access to
 * a kernel page table page after the corresponding virtual addresses have
 * been promoted to a 2/4MB page mapping.
 *
 * KPTmap is first initialized by locore to support just NKPT page table
 * pages.  Later, it is reinitialized by pmap_bootstrap() to allow for
 * expansion of the kernel page table.
 */
extern pt_entry_t *KPTmap;

/*
 * Extract from the kernel page table the physical address that is mapped by
 * the given virtual address "va".
 *
 * This function may be used before pmap_bootstrap() is called.
 */
static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	vm_paddr_t pa;

	if ((pa = PTD[va >> PDRSHIFT]) & PG_PS) {
		pa = (pa & PG_PS_FRAME) | (va & PDRMASK);
	} else {
		/*
		 * Beware of a concurrent promotion that changes the PDE at
		 * this point!  For example, vtopte() must not be used to
		 * access the PTE because it would use the new PDE.  It is,
		 * however, safe to use the old PDE because the page table
		 * page is preserved by the promotion.
		 */
		pa = KPTmap[i386_btop(va)];
		pa = (pa & PG_FRAME) | (va & PAGE_MASK);
	}
	return (pa);
}
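
/*
 * A usage sketch (illustrative only, with variable names invented here):
 * because the recursive map aliases the page directory as a page table
 * page, applying vtopte() twice walks one level up the paging hierarchy:
 *
 *	pt_entry_t *pte = vtopte(va);			(PTE mapping "va")
 *	pd_entry_t *pde =
 *	    (pd_entry_t *)vtopte((vm_offset_t)pte);	(PDE mapping that PTE)
 */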

#if (defined(PAE) || defined(PAE_TABLES))

#define	pde_cmpset(pdep, old, new)	atomic_cmpset_64_i586(pdep, old, new)
#define	pte_load_store(ptep, pte)	atomic_swap_64_i586(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_64_i586(ptep, 0)
#define	pte_store(ptep, pte)		atomic_store_rel_64_i586(ptep, pte)

extern pt_entry_t pg_nx;

#else /* !(PAE || PAE_TABLES) */

#define	pde_cmpset(pdep, old, new)	atomic_cmpset_int(pdep, old, new)
#define	pte_load_store(ptep, pte)	atomic_swap_int(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_int(ptep, 0)
#define	pte_store(ptep, pte) do { \
	*(u_int *)(ptep) = (u_int)(pte); \
} while (0)

#endif /* !(PAE || PAE_TABLES) */

#define	pte_clear(ptep)			pte_store(ptep, 0)

#define	pde_store(pdep, pde)		pte_store(pdep, pde)
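
/*
 * Illustrative sketch only (hypothetical; locking, preemption, and PAT
 * cache bits are ignored): a pmap_kenter()-style mapping could be written
 * in terms of the helpers above as
 *
 *	pte_store(vtopte(va), (pa & PG_FRAME) | PG_RW | PG_V);
 *
 * and torn down with pte_load_clear(vtopte(va)), after which the stale
 * TLB entry for "va" would still need to be invalidated, e.g. via
 * pmap_invalidate_page() (declared below).
 */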

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct pv_entry;
struct pv_chunk;

struct md_page {
	TAILQ_HEAD(,pv_entry)	pv_list;
	int			pat_mode;
};

struct pmap {
	struct mtx		pm_mtx;
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
#if defined(PAE) || defined(PAE_TABLES)
	pdpt_entry_t		*pm_pdpt;	/* KVA of page directory pointer
						   table */
#endif
	struct vm_radix		pm_root;	/* spare page table pages */
	vm_page_t		pm_ptdpg[NPGPTD];
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t; the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t		pv_va;	/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	11
#define	_NPCPV	336
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};

#ifdef _KERNEL

extern caddr_t CADDR3;
extern pt_entry_t *CMAP3;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
#define	pmap_unmapbios(va, sz)		pmap_unmapdev((va), (sz))

/*
 * Only the following functions or macros may be used before pmap_bootstrap()
 * is called: pmap_kenter(), pmap_kextract(), pmap_kremove(), vtophys(), and
 * vtopte().
 */
void	pmap_activate_boot(pmap_t pmap);
void	pmap_bootstrap(vm_paddr_t);
int	pmap_cache_bits(pmap_t, int mode, boolean_t is_pde);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
bool	pmap_ps_enabled(pmap_t pmap);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
void	pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
void	*pmap_trm_alloc(size_t size, int flags);
void	pmap_trm_free(void *addr, size_t size);

void	invltlb_glob(void);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */