/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields not present here and there, depending on a lot of things.
 */
				/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define	PG_RW		0x002	/* R/W	Read/Write		*/
#define	PG_U		0x004	/* U/S	User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=2M)	*/
#define	PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/
#define	PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#define	PG_NX		(1ul<<63) /* No-execute */

/* Our various interpretations of the above */
#define	PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#define	PG_FRAME	(0x000ffffffffff000ul)
#define	PG_PS_FRAME	(0x000fffffffe00000ul)
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits . */
#define	PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */

/* Page level cache control fields used to determine the PAT type */
#define	PG_PDE_CACHE	(PG_PDE_PAT | PG_NC_PWT | PG_NC_PCD)
#define	PG_PTE_CACHE	(PG_PTE_PAT | PG_NC_PWT | PG_NC_PCD)
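
/*
 * Illustrative note (not part of the original header): a valid 4KB mapping
 * is a page-aligned physical address OR-ed with the bits above, for example
 * "pa | PG_RW | PG_V | PG_G" for a global kernel mapping, and the physical
 * address is recovered from an entry with "pte & PG_FRAME".  PG_PS_FRAME
 * plays the same role for 2MB (PG_PS) superpage mappings.
 */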

/*
 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
 * (PTE) page mappings have identical settings for the following fields:
 */
#define	PG_PTE_PROMOTE	(PG_NX | PG_MANAGED | PG_W | PG_G | PG_PTE_PAT | \
	    PG_M | PG_A | PG_NC_PCD | PG_NC_PWT | PG_U | PG_RW | PG_V)

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */
#define	PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define	PGEX_I		0x10	/* during an instruction fetch */

/*
 * Pte related macros.  This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define	KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define	UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define	NKPML4E		1		/* number of kernel PML4 slots */

#define	NUPML4E		(NPML4EPG/2)	/* number of userland PML4 pages */
#define	NUPDPE		(NUPML4E*NPDPEPG)/* number of userland PDP pages */
#define	NUPDE		(NUPDPE*NPDEPG)	/* number of userland PD entries */

/*
 * NDMPML4E is the number of PML4 entries that are used to implement the
 * direct map.  It must be a power of two.
 */
#define	NDMPML4E	2

/*
 * The *PDI values control the layout of virtual memory.  The starting address
 * of the direct map, which is controlled by DMPML4I, must be a multiple of
 * its size.  (See the PHYS_TO_DMAP() and DMAP_TO_PHYS() macros.)
 */
#define	PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */

#define	KPML4I		(NPML4EPG-1)	/* Top 512GB for KVM */
#define	DMPML4I		rounddown(KPML4I - NDMPML4E, NDMPML4E) /* Below KVM */

#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */
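
/*
 * Worked example (a sketch, not part of the original header), assuming the
 * usual amd64 constants from <machine/param.h> (NPML4EPG == NPDPEPG == 512,
 * PML4SHIFT == 39, PDPSHIFT == 30): KPML4I == 511 and KPDPI == 510, so
 *
 *	KVADDR(KPML4I, KPDPI, 0, 0)
 *	    == 0xffff800000000000 | (511UL << 39) | (510UL << 30)
 *	    == 0xffffffff80000000
 *
 * which is the kernel base at -2GB; the high 17 bits come from the bit 47
 * sign extension that makes the address canonical.  Likewise, with
 * NDMPML4E == 2, DMPML4I rounds down to 508, so the direct map starts at
 * KVADDR(DMPML4I, 0, 0, 0) == 0xfffffe0000000000, a multiple of its 1TB
 * (two PML4 slots) size.
 */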

/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START	0xa0000
#define	ISA_HOLE_LENGTH	(0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#include <vm/_vm_radix.h>

typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
typedef u_int64_t pdp_entry_t;
typedef u_int64_t pml4_entry_t;

#define	PML4ESHIFT	(3)
#define	PDPESHIFT	(3)
#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

/*
 * Address of current address space page table maps and directories.
 */
#ifdef _KERNEL
#define	addr_PTmap	(KVADDR(PML4PML4I, 0, 0, 0))
#define	addr_PDmap	(KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_PDPmap	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_PML4map	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	PTmap		((pt_entry_t *)(addr_PTmap))
#define	PDmap		((pd_entry_t *)(addr_PDmap))
#define	PDPmap		((pd_entry_t *)(addr_PDPmap))
#define	PML4map		((pd_entry_t *)(addr_PML4map))
#define	PML4pml4e	((pd_entry_t *)(addr_PML4pml4e))

extern int nkpt;		/* Initial number of kernel page tables */
extern u_int64_t KPDPphys;	/* physical address of kernel level 3 */
extern u_int64_t KPML4phys;	/* physical address of kernel level 4 */

/*
 * virtual address to page table entry and
 * to physical address.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
pt_entry_t *vtopte(vm_offset_t);
#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
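
/*
 * Sketch (not part of the original header) of how the recursive map makes
 * vtopte() cheap.  The real body lives in pmap.c, but assuming the usual
 * shift/size constants from <machine/param.h> it is essentially an index
 * into PTmap:
 *
 *	pt_entry_t *
 *	vtopte(vm_offset_t va)
 *	{
 *		u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
 *		    NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
 *
 *		return (PTmap + ((va >> PAGE_SHIFT) & mask));
 *	}
 *
 * Because the recursive PML4 slot points back at the PML4 page itself,
 * applying the same lookup to the address returned by vtopte() yields the
 * page-directory entry that maps that page table page.
 */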

static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
	pt_entry_t r;

	r = *ptep;
	return (r);
}

/* Atomically exchange *ptep with pte, returning the previous entry. */
static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
{
	pt_entry_t r;

	__asm __volatile(
	    "xchgq %0,%1"
	    : "=m" (*ptep),
	      "=r" (r)
	    : "1" (pte),
	      "m" (*ptep));
	return (r);
}

#define	pte_load_clear(pte)	atomic_readandclear_long(pte)

static __inline void
pte_store(pt_entry_t *ptep, pt_entry_t pte)
{

	*ptep = pte;
}

#define	pte_clear(ptep)		pte_store((ptep), (pt_entry_t)0ULL)

#define	pde_store(pdep, pde)	pte_store((pdep), (pde))

extern pt_entry_t pg_nx;

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct	pv_entry;
struct	pv_chunk;

struct md_page {
	TAILQ_HEAD(,pv_entry)	pv_list;
	int			pat_mode;
};

/*
 * The kernel virtual address (KVA) of the level 4 page table page is always
 * within the direct map (DMAP) region.
 */
struct pmap {
	struct mtx		pm_mtx;
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	/* spare u_int here due to padding */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_radix		pm_root;	/* spare page table pages */
};

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.  With 24-byte pv_entry structures,
 * a chunk of _NPCPV (168) entries plus its header fills exactly one 4KB
 * page, and the 168-bit free bitmap fits in _NPCM (3) 64-bit words.
 */
#define	_NPCM	3
#define	_NPCPV	168
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint64_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};

#ifdef	_KERNEL

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
#define	pmap_unmapbios(va, sz)		pmap_unmapdev((va), (sz))

void	pmap_bootstrap(vm_paddr_t *);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
vm_paddr_t pmap_kextract(vm_offset_t);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */