/*
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields not present here and there, depending on a lot of things.
 */
				/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define	PG_RW		0x002	/* R/W	Read/Write		*/
#define	PG_U		0x004	/* U/S	User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define	PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=2M)	*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/
#define	PG_NX		(1ul<<63) /* No-execute */

/* Our various interpretations of the above */
#define	PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#define	PG_FRAME	(0x000ffffffffff000ul)
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits */
#define	PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */
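
/*
 * Editorial example (not part of the original header): a writable,
 * global 4K kernel mapping of physical address "pa" would typically be
 * composed from the bits above as
 *
 *	pte = (pa & PG_FRAME) | PG_V | PG_RW | PG_G;
 *
 * with the frame recovered again as (pte & PG_FRAME), and (pte & PG_M)
 * testing whether the hardware has marked the page dirty.
 */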

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */

/*
 * Pte related macros.  This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define	KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define	UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#ifndef NKPT
#define	NKPT		120	/* initial number of kernel page tables */
#endif

#define	NKPML4E		1		/* number of kernel PML4 slots */
#define	NKPDPE		1		/* number of kernel PDP slots */
#define	NKPDE		(NKPDPE*NPDEPG)	/* number of kernel PD slots */

#define	NUPML4E		(NPML4EPG/2)	/* number of userland PML4 pages */
#define	NUPDPE		(NUPML4E*NPDPEPG) /* number of userland PDP pages */
#define	NUPDE		(NUPDPE*NPDEPG)	/* number of userland PD entries */

#define	NDMPML4E	1		/* number of dmap PML4 slots */

/*
 * The *PDI values control the layout of virtual memory
 */
#define	PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */

#define	KPML4I		(NPML4EPG-1)	/* Top 512GB for KVM */
#define	DMPML4I		(KPML4I-1)	/* Next 512GB down for direct map */

#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */

/*
 * XXX doesn't really belong here I guess...
 */
#define	ISA_HOLE_START		0xa0000
#define	ISA_HOLE_LENGTH		(0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
typedef u_int64_t pdp_entry_t;
typedef u_int64_t pml4_entry_t;

#define	PML4ESHIFT	(3)
#define	PDPESHIFT	(3)
#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

/*
 * Address of current address space page table maps and directories.
 * XXX it might be saner to just direct map all of physical memory
 * into the kernel using 2MB pages.  We have enough space to do
 * it (2^47 bits of KVM, while current max physical addressability
 * is 2^40 physical bits).  Then we can get rid of the evil hole
 * in the page tables and the evil overlapping.
 */
#ifdef _KERNEL
#define	addr_PTmap	(KVADDR(PML4PML4I, 0, 0, 0))
#define	addr_PDmap	(KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_PDPmap	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_PML4map	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	PTmap		((pt_entry_t *)(addr_PTmap))
#define	PDmap		((pd_entry_t *)(addr_PDmap))
#define	PDPmap		((pdp_entry_t *)(addr_PDPmap))
#define	PML4map		((pml4_entry_t *)(addr_PML4map))
#define	PML4pml4e	((pml4_entry_t *)(addr_PML4pml4e))

extern u_int64_t KPML4phys;	/* physical address of kernel level 4 */
#endif
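
/*
 * Editorial sketch (an assumption about the implementation, compiled out
 * by default): with the recursive slot wired up as above, a vtopte()
 * lookup reduces to indexing PTmap by the virtual page number of "va",
 * masked to the four 9-bit page table index fields so that the kernel
 * address sign-extension bits drop out.
 */
#if defined(_KERNEL) && defined(PMAP_RECURSIVE_EXAMPLE)
static __inline pt_entry_t *
example_vtopte(vm_offset_t va)
{
	u_int64_t mask = ((1ul << (4 * 9)) - 1);	/* 36 VPN bits */

	return (PTmap + ((va >> PAGE_SHIFT) & mask));
}
#endif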

#ifdef _KERNEL
/*
 * virtual address to page table entry and
 * to physical address.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
pt_entry_t *vtopte(vm_offset_t);
vm_paddr_t pmap_kextract(vm_offset_t);

#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))

static __inline pt_entry_t
pte_load(pt_entry_t *ptep)
{
	pt_entry_t r;

	r = *ptep;
	return (r);
}

static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t pte)
{
	pt_entry_t r;

	r = *ptep;
	*ptep = pte;
	return (r);
}

#define	pte_load_clear(pte)	atomic_readandclear_long(pte)

#define	pte_clear(ptep)		pte_load_store((ptep), (pt_entry_t)0ULL)
#define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)pte)

#define	pde_store(pdep, pde)	pte_store((pdep), (pde))

extern pt_entry_t pg_nx;

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct pv_entry;

struct md_page {
	int			pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct pmap {
	struct mtx		pm_mtx;
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
	u_int			pm_active;	/* active on cpus */
	/* spare u_int here due to padding */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
};

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	TAILQ_ENTRY(pv_entry)	pv_plist;
} *pv_entry_t;

#ifdef _KERNEL

#define	NPPROVMTRR		8
#define	PPRO_VMTRRphysBase0	0x200
#define	PPRO_VMTRRphysMask0	0x201
struct ppro_vmtrr {
	u_int64_t base, mask;
};
extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t avail_end;
extern vm_paddr_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))

void	pmap_bootstrap(vm_paddr_t *);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */
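
/*
 * Editorial usage sketch (hypothetical caller, for illustration only):
 * per-pmap state is serialized with the PMAP_LOCK() macros declared
 * above, so a typical update sequence in pmap.c looks like
 *
 *	PMAP_LOCK(pmap);
 *	... modify the pmap's page tables, e.g. via pte_store() ...
 *	pmap_invalidate_page(pmap, va);		(flush the stale TLB entry)
 *	PMAP_UNLOCK(pmap);
 */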