/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/debug.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/bootinfo.h>
#include <sys/mman.h>
#include <sys/cmn_err.h>
#include <sys/param.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/promif.h>
#include <sys/kobj.h>
#include <vm/kboot_mmu.h>
#include <vm/hat_pte.h>
#include <vm/hat_i86.h>
#include <vm/seg_kmem.h>

#if 0	/* flip to 1 for debug tracing of boot-time page table operations */
/*
 * Joe's debug printing
 */
#define	DBG(x) \
	bop_printf(NULL, "boot_mmu.c: %s is %" PRIx64 "\n", #x, (uint64_t)(x));
#else
#define	DBG(x)	/* naught */
#endif

/*
 * Page table and memory stuff.
 */
static caddr_t window;		/* VA used to access arbitrary physical pages */
static caddr_t pte_to_window;	/* VA of the PTE that maps "window" itself */

/*
 * These are needed by mmu_init()
 */
int kbm_nx_support = 0;		/* NX bit in PTEs is in use */
int kbm_pae_support = 0;	/* PAE is 64 bit Page table entries */
int kbm_pge_support = 0;	/* PGE is Page table global bit enabled */
int kbm_largepage_support = 0;
uint_t kbm_nucleus_size = 0;

/*
 * Size, offset and address mask of a page at page table level "l".
 * shift_amt is selected in kbm_init() for PAE vs. non-PAE formats.
 */
#define	BOOT_SHIFT(l)	(shift_amt[l])
#define	BOOT_SZ(l)	((size_t)1 << BOOT_SHIFT(l))
#define	BOOT_OFFSET(l)	(BOOT_SZ(l) - 1)
#define	BOOT_MASK(l)	(~BOOT_OFFSET(l))

/*
 * Initialize memory management parameters for boot time page table management
 * from the information the boot loader handed us in *bi.
 */
void
kbm_init(struct xboot_info *bi)
{
	/*
	 * configure mmu information
	 */
	kbm_nucleus_size = (uintptr_t)bi->bi_kseg_size;
	kbm_largepage_support = bi->bi_use_largepage;
	kbm_nx_support = bi->bi_use_nx;
	kbm_pae_support = bi->bi_use_pae;
	kbm_pge_support = bi->bi_use_pge;
	window = bi->bi_pt_window;
	DBG(window);
	pte_to_window = bi->bi_pte_to_pt_window;
	DBG(pte_to_window);
	if (kbm_pae_support) {
		/* PAE: 8 byte PTEs, 512 entries per table, 2M large pages */
		shift_amt = shift_amt_pae;
		ptes_per_table = 512;
		pte_size = 8;
		lpagesize = TWO_MEG;
#ifdef __amd64
		top_level = 3;
#else
		top_level = 2;
#endif
	} else {
		/* non-PAE: 4 byte PTEs, 1024 entries per table, 4M pages */
		shift_amt = shift_amt_nopae;
		ptes_per_table = 1024;
		pte_size = 4;
		lpagesize = FOUR_MEG;
		top_level = 1;
	}

	top_page_table = bi->bi_top_page_table;
	DBG(top_page_table);
}

/*
 * Change the addressable page table window to point at a given page.
 * Returns the (fixed) window VA.  Note the "writeable" argument is
 * currently unused; the window is always mapped writable.
 */
/*ARGSUSED*/
void *
kbm_remap_window(paddr_t physaddr, int writeable)
{
	uint_t pt_bits = PT_NOCONSIST | PT_VALID | PT_WRITABLE;

	DBG(physaddr);

	/* rewrite the PTE backing the window, using the proper PTE width */
	if (kbm_pae_support)
		*((x86pte_t *)pte_to_window) = physaddr | pt_bits;
	else
		*((x86pte32_t *)pte_to_window) = physaddr | pt_bits;
	mmu_tlbflush_entry(window);
	DBG(window);
	return (window);
}

/*
 * Add a
 * mapping for the physical page at the given virtual address.
 */
void
kbm_map(uintptr_t va, paddr_t pa, uint_t level, uint_t is_kernel)
{
	x86pte_t *ptep;
	paddr_t pte_physaddr;
	x86pte_t pteval;

	/* once the kernel HAT is live, boot page tables must not change */
	if (khat_running)
		panic("kbm_map() called too late");

	pteval = pa_to_ma(pa) | PT_NOCONSIST | PT_VALID | PT_WRITABLE;
	if (level == 1)
		pteval |= PT_PAGESIZE;	/* a level 1 mapping is a large page */
	if (kbm_pge_support && is_kernel)
		pteval |= PT_GLOBAL;

	/*
	 * Find the pte that will map this address. This creates any
	 * missing intermediate level page tables.
	 */
	ptep = find_pte(va, &pte_physaddr, level, 0);
	if (ptep == NULL)
		bop_panic("kbm_map: find_pte returned NULL");

	if (kbm_pae_support)
		*ptep = pteval;
	else
		*((x86pte32_t *)ptep) = pteval;
	mmu_tlbflush_entry((caddr_t)va);
}

/*
 * Probe the boot time page tables to find the first mapping
 * including va (or higher) and return non-zero if one is found.
 * va is updated to the starting address and len to the pagesize.
 * pp will be set to point to the 1st page_t of the mapped page(s).
 *
 * Note that if va is in the middle of a large page, the returned va
 * will be less than what was asked for.
 */
int
kbm_probe(uintptr_t *va, size_t *len, pfn_t *pfn, uint_t *prot)
{
	uintptr_t probe_va;
	x86pte_t *ptep;
	paddr_t pte_physaddr;
	x86pte_t pte_val;
	level_t l;

	if (khat_running)
		panic("kbm_probe() called too late");
	*len = 0;
	*pfn = PFN_INVALID;
	*prot = 0;
	probe_va = *va;
restart_new_va:
	l = top_level;
	for (;;) {
		if (IN_VA_HOLE(probe_va))
			probe_va = mmu.hole_end;

		if (IN_HYPERVISOR_VA(probe_va))
			return (0);

		/*
		 * If we don't have a valid PTP/PTE at this level
		 * then we can bump VA by this level's pagesize and try again.
		 * When the probe_va wraps around, we are done.
		 */
		ptep = find_pte(probe_va, &pte_physaddr, l, 1);
		if (ptep == NULL)
			bop_panic("kbm_probe: find_pte returned NULL");
		if (kbm_pae_support)
			pte_val = *ptep;
		else
			pte_val = *((x86pte32_t *)ptep);
		if (!PTE_ISVALID(pte_val)) {
			probe_va = (probe_va & BOOT_MASK(l)) + BOOT_SZ(l);
			if (probe_va <= *va)
				return (0);	/* wrapped around: finished */
			goto restart_new_va;
		}

		/*
		 * If this entry is a pointer to a lower level page table
		 * go down to it.
		 */
		if (!PTE_ISPAGE(pte_val, l)) {
			ASSERT(l > 0);
			--l;
			continue;
		}

		/*
		 * We found a boot level page table entry
		 */
		*len = BOOT_SZ(l);
		*va = probe_va & ~(*len - 1);
		*pfn = PTE2PFN(pte_val, l);


		*prot = PROT_READ | PROT_EXEC;
		if (PTE_GET(pte_val, PT_WRITABLE))
			*prot |= PROT_WRITE;

		/*
		 * pt_nx is cleared if processor doesn't support NX bit
		 */
		if (PTE_GET(pte_val, mmu.pt_nx))
			*prot &= ~PROT_EXEC;

		return (1);
	}
}


/*
 * Destroy a boot loader page table 4K mapping.
 */
void
kbm_unmap(uintptr_t va)
{
	if (khat_running)
		panic("kbm_unmap() called too late");
	else {
		x86pte_t *ptep;
		level_t level = 0;
		uint_t probe_only = 1;

		/* probe only: don't create page tables just to unmap */
		ptep = find_pte(va, NULL, level, probe_only);
		if (ptep == NULL)
			return;		/* not mapped; nothing to do */

		if (kbm_pae_support)
			*ptep = 0;
		else
			*((x86pte32_t *)ptep) = 0;
		mmu_tlbflush_entry((caddr_t)va);
	}
}


/*
 * Change a boot loader page table 4K mapping.
 * Returns the pfn of the old mapping.
 */
pfn_t
kbm_remap(uintptr_t va, pfn_t pfn)
{
	x86pte_t *ptep;
	level_t level = 0;
	uint_t probe_only = 1;
	x86pte_t pte_val = pa_to_ma(pfn_to_pa(pfn)) | PT_WRITABLE |
	    PT_NOCONSIST | PT_VALID;
	x86pte_t old_pte;

	if (khat_running)
		panic("kbm_remap() called too late");
	ptep = find_pte(va, NULL, level, probe_only);
	if (ptep == NULL)
		bop_panic("kbm_remap: find_pte returned NULL");

	/* capture the old entry before overwriting it */
	if (kbm_pae_support)
		old_pte = *ptep;
	else
		old_pte = *((x86pte32_t *)ptep);

	if (kbm_pae_support)
		*((x86pte_t *)ptep) = pte_val;
	else
		*((x86pte32_t *)ptep) = pte_val;
	mmu_tlbflush_entry((caddr_t)va);

	/*
	 * If there was no valid prior mapping (or no machine-to-physical
	 * translation for it), report PFN_INVALID rather than a bogus pfn.
	 */
	if (!(old_pte & PT_VALID) || ma_to_pa(old_pte) == -1)
		return (PFN_INVALID);
	return (mmu_btop(ma_to_pa(old_pte)));
}


/*
 * Change a boot loader page table 4K mapping to read only.
 */
void
kbm_read_only(uintptr_t va, paddr_t pa)
{
	/*
	 * Note no PT_WRITABLE — that is what makes the mapping read only.
	 * PT_REF | PT_MOD are set up front, presumably so hardware never
	 * needs to update this PTE afterwards — NOTE(review): confirm.
	 */
	x86pte_t pte_val = pa_to_ma(pa) |
	    PT_NOCONSIST | PT_REF | PT_MOD | PT_VALID;
	x86pte_t *ptep;
	level_t level = 0;

	ptep = find_pte(va, NULL, level, 0);
	if (ptep == NULL)
		bop_panic("kbm_read_only: find_pte returned NULL");

	if (kbm_pae_support)
		*ptep = pte_val;
	else
		*((x86pte32_t *)ptep) = pte_val;
	mmu_tlbflush_entry((caddr_t)va);
}

/*
 * interfaces for kernel debugger to access physical memory
 */
static x86pte_t save_pte;	/* window PTE saved across push()/pop() */

void *
kbm_push(paddr_t pa)
{
	static int first_time = 1;

	/*
	 * On the very first call there is no previous window mapping
	 * worth saving, so just hand back the window address.
	 */
	if (first_time) {
		first_time = 0;
		return (window);
	}

	if (kbm_pae_support)
		save_pte = *((x86pte_t *)pte_to_window);
	else
		save_pte = *((x86pte32_t *)pte_to_window);
	return (kbm_remap_window(pa, 0));
}

void
kbm_pop(void)
{
	/* restore the window PTE saved by the matching kbm_push() */
	if (kbm_pae_support)
		*((x86pte_t *)pte_to_window) = save_pte;
	else
		*((x86pte32_t *)pte_to_window) = save_pte;
	mmu_tlbflush_entry(window);
}

x86pte_t
get_pteval(paddr_t table, uint_t index)
{
	/* fetch entry "index" of the page table at physical addr "table" */
	void *table_ptr = kbm_remap_window(table, 0);

	if (kbm_pae_support)
		return (((x86pte_t *)table_ptr)[index]);
	return (((x86pte32_t *)table_ptr)[index]);
}

void
set_pteval(paddr_t table, uint_t index, uint_t level, x86pte_t pteval)
{
	/* store entry "index" of the page table at physical addr "table" */
	void *table_ptr = kbm_remap_window(table, 0);
	if (kbm_pae_support)
		((x86pte_t *)table_ptr)[index] = pteval;
	else
		((x86pte32_t *)table_ptr)[index] = pteval;
	/*
	 * A write to the top table on 32-bit PAE (top_level == 2, see
	 * kbm_init()) changes a PDPTE, which the processor only re-reads
	 * on a %cr3 reload — see the Intel SDM on PAE paging.
	 */
	if (level == top_level && level == 2)
		reload_cr3();
}

paddr_t
make_ptable(x86pte_t *pteval, uint_t level)
{
	paddr_t new_table;
	void *table_ptr;

	/* allocate and zero a physical page to be the new page table */
	new_table = do_bop_phys_alloc(MMU_PAGESIZE, MMU_PAGESIZE);
	table_ptr = kbm_remap_window(new_table, 1);
	bzero(table_ptr, MMU_PAGESIZE);

	/*
	 * 32-bit PAE top level entries (PDPTEs) get PT_VALID only —
	 * presumably because the other permission bits are reserved in
	 * PDPTEs (see Intel SDM); all other levels get the full set.
	 */
	if (level == top_level && level == 2)
		*pteval = pa_to_ma(new_table) | PT_VALID;
	else
		*pteval = pa_to_ma(new_table) |
		    PT_VALID | PT_REF | PT_USER | PT_WRITABLE;

	return (new_table);
}

x86pte_t *
map_pte(paddr_t table, uint_t index)
{
	/* return a window-relative pointer to entry "index" of "table" */
	void *table_ptr = kbm_remap_window(table, 0);
	return ((x86pte_t *)((caddr_t)table_ptr + index * pte_size));
}