/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E%	SMI"

/*
 * HAT interfaces used by the kernel debugger to interact with the VM system.
 * These interfaces are invoked when the world is stopped.  As such, no blocking
 * operations may be performed.
34 */ 35 36 #include <sys/cpuvar.h> 37 #include <sys/kdi_impl.h> 38 #include <sys/errno.h> 39 #include <sys/systm.h> 40 #include <sys/sysmacros.h> 41 #include <sys/mman.h> 42 #include <sys/bootconf.h> 43 #include <sys/cmn_err.h> 44 #include <vm/seg_kmem.h> 45 #include <vm/hat_i86.h> 46 #include <sys/machsystm.h> 47 48 /* 49 * The debugger needs direct access to the PTE of one page table entry 50 * in order to implement vtop and physical read/writes 51 */ 52 extern uintptr_t ptable_va; 53 static uintptr_t hat_kdi_page = 0; /* vaddr for phsical page accesses */ 54 static x86pte_t *hat_kdi_pte = NULL; /* vaddr of pte for hat_kdi_page */ 55 uint_t hat_kdi_use_pae; /* if 0, use x86pte32_t for pte type */ 56 57 /* 58 * Allocate virtual page to use for kernel debugger accesses to physical memory. 59 * This is done very early in boot - before vmem allocator is available, so 60 * we use a special hand picked address. (blech) The address is one page 61 * above where the hat will put pages for pagetables -- see ptable_alloc() -- 62 * and is outside of the kernel's address space. 63 * 64 * We'll pick a new VA after the kernel's hat has been initialized. 65 */ 66 void 67 hat_boot_kdi_init(void) 68 { 69 70 /* 71 * The 1st ptable_va page is for the HAT, we use the 2nd. 72 */ 73 hat_kdi_page = ptable_va + MMU_PAGESIZE; 74 #if defined(__amd64) 75 hat_kdi_use_pae = 1; 76 #elif defined(__i386) 77 hat_kdi_use_pae = 0; 78 #endif 79 } 80 81 /* 82 * Switch to using a page in the kernel's va range for physical memory access. 83 * We need to allocate a virtual page, then permanently map in the page that 84 * contains the PTE to it. 85 */ 86 void 87 hat_kdi_init(void) 88 { 89 htable_t *ht; 90 91 /* 92 * Get an kernel page VA to use for phys mem access. Then make sure 93 * the VA has a page table. 
94 */ 95 hat_kdi_use_pae = mmu.pae_hat; 96 hat_kdi_page = (uintptr_t)vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP); 97 ht = htable_create(kas.a_hat, hat_kdi_page, 0, NULL); 98 99 /* 100 * Get an address at which to put the pagetable and devload it. 101 */ 102 hat_kdi_pte = vmem_xalloc(heap_arena, MMU_PAGESIZE, MMU_PAGESIZE, 0, 103 0, NULL, NULL, VM_SLEEP); 104 hat_devload(kas.a_hat, (caddr_t)hat_kdi_pte, MMU_PAGESIZE, ht->ht_pfn, 105 PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_UNORDERED_OK, 106 HAT_LOAD | HAT_LOAD_NOCONSIST); 107 hat_kdi_pte = (x86pte_t *)((uintptr_t)hat_kdi_pte + 108 (htable_va2entry(hat_kdi_page, ht) << mmu.pte_size_shift)); 109 110 HTABLE_INC(ht->ht_valid_cnt); 111 htable_release(ht); 112 } 113 114 /*ARGSUSED*/ 115 int 116 kdi_vtop(uintptr_t va, uint64_t *pap) 117 { 118 uintptr_t vaddr = va; 119 size_t len; 120 pfn_t pfn; 121 uint_t prot; 122 int level; 123 x86pte_t pte; 124 int index; 125 126 /* 127 * if the mmu struct isn't relevant yet, we need to probe 128 * the boot loader's pagetables. 
129 */ 130 if (!khat_running) { 131 if (hat_boot_probe(&vaddr, &len, &pfn, &prot) == 0) 132 return (ENOENT); 133 if (vaddr > va) 134 return (ENOENT); 135 if (vaddr < va) 136 pfn += mmu_btop(va - vaddr); 137 *pap = (uint64_t)mmu_ptob(pfn) + (vaddr & MMU_PAGEOFFSET); 138 return (0); 139 } 140 141 /* 142 * We can't go through normal hat routines, so we'll use 143 * kdi_pread() to walk the page tables 144 */ 145 *pap = getcr3() & MMU_PAGEMASK; 146 for (level = mmu.max_level; ; --level) { 147 index = (va >> LEVEL_SHIFT(level)) & (mmu.ptes_per_table - 1); 148 *pap += index << mmu.pte_size_shift; 149 pte = 0; 150 if (kdi_pread((caddr_t)&pte, mmu.pte_size, *pap, &len) != 0) 151 return (ENOENT); 152 if (pte == 0) 153 return (ENOENT); 154 if (level > 0 && level <= mmu.max_page_level && 155 (pte & PT_PAGESIZE)) { 156 *pap = pte & PT_PADDR_LGPG; 157 break; 158 } else { 159 *pap = pte & PT_PADDR; 160 if (level == 0) 161 break; 162 } 163 } 164 *pap += va & LEVEL_OFFSET(level); 165 return (0); 166 } 167 168 static int 169 kdi_prw(caddr_t buf, size_t nbytes, uint64_t pa, size_t *ncopiedp, int doread) 170 { 171 size_t ncopied = 0; 172 off_t pgoff; 173 size_t sz; 174 caddr_t va; 175 caddr_t from; 176 caddr_t to; 177 x86pte_t pte; 178 179 /* 180 * if this is called before any initialization - fail 181 */ 182 if (hat_kdi_page == 0) 183 return (EAGAIN); 184 185 while (nbytes > 0) { 186 /* 187 * figure out the addresses and construct a minimal PTE 188 */ 189 pgoff = pa & MMU_PAGEOFFSET; 190 sz = MIN(nbytes, MMU_PAGESIZE - pgoff); 191 va = (caddr_t)hat_kdi_page + pgoff; 192 pte = MAKEPTE(btop(pa), 0); 193 if (doread) { 194 from = va; 195 to = buf; 196 } else { 197 PTE_SET(pte, PT_WRITABLE); 198 from = buf; 199 to = va; 200 } 201 202 /* 203 * map the physical page 204 */ 205 if (hat_kdi_pte == NULL) 206 (void) hat_boot_remap(hat_kdi_page, btop(pa)); 207 else if (hat_kdi_use_pae) 208 *hat_kdi_pte = pte; 209 else 210 *(x86pte32_t *)hat_kdi_pte = pte; 211 
mmu_tlbflush_entry((caddr_t)hat_kdi_page); 212 213 bcopy(from, to, sz); 214 215 /* 216 * erase the mapping 217 */ 218 if (hat_kdi_pte == NULL) 219 hat_boot_demap(hat_kdi_page); 220 else if (hat_kdi_use_pae) 221 *hat_kdi_pte = 0; 222 else 223 *(x86pte32_t *)hat_kdi_pte = 0; 224 mmu_tlbflush_entry((caddr_t)hat_kdi_page); 225 226 buf += sz; 227 pa += sz; 228 nbytes -= sz; 229 ncopied += sz; 230 } 231 232 if (ncopied == 0) 233 return (ENOENT); 234 235 *ncopiedp = ncopied; 236 return (0); 237 } 238 239 int 240 kdi_pread(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp) 241 { 242 return (kdi_prw(buf, nbytes, addr, ncopiedp, 1)); 243 } 244 245 int 246 kdi_pwrite(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp) 247 { 248 return (kdi_prw(buf, nbytes, addr, ncopiedp, 0)); 249 } 250 251 252 /* 253 * Return the number of bytes, relative to the beginning of a given range, that 254 * are non-toxic (can be read from and written to with relative impunity). 255 */ 256 /*ARGSUSED*/ 257 size_t 258 kdi_range_is_nontoxic(uintptr_t va, size_t sz, int write) 259 { 260 #ifdef __amd64 261 extern uintptr_t toxic_addr; 262 extern size_t toxic_size; 263 264 /* 265 * Check 64 bit toxic range. 266 */ 267 if (toxic_addr != 0 && 268 va + sz >= toxic_addr && 269 va < toxic_addr + toxic_size) 270 return (va < toxic_addr ? toxic_addr - va : 0); 271 272 /* 273 * avoid any Virtual Address hole 274 */ 275 if (va + sz >= hole_start && va < hole_end) 276 return (va < hole_start ? hole_start - va : 0); 277 278 return (sz); 279 280 #else 281 extern void *device_arena_contains(void *, size_t, size_t *); 282 uintptr_t v; 283 284 v = (uintptr_t)device_arena_contains((void *)va, sz, NULL); 285 if (v == 0) 286 return (sz); 287 else if (v <= va) 288 return (0); 289 else 290 return (v - va); 291 292 #endif 293 } 294 295 void 296 hat_kdi_fini(void) 297 { 298 } 299