/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 6/4/93";
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * AMD64 machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>

#include <machine/elf.h>

#include <limits.h>

#include "kvm_private.h"

#ifndef btop
#define	btop(x)		(amd64_btop(x))
#define	ptob(x)		(amd64_ptob(x))
#endif

struct vmstate {
	void		*mmapbase;
	size_t		mmapsize;
	pml4_entry_t	*PML4;
};

/*
 * Map the ELF headers into the process' address space. We do this in two
 * steps: first the ELF header itself and using that information the whole
 * set of headers. (Taken from kvm_ia64.c)
 */
static int
_kvm_maphdrs(kvm_t *kd, size_t sz)
{
	struct vmstate *vm = kd->vmst;

	/* munmap() previous mmap(). */
	if (vm->mmapbase != NULL) {
		munmap(vm->mmapbase, vm->mmapsize);
		vm->mmapbase = NULL;
	}

	vm->mmapsize = sz;
	vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->mmapbase == MAP_FAILED) {
		_kvm_err(kd, kd->program, "cannot mmap corefile");
		return (-1);
	}
	return (0);
}

/*
 * Translate a physical memory address to a file-offset in the crash-dump.
 * (Taken from kvm_ia64.c)
 */
static size_t
_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs)
{
	Elf_Ehdr *e = kd->vmst->mmapbase;
	Elf_Phdr *p = (Elf_Phdr *)((char *)e + e->e_phoff);
	int n = e->e_phnum;

	/* Find the program header whose physical range contains pa. */
	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
		p++, n--;
	if (n == 0)
		return (0);
	*ofs = (pa - p->p_paddr) + p->p_offset;
	return (PAGE_SIZE - ((size_t)pa & PAGE_MASK));
}

void
_kvm_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	if (vm->mmapbase != NULL)
		munmap(vm->mmapbase, vm->mmapsize);
	if (vm->PML4)
		free(vm->PML4);
	free(vm);
	kd->vmst = NULL;
}

int
_kvm_initvtop(kvm_t *kd)
{
	struct nlist nlist[2];
	u_long pa;
	u_long kernbase;
	pml4_entry_t *PML4;
	Elf_Ehdr *ehdr;
	size_t hdrsz;

	kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
	if (kd->vmst == 0) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst->PML4 = 0;

	if (_kvm_maphdrs(kd, sizeof(Elf_Ehdr)) == -1)
		return (-1);

	ehdr = kd->vmst->mmapbase;
	hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum;
	if (_kvm_maphdrs(kd, hdrsz) == -1)
		return (-1);

	nlist[0].n_name = "kernbase";
	nlist[1].n_name = 0;

	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "bad namelist - no kernbase");
		return (-1);
	}
	kernbase = nlist[0].n_value;

	nlist[0].n_name = "KPML4phys";
	nlist[1].n_name = 0;

	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "bad namelist - no KPML4phys");
		return (-1);
	}
	if (kvm_read(kd, (nlist[0].n_value - kernbase), &pa, sizeof(pa)) !=
	    sizeof(pa)) {
		_kvm_err(kd, kd->program, "cannot read KPML4phys");
		return (-1);
	}
	PML4 = _kvm_malloc(kd, PAGE_SIZE);
	if (PML4 == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate PML4 table");
		return (-1);
	}
	if (kvm_read(kd, pa, PML4, PAGE_SIZE) != PAGE_SIZE) {
		_kvm_err(kd, kd->program, "cannot read KPML4phys");
		return (-1);
	}
	kd->vmst->PML4 = PML4;
	return (0);
}

static int
_kvm_vatop(kvm_t *kd, u_long va, off_t *pa)
{
	struct vmstate *vm;
	u_long offset;
	u_long pdpe_pa;
	u_long pde_pa;
	u_long pte_pa;
	pml4_entry_t pml4e;
	pdp_entry_t pdpe;
	pd_entry_t pde;
	pt_entry_t pte;
	u_long pml4eindex;
	u_long pdpeindex;
	u_long pdeindex;
	u_long pteindex;
	int i;
	u_long a;
	off_t ofs;
	size_t s;

	vm = kd->vmst;
	offset = va & (PAGE_SIZE - 1);
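
	/*
	 * For reference, assuming the standard amd64 paging constants
	 * (PML4SHIFT = 39, PDPSHIFT = 30, PDRSHIFT = 21, PAGE_SHIFT = 12,
	 * 512 entries per table): the walk below decodes a virtual address
	 * as four 9-bit table indices plus a 12-bit page offset:
	 *
	 *	PML4 index	bits 47..39	(va >> PML4SHIFT) & (NPML4EPG - 1)
	 *	PDP index	bits 38..30	(va >> PDPSHIFT) & (NPDPEPG - 1)
	 *	PD index	bits 29..21	(va >> PDRSHIFT) & (NPDEPG - 1)
	 *	PT index	bits 20..12	(va >> PAGE_SHIFT) & (NPTEPG - 1)
	 *	page offset	bits 11..0	va & (PAGE_SIZE - 1)
	 *
	 * Each level's entry is checked for PG_V; its PG_FRAME bits give the
	 * physical address of the next-level table (or, for a PG_PS pde, of
	 * a 2MB page itself).
	 */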

	/*
	 * If we are initializing (kernel page table descriptor pointer
	 * not yet set) then return pa == va to avoid infinite recursion.
	 */
	if (vm->PML4 == 0) {
		s = _kvm_pa2off(kd, va, pa);
		if (s == 0) {
			_kvm_err(kd, kd->program,
			    "_kvm_vatop: bootstrap data not in dump");
			goto invalid;
		} else
			return (PAGE_SIZE - offset);
	}

	pml4eindex = (va >> PML4SHIFT) & (NPML4EPG - 1);
	pml4e = vm->PML4[pml4eindex];
	if (((u_long)pml4e & PG_V) == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pml4e not valid");
		goto invalid;
	}

	pdpeindex = (va >> PDPSHIFT) & (NPDPEPG - 1);
	pdpe_pa = ((u_long)pml4e & PG_FRAME) +
	    (pdpeindex * sizeof(pdp_entry_t));

	s = _kvm_pa2off(kd, pdpe_pa, &ofs);
	if (s < sizeof pdpe) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pdpe_pa not found");
		goto invalid;
	}
	if (lseek(kd->pmfd, ofs, 0) == -1) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop: lseek pdpe_pa");
		goto invalid;
	}
	if (read(kd->pmfd, &pdpe, sizeof pdpe) != sizeof pdpe) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop: read pdpe");
		goto invalid;
	}
	if (((u_long)pdpe & PG_V) == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pdpe not valid");
		goto invalid;
	}

	pdeindex = (va >> PDRSHIFT) & (NPDEPG - 1);
	pde_pa = ((u_long)pdpe & PG_FRAME) + (pdeindex * sizeof(pd_entry_t));

	s = _kvm_pa2off(kd, pde_pa, &ofs);
	if (s < sizeof pde) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pde_pa not found");
		goto invalid;
	}
	if (lseek(kd->pmfd, ofs, 0) == -1) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop: lseek pde_pa");
		goto invalid;
	}
	if (read(kd->pmfd, &pde, sizeof pde) != sizeof pde) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop: read pde");
		goto invalid;
	}
	if (((u_long)pde & PG_V) == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pde not valid");
		goto invalid;
	}

	if ((u_long)pde & PG_PS) {
		/*
		 * No final-level page table; pde describes one 2MB page.
		 */
#define	PAGE2M_MASK	(NBPDR - 1)
#define	PG_FRAME2M	(~PAGE2M_MASK)
		a = ((u_long)pde & PG_FRAME2M) + (va & PAGE2M_MASK);
		s = _kvm_pa2off(kd, a, pa);
		if (s == 0) {
			_kvm_err(kd, kd->program,
			    "_kvm_vatop: 2MB page address not in dump");
			goto invalid;
		} else
			return (NBPDR - (va & PAGE2M_MASK));
	}

	pteindex = (va >> PAGE_SHIFT) & (NPTEPG - 1);
	pte_pa = ((u_long)pde & PG_FRAME) + (pteindex * sizeof(pt_entry_t));

	s = _kvm_pa2off(kd, pte_pa, &ofs);
	if (s < sizeof pte) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pte_pa not found");
		goto invalid;
	}
	if (lseek(kd->pmfd, ofs, 0) == -1) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop: lseek");
		goto invalid;
	}
	if (read(kd->pmfd, &pte, sizeof pte) != sizeof pte) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop: read");
		goto invalid;
	}
	if (((u_long)pte & PG_V) == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pte not valid");
		goto invalid;
	}

	a = ((u_long)pte & PG_FRAME) + offset;
	s = _kvm_pa2off(kd, a, pa);
	if (s == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop: address not in dump");
		goto invalid;
	} else
		return (PAGE_SIZE - offset);

invalid:
	_kvm_err(kd, 0, "invalid address (0x%lx)", va);
	return (0);
}

int
_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa)
{

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "kvm_kvatop called in live kernel!");
		return (0);
	}
	return (_kvm_vatop(kd, va, pa));
}
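
/*
 * Illustrative usage sketch (not part of this module; the kernel path, core
 * path and symbol name below are examples only): consumers reach
 * _kvm_kvatop() indirectly through kvm_read() on a dead kernel opened with
 * kvm_openfiles(), e.g.:
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	struct nlist nl[2];
 *	long value;
 *	kvm_t *kd;
 *
 *	kd = kvm_openfiles("/boot/kernel/kernel", "/var/crash/vmcore.0",
 *	    NULL, O_RDONLY, errbuf);
 *	if (kd != NULL) {
 *		nl[0].n_name = "some_symbol";
 *		nl[1].n_name = NULL;
 *		if (kvm_nlist(kd, nl) == 0)
 *			(void)kvm_read(kd, nl[0].n_value, &value,
 *			    sizeof(value));
 *		kvm_close(kd);
 *	}
 */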