/*-
 * Copyright (c) 2006 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * AMD64 machine dependent routines for kvm and minidumps.
 */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/fnv_hash.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>

#include <machine/elf.h>
#include <machine/cpufunc.h>
#include <machine/minidump.h>

#include <limits.h>

#include "kvm_private.h"

struct hpte {
	struct hpte *next;
	vm_paddr_t pa;
	int64_t off;
};

#define HPT_SIZE 1024
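
/*
 * Each page that made it into the sparse minidump is entered into a small
 * hash table that maps its physical address to the page's offset within
 * the core file; buckets are chosen by an FNV hash of the physical
 * address (see hpt_insert() and hpt_find() below).
 */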

/* minidump must be the first item! */
struct vmstate {
	int minidump;		/* 1 = minidump mode */
	struct minidumphdr hdr;
	void *hpt_head[HPT_SIZE];
	uint64_t *bitmap;
	uint64_t *page_map;
};

static void
hpt_insert(kvm_t *kd, vm_paddr_t pa, int64_t off)
{
	struct hpte *hpte;
	uint32_t fnv = FNV1_32_INIT;

	fnv = fnv_32_buf(&pa, sizeof(pa), fnv);
	fnv &= (HPT_SIZE - 1);
	hpte = malloc(sizeof(*hpte));
	hpte->pa = pa;
	hpte->off = off;
	hpte->next = kd->vmst->hpt_head[fnv];
	kd->vmst->hpt_head[fnv] = hpte;
}

static int64_t
hpt_find(kvm_t *kd, vm_paddr_t pa)
{
	struct hpte *hpte;
	uint32_t fnv = FNV1_32_INIT;

	fnv = fnv_32_buf(&pa, sizeof(pa), fnv);
	fnv &= (HPT_SIZE - 1);
	for (hpte = kd->vmst->hpt_head[fnv]; hpte != NULL; hpte = hpte->next) {
		if (pa == hpte->pa)
			return (hpte->off);
	}
	return (-1);
}

static int
inithash(kvm_t *kd, uint64_t *base, int len, off_t off)
{
	uint64_t idx;
	uint64_t bit, bits;
	vm_paddr_t pa;

	for (idx = 0; idx < len / sizeof(*base); idx++) {
		bits = base[idx];
		while (bits) {
			bit = bsfq(bits);
			bits &= ~(1ul << bit);
			pa = (idx * sizeof(*base) * NBBY + bit) * PAGE_SIZE;
			hpt_insert(kd, pa, off);
			off += PAGE_SIZE;
		}
	}
	return (off);
}

void
_kvm_minidump_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	if (vm->bitmap)
		free(vm->bitmap);
	if (vm->page_map)
		free(vm->page_map);
	free(vm);
	kd->vmst = NULL;
}

int
_kvm_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == 0) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	vmst->minidump = 1;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}

	/*
	 * NB: amd64 minidump header is binary compatible between version 1
	 * and version 2; this may not be the case for the future versions.
	 */
	if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
		_kvm_err(kd, kd->program, "wrong minidump version. expected %d got %d",
		    MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
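
	/*
	 * Layout of the minidump file, as read back below: one page of
	 * minidump header, the kernel message buffer (rounded to a page),
	 * the page bitmap (rounded to a page), the kernel page map, and
	 * then the dumped pages themselves in the order the bitmap lists
	 * them.
	 */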

	/* Skip header and msgbuf */
	off = PAGE_SIZE + round_page(vmst->hdr.msgbufsize);

	vmst->bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize);
	if (vmst->bitmap == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate %d bytes for bitmap", vmst->hdr.bitmapsize);
		return (-1);
	}
	if (pread(kd->pmfd, vmst->bitmap, vmst->hdr.bitmapsize, off) !=
	    vmst->hdr.bitmapsize) {
		_kvm_err(kd, kd->program, "cannot read %d bytes for page bitmap", vmst->hdr.bitmapsize);
		return (-1);
	}
	off += round_page(vmst->hdr.bitmapsize);

	vmst->page_map = _kvm_malloc(kd, vmst->hdr.pmapsize);
	if (vmst->page_map == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate %d bytes for page_map", vmst->hdr.pmapsize);
		return (-1);
	}
	if (pread(kd->pmfd, vmst->page_map, vmst->hdr.pmapsize, off) !=
	    vmst->hdr.pmapsize) {
		_kvm_err(kd, kd->program, "cannot read %d bytes for page_map", vmst->hdr.pmapsize);
		return (-1);
	}
	off += vmst->hdr.pmapsize;

	/* build physical address hash table for sparse pages */
	inithash(kd, vmst->bitmap, vmst->hdr.bitmapsize, off);

	return (0);
}

static int
_kvm_minidump_vatop_v1(kvm_t *kd, u_long va, off_t *pa)
{
	struct vmstate *vm;
	u_long offset;
	pt_entry_t pte;
	u_long pteindex;
	u_long a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & (PAGE_SIZE - 1);

	if (va >= vm->hdr.kernbase) {
		pteindex = (va - vm->hdr.kernbase) >> PAGE_SHIFT;
		pte = vm->page_map[pteindex];
		if (((u_long)pte & PG_V) == 0) {
			_kvm_err(kd, kd->program, "_kvm_vatop: pte not valid");
			goto invalid;
		}
		a = pte & PG_FRAME;
		ofs = hpt_find(kd, a);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_kvm_vatop: physical address 0x%lx not in minidump", a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (PAGE_SIZE - offset);
	} else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		a = (va - vm->hdr.dmapbase) & ~PAGE_MASK;
		ofs = hpt_find(kd, a);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_kvm_vatop: direct map address 0x%lx not in minidump", va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (PAGE_SIZE - offset);
	} else {
		_kvm_err(kd, kd->program, "_kvm_vatop: virtual address 0x%lx not minidumped", va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%lx)", va);
	return (0);
}
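
/*
 * Version 1 minidumps store the kernel map as a flat array of 4KB page
 * table entries indexed from kernbase (handled above); version 2 stores
 * page directory entries instead, so 2MB large pages (PG_PS) can be
 * represented and the 4KB page table pages must be fetched from the dump
 * when needed (handled below).
 */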

static int
_kvm_minidump_vatop(kvm_t *kd, u_long va, off_t *pa)
{
	pt_entry_t pt[NPTEPG];
	struct vmstate *vm;
	u_long offset;
	pd_entry_t pde;
	pd_entry_t pte;
	u_long pteindex;
	u_long pdeindex;
	u_long a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & PAGE_MASK;

	if (va >= vm->hdr.kernbase) {
		pdeindex = (va - vm->hdr.kernbase) >> PDRSHIFT;
		pde = vm->page_map[pdeindex];
		if (((u_long)pde & PG_V) == 0) {
			_kvm_err(kd, kd->program, "_kvm_vatop: pde not valid");
			goto invalid;
		}
		if ((pde & PG_PS) == 0) {
			a = pde & PG_FRAME;
			ofs = hpt_find(kd, a);
			if (ofs == -1) {
				_kvm_err(kd, kd->program, "_kvm_vatop: pt physical address 0x%lx not in minidump", a);
				goto invalid;
			}
			if (pread(kd->pmfd, &pt, PAGE_SIZE, ofs) != PAGE_SIZE) {
				_kvm_err(kd, kd->program, "cannot read %d bytes for pt", PAGE_SIZE);
				return (-1);
			}
			pteindex = (va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1);
			pte = pt[pteindex];
			if (((u_long)pte & PG_V) == 0) {
				_kvm_err(kd, kd->program, "_kvm_vatop: pte not valid");
				goto invalid;
			}
			a = pte & PG_FRAME;
		} else {
			a = pde & PG_PS_FRAME;
			a += (va & PDRMASK) ^ offset;
		}
		ofs = hpt_find(kd, a);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_kvm_vatop: physical address 0x%lx not in minidump", a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (PAGE_SIZE - offset);
	} else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		a = (va - vm->hdr.dmapbase) & ~PAGE_MASK;
		ofs = hpt_find(kd, a);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_kvm_vatop: direct map address 0x%lx not in minidump", va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (PAGE_SIZE - offset);
	} else {
		_kvm_err(kd, kd->program, "_kvm_vatop: virtual address 0x%lx not minidumped", va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%lx)", va);
	return (0);
}

int
_kvm_minidump_kvatop(kvm_t *kd, u_long va, off_t *pa)
{

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "kvm_kvatop called in live kernel!");
		return (0);
	}
	if (((struct vmstate *)kd->vmst)->hdr.version == 1)
		return (_kvm_minidump_vatop_v1(kd, va, pa));
	else
		return (_kvm_minidump_vatop(kd, va, pa));
}
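
/*
 * Illustrative sketch (not part of this file's interface): the translation
 * routines above are not called directly by consumers; they are reached
 * through the public libkvm interface when a minidump is opened as the core
 * file.  Assuming a matching kernel image and a minidump vmcore, and using
 * "ticks" purely as an example symbol, usage might look like:
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	struct nlist nl[] = { { "_ticks" }, { NULL } };
 *	int ticks;
 *	kvm_t *kd;
 *
 *	kd = kvm_openfiles("/boot/kernel/kernel", "/var/crash/vmcore.0",
 *	    NULL, O_RDONLY, errbuf);
 *	if (kd != NULL && kvm_nlist(kd, nl) == 0 &&
 *	    kvm_read(kd, nl[0].n_value, &ticks, sizeof(ticks)) ==
 *	    sizeof(ticks))
 *		printf("ticks: %d\n", ticks);
 *
 * Each kvm_read() of kernel virtual memory ends up in
 * _kvm_minidump_kvatop() above to convert the address into an offset in
 * the minidump file, one page at a time.
 */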