1 /*- 2 * Copyright (c) 2006 Peter Wemm 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 * From: FreeBSD: src/lib/libkvm/kvm_minidump_amd64.c r261799 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 /* 32 * ARM64 (AArch64) machine dependent routines for kvm and minidumps. 33 */ 34 35 #include <sys/param.h> 36 #include <stdint.h> 37 #include <stdlib.h> 38 #include <string.h> 39 #include <unistd.h> 40 #include <kvm.h> 41 42 #include "../../sys/arm64/include/minidump.h" 43 44 #include <limits.h> 45 46 #include "kvm_private.h" 47 #include "kvm_aarch64.h" 48 49 #define aarch64_round_page(x) roundup2((kvaddr_t)(x), AARCH64_PAGE_SIZE) 50 51 struct vmstate { 52 struct minidumphdr hdr; 53 uint64_t *page_map; 54 }; 55 56 static int 57 _aarch64_minidump_probe(kvm_t *kd) 58 { 59 60 return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_AARCH64) && 61 _kvm_is_minidump(kd)); 62 } 63 64 static void 65 _aarch64_minidump_freevtop(kvm_t *kd) 66 { 67 struct vmstate *vm = kd->vmst; 68 69 free(vm->page_map); 70 free(vm); 71 kd->vmst = NULL; 72 } 73 74 static int 75 _aarch64_minidump_initvtop(kvm_t *kd) 76 { 77 struct vmstate *vmst; 78 off_t off, sparse_off; 79 80 vmst = _kvm_malloc(kd, sizeof(*vmst)); 81 if (vmst == NULL) { 82 _kvm_err(kd, kd->program, "cannot allocate vm"); 83 return (-1); 84 } 85 kd->vmst = vmst; 86 if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != 87 sizeof(vmst->hdr)) { 88 _kvm_err(kd, kd->program, "cannot read dump header"); 89 return (-1); 90 } 91 if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, 92 sizeof(vmst->hdr.magic)) != 0) { 93 _kvm_err(kd, kd->program, "not a minidump for this platform"); 94 return (-1); 95 } 96 97 vmst->hdr.version = le32toh(vmst->hdr.version); 98 if (vmst->hdr.version != MINIDUMP_VERSION) { 99 _kvm_err(kd, kd->program, "wrong minidump version. " 100 "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); 101 return (-1); 102 } 103 vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize); 104 vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize); 105 vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize); 106 vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase); 107 vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys); 108 vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase); 109 vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend); 110 111 /* Skip header and msgbuf */ 112 off = AARCH64_PAGE_SIZE + aarch64_round_page(vmst->hdr.msgbufsize); 113 114 /* build physical address lookup table for sparse pages */ 115 sparse_off = off + aarch64_round_page(vmst->hdr.bitmapsize) + 116 aarch64_round_page(vmst->hdr.pmapsize); 117 if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off, 118 AARCH64_PAGE_SIZE, sizeof(uint64_t)) == -1) { 119 _kvm_err(kd, kd->program, "cannot load core bitmap"); 120 return (-1); 121 } 122 off += aarch64_round_page(vmst->hdr.bitmapsize); 123 124 vmst->page_map = _kvm_malloc(kd, vmst->hdr.pmapsize); 125 if (vmst->page_map == NULL) { 126 _kvm_err(kd, kd->program, 127 "cannot allocate %d bytes for page_map", 128 vmst->hdr.pmapsize); 129 return (-1); 130 } 131 /* This is the end of the dump, savecore may have truncated it. */ 132 /* 133 * XXX: This doesn't make sense. The pmap is not at the end, 134 * and if it is truncated we don't have any actual data (it's 135 * all stored after the bitmap and pmap. -- jhb 136 */ 137 if (pread(kd->pmfd, vmst->page_map, vmst->hdr.pmapsize, off) < 138 AARCH64_PAGE_SIZE) { 139 _kvm_err(kd, kd->program, "cannot read %d bytes for page_map", 140 vmst->hdr.pmapsize); 141 return (-1); 142 } 143 off += aarch64_round_page(vmst->hdr.pmapsize); 144 145 return (0); 146 } 147 148 static int 149 _aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) 150 { 151 struct vmstate *vm; 152 aarch64_physaddr_t offset; 153 aarch64_pte_t l3; 154 kvaddr_t l3_index; 155 aarch64_physaddr_t a; 156 off_t ofs; 157 158 vm = kd->vmst; 159 offset = va & AARCH64_PAGE_MASK; 160 161 if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) { 162 a = (va - vm->hdr.dmapbase + vm->hdr.dmapphys) & 163 ~AARCH64_PAGE_MASK; 164 ofs = _kvm_pt_find(kd, a); 165 if (ofs == -1) { 166 _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: " 167 "direct map address 0x%jx not in minidump", 168 (uintmax_t)va); 169 goto invalid; 170 } 171 *pa = ofs + offset; 172 return (AARCH64_PAGE_SIZE - offset); 173 } else if (va >= vm->hdr.kernbase) { 174 l3_index = (va - vm->hdr.kernbase) >> AARCH64_L3_SHIFT; 175 if (l3_index >= vm->hdr.pmapsize / sizeof(*vm->page_map)) 176 goto invalid; 177 l3 = le64toh(vm->page_map[l3_index]); 178 if ((l3 & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) { 179 _kvm_err(kd, kd->program, 180 "_aarch64_minidump_vatop: pde not valid"); 181 goto invalid; 182 } 183 a = l3 & ~AARCH64_ATTR_MASK; 184 ofs = _kvm_pt_find(kd, a); 185 if (ofs == -1) { 186 _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: " 187 "physical address 0x%jx not in minidump", 188 (uintmax_t)a); 189 goto invalid; 190 } 191 *pa = ofs + offset; 192 return (AARCH64_PAGE_SIZE - offset); 193 } else { 194 _kvm_err(kd, kd->program, 195 "_aarch64_minidump_vatop: virtual address 0x%jx not minidumped", 196 (uintmax_t)va); 197 goto invalid; 198 } 199 200 invalid: 201 _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); 202 return (0); 203 } 204 205 static int 206 _aarch64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) 207 { 208 209 if (ISALIVE(kd)) { 210 _kvm_err(kd, 0, 211 "_aarch64_minidump_kvatop called in live kernel!"); 212 return (0); 213 } 214 return (_aarch64_minidump_vatop(kd, va, pa)); 215 } 216 217 static int 218 _aarch64_native(kvm_t *kd __unused) 219 { 220 221 #ifdef __aarch64__ 222 return (1); 223 #else 224 return (0); 225 #endif 226 } 227 228 static struct kvm_arch kvm_aarch64_minidump = { 229 .ka_probe = _aarch64_minidump_probe, 230 .ka_initvtop = _aarch64_minidump_initvtop, 231 .ka_freevtop = _aarch64_minidump_freevtop, 232 .ka_kvatop = _aarch64_minidump_kvatop, 233 .ka_native = _aarch64_native, 234 }; 235 236 KVM_ARCH(kvm_aarch64_minidump); 237