/*-
 * Copyright (c) 2006 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_amd64.c r261799
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ARM64 (AArch64) machine dependent routines for kvm and minidumps.
 */

#include <sys/param.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <vm/vm.h>
#include <kvm.h>

#include "../../sys/arm64/include/minidump.h"

#include <limits.h>

#include "kvm_private.h"
#include "kvm_aarch64.h"

#define	aarch64_round_page(x)	roundup2((kvaddr_t)(x), AARCH64_PAGE_SIZE)

struct vmstate {
	struct minidumphdr hdr;
};

static aarch64_pte_t
_aarch64_pte_get(kvm_t *kd, u_long pteindex)
{
	aarch64_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));

	return le64toh(*pte);
}

static int
_aarch64_minidump_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_AARCH64) &&
	    _kvm_is_minidump(kd));
}

static void
_aarch64_minidump_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	free(vm);
	kd->vmst = NULL;
}

static int
_aarch64_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off, sparse_off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
	    sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}

	vmst->hdr.version = le32toh(vmst->hdr.version);
	if (vmst->hdr.version != MINIDUMP_VERSION) {
		_kvm_err(kd, kd->program, "wrong minidump version. "
		    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
	vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
	vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
	vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys);
	vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
	vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);

	/* Skip header and msgbuf */
	off = AARCH64_PAGE_SIZE + aarch64_round_page(vmst->hdr.msgbufsize);

	/* build physical address lookup table for sparse pages */
	sparse_off = off + aarch64_round_page(vmst->hdr.bitmapsize) +
	    aarch64_round_page(vmst->hdr.pmapsize);
	if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
	    AARCH64_PAGE_SIZE, sizeof(uint64_t)) == -1) {
		return (-1);
	}
	off += aarch64_round_page(vmst->hdr.bitmapsize);

	if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) {
		return (-1);
	}
	off += aarch64_round_page(vmst->hdr.pmapsize);

	return (0);
}

static int
_aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	aarch64_physaddr_t offset;
	aarch64_pte_t l3;
	kvaddr_t l3_index;
	aarch64_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & AARCH64_PAGE_MASK;

	if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		a = (va - vm->hdr.dmapbase + vm->hdr.dmapphys) &
		    ~AARCH64_PAGE_MASK;
		ofs = _kvm_pt_find(kd, a, AARCH64_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
			    "direct map address 0x%jx not in minidump",
			    (uintmax_t)va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AARCH64_PAGE_SIZE - offset);
	} else if (va >= vm->hdr.kernbase) {
		l3_index = (va - vm->hdr.kernbase) >> AARCH64_L3_SHIFT;
		if (l3_index >= vm->hdr.pmapsize / sizeof(l3))
			goto invalid;
		l3 = _aarch64_pte_get(kd, l3_index);
		if ((l3 & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) {
			_kvm_err(kd, kd->program,
			    "_aarch64_minidump_vatop: pde not valid");
			goto invalid;
		}
		a = l3 & ~AARCH64_ATTR_MASK;
		ofs = _kvm_pt_find(kd, a, AARCH64_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
			    "physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AARCH64_PAGE_SIZE - offset);
	} else {
		_kvm_err(kd, kd->program,
		    "_aarch64_minidump_vatop: virtual address 0x%jx "
		    "not minidumped", (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

static int
_aarch64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0,
		    "_aarch64_minidump_kvatop called in live kernel!");
		return (0);
	}
	return (_aarch64_minidump_vatop(kd, va, pa));
}

static int
_aarch64_native(kvm_t *kd __unused)
{

#ifdef __aarch64__
	return (1);
#else
	return (0);
#endif
}

static vm_prot_t
_aarch64_entry_to_prot(aarch64_pte_t pte)
{
	vm_prot_t prot = VM_PROT_READ;

	/* Source: arm64/arm64/pmap.c:pmap_protect() */
	if ((pte & AARCH64_ATTR_AP(AARCH64_ATTR_AP_RO)) == 0)
		prot |= VM_PROT_WRITE;
	if ((pte & AARCH64_ATTR_XN) == 0)
		prot |= VM_PROT_EXECUTE;
	return prot;
}

static int
_aarch64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm = kd->vmst;
	u_long nptes = vm->hdr.pmapsize / sizeof(aarch64_pte_t);
	u_long bmindex, dva, pa, pteindex, va;
	struct kvm_bitmap bm;
	vm_prot_t prot;
	int ret = 0;

	if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex))
		return (0);

	for (pteindex = 0; pteindex < nptes; pteindex++) {
		aarch64_pte_t pte = _aarch64_pte_get(kd, pteindex);

		if ((pte & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE)
			continue;

		va = vm->hdr.kernbase + (pteindex << AARCH64_L3_SHIFT);
		pa = pte & ~AARCH64_ATTR_MASK;
		dva = vm->hdr.dmapbase + pa;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    _aarch64_entry_to_prot(pte), AARCH64_PAGE_SIZE, 0)) {
			goto out;
		}
	}

	while (_kvm_bitmap_next(&bm, &bmindex)) {
		pa = bmindex * AARCH64_PAGE_SIZE;
		dva = vm->hdr.dmapbase + pa;
		if (vm->hdr.dmapend < (dva + AARCH64_PAGE_SIZE))
			break;
		va = 0;
		prot = VM_PROT_READ | VM_PROT_WRITE;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    prot, AARCH64_PAGE_SIZE, 0)) {
			goto out;
		}
	}
	ret = 1;

out:
	_kvm_bitmap_deinit(&bm);
	return (ret);
}

static struct kvm_arch kvm_aarch64_minidump = {
	.ka_probe = _aarch64_minidump_probe,
	.ka_initvtop = _aarch64_minidump_initvtop,
	.ka_freevtop = _aarch64_minidump_freevtop,
	.ka_kvatop = _aarch64_minidump_kvatop,
	.ka_native = _aarch64_native,
	.ka_walk_pages = _aarch64_minidump_walk_pages,
};

KVM_ARCH(kvm_aarch64_minidump);
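
/*
 * Usage sketch (illustrative only, not compiled into this file): a minimal
 * libkvm consumer that, when pointed at an arm64 minidump, ends up driving
 * the ka_probe/ka_initvtop/ka_walk_pages handlers registered above through
 * the public kvm_open2()/kvm_walk_pages() interfaces.  The kernel and core
 * paths are hypothetical examples; the struct kvm_page fields and the NULL
 * symbol resolver are assumed to match <kvm.h> on the target system.
 */
#if 0
#include <sys/param.h>
#include <err.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <vm/vm.h>
#include <kvm.h>

static int
dump_page(struct kvm_page *p, void *arg __unused)
{

	/* Report each dumped page's physical address and mapping length. */
	printf("pa 0x%jx kva 0x%jx len %zu\n",
	    (uintmax_t)p->kp_paddr, (uintmax_t)p->kp_kmap_vaddr, p->kp_len);
	return (1);	/* non-zero: continue the walk */
}

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *kd;

	/* Example paths; no symbol resolver is needed for a page walk. */
	kd = kvm_open2("/boot/kernel/kernel", "/var/crash/vmcore.last",
	    O_RDONLY, errbuf, NULL);
	if (kd == NULL)
		errx(1, "kvm_open2: %s", errbuf);
	if (kvm_walk_pages(kd, dump_page, NULL) == 0)
		warnx("kvm_walk_pages: %s", kvm_geterr(kd));
	kvm_close(kd);
	return (0);
}
#endif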