/*-
 * Copyright (c) 2006 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_amd64.c r261799
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ARM64 (AArch64) machine dependent routines for kvm and minidumps.
 */

#include <sys/param.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <vm/vm.h>
#include <kvm.h>

#include "../../sys/arm64/include/minidump.h"

#include <limits.h>

#include "kvm_private.h"
#include "kvm_aarch64.h"

#define	aarch64_round_page(x, size)	roundup2((kvaddr_t)(x), size)
#define	aarch64_trunc_page(x, size)	rounddown2((kvaddr_t)(x), size)

struct vmstate {
	struct minidumphdr hdr;
	size_t page_size;
	u_int l3_shift;
};

static aarch64_pte_t
_aarch64_pte_get(kvm_t *kd, u_long pteindex)
{
	aarch64_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));

	return (le64toh(*pte));
}

static int
_aarch64_minidump_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_AARCH64) &&
	    _kvm_is_minidump(kd));
}

static void
_aarch64_minidump_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	free(vm);
	kd->vmst = NULL;
}

static int
_aarch64_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off, dump_avail_off, sparse_off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
	    sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}

	vmst->hdr.version = le32toh(vmst->hdr.version);
	if (vmst->hdr.version > MINIDUMP_VERSION || vmst->hdr.version < 1) {
		_kvm_err(kd, kd->program, "wrong minidump version. "
		    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
	/* Header fields are stored little-endian; convert to host order. */
	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
	vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
	vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
	vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys);
	vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
	vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);
	/* dumpavailsize added in version 2 */
	if (vmst->hdr.version >= 2) {
		vmst->hdr.dumpavailsize = le32toh(vmst->hdr.dumpavailsize);
	} else {
		vmst->hdr.dumpavailsize = 0;
	}
	/* flags added in version 3 */
	if (vmst->hdr.version >= 3) {
		vmst->hdr.flags = le32toh(vmst->hdr.flags);
	} else {
		vmst->hdr.flags = MINIDUMP_FLAG_PS_4K;
	}

	switch (vmst->hdr.flags & MINIDUMP_FLAG_PS_MASK) {
	case MINIDUMP_FLAG_PS_4K:
		vmst->page_size = AARCH64_PAGE_SIZE_4K;
		vmst->l3_shift = AARCH64_L3_SHIFT_4K;
		break;
	case MINIDUMP_FLAG_PS_16K:
		vmst->page_size = AARCH64_PAGE_SIZE_16K;
		vmst->l3_shift = AARCH64_L3_SHIFT_16K;
		break;
	default:
		_kvm_err(kd, kd->program, "unknown page size flag %x",
		    vmst->hdr.flags & MINIDUMP_FLAG_PS_MASK);
		return (-1);
	}

	/* Skip header and msgbuf */
	dump_avail_off = vmst->page_size +
	    aarch64_round_page(vmst->hdr.msgbufsize, vmst->page_size);

	/* Skip dump_avail */
	off = dump_avail_off +
	    aarch64_round_page(vmst->hdr.dumpavailsize, vmst->page_size);

	/* build physical address lookup table for sparse pages */
	sparse_off = off +
	    aarch64_round_page(vmst->hdr.bitmapsize, vmst->page_size) +
	    aarch64_round_page(vmst->hdr.pmapsize, vmst->page_size);
	if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
	    vmst->hdr.bitmapsize, off, sparse_off, vmst->page_size) == -1) {
		return (-1);
	}
	off += aarch64_round_page(vmst->hdr.bitmapsize, vmst->page_size);

	if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) {
		return (-1);
	}
	off += aarch64_round_page(vmst->hdr.pmapsize, vmst->page_size);

	return (0);
}

static int
_aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	aarch64_physaddr_t offset;
	aarch64_pte_t l3;
	kvaddr_t l3_index;
	aarch64_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & (kd->vmst->page_size - 1);

	if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		/*
		 * Direct map region: the physical address is a fixed
		 * linear offset from the virtual address.
		 */
		a = aarch64_trunc_page(va - vm->hdr.dmapbase + vm->hdr.dmapphys,
		    kd->vmst->page_size);
		ofs = _kvm_pt_find(kd, a, kd->vmst->page_size);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
			    "direct map address 0x%jx not in minidump",
			    (uintmax_t)va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (kd->vmst->page_size - offset);
	} else if (va >= vm->hdr.kernbase) {
		/*
		 * Kernel map: index the flat array of L3 page table
		 * entries saved in the minidump.
		 */
		l3_index = (va - vm->hdr.kernbase) >> kd->vmst->l3_shift;
		if (l3_index >= vm->hdr.pmapsize / sizeof(l3))
			goto invalid;
		l3 = _aarch64_pte_get(kd, l3_index);
		if ((l3 & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) {
			_kvm_err(kd, kd->program,
			    "_aarch64_minidump_vatop: pde not valid");
			goto invalid;
		}
		a = l3 & ~AARCH64_ATTR_MASK;
		ofs = _kvm_pt_find(kd, a, kd->vmst->page_size);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
			    "physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (kd->vmst->page_size - offset);
	} else {
		_kvm_err(kd, kd->program,
		    "_aarch64_minidump_vatop: virtual address 0x%jx not minidumped",
		    (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

static int
_aarch64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0,
		    "_aarch64_minidump_kvatop called in live kernel!");
		return (0);
	}
	return (_aarch64_minidump_vatop(kd, va, pa));
}

static int
_aarch64_native(kvm_t *kd __unused)
{

#ifdef __aarch64__
	return (1);
#else
	return (0);
#endif
}

static vm_prot_t
_aarch64_entry_to_prot(aarch64_pte_t pte)
{
	vm_prot_t prot = VM_PROT_READ;

	/* Source: arm64/arm64/pmap.c:pmap_protect() */
	if ((pte & AARCH64_ATTR_AP(AARCH64_ATTR_AP_RO)) == 0)
		prot |= VM_PROT_WRITE;
	if ((pte & AARCH64_ATTR_XN) == 0)
		prot |= VM_PROT_EXECUTE;
	return (prot);
}

static int
_aarch64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm = kd->vmst;
	u_long nptes = vm->hdr.pmapsize / sizeof(aarch64_pte_t);
	u_long bmindex, dva, pa, pteindex, va;
	struct kvm_bitmap bm;
	vm_prot_t prot;
	int ret = 0;

	if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex))
		return (0);

	/* Visit pages mapped through the kernel map's L3 entries. */
	for (pteindex = 0; pteindex < nptes; pteindex++) {
		aarch64_pte_t pte = _aarch64_pte_get(kd, pteindex);

		if ((pte & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE)
			continue;

		va = vm->hdr.kernbase + (pteindex << kd->vmst->l3_shift);
		pa = pte & ~AARCH64_ATTR_MASK;
		dva = vm->hdr.dmapbase + pa;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    _aarch64_entry_to_prot(pte), kd->vmst->page_size, 0)) {
			goto out;
		}
	}

	/* Walk the bitmap of dumped physical pages via the direct map. */
	while (_kvm_bitmap_next(&bm, &bmindex)) {
		pa = _kvm_bit_id_pa(kd, bmindex, kd->vmst->page_size);
		if (pa == _KVM_PA_INVALID)
			break;
		dva = vm->hdr.dmapbase + pa;
		if (vm->hdr.dmapend < (dva + kd->vmst->page_size))
			break;
		va = 0;
		prot = VM_PROT_READ | VM_PROT_WRITE;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    prot, kd->vmst->page_size, 0)) {
			goto out;
		}
	}
	ret = 1;

out:
	_kvm_bitmap_deinit(&bm);
	return (ret);
}

static struct kvm_arch kvm_aarch64_minidump = {
	.ka_probe = _aarch64_minidump_probe,
	.ka_initvtop = _aarch64_minidump_initvtop,
	.ka_freevtop = _aarch64_minidump_freevtop,
	.ka_kvatop = _aarch64_minidump_kvatop,
	.ka_native = _aarch64_native,
	.ka_walk_pages = _aarch64_minidump_walk_pages,
};

KVM_ARCH(kvm_aarch64_minidump);