/*-
 * Copyright (c) 2005 Olivier Houchard
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * ARM machine dependent routines for kvm.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/elf32.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/pmap.h>

#include <db.h>
#include <limits.h>
#include <kvm.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "kvm_private.h"

/* minidump must be the first item! */
struct vmstate {
	int minidump;		/* 1 = minidump mode */
	pd_entry_t *l1pt;
	void *mmapbase;
	size_t mmapsize;
};

static int
_kvm_maphdrs(kvm_t *kd, size_t sz)
{
	struct vmstate *vm = kd->vmst;

	/* munmap() previous mmap(). */
	if (vm->mmapbase != NULL) {
		munmap(vm->mmapbase, vm->mmapsize);
		vm->mmapbase = NULL;
	}

	vm->mmapsize = sz;
	vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->mmapbase == MAP_FAILED) {
		_kvm_err(kd, kd->program, "cannot mmap corefile");
		return (-1);
	}

	return (0);
}

/*
 * Translate a physical memory address to a file-offset in the crash-dump.
 * Returns the number of bytes readable at that offset (up to the end of the
 * containing page, or of the segment when pgsz is 0), or 0 if the address
 * is not present in the dump.
 */
static size_t
_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
{
	Elf32_Ehdr *e = kd->vmst->mmapbase;
	Elf32_Phdr *p = (Elf32_Phdr *)((char *)e + e->e_phoff);
	int n = e->e_phnum;

	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
		p++, n--;
	if (n == 0)
		return (0);

	*ofs = (pa - p->p_paddr) + p->p_offset;
	if (pgsz == 0)
		return (p->p_memsz - (pa - p->p_paddr));
	return (pgsz - ((size_t)pa & (pgsz - 1)));
}

void
_kvm_freevtop(kvm_t *kd)
{
	if (kd->vmst != NULL) {
		if (kd->vmst->minidump) {
			_kvm_minidump_freevtop(kd);
			return;
		}
		if (kd->vmst->mmapbase != NULL)
			munmap(kd->vmst->mmapbase, kd->vmst->mmapsize);
		free(kd->vmst);
		kd->vmst = NULL;
	}
}
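
/*
 * Set up the virtual-to-physical translation machinery for a crash dump:
 * detect a minidump, map the ELF headers of the dump file, then locate and
 * read the kernel L1 page table so that _kvm_kvatop() can walk it later.
 */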
int
_kvm_initvtop(kvm_t *kd)
{
	struct vmstate *vm;
	struct nlist nlist[2];
	u_long kernbase, physaddr, pa;
	pd_entry_t *l1pt;
	Elf32_Ehdr *ehdr;
	size_t hdrsz;
	char minihdr[8];

	if (!kd->rawdump) {
		if (pread(kd->pmfd, &minihdr, 8, 0) == 8) {
			if (memcmp(&minihdr, "minidump", 8) == 0)
				return (_kvm_minidump_initvtop(kd));
		} else {
			_kvm_err(kd, kd->program, "cannot read header");
			return (-1);
		}
	}

	vm = _kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vm;
	vm->l1pt = NULL;
	if (_kvm_maphdrs(kd, sizeof(Elf32_Ehdr)) == -1)
		return (-1);
	ehdr = kd->vmst->mmapbase;
	hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum;
	if (_kvm_maphdrs(kd, hdrsz) == -1)
		return (-1);
	nlist[0].n_name = "kernbase";
	nlist[1].n_name = NULL;
	if (kvm_nlist(kd, nlist) != 0)
		kernbase = KERNBASE;
	else
		kernbase = nlist[0].n_value;

	nlist[0].n_name = "physaddr";
	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "couldn't get phys addr");
		return (-1);
	}
	physaddr = nlist[0].n_value;
	nlist[0].n_name = "kernel_l1pa";
	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	if (kvm_read(kd, (nlist[0].n_value - kernbase + physaddr), &pa,
	    sizeof(pa)) != sizeof(pa)) {
		_kvm_err(kd, kd->program, "cannot read kernel_l1pa");
		return (-1);
	}
	l1pt = _kvm_malloc(kd, L1_TABLE_SIZE);
	if (l1pt == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate l1pt");
		return (-1);
	}
	if (kvm_read(kd, pa, l1pt, L1_TABLE_SIZE) != L1_TABLE_SIZE) {
		_kvm_err(kd, kd->program, "cannot read l1pt");
		free(l1pt);
		return (-1);
	}
	vm->l1pt = l1pt;
	return (0);
}

/* from arm/pmap.c */
#define	L1_IDX(va)		(((vm_offset_t)(va)) >> L1_S_SHIFT)
/* from arm/pmap.h */
#define	L1_TYPE_INV	0x00		/* Invalid (fault) */
#define	L1_TYPE_C	0x01		/* Coarse L2 */
#define	L1_TYPE_S	0x02		/* Section */
#define	L1_TYPE_F	0x03		/* Fine L2 */
#define	L1_TYPE_MASK	0x03		/* mask of type bits */

#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_valid(pde)	((pde) != 0)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
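
/*
 * Translate a kernel virtual address to a physical address, and then to a
 * file offset in the dump, by walking the kernel L1 page table: 1MB section
 * mappings are translated directly; otherwise the L2 page-table entry is
 * fetched from the dump and used to locate the page.
 */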
int
_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa)
{
	struct vmstate *vm = kd->vmst;
	pd_entry_t pd;
	pt_entry_t pte;
	off_t pte_off;
	u_long pte_pa;

	if (kd->vmst->minidump)
		return (_kvm_minidump_kvatop(kd, va, pa));

	if (vm->l1pt == NULL)
		return (_kvm_pa2off(kd, va, pa, PAGE_SIZE));
	pd = vm->l1pt[L1_IDX(va)];
	if (!l1pte_valid(pd))
		goto invalid;
	if (l1pte_section_p(pd)) {
		/* 1MB section mapping. */
		*pa = ((u_long)pd & L1_S_ADDR_MASK) + (va & L1_S_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, L1_S_SIZE));
	}
	pte_pa = (pd & L1_ADDR_MASK) + l2pte_index(va) * sizeof(pte);
	if (_kvm_pa2off(kd, pte_pa, &pte_off, L1_S_SIZE) == 0)
		goto invalid;
	if (lseek(kd->pmfd, pte_off, SEEK_SET) == -1) {
		_kvm_syserr(kd, kd->program, "_kvm_kvatop: lseek");
		goto invalid;
	}
	if (read(kd->pmfd, &pte, sizeof(pte)) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "_kvm_kvatop: read");
		goto invalid;
	}
	if (!l2pte_valid(pte))
		goto invalid;
	if ((pte & L2_TYPE_MASK) == L2_TYPE_L) {
		/* 64KB large page. */
		*pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, L2_L_SIZE));
	}
	/* 4KB small page. */
	*pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
	return (_kvm_pa2off(kd, *pa, pa, PAGE_SIZE));
invalid:
	_kvm_err(kd, 0, "Invalid address (%lx)", va);
	return (0);
}

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (e.g. m68k)
 */
int
_kvm_mdopen(kvm_t *kd)
{

#ifdef FBSD_NOT_YET
	kd->usrstack = USRSTACK;
	kd->min_uva = VM_MIN_ADDRESS;
	kd->max_uva = VM_MAXUSER_ADDRESS;
#endif

	return (0);
}