/*-
 * Copyright (c) 2005 Olivier Houchard
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * ARM machine dependent routines for kvm.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/elf32.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/pmap.h>

#include <db.h>
#include <limits.h>
#include <kvm.h>
#include <stdlib.h>
#include <unistd.h>

#include "kvm_private.h"

struct vmstate {
	pd_entry_t *l1pt;
	void *mmapbase;
	size_t mmapsize;
};

static int
_kvm_maphdrs(kvm_t *kd, size_t sz)
{
	struct vmstate *vm = kd->vmst;

	/* munmap() previous mmap(). */
	if (vm->mmapbase != NULL) {
		munmap(vm->mmapbase, vm->mmapsize);
		vm->mmapbase = NULL;
	}

	vm->mmapsize = sz;
	vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->mmapbase == MAP_FAILED) {
		vm->mmapbase = NULL;
		_kvm_err(kd, kd->program, "cannot mmap corefile");
		return (-1);
	}

	return (0);
}

/*
 * Translate a physical memory address to a file offset in the crash dump.
 * Returns the number of contiguous bytes available at that offset, or 0 if
 * the physical address is not present in the dump.
 */
static size_t
_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
{
	Elf32_Ehdr *e = kd->vmst->mmapbase;
	Elf32_Phdr *p = (Elf32_Phdr *)((char *)e + e->e_phoff);
	int n = e->e_phnum;

	/* Find the program header whose physical range covers pa. */
	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
		p++, n--;
	if (n == 0)
		return (0);

	*ofs = (pa - p->p_paddr) + p->p_offset;
	if (pgsz == 0)
		return (p->p_memsz - (pa - p->p_paddr));
	return (pgsz - ((size_t)pa & (pgsz - 1)));
}

void
_kvm_freevtop(kvm_t *kd)
{
	if (kd->vmst != 0) {
		if (kd->vmst->mmapbase != NULL)
			munmap(kd->vmst->mmapbase, kd->vmst->mmapsize);
		free(kd->vmst);
		kd->vmst = NULL;
	}
}

/*
 * Map the ELF headers of the crash dump and cache a copy of the kernel's
 * L1 page table so that kernel virtual addresses can be translated later.
 */
int
_kvm_initvtop(kvm_t *kd)
{
	struct vmstate *vm = _kvm_malloc(kd, sizeof(*vm));
	struct nlist nlist[2];
	u_long kernbase, physaddr, pa;
	pd_entry_t *l1pt;
	Elf32_Ehdr *ehdr;
	size_t hdrsz;

	if (vm == 0) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vm;
	vm->l1pt = NULL;
	if (_kvm_maphdrs(kd, sizeof(Elf32_Ehdr)) == -1)
		return (-1);
	ehdr = kd->vmst->mmapbase;
	hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum;
	if (_kvm_maphdrs(kd, hdrsz) == -1)
		return (-1);
	nlist[0].n_name = "kernbase";
	nlist[1].n_name = NULL;
	if (kvm_nlist(kd, nlist) != 0)
		kernbase = KERNBASE;
	else
		kernbase = nlist[0].n_value;

	nlist[0].n_name = "physaddr";
	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "couldn't get phys addr");
		return (-1);
	}
	physaddr = nlist[0].n_value;
	nlist[0].n_name = "kernel_l1pa";
	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	if (kvm_read(kd, (nlist[0].n_value - kernbase + physaddr), &pa,
	    sizeof(pa)) != sizeof(pa)) {
		_kvm_err(kd, kd->program, "cannot read kernel_l1pa");
		return (-1);
	}
	l1pt = _kvm_malloc(kd, L1_TABLE_SIZE);
	if (l1pt == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate l1pt");
		return (-1);
	}
	if (kvm_read(kd, pa, l1pt, L1_TABLE_SIZE) != L1_TABLE_SIZE) {
		_kvm_err(kd, kd->program, "cannot read l1pt");
		free(l1pt);
		return (-1);
	}
	vm->l1pt = l1pt;
	return (0);
}

/* from arm/pmap.c */
#define	L1_IDX(va)		(((vm_offset_t)(va)) >> L1_S_SHIFT)
/* from arm/pmap.h */
#define	L1_TYPE_INV	0x00		/* Invalid (fault) */
#define	L1_TYPE_C	0x01		/* Coarse L2 */
#define	L1_TYPE_S	0x02		/* Section */
#define	L1_TYPE_F	0x03		/* Fine L2 */
#define	L1_TYPE_MASK	0x03		/* mask of type bits */

#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_valid(pde)	((pde) != 0)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)

/*
 * Translate a kernel virtual address to an offset in the crash dump.
 * Returns the number of contiguous bytes valid at *pa, or 0 on failure.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa)
{
	struct vmstate *vm = kd->vmst;
	pd_entry_t pd;
	pt_entry_t pte;
	off_t pte_off;
	u_long pte_pa;

	if (vm->l1pt == NULL) {
		/* No L1 table cached; assume va is a physical address. */
		return (_kvm_pa2off(kd, va, pa, PAGE_SIZE));
	}
	pd = vm->l1pt[L1_IDX(va)];
	if (!l1pte_valid(pd))
		goto invalid;
	if (l1pte_section_p(pd)) {
		/* 1MB section mapping. */
		*pa = ((u_long)pd & L1_S_ADDR_MASK) + (va & L1_S_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, L1_S_SIZE));
	}
	/* Read the L2 PTE for this VA straight out of the dump. */
	pte_pa = (pd & L1_ADDR_MASK) + l2pte_index(va) * sizeof(pte);
	if (_kvm_pa2off(kd, pte_pa, &pte_off, L1_S_SIZE) == 0)
		goto invalid;
	if (lseek(kd->pmfd, pte_off, SEEK_SET) == -1) {
		_kvm_syserr(kd, kd->program, "_kvm_kvatop: lseek");
		goto invalid;
	}
	if (read(kd->pmfd, &pte, sizeof(pte)) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "_kvm_kvatop: read");
		goto invalid;
	}
	if (!l2pte_valid(pte))
		goto invalid;
	if ((pte & L2_TYPE_MASK) == L2_TYPE_L) {
		/* 64KB large page mapping. */
		*pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, L2_L_SIZE));
	}
	/* 4KB small page mapping. */
	*pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
	return (_kvm_pa2off(kd, *pa, pa, PAGE_SIZE));
invalid:
	_kvm_err(kd, 0, "Invalid address (0x%lx)", va);
	return (0);
}

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (e.g. m68k)
 */
int
_kvm_mdopen(kvm_t *kd)
{

#ifdef FBSD_NOT_YET
	kd->usrstack = USRSTACK;
	kd->min_uva = VM_MIN_ADDRESS;
	kd->max_uva = VM_MAXUSER_ADDRESS;
#endif

	return (0);
}
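
/*
 * Usage sketch (illustrative only, not part of the library): roughly how a
 * libkvm consumer ends up driving _kvm_kvatop()/_kvm_pa2off() above when it
 * reads kernel memory out of an ARM crash dump.  The kernel path, core file
 * path and the "hz" symbol are assumptions for the example, not anything
 * this file requires.
 *
 *	#include <fcntl.h>
 *	#include <kvm.h>
 *	#include <limits.h>
 *	#include <nlist.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		char errbuf[_POSIX2_LINE_MAX];
 *		struct nlist nl[2];
 *		kvm_t *kd;
 *		int hz;
 *
 *		kd = kvm_openfiles("/boot/kernel/kernel", "/var/crash/vmcore.0",
 *		    NULL, O_RDONLY, errbuf);
 *		if (kd == NULL)
 *			return (1);
 *		nl[0].n_name = "hz";
 *		nl[1].n_name = NULL;
 *		if (kvm_nlist(kd, nl) != 0 ||
 *		    kvm_read(kd, nl[0].n_value, &hz, sizeof(hz)) != sizeof(hz)) {
 *			kvm_close(kd);
 *			return (1);
 *		}
 *		printf("hz = %d\n", hz);
 *		kvm_close(kd);
 *		return (0);
 *	}
 */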