xref: /freebsd/lib/libkvm/kvm_arm.c (revision 195ebc7e9e4b129de810833791a19dfb4349d6a9)
/*-
 * Copyright (c) 2005 Olivier Houchard
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * ARM machine dependent routines for kvm.
 */

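/*
 * Illustrative sketch (not part of this file): these MD hooks are not
 * called directly; they are reached through the generic libkvm interface.
 * A consumer of a crash dump might do, roughly:
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd = kvm_openfiles(kernelfile, corefile, NULL, O_RDONLY, errbuf);
 *	...
 *	kvm_read(kd, kernel_va, buf, len);
 *	kvm_close(kd);
 *
 * Opening the dump ends up in _kvm_initvtop() below to build the
 * address-translation state, and each kvm_read() of a kernel virtual
 * address goes through _kvm_kvatop() to find the matching file offset.
 */
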
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/elf32.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/pmap.h>

#include <db.h>
#include <limits.h>
#include <kvm.h>
#include <stdlib.h>
#include <unistd.h>

#include "kvm_private.h"

/* minidump must be the first item! */
struct vmstate {
	int minidump;		/* 1 = minidump mode */
	pd_entry_t *l1pt;
	void *mmapbase;
	size_t mmapsize;
};

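/*
 * Map the first 'sz' bytes of the core file (its ELF headers) read-only,
 * replacing any previous mapping, so that _kvm_pa2off() can consult the
 * program headers.
 */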
static int
_kvm_maphdrs(kvm_t *kd, size_t sz)
{
	struct vmstate *vm = kd->vmst;

	/* munmap() previous mmap(). */
	if (vm->mmapbase != NULL) {
		munmap(vm->mmapbase, vm->mmapsize);
		vm->mmapbase = NULL;
	}

	vm->mmapsize = sz;
	vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->mmapbase == MAP_FAILED) {
		_kvm_err(kd, kd->program, "cannot mmap corefile");
		return (-1);
	}

	return (0);
}

/*
 * Translate a physical memory address to a file-offset in the crash-dump.
 * Returns the number of bytes that can be read contiguously from that
 * offset (limited to the enclosing page when 'pgsz' is non-zero), or 0 if
 * the address is not covered by any segment of the dump.
 */
static size_t
_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
{
	Elf32_Ehdr *e = kd->vmst->mmapbase;
	Elf32_Phdr *p = (Elf32_Phdr*)((char*)e + e->e_phoff);
	int n = e->e_phnum;

	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
		p++, n--;
	if (n == 0)
		return (0);

	*ofs = (pa - p->p_paddr) + p->p_offset;
	if (pgsz == 0)
		return (p->p_memsz - (pa - p->p_paddr));
	return (pgsz - ((size_t)pa & (pgsz - 1)));
}

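/*
 * Tear down the translation state built by _kvm_initvtop(): unmap the ELF
 * headers and release the cached L1 page table and the vmstate itself.
 */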
void
_kvm_freevtop(kvm_t *kd)
{
	if (kd->vmst != NULL) {
		if (kd->vmst->minidump) {
			_kvm_minidump_freevtop(kd);
			return;
		}
		if (kd->vmst->mmapbase != NULL)
			munmap(kd->vmst->mmapbase, kd->vmst->mmapsize);
		free(kd->vmst->l1pt);
		free(kd->vmst);
		kd->vmst = NULL;
	}
}

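/*
 * Build the virtual-to-physical translation state for a full crash dump:
 * detect minidumps, map the ELF headers, look up 'kernbase', 'physaddr'
 * and 'kernel_l1pa' in the kernel namelist, and read a copy of the kernel
 * L1 page table out of the dump for later use by _kvm_kvatop().
 */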
int
_kvm_initvtop(kvm_t *kd)
{
	struct vmstate *vm;
	struct nlist nlist[2];
	u_long kernbase, physaddr, pa;
	pd_entry_t *l1pt;
	Elf32_Ehdr *ehdr;
	size_t hdrsz;
	char minihdr[8];

	if (!kd->rawdump) {
		if (pread(kd->pmfd, minihdr, 8, 0) == 8) {
			if (memcmp(minihdr, "minidump", 8) == 0)
				return (_kvm_minidump_initvtop(kd));
		} else {
			_kvm_err(kd, kd->program, "cannot read header");
			return (-1);
		}
	}

	vm = _kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vm;
	vm->l1pt = NULL;
	if (_kvm_maphdrs(kd, sizeof(Elf32_Ehdr)) == -1)
		return (-1);
	ehdr = kd->vmst->mmapbase;
	hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum;
	if (_kvm_maphdrs(kd, hdrsz) == -1)
		return (-1);
	nlist[0].n_name = "kernbase";
	nlist[1].n_name = NULL;
	if (kvm_nlist(kd, nlist) != 0)
		kernbase = KERNBASE;
	else
		kernbase = nlist[0].n_value;

	nlist[0].n_name = "physaddr";
	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "couldn't get phys addr");
		return (-1);
	}
	physaddr = nlist[0].n_value;
	nlist[0].n_name = "kernel_l1pa";
	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	/*
	 * No page tables are available yet, so convert the virtual address
	 * of the kernel_l1pa symbol to a physical address by hand before
	 * reading the L1 table's physical address out of the dump.
	 */
	if (kvm_read(kd, (nlist[0].n_value - kernbase + physaddr), &pa,
	    sizeof(pa)) != sizeof(pa)) {
		_kvm_err(kd, kd->program, "cannot read kernel_l1pa");
		return (-1);
	}
	l1pt = _kvm_malloc(kd, L1_TABLE_SIZE);
	if (l1pt == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate l1pt");
		return (-1);
	}
	if (kvm_read(kd, pa, l1pt, L1_TABLE_SIZE) != L1_TABLE_SIZE) {
		_kvm_err(kd, kd->program, "cannot read l1pt");
		free(l1pt);
		return (-1);
	}
	vm->l1pt = l1pt;
	return (0);
}

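/*
 * A few page-table constants and predicates are duplicated from the
 * kernel's ARM pmap code so that the L1/L2 tables found in the dump can
 * be walked from userland.
 */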
/* from arm/pmap.c */
#define	L1_IDX(va)		(((vm_offset_t)(va)) >> L1_S_SHIFT)
/* from arm/pmap.h */
#define	L1_TYPE_INV	0x00		/* Invalid (fault) */
#define	L1_TYPE_C	0x01		/* Coarse L2 */
#define	L1_TYPE_S	0x02		/* Section */
#define	L1_TYPE_F	0x03		/* Fine L2 */
#define	L1_TYPE_MASK	0x03		/* mask of type bits */

#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_valid(pde)	((pde) != 0)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)

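/*
 * Translate a kernel virtual address into an offset in the crash dump by
 * walking the ARM two-level page tables: look up the L1 descriptor in the
 * cached L1 table, handle 1MB section mappings directly, otherwise fetch
 * the L2 PTE from the dump and combine its frame address with the page
 * offset.  Returns the number of bytes readable at *pa, or 0 on failure.
 */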
int
_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa)
{
	struct vmstate *vm = kd->vmst;
	pd_entry_t pd;
	pt_entry_t pte;
	u_long pte_pa;
	off_t pte_off;

	if (kd->vmst->minidump)
		return (_kvm_minidump_kvatop(kd, va, pa));

	if (vm->l1pt == NULL)
		return (_kvm_pa2off(kd, va, pa, PAGE_SIZE));
	pd = vm->l1pt[L1_IDX(va)];
	if (!l1pte_valid(pd))
		goto invalid;
	if (l1pte_section_p(pd)) {
		/* 1MB section mapping. */
		*pa = ((u_long)pd & L1_S_ADDR_MASK) + (va & L1_S_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, L1_S_SIZE));
	}
	/* Coarse mapping: locate the L2 PTE in the dump and read it. */
	pte_pa = (pd & L1_ADDR_MASK) + l2pte_index(va) * sizeof(pte);
	if (_kvm_pa2off(kd, pte_pa, &pte_off, L1_S_SIZE) == 0)
		goto invalid;
	if (lseek(kd->pmfd, pte_off, SEEK_SET) == -1) {
		_kvm_syserr(kd, kd->program, "_kvm_kvatop: lseek");
		goto invalid;
	}
	if (read(kd->pmfd, &pte, sizeof(pte)) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "_kvm_kvatop: read");
		goto invalid;
	}
	if (!l2pte_valid(pte))
		goto invalid;
	if ((pte & L2_TYPE_MASK) == L2_TYPE_L) {
		/* 64KB large page. */
		*pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, L2_L_SIZE));
	}
	/* 4KB small page. */
	*pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
	return (_kvm_pa2off(kd, *pa, pa, PAGE_SIZE));
invalid:
	_kvm_err(kd, 0, "Invalid address (%lx)", va);
	return (0);
}

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (e.g. m68k)
 */
int
_kvm_mdopen(kvm_t *kd)
{

#ifdef FBSD_NOT_YET
	kd->usrstack = USRSTACK;
	kd->min_uva = VM_MIN_ADDRESS;
	kd->max_uva = VM_MAXUSER_ADDRESS;
#endif

	return (0);
}