xref: /freebsd/lib/libkvm/kvm_arm.c (revision 49b49cda41feabe3439f7318e8bf40e3896c7bf4)
/*-
 * Copyright (c) 2005 Olivier Houchard
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * ARM machine dependent routines for kvm.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/endian.h>
#include <kvm.h>
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#ifdef __arm__
#include <machine/vmparam.h>
#endif

#include "kvm_private.h"
#include "kvm_arm.h"

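/*
 * Private state kept per open descriptor: the kernel's L1 page table as
 * read from the dump (NULL until _arm_initvtop() has loaded it) and the
 * core file's ELF program headers, used to map physical addresses to
 * file offsets.
 */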
struct vmstate {
	arm_pd_entry_t *l1pt;
	size_t phnum;
	GElf_Phdr *phdr;
};

/*
 * Translate a physical memory address to a file offset in the crash dump.
 * On success the offset is stored in *ofs and the number of bytes that can
 * be read contiguously from that offset is returned (the remainder of the
 * enclosing page, or of the whole segment when pgsz is 0); 0 is returned
 * if the address is not covered by any segment.
 */
static size_t
_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
{
	struct vmstate *vm = kd->vmst;
	GElf_Phdr *p;
	size_t n;

	p = vm->phdr;
	n = vm->phnum;
	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
		p++, n--;
	if (n == 0)
		return (0);

	*ofs = (pa - p->p_paddr) + p->p_offset;
	if (pgsz == 0)
		return (p->p_memsz - (pa - p->p_paddr));
	return (pgsz - ((size_t)pa & (pgsz - 1)));
}

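/*
 * Release everything allocated by _arm_initvtop(): the cached L1 table,
 * the program header array, and the vmstate itself.
 */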
static void
_arm_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	free(vm->l1pt);
	free(vm->phdr);
	free(vm);
	kd->vmst = NULL;
}

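/*
 * Accept 32-bit ARM ELF kernel dumps; ARM minidumps are handled by a
 * separate backend.
 */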
static int
_arm_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_ARM) &&
	    !_kvm_is_minidump(kd));
}

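/*
 * Find the kernel's virtual base and physical load address, either from
 * the PT_DUMP_DELTA program header or from the "kernbase" and "physaddr"
 * symbols, then read the kernel L1 page table from the dump so that
 * _arm_kvatop() can walk it.
 */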
static int
_arm_initvtop(kvm_t *kd)
{
	struct vmstate *vm;
	struct kvm_nlist nl[2];
	kvaddr_t kernbase;
	arm_physaddr_t physaddr, pa;
	arm_pd_entry_t *l1pt;
	size_t i;
	int found;

	if (kd->rawdump) {
		_kvm_err(kd, kd->program, "raw dumps not supported on arm");
		return (-1);
	}

	vm = _kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vm;
	vm->l1pt = NULL;

	if (_kvm_read_core_phdrs(kd, &vm->phnum, &vm->phdr) == -1)
		return (-1);

	/* Prefer the load delta recorded in the dump, if present. */
	found = 0;
	for (i = 0; i < vm->phnum; i++) {
		if (vm->phdr[i].p_type == PT_DUMP_DELTA) {
			kernbase = vm->phdr[i].p_vaddr;
			physaddr = vm->phdr[i].p_paddr;
			found = 1;
			break;
		}
	}

	nl[1].n_name = NULL;
	if (!found) {
		/* Fall back to the kernel symbol table. */
		nl[0].n_name = "kernbase";
		if (kvm_nlist2(kd, nl) != 0) {
#ifdef __arm__
			kernbase = KERNBASE;
#else
			_kvm_err(kd, kd->program, "cannot resolve kernbase");
			return (-1);
#endif
		} else
			kernbase = nl[0].n_value;

		nl[0].n_name = "physaddr";
		if (kvm_nlist2(kd, nl) != 0) {
			_kvm_err(kd, kd->program, "couldn't get phys addr");
			return (-1);
		}
		physaddr = nl[0].n_value;
	}
	nl[0].n_name = "kernel_l1pa";
	if (kvm_nlist2(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	if (kvm_read2(kd, (nl[0].n_value - kernbase + physaddr), &pa,
	    sizeof(pa)) != sizeof(pa)) {
		_kvm_err(kd, kd->program, "cannot read kernel_l1pa");
		return (-1);
	}
	l1pt = _kvm_malloc(kd, ARM_L1_TABLE_SIZE);
	if (l1pt == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate l1pt");
		return (-1);
	}
	if (kvm_read2(kd, pa, l1pt, ARM_L1_TABLE_SIZE) != ARM_L1_TABLE_SIZE) {
		_kvm_err(kd, kd->program, "cannot read l1pt");
		free(l1pt);
		return (-1);
	}
	vm->l1pt = l1pt;
	return (0);
}


/* From arm/pmap.c: L1/L2 descriptor decoding helpers. */
#define	ARM_L1_IDX(va)		((va) >> ARM_L1_S_SHIFT)

#define	l1pte_section_p(pde)	(((pde) & ARM_L1_TYPE_MASK) == ARM_L1_TYPE_S)
#define	l1pte_valid(pde)	((pde) != 0)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_index(v)		(((v) & ARM_L2_ADDR_BITS) >> ARM_L2_S_SHIFT)

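/*
 * Translate a kernel virtual address to an offset in the crash dump by
 * walking the L1 table cached by _arm_initvtop(): 1MB section mappings
 * are translated directly, otherwise the corresponding L2 entry is read
 * from the dump and decoded as a large (64KB) or small (4KB) page.
 */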
static int
_arm_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm = kd->vmst;
	arm_pd_entry_t pd;
	arm_pt_entry_t pte;
	arm_physaddr_t pte_pa;
	off_t pte_off;

	if (vm->l1pt == NULL)
		return (_kvm_pa2off(kd, va, pa, ARM_PAGE_SIZE));
	pd = _kvm32toh(kd, vm->l1pt[ARM_L1_IDX(va)]);
	if (!l1pte_valid(pd))
		goto invalid;
	if (l1pte_section_p(pd)) {
		/* 1MB section mapping. */
		*pa = (pd & ARM_L1_S_ADDR_MASK) + (va & ARM_L1_S_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, ARM_L1_S_SIZE));
	}
	/* Coarse mapping: fetch the L2 PTE from the dump. */
	pte_pa = (pd & ARM_L1_C_ADDR_MASK) + l2pte_index(va) * sizeof(pte);
	if (_kvm_pa2off(kd, pte_pa, &pte_off, ARM_L1_S_SIZE) == 0)
		goto invalid;
	if (pread(kd->pmfd, &pte, sizeof(pte), pte_off) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "_arm_kvatop: pread");
		goto invalid;
	}
	pte = _kvm32toh(kd, pte);
	if (!l2pte_valid(pte))
		goto invalid;
	if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) {
		*pa = (pte & ARM_L2_L_FRAME) | (va & ARM_L2_L_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, ARM_L2_L_SIZE));
	}
	*pa = (pte & ARM_L2_S_FRAME) | (va & ARM_L2_S_OFFSET);
	return (_kvm_pa2off(kd, *pa, pa, ARM_PAGE_SIZE));
invalid:
	_kvm_err(kd, 0, "Invalid address (%jx)", (uintmax_t)va);
	return (0);
}

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants (e.g., m68k).
 */
#ifdef FBSD_NOT_YET
int
_kvm_mdopen(kvm_t *kd)
{

	kd->usrstack = USRSTACK;
	kd->min_uva = VM_MIN_ADDRESS;
	kd->max_uva = VM_MAXUSER_ADDRESS;

	return (0);
}
#endif

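/*
 * A dump is "native" only when this library is built for ARM and the
 * dump's ELF byte order matches the host's.
 */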
int
_arm_native(kvm_t *kd)
{

#ifdef __arm__
#if _BYTE_ORDER == _LITTLE_ENDIAN
	return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB);
#else
	return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB);
#endif
#else
	return (0);
#endif
}

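/* Method table registered with the machine-independent kvm code below. */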
struct kvm_arch kvm_arm = {
	.ka_probe = _arm_probe,
	.ka_initvtop = _arm_initvtop,
	.ka_freevtop = _arm_freevtop,
	.ka_kvatop = _arm_kvatop,
	.ka_native = _arm_native,
};

KVM_ARCH(kvm_arm);
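
/*
 * Illustrative sketch (not part of the library): this backend is driven
 * through the generic libkvm API; the symbol name and file paths below
 * are placeholders.  A consumer reading an ARM vmcore would do roughly:
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	struct kvm_nlist nl[] = { { .n_name = "hz" }, { .n_name = NULL } };
 *	int hz;
 *	kvm_t *kd;
 *
 *	kd = kvm_open2("/boot/kernel/kernel", "/var/crash/vmcore.0",
 *	    O_RDONLY, errbuf, NULL);
 *	if (kd != NULL && kvm_nlist2(kd, nl) == 0 &&
 *	    kvm_read2(kd, nl[0].n_value, &hz, sizeof(hz)) == sizeof(hz))
 *		printf("hz=%d\n", hz);
 *	if (kd != NULL)
 *		kvm_close(kd);
 */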