xref: /freebsd/lib/libkvm/kvm_i386.c (revision 9336e0699bda8a301cd2bfa37106b6ec5e32012e)
/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 6/4/93";
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * i386 machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */
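
/*
 * Rough usage sketch (illustrative only, not part of this file): a crash
 * dump consumer opens the kernel and core with kvm_openfiles() and reads
 * kernel virtual addresses with kvm_read(), which ends up in _kvm_kvatop()
 * below for address translation.  The paths and the "ticks" symbol are
 * examples, not requirements.
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	struct nlist nl[2];
 *	int ticks;
 *	kvm_t *kd;
 *
 *	kd = kvm_openfiles("/boot/kernel/kernel", "/var/crash/vmcore.0",
 *	    NULL, O_RDONLY, errbuf);
 *	nl[0].n_name = "ticks";
 *	nl[1].n_name = NULL;
 *	if (kd != NULL && kvm_nlist(kd, nl) == 0)
 *		(void)kvm_read(kd, nl[0].n_value, &ticks, sizeof(ticks));
 *	if (kd != NULL)
 *		kvm_close(kd);
 */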

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>

#include <machine/elf.h>

#include <limits.h>

#include "kvm_private.h"

#ifndef btop
#define	btop(x)		(i386_btop(x))
#define	ptob(x)		(i386_ptob(x))
#endif

#define	PG_FRAME_PAE	(~((uint64_t)PAGE_MASK))
#define	PDRSHIFT_PAE	21
#define	NPTEPG_PAE	(PAGE_SIZE/sizeof(uint64_t))
#define	NBPDR_PAE	(1<<PDRSHIFT_PAE)
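
/*
 * PAE constants, defined locally: the system headers supply the non-PAE
 * values (PDRSHIFT, NPTEPG, NBPDR, PG_FRAME), but a crash dump may have
 * been taken in either paging mode, so _kvm_initvtop() detects the mode
 * at run time and the PAE variants are needed alongside them.  Under PAE
 * a page-directory entry covers 2MB and a page table holds 512 eight-byte
 * entries.
 */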

/*
 * minidump must be the first item!  The minidump code installs its own
 * vmstate behind kd->vmst, and _kvm_freevtop()/_kvm_kvatop() below check
 * this flag through that pointer before knowing which layout is in use.
 */
struct vmstate {
	int		minidump;	/* 1 = minidump mode */
	void		*mmapbase;
	size_t		mmapsize;
	void		*PTD;
	int		pae;
};

/*
 * Map the ELF headers into the process' address space. We do this in two
 * steps: first the ELF header itself, then, using the sizes recorded there,
 * the whole set of program headers. (Taken from kvm_ia64.c)
 */
static int
_kvm_maphdrs(kvm_t *kd, size_t sz)
{
	struct vmstate *vm = kd->vmst;

	/* munmap() previous mmap(). */
	if (vm->mmapbase != NULL) {
		munmap(vm->mmapbase, vm->mmapsize);
		vm->mmapbase = NULL;
	}

	vm->mmapsize = sz;
	vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->mmapbase == MAP_FAILED) {
		_kvm_err(kd, kd->program, "cannot mmap corefile");
		return (-1);
	}
	return (0);
}

/*
 * Translate a physical memory address to a file-offset in the crash-dump.
 * (Taken from kvm_ia64.c)
 */
static size_t
_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs)
{
	Elf_Ehdr *e = kd->vmst->mmapbase;
	Elf_Phdr *p;
	int n;

	if (kd->rawdump) {
		*ofs = pa;
		return (PAGE_SIZE - ((size_t)pa & PAGE_MASK));
	}

	p = (Elf_Phdr*)((char*)e + e->e_phoff);
	n = e->e_phnum;
	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
		p++, n--;
	if (n == 0)
		return (0);
	*ofs = (pa - p->p_paddr) + p->p_offset;
	return (PAGE_SIZE - ((size_t)pa & PAGE_MASK));
}
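
/*
 * Callers treat the value returned by _kvm_pa2off() as the number of bytes
 * that remain readable in the page starting at *ofs, and 0 as "not present
 * in the dump".  A typical caller, like the translation routines below,
 * does roughly the following (pa, len and buf stand in for whatever is
 * being fetched):
 *
 *	off_t ofs;
 *
 *	if (_kvm_pa2off(kd, pa, &ofs) < len)
 *		goto invalid;
 *	(void)pread(kd->pmfd, buf, len, ofs);
 */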

void
_kvm_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	if (kd->vmst->minidump)
		return (_kvm_minidump_freevtop(kd));
	if (vm->mmapbase != NULL)
		munmap(vm->mmapbase, vm->mmapsize);
	if (vm->PTD)
		free(vm->PTD);
	free(vm);
	kd->vmst = NULL;
}

int
_kvm_initvtop(kvm_t *kd)
{
	struct nlist nlist[2];
	u_long pa;
	u_long kernbase;
	char		*PTD;
	Elf_Ehdr	*ehdr;
	size_t		hdrsz;
	int		i;
	char		minihdr[8];

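	/*
	 * A minidump announces itself with the string "minidump" in the
	 * first eight bytes of the core file; such dumps carry their own
	 * page map and are handed off to the minidump code instead of the
	 * page-table walk set up below.
	 */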
	if (!kd->rawdump && pread(kd->pmfd, &minihdr, 8, 0) == 8)
		if (memcmp(&minihdr, "minidump", 8) == 0)
			return (_kvm_minidump_initvtop(kd));

	kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
	if (kd->vmst == 0) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst->PTD = 0;

	if (kd->rawdump == 0) {
		if (_kvm_maphdrs(kd, sizeof(Elf_Ehdr)) == -1)
			return (-1);

		ehdr = kd->vmst->mmapbase;
		hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum;
		if (_kvm_maphdrs(kd, hdrsz) == -1)
			return (-1);
	}

	nlist[0].n_name = "kernbase";
	nlist[1].n_name = 0;

	if (kvm_nlist(kd, nlist) != 0)
		kernbase = KERNBASE;	/* for old kernels */
	else
		kernbase = nlist[0].n_value;

	nlist[0].n_name = "IdlePDPT";
	nlist[1].n_name = 0;

	if (kvm_nlist(kd, nlist) == 0) {
		uint64_t pa64;

		if (kvm_read(kd, (nlist[0].n_value - kernbase), &pa,
		    sizeof(pa)) != sizeof(pa)) {
			_kvm_err(kd, kd->program, "cannot read IdlePDPT");
			return (-1);
		}
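		/*
		 * IdlePDPT points at the four-entry page-directory-pointer
		 * table.  Read the four page-directory pages it references
		 * into one contiguous buffer so the PAE translation code can
		 * index it as a flat array of 64-bit PDEs.
		 */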
		PTD = _kvm_malloc(kd, 4 * PAGE_SIZE);
		if (PTD == NULL) {
			_kvm_err(kd, kd->program, "cannot allocate PTD");
			return (-1);
		}
		for (i = 0; i < 4; i++) {
			if (kvm_read(kd, pa + (i * sizeof(pa64)), &pa64,
			    sizeof(pa64)) != sizeof(pa64)) {
				_kvm_err(kd, kd->program, "cannot read PDPT");
				free(PTD);
				return (-1);
			}
			if (kvm_read(kd, pa64 & PG_FRAME_PAE,
			    PTD + (i * PAGE_SIZE), PAGE_SIZE) != (PAGE_SIZE)) {
				_kvm_err(kd, kd->program,
				    "cannot read page directory");
				free(PTD);
				return (-1);
			}
		}
		kd->vmst->PTD = PTD;
		kd->vmst->pae = 1;
	} else {
		nlist[0].n_name = "IdlePTD";
		nlist[1].n_name = 0;

		if (kvm_nlist(kd, nlist) != 0) {
			_kvm_err(kd, kd->program, "bad namelist");
			return (-1);
		}
		if (kvm_read(kd, (nlist[0].n_value - kernbase), &pa,
		    sizeof(pa)) != sizeof(pa)) {
			_kvm_err(kd, kd->program, "cannot read IdlePTD");
			return (-1);
		}
		PTD = _kvm_malloc(kd, PAGE_SIZE);
		if (PTD == NULL) {
			_kvm_err(kd, kd->program, "cannot allocate PTD");
			return (-1);
		}
		if (kvm_read(kd, pa, PTD, PAGE_SIZE) != PAGE_SIZE) {
			_kvm_err(kd, kd->program, "cannot read PTD");
			free(PTD);
			return (-1);
		}
		kd->vmst->PTD = PTD;
		kd->vmst->pae = 0;
	}
	return (0);
}

static int
_kvm_vatop(kvm_t *kd, u_long va, off_t *pa)
{
	struct vmstate *vm;
	u_long offset;
	u_long pte_pa;
	u_long pde_pa;
	pd_entry_t pde;
	pt_entry_t pte;
	u_long pdeindex;
	u_long pteindex;
	size_t s;
	u_long a;
	off_t ofs;
	uint32_t *PTD;

	vm = kd->vmst;
	PTD = (uint32_t *)vm->PTD;
	offset = va & (PAGE_SIZE - 1);

	/*
	 * If we are initializing (kernel page table descriptor pointer
	 * not yet set) then return pa == va to avoid infinite recursion.
	 */
	if (PTD == 0) {
		s = _kvm_pa2off(kd, va, pa);
		if (s == 0) {
			_kvm_err(kd, kd->program,
			    "_kvm_vatop: bootstrap data not in dump");
			goto invalid;
		} else
			return (PAGE_SIZE - offset);
	}

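	/*
	 * Plain (non-PAE) i386 walk: the top 10 bits of va select a 32-bit
	 * page-directory entry, the next 10 bits select the page-table
	 * entry, and the low 12 bits are the offset within the page.
	 */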
	pdeindex = va >> PDRSHIFT;
	pde = PTD[pdeindex];
	if (((u_long)pde & PG_V) == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pde not valid");
		goto invalid;
	}

	if ((u_long)pde & PG_PS) {
		/*
		 * No second-level page table; the pde itself maps one 4MB
		 * page.  (We assume the kernel would not set PG_PS without
		 * having enabled page size extensions.)
		 */
#define	PAGE4M_MASK	(NBPDR - 1)
#define	PG_FRAME4M	(~PAGE4M_MASK)
		pde_pa = ((u_long)pde & PG_FRAME4M) + (va & PAGE4M_MASK);
		s = _kvm_pa2off(kd, pde_pa, &ofs);
		if (s < sizeof pde) {
			_kvm_syserr(kd, kd->program,
			    "_kvm_vatop: pde_pa not found");
			goto invalid;
		}
		*pa = ofs;
		return (NBPDR - (va & PAGE4M_MASK));
	}

	pteindex = (va >> PAGE_SHIFT) & (NPTEPG-1);
	pte_pa = ((u_long)pde & PG_FRAME) + (pteindex * sizeof(pde));

	s = _kvm_pa2off(kd, pte_pa, &ofs);
	if (s < sizeof pte) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pte_pa not found");
		goto invalid;
	}

	/* XXX This has to be a physical address read, kvm_read is virtual */
	if (lseek(kd->pmfd, ofs, 0) == -1) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop: lseek");
		goto invalid;
	}
	if (read(kd->pmfd, &pte, sizeof pte) != sizeof pte) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop: read");
		goto invalid;
	}
	if (((u_long)pte & PG_V) == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pte not valid");
		goto invalid;
	}

	a = ((u_long)pte & PG_FRAME) + offset;
	s = _kvm_pa2off(kd, a, pa);
	if (s == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop: address not in dump");
		goto invalid;
	} else
		return (PAGE_SIZE - offset);

invalid:
	_kvm_err(kd, 0, "invalid address (0x%lx)", va);
	return (0);
}

static int
_kvm_vatop_pae(kvm_t *kd, u_long va, off_t *pa)
{
	struct vmstate *vm;
	uint64_t offset;
	uint64_t pte_pa;
	uint64_t pde_pa;
	uint64_t pde;
	uint64_t pte;
	u_long pdeindex;
	u_long pteindex;
	size_t s;
	uint64_t a;
	off_t ofs;
	uint64_t *PTD;

	vm = kd->vmst;
	PTD = (uint64_t *)vm->PTD;
	offset = va & (PAGE_SIZE - 1);

	/*
	 * If we are initializing (kernel page table descriptor pointer
	 * not yet set) then return pa == va to avoid infinite recursion.
	 */
	if (PTD == 0) {
		s = _kvm_pa2off(kd, va, pa);
		if (s == 0) {
			_kvm_err(kd, kd->program,
			    "_kvm_vatop_pae: bootstrap data not in dump");
			goto invalid;
		} else
			return (PAGE_SIZE - offset);
	}

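	/*
	 * PAE walk: PTD is the flattened 16KB page directory built in
	 * _kvm_initvtop(), so bits 31-21 of va index it directly; the next
	 * 9 bits select one of 512 64-bit page-table entries, and the low
	 * 12 bits are the offset within the page.
	 */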
	pdeindex = va >> PDRSHIFT_PAE;
	pde = PTD[pdeindex];
	if (((u_long)pde & PG_V) == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop_pae: pde not valid");
		goto invalid;
	}

	if ((u_long)pde & PG_PS) {
		/*
		 * No second-level page table; the pde itself maps one 2MB
		 * page.  (We assume the kernel would not set PG_PS without
		 * having enabled page size extensions.)
		 */
#define	PAGE2M_MASK	(NBPDR_PAE - 1)
#define	PG_FRAME2M	(~PAGE2M_MASK)
		pde_pa = ((u_long)pde & PG_FRAME2M) + (va & PAGE2M_MASK);
		s = _kvm_pa2off(kd, pde_pa, &ofs);
		if (s < sizeof pde) {
			_kvm_syserr(kd, kd->program,
			    "_kvm_vatop_pae: pde_pa not found");
			goto invalid;
		}
		*pa = ofs;
		return (NBPDR_PAE - (va & PAGE2M_MASK));
	}

	pteindex = (va >> PAGE_SHIFT) & (NPTEPG_PAE-1);
	pte_pa = ((uint64_t)pde & PG_FRAME_PAE) + (pteindex * sizeof(pde));

	s = _kvm_pa2off(kd, pte_pa, &ofs);
	if (s < sizeof pte) {
		_kvm_err(kd, kd->program, "_kvm_vatop_pae: pte_pa not found");
		goto invalid;
	}

	/* XXX This has to be a physical address read, kvm_read is virtual */
	if (lseek(kd->pmfd, ofs, 0) == -1) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop_pae: lseek");
		goto invalid;
	}
	if (read(kd->pmfd, &pte, sizeof pte) != sizeof pte) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop_pae: read");
		goto invalid;
	}
	if (((uint64_t)pte & PG_V) == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop_pae: pte not valid");
		goto invalid;
	}

	a = ((uint64_t)pte & PG_FRAME_PAE) + offset;
	s = _kvm_pa2off(kd, a, pa);
	if (s == 0) {
		_kvm_err(kd, kd->program,
		    "_kvm_vatop_pae: address not in dump");
		goto invalid;
	} else
		return (PAGE_SIZE - offset);

invalid:
	_kvm_err(kd, 0, "invalid address (0x%lx)", va);
	return (0);
}

int
_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa)
{

	if (kd->vmst->minidump)
		return (_kvm_minidump_kvatop(kd, va, pa));
	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}
	if (kd->vmst->pae)
		return (_kvm_vatop_pae(kd, va, pa));
	else
		return (_kvm_vatop(kd, va, pa));
}
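
/*
 * _kvm_kvatop() is the translation hook used by the machine-independent
 * kvm_read() path on dead kernels: it returns the number of bytes valid at
 * *pa starting at va (at most the remainder of the page), or 0 on failure,
 * and the caller is expected to loop, translating page by page.  A minimal
 * sketch of such a loop, with read_kva() chosen here purely for
 * illustration:
 *
 *	ssize_t
 *	read_kva(kvm_t *kd, u_long va, char *buf, size_t len)
 *	{
 *		off_t pa;
 *		size_t done = 0;
 *		int n;
 *
 *		while (done < len) {
 *			n = _kvm_kvatop(kd, va + done, &pa);
 *			if (n <= 0)
 *				return (done > 0 ? (ssize_t)done : -1);
 *			if ((size_t)n > len - done)
 *				n = len - done;
 *			if (pread(kd->pmfd, buf + done, n, pa) != n)
 *				return (-1);
 *			done += n;
 *		}
 *		return ((ssize_t)done);
 *	}
 */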