xref: /freebsd/lib/libkvm/kvm_amd64.c (revision 8d20be1e22095c27faf8fe8b2f0d089739cc742e)
/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 6/4/93";
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * AMD64 machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */
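/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * how this machine-dependent module is reached through the public libkvm
 * API when a program examines an amd64 crash dump.  The kernel and core
 * paths and the "ticks" symbol are placeholders.  kvm_openfiles() ends up
 * in _kvm_initvtop() below, and each kvm_read() of a kernel virtual
 * address goes through _kvm_kvatop()/_kvm_vatop().
 */
#if 0
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <nlist.h>
#include <stdio.h>

static void
example_read_ticks(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct nlist nl[] = { { .n_name = "ticks" }, { .n_name = NULL } };
	kvm_t *kd;
	int ticks;

	/* Open a kernel image and crash dump; _kvm_initvtop() runs here. */
	kd = kvm_openfiles("/boot/kernel/kernel", "/var/crash/vmcore.0",
	    NULL, O_RDONLY, errbuf);
	if (kd == NULL)
		return;
	/* kvm_read() translates each kernel VA via _kvm_kvatop(). */
	if (kvm_nlist(kd, nl) == 0 &&
	    kvm_read(kd, nl[0].n_value, &ticks, sizeof(ticks)) ==
	    sizeof(ticks))
		printf("ticks: %d\n", ticks);
	kvm_close(kd);
}
#endif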

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>

#include <machine/elf.h>

#include <limits.h>

#include "kvm_private.h"

#ifndef btop
#define	btop(x)		(amd64_btop(x))
#define	ptob(x)		(amd64_ptob(x))
#endif

/* minidump must be the first item! */
struct vmstate {
	int		minidump;	/* 1 = minidump mode */
	void		*mmapbase;
	size_t		mmapsize;
	pml4_entry_t	*PML4;
};
/*
 * Map the ELF headers into the process' address space.  We do this in two
 * steps: first the ELF header itself, then, using the sizes recorded there,
 * the whole set of program headers.  (Taken from kvm_ia64.c)
 */
static int
_kvm_maphdrs(kvm_t *kd, size_t sz)
{
	struct vmstate *vm = kd->vmst;

	/* munmap() previous mmap(). */
	if (vm->mmapbase != NULL) {
		munmap(vm->mmapbase, vm->mmapsize);
		vm->mmapbase = NULL;
	}

	vm->mmapsize = sz;
	vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->mmapbase == MAP_FAILED) {
		_kvm_err(kd, kd->program, "cannot mmap corefile");
		return (-1);
	}
	return (0);
}

/*
 * Translate a physical memory address to a file-offset in the crash-dump.
 * (Taken from kvm_ia64.c)
 */
static size_t
_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs)
{
	Elf_Ehdr *e = kd->vmst->mmapbase;
	Elf_Phdr *p;
	int n;

	if (kd->rawdump) {
		*ofs = pa;
		return (PAGE_SIZE - ((size_t)pa & PAGE_MASK));
	}

	p = (Elf_Phdr*)((char*)e + e->e_phoff);
	n = e->e_phnum;
	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
		p++, n--;
	if (n == 0)
		return (0);
	*ofs = (pa - p->p_paddr) + p->p_offset;
	return (PAGE_SIZE - ((size_t)pa & PAGE_MASK));
}
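/*
 * Worked example for the translation above (illustrative, with made-up
 * program header values): if the dump carries an Elf_Phdr with p_paddr
 * 0x1000000, p_memsz 0x400000 and p_offset 0x2000, then physical address
 * 0x1003abc falls inside that segment and _kvm_pa2off() yields
 *
 *	*ofs = (0x1003abc - 0x1000000) + 0x2000 = 0x5abc
 *
 * and returns PAGE_SIZE - (0x1003abc & PAGE_MASK) = 0x1000 - 0xabc = 0x544,
 * i.e. the number of bytes readable before the next page boundary.
 */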

void
_kvm_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	if (kd->vmst->minidump)
		return (_kvm_minidump_freevtop(kd));
	if (vm->mmapbase != NULL)
		munmap(vm->mmapbase, vm->mmapsize);
	if (vm->PML4)
		free(vm->PML4);
	free(vm);
	kd->vmst = NULL;
}
146 
147 int
148 _kvm_initvtop(kvm_t *kd)
149 {
150 	struct nlist nl[2];
151 	u_long pa;
152 	u_long kernbase;
153 	pml4_entry_t	*PML4;
154 	Elf_Ehdr *ehdr;
155 	size_t hdrsz;
156 	char minihdr[8];
157 
158 	if (!kd->rawdump && pread(kd->pmfd, &minihdr, 8, 0) == 8)
159 		if (memcmp(&minihdr, "minidump", 8) == 0)
160 			return (_kvm_minidump_initvtop(kd));
161 
162 	kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
163 	if (kd->vmst == 0) {
164 		_kvm_err(kd, kd->program, "cannot allocate vm");
165 		return (-1);
166 	}
167 	kd->vmst->PML4 = 0;
168 
169 	if (kd->rawdump == 0) {
170 		if (_kvm_maphdrs(kd, sizeof(Elf_Ehdr)) == -1)
171 			return (-1);
172 
173 		ehdr = kd->vmst->mmapbase;
174 		hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum;
175 		if (_kvm_maphdrs(kd, hdrsz) == -1)
176 			return (-1);
177 	}
178 
179 	nl[0].n_name = "kernbase";
180 	nl[1].n_name = 0;
181 
182 	if (kvm_nlist(kd, nl) != 0) {
183 		_kvm_err(kd, kd->program, "bad namelist - no kernbase");
184 		return (-1);
185 	}
186 	kernbase = nl[0].n_value;
187 
188 	nl[0].n_name = "KPML4phys";
189 	nl[1].n_name = 0;
190 
191 	if (kvm_nlist(kd, nl) != 0) {
192 		_kvm_err(kd, kd->program, "bad namelist - no KPML4phys");
193 		return (-1);
194 	}
195 	if (kvm_read(kd, (nl[0].n_value - kernbase), &pa, sizeof(pa)) !=
196 	    sizeof(pa)) {
197 		_kvm_err(kd, kd->program, "cannot read KPML4phys");
198 		return (-1);
199 	}
	PML4 = _kvm_malloc(kd, PAGE_SIZE);
	if (PML4 == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate PML4");
		return (-1);
	}
	if (kvm_read(kd, pa, PML4, PAGE_SIZE) != PAGE_SIZE) {
		_kvm_err(kd, kd->program, "cannot read KPML4phys");
		return (-1);
	}
	kd->vmst->PML4 = PML4;
	return (0);
}
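/*
 * Note on the bootstrap read above (explanatory, with hypothetical numbers):
 * when kvm_read() is called for KPML4phys, vmst->PML4 is still NULL, so
 * _kvm_vatop() takes its bootstrap branch and treats the address as a
 * physical one.  Subtracting kernbase therefore turns the symbol's kernel
 * virtual address into the physical offset at which the value sits in the
 * dump; e.g. with kernbase 0xffffffff80000000 and an n_value of
 * 0xffffffff80b1c000, the read is issued for physical address 0xb1c000.
 */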

static int
_kvm_vatop(kvm_t *kd, u_long va, off_t *pa)
{
	struct vmstate *vm;
	u_long offset;
	u_long pdpe_pa;
	u_long pde_pa;
	u_long pte_pa;
	pml4_entry_t pml4e;
	pdp_entry_t pdpe;
	pd_entry_t pde;
	pt_entry_t pte;
	u_long pml4eindex;
	u_long pdpeindex;
	u_long pdeindex;
	u_long pteindex;
	u_long a;
	off_t ofs;
	size_t s;

	vm = kd->vmst;
	offset = va & (PAGE_SIZE - 1);

	/*
	 * If we are initializing (kernel page table descriptor pointer
	 * not yet set) then return pa == va to avoid infinite recursion.
	 */
	if (vm->PML4 == 0) {
		s = _kvm_pa2off(kd, va, pa);
		if (s == 0) {
			_kvm_err(kd, kd->program,
			    "_kvm_vatop: bootstrap data not in dump");
			goto invalid;
		} else
			return (PAGE_SIZE - offset);
	}

	pml4eindex = (va >> PML4SHIFT) & (NPML4EPG - 1);
	pml4e = vm->PML4[pml4eindex];
	if (((u_long)pml4e & PG_V) == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pml4e not valid");
		goto invalid;
	}

	pdpeindex = (va >> PDPSHIFT) & (NPDPEPG-1);
	pdpe_pa = ((u_long)pml4e & PG_FRAME) +
	    (pdpeindex * sizeof(pdp_entry_t));

	s = _kvm_pa2off(kd, pdpe_pa, &ofs);
	if (s < sizeof pdpe) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pdpe_pa not found");
		goto invalid;
	}
	if (lseek(kd->pmfd, ofs, 0) == -1) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop: lseek pdpe_pa");
		goto invalid;
	}
	if (read(kd->pmfd, &pdpe, sizeof pdpe) != sizeof pdpe) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop: read pdpe");
		goto invalid;
	}
	if (((u_long)pdpe & PG_V) == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pdpe not valid");
		goto invalid;
	}

	pdeindex = (va >> PDRSHIFT) & (NPDEPG-1);
	pde_pa = ((u_long)pdpe & PG_FRAME) + (pdeindex * sizeof(pd_entry_t));

	s = _kvm_pa2off(kd, pde_pa, &ofs);
	if (s < sizeof pde) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pde_pa not found");
		goto invalid;
	}
	if (lseek(kd->pmfd, ofs, 0) == -1) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop: lseek pde_pa");
		goto invalid;
	}
	if (read(kd->pmfd, &pde, sizeof pde) != sizeof pde) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop: read pde");
		goto invalid;
	}
	if (((u_long)pde & PG_V) == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pde not valid");
		goto invalid;
	}

	if ((u_long)pde & PG_PS) {
		/*
		 * No final-level page table; the PDE maps one 2MB page.
		 */
#define	PAGE2M_MASK	(NBPDR - 1)
#define	PG_FRAME2M	(~PAGE2M_MASK)
		a = ((u_long)pde & PG_FRAME2M) + (va & PAGE2M_MASK);
		s = _kvm_pa2off(kd, a, pa);
		if (s == 0) {
			_kvm_err(kd, kd->program,
			    "_kvm_vatop: 2MB page address not in dump");
			goto invalid;
		} else
			return (NBPDR - (va & PAGE2M_MASK));
	}

	pteindex = (va >> PAGE_SHIFT) & (NPTEPG-1);
	pte_pa = ((u_long)pde & PG_FRAME) + (pteindex * sizeof(pt_entry_t));

	s = _kvm_pa2off(kd, pte_pa, &ofs);
	if (s < sizeof pte) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pte_pa not found");
		goto invalid;
	}
	if (lseek(kd->pmfd, ofs, 0) == -1) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop: lseek");
		goto invalid;
	}
	if (read(kd->pmfd, &pte, sizeof pte) != sizeof pte) {
		_kvm_syserr(kd, kd->program, "_kvm_vatop: read");
		goto invalid;
	}
	if (((u_long)pte & PG_V) == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop: pte not valid");
		goto invalid;
	}

	a = ((u_long)pte & PG_FRAME) + offset;
	s = _kvm_pa2off(kd, a, pa);
	if (s == 0) {
		_kvm_err(kd, kd->program, "_kvm_vatop: address not in dump");
		goto invalid;
	} else
		return (PAGE_SIZE - offset);

invalid:
	_kvm_err(kd, 0, "invalid address (0x%lx)", va);
	return (0);
}
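/*
 * Worked example for the page-table walk above (illustrative; the virtual
 * address is arbitrary): with 4KB pages, PAGE_SHIFT = 12, PDRSHIFT = 21,
 * PDPSHIFT = 30 and PML4SHIFT = 39, so for va = 0xffffffff80201234:
 *
 *	pml4eindex = (va >> 39) & 511 = 511
 *	pdpeindex  = (va >> 30) & 511 = 510
 *	pdeindex   = (va >> 21) & 511 = 1
 *	pteindex   = (va >> 12) & 511 = 1
 *	offset     = va & (PAGE_SIZE - 1) = 0x234
 *
 * Each step reads the selected entry from the dump, masks it with PG_FRAME
 * to get the physical base of the next level, and the final PTE frame plus
 * the page offset gives the physical address handed to _kvm_pa2off().
 */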

int
_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa)
{

	if (kd->vmst->minidump)
		return (_kvm_minidump_kvatop(kd, va, pa));
	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "kvm_kvatop called in live kernel!");
		return (0);
	}
	return (_kvm_vatop(kd, va, pa));
}