xref: /freebsd/lib/libkvm/kvm_minidump_amd64.c (revision 907b59d76938e654f0d040a888e8dfca3de1e222)
1 /*-
2  * Copyright (c) 2006 Peter Wemm
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
28 
29 /*
30  * AMD64 machine dependent routines for kvm and minidumps.
31  */
32 
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <stdint.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <unistd.h>
39 #include <kvm.h>
40 
41 #include "../../sys/amd64/include/minidump.h"
42 
43 #include <limits.h>
44 
45 #include "kvm_private.h"
46 #include "kvm_amd64.h"
47 
48 #define	amd64_round_page(x)	roundup2((kvaddr_t)(x), AMD64_PAGE_SIZE)
49 
50 /*
51  * Per-dump translation state: the minidump header read from the start of
52  * the core file, plus an in-memory copy of the kernel page map that is
53  * stored in the dump right after the page bitmap.
54  */
55 struct vmstate {
56 	struct minidumphdr hdr;		/* dump header, byte-swapped to host order */
57 	amd64_pte_t *page_map;		/* v1: PTE array; v2: PDE array (see vatop) */
58 };
54 
55 static int
56 _amd64_minidump_probe(kvm_t *kd)
57 {
58 
59 	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64) &&
60 	    _kvm_is_minidump(kd));
61 }
62 
63 static void
64 _amd64_minidump_freevtop(kvm_t *kd)
65 {
66 	struct vmstate *vm = kd->vmst;
67 
68 	free(vm->page_map);
69 	free(vm);
70 	kd->vmst = NULL;
71 }
72 
73 static int
74 _amd64_minidump_initvtop(kvm_t *kd)
75 {
76 	struct vmstate *vmst;
77 	off_t off, sparse_off;
78 
79 	vmst = _kvm_malloc(kd, sizeof(*vmst));
80 	if (vmst == NULL) {
81 		_kvm_err(kd, kd->program, "cannot allocate vm");
82 		return (-1);
83 	}
84 	kd->vmst = vmst;
85 	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
86 	    sizeof(vmst->hdr)) {
87 		_kvm_err(kd, kd->program, "cannot read dump header");
88 		return (-1);
89 	}
90 	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) {
91 		_kvm_err(kd, kd->program, "not a minidump for this platform");
92 		return (-1);
93 	}
94 
95 	/*
96 	 * NB: amd64 minidump header is binary compatible between version 1
97 	 * and version 2; this may not be the case for the future versions.
98 	 */
99 	vmst->hdr.version = le32toh(vmst->hdr.version);
100 	if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
101 		_kvm_err(kd, kd->program, "wrong minidump version. expected %d got %d",
102 		    MINIDUMP_VERSION, vmst->hdr.version);
103 		return (-1);
104 	}
105 	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
106 	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
107 	vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
108 	vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
109 	vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
110 	vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);
111 
112 	/* Skip header and msgbuf */
113 	off = AMD64_PAGE_SIZE + amd64_round_page(vmst->hdr.msgbufsize);
114 
115 	sparse_off = off + amd64_round_page(vmst->hdr.bitmapsize) +
116 	    amd64_round_page(vmst->hdr.pmapsize);
117 	if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
118 	    AMD64_PAGE_SIZE, sizeof(uint64_t)) == -1) {
119 		_kvm_err(kd, kd->program, "cannot load core bitmap");
120 		return (-1);
121 	}
122 	off += amd64_round_page(vmst->hdr.bitmapsize);
123 
124 	vmst->page_map = _kvm_malloc(kd, vmst->hdr.pmapsize);
125 	if (vmst->page_map == NULL) {
126 		_kvm_err(kd, kd->program, "cannot allocate %d bytes for page_map",
127 		    vmst->hdr.pmapsize);
128 		return (-1);
129 	}
130 	if (pread(kd->pmfd, vmst->page_map, vmst->hdr.pmapsize, off) !=
131 	    (ssize_t)vmst->hdr.pmapsize) {
132 		_kvm_err(kd, kd->program, "cannot read %d bytes for page_map",
133 		    vmst->hdr.pmapsize);
134 		return (-1);
135 	}
136 	off += amd64_round_page(vmst->hdr.pmapsize);
137 
138 	return (0);
139 }
140 
/*
 * Translate a kernel virtual address to a core-file offset for a version 1
 * minidump, where page_map is a flat array of 4K PTEs indexed from
 * hdr.kernbase.  On success stores the file offset in *pa and returns the
 * number of bytes available in the page (AMD64_PAGE_SIZE - page offset);
 * on failure returns 0 with an error recorded via _kvm_err().
 */
static int
_amd64_minidump_vatop_v1(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	amd64_physaddr_t offset;
	amd64_pte_t pte;
	kvaddr_t pteindex;
	amd64_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & AMD64_PAGE_MASK;	/* byte offset within the 4K page */

	if (va >= vm->hdr.kernbase) {
		/* Kernel map: index the dumped PTE array directly. */
		pteindex = (va - vm->hdr.kernbase) >> AMD64_PAGE_SHIFT;
		if (pteindex >= vm->hdr.pmapsize / sizeof(*vm->page_map))
			goto invalid;
		pte = le64toh(vm->page_map[pteindex]);
		if ((pte & AMD64_PG_V) == 0) {
			_kvm_err(kd, kd->program,
			    "_amd64_minidump_vatop_v1: pte not valid");
			goto invalid;
		}
		a = pte & AMD64_PG_FRAME;
		/* Map the physical frame to its offset in the sparse dump. */
		ofs = _kvm_pt_find(kd, a);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop_v1: physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AMD64_PAGE_SIZE - offset);
	} else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		/* Direct map: physical address is a simple linear offset. */
		a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK;
		ofs = _kvm_pt_find(kd, a);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
    "_amd64_minidump_vatop_v1: direct map address 0x%jx not in minidump",
			    (uintmax_t)va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AMD64_PAGE_SIZE - offset);
	} else {
		_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop_v1: virtual address 0x%jx not minidumped",
		    (uintmax_t)va);
		goto invalid;
	}

invalid:
	/* NB: this overwrites the more specific error set above. */
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}
196 
/*
 * Translate a kernel virtual address to a core-file offset for a version 2
 * minidump, where page_map is an array of PDEs (2MB granularity) indexed
 * from hdr.kernbase.  A non-PS PDE points at a 4K page-table page which is
 * read from the dump to find the final PTE.  On success stores the file
 * offset in *pa and returns the bytes available in the 4K page; on failure
 * returns 0 with an error recorded via _kvm_err().
 */
static int
_amd64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	amd64_pte_t pt[AMD64_NPTEPG];
	struct vmstate *vm;
	amd64_physaddr_t offset;
	amd64_pde_t pde;
	amd64_pte_t pte;
	kvaddr_t pteindex;
	kvaddr_t pdeindex;
	amd64_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & AMD64_PAGE_MASK;	/* byte offset within the 4K page */

	if (va >= vm->hdr.kernbase) {
		/* Kernel map: one PDE per 2MB region, starting at kernbase. */
		pdeindex = (va - vm->hdr.kernbase) >> AMD64_PDRSHIFT;
		if (pdeindex >= vm->hdr.pmapsize / sizeof(*vm->page_map))
			goto invalid;
		pde = le64toh(vm->page_map[pdeindex]);
		if ((pde & AMD64_PG_V) == 0) {
			_kvm_err(kd, kd->program,
			    "_amd64_minidump_vatop: pde not valid");
			goto invalid;
		}
		if ((pde & AMD64_PG_PS) == 0) {
			/* 4K mapping: locate and read the page-table page. */
			a = pde & AMD64_PG_FRAME;
			/* TODO: Just read the single PTE */
			ofs = _kvm_pt_find(kd, a);
			if (ofs == -1) {
				_kvm_err(kd, kd->program,
				    "cannot find page table entry for %ju",
				    (uintmax_t)a);
				goto invalid;
			}
			if (pread(kd->pmfd, &pt, AMD64_PAGE_SIZE, ofs) !=
			    AMD64_PAGE_SIZE) {
				_kvm_err(kd, kd->program,
				    "cannot read page table entry for %ju",
				    (uintmax_t)a);
				goto invalid;
			}
			pteindex = (va >> AMD64_PAGE_SHIFT) &
			    (AMD64_NPTEPG - 1);
			pte = le64toh(pt[pteindex]);
			if ((pte & AMD64_PG_V) == 0) {
				_kvm_err(kd, kd->program,
				    "_amd64_minidump_vatop: pte not valid");
				goto invalid;
			}
			a = pte & AMD64_PG_FRAME;
		} else {
			/*
			 * 2MB superpage: base frame plus the 2MB-offset of
			 * va with the low 4K bits cleared (offset is a
			 * subset of PDRMASK bits, so XOR clears them; they
			 * are re-added below).
			 */
			a = pde & AMD64_PG_PS_FRAME;
			a += (va & AMD64_PDRMASK) ^ offset;
		}
		/* Map the physical frame to its offset in the sparse dump. */
		ofs = _kvm_pt_find(kd, a);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop: physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AMD64_PAGE_SIZE - offset);
	} else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		/* Direct map: physical address is a simple linear offset. */
		a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK;
		ofs = _kvm_pt_find(kd, a);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop: direct map address 0x%jx not in minidump",
			    (uintmax_t)va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AMD64_PAGE_SIZE - offset);
	} else {
		_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop: virtual address 0x%jx not minidumped",
		    (uintmax_t)va);
		goto invalid;
	}

invalid:
	/* NB: this overwrites the more specific error set above. */
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}
284 
285 static int
286 _amd64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
287 {
288 
289 	if (ISALIVE(kd)) {
290 		_kvm_err(kd, 0,
291 		    "_amd64_minidump_kvatop called in live kernel!");
292 		return (0);
293 	}
294 	if (((struct vmstate *)kd->vmst)->hdr.version == 1)
295 		return (_amd64_minidump_vatop_v1(kd, va, pa));
296 	else
297 		return (_amd64_minidump_vatop(kd, va, pa));
298 }
299 
/* Method table registering this amd64 minidump backend with libkvm. */
struct kvm_arch kvm_amd64_minidump = {
	.ka_probe = _amd64_minidump_probe,
	.ka_initvtop = _amd64_minidump_initvtop,
	.ka_freevtop = _amd64_minidump_freevtop,
	.ka_kvatop = _amd64_minidump_kvatop,
	.ka_native = _amd64_native,
};

KVM_ARCH(kvm_amd64_minidump);
309