xref: /freebsd/lib/libkvm/kvm_powerpc64.c (revision 994297b01b98816bea1abf45ae4bac1bc69ee7a0)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2008, Juniper Networks, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kerneldump.h>
#include <sys/mman.h>

#include <elf.h>
#include <kvm.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "kvm_private.h"

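/*
 * Private per-handle state: the mmap(2)'ed window over the corefile
 * headers, its size, the size of any leading kerneldump header, and
 * pointers to the ELF file header and program headers inside that
 * window.
 */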
struct vmstate {
	void		*map;
	size_t		mapsz;
	size_t		dmphdrsz;
	Elf64_Ehdr	*eh;
	Elf64_Phdr	*ph;
};

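/*
 * Check that the header describes a 64-bit PPC64 ELF core (ET_CORE)
 * of the current ELF version, in either byte order, with the
 * standalone OSABI used for kernel dumps.
 */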
static int
valid_elf_header(kvm_t *kd, Elf64_Ehdr *eh)
{

	if (!IS_ELF(*eh))
		return (0);
	if (eh->e_ident[EI_CLASS] != ELFCLASS64)
		return (0);
	if (eh->e_ident[EI_DATA] != ELFDATA2MSB &&
	    eh->e_ident[EI_DATA] != ELFDATA2LSB)
		return (0);
	if (eh->e_ident[EI_VERSION] != EV_CURRENT)
		return (0);
	if (eh->e_ident[EI_OSABI] != ELFOSABI_STANDALONE)
		return (0);
	if (_kvm16toh(kd, eh->e_type) != ET_CORE)
		return (0);
	if (_kvm16toh(kd, eh->e_machine) != EM_PPC64)
		return (0);
	/* Can't think of anything else to check... */
	return (1);
}

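/*
 * If the buffer starts with a kerneldump header for a powerpc64 dump
 * (either byte order), return the header's size so the caller can skip
 * it; return 0 otherwise.
 */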
static size_t
dump_header_size(struct kerneldumpheader *dh)
{

	if (strcmp(dh->magic, KERNELDUMPMAGIC) != 0)
		return (0);
	if (strcmp(dh->architecture, "powerpc64") != 0 &&
	    strcmp(dh->architecture, "powerpc64le") != 0)
		return (0);
	/* That should do it... */
	return (sizeof(*dh));
}

/*
 * Map the ELF headers into the process' address space. We do this in two
 * steps: first the ELF header itself, then, using the sizes and offsets
 * it supplies, the whole set of headers.
 */
static int
powerpc_maphdrs(kvm_t *kd)
{
	struct vmstate *vm;
	size_t mapsz;

	vm = kd->vmst;

	vm->mapsz = sizeof(*vm->eh) + sizeof(struct kerneldumpheader);
	vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->map == MAP_FAILED) {
		_kvm_err(kd, kd->program, "cannot map corefile");
		return (-1);
	}
	vm->dmphdrsz = 0;
	vm->eh = vm->map;
	if (!valid_elf_header(kd, vm->eh)) {
		/*
		 * Hmmm, no ELF header. Maybe we still have a dump header.
		 * This is normal when the core file wasn't created by
		 * savecore(8), but instead was dumped over TFTP. We can
		 * easily skip the dump header...
		 */
		vm->dmphdrsz = dump_header_size(vm->map);
		if (vm->dmphdrsz == 0)
			goto inval;
		vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz);
		if (!valid_elf_header(kd, vm->eh))
			goto inval;
	}
	mapsz = _kvm16toh(kd, vm->eh->e_phentsize) *
	    _kvm16toh(kd, vm->eh->e_phnum) + _kvm64toh(kd, vm->eh->e_phoff);
	munmap(vm->map, vm->mapsz);

	/* Map all headers. */
	vm->mapsz = vm->dmphdrsz + mapsz;
	vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->map == MAP_FAILED) {
		_kvm_err(kd, kd->program, "cannot map corefile headers");
		return (-1);
	}
	vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz);
	vm->ph = (void *)((uintptr_t)vm->eh +
	    (uintptr_t)_kvm64toh(kd, vm->eh->e_phoff));
	return (0);

 inval:
	_kvm_err(kd, kd->program, "invalid corefile");
	return (-1);
}

/*
 * Determine the offset within the corefile corresponding to the virtual
 * address. Return the number of contiguous bytes in the corefile or
 * 0 when the virtual address is invalid.
 */
static size_t
powerpc64_va2off(kvm_t *kd, kvaddr_t va, off_t *ofs)
{
	struct vmstate *vm = kd->vmst;
	Elf64_Phdr *ph;
	int nph;

	ph = vm->ph;
	nph = _kvm16toh(kd, vm->eh->e_phnum);
	while (nph && (va < _kvm64toh(kd, ph->p_vaddr) ||
	    va >= _kvm64toh(kd, ph->p_vaddr) + _kvm64toh(kd, ph->p_memsz))) {
		nph--;
		ph = (void *)((uintptr_t)ph +
		    _kvm16toh(kd, vm->eh->e_phentsize));
	}
	if (nph == 0)
		return (0);

	/* Segment found. Return file offset and range. */
	*ofs = vm->dmphdrsz + _kvm64toh(kd, ph->p_offset) +
	    (va - _kvm64toh(kd, ph->p_vaddr));
	return (_kvm64toh(kd, ph->p_memsz) -
	    (va - _kvm64toh(kd, ph->p_vaddr)));
}

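/* Release the header mapping and the per-handle state. */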
static void
_powerpc64_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	if (vm->eh != MAP_FAILED)
		munmap(vm->eh, vm->mapsz);
	free(vm);
	kd->vmst = NULL;
}

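/*
 * Both byte orders share the ELF class/machine probe; they are told
 * apart by the byte order recorded in the kernel image's ELF
 * identification.
 */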
static int
_powerpc64_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_PPC64) &&
	    kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB);
}

static int
_powerpc64le_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_PPC64) &&
	    kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB);
}

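/* Allocate the per-handle state and map the corefile headers. */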
static int
_powerpc64_initvtop(kvm_t *kd)
{

	kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
	if (kd->vmst == NULL)
		return (-1);

	if (powerpc_maphdrs(kd) == -1)
		return (-1);

	return (0);
}

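/*
 * Translate a kernel virtual address to a corefile offset. Dumps
 * written by the powerpc64 kernel carry no usable physical addresses:
 * the first program header has p_paddr set to all ones and segments
 * are located by virtual address instead. Anything else would be a
 * raw physical-address corefile, which is not supported.
 */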
static int
_powerpc64_kvatop(kvm_t *kd, kvaddr_t va, off_t *ofs)
{
	struct vmstate *vm;

	vm = kd->vmst;
	if (_kvm64toh(kd, vm->ph->p_paddr) == 0xffffffffffffffff)
		return ((int)powerpc64_va2off(kd, va, ofs));

	_kvm_err(kd, kd->program, "Raw corefile not supported");
	return (0);
}

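/*
 * A corefile is native only when both the architecture and the byte
 * order match the host's.
 */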
static int
_powerpc64_native(kvm_t *kd __unused)
{

#if defined(__powerpc64__) && BYTE_ORDER == BIG_ENDIAN
	return (1);
#else
	return (0);
#endif
}

static int
_powerpc64le_native(kvm_t *kd __unused)
{

#if defined(__powerpc64__) && BYTE_ORDER == LITTLE_ENDIAN
	return (1);
#else
	return (0);
#endif
}

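/*
 * The big- and little-endian flavors reuse the same vtop machinery;
 * only the probe and native checks differ.
 */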
static struct kvm_arch kvm_powerpc64 = {
	.ka_probe = _powerpc64_probe,
	.ka_initvtop = _powerpc64_initvtop,
	.ka_freevtop = _powerpc64_freevtop,
	.ka_kvatop = _powerpc64_kvatop,
	.ka_native = _powerpc64_native,
};

static struct kvm_arch kvm_powerpc64le = {
	.ka_probe = _powerpc64le_probe,
	.ka_initvtop = _powerpc64_initvtop,
	.ka_freevtop = _powerpc64_freevtop,
	.ka_kvatop = _powerpc64_kvatop,
	.ka_native = _powerpc64le_native,
};

KVM_ARCH(kvm_powerpc64);
KVM_ARCH(kvm_powerpc64le);
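
/*
 * For illustration only: a minimal sketch of how these handlers are
 * reached through the public libkvm interface. The corefile path and
 * "some_kva" below are hypothetical; kvm_read2(3) is what ends up
 * calling _powerpc64_kvatop() for the address translation:
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	uint64_t val;
 *	kvm_t *kd;
 *
 *	kd = kvm_open2("/boot/kernel/kernel", "/var/crash/vmcore.0",
 *	    O_RDONLY, errbuf, NULL);
 *	if (kd == NULL)
 *		errx(1, "kvm_open2: %s", errbuf);
 *	if (kvm_read2(kd, some_kva, &val, sizeof(val)) != sizeof(val))
 *		errx(1, "kvm_read2: %s", kvm_geterr(kd));
 *	kvm_close(kd);
 */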