/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2008, Juniper Networks, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kerneldump.h>
#include <sys/mman.h>

#include <elf.h>
#include <kvm.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "kvm_private.h"

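/*
 * Per-core-file state: the mmap(2)'d header region and pointers into it.
 */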
struct vmstate {
	void		*map;
	size_t		mapsz;
	size_t		dmphdrsz;
	Elf64_Ehdr	*eh;
	Elf64_Phdr	*ph;
};

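/*
 * Check that the ELF header describes a powerpc64 kernel core file,
 * in either byte order.
 */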
static int
valid_elf_header(kvm_t *kd, Elf64_Ehdr *eh)
{

	if (!IS_ELF(*eh))
		return (0);
	if (eh->e_ident[EI_CLASS] != ELFCLASS64)
		return (0);
	if (eh->e_ident[EI_DATA] != ELFDATA2MSB &&
	    eh->e_ident[EI_DATA] != ELFDATA2LSB)
		return (0);
	if (eh->e_ident[EI_VERSION] != EV_CURRENT)
		return (0);
	if (eh->e_ident[EI_OSABI] != ELFOSABI_STANDALONE)
		return (0);
	if (_kvm16toh(kd, eh->e_type) != ET_CORE)
		return (0);
	if (_kvm16toh(kd, eh->e_machine) != EM_PPC64)
		return (0);
	/* Can't think of anything else to check... */
	return (1);
}

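/*
 * Return the size of the kerneldumpheader prepended to the core file,
 * or 0 if the buffer does not start with a valid powerpc64 dump header.
 */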
static size_t
dump_header_size(struct kerneldumpheader *dh)
{

	if (strcmp(dh->magic, KERNELDUMPMAGIC) != 0)
		return (0);
	if (strcmp(dh->architecture, "powerpc64") != 0 &&
	    strcmp(dh->architecture, "powerpc64le") != 0)
		return (0);
	/* That should do it... */
	return (sizeof(*dh));
}

/*
 * Map the ELF headers into the process's address space. We do this in
 * two steps: first the ELF header itself, then, using that information,
 * the whole set of headers.
 */
static int
powerpc_maphdrs(kvm_t *kd)
{
	struct vmstate *vm;
	size_t mapsz;

	vm = kd->vmst;

	vm->mapsz = sizeof(*vm->eh) + sizeof(struct kerneldumpheader);
	vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->map == MAP_FAILED) {
		_kvm_err(kd, kd->program, "cannot map corefile");
		return (-1);
	}
	vm->dmphdrsz = 0;
	vm->eh = vm->map;
	if (!valid_elf_header(kd, vm->eh)) {
		/*
		 * Hmmm, no ELF header. Maybe we still have a dump header.
		 * This is normal when the core file wasn't created by
		 * savecore(8), but instead was dumped over TFTP. We can
		 * easily skip the dump header...
		 */
		vm->dmphdrsz = dump_header_size(vm->map);
		if (vm->dmphdrsz == 0)
			goto inval;
		vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz);
		if (!valid_elf_header(kd, vm->eh))
			goto inval;
	}
	mapsz = _kvm16toh(kd, vm->eh->e_phentsize) *
	    _kvm16toh(kd, vm->eh->e_phnum) + _kvm64toh(kd, vm->eh->e_phoff);
	munmap(vm->map, vm->mapsz);

	/* Map all headers. */
	vm->mapsz = vm->dmphdrsz + mapsz;
	vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->map == MAP_FAILED) {
		_kvm_err(kd, kd->program, "cannot map corefile headers");
		return (-1);
	}
	vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz);
	vm->ph = (void *)((uintptr_t)vm->eh +
	    (uintptr_t)_kvm64toh(kd, vm->eh->e_phoff));
	return (0);

 inval:
	_kvm_err(kd, kd->program, "invalid corefile");
	return (-1);
}

/*
 * Determine the offset within the corefile corresponding to the virtual
 * address. Return the number of contiguous bytes in the corefile or
 * 0 when the virtual address is invalid.
 */
static size_t
powerpc64_va2off(kvm_t *kd, kvaddr_t va, off_t *ofs)
{
	struct vmstate *vm = kd->vmst;
	Elf64_Phdr *ph;
	int nph;

	ph = vm->ph;
	nph = _kvm16toh(kd, vm->eh->e_phnum);
	while (nph && (va < _kvm64toh(kd, ph->p_vaddr) ||
	    va >= _kvm64toh(kd, ph->p_vaddr) + _kvm64toh(kd, ph->p_memsz))) {
		nph--;
		ph = (void *)((uintptr_t)ph +
		    _kvm16toh(kd, vm->eh->e_phentsize));
	}
	if (nph == 0)
		return (0);

	/* Segment found. Return file offset and range. */
	*ofs = vm->dmphdrsz + _kvm64toh(kd, ph->p_offset) +
	    (va - _kvm64toh(kd, ph->p_vaddr));
	return (_kvm64toh(kd, ph->p_memsz) -
	    (va - _kvm64toh(kd, ph->p_vaddr)));
}

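/*
 * Tear down the per-core-file state: unmap the headers and free the
 * vmstate.
 */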
static void
_powerpc64_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	/*
	 * Unmap from the mapping base: vm->eh may point past a leading
	 * dump header and thus not be page-aligned, and it is left stale
	 * if the second mmap() in powerpc_maphdrs() fails.
	 */
	if (vm->map != MAP_FAILED)
		munmap(vm->map, vm->mapsz);
	free(vm);
	kd->vmst = NULL;
}

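/* Probe for a big-endian powerpc64 kernel. */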
static int
_powerpc64_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_PPC64) &&
	    kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB);
}

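/* Probe for a little-endian powerpc64 kernel. */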
static int
_powerpc64le_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_PPC64) &&
	    kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB);
}

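/*
 * Allocate the vmstate and map the core file's ELF (and any dump)
 * headers.
 */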
static int
_powerpc64_initvtop(kvm_t *kd)
{

	kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
	if (kd->vmst == NULL)
		return (-1);

	if (powerpc_maphdrs(kd) == -1)
		return (-1);

	return (0);
}

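/*
 * Translate a kernel virtual address to a core file offset. Only
 * virtual dumps (p_paddr of the first program header set to ~0) are
 * supported.
 */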
static int
_powerpc64_kvatop(kvm_t *kd, kvaddr_t va, off_t *ofs)
{
	struct vmstate *vm;

	vm = kd->vmst;
	if (_kvm64toh(kd, vm->ph->p_paddr) == 0xffffffffffffffff)
		return ((int)powerpc64_va2off(kd, va, ofs));

	_kvm_err(kd, kd->program, "Raw corefile not supported");
	return (0);
}

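/* True when running natively on big-endian powerpc64. */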
static int
_powerpc64_native(kvm_t *kd __unused)
{

#if defined(__powerpc64__) && BYTE_ORDER == BIG_ENDIAN
	return (1);
#else
	return (0);
#endif
}

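/* True when running natively on little-endian powerpc64. */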
static int
_powerpc64le_native(kvm_t *kd __unused)
{

#if defined(__powerpc64__) && BYTE_ORDER == LITTLE_ENDIAN
	return (1);
#else
	return (0);
#endif
}

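/*
 * Method tables for the big- and little-endian powerpc64 backends; they
 * differ only in their probe and native hooks.
 */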
static struct kvm_arch kvm_powerpc64 = {
	.ka_probe = _powerpc64_probe,
	.ka_initvtop = _powerpc64_initvtop,
	.ka_freevtop = _powerpc64_freevtop,
	.ka_kvatop = _powerpc64_kvatop,
	.ka_native = _powerpc64_native,
};

static struct kvm_arch kvm_powerpc64le = {
	.ka_probe = _powerpc64le_probe,
	.ka_initvtop = _powerpc64_initvtop,
	.ka_freevtop = _powerpc64_freevtop,
	.ka_kvatop = _powerpc64_kvatop,
	.ka_native = _powerpc64le_native,
};

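/* Register both flavors in libkvm's kvm_arch linker set. */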
KVM_ARCH(kvm_powerpc64);
KVM_ARCH(kvm_powerpc64le);
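
/*
 * Usage sketch (illustrative, not part of this backend): consumers never
 * call these hooks directly. Opening a core file with kvm_open2(3)
 * selects the matching kvm_arch above, after which kvm_read2(3) uses
 * ka_kvatop to translate kernel virtual addresses:
 *
 *	kvm_t *kd;
 *	char errbuf[_POSIX2_LINE_MAX];
 *
 *	kd = kvm_open2(NULL, "/var/crash/vmcore.0", O_RDONLY, errbuf, NULL);
 *	if (kd != NULL) {
 *		(void)kvm_read2(kd, va, buf, len);
 *		kvm_close(kd);
 *	}
 */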