xref: /freebsd/lib/libkvm/kvm_pcpu.c (revision 1f4bcc459a76b7aa664f3fd557684cd0ba6da352)
/*-
 * Copyright (c) 2013 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2010 Juniper Networks, Inc.
 * Copyright (c) 2009 Robert N. M. Watson
 * Copyright (c) 2009 Bjoern A. Zeeb <bz@FreeBSD.org>
 * Copyright (c) 2008 Yahoo!, Inc.
 * All rights reserved.
 *
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * This software was developed by Robert N. M. Watson under contract
 * to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/sysctl.h>
#include <kvm.h>
#include <limits.h>
#include <stdlib.h>

#include "kvm_private.h"

static struct nlist kvm_pcpu_nl[] = {
	{ .n_name = "_cpuid_to_pcpu" },
	{ .n_name = "_mp_maxcpus" },
	{ .n_name = "_mp_ncpus" },
	{ .n_name = NULL },
};
#define	NL_CPUID_TO_PCPU	0
#define	NL_MP_MAXCPUS		1
#define	NL_MP_NCPUS		2

/*
 * Kernel per-CPU data state.  We cache this stuff on the first
 * access.
 *
 * XXXRW: Possibly, this (and kvm_pcpu_nl) should be per-kvm_t, in case the
 * consumer has multiple handles in flight to differently configured
 * kernels/crashdumps.
 */
static void **pcpu_data;
static int maxcpu;
static int mp_ncpus;

static int
_kvm_pcpu_init(kvm_t *kd)
{
	size_t len;
	int max;
	void *data;

	if (kvm_nlist(kd, kvm_pcpu_nl) < 0)
		return (-1);
	if (kvm_pcpu_nl[NL_CPUID_TO_PCPU].n_value == 0) {
		_kvm_err(kd, kd->program, "unable to find cpuid_to_pcpu");
		return (-1);
	}
	if (kvm_pcpu_nl[NL_MP_MAXCPUS].n_value == 0) {
		_kvm_err(kd, kd->program, "unable to find mp_maxcpus");
		return (-1);
	}
	if (kvm_read(kd, kvm_pcpu_nl[NL_MP_MAXCPUS].n_value, &max,
	    sizeof(max)) != sizeof(max)) {
		_kvm_err(kd, kd->program, "cannot read mp_maxcpus");
		return (-1);
	}
	if (kvm_pcpu_nl[NL_MP_NCPUS].n_value == 0) {
		_kvm_err(kd, kd->program, "unable to find mp_ncpus");
		return (-1);
	}
	if (kvm_read(kd, kvm_pcpu_nl[NL_MP_NCPUS].n_value, &mp_ncpus,
	    sizeof(mp_ncpus)) != sizeof(mp_ncpus)) {
		_kvm_err(kd, kd->program, "cannot read mp_ncpus");
		return (-1);
	}
	len = max * sizeof(void *);
	data = malloc(len);
	if (data == NULL) {
		_kvm_err(kd, kd->program, "out of memory");
		return (-1);
	}
	if (kvm_read(kd, kvm_pcpu_nl[NL_CPUID_TO_PCPU].n_value, data, len) !=
	   (ssize_t)len) {
		_kvm_err(kd, kd->program, "cannot read cpuid_to_pcpu array");
		free(data);
		return (-1);
	}
	pcpu_data = data;
	maxcpu = max;
	return (0);
}

static void
_kvm_pcpu_clear(void)
{

	maxcpu = 0;
	free(pcpu_data);
	pcpu_data = NULL;
}

void *
kvm_getpcpu(kvm_t *kd, int cpu)
{
	char *buf;

	if (kd == NULL) {
		_kvm_pcpu_clear();
		return (NULL);
	}

	if (maxcpu == 0)
		if (_kvm_pcpu_init(kd) < 0)
			return ((void *)-1);

	if (cpu >= maxcpu || pcpu_data[cpu] == NULL)
		return (NULL);

	buf = malloc(sizeof(struct pcpu));
	if (buf == NULL) {
		_kvm_err(kd, kd->program, "out of memory");
		return ((void *)-1);
	}
	if (kvm_read(kd, (uintptr_t)pcpu_data[cpu], buf,
	    sizeof(struct pcpu)) != sizeof(struct pcpu)) {
		_kvm_err(kd, kd->program, "unable to read per-CPU data");
		free(buf);
		return ((void *)-1);
	}
	return (buf);
}

int
kvm_getmaxcpu(kvm_t *kd)
{

	if (kd == NULL) {
		_kvm_pcpu_clear();
		return (0);
	}

	if (maxcpu == 0)
		if (_kvm_pcpu_init(kd) < 0)
			return (-1);
	return (maxcpu);
}

int
kvm_getncpus(kvm_t *kd)
{

	if (mp_ncpus == 0)
		if (_kvm_pcpu_init(kd) < 0)
			return (-1);
	return (mp_ncpus);
}
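
/*
 * Illustrative sketch (not part of the library): how a consumer might walk
 * the per-CPU structures of a kernel or crash dump with kvm_getmaxcpu() and
 * kvm_getpcpu().  The handle "kd" is assumed to come from kvm_openfiles(3),
 * and error handling is abbreviated; note that kvm_getpcpu() distinguishes
 * an absent CPU (NULL) from a read or allocation failure ((void *)-1).
 *
 *	int i, max;
 *	struct pcpu *pc;
 *
 *	max = kvm_getmaxcpu(kd);
 *	for (i = 0; i < max; i++) {
 *		pc = kvm_getpcpu(kd, i);
 *		if (pc == NULL)			// CPU i not present
 *			continue;
 *		if (pc == (void *)-1)		// read or allocation error
 *			errx(1, "%s", kvm_geterr(kd));
 *		printf("CPU %d: pc_cpuid=%d\n", i, pc->pc_cpuid);
 *		free(pc);
 *	}
 */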

static int
_kvm_dpcpu_setcpu(kvm_t *kd, u_int cpu, int report_error)
{

	if (!kd->dpcpu_initialized) {
		if (report_error)
			_kvm_err(kd, kd->program, "%s: not initialized",
			    __func__);
		return (-1);
	}
	if (cpu >= kd->dpcpu_maxcpus) {
		if (report_error)
			_kvm_err(kd, kd->program, "%s: CPU %u too big",
			    __func__, cpu);
		return (-1);
	}
	if (kd->dpcpu_off[cpu] == 0) {
		if (report_error)
			_kvm_err(kd, kd->program, "%s: CPU %u not found",
			    __func__, cpu);
		return (-1);
	}
	kd->dpcpu_curcpu = cpu;
	kd->dpcpu_curoff = kd->dpcpu_off[cpu];
	return (0);
}

/*
 * Set up libkvm to handle dynamic per-CPU memory.
 */
static int
_kvm_dpcpu_init(kvm_t *kd)
{
	struct kvm_nlist nl[] = {
#define	NLIST_START_SET_PCPU	0
		{ .n_name = "___start_" DPCPU_SETNAME },
#define	NLIST_STOP_SET_PCPU	1
		{ .n_name = "___stop_" DPCPU_SETNAME },
#define	NLIST_DPCPU_OFF		2
		{ .n_name = "_dpcpu_off" },
#define	NLIST_MP_MAXCPUS	3
		{ .n_name = "_mp_maxcpus" },
		{ .n_name = NULL },
	};
	uintptr_t *dpcpu_off_buf;
	size_t len;
	u_int dpcpu_maxcpus;

	/*
	 * XXX: This only works for native kernels for now.
	 */
	if (!kvm_native(kd))
		return (-1);

	/*
	 * Locate and cache locations of important symbols using the internal
	 * version of _kvm_nlist, turning off initialization to avoid
	 * recursion in case of unresolvable symbols.
	 */
	if (_kvm_nlist(kd, nl, 0) != 0)
		return (-1);
	if (kvm_read(kd, nl[NLIST_MP_MAXCPUS].n_value, &dpcpu_maxcpus,
	    sizeof(dpcpu_maxcpus)) != sizeof(dpcpu_maxcpus))
		return (-1);
	len = dpcpu_maxcpus * sizeof(*dpcpu_off_buf);
	dpcpu_off_buf = malloc(len);
	if (dpcpu_off_buf == NULL)
		return (-1);
	if (kvm_read(kd, nl[NLIST_DPCPU_OFF].n_value, dpcpu_off_buf, len) !=
	    (ssize_t)len) {
		free(dpcpu_off_buf);
		return (-1);
	}
	kd->dpcpu_start = nl[NLIST_START_SET_PCPU].n_value;
	kd->dpcpu_stop = nl[NLIST_STOP_SET_PCPU].n_value;
	kd->dpcpu_maxcpus = dpcpu_maxcpus;
	kd->dpcpu_off = dpcpu_off_buf;
	kd->dpcpu_initialized = 1;
	(void)_kvm_dpcpu_setcpu(kd, 0, 0);
	return (0);
}

/*
 * Check whether the dpcpu module has been initialized successfully or not,
 * initialize it if permitted.
 */
int
_kvm_dpcpu_initialized(kvm_t *kd, int initialize)
{

	if (kd->dpcpu_initialized || !initialize)
		return (kd->dpcpu_initialized);

	(void)_kvm_dpcpu_init(kd);

	return (kd->dpcpu_initialized);
}

/*
 * Check whether the value is within the dpcpu symbol range and only if so
 * adjust the offset relative to the current offset.
 */
kvaddr_t
_kvm_dpcpu_validaddr(kvm_t *kd, kvaddr_t value)
{

	if (value == 0)
		return (value);

	if (!kd->dpcpu_initialized)
		return (value);

	if (value < kd->dpcpu_start || value >= kd->dpcpu_stop)
		return (value);

	return (kd->dpcpu_curoff + value);
}

int
kvm_dpcpu_setcpu(kvm_t *kd, u_int cpu)
{
	int ret;

	if (!kd->dpcpu_initialized) {
		ret = _kvm_dpcpu_init(kd);
		if (ret != 0) {
			_kvm_err(kd, kd->program, "%s: init failed",
			    __func__);
			return (ret);
		}
	}

	return (_kvm_dpcpu_setcpu(kd, cpu, 1));
}
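
/*
 * Illustrative sketch (not part of the library): reading a DPCPU(9) variable
 * for each CPU.  The symbol name "pcpu_entry_foo" is hypothetical and stands
 * for a kernel variable declared with DPCPU_DEFINE(int, foo); "kd" is assumed
 * to come from kvm_openfiles(3).  Because the address translation done by
 * _kvm_dpcpu_validaddr() is applied when a symbol is resolved, the lookup is
 * repeated after selecting each CPU.
 *
 *	struct nlist nl[] = {
 *		{ .n_name = "pcpu_entry_foo" },
 *		{ .n_name = NULL },
 *	};
 *	int cpu, foo, maxcpu;
 *
 *	maxcpu = kvm_getmaxcpu(kd);
 *	for (cpu = 0; cpu < maxcpu; cpu++) {
 *		if (kvm_dpcpu_setcpu(kd, cpu) != 0)
 *			continue;		// CPU absent or init failure
 *		if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0)
 *			errx(1, "pcpu_entry_foo not found");
 *		if (kvm_read(kd, nl[0].n_value, &foo, sizeof(foo)) !=
 *		    sizeof(foo))
 *			errx(1, "%s", kvm_geterr(kd));
 *		printf("CPU %d: foo=%d\n", cpu, foo);
 *	}
 */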

/*
 * Obtain a per-CPU copy for a given CPU from a UMA_ZONE_PCPU allocation.
 */
ssize_t
kvm_read_zpcpu(kvm_t *kd, u_long base, void *buf, size_t size, int cpu)
{

	if (!kvm_native(kd))
		return (-1);
	return (kvm_read(kd, (uintptr_t)(base + sizeof(struct pcpu) * cpu),
	    buf, size));
}
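
/*
 * Illustrative sketch (not part of the library): given "base", the pointer to
 * a UMA_ZONE_PCPU allocation previously read out of the kernel (for example
 * via kvm_nlist(3) and kvm_read(3) on some pointer-valued symbol), fetch the
 * 64-bit slot belonging to CPU "cpu".  Both "base" and "cpu" are assumed to
 * have been obtained elsewhere.
 *
 *	uint64_t v;
 *
 *	if (kvm_read_zpcpu(kd, base, &v, sizeof(v), cpu) != sizeof(v))
 *		errx(1, "%s", kvm_geterr(kd));
 */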

/*
 * Fetch value of a counter(9).
 */
uint64_t
kvm_counter_u64_fetch(kvm_t *kd, u_long base)
{
	uint64_t r, c;

	if (mp_ncpus == 0)
		if (_kvm_pcpu_init(kd) < 0)
			return (0);

	r = 0;
	for (int i = 0; i < mp_ncpus; i++) {
		if (kvm_read_zpcpu(kd, base, &c, sizeof(c), i) != sizeof(c))
			return (0);
		r += c;
	}

	return (r);
}
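
/*
 * Illustrative sketch (not part of the library): summing a kernel counter(9).
 * The symbol name "_foo_counter" is hypothetical; a real consumer would
 * resolve whichever counter_u64_t variable it cares about, read the pointer
 * stored there, and hand that pointer to kvm_counter_u64_fetch(), which
 * returns 0 on error.
 *
 *	struct nlist nl[] = {
 *		{ .n_name = "_foo_counter" },
 *		{ .n_name = NULL },
 *	};
 *	u_long base;
 *	uint64_t total;
 *
 *	if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0)
 *		errx(1, "foo_counter not found");
 *	if (kvm_read(kd, nl[0].n_value, &base, sizeof(base)) != sizeof(base))
 *		errx(1, "%s", kvm_geterr(kd));
 *	total = kvm_counter_u64_fetch(kd, base);
 */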
355