/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2013 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2010 Juniper Networks, Inc.
 * Copyright (c) 2009 Robert N. M. Watson
 * Copyright (c) 2009 Bjoern A. Zeeb <bz@FreeBSD.org>
 * Copyright (c) 2008 Yahoo!, Inc.
 * All rights reserved.
 *
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * This software was developed by Robert N. M. Watson under contract
 * to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/sysctl.h>
#include <kvm.h>
#include <limits.h>
#include <stdlib.h>

#include "kvm_private.h"

#ifdef __amd64__
#define	__OFFSET_BY_PCPU
#endif

static struct nlist kvm_pcpu_nl[] = {
	{ .n_name = "_cpuid_to_pcpu" },
	{ .n_name = "_mp_maxcpus" },
	{ .n_name = "_mp_ncpus" },
#ifdef __OFFSET_BY_PCPU
	{ .n_name = "___pcpu" },
#endif
	{ .n_name = NULL },
};
#define	NL_CPUID_TO_PCPU	0
#define	NL_MP_MAXCPUS		1
#define	NL_MP_NCPUS		2
#define	NL___PCPU		3

/*
 * Kernel per-CPU data state.  We cache this stuff on the first access.
 *
 * XXXRW: Possibly, this (and kvm_pcpu_nl) should be per-kvm_t, in case the
 * consumer has multiple handles in flight to differently configured
 * kernels/crashdumps.
 */
static void **pcpu_data;
static int maxcpu;
static int mp_ncpus;
#ifdef __OFFSET_BY_PCPU
static unsigned long __pcpu;
#endif

static int
_kvm_pcpu_init(kvm_t *kd)
{
	size_t len;
	int max;
	void *data;

	if (kvm_nlist(kd, kvm_pcpu_nl) < 0)
		return (-1);
	if (kvm_pcpu_nl[NL_CPUID_TO_PCPU].n_value == 0) {
		_kvm_err(kd, kd->program, "unable to find cpuid_to_pcpu");
		return (-1);
	}
	if (kvm_pcpu_nl[NL_MP_MAXCPUS].n_value == 0) {
		_kvm_err(kd, kd->program, "unable to find mp_maxcpus");
		return (-1);
	}
	if (kvm_read(kd, kvm_pcpu_nl[NL_MP_MAXCPUS].n_value, &max,
	    sizeof(max)) != sizeof(max)) {
		_kvm_err(kd, kd->program, "cannot read mp_maxcpus");
		return (-1);
	}
	if (kvm_pcpu_nl[NL_MP_NCPUS].n_value == 0) {
		_kvm_err(kd, kd->program, "unable to find mp_ncpus");
		return (-1);
	}
	if (kvm_read(kd, kvm_pcpu_nl[NL_MP_NCPUS].n_value, &mp_ncpus,
	    sizeof(mp_ncpus)) != sizeof(mp_ncpus)) {
		_kvm_err(kd, kd->program, "cannot read mp_ncpus");
		return (-1);
	}
#ifdef __OFFSET_BY_PCPU
	if (kvm_pcpu_nl[NL___PCPU].n_value == 0) {
		_kvm_err(kd, kd->program, "unable to find __pcpu");
		return (-1);
	}
	if (kvm_read(kd, kvm_pcpu_nl[NL___PCPU].n_value, &__pcpu,
	    sizeof(__pcpu)) != sizeof(__pcpu)) {
		_kvm_err(kd, kd->program, "cannot read __pcpu");
		return (-1);
	}
#endif
	len = max * sizeof(void *);
	data = malloc(len);
	if (data == NULL) {
		_kvm_err(kd, kd->program, "out of memory");
		return (-1);
	}
	if (kvm_read(kd, kvm_pcpu_nl[NL_CPUID_TO_PCPU].n_value, data, len) !=
	    (ssize_t)len) {
		_kvm_err(kd, kd->program, "cannot read cpuid_to_pcpu array");
		free(data);
		return (-1);
	}
	pcpu_data = data;
	maxcpu = max;
	return (0);
}

static void
_kvm_pcpu_clear(void)
{

	maxcpu = 0;
	free(pcpu_data);
	pcpu_data = NULL;
}

void *
kvm_getpcpu(kvm_t *kd, int cpu)
{
	char *buf;

	if (kd == NULL) {
		_kvm_pcpu_clear();
		return (NULL);
	}

	if (maxcpu == 0)
		if (_kvm_pcpu_init(kd) < 0)
			return ((void *)-1);

	if (cpu >= maxcpu || pcpu_data[cpu] == NULL)
		return (NULL);

	buf = malloc(sizeof(struct pcpu));
	if (buf == NULL) {
		_kvm_err(kd, kd->program, "out of memory");
		return ((void *)-1);
	}
	if (kvm_read(kd, (uintptr_t)pcpu_data[cpu], buf,
	    sizeof(struct pcpu)) != sizeof(struct pcpu)) {
		_kvm_err(kd, kd->program, "unable to read per-CPU data");
		free(buf);
		return ((void *)-1);
	}
	return (buf);
}

int
kvm_getmaxcpu(kvm_t *kd)
{

	if (kd == NULL) {
		_kvm_pcpu_clear();
		return (0);
	}

	if (maxcpu == 0)
		if (_kvm_pcpu_init(kd) < 0)
			return (-1);
	return (maxcpu);
}

int
kvm_getncpus(kvm_t *kd)
{

	if (mp_ncpus == 0)
		if (_kvm_pcpu_init(kd) < 0)
			return (-1);
	return (mp_ncpus);
}
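
/*
 * Illustrative sketch of consumer-side usage (not part of the library):
 * kvm_getmaxcpu() bounds the cpuid space and kvm_getpcpu() returns a
 * malloc()ed copy of the requested CPU's struct pcpu, NULL if that CPU is
 * absent, or (void *)-1 on error.
 *
 *	struct pcpu *pc;
 *	int cpu, max;
 *
 *	max = kvm_getmaxcpu(kd);
 *	for (cpu = 0; cpu < max; cpu++) {
 *		pc = kvm_getpcpu(kd, cpu);
 *		if (pc == NULL)
 *			continue;
 *		if (pc == (void *)-1)
 *			break;
 *		printf("cpu %d present\n", pc->pc_cpuid);
 *		free(pc);
 *	}
 */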

static int
_kvm_dpcpu_setcpu(kvm_t *kd, u_int cpu, int report_error)
{

	if (!kd->dpcpu_initialized) {
		if (report_error)
			_kvm_err(kd, kd->program, "%s: not initialized",
			    __func__);
		return (-1);
	}
	if (cpu >= kd->dpcpu_maxcpus) {
		if (report_error)
			_kvm_err(kd, kd->program, "%s: CPU %u too big",
			    __func__, cpu);
		return (-1);
	}
	if (kd->dpcpu_off[cpu] == 0) {
		if (report_error)
			_kvm_err(kd, kd->program, "%s: CPU %u not found",
			    __func__, cpu);
		return (-1);
	}
	kd->dpcpu_curcpu = cpu;
	kd->dpcpu_curoff = kd->dpcpu_off[cpu];
	return (0);
}

/*
 * Set up libkvm to handle dynamic per-CPU memory.
 */
static int
_kvm_dpcpu_init(kvm_t *kd)
{
	struct kvm_nlist nl[] = {
#define	NLIST_START_SET_PCPU	0
		{ .n_name = "___start_" DPCPU_SETNAME },
#define	NLIST_STOP_SET_PCPU	1
		{ .n_name = "___stop_" DPCPU_SETNAME },
#define	NLIST_DPCPU_OFF		2
		{ .n_name = "_dpcpu_off" },
#define	NLIST_MP_MAXCPUS	3
		{ .n_name = "_mp_maxcpus" },
		{ .n_name = NULL },
	};
	uintptr_t *dpcpu_off_buf;
	size_t len;
	u_int dpcpu_maxcpus;

	/*
	 * XXX: This only works for native kernels for now.
	 */
	if (!kvm_native(kd))
		return (-1);

	/*
	 * Locate and cache locations of important symbols using the internal
	 * version of _kvm_nlist, turning off initialization to avoid
	 * recursion in case of unresolvable symbols.
	 */
	if (_kvm_nlist(kd, nl, 0) != 0)
		return (-1);
	if (kvm_read(kd, nl[NLIST_MP_MAXCPUS].n_value, &dpcpu_maxcpus,
	    sizeof(dpcpu_maxcpus)) != sizeof(dpcpu_maxcpus))
		return (-1);
	len = dpcpu_maxcpus * sizeof(*dpcpu_off_buf);
	dpcpu_off_buf = malloc(len);
	if (dpcpu_off_buf == NULL)
		return (-1);
	if (kvm_read(kd, nl[NLIST_DPCPU_OFF].n_value, dpcpu_off_buf, len) !=
	    (ssize_t)len) {
		free(dpcpu_off_buf);
		return (-1);
	}
	kd->dpcpu_start = nl[NLIST_START_SET_PCPU].n_value;
	kd->dpcpu_stop = nl[NLIST_STOP_SET_PCPU].n_value;
	kd->dpcpu_maxcpus = dpcpu_maxcpus;
	kd->dpcpu_off = dpcpu_off_buf;
	kd->dpcpu_initialized = 1;
	(void)_kvm_dpcpu_setcpu(kd, 0, 0);
	return (0);
}

/*
 * Check whether the dpcpu module has been initialized successfully; if not,
 * initialize it if permitted.
 */
int
_kvm_dpcpu_initialized(kvm_t *kd, int initialize)
{

	if (kd->dpcpu_initialized || !initialize)
		return (kd->dpcpu_initialized);

	(void)_kvm_dpcpu_init(kd);

	return (kd->dpcpu_initialized);
}

/*
 * Check whether the value falls within the dpcpu symbol range and, only if
 * so, rebase it against the offset of the currently selected CPU.
 */
kvaddr_t
_kvm_dpcpu_validaddr(kvm_t *kd, kvaddr_t value)
{

	if (value == 0)
		return (value);

	if (!kd->dpcpu_initialized)
		return (value);

	if (value < kd->dpcpu_start || value >= kd->dpcpu_stop)
		return (value);

	return (kd->dpcpu_curoff + value);
}
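
/*
 * Worked illustration with hypothetical numbers: if the kernel's DPCPU
 * linker set spans [dpcpu_start = 0x1000, dpcpu_stop = 0x2000) and the
 * offset cached for the currently selected CPU is dpcpu_curoff =
 * 0xffffff8000400000, then a symbol value of 0x1040 is translated to
 * 0xffffff8000401040, while any value outside the set range is returned
 * unchanged.
 */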

int
kvm_dpcpu_setcpu(kvm_t *kd, u_int cpu)
{
	int ret;

	if (!kd->dpcpu_initialized) {
		ret = _kvm_dpcpu_init(kd);
		if (ret != 0) {
			_kvm_err(kd, kd->program, "%s: init failed",
			    __func__);
			return (ret);
		}
	}

	return (_kvm_dpcpu_setcpu(kd, cpu, 1));
}
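
/*
 * Illustrative sketch of consumer-side usage (not part of the library, and
 * assuming that kvm_nlist2() rebases symbols falling inside the DPCPU set
 * against the CPU most recently selected here, so the lookup is repeated
 * per CPU; the symbol name below is hypothetical):
 *
 *	struct kvm_nlist dnl[] = {
 *		{ .n_name = "_pcpu_entry_example" },
 *		{ .n_name = NULL },
 *	};
 *	uint64_t v;
 *	int cpu;
 *
 *	for (cpu = 0; cpu < kvm_getmaxcpu(kd); cpu++) {
 *		if (kvm_dpcpu_setcpu(kd, cpu) != 0)
 *			continue;
 *		if (kvm_nlist2(kd, dnl) != 0)
 *			break;
 *		if (kvm_read2(kd, dnl[0].n_value, &v, sizeof(v)) == sizeof(v))
 *			printf("cpu %d: %ju\n", cpu, (uintmax_t)v);
 *	}
 */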

/*
 * Obtain a per-CPU copy for a given cpu from a UMA_ZONE_PCPU allocation.
 */
ssize_t
kvm_read_zpcpu(kvm_t *kd, u_long base, void *buf, size_t size, int cpu)
{

	if (!kvm_native(kd))
		return (-1);
	if (mp_ncpus == 0)
		if (_kvm_pcpu_init(kd) < 0)
			return (0);

#ifdef __OFFSET_BY_PCPU
	base += __pcpu;
#endif
	return (kvm_read(kd, (uintptr_t)(base + sizeof(struct pcpu) * cpu),
	    buf, size));
}
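
/*
 * Layout note for the arithmetic above: the per-CPU copies of a
 * UMA_ZONE_PCPU item are assumed to be spaced sizeof(struct pcpu) bytes
 * apart, so CPU n's copy of an object at `base' is read from
 * base + n * sizeof(struct pcpu) and, where __OFFSET_BY_PCPU is defined,
 * additionally shifted by the __pcpu value cached in _kvm_pcpu_init().
 */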

/*
 * Fetch the value of a counter(9).
 */
uint64_t
kvm_counter_u64_fetch(kvm_t *kd, u_long base)
{
	uint64_t r, c;

	if (mp_ncpus == 0)
		if (_kvm_pcpu_init(kd) < 0)
			return (0);

	r = 0;
	for (int i = 0; i < mp_ncpus; i++) {
		if (kvm_read_zpcpu(kd, base, &c, sizeof(c), i) != sizeof(c))
			return (0);
		r += c;
	}

	return (r);
}
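
/*
 * Illustrative sketch of consumer-side usage (not part of the library; the
 * symbol name is hypothetical): a counter_u64_t variable in the kernel
 * holds the base of its per-CPU storage, so a consumer first reads that
 * value and then hands it to kvm_counter_u64_fetch().
 *
 *	struct nlist cnl[] = {
 *		{ .n_name = "_example_counter" },
 *		{ .n_name = NULL },
 *	};
 *	u_long base;
 *	uint64_t total;
 *
 *	if (kvm_nlist(kd, cnl) == 0 &&
 *	    kvm_read(kd, cnl[0].n_value, &base, sizeof(base)) ==
 *	    sizeof(base)) {
 *		total = kvm_counter_u64_fetch(kd, base);
 *		printf("example_counter: %ju\n", (uintmax_t)total);
 *	}
 */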