xref: /freebsd/sys/kern/kern_pmc.c (revision 3c5ba95ad12285ad37c182a4bfc1b240ec6d18a7)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2003-2008 Joseph Koshy
5  * Copyright (c) 2007 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by A. Joseph Koshy under
9  * sponsorship from the FreeBSD Foundation and Google, Inc.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "opt_hwpmc_hooks.h"
37 
38 #include <sys/types.h>
39 #include <sys/ctype.h>
40 #include <sys/param.h>
41 #include <sys/malloc.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/pmc.h>
46 #include <sys/pmckern.h>
47 #include <sys/smp.h>
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 
51 #include <vm/vm.h>
52 #include <vm/vm_extern.h>
53 #include <vm/vm_kern.h>
54 
#ifdef	HWPMC_HOOKS
FEATURE(hwpmc_hooks, "Kernel support for HW PMC");
#define	PMC_KERNEL_VERSION	PMC_VERSION
#else
/* Hooks compiled out: advertise version 0 so hwpmc(4) refuses to attach. */
#define	PMC_KERNEL_VERSION	0
#endif

MALLOC_DECLARE(M_PMCHOOKS);
MALLOC_DEFINE(M_PMCHOOKS, "pmchooks", "Memory space for PMC hooks");

/* memory pool */
MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module");

/* PMC ABI version compiled into this kernel (0 without HWPMC_HOOKS). */
const int pmc_kernel_version = PMC_KERNEL_VERSION;

/* Hook variable.  Non-NULL iff the hwpmc(4) module is loaded (see below). */
int __read_mostly (*pmc_hook)(struct thread *td, int function, void *arg) = NULL;

/* Interrupt handler */
int __read_mostly (*pmc_intr)(struct trapframe *tf) = NULL;

/* Per-CPU flag; presumably set when a sample is pending on that CPU. */
DPCPU_DEFINE(uint8_t, pmc_sampled);

/*
 * A global count of SS mode PMCs.  When non-zero, this means that
 * we have processes that are sampling the system as a whole.
 */
volatile int pmc_ss_count;

/*
 * Since PMC(4) may not be loaded in the current kernel, the
 * convention followed is that a non-NULL value of 'pmc_hook' implies
 * the presence of this kernel module.
 *
 * This requires us to protect 'pmc_hook' with a
 * shared (sx) lock -- thus making the process of calling into PMC(4)
 * somewhat more expensive than a simple 'if' check and indirect call.
 */
struct sx pmc_sx;
SX_SYSINIT(pmcsx, &pmc_sx, "pmc-sx");

/*
 * PMC Soft per cpu trapframe.
 */
struct trapframe pmc_tf[MAXCPU];

/*
 * Per domain list of buffer headers
 */
__read_mostly struct pmc_domain_buffer_header *pmc_dom_hdrs[MAXMEMDOM];

/*
 * PMC Soft use a global table to store registered events.
 */

SYSCTL_NODE(_kern, OID_AUTO, hwpmc, CTLFLAG_RW, 0, "HWPMC parameters");

/* Size of the soft-event table; tunable, clamped in init_hwpmc(). */
static int pmc_softevents = 16;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, softevents, CTLFLAG_RDTUN,
    &pmc_softevents, 0, "maximum number of soft events");

/* Number of sequentially-allocated slots in 'pmc_softs' (see register()). */
int pmc_softs_count;
struct pmc_soft **pmc_softs;

/* Spin mutex protecting 'pmc_softs' and 'pmc_softs_count'. */
struct mtx pmc_softs_mtx;
MTX_SYSINIT(pmc_soft_mtx, &pmc_softs_mtx, "pmc-softs", MTX_SPIN);
121 
122 /*
123  * Helper functions.
124  */
125 
126 /*
127  * A note on the CPU numbering scheme used by the hwpmc(4) driver.
128  *
129  * CPUs are denoted using numbers in the range 0..[pmc_cpu_max()-1].
130  * CPUs could be numbered "sparsely" in this range; the predicate
131  * `pmc_cpu_is_present()' is used to test whether a given CPU is
132  * physically present.
133  *
134  * Further, a CPU that is physically present may be administratively
135  * disabled or otherwise unavailable for use by hwpmc(4).  The
136  * `pmc_cpu_is_active()' predicate tests for CPU usability.  An
137  * "active" CPU participates in thread scheduling and can field
138  * interrupts raised by PMC hardware.
139  *
140  * On systems with hyperthreaded CPUs, multiple logical CPUs may share
141  * PMC hardware resources.  For such processors one logical CPU is
142  * denoted as the primary owner of the in-CPU PMC resources. The
143  * pmc_cpu_is_primary() predicate is used to distinguish this primary
144  * CPU from the others.
145  */
146 
/*
 * Return non-zero if CPU 'cpu' is present and not administratively
 * halted, i.e. available for use by hwpmc(4).
 */
int
pmc_cpu_is_active(int cpu)
{
#ifdef	SMP
	if (!pmc_cpu_is_present(cpu))
		return (0);
	return (!CPU_ISSET(cpu, &hlt_cpus_mask));
#else
	/* Uniprocessor: the sole CPU is always active. */
	return (1);
#endif
}
157 
/* Deprecated. */
int
pmc_cpu_is_disabled(int cpu)
{

	/* A CPU is "disabled" exactly when it is not active. */
	return (pmc_cpu_is_active(cpu) == 0);
}
164 
/* Return non-zero if CPU 'cpu' is physically present in the system. */
int
pmc_cpu_is_present(int cpu)
{
#ifdef	SMP
	return (CPU_ABSENT(cpu) ? 0 : 1);
#else
	/* Uniprocessor: CPU 0 is always present. */
	return (1);
#endif
}
174 
/*
 * Return non-zero if CPU 'cpu' is the primary owner of the in-CPU PMC
 * resources (see the CPU numbering note above).
 */
int
pmc_cpu_is_primary(int cpu)
{
#ifdef	SMP
	return (CPU_ISSET(cpu, &logical_cpus_mask) ? 0 : 1);
#else
	/* Uniprocessor: no hyperthread siblings to share with. */
	return (1);
#endif
}
184 
185 
/*
 * Return the maximum CPU number supported by the system.  The return
 * value is used for scaling internal data structures and for runtime
 * checks.
 */
unsigned int
pmc_cpu_max(void)
{
	unsigned int ncpu;

#ifdef	SMP
	/* mp_maxid is the highest valid CPU id, hence the +1. */
	ncpu = mp_maxid + 1;
#else
	ncpu = 1;
#endif
	return (ncpu);
}
200 
#ifdef	INVARIANTS

/*
 * Return the count of CPUs in the `active' state in the system.
 */
int
pmc_cpu_max_active(void)
{
	int nactive;

#ifdef	SMP
	/*
	 * When support for CPU hot-plugging is added to the kernel,
	 * this function would change to return the current number
	 * of "active" CPUs.
	 */
	nactive = mp_ncpus;
#else
	nactive = 1;
#endif
	return (nactive);
}

#endif
222 
/*
 * Canonicalize a soft-event name in place:
 * - strip leading underscores,
 * - collapse runs of '_' to a single '_' and drop a trailing '_',
 * - convert all characters to uppercase.
 */
static void
pmc_soft_namecleanup(char *name)
{
	const char *src;
	char *dst;

	src = name;
	while (*src == '_')
		src++;

	dst = name;
	while (*src != '\0') {
		char c = *src++;

		/* Drop a '_' that is followed by another '_' or the end. */
		if (c == '_' && (*src == '_' || *src == '\0'))
			continue;
		*dst++ = toupper(c);
	}
	*dst = '\0';
}
245 
246 void
247 pmc_soft_ev_register(struct pmc_soft *ps)
248 {
249 	static int warned = 0;
250 	int n;
251 
252 	ps->ps_running  = 0;
253 	ps->ps_ev.pm_ev_code = 0; /* invalid */
254 	pmc_soft_namecleanup(ps->ps_ev.pm_ev_name);
255 
256 	mtx_lock_spin(&pmc_softs_mtx);
257 
258 	if (pmc_softs_count >= pmc_softevents) {
259 		/*
260 		 * XXX Reusing events can enter a race condition where
261 		 * new allocated event will be used as an old one.
262 		 */
263 		for (n = 0; n < pmc_softevents; n++)
264 			if (pmc_softs[n] == NULL)
265 				break;
266 		if (n == pmc_softevents) {
267 			mtx_unlock_spin(&pmc_softs_mtx);
268 			if (!warned) {
269 				printf("hwpmc: too many soft events, "
270 				    "increase kern.hwpmc.softevents tunable\n");
271 				warned = 1;
272 			}
273 			return;
274 		}
275 
276 		ps->ps_ev.pm_ev_code = PMC_EV_SOFT_FIRST + n;
277 		pmc_softs[n] = ps;
278 	} else {
279 		ps->ps_ev.pm_ev_code = PMC_EV_SOFT_FIRST + pmc_softs_count;
280 		pmc_softs[pmc_softs_count++] = ps;
281 	}
282 
283 	mtx_unlock_spin(&pmc_softs_mtx);
284 }
285 
/*
 * Deregister a software event: clear its slot in 'pmc_softs' so the
 * slot can be reused by a later pmc_soft_ev_register() call.
 */
void
pmc_soft_ev_deregister(struct pmc_soft *ps)
{

	KASSERT(ps != NULL, ("pmc_soft_deregister: called with NULL"));

	mtx_lock_spin(&pmc_softs_mtx);

	/*
	 * pm_ev_code == 0 means the event never got a slot (table was
	 * full at register time); otherwise the code encodes the slot
	 * index as an offset from PMC_EV_SOFT_FIRST.
	 */
	if (ps->ps_ev.pm_ev_code != 0 &&
	    (ps->ps_ev.pm_ev_code - PMC_EV_SOFT_FIRST) < pmc_softevents) {
		KASSERT((int)ps->ps_ev.pm_ev_code >= PMC_EV_SOFT_FIRST &&
		    (int)ps->ps_ev.pm_ev_code <= PMC_EV_SOFT_LAST,
		    ("pmc_soft_deregister: invalid event value"));
		pmc_softs[ps->ps_ev.pm_ev_code - PMC_EV_SOFT_FIRST] = NULL;
	}

	mtx_unlock_spin(&pmc_softs_mtx);
}
304 
305 struct pmc_soft *
306 pmc_soft_ev_acquire(enum pmc_event ev)
307 {
308 	struct pmc_soft *ps;
309 
310 	if (ev == 0 || (ev - PMC_EV_SOFT_FIRST) >= pmc_softevents)
311 		return NULL;
312 
313 	KASSERT((int)ev >= PMC_EV_SOFT_FIRST &&
314 	    (int)ev <= PMC_EV_SOFT_LAST,
315 	    ("event out of range"));
316 
317 	mtx_lock_spin(&pmc_softs_mtx);
318 
319 	ps = pmc_softs[ev - PMC_EV_SOFT_FIRST];
320 	if (ps == NULL)
321 		mtx_unlock_spin(&pmc_softs_mtx);
322 
323 	return ps;
324 }
325 
/*
 * Drop the soft events spin lock acquired by a successful
 * pmc_soft_ev_acquire(); 'ps' itself is not used.
 */
void
pmc_soft_ev_release(struct pmc_soft *ps)
{

	mtx_unlock_spin(&pmc_softs_mtx);
}
332 
#ifdef NUMA
#define NDOMAINS vm_ndomains

/* Map a CPU id to the memory domain recorded in its per-CPU data. */
static int
getdomain(int cpu)
{

	return (pcpu_find(cpu)->pc_domain);
}
#else
/* Non-NUMA: a single domain, and plain malloc() serves all of it. */
#define NDOMAINS 1
#define malloc_domain(size, type, domain, flags) malloc((size), (type), (flags))
#define getdomain(cpu) 0
#endif
349 /*
350  *  Initialise hwpmc.
351  */
352 static void
353 init_hwpmc(void *dummy __unused)
354 {
355 	int domain, cpu;
356 
357 	if (pmc_softevents <= 0 ||
358 	    pmc_softevents > PMC_EV_DYN_COUNT) {
359 		(void) printf("hwpmc: tunable \"softevents\"=%d out of "
360 		    "range.\n", pmc_softevents);
361 		pmc_softevents = PMC_EV_DYN_COUNT;
362 	}
363 	pmc_softs = malloc(pmc_softevents * sizeof(struct pmc_soft *), M_PMCHOOKS, M_NOWAIT|M_ZERO);
364 	KASSERT(pmc_softs != NULL, ("cannot allocate soft events table"));
365 
366 	for (domain = 0; domain < NDOMAINS; domain++) {
367 		pmc_dom_hdrs[domain] = malloc_domain(sizeof(struct pmc_domain_buffer_header), M_PMC, domain,
368 										M_WAITOK|M_ZERO);
369 		mtx_init(&pmc_dom_hdrs[domain]->pdbh_mtx, "pmc_bufferlist_mtx", "pmc-leaf", MTX_SPIN);
370 		TAILQ_INIT(&pmc_dom_hdrs[domain]->pdbh_head);
371 	}
372 	CPU_FOREACH(cpu) {
373 		domain = getdomain(cpu);
374 		KASSERT(pmc_dom_hdrs[domain] != NULL, ("no mem allocated for domain: %d", domain));
375 		pmc_dom_hdrs[domain]->pdbh_ncpus++;
376 	}
377 
378 }
379 
380 SYSINIT(hwpmc, SI_SUB_KDTRACE, SI_ORDER_FIRST, init_hwpmc, NULL);
381 
382