/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * Copyright (c) 2018 Matthew Macy
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/pmclog.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>

#include <sys/linker.h>		/* needs to be after <sys/malloc.h> */

#include <machine/atomic.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include "hwpmc_soft.h"

#define PMC_EPOCH_ENTER()						\
    struct epoch_tracker pmc_et;					\
    epoch_enter_preempt(global_epoch_preempt, &pmc_et)

#define PMC_EPOCH_EXIT()						\
    epoch_exit_preempt(global_epoch_preempt, &pmc_et)
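
/*
 * Usage sketch (illustration only): these macros bracket lock-free
 * traversals of the driver's CK_LIST-based lists, as in
 * pmc_process_exec() below:
 *
 *	PMC_EPOCH_ENTER();
 *	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
 *		...examine 'po'...
 *	PMC_EPOCH_EXIT();
 *
 * PMC_EPOCH_ENTER() declares the tracker variable 'pmc_et', so each
 * enter/exit pair must share a single scope.
 */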

/*
 * Types
 */

enum pmc_flags {
	PMC_FLAG_NONE	  = 0x00, /* do nothing */
	PMC_FLAG_REMOVE   = 0x01, /* atomically remove entry from hash */
	PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
	PMC_FLAG_NOWAIT   = 0x04, /* do not wait for mallocs */
};
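
/*
 * Usage sketch (illustration only): the descriptor lookup helpers
 * combine these flags, e.g.
 *
 *	pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE);
 *
 * creates a descriptor for 'p' if none exists, while PMC_FLAG_NONE
 * turns the call into a pure lookup.
 */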

/*
 * The offset in sysent where the syscall is allocated.
 */
static int pmc_syscall_num = NO_SYSCALL;

struct pmc_cpu		**pmc_pcpu;	 /* per-cpu state */
pmc_value_t		*pmc_pcpu_saved; /* saved PMC values: CSW handling */

#define	PMC_PCPU_SAVED(C, R)	pmc_pcpu_saved[(R) + md->pmd_npmc * (C)]

struct mtx_pool		*pmc_mtxpool;
static int		*pmc_pmcdisp;	 /* PMC row dispositions */

#define	PMC_ROW_DISP_IS_FREE(R)		(pmc_pmcdisp[(R)] == 0)
#define	PMC_ROW_DISP_IS_THREAD(R)	(pmc_pmcdisp[(R)] > 0)
#define	PMC_ROW_DISP_IS_STANDALONE(R)	(pmc_pmcdisp[(R)] < 0)

#define	PMC_MARK_ROW_FREE(R) do {					  \
	pmc_pmcdisp[(R)] = 0;						  \
} while (0)

#define	PMC_MARK_ROW_STANDALONE(R) do {					  \
	KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
		    __LINE__));						  \
	atomic_add_int(&pmc_pmcdisp[(R)], -1);				  \
	KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()),		  \
		("[pmc,%d] row disposition error", __LINE__));		  \
} while (0)

#define	PMC_UNMARK_ROW_STANDALONE(R) do { 				  \
	atomic_add_int(&pmc_pmcdisp[(R)], 1);				  \
	KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
		    __LINE__));						  \
} while (0)

#define	PMC_MARK_ROW_THREAD(R) do {					  \
	KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
		    __LINE__));						  \
	atomic_add_int(&pmc_pmcdisp[(R)], 1);				  \
} while (0)

#define	PMC_UNMARK_ROW_THREAD(R) do {					  \
	atomic_add_int(&pmc_pmcdisp[(R)], -1);				  \
	KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
		    __LINE__));						  \
} while (0)
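
/*
 * A worked example of the row-disposition convention (hypothetical
 * counts, for illustration): a row with pmc_pmcdisp[ri] == 0 is free;
 * two process-mode PMCs allocated at row 'ri' leave the counter at 2
 * (PMC_ROW_DISP_IS_THREAD); a system-wide PMC drives it negative, one
 * step per active CPU it occupies, bounded below by
 * -pmc_cpu_max_active() (PMC_ROW_DISP_IS_STANDALONE).
 */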

/* various event handlers */
static eventhandler_tag	pmc_exit_tag, pmc_fork_tag, pmc_kld_load_tag,
    pmc_kld_unload_tag;

/* Module statistics */
struct pmc_driverstats pmc_stats;

/* Machine/processor dependent operations */
static struct pmc_mdep  *md;

/*
 * Hash tables mapping owner processes and target threads to PMCs.
 */
struct mtx pmc_processhash_mtx;		/* spin mutex */
static u_long pmc_processhashmask;
static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash;

/*
 * Hash table of PMC owner descriptors.  This table is protected by
 * the shared PMC "sx" lock.
 */
static u_long pmc_ownerhashmask;
static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash;

/*
 * List of PMC owners with system-wide sampling PMCs.
 */
static CK_LIST_HEAD(, pmc_owner) pmc_ss_owners;

/*
 * List of free thread entries.  This list is protected by the
 * pmc_threadfreelist_mtx spin mutex.
 */
static struct mtx pmc_threadfreelist_mtx;	/* spin mutex */
static LIST_HEAD(, pmc_thread) pmc_threadfreelist;
static int pmc_threadfreelist_entries = 0;
#define	THREADENTRY_SIZE	(sizeof(struct pmc_thread) +		\
    (md->pmd_npmc * sizeof(struct pmc_threadpmcstate)))

/*
 * Task to free thread descriptors
 */
static struct task free_task;

/*
 * A map of row indices to classdep structures.
 */
static struct pmc_classdep **pmc_rowindex_to_classdep;

/*
 * Prototypes
 */

#ifdef HWPMC_DEBUG
static int	pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
static int	pmc_debugflags_parse(char *newstr, char *fence);
#endif

static void	pmc_multipart_add(struct pmc_sample *ps, int type,
    int length);
static void	pmc_multipart_copydata(struct pmc_sample *ps,
    struct pmc_multipart *mp);

static int	load(struct module *module, int cmd, void *arg);
static int	pmc_add_sample(ring_type_t ring, struct pmc *pm,
    struct trapframe *tf, struct pmc_multipart *mp);
static void	pmc_add_thread_descriptors_from_proc(struct proc *p,
    struct pmc_process *pp);
static int	pmc_attach_process(struct proc *p, struct pmc *pm);
static struct pmc *pmc_allocate_pmc_descriptor(void);
static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p);
static int	pmc_attach_one_process(struct proc *p, struct pmc *pm);
static bool	pmc_can_allocate_row(int ri, enum pmc_mode mode);
static bool	pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
    int cpu);
static bool	pmc_can_attach(struct pmc *pm, struct proc *p);
static void	pmc_capture_user_callchain(int cpu, int soft,
    struct trapframe *tf);
static void	pmc_cleanup(void);
static int	pmc_detach_process(struct proc *p, struct pmc *pm);
static int	pmc_detach_one_process(struct proc *p, struct pmc *pm,
    int flags);
static void	pmc_destroy_owner_descriptor(struct pmc_owner *po);
static void	pmc_destroy_pmc_descriptor(struct pmc *pm);
static void	pmc_destroy_process_descriptor(struct pmc_process *pp);
static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
static int	pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
    pmc_id_t pmc);
static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
    uint32_t mode);
static struct pmc_thread *pmc_find_thread_descriptor(struct pmc_process *pp,
    struct thread *td, uint32_t mode);
static void	pmc_force_context_switch(void);
static void	pmc_link_target_process(struct pmc *pm,
    struct pmc_process *pp);
static void	pmc_log_all_process_mappings(struct pmc_owner *po);
static void	pmc_log_kernel_mappings(struct pmc *pm);
static void	pmc_log_process_mappings(struct pmc_owner *po, struct proc *p);
static void	pmc_maybe_remove_owner(struct pmc_owner *po);
static void	pmc_post_callchain_callback(void);
static void	pmc_process_allproc(struct pmc *pm);
static void	pmc_process_csw_in(struct thread *td);
static void	pmc_process_csw_out(struct thread *td);
static void	pmc_process_exec(struct thread *td,
    struct pmckern_procexec *pk);
static void	pmc_process_exit(void *arg, struct proc *p);
static void	pmc_process_fork(void *arg, struct proc *p1,
    struct proc *p2, int n);
static void	pmc_process_proccreate(struct proc *p);
static void	pmc_process_samples(int cpu, ring_type_t soft);
static void	pmc_process_threadcreate(struct thread *td);
static void	pmc_process_threadexit(struct thread *td);
static void	pmc_process_thread_add(struct thread *td);
static void	pmc_process_thread_delete(struct thread *td);
static void	pmc_process_thread_userret(struct thread *td);
static void	pmc_release_pmc_descriptor(struct pmc *pmc);
static void	pmc_remove_owner(struct pmc_owner *po);
static void	pmc_remove_process_descriptor(struct pmc_process *pp);
static int	pmc_start(struct pmc *pm);
static int	pmc_stop(struct pmc *pm);
static int	pmc_syscall_handler(struct thread *td, void *syscall_args);
static struct pmc_thread *pmc_thread_descriptor_pool_alloc(void);
static void	pmc_thread_descriptor_pool_drain(void);
static void	pmc_thread_descriptor_pool_free(struct pmc_thread *pt);
static void	pmc_unlink_target_process(struct pmc *pmc,
    struct pmc_process *pp);

static int	generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp);
static int	generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp);
static struct pmc_mdep *pmc_generic_cpu_initialize(void);
static void	pmc_generic_cpu_finalize(struct pmc_mdep *md);

/*
 * Kernel tunables and sysctl(8) interface.
 */

SYSCTL_DECL(_kern_hwpmc);
SYSCTL_NODE(_kern_hwpmc, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "HWPMC stats");

/* Stats. */
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_ignored, CTLFLAG_RW,
    &pmc_stats.pm_intr_ignored,
    "# of interrupts ignored");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_processed, CTLFLAG_RW,
    &pmc_stats.pm_intr_processed,
    "# of interrupts processed");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_bufferfull, CTLFLAG_RW,
    &pmc_stats.pm_intr_bufferfull,
    "# of interrupts where buffer was full");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, syscalls, CTLFLAG_RW,
    &pmc_stats.pm_syscalls,
    "# of syscalls");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, syscall_errors, CTLFLAG_RW,
    &pmc_stats.pm_syscall_errors,
    "# of syscall_errors");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, buffer_requests, CTLFLAG_RW,
    &pmc_stats.pm_buffer_requests,
    "# of buffer requests");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, buffer_requests_failed,
    CTLFLAG_RW, &pmc_stats.pm_buffer_requests_failed,
    "# of buffer requests which failed");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, log_sweeps, CTLFLAG_RW,
    &pmc_stats.pm_log_sweeps,
    "# of times samples were processed");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, merges, CTLFLAG_RW,
    &pmc_stats.pm_merges,
    "# of times kernel stack was found for user trace");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, overwrites, CTLFLAG_RW,
    &pmc_stats.pm_overwrites,
    "# of times a sample was overwritten before being logged");

static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_RDTUN,
    &pmc_callchaindepth, 0,
    "depth of call chain records");

char pmc_cpuid[PMC_CPUID_LEN];
SYSCTL_STRING(_kern_hwpmc, OID_AUTO, cpuid, CTLFLAG_RD,
    pmc_cpuid, 0,
    "cpu version string");

#ifdef HWPMC_DEBUG
struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
char	pmc_debugstr[PMC_DEBUG_STRSIZE];
TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
    sizeof(pmc_debugstr));
SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE,
    0, 0, pmc_debugflags_sysctl_handler, "A",
    "debug flags");
#endif

/*
 * kern.hwpmc.hashsize -- determines the number of rows in the hash
 * table used to look up threads
 */
static int pmc_hashsize = PMC_HASH_SIZE;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_RDTUN,
    &pmc_hashsize, 0,
    "rows in hash tables");

/*
 * kern.hwpmc.nsamples -- number of PC samples/callchain stacks per CPU
 */
static int pmc_nsamples = PMC_NSAMPLES;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_RDTUN,
    &pmc_nsamples, 0,
    "number of PC samples per CPU");

static uint64_t pmc_sample_mask = PMC_NSAMPLES - 1;

/*
 * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
 */
static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_RDTUN,
    &pmc_mtxpool_size, 0,
    "size of spin mutex pool");

/*
 * kern.hwpmc.threadfreelist_entries -- number of free entries
 */
SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_entries, CTLFLAG_RD,
    &pmc_threadfreelist_entries, 0,
    "number of available thread entries");

/*
 * kern.hwpmc.threadfreelist_max -- maximum number of free entries
 */
static int pmc_threadfreelist_max = PMC_THREADLIST_MAX;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_max, CTLFLAG_RW,
    &pmc_threadfreelist_max, 0,
    "maximum number of available thread entries before freeing some");

/*
 * kern.hwpmc.mincount -- minimum sample count
 */
static u_int pmc_mincount = 1000;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, mincount, CTLFLAG_RWTUN,
    &pmc_mincount, 0,
    "minimum count for sampling counters");

/*
 * security.bsd.unprivileged_syspmcs -- allow non-root processes to
 * allocate system-wide PMCs.
 *
 * Allowing unprivileged processes to allocate system PMCs is convenient
 * if system-wide measurements need to be taken concurrently with other
 * per-process measurements.  This feature is turned off by default.
 */
static int pmc_unprivileged_syspmcs = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RWTUN,
    &pmc_unprivileged_syspmcs, 0,
    "allow unprivileged process to allocate system PMCs");

/*
 * Hash function.  Discard the lower 2 bits of the pointer since
 * these are always zero for our uses.  The hash multiplier is
 * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
 */
#if	LONG_BIT == 64
#define	_PMC_HM		11400714819323198486u
#elif	LONG_BIT == 32
#define	_PMC_HM		2654435769u
#else
#error 	Must know the size of 'long' to compile
#endif

#define	PMC_HASH_PTR(P,M)	((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
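
/*
 * Usage sketch (illustration only): hashing a process pointer selects
 * a bucket in one of the tables declared above, e.g.
 *
 *	ph = &pmc_processhash[PMC_HASH_PTR(p, pmc_processhashmask)];
 *
 * The multiplicative (Fibonacci) hash spreads pointer values evenly
 * over the table once the always-zero low bits are discarded.
 */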

/*
 * Syscall structures
 */

/* The `sysent' for the new syscall */
static struct sysent pmc_sysent = {
	.sy_narg =	2,
	.sy_call =	pmc_syscall_handler,
};

static struct syscall_module_data pmc_syscall_mod = {
	.chainevh =	load,
	.chainarg =	NULL,
	.offset =	&pmc_syscall_num,
	.new_sysent =	&pmc_sysent,
	.old_sysent =	{ .sy_narg = 0, .sy_call = NULL },
	.flags =	SY_THR_STATIC_KLD,
};

static moduledata_t pmc_mod = {
	.name =		PMC_MODULE_NAME,
	.evhand =	syscall_module_handler,
	.priv =		&pmc_syscall_mod,
};

#ifdef EARLY_AP_STARTUP
DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SYSCALLS, SI_ORDER_ANY);
#else
DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
#endif
MODULE_VERSION(pmc, PMC_VERSION);

#ifdef HWPMC_DEBUG
enum pmc_dbgparse_state {
	PMCDS_WS,		/* in whitespace */
	PMCDS_MAJOR,		/* seen a major keyword */
	PMCDS_MINOR
};

static int
pmc_debugflags_parse(char *newstr, char *fence)
{
	struct pmc_debugflags *tmpflags;
	size_t kwlen;
	char c, *p, *q;
	int error, *newbits, tmp;
	int found;

	tmpflags = malloc(sizeof(*tmpflags), M_PMC, M_WAITOK | M_ZERO);

	error = 0;
	for (p = newstr; p < fence && (c = *p); p++) {
		/* skip white space */
		if (c == ' ' || c == '\t')
			continue;

		/* look for a keyword followed by "=" */
		for (q = p; p < fence && (c = *p) && c != '='; p++)
			;
		if (c != '=') {
			error = EINVAL;
			goto done;
		}

		kwlen = p - q;
		newbits = NULL;

		/* lookup flag group name */
#define	DBG_SET_FLAG_MAJ(S,F)						\
		if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0)	\
			newbits = &tmpflags->pdb_ ## F;

		DBG_SET_FLAG_MAJ("cpu",		CPU);
		DBG_SET_FLAG_MAJ("csw",		CSW);
		DBG_SET_FLAG_MAJ("logging",	LOG);
		DBG_SET_FLAG_MAJ("module",	MOD);
		DBG_SET_FLAG_MAJ("md", 		MDP);
		DBG_SET_FLAG_MAJ("owner",	OWN);
		DBG_SET_FLAG_MAJ("pmc",		PMC);
		DBG_SET_FLAG_MAJ("process",	PRC);
		DBG_SET_FLAG_MAJ("sampling", 	SAM);
#undef DBG_SET_FLAG_MAJ

		if (newbits == NULL) {
			error = EINVAL;
			goto done;
		}

		p++;		/* skip the '=' */

		/* Now parse the individual flags */
		tmp = 0;
	newflag:
		for (q = p; p < fence && (c = *p); p++)
			if (c == ' ' || c == '\t' || c == ',')
				break;

		/* p == fence or c == ws or c == "," or c == 0 */

		if ((kwlen = p - q) == 0) {
			*newbits = tmp;
			continue;
		}

		found = 0;
#define	DBG_SET_FLAG_MIN(S,F)						\
		if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0)	\
			tmp |= found = (1 << PMC_DEBUG_MIN_ ## F)

		/* a '*' denotes all possible flags in the group */
		if (kwlen == 1 && *q == '*')
			tmp = found = ~0;
		/* look for individual flag names */
		DBG_SET_FLAG_MIN("allocaterow", ALR);
		DBG_SET_FLAG_MIN("allocate",	ALL);
		DBG_SET_FLAG_MIN("attach",	ATT);
		DBG_SET_FLAG_MIN("bind",	BND);
		DBG_SET_FLAG_MIN("config",	CFG);
		DBG_SET_FLAG_MIN("exec",	EXC);
		DBG_SET_FLAG_MIN("exit",	EXT);
		DBG_SET_FLAG_MIN("find",	FND);
		DBG_SET_FLAG_MIN("flush",	FLS);
		DBG_SET_FLAG_MIN("fork",	FRK);
		DBG_SET_FLAG_MIN("getbuf",	GTB);
		DBG_SET_FLAG_MIN("hook",	PMH);
		DBG_SET_FLAG_MIN("init",	INI);
		DBG_SET_FLAG_MIN("intr",	INT);
		DBG_SET_FLAG_MIN("linktarget",	TLK);
		DBG_SET_FLAG_MIN("mayberemove", OMR);
		DBG_SET_FLAG_MIN("ops",		OPS);
		DBG_SET_FLAG_MIN("read",	REA);
		DBG_SET_FLAG_MIN("register",	REG);
		DBG_SET_FLAG_MIN("release",	REL);
		DBG_SET_FLAG_MIN("remove",	ORM);
		DBG_SET_FLAG_MIN("sample",	SAM);
		DBG_SET_FLAG_MIN("scheduleio",	SIO);
		DBG_SET_FLAG_MIN("select",	SEL);
		DBG_SET_FLAG_MIN("signal",	SIG);
		DBG_SET_FLAG_MIN("swi",		SWI);
		DBG_SET_FLAG_MIN("swo",		SWO);
		DBG_SET_FLAG_MIN("start",	STA);
		DBG_SET_FLAG_MIN("stop",	STO);
		DBG_SET_FLAG_MIN("syscall",	PMS);
		DBG_SET_FLAG_MIN("unlinktarget", TUL);
		DBG_SET_FLAG_MIN("write",	WRI);
#undef DBG_SET_FLAG_MIN
		if (found == 0) {
			/* unrecognized flag name */
			error = EINVAL;
			goto done;
		}

		if (c == 0 || c == ' ' || c == '\t') {	/* end of flag group */
			*newbits = tmp;
			continue;
		}

		p++;
		goto newflag;
	}

	/* save the new flag set */
	bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags));
done:
	free(tmpflags, M_PMC);
	return (error);
}

static int
pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	char *fence, *newstr;
	int error;
	u_int n;

	n = sizeof(pmc_debugstr);
	newstr = malloc(n, M_PMC, M_WAITOK | M_ZERO);
	strlcpy(newstr, pmc_debugstr, n);

	error = sysctl_handle_string(oidp, newstr, n, req);

	/* if there is a new string, parse and copy it */
	if (error == 0 && req->newptr != NULL) {
		fence = newstr + (n < req->newlen ? n : req->newlen + 1);
		error = pmc_debugflags_parse(newstr, fence);
		if (error == 0)
			strlcpy(pmc_debugstr, newstr, sizeof(pmc_debugstr));
	}
	free(newstr, M_PMC);

	return (error);
}
#endif
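
/*
 * An example string accepted by pmc_debugflags_parse() above, using
 * group and flag names from its lookup tables (a hypothetical setting,
 * for illustration):
 *
 *	kern.hwpmc.debugflags="process=attach,exit sampling=*"
 *
 * This enables the "attach" and "exit" flags of the "process" group
 * and all flags of the "sampling" group.
 */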

/*
 * Map a row index to a classdep structure and return the adjusted row
 * index for the PMC class index.
 */
static struct pmc_classdep *
pmc_ri_to_classdep(struct pmc_mdep *md __unused, int ri, int *adjri)
{
	struct pmc_classdep *pcd;

	KASSERT(ri >= 0 && ri < md->pmd_npmc,
	    ("[pmc,%d] illegal row-index %d", __LINE__, ri));

	pcd = pmc_rowindex_to_classdep[ri];
	KASSERT(pcd != NULL,
	    ("[pmc,%d] ri %d null pcd", __LINE__, ri));

	*adjri = ri - pcd->pcd_ri;
	KASSERT(*adjri >= 0 && *adjri < pcd->pcd_num,
	    ("[pmc,%d] adjusted row-index %d", __LINE__, *adjri));

	return (pcd);
}
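
/*
 * For example (hypothetical layout): if the MD layer registered a
 * class with 4 PMCs starting at row 0 (pcd_ri = 0, pcd_num = 4)
 * followed by a class with 2 PMCs (pcd_ri = 4, pcd_num = 2), then
 * ri = 5 maps to the second classdep with an adjusted row index of 1.
 */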

/*
 * Concurrency Control
 *
 * The driver manages the following data structures:
 *
 *   - target process descriptors, one per target process
 *   - owner process descriptors (and attached lists), one per owner process
 *   - lookup hash tables for owner and target processes
 *   - PMC descriptors (and attached lists)
 *   - per-cpu hardware state
 *   - the 'hook' variable through which the kernel calls into
 *     this module
 *   - the machine hardware state (managed by the MD layer)
 *
 * These data structures are accessed from:
 *
 * - thread context-switch code
 * - interrupt handlers (possibly on multiple cpus)
 * - kernel threads on multiple cpus running on behalf of user
 *   processes doing system calls
 * - this driver's private kernel threads
 *
 * = Locks and Locking strategy =
 *
 * The driver uses four locking strategies for its operation:
 *
 * - The global SX lock "pmc_sx" is used to protect internal
 *   data structures.
 *
 *   Calls into the module by syscall() start with this lock being
 *   held in exclusive mode.  Depending on the requested operation,
 *   the lock may be downgraded to 'shared' mode to allow more
 *   concurrent readers into the module.  Calls into the module from
 *   other parts of the kernel acquire the lock in shared mode.
 *
 *   This SX lock is held in exclusive mode for any operations that
 *   modify the linkages between the driver's internal data structures.
 *
 *   The 'pmc_hook' function pointer is also protected by this lock.
 *   It is only examined with the sx lock held in exclusive mode.  The
 *   kernel module is allowed to be unloaded only with the sx lock held
 *   in exclusive mode.  In normal syscall handling, after acquiring the
 *   pmc_sx lock we first check that 'pmc_hook' is non-null before
 *   proceeding.  This prevents races between the thread unloading the module
 *   and other threads seeking to use the module.
 *
 * - Lookups of target process structures and owner process structures
 *   cannot use the global "pmc_sx" SX lock because these lookups need
 *   to happen during context switches and in other critical sections
 *   where sleeping is not allowed.  We protect these lookup tables
 *   with their own private spin-mutexes, "pmc_processhash_mtx" and
 *   "pmc_ownerhash_mtx".
 *
 * - Interrupt handlers work in a lock free manner.  At interrupt
 *   time, handlers look at the PMC pointer (phw->phw_pmc) configured
 *   when the PMC was started.  If this pointer is NULL, the interrupt
 *   is ignored after updating driver statistics.  We ensure that this
 *   pointer is set (using an atomic operation if necessary) before the
 *   PMC hardware is started.  Conversely, this pointer is unset atomically
 *   only after the PMC hardware is stopped.
 *
 *   We ensure that everything needed for the operation of an
 *   interrupt handler is available without it needing to acquire any
 *   locks.  We also ensure that a PMC's software state is destroyed only
 *   after the PMC is taken off hardware (on all CPUs).
 *
 * - Context-switch handling with process-private PMCs needs more
 *   care.
 *
 *   A given process may be the target of multiple PMCs.  For example,
 *   PMCATTACH and PMCDETACH may be requested by a process on one CPU
 *   while the target process is running on another.  A PMC could also
 *   be getting released because its owner is exiting.  We tackle
 *   these situations in the following manner:
 *
 *   - each target process structure 'pmc_process' has an array
 *     of 'struct pmc *' pointers, one for each hardware PMC.
 *
 *   - At context switch IN time, each "target" PMC in RUNNING state
 *     gets started on hardware and a pointer to each PMC is copied into
 *     the per-cpu phw array.  The 'runcount' for the PMC is
 *     incremented.
 *
 *   - At context switch OUT time, all process-virtual PMCs are stopped
 *     on hardware.  The saved value is added to the PMC's value field
 *     only if the PMC is in a non-deleted state (the PMC's state could
 *     have changed during the current time slice).
 *
 *     Note that in-between a switch IN on a processor and a switch
 *     OUT, the PMC could have been released on another CPU.  Therefore
 *     context switch OUT always looks at the hardware state to turn
 *     OFF PMCs and will update a PMC's saved value only if reachable
 *     from the target process record.
 *
 *   - OP PMCRELEASE could be called on a PMC at any time (the PMC could
 *     be attached to many processes at the time of the call and could
 *     be active on multiple CPUs).
 *
 *     We prevent further scheduling of the PMC by marking it as in
 *     state 'DELETED'.  If the runcount of the PMC is non-zero then
 *     this PMC is currently running on a CPU somewhere.  The thread
 *     doing the PMCRELEASE operation waits by repeatedly doing a
 *     pause() till the runcount comes to zero.
 *
 * The contents of a PMC descriptor (struct pmc) are protected using
 * a spin-mutex.  In order to save space, we use a mutex pool.
 *
 * In terms of lock types used by witness(4), we use:
 * - Type "pmc-sx", used by the global SX lock.
 * - Type "pmc-sleep", for sleep mutexes used by logger threads.
 * - Type "pmc-per-proc", for protecting PMC owner descriptors.
 * - Type "pmc-leaf", used for all other spin mutexes.
 */
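
/*
 * A minimal sketch (illustration only) of the syscall-entry pattern
 * described above; the error value returned when the module is being
 * unloaded is an assumption here:
 *
 *	sx_xlock(&pmc_sx);
 *	if (pmc_hook == NULL) {
 *		sx_xunlock(&pmc_sx);
 *		return (ENOSYS);
 *	}
 */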

/*
 * Save the CPU binding of the current kthread.
 */
void
pmc_save_cpu_binding(struct pmc_binding *pb)
{
	PMCDBG0(CPU,BND,2, "save-cpu");
	thread_lock(curthread);
	pb->pb_bound = sched_is_bound(curthread);
	pb->pb_cpu   = curthread->td_oncpu;
	pb->pb_priority = curthread->td_priority;
	thread_unlock(curthread);
	PMCDBG1(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
}

/*
 * Restore the CPU binding of the current thread.
 */
void
pmc_restore_cpu_binding(struct pmc_binding *pb)
{
	PMCDBG2(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
	    curthread->td_oncpu, pb->pb_cpu);
	thread_lock(curthread);
	sched_bind(curthread, pb->pb_cpu);
	if (!pb->pb_bound)
		sched_unbind(curthread);
	sched_prio(curthread, pb->pb_priority);
	thread_unlock(curthread);
	PMCDBG0(CPU,BND,2, "restore-cpu done");
}

/*
 * Move execution over to the specified CPU and bind it there.
 */
void
pmc_select_cpu(int cpu)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[pmc,%d] bad cpu number %d", __LINE__, cpu));

	/* Never move to an inactive CPU. */
	KASSERT(pmc_cpu_is_active(cpu), ("[pmc,%d] selecting inactive "
	    "CPU %d", __LINE__, cpu));

	PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d", cpu);
	thread_lock(curthread);
	sched_prio(curthread, PRI_MIN);
	sched_bind(curthread, cpu);
	thread_unlock(curthread);

	KASSERT(curthread->td_oncpu == cpu,
	    ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
		cpu, curthread->td_oncpu));

	PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
}
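
/*
 * Taken together, the three functions above implement the driver's
 * usual pattern for touching per-CPU PMC hardware (a sketch mirroring
 * their use elsewhere in the driver):
 *
 *	struct pmc_binding pb;
 *
 *	pmc_save_cpu_binding(&pb);
 *	pmc_select_cpu(cpu);
 *	...operate on CPU-local PMC state...
 *	pmc_restore_cpu_binding(&pb);
 */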

/*
 * Force a context switch.
 *
 * We do this by pause'ing for 1 tick -- invoking mi_switch() is not
 * guaranteed to force a context switch.
 */
static void
pmc_force_context_switch(void)
{

	pause("pmcctx", 1);
}

uint64_t
pmc_rdtsc(void)
{
#if defined(__i386__)
	/* Unfortunately get_cyclecount on i386 uses cpu_ticks. */
	return (rdtsc());
#else
	return (get_cyclecount());
#endif
}

/*
 * Get the file name for an executable.  This is a simple wrapper
 * around vn_fullpath(9).
 */
static void
pmc_getfilename(struct vnode *v, char **fullpath, char **freepath)
{

	*fullpath = "unknown";
	*freepath = NULL;
	vn_fullpath(v, fullpath, freepath);
}

/*
 * Remove a process owning PMCs.
 */
void
pmc_remove_owner(struct pmc_owner *po)
{
	struct pmc *pm, *tmp;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG1(OWN,ORM,1, "remove-owner po=%p", po);

	/* Remove descriptor from the owner hash table */
	LIST_REMOVE(po, po_next);

	/* release all owned PMC descriptors */
	LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) {
		PMCDBG1(OWN,ORM,2, "pmc=%p", pm);
		KASSERT(pm->pm_owner == po,
		    ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po));

		pmc_release_pmc_descriptor(pm);	/* will unlink from the list */
		pmc_destroy_pmc_descriptor(pm);
	}

	KASSERT(po->po_sscount == 0,
	    ("[pmc,%d] SS count not zero", __LINE__));
	KASSERT(LIST_EMPTY(&po->po_pmcs),
	    ("[pmc,%d] PMC list not empty", __LINE__));

	/* de-configure the log file if present */
	if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		pmclog_deconfigure_log(po);
}

/*
 * Remove an owner process record if all conditions are met.
 */
static void
pmc_maybe_remove_owner(struct pmc_owner *po)
{

	PMCDBG1(OWN,OMR,1, "maybe-remove-owner po=%p", po);

	/*
	 * Remove owner record if
	 * - this process does not own any PMCs
	 * - this process has not allocated a system-wide sampling buffer
	 */
	if (LIST_EMPTY(&po->po_pmcs) &&
	    ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) {
		pmc_remove_owner(po);
		pmc_destroy_owner_descriptor(po);
	}
}

/*
 * Add an association between a target process and a PMC.
 */
static void
pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
{
	struct pmc_target *pt;
	struct pmc_thread *pt_td __diagused;
	int ri;

	sx_assert(&pmc_sx, SX_XLOCKED);
	KASSERT(pm != NULL && pp != NULL,
	    ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
	KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
	    ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d",
		__LINE__, pm, pp->pp_proc->p_pid));
	KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= ((int) md->pmd_npmc - 1),
	    ("[pmc,%d] Illegal reference count %d for process record %p",
		__LINE__, pp->pp_refcnt, (void *) pp));

	ri = PMC_TO_ROWINDEX(pm);

	PMCDBG3(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
	    pm, ri, pp);

#ifdef HWPMC_DEBUG
	LIST_FOREACH(pt, &pm->pm_targets, pt_next) {
		if (pt->pt_process == pp)
			KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
			    __LINE__, pp, pm));
	}
#endif
	pt = malloc(sizeof(struct pmc_target), M_PMC, M_WAITOK | M_ZERO);
	pt->pt_process = pp;

	LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);

	atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc,
	    (uintptr_t)pm);

	if (pm->pm_owner->po_owner == pp->pp_proc)
		pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER;

	/*
	 * Initialize the per-process values at this row index.
	 */
	pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ?
	    pm->pm_sc.pm_reloadcount : 0;
	pp->pp_refcnt++;

#ifdef INVARIANTS
	/* Confirm that the per-thread values at this row index are cleared. */
	if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
		mtx_lock_spin(pp->pp_tdslock);
		LIST_FOREACH(pt_td, &pp->pp_tds, pt_next) {
			KASSERT(pt_td->pt_pmcs[ri].pt_pmcval == (pmc_value_t) 0,
			    ("[pmc,%d] pt_pmcval not cleared for pid=%d at "
			    "ri=%d", __LINE__, pp->pp_proc->p_pid, ri));
		}
		mtx_unlock_spin(pp->pp_tdslock);
	}
#endif
}

/*
 * Removes the association between a target process and a PMC.
 */
static void
pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
{
	int ri;
	struct proc *p;
	struct pmc_target *ptgt;
	struct pmc_thread *pt;

	sx_assert(&pmc_sx, SX_XLOCKED);

	KASSERT(pm != NULL && pp != NULL,
	    ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));

	KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt <= (int) md->pmd_npmc,
	    ("[pmc,%d] Illegal ref count %d on process record %p",
		__LINE__, pp->pp_refcnt, (void *) pp));

	ri = PMC_TO_ROWINDEX(pm);

	PMCDBG3(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
	    pm, ri, pp);

	KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
	    ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
		ri, pm, pp->pp_pmcs[ri].pp_pmc));

	pp->pp_pmcs[ri].pp_pmc = NULL;
	pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t)0;

	/* Clear the per-thread values at this row index. */
	if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
		mtx_lock_spin(pp->pp_tdslock);
		LIST_FOREACH(pt, &pp->pp_tds, pt_next)
			pt->pt_pmcs[ri].pt_pmcval = (pmc_value_t)0;
		mtx_unlock_spin(pp->pp_tdslock);
	}

	/* Remove owner-specific flags */
	if (pm->pm_owner->po_owner == pp->pp_proc) {
		pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS;
		pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER;
	}

	pp->pp_refcnt--;

	/* Remove the target process from the PMC structure */
	LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
		if (ptgt->pt_process == pp)
			break;

	KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
		    "in pmc %p", __LINE__, pp->pp_proc, pp, pm));

	LIST_REMOVE(ptgt, pt_next);
	free(ptgt, M_PMC);

	/* if the PMC now lacks targets, send the owner a SIGIO */
	if (LIST_EMPTY(&pm->pm_targets)) {
		p = pm->pm_owner->po_owner;
		PROC_LOCK(p);
		kern_psignal(p, SIGIO);
		PROC_UNLOCK(p);

		PMCDBG2(PRC,SIG,2, "signalling proc=%p signal=%d", p, SIGIO);
	}
}

/*
 * Check if PMC 'pm' may be attached to target process 't'.
 */
static bool
pmc_can_attach(struct pmc *pm, struct proc *t)
{
	struct proc *o;		/* pmc owner */
	struct ucred *oc, *tc;	/* owner, target credentials */
	bool decline_attach;

	/*
	 * A PMC's owner can always attach that PMC to itself.
	 */
	if ((o = pm->pm_owner->po_owner) == t)
		return (true);

	PROC_LOCK(o);
	oc = o->p_ucred;
	crhold(oc);
	PROC_UNLOCK(o);

	PROC_LOCK(t);
	tc = t->p_ucred;
	crhold(tc);
	PROC_UNLOCK(t);

	/*
	 * The effective uid of the PMC owner should match at least one
	 * of the {effective,real,saved} uids of the target process.
	 */
	decline_attach = oc->cr_uid != tc->cr_uid &&
	    oc->cr_uid != tc->cr_svuid &&
	    oc->cr_uid != tc->cr_ruid;

	/*
	 * Every one of the target's group ids must be in the owner's
	 * group list.
	 */
	for (int i = 0; !decline_attach && i < tc->cr_ngroups; i++)
		decline_attach = !groupmember(tc->cr_groups[i], oc);
	if (!decline_attach)
		decline_attach = !groupmember(tc->cr_gid, oc) ||
		    !groupmember(tc->cr_rgid, oc) ||
		    !groupmember(tc->cr_svgid, oc);

	crfree(tc);
	crfree(oc);

	return (!decline_attach);
}
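
/*
 * For example (hypothetical credentials): an owner with effective uid
 * 1001 may attach to a target whose real, effective, or saved uid is
 * 1001, but only if every group id of the target -- including its
 * effective, real, and saved gids -- is present in the owner's group
 * set.
 */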

/*
 * Attach a process to a PMC.
 */
static int
pmc_attach_one_process(struct proc *p, struct pmc *pm)
{
	int ri, error;
	char *fullpath, *freepath;
	struct pmc_process	*pp;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG5(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
	    PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

	/*
	 * Locate the process descriptor corresponding to process 'p',
	 * allocating space as needed.
	 *
	 * Verify that rowindex 'pm_rowindex' is free in the process
	 * descriptor.
	 *
	 * If not, allocate space for a descriptor and link the
	 * process descriptor and PMC.
	 */
	ri = PMC_TO_ROWINDEX(pm);

	/* mark process as using HWPMCs */
	PROC_LOCK(p);
	p->p_flag |= P_HWPMC;
	PROC_UNLOCK(p);

	if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL) {
		error = ENOMEM;
		goto fail;
	}

	if (pp->pp_pmcs[ri].pp_pmc == pm) { /* already present at slot [ri] */
		error = EEXIST;
		goto fail;
	}

	if (pp->pp_pmcs[ri].pp_pmc != NULL) {
		error = EBUSY;
		goto fail;
	}

	pmc_link_target_process(pm, pp);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) &&
	    (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0)
		pm->pm_flags |= PMC_F_NEEDS_LOGFILE;

	pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */

	/* issue an attach event to a configured log file */
	if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) {
		if (p->p_flag & P_KPROC) {
			fullpath = kernelname;
			freepath = NULL;
		} else {
			pmc_getfilename(p->p_textvp, &fullpath, &freepath);
			pmclog_process_pmcattach(pm, p->p_pid, fullpath);
		}
		free(freepath, M_TEMP);
		if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			pmc_log_process_mappings(pm->pm_owner, p);
	}

	return (0);
fail:
	PROC_LOCK(p);
	p->p_flag &= ~P_HWPMC;
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Attach a process and optionally its children
 */
static int
pmc_attach_process(struct proc *p, struct pmc *pm)
{
	int error;
	struct proc *top;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG5(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
	    PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

	/*
	 * If this PMC successfully allowed a GETMSR operation
	 * in the past, disallow further ATTACHes.
	 */
	if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0)
		return (EPERM);

	if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
		return (pmc_attach_one_process(p, pm));

	/*
	 * Traverse all child processes, attaching them to
	 * this PMC.
	 */
	sx_slock(&proctree_lock);

	top = p;
	for (;;) {
		if ((error = pmc_attach_one_process(p, pm)) != 0)
			break;
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				goto done;
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}

	if (error != 0)
		(void)pmc_detach_process(top, pm);

done:
	sx_sunlock(&proctree_lock);
	return (error);
}

/*
 * Detach a process from a PMC.  If there are no other PMCs tracking
 * this process, remove the process structure from its hash table.  If
 * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
 */
static int
pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
{
	int ri;
	struct pmc_process *pp;

	sx_assert(&pmc_sx, SX_XLOCKED);

	KASSERT(pm != NULL,
	    ("[pmc,%d] null pm pointer", __LINE__));

	ri = PMC_TO_ROWINDEX(pm);

	PMCDBG6(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
	    pm, ri, p, p->p_pid, p->p_comm, flags);

	if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
		return (ESRCH);

	if (pp->pp_pmcs[ri].pp_pmc != pm)
		return (EINVAL);

	pmc_unlink_target_process(pm, pp);

	/* Issue a detach entry if a log file is configured */
	if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE)
		pmclog_process_pmcdetach(pm, p->p_pid);

	/*
	 * If there are no PMCs targeting this process, we remove its
	 * descriptor from the target hash table and unset the P_HWPMC
	 * flag in the struct proc.
	 */
	KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc,
	    ("[pmc,%d] Illegal refcnt %d for process struct %p",
		__LINE__, pp->pp_refcnt, pp));

	if (pp->pp_refcnt != 0)	/* still a target of some PMC */
		return (0);

	pmc_remove_process_descriptor(pp);

	if (flags & PMC_FLAG_REMOVE)
		pmc_destroy_process_descriptor(pp);

	PROC_LOCK(p);
	p->p_flag &= ~P_HWPMC;
	PROC_UNLOCK(p);

	return (0);
}

/*
 * Detach a process and optionally its descendants from a PMC.
 */
static int
pmc_detach_process(struct proc *p, struct pmc *pm)
{
	struct proc *top;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG5(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
	    PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

	if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
		return (pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE));

	/*
	 * Traverse all children, detaching them from this PMC.  We
	 * ignore errors since we could be detaching a PMC from a
	 * partially attached proc tree.
	 */
	sx_slock(&proctree_lock);

	top = p;
	for (;;) {
		(void)pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);

		if (!LIST_EMPTY(&p->p_children)) {
			p = LIST_FIRST(&p->p_children);
		} else {
			for (;;) {
				if (p == top)
					goto done;
				if (LIST_NEXT(p, p_sibling)) {
					p = LIST_NEXT(p, p_sibling);
					break;
				}
				p = p->p_pptr;
			}
		}
	}
done:
	sx_sunlock(&proctree_lock);
	if (LIST_EMPTY(&pm->pm_targets))
		pm->pm_flags &= ~PMC_F_ATTACH_DONE;

	return (0);
}

/*
 * Handle events after an exec() for a process:
 *  - Inform log owners of the new exec() event
 *  - Release any PMCs owned by the process before the exec()
 *  - Detach PMCs from the target if required
 */
static void
pmc_process_exec(struct thread *td, struct pmckern_procexec *pk)
{
	struct pmc *pm;
	struct pmc_owner *po;
	struct pmc_process *pp;
	struct proc *p;
	char *fullpath, *freepath;
	u_int ri;
	bool is_using_hwpmcs;

	sx_assert(&pmc_sx, SX_XLOCKED);

	p = td->td_proc;
	pmc_getfilename(p->p_textvp, &fullpath, &freepath);

	PMC_EPOCH_ENTER();
	/* Inform owners of SS mode PMCs of the exec event. */
	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) {
		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) {
			pmclog_process_procexec(po, PMC_ID_INVALID, p->p_pid,
			    pk->pm_baseaddr, pk->pm_dynaddr, fullpath);
		}
	}
	PMC_EPOCH_EXIT();

	PROC_LOCK(p);
	is_using_hwpmcs = (p->p_flag & P_HWPMC) != 0;
	PROC_UNLOCK(p);

	if (!is_using_hwpmcs) {
		if (freepath != NULL)
			free(freepath, M_TEMP);
		return;
	}

	/*
	 * PMCs are not inherited across an exec(): remove any PMCs that this
	 * process is the owner of.
	 */
	if ((po = pmc_find_owner_descriptor(p)) != NULL) {
		pmc_remove_owner(po);
		pmc_destroy_owner_descriptor(po);
	}

	/*
	 * If the process being exec'ed is not the target of any PMC, we are
	 * done.
	 */
	if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) {
		if (freepath != NULL)
			free(freepath, M_TEMP);
		return;
	}

	/*
	 * Log the exec event to all monitoring owners. Skip owners who have
	 * already received the event because they had system sampling PMCs
	 * active.
	 */
	for (ri = 0; ri < md->pmd_npmc; ri++) {
		if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
			continue;

		po = pm->pm_owner;
		if (po->po_sscount == 0 &&
		    (po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) {
			pmclog_process_procexec(po, pm->pm_id, p->p_pid,
			    pk->pm_baseaddr, pk->pm_dynaddr, fullpath);
		}
	}

	if (freepath != NULL)
		free(freepath, M_TEMP);

	PMCDBG4(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
	    p, p->p_pid, p->p_comm, pk->pm_credentialschanged);

	if (pk->pm_credentialschanged == 0) /* no change */
		return;

	/*
	 * If the newly exec()'ed process has a different credential
	 * than before, allow it to be the target of a PMC only if
	 * the PMC's owner has sufficient privilege.
	 */
	for (ri = 0; ri < md->pmd_npmc; ri++) {
		if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
			if (!pmc_can_attach(pm, td->td_proc)) {
				pmc_detach_one_process(td->td_proc, pm,
				    PMC_FLAG_NONE);
			}
		}
	}

	KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= md->pmd_npmc,
	    ("[pmc,%d] Illegal ref count %u on pp %p", __LINE__,
		pp->pp_refcnt, pp));

	/*
	 * If this process is no longer the target of any
	 * PMCs, we can remove the process entry and free
	 * up space.
	 */
	if (pp->pp_refcnt == 0) {
		pmc_remove_process_descriptor(pp);
		pmc_destroy_process_descriptor(pp);
	}
}

/*
 * Thread context switch IN.
 */
static void
pmc_process_csw_in(struct thread *td)
{
	struct pmc *pm;
	struct pmc_classdep *pcd;
	struct pmc_cpu *pc;
	struct pmc_hw *phw __diagused;
	struct pmc_process *pp;
	struct pmc_thread *pt;
	struct proc *p;
	pmc_value_t newvalue;
	int cpu;
	u_int adjri, ri;

	p = td->td_proc;
	pt = NULL;
	if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
		return;

	KASSERT(pp->pp_proc == td->td_proc,
	    ("[pmc,%d] not my thread state", __LINE__));

	critical_enter(); /* no preemption from this point */

	cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */

	PMCDBG5(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
	    p->p_pid, p->p_comm, pp);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[pmc,%d] weird CPU id %d", __LINE__, cpu));

	pc = pmc_pcpu[cpu];
	for (ri = 0; ri < md->pmd_npmc; ri++) {
		if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
			continue;

		KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
		    ("[pmc,%d] Target PMC in non-virtual mode (%d)",
		    __LINE__, PMC_TO_MODE(pm)));
		KASSERT(PMC_TO_ROWINDEX(pm) == ri,
		    ("[pmc,%d] Row index mismatch pmc %d != ri %d",
		    __LINE__, PMC_TO_ROWINDEX(pm), ri));

		/*
		 * Only PMCs that are marked as 'RUNNING' need
		 * be placed on hardware.
		 */
		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0,
		    ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm,
		    (uintmax_t)counter_u64_fetch(pm->pm_runcount)));

		/* increment PMC runcount */
		counter_u64_add(pm->pm_runcount, 1);

		/* configure the HWPMC we are going to use. */
		pcd = pmc_ri_to_classdep(md, ri, &adjri);
		(void)pcd->pcd_config_pmc(cpu, adjri, pm);

		phw = pc->pc_hwpmcs[ri];

		KASSERT(phw != NULL,
		    ("[pmc,%d] null hw pointer", __LINE__));

		KASSERT(phw->phw_pmc == pm,
		    ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
			phw->phw_pmc, pm));

		/*
		 * Write out saved value and start the PMC.
		 *
		 * Sampling PMCs use a per-thread value, while
		 * counting mode PMCs use a per-pmc value that is
		 * inherited across descendants.
		 */
		if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
			if (pt == NULL)
				pt = pmc_find_thread_descriptor(pp, td,
				    PMC_FLAG_NONE);

			KASSERT(pt != NULL,
			    ("[pmc,%d] No thread found for td=%p", __LINE__,
			    td));

			mtx_pool_lock_spin(pmc_mtxpool, pm);

			/*
			 * If we have a thread descriptor, use the per-thread
			 * counter in the descriptor. If not, we will use
			 * a per-process counter.
			 *
			 * TODO: Remove the per-process "safety net" once
			 * we have thoroughly tested that we don't hit the
			 * above assert.
			 */
			if (pt != NULL) {
				if (pt->pt_pmcs[ri].pt_pmcval > 0)
					newvalue = pt->pt_pmcs[ri].pt_pmcval;
				else
					newvalue = pm->pm_sc.pm_reloadcount;
			} else {
				/*
				 * Use the saved value calculated after the most
				 * recent time a thread using the shared counter
				 * switched out. Reset the saved count in case
				 * another thread from this process switches in
				 * before any threads switch out.
				 */
				newvalue = pp->pp_pmcs[ri].pp_pmcval;
				pp->pp_pmcs[ri].pp_pmcval =
				    pm->pm_sc.pm_reloadcount;
			}
			mtx_pool_unlock_spin(pmc_mtxpool, pm);
			KASSERT(newvalue > 0 && newvalue <=
			    pm->pm_sc.pm_reloadcount,
			    ("[pmc,%d] pmcval outside of expected range cpu=%d "
			    "ri=%d pmcval=%jx pm_reloadcount=%jx", __LINE__,
			    cpu, ri, newvalue, pm->pm_sc.pm_reloadcount));
		} else {
			KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC,
			    ("[pmc,%d] illegal mode=%d", __LINE__,
			    PMC_TO_MODE(pm)));
			mtx_pool_lock_spin(pmc_mtxpool, pm);
			newvalue = PMC_PCPU_SAVED(cpu, ri) =
			    pm->pm_gv.pm_savedvalue;
			mtx_pool_unlock_spin(pmc_mtxpool, pm);
		}

		PMCDBG3(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue);

		(void)pcd->pcd_write_pmc(cpu, adjri, pm, newvalue);

		/* If a sampling mode PMC, reset stalled state. */
		if (PMC_TO_MODE(pm) == PMC_MODE_TS)
			pm->pm_pcpu_state[cpu].pps_stalled = 0;

		/* Indicate that we desire this to run. */
		pm->pm_pcpu_state[cpu].pps_cpustate = 1;

		/* Start the PMC. */
		(void)pcd->pcd_start_pmc(cpu, adjri, pm);
	}

	/*
	 * Perform any other architecture/cpu dependent thread
	 * switch-in actions.
	 */
	(void)(*md->pmd_switch_in)(pc, pp);

	critical_exit();
}

/*
 * Thread context switch OUT.
 */
static void
pmc_process_csw_out(struct thread *td)
{
	struct pmc *pm;
	struct pmc_classdep *pcd;
	struct pmc_cpu *pc;
	struct pmc_process *pp;
	struct pmc_thread *pt = NULL;
	struct proc *p;
	pmc_value_t newvalue;
	int64_t tmp;
	enum pmc_mode mode;
	int cpu;
	u_int adjri, ri;

	/*
	 * Locate our process descriptor; this may be NULL if
	 * this process is exiting and we have already removed
	 * the process from the target process table.
	 *
	 * Note that due to kernel preemption, multiple
	 * context switches may happen while the process is
	 * exiting.
	 *
	 * Note also that if the target process cannot be
	 * found we still need to deconfigure any PMCs that
	 * are currently running on hardware.
	 */
	p = td->td_proc;
	pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);

	critical_enter();

	cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */

	PMCDBG5(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
	    p->p_pid, p->p_comm, pp);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[pmc,%d] weird CPU id %d", __LINE__, cpu));

	pc = pmc_pcpu[cpu];

	/*
	 * When a PMC gets unlinked from a target process, it will
	 * be removed from the target's pp_pmcs[] array.
1646 	 *
1647 	 * However, on a MP system, the target could have been
1648 	 * executing on another CPU at the time of the unlink.
1649 	 * So, at context switch OUT time, we need to look at
1650 	 * the hardware to determine if a PMC is scheduled on
1651 	 * it.
1652 	 */
1653 	for (ri = 0; ri < md->pmd_npmc; ri++) {
1654 		pcd = pmc_ri_to_classdep(md, ri, &adjri);
1655 		pm  = NULL;
1656 		(void)(*pcd->pcd_get_config)(cpu, adjri, &pm);
1657 
1658 		if (pm == NULL)	/* nothing at this row index */
1659 			continue;
1660 
1661 		mode = PMC_TO_MODE(pm);
1662 		if (!PMC_IS_VIRTUAL_MODE(mode))
1663 			continue; /* not a process virtual PMC */
1664 
1665 		KASSERT(PMC_TO_ROWINDEX(pm) == ri,
1666 		    ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
1667 			__LINE__, PMC_TO_ROWINDEX(pm), ri));
1668 
1669 		/*
1670 		 * Change desired state, and then stop if not stalled.
1671 		 * This two-step dance should avoid race conditions where
1672 		 * an interrupt re-enables the PMC after this code has
1673 		 * already checked the pm_stalled flag.
1674 		 */
1675 		pm->pm_pcpu_state[cpu].pps_cpustate = 0;
1676 		if (pm->pm_pcpu_state[cpu].pps_stalled == 0)
1677 			(void)pcd->pcd_stop_pmc(cpu, adjri, pm);
1678 
1679 		KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
1680 		    ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm,
1681 		    (uintmax_t)counter_u64_fetch(pm->pm_runcount)));
1682 
1683 		/* reduce this PMC's runcount */
1684 		counter_u64_add(pm->pm_runcount, -1);
1685 
1686 		/*
1687 		 * If this PMC is associated with this process,
1688 		 * save the reading.
1689 		 */
1690 		if (pm->pm_state != PMC_STATE_DELETED && pp != NULL &&
1691 		    pp->pp_pmcs[ri].pp_pmc != NULL) {
1692 			KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
1693 			    ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__,
1694 				pm, ri, pp->pp_pmcs[ri].pp_pmc));
1695 			KASSERT(pp->pp_refcnt > 0,
1696 			    ("[pmc,%d] pp refcnt = %d", __LINE__,
1697 				pp->pp_refcnt));
1698 
1699 			(void)pcd->pcd_read_pmc(cpu, adjri, pm, &newvalue);
1700 
1701 			if (mode == PMC_MODE_TS) {
1702 				PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d val=%jd (samp)",
1703 				    cpu, ri, newvalue);
1704 
1705 				if (pt == NULL)
1706 					pt = pmc_find_thread_descriptor(pp, td,
1707 					    PMC_FLAG_NONE);
1708 
1709 				KASSERT(pt != NULL,
1710 				    ("[pmc,%d] No thread found for td=%p",
1711 				    __LINE__, td));
1712 
1713 				mtx_pool_lock_spin(pmc_mtxpool, pm);
1714 
1715 				/*
1716 				 * If we have a thread descriptor, save the
1717 				 * per-thread counter in the descriptor. If not,
1718 				 * we will update the per-process counter.
1719 				 *
1720 				 * TODO: Remove the per-process "safety net"
1721 				 * once we have thoroughly tested that we
1722 				 * don't hit the above assert.
1723 				 */
1724 				if (pt != NULL) {
1725 					pt->pt_pmcs[ri].pt_pmcval = newvalue;
1726 				} else {
1727 					/*
1728 					 * For sampling process-virtual PMCs,
1729 					 * newvalue is the number of events to
1730 					 * be seen until the next sampling
1731 					 * interrupt. We can just add the events
1732 					 * left from this invocation to the
1733 					 * counter, then adjust in case we
1734 					 * overflow our range.
1735 					 *
1736 					 * (Recall that we reload the counter
1737 					 * every time we use it.)
1738 					 */
1739 					pp->pp_pmcs[ri].pp_pmcval += newvalue;
1740 					if (pp->pp_pmcs[ri].pp_pmcval >
1741 					    pm->pm_sc.pm_reloadcount) {
1742 						pp->pp_pmcs[ri].pp_pmcval -=
1743 						    pm->pm_sc.pm_reloadcount;
1744 					}
1745 				}
1746 				mtx_pool_unlock_spin(pmc_mtxpool, pm);
1747 			} else {
1748 				tmp = newvalue - PMC_PCPU_SAVED(cpu, ri);
1749 
1750 				PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd (count)",
1751 				    cpu, ri, tmp);
1752 
1753 				/*
1754 				 * For counting process-virtual PMCs,
1755 				 * we expect the count to be
1756 		 * increasing monotonically, modulo a 64-bit
1757 		 * wraparound.
1758 				 */
1759 				KASSERT(tmp >= 0,
1760 				    ("[pmc,%d] negative increment cpu=%d "
1761 				     "ri=%d newvalue=%jx saved=%jx "
1762 				     "incr=%jx", __LINE__, cpu, ri,
1763 				     newvalue, PMC_PCPU_SAVED(cpu, ri), tmp));
1764 
1765 				mtx_pool_lock_spin(pmc_mtxpool, pm);
1766 				pm->pm_gv.pm_savedvalue += tmp;
1767 				pp->pp_pmcs[ri].pp_pmcval += tmp;
1768 				mtx_pool_unlock_spin(pmc_mtxpool, pm);
1769 
1770 				if (pm->pm_flags & PMC_F_LOG_PROCCSW)
1771 					pmclog_process_proccsw(pm, pp, tmp, td);
1772 			}
1773 		}
1774 
1775 		/* Mark hardware as free. */
1776 		(void)pcd->pcd_config_pmc(cpu, adjri, NULL);
1777 	}
1778 
1779 	/*
1780 	 * Perform any other architecture/cpu dependent thread
1781 	 * switch-out actions.
1782 	 */
1783 	(void)(*md->pmd_switch_out)(pc, pp);
1784 
1785 	critical_exit();
1786 }
1787 
1788 /*
1789  * A new thread for a process.
1790  */
1791 static void
1792 pmc_process_thread_add(struct thread *td)
1793 {
1794 	struct pmc_process *pmc;
1795 
1796 	pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE);
1797 	if (pmc != NULL)
1798 		pmc_find_thread_descriptor(pmc, td, PMC_FLAG_ALLOCATE);
1799 }
1800 
1801 /*
1802  * A thread delete for a process.
1803  */
1804 static void
1805 pmc_process_thread_delete(struct thread *td)
1806 {
1807 	struct pmc_process *pmc;
1808 
1809 	pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE);
1810 	if (pmc != NULL)
1811 		pmc_thread_descriptor_pool_free(pmc_find_thread_descriptor(pmc,
1812 		    td, PMC_FLAG_REMOVE));
1813 }
1814 
1815 /*
1816  * A userret() call for a thread.
1817  */
1818 static void
1819 pmc_process_thread_userret(struct thread *td)
1820 {
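	/*
	 * Pin to the current CPU so that the call chain capture
	 * operates on that CPU's sample buffers.
	 */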
1821 	sched_pin();
1822 	pmc_capture_user_callchain(curcpu, PMC_UR, td->td_frame);
1823 	sched_unpin();
1824 }
1825 
1826 /*
1827  * A mapping change for a process.
1828  */
1829 static void
1830 pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm)
1831 {
1832 	const struct pmc *pm;
1833 	const struct pmc_process *pp;
1834 	struct pmc_owner *po;
1835 	char *fullpath, *freepath;
1836 	pid_t pid;
1837 	int ri;
1838 
1839 	MPASS(!in_epoch(global_epoch_preempt));
1840 
1841 	freepath = fullpath = NULL;
1842 	pmc_getfilename((struct vnode *)pkm->pm_file, &fullpath, &freepath);
1843 
1844 	pid = td->td_proc->p_pid;
1845 
1846 	PMC_EPOCH_ENTER();
1847 	/* Inform owners of all system-wide sampling PMCs. */
1848 	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) {
1849 		if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1850 			pmclog_process_map_in(po, pid, pkm->pm_address,
1851 			    fullpath);
1852 	}
1853 
1854 	if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
1855 		goto done;
1856 
1857 	/*
1858 	 * Inform sampling PMC owners tracking this process.
1859 	 */
1860 	for (ri = 0; ri < md->pmd_npmc; ri++) {
1861 		if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
1862 		    PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
1863 			pmclog_process_map_in(pm->pm_owner,
1864 			    pid, pkm->pm_address, fullpath);
1865 		}
1866 	}
1867 
1868 done:
1869 	if (freepath != NULL)
1870 		free(freepath, M_TEMP);
1871 	PMC_EPOCH_EXIT();
1872 }
1873 
1874 /*
1875  * Log an munmap request.
1876  */
1877 static void
1878 pmc_process_munmap(struct thread *td, struct pmckern_map_out *pkm)
1879 {
1880 	const struct pmc *pm;
1881 	const struct pmc_process *pp;
1882 	struct pmc_owner *po;
1883 	pid_t pid;
1884 	int ri;
1885 
1886 	pid = td->td_proc->p_pid;
1887 
1888 	PMC_EPOCH_ENTER();
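	/* Inform owners of all system-wide sampling PMCs. */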
1889 	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) {
1890 		if (po->po_flags & PMC_PO_OWNS_LOGFILE)
1891 			pmclog_process_map_out(po, pid, pkm->pm_address,
1892 			    pkm->pm_address + pkm->pm_size);
1893 	}
1894 	PMC_EPOCH_EXIT();
1895 
1896 	if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
1897 		return;
1898 
1899 	for (ri = 0; ri < md->pmd_npmc; ri++) {
1900 		pm = pp->pp_pmcs[ri].pp_pmc;
1901 		if (pm != NULL && PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
1902 			pmclog_process_map_out(pm->pm_owner, pid,
1903 			    pkm->pm_address, pkm->pm_address + pkm->pm_size);
1904 		}
1905 	}
1906 }
1907 
1908 /*
1909  * Log mapping information about the kernel.
1910  */
1911 static void
1912 pmc_log_kernel_mappings(struct pmc *pm)
1913 {
1914 	struct pmc_owner *po;
1915 	struct pmckern_map_in *km, *kmbase;
1916 
1917 	MPASS(in_epoch(global_epoch_preempt) || sx_xlocked(&pmc_sx));
1918 	KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
1919 	    ("[pmc,%d] non-sampling PMC (%p) desires mapping information",
1920 		__LINE__, (void *) pm));
1921 
1922 	po = pm->pm_owner;
1923 	if ((po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE) != 0)
1924 		return;
1925 
1926 	if (PMC_TO_MODE(pm) == PMC_MODE_SS)
1927 		pmc_process_allproc(pm);
1928 
1929 	/*
1930 	 * Log the current set of kernel modules.
1931 	 */
1932 	kmbase = linker_hwpmc_list_objects();
1933 	for (km = kmbase; km->pm_file != NULL; km++) {
1934 		PMCDBG2(LOG,REG,1,"%s %p", (char *)km->pm_file,
1935 		    (void *)km->pm_address);
1936 		pmclog_process_map_in(po, (pid_t)-1, km->pm_address,
1937 		    km->pm_file);
1938 	}
1939 	free(kmbase, M_LINKER);
1940 
1941 	po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE;
1942 }
1943 
1944 /*
1945  * Log the mappings for a single process.
1946  */
1947 static void
1948 pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
1949 {
1950 	vm_map_t map;
1951 	vm_map_entry_t entry;
1952 	vm_object_t obj, lobj, tobj;
1953 	vm_offset_t last_end;
1954 	vm_offset_t start_addr;
1955 	struct vnode *vp, *last_vp;
1956 	struct vmspace *vm;
1957 	char *fullpath, *freepath;
1958 	u_int last_timestamp;
1959 
1960 	last_vp = NULL;
1961 	last_end = (vm_offset_t)0;
1962 	fullpath = freepath = NULL;
1963 
1964 	if ((vm = vmspace_acquire_ref(p)) == NULL)
1965 		return;
1966 
1967 	map = &vm->vm_map;
1968 	vm_map_lock_read(map);
1969 	VM_MAP_ENTRY_FOREACH(entry, map) {
1970 		if (entry == NULL) {
1971 			PMCDBG2(LOG,OPS,2, "hwpmc: vm_map entry unexpectedly "
1972 			    "NULL! pid=%d vm_map=%p\n", p->p_pid, map);
1973 			break;
1974 		}
1975 
1976 		/*
1977 		 * We only care about executable map entries.
1978 		 */
1979 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
1980 		    (entry->protection & VM_PROT_EXECUTE) == 0 ||
1981 		    entry->object.vm_object == NULL) {
1982 			continue;
1983 		}
1984 
1985 		obj = entry->object.vm_object;
1986 		VM_OBJECT_RLOCK(obj);
1987 
1988 		/*
1989 		 * Walk the backing_object list to find the base (non-shadowed)
1990 		 * vm_object.
1991 		 */
1992 		for (lobj = tobj = obj; tobj != NULL;
1993 		    tobj = tobj->backing_object) {
1994 			if (tobj != obj)
1995 				VM_OBJECT_RLOCK(tobj);
1996 			if (lobj != obj)
1997 				VM_OBJECT_RUNLOCK(lobj);
1998 			lobj = tobj;
1999 		}
2000 
2001 		/*
2002 		 * At this point lobj is the base vm_object and it is locked.
2003 		 */
2004 		if (lobj == NULL) {
2005 			PMCDBG3(LOG,OPS,2,
2006 			    "hwpmc: lobj unexpectedly NULL! pid=%d "
2007 			    "vm_map=%p vm_obj=%p\n", p->p_pid, map, obj);
2008 			VM_OBJECT_RUNLOCK(obj);
2009 			continue;
2010 		}
2011 
2012 		vp = vm_object_vnode(lobj);
2013 		if (vp == NULL) {
2014 			if (lobj != obj)
2015 				VM_OBJECT_RUNLOCK(lobj);
2016 			VM_OBJECT_RUNLOCK(obj);
2017 			continue;
2018 		}
2019 
2020 		/*
2021 		 * Skip contiguous regions that point to the same vnode, so we
2022 		 * don't emit redundant MAP-IN directives.
2023 		 */
2024 		if (entry->start == last_end && vp == last_vp) {
2025 			last_end = entry->end;
2026 			if (lobj != obj)
2027 				VM_OBJECT_RUNLOCK(lobj);
2028 			VM_OBJECT_RUNLOCK(obj);
2029 			continue;
2030 		}
2031 
2032 		/*
2033 		 * We don't want to keep the proc's vm_map or this vm_object
2034 		 * locked while we walk the pathname, since vn_fullpath() can
2035 		 * sleep.  However, if we drop the lock, it's possible for
2036 		 * concurrent activity to modify the vm_map list.  To protect
2037 		 * against this, we save the vm_map timestamp before we release
2038 		 * the lock, and check it after we reacquire the lock below.
2039 		 */
2040 		start_addr = entry->start;
2041 		last_end = entry->end;
2042 		last_timestamp = map->timestamp;
2043 		vm_map_unlock_read(map);
2044 
2045 		vref(vp);
2046 		if (lobj != obj)
2047 			VM_OBJECT_RUNLOCK(lobj);
2048 		VM_OBJECT_RUNLOCK(obj);
2049 
2050 		freepath = NULL;
2051 		pmc_getfilename(vp, &fullpath, &freepath);
2052 		last_vp = vp;
2053 
2054 		vrele(vp);
2055 
2056 		vp = NULL;
2057 		pmclog_process_map_in(po, p->p_pid, start_addr, fullpath);
2058 		if (freepath != NULL)
2059 			free(freepath, M_TEMP);
2060 
2061 		vm_map_lock_read(map);
2062 
2063 		/*
2064 		 * If our saved timestamp doesn't match, this means
2065 		 * that the vm_map was modified out from under us and
2066 		 * we can't trust our current "entry" pointer.  Do a
2067 		 * new lookup for this entry.  If there is no entry
2068 		 * for this address range, vm_map_lookup_entry() will
2069 		 * return the previous one, so we always want to go to
2070 		 * the next entry on the next loop iteration.
2071 		 *
2072 		 * There is an edge condition here that can occur if
2073 		 * there is no entry at or before this address.  In
2074 		 * this situation, vm_map_lookup_entry returns
2075 		 * &map->header, which would cause our loop to abort
2076 		 * without processing the rest of the map.  However,
2077 		 * in practice this will never happen for a process
2078 		 * vm_map.  This is because the executable's text
2079 		 * segment is the first mapping in the proc's address
2080 		 * space, and this mapping is never removed until the
2081 		 * process exits, so there will always be a non-header
2082 		 * entry at or before the requested address for
2083 		 * vm_map_lookup_entry to return.
2084 		 */
2085 		if (map->timestamp != last_timestamp)
2086 			vm_map_lookup_entry(map, last_end - 1, &entry);
2087 	}
2088 
2089 	vm_map_unlock_read(map);
2090 	vmspace_free(vm);
2091 	return;
2092 }
2093 
2094 /*
2095  * Log mappings for all processes in the system.
2096  */
2097 static void
2098 pmc_log_all_process_mappings(struct pmc_owner *po)
2099 {
2100 	struct proc *p, *top;
2101 
2102 	sx_assert(&pmc_sx, SX_XLOCKED);
2103 
2104 	if ((p = pfind(1)) == NULL)
2105 		panic("[pmc,%d] Cannot find init", __LINE__);
2106 
2107 	PROC_UNLOCK(p);
2108 
2109 	sx_slock(&proctree_lock);
2110 
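	/* Walk the process tree depth-first, starting at init. */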
2111 	top = p;
2112 	for (;;) {
2113 		pmc_log_process_mappings(po, p);
2114 		if (!LIST_EMPTY(&p->p_children))
2115 			p = LIST_FIRST(&p->p_children);
2116 		else for (;;) {
2117 			if (p == top)
2118 				goto done;
2119 			if (LIST_NEXT(p, p_sibling)) {
2120 				p = LIST_NEXT(p, p_sibling);
2121 				break;
2122 			}
2123 			p = p->p_pptr;
2124 		}
2125 	}
2126 done:
2127 	sx_sunlock(&proctree_lock);
2128 }
2129 
2130 #ifdef HWPMC_DEBUG
2131 const char *pmc_hooknames[] = {
2132 	/* these strings correspond to PMC_FN_* in <sys/pmckern.h> */
2133 	"",
2134 	"EXEC",
2135 	"CSW-IN",
2136 	"CSW-OUT",
2137 	"SAMPLE",
2138 	"UNUSED1",
2139 	"UNUSED2",
2140 	"MMAP",
2141 	"MUNMAP",
2142 	"CALLCHAIN-NMI",
2143 	"CALLCHAIN-SOFT",
2144 	"SOFTSAMPLING",
2145 	"THR-CREATE",
2146 	"THR-EXIT",
2147 	"THR-USERRET",
2148 	"THR-CREATE-LOG",
2149 	"THR-EXIT-LOG",
2150 	"PROC-CREATE-LOG"
2151 };
2152 #endif
2153 
2154 /*
2155  * The 'hook' invoked from the kernel proper
2156  */
2157 static int
2158 pmc_hook_handler(struct thread *td, int function, void *arg)
2159 {
2160 	int cpu;
2161 
2162 	PMCDBG4(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
2163 	    pmc_hooknames[function], arg);
2164 
2165 	switch (function) {
2166 	case PMC_FN_PROCESS_EXEC:
2167 		pmc_process_exec(td, (struct pmckern_procexec *)arg);
2168 		break;
2169 
2170 	case PMC_FN_CSW_IN:
2171 		pmc_process_csw_in(td);
2172 		break;
2173 
2174 	case PMC_FN_CSW_OUT:
2175 		pmc_process_csw_out(td);
2176 		break;
2177 
2178 	/*
2179 	 * Process accumulated PC samples.
2180 	 *
2181 	 * This function is expected to be called by hardclock() for
2182 	 * each CPU that has accumulated PC samples.
2183 	 *
2184 	 * This function is to be executed on the CPU whose samples
2185 	 * are being processed.
2186 	 */
2187 	case PMC_FN_DO_SAMPLES:
2188 		/*
2189 		 * Clear the cpu-specific bit in the CPU mask before
2190 		 * doing the rest of the processing.  If the NMI handler
2191 		 * gets invoked after the "DPCPU_SET()" call
2192 		 * below but before "pmc_process_samples()" gets
2193 		 * around to processing the interrupt, then we will
2194 		 * come back here at the next hardclock() tick (and
2195 		 * may find nothing to do if "pmc_process_samples()"
2196 		 * had already processed the interrupt).  We don't
2197 		 * lose the interrupt sample.
2198 		 */
2199 		DPCPU_SET(pmc_sampled, 0);
2200 		cpu = PCPU_GET(cpuid);
2201 		pmc_process_samples(cpu, PMC_HR);
2202 		pmc_process_samples(cpu, PMC_SR);
2203 		pmc_process_samples(cpu, PMC_UR);
2204 		break;
2205 
2206 	case PMC_FN_MMAP:
2207 		pmc_process_mmap(td, (struct pmckern_map_in *)arg);
2208 		break;
2209 
2210 	case PMC_FN_MUNMAP:
2211 		MPASS(in_epoch(global_epoch_preempt) || sx_xlocked(&pmc_sx));
2212 		pmc_process_munmap(td, (struct pmckern_map_out *)arg);
2213 		break;
2214 
2215 	case PMC_FN_PROC_CREATE_LOG:
2216 		pmc_process_proccreate((struct proc *)arg);
2217 		break;
2218 
2219 	case PMC_FN_USER_CALLCHAIN:
2220 		/*
2221 		 * Record a call chain.
2222 		 */
2223 		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2224 		    __LINE__));
2225 
2226 		pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_HR,
2227 		    (struct trapframe *)arg);
2228 
2229 		KASSERT(td->td_pinned == 1,
2230 		    ("[pmc,%d] invalid td_pinned value", __LINE__));
2231 		sched_unpin();  /* Can migrate safely now. */
2232 
2233 		td->td_pflags &= ~TDP_CALLCHAIN;
2234 		break;
2235 
2236 	case PMC_FN_USER_CALLCHAIN_SOFT:
2237 		/*
2238 		 * Record a call chain.
2239 		 */
2240 		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2241 		    __LINE__));
2242 
2243 		cpu = PCPU_GET(cpuid);
2244 		pmc_capture_user_callchain(cpu, PMC_SR,
2245 		    (struct trapframe *) arg);
2246 
2247 		KASSERT(td->td_pinned == 1,
2248 		    ("[pmc,%d] invalid td_pinned value", __LINE__));
2249 
2250 		sched_unpin();  /* Can migrate safely now. */
2251 
2252 		td->td_pflags &= ~TDP_CALLCHAIN;
2253 		break;
2254 
2255 	case PMC_FN_SOFT_SAMPLING:
2256 		/*
2257 		 * Call soft PMC sampling intr.
2258 		 */
2259 		pmc_soft_intr((struct pmckern_soft *)arg);
2260 		break;
2261 
2262 	case PMC_FN_THR_CREATE:
2263 		pmc_process_thread_add(td);
2264 		pmc_process_threadcreate(td);
2265 		break;
2266 
2267 	case PMC_FN_THR_CREATE_LOG:
2268 		pmc_process_threadcreate(td);
2269 		break;
2270 
2271 	case PMC_FN_THR_EXIT:
2272 		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2273 		    __LINE__));
2274 		pmc_process_thread_delete(td);
2275 		pmc_process_threadexit(td);
2276 		break;
2277 	case PMC_FN_THR_EXIT_LOG:
2278 		pmc_process_threadexit(td);
2279 		break;
2280 	case PMC_FN_THR_USERRET:
2281 		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2282 		    __LINE__));
2283 		pmc_process_thread_userret(td);
2284 		break;
2285 	default:
2286 #ifdef HWPMC_DEBUG
2287 		KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
2288 #endif
2289 		break;
2290 	}
2291 
2292 	return (0);
2293 }
2294 
2295 /*
2296  * Allocate a 'struct pmc_owner' descriptor in the owner hash table.
2297  */
2298 static struct pmc_owner *
2299 pmc_allocate_owner_descriptor(struct proc *p)
2300 {
2301 	struct pmc_owner *po;
2302 	struct pmc_ownerhash *poh;
2303 	uint32_t hindex;
2304 
2305 	hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
2306 	poh = &pmc_ownerhash[hindex];
2307 
2308 	/* Allocate and initialize the owner descriptor. */
2309 	po = malloc(sizeof(struct pmc_owner), M_PMC, M_WAITOK | M_ZERO);
2310 	po->po_owner = p;
2311 	LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */
2312 
2313 	TAILQ_INIT(&po->po_logbuffers);
2314 	mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN);
2315 
2316 	PMCDBG4(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
2317 	    p, p->p_pid, p->p_comm, po);
2318 
2319 	return (po);
2320 }
2321 
2322 static void
2323 pmc_destroy_owner_descriptor(struct pmc_owner *po)
2324 {
2325 
2326 	PMCDBG4(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)",
2327 	    po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm);
2328 
2329 	mtx_destroy(&po->po_mtx);
2330 	free(po, M_PMC);
2331 }
2332 
2333 /*
2334  * Allocate a thread descriptor from the free pool.
2335  *
2336  * NOTE: This *can* return NULL.
2337  */
2338 static struct pmc_thread *
2339 pmc_thread_descriptor_pool_alloc(void)
2340 {
2341 	struct pmc_thread *pt;
2342 
2343 	mtx_lock_spin(&pmc_threadfreelist_mtx);
2344 	if ((pt = LIST_FIRST(&pmc_threadfreelist)) != NULL) {
2345 		LIST_REMOVE(pt, pt_next);
2346 		pmc_threadfreelist_entries--;
2347 	}
2348 	mtx_unlock_spin(&pmc_threadfreelist_mtx);
2349 
2350 	return (pt);
2351 }
2352 
2353 /*
2354  * Add a thread descriptor to the free pool. We use this instead of free()
2355  * to maintain a cache of free entries. Additionally, we can safely call
2356  * this function when we cannot call free(), such as in a critical section.
2357  */
2358 static void
2359 pmc_thread_descriptor_pool_free(struct pmc_thread *pt)
2360 {
2361 
2362 	if (pt == NULL)
2363 		return;
2364 
2365 	memset(pt, 0, THREADENTRY_SIZE);
2366 	mtx_lock_spin(&pmc_threadfreelist_mtx);
2367 	LIST_INSERT_HEAD(&pmc_threadfreelist, pt, pt_next);
2368 	pmc_threadfreelist_entries++;
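	/*
	 * Trim the pool asynchronously: free(9) cannot be called while
	 * holding a spin mutex or from a critical section.
	 */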
2369 	if (pmc_threadfreelist_entries > pmc_threadfreelist_max)
2370 		taskqueue_enqueue(taskqueue_fast, &free_task);
2371 	mtx_unlock_spin(&pmc_threadfreelist_mtx);
2372 }
2373 
2374 /*
2375  * An asynchronous task to manage the free list.
2376  */
2377 static void
2378 pmc_thread_descriptor_pool_free_task(void *arg __unused, int pending __unused)
2379 {
2380 	struct pmc_thread *pt;
2381 	LIST_HEAD(, pmc_thread) tmplist;
2382 	int delta;
2383 
2384 	LIST_INIT(&tmplist);
2385 
2386 	/* Determine what changes, if any, we need to make. */
2387 	mtx_lock_spin(&pmc_threadfreelist_mtx);
2388 	delta = pmc_threadfreelist_entries - pmc_threadfreelist_max;
2389 	while (delta > 0 && (pt = LIST_FIRST(&pmc_threadfreelist)) != NULL) {
2390 		delta--;
2391 		pmc_threadfreelist_entries--;
2392 		LIST_REMOVE(pt, pt_next);
2393 		LIST_INSERT_HEAD(&tmplist, pt, pt_next);
2394 	}
2395 	mtx_unlock_spin(&pmc_threadfreelist_mtx);
2396 
2397 	/* If there are entries to free, free them. */
2398 	while (!LIST_EMPTY(&tmplist)) {
2399 		pt = LIST_FIRST(&tmplist);
2400 		LIST_REMOVE(pt, pt_next);
2401 		free(pt, M_PMC);
2402 	}
2403 }
2404 
2405 /*
2406  * Drain the thread free pool, freeing all allocations.
2407  */
2408 static void
2409 pmc_thread_descriptor_pool_drain(void)
2410 {
2411 	struct pmc_thread *pt, *next;
2412 
2413 	LIST_FOREACH_SAFE(pt, &pmc_threadfreelist, pt_next, next) {
2414 		LIST_REMOVE(pt, pt_next);
2415 		free(pt, M_PMC);
2416 	}
2417 }
2418 
2419 /*
2420  * Find the descriptor corresponding to thread 'td', adding or removing it
2421  * as specified by 'mode'.
2422  *
2423  * Note that in addition to the mode flags accepted by
2424  * pmc_find_process_descriptor(), this function also supports:
2425  * PMC_FLAG_NOWAIT: Causes the function to not wait for mallocs.
2426  *     This makes it safe to call while holding certain other locks.
2427  */
2428 static struct pmc_thread *
2429 pmc_find_thread_descriptor(struct pmc_process *pp, struct thread *td,
2430     uint32_t mode)
2431 {
2432 	struct pmc_thread *pt = NULL, *ptnew = NULL;
2433 	int wait_flag;
2434 
2435 	KASSERT(td != NULL, ("[pmc,%d] called to add NULL td", __LINE__));
2436 
2437 	/*
2438 	 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case prior to
2439 	 * acquiring the lock.
2440 	 */
2441 	if ((mode & PMC_FLAG_ALLOCATE) != 0) {
2442 		if ((ptnew = pmc_thread_descriptor_pool_alloc()) == NULL) {
2443 			wait_flag = M_WAITOK;
2444 			if ((mode & PMC_FLAG_NOWAIT) != 0 ||
2445 			    in_epoch(global_epoch_preempt))
2446 				wait_flag = M_NOWAIT;
2447 
2448 			ptnew = malloc(THREADENTRY_SIZE, M_PMC,
2449 			    wait_flag | M_ZERO);
2450 		}
2451 	}
2452 
2453 	mtx_lock_spin(pp->pp_tdslock);
2454 	LIST_FOREACH(pt, &pp->pp_tds, pt_next) {
2455 		if (pt->pt_td == td)
2456 			break;
2457 	}
2458 
2459 	if ((mode & PMC_FLAG_REMOVE) != 0 && pt != NULL)
2460 		LIST_REMOVE(pt, pt_next);
2461 
2462 	if ((mode & PMC_FLAG_ALLOCATE) != 0 && pt == NULL && ptnew != NULL) {
2463 		pt = ptnew;
2464 		ptnew = NULL;
2465 		pt->pt_td = td;
2466 		LIST_INSERT_HEAD(&pp->pp_tds, pt, pt_next);
2467 	}
2468 
2469 	mtx_unlock_spin(pp->pp_tdslock);
2470 
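	/* Free any pre-allocated descriptor that went unused. */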
2471 	if (ptnew != NULL) {
2472 		free(ptnew, M_PMC);
2473 	}
2474 
2475 	return (pt);
2476 }
2477 
2478 /*
2479  * Try to add thread descriptors for each thread in a process.
2480  */
2481 static void
2482 pmc_add_thread_descriptors_from_proc(struct proc *p, struct pmc_process *pp)
2483 {
2484 	struct pmc_thread **tdlist;
2485 	struct thread *curtd;
2486 	int i, tdcnt, tdlistsz;
2487 
2488 	KASSERT(!PROC_LOCKED(p), ("[pmc,%d] proc unexpectedly locked",
2489 	    __LINE__));
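	/*
	 * Estimate the number of threads, then count them under the
	 * process lock; if the estimate was too low, retry with a
	 * larger list.
	 */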
2490 	tdcnt = 32;
2491 restart:
2492 	tdlistsz = roundup2(tdcnt, 32);
2493 
2494 	tdcnt = 0;
2495 	tdlist = malloc(sizeof(struct pmc_thread *) * tdlistsz, M_TEMP,
2496 	    M_WAITOK);
2497 
2498 	PROC_LOCK(p);
2499 	FOREACH_THREAD_IN_PROC(p, curtd)
2500 		tdcnt++;
2501 	if (tdcnt >= tdlistsz) {
2502 		PROC_UNLOCK(p);
2503 		free(tdlist, M_TEMP);
2504 		goto restart;
2505 	}
2506 
2507 	/*
2508 	 * Try to add each thread to the list without sleeping. If unable,
2509 	 * add to a queue to retry after dropping the process lock.
2510 	 */
2511 	tdcnt = 0;
2512 	FOREACH_THREAD_IN_PROC(p, curtd) {
2513 		tdlist[tdcnt] = pmc_find_thread_descriptor(pp, curtd,
2514 		    PMC_FLAG_ALLOCATE | PMC_FLAG_NOWAIT);
2515 		if (tdlist[tdcnt] == NULL) {
2516 			PROC_UNLOCK(p);
2517 			for (i = 0; i <= tdcnt; i++)
2518 				pmc_thread_descriptor_pool_free(tdlist[i]);
2519 			free(tdlist, M_TEMP);
2520 			goto restart;
2521 		}
2522 		tdcnt++;
2523 	}
2524 	PROC_UNLOCK(p);
2525 	free(tdlist, M_TEMP);
2526 }
2527 
2528 /*
2529  * Find the descriptor corresponding to process 'p', adding or removing it
2530  * as specified by 'mode'.
2531  */
2532 static struct pmc_process *
2533 pmc_find_process_descriptor(struct proc *p, uint32_t mode)
2534 {
2535 	struct pmc_process *pp, *ppnew;
2536 	struct pmc_processhash *pph;
2537 	uint32_t hindex;
2538 
2539 	hindex = PMC_HASH_PTR(p, pmc_processhashmask);
2540 	pph = &pmc_processhash[hindex];
2541 
2542 	ppnew = NULL;
2543 
2544 	/*
2545 	 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case since we
2546 	 * cannot call malloc(9) once we hold a spin lock.
2547 	 */
2548 	if ((mode & PMC_FLAG_ALLOCATE) != 0)
2549 		ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc *
2550 		    sizeof(struct pmc_targetstate), M_PMC, M_WAITOK | M_ZERO);
2551 
2552 	mtx_lock_spin(&pmc_processhash_mtx);
2553 	LIST_FOREACH(pp, pph, pp_next) {
2554 		if (pp->pp_proc == p)
2555 			break;
2556 	}
2557 
2558 	if ((mode & PMC_FLAG_REMOVE) != 0 && pp != NULL)
2559 		LIST_REMOVE(pp, pp_next);
2560 
2561 	if ((mode & PMC_FLAG_ALLOCATE) != 0 && pp == NULL && ppnew != NULL) {
2562 		ppnew->pp_proc = p;
2563 		LIST_INIT(&ppnew->pp_tds);
2564 		ppnew->pp_tdslock = mtx_pool_find(pmc_mtxpool, ppnew);
2565 		LIST_INSERT_HEAD(pph, ppnew, pp_next);
2566 		mtx_unlock_spin(&pmc_processhash_mtx);
2567 		pp = ppnew;
2568 		ppnew = NULL;
2569 
2570 		/* Add thread descriptors for this process' current threads. */
2571 		pmc_add_thread_descriptors_from_proc(p, pp);
2572 	} else
2573 		mtx_unlock_spin(&pmc_processhash_mtx);
2574 
2575 	if (ppnew != NULL)
2576 		free(ppnew, M_PMC);
2577 	return (pp);
2578 }
2579 
2580 /*
2581  * Remove a process descriptor from the process hash table.
2582  */
2583 static void
2584 pmc_remove_process_descriptor(struct pmc_process *pp)
2585 {
2586 	KASSERT(pp->pp_refcnt == 0,
2587 	    ("[pmc,%d] Removing process descriptor %p with count %d",
2588 	     __LINE__, pp, pp->pp_refcnt));
2589 
2590 	mtx_lock_spin(&pmc_processhash_mtx);
2591 	LIST_REMOVE(pp, pp_next);
2592 	mtx_unlock_spin(&pmc_processhash_mtx);
2593 }
2594 
2595 /*
2596  * Destroy a process descriptor.
2597  */
2598 static void
2599 pmc_destroy_process_descriptor(struct pmc_process *pp)
2600 {
2601 	struct pmc_thread *pmc_td;
2602 
2603 	while ((pmc_td = LIST_FIRST(&pp->pp_tds)) != NULL) {
2604 		LIST_REMOVE(pmc_td, pt_next);
2605 		pmc_thread_descriptor_pool_free(pmc_td);
2606 	}
2607 	free(pp, M_PMC);
2608 }
2609 
2610 /*
2611  * Find an owner descriptor corresponding to proc 'p'.
2612  */
2613 static struct pmc_owner *
2614 pmc_find_owner_descriptor(struct proc *p)
2615 {
2616 	struct pmc_owner *po;
2617 	struct pmc_ownerhash *poh;
2618 	uint32_t hindex;
2619 
2620 	hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
2621 	poh = &pmc_ownerhash[hindex];
2622 
2623 	po = NULL;
2624 	LIST_FOREACH(po, poh, po_next) {
2625 		if (po->po_owner == p)
2626 			break;
2627 	}
2628 
2629 	PMCDBG5(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> "
2630 	    "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po);
2631 
2632 	return (po);
2633 }
2634 
2635 /*
2636  * Allocate a pmc descriptor and initialize its fields.
2637  */
2638 static struct pmc *
2639 pmc_allocate_pmc_descriptor(void)
2640 {
2641 	struct pmc *pmc;
2642 
2643 	pmc = malloc(sizeof(struct pmc), M_PMC, M_WAITOK | M_ZERO);
2644 	pmc->pm_runcount = counter_u64_alloc(M_WAITOK);
2645 	pmc->pm_pcpu_state = malloc(sizeof(struct pmc_pcpu_state) * mp_ncpus,
2646 	    M_PMC, M_WAITOK | M_ZERO);
2647 	PMCDBG1(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
2648 
2649 	return (pmc);
2650 }
2651 
2652 /*
2653  * Destroy a pmc descriptor.
2654  */
2655 static void
2656 pmc_destroy_pmc_descriptor(struct pmc *pm)
2657 {
2658 
2659 	KASSERT(pm->pm_state == PMC_STATE_DELETED ||
2660 	    pm->pm_state == PMC_STATE_FREE,
2661 	    ("[pmc,%d] destroying non-deleted PMC", __LINE__));
2662 	KASSERT(LIST_EMPTY(&pm->pm_targets),
2663 	    ("[pmc,%d] destroying pmc with targets", __LINE__));
2664 	KASSERT(pm->pm_owner == NULL,
2665 	    ("[pmc,%d] destroying pmc attached to an owner", __LINE__));
2666 	KASSERT(counter_u64_fetch(pm->pm_runcount) == 0,
2667 	    ("[pmc,%d] pmc has non-zero run count %ju", __LINE__,
2668 	    (uintmax_t)counter_u64_fetch(pm->pm_runcount)));
2669 
2670 	counter_u64_free(pm->pm_runcount);
2671 	free(pm->pm_pcpu_state, M_PMC);
2672 	free(pm, M_PMC);
2673 }
2674 
2675 static void
2676 pmc_wait_for_pmc_idle(struct pmc *pm)
2677 {
2678 #ifdef INVARIANTS
2679 	volatile int maxloop;
2680 
2681 	maxloop = 100 * pmc_cpu_max();
2682 #endif
2683 	/*
2684 	 * Loop (with a forced context switch) until the PMC's runcount
2685 	 * comes down to zero.
2686 	 */
2687 	pmclog_flush(pm->pm_owner, 1);
2688 	while (counter_u64_fetch(pm->pm_runcount) > 0) {
2689 		pmclog_flush(pm->pm_owner, 1);
2690 #ifdef INVARIANTS
2691 		maxloop--;
2692 		KASSERT(maxloop > 0,
2693 		    ("[pmc,%d] (ri%d, rc%ju) waiting too long for "
2694 		     "pmc to be free", __LINE__, PMC_TO_ROWINDEX(pm),
2695 		     (uintmax_t)counter_u64_fetch(pm->pm_runcount)));
2696 #endif
2697 		pmc_force_context_switch();
2698 	}
2699 }
2700 
2701 /*
2702  * This function does the following things:
2703  *
2704  *  - detaches the PMC from hardware
2705  *  - unlinks all target threads that were attached to it
2706  *  - removes the PMC from its owner's list
2707  *  - destroys the PMC private mutex
2708  *
2709  * Once this function completes, the given pmc pointer can be freed by
2710  * calling pmc_destroy_pmc_descriptor().
2711  */
2712 static void
2713 pmc_release_pmc_descriptor(struct pmc *pm)
2714 {
2715 	struct pmc_binding pb;
2716 	struct pmc_classdep *pcd;
2717 	struct pmc_hw *phw __diagused;
2718 	struct pmc_owner *po;
2719 	struct pmc_process *pp;
2720 	struct pmc_target *ptgt, *tmp;
2721 	enum pmc_mode mode;
2722 	u_int adjri, ri, cpu;
2723 
2724 	sx_assert(&pmc_sx, SX_XLOCKED);
2725 	KASSERT(pm, ("[pmc,%d] null pmc", __LINE__));
2726 
2727 	ri   = PMC_TO_ROWINDEX(pm);
2728 	pcd  = pmc_ri_to_classdep(md, ri, &adjri);
2729 	mode = PMC_TO_MODE(pm);
2730 
2731 	PMCDBG3(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri,
2732 	    mode);
2733 
2734 	/*
2735 	 * First, we take the PMC off hardware.
2736 	 */
2737 	cpu = 0;
2738 	if (PMC_IS_SYSTEM_MODE(mode)) {
2739 		/*
2740 		 * A system mode PMC runs on a specific CPU. Switch
2741 		 * to this CPU and turn hardware off.
2742 		 */
2743 		pmc_save_cpu_binding(&pb);
2744 		cpu = PMC_TO_CPU(pm);
2745 		pmc_select_cpu(cpu);
2746 
2747 		/* switch off non-stalled CPUs */
2748 		pm->pm_pcpu_state[cpu].pps_cpustate = 0;
2749 		if (pm->pm_state == PMC_STATE_RUNNING &&
2750 			pm->pm_pcpu_state[cpu].pps_stalled == 0) {
2751 
2752 			phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
2753 
2754 			KASSERT(phw->phw_pmc == pm,
2755 			    ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)",
2756 				__LINE__, ri, phw->phw_pmc, pm));
2757 			PMCDBG2(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri);
2758 
2759 			critical_enter();
2760 			(void)pcd->pcd_stop_pmc(cpu, adjri, pm);
2761 			critical_exit();
2762 		}
2763 
2764 		PMCDBG2(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri);
2765 
2766 		critical_enter();
2767 		(void)pcd->pcd_config_pmc(cpu, adjri, NULL);
2768 		critical_exit();
2769 
2770 		/* adjust the global and process count of SS mode PMCs */
2771 		if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) {
2772 			po = pm->pm_owner;
2773 			po->po_sscount--;
2774 			if (po->po_sscount == 0) {
2775 				atomic_subtract_rel_int(&pmc_ss_count, 1);
2776 				CK_LIST_REMOVE(po, po_ssnext);
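				/* Wait for any in-flight epoch readers to drain. */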
2777 				epoch_wait_preempt(global_epoch_preempt);
2778 			}
2779 		}
2780 		pm->pm_state = PMC_STATE_DELETED;
2781 
2782 		pmc_restore_cpu_binding(&pb);
2783 
2784 		/*
2785 		 * We could have references to this PMC structure in the
2786 		 * per-cpu sample queues.  Wait for the queue to drain.
2787 		 */
2788 		pmc_wait_for_pmc_idle(pm);
2789 
2790 	} else if (PMC_IS_VIRTUAL_MODE(mode)) {
2791 		/*
2792 		 * A virtual PMC could be running on multiple CPUs at a given
2793 		 * instant.
2794 		 *
2795 		 * By marking its state as DELETED, we ensure that this PMC is
2796 		 * never further scheduled on hardware.
2797 		 *
2798 		 * Then we wait till all CPUs are done with this PMC.
2799 		 */
2800 		pm->pm_state = PMC_STATE_DELETED;
2801 
2802 		/* Wait for the PMCs runcount to come to zero. */
2803 		pmc_wait_for_pmc_idle(pm);
2804 
2805 		/*
2806 		 * At this point the PMC is off all CPUs and cannot be freshly
2807 		 * scheduled onto a CPU. It is now safe to unlink all targets
2808 		 * from this PMC. If a process-record's refcount falls to zero,
2809 		 * we remove it from the hash table. The module-wide SX lock
2810 		 * protects us from races.
2811 		 */
2812 		LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
2813 			pp = ptgt->pt_process;
2814 			pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */
2815 
2816 			PMCDBG1(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt);
2817 
2818 			/*
2819 			 * If the target process record shows that no PMCs are
2820 			 * attached to it, reclaim its space.
2821 			 */
2822 			if (pp->pp_refcnt == 0) {
2823 				pmc_remove_process_descriptor(pp);
2824 				pmc_destroy_process_descriptor(pp);
2825 			}
2826 		}
2827 
2828 		cpu = curthread->td_oncpu; /* set up cpu for pcd_release_pmc() */
2829 	}
2830 
2831 	/*
2832 	 * Release any MD resources.
2833 	 */
2834 	(void)pcd->pcd_release_pmc(cpu, adjri, pm);
2835 
2836 	/*
2837 	 * Update row disposition.
2838 	 */
2839 	if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm)))
2840 		PMC_UNMARK_ROW_STANDALONE(ri);
2841 	else
2842 		PMC_UNMARK_ROW_THREAD(ri);
2843 
2844 	/* Unlink from the owner's list. */
2845 	if (pm->pm_owner != NULL) {
2846 		LIST_REMOVE(pm, pm_next);
2847 		pm->pm_owner = NULL;
2848 	}
2849 }
2850 
2851 /*
2852  * Register an owner and a pmc.
2853  */
2854 static int
2855 pmc_register_owner(struct proc *p, struct pmc *pmc)
2856 {
2857 	struct pmc_owner *po;
2858 
2859 	sx_assert(&pmc_sx, SX_XLOCKED);
2860 
2861 	if ((po = pmc_find_owner_descriptor(p)) == NULL) {
2862 		if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
2863 			return (ENOMEM);
2864 	}
2865 
2866 	KASSERT(pmc->pm_owner == NULL,
2867 	    ("[pmc,%d] attempting to own an initialized PMC", __LINE__));
2868 	pmc->pm_owner = po;
2869 
2870 	LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next);
2871 
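	/* Mark the process as using HWPMCs. */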
2872 	PROC_LOCK(p);
2873 	p->p_flag |= P_HWPMC;
2874 	PROC_UNLOCK(p);
2875 
2876 	if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0)
2877 		pmclog_process_pmcallocate(pmc);
2878 
2879 	PMCDBG2(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p",
2880 	    po, pmc);
2881 
2882 	return (0);
2883 }
2884 
2885 /*
2886  * Return the current row disposition:
2887  * == 0 => FREE
2888  *  > 0 => PROCESS MODE
2889  *  < 0 => SYSTEM MODE
2890  */
2891 int
2892 pmc_getrowdisp(int ri)
2893 {
2894 	return (pmc_pmcdisp[ri]);
2895 }
2896 
2897 /*
2898  * Check if a PMC at row index 'ri' can be allocated to the current
2899  * process.
2900  *
2901  * Allocation can fail if:
2902  *   - the current process is already being profiled by a PMC at index 'ri',
2903  *     attached to it via OP_PMCATTACH.
2904  *   - the current process has already allocated a PMC at index 'ri'
2905  *     via OP_ALLOCATE.
2906  */
2907 static bool
2908 pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu)
2909 {
2910 	struct pmc *pm;
2911 	struct pmc_owner *po;
2912 	struct pmc_process *pp;
2913 	enum pmc_mode mode;
2914 
2915 	PMCDBG5(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d "
2916 	    "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu);
2917 
2918 	/*
2919 	 * We shouldn't have already allocated a process-mode PMC at
2920 	 * row index 'ri'.
2921 	 *
2922 	 * We shouldn't have allocated a system-wide PMC on the same
2923 	 * CPU and same RI.
2924 	 */
2925 	if ((po = pmc_find_owner_descriptor(p)) != NULL) {
2926 		LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
2927 			if (PMC_TO_ROWINDEX(pm) == ri) {
2928 				mode = PMC_TO_MODE(pm);
2929 				if (PMC_IS_VIRTUAL_MODE(mode))
2930 					return (false);
2931 				if (PMC_IS_SYSTEM_MODE(mode) &&
2932 				    PMC_TO_CPU(pm) == cpu)
2933 					return (false);
2934 			}
2935 		}
2936 	}
2937 
2938 	/*
2939 	 * We also shouldn't be the target of any PMC at this index
2940 	 * since otherwise a PMC_ATTACH to ourselves will fail.
2941 	 */
2942 	if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
2943 		if (pp->pp_pmcs[ri].pp_pmc != NULL)
2944 			return (false);
2945 
2946 	PMCDBG4(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
2947 	    p, p->p_pid, p->p_comm, ri);
2948 	return (true);
2949 }
2950 
2951 /*
2952  * Check if a given PMC at row index 'ri' can be currently used in
2953  * mode 'mode'.
2954  */
2955 static bool
2956 pmc_can_allocate_row(int ri, enum pmc_mode mode)
2957 {
2958 	enum pmc_disp disp;
2959 
2960 	sx_assert(&pmc_sx, SX_XLOCKED);
2961 
2962 	PMCDBG2(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
2963 
2964 	if (PMC_IS_SYSTEM_MODE(mode))
2965 		disp = PMC_DISP_STANDALONE;
2966 	else
2967 		disp = PMC_DISP_THREAD;
2968 
2969 	/*
2970 	 * check disposition for PMC row 'ri':
2971 	 *
2972 	 * Expected disposition		Row-disposition		Result
2973 	 *
2974 	 * STANDALONE			STANDALONE or FREE	proceed
2975 	 * STANDALONE			THREAD			fail
2976 	 * THREAD			THREAD or FREE		proceed
2977 	 * THREAD			STANDALONE		fail
2978 	 */
2979 	if (!PMC_ROW_DISP_IS_FREE(ri) &&
2980 	    !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
2981 	    !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
2982 		return (false);
2983 
2984 	/*
2985 	 * All OK
2986 	 */
2987 	PMCDBG2(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
2988 	return (true);
2989 }
2990 
2991 /*
2992  * Find a PMC descriptor with user handle 'pmcid' for thread 'td'.
2993  */
2994 static struct pmc *
2995 pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
2996 {
2997 	struct pmc *pm;
2998 
2999 	KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc,
3000 	    ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__,
3001 	    PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc));
3002 
3003 	LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
3004 		if (pm->pm_id == pmcid)
3005 			return (pm);
3006 	}
3007 
3008 	return (NULL);
3009 }
3010 
3011 static int
3012 pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
3013 {
3014 	struct pmc *pm, *opm;
3015 	struct pmc_owner *po;
3016 	struct pmc_process *pp;
3017 
3018 	PMCDBG1(PMC,FND,1, "find-pmc id=%d", pmcid);
3019 	if (PMC_ID_TO_ROWINDEX(pmcid) >= md->pmd_npmc)
3020 		return (EINVAL);
3021 
3022 	if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL) {
3023 		 * In the case of PMC_F_DESCENDANTS child processes, we will
3024 		 * not find the current process in the owners hash list.  Find
3025 		 * the owner process first, and from there look up the po.
3026 		 * process first and from there lookup the po.
3027 		 */
3028 		pp = pmc_find_process_descriptor(curthread->td_proc,
3029 		    PMC_FLAG_NONE);
3030 		if (pp == NULL)
3031 			return (ESRCH);
3032 		opm = pp->pp_pmcs[PMC_ID_TO_ROWINDEX(pmcid)].pp_pmc;
3033 		if (opm == NULL)
3034 			return (ESRCH);
3035 		if ((opm->pm_flags &
3036 		    (PMC_F_ATTACHED_TO_OWNER | PMC_F_DESCENDANTS)) !=
3037 		    (PMC_F_ATTACHED_TO_OWNER | PMC_F_DESCENDANTS))
3038 			return (ESRCH);
3039 
3040 		po = opm->pm_owner;
3041 	}
3042 
3043 	if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
3044 		return (EINVAL);
3045 
3046 	PMCDBG2(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
3047 
3048 	*pmc = pm;
3049 	return (0);
3050 }
3051 
3052 /*
3053  * Start a PMC.
3054  */
3055 static int
3056 pmc_start(struct pmc *pm)
3057 {
3058 	struct pmc_binding pb;
3059 	struct pmc_classdep *pcd;
3060 	struct pmc_owner *po;
3061 	pmc_value_t v;
3062 	enum pmc_mode mode;
3063 	int adjri, error, cpu, ri;
3064 
3065 	KASSERT(pm != NULL,
3066 	    ("[pmc,%d] null pm", __LINE__));
3067 
3068 	mode = PMC_TO_MODE(pm);
3069 	ri   = PMC_TO_ROWINDEX(pm);
3070 	pcd  = pmc_ri_to_classdep(md, ri, &adjri);
3071 
3072 	error = 0;
3073 	po = pm->pm_owner;
3074 
3075 	PMCDBG3(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri);
3076 
3079 	/*
3080 	 * Disallow PMCSTART if a logfile is required but has not been
3081 	 * configured yet.
3082 	 */
3083 	if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) != 0 &&
3084 	    (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
3085 		return (EDOOFUS);	/* programming error */
3086 
3087 	/*
3088 	 * If this is a sampling mode PMC, log mapping information for
3089 	 * the kernel modules that are currently loaded.
3090 	 */
3091 	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3092 		pmc_log_kernel_mappings(pm);
3093 
3094 	if (PMC_IS_VIRTUAL_MODE(mode)) {
3095 		/*
3096 		 * If a PMCATTACH has never been done on this PMC,
3097 		 * attach it to its owner process.
3098 		 */
3099 		if (LIST_EMPTY(&pm->pm_targets)) {
3100 			error = (pm->pm_flags & PMC_F_ATTACH_DONE) != 0 ?
3101 			    ESRCH : pmc_attach_process(po->po_owner, pm);
3102 		}
3103 
3104 		/*
3105 		 * If the PMC is attached to its owner, then force a context
3106 		 * switch to ensure that the MD state gets set correctly.
3107 		 */
3108 		if (error == 0) {
3109 			pm->pm_state = PMC_STATE_RUNNING;
3110 			if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) != 0)
3111 				pmc_force_context_switch();
3112 		}
3113 
3114 		return (error);
3115 	}
3116 
3117 	/*
3118 	 * A system-wide PMC.
3119 	 *
3120 	 * Add the owner to the global list if this is a system-wide
3121 	 * sampling PMC.
3122 	 */
3123 	if (mode == PMC_MODE_SS) {
3124 		/*
3125 		 * Log mapping information for all existing processes in the
3126 		 * system.  Subsequent mappings are logged as they happen;
3127 		 * see pmc_process_mmap().
3128 		 */
3129 		if (po->po_logprocmaps == 0) {
3130 			pmc_log_all_process_mappings(po);
3131 			po->po_logprocmaps = 1;
3132 		}
3133 		po->po_sscount++;
3134 		if (po->po_sscount == 1) {
3135 			atomic_add_rel_int(&pmc_ss_count, 1);
3136 			CK_LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext);
3137 			PMCDBG1(PMC,OPS,1, "po=%p in global list", po);
3138 		}
3139 	}
3140 
3141 	/*
3142 	 * Move to the CPU associated with this
3143 	 * PMC, and start the hardware.
3144 	 */
3145 	pmc_save_cpu_binding(&pb);
3146 	cpu = PMC_TO_CPU(pm);
3147 	if (!pmc_cpu_is_active(cpu))
3148 		return (ENXIO);
3149 	pmc_select_cpu(cpu);
3150 
3151 	/*
3152 	 * Global PMCs are configured at allocation time, so write out
3153 	 * the initial value and start the PMC.
3154 	 */
3155 	pm->pm_state = PMC_STATE_RUNNING;
3156 
3157 	critical_enter();
3158 	v = PMC_IS_SAMPLING_MODE(mode) ? pm->pm_sc.pm_reloadcount :
3159 	    pm->pm_sc.pm_initial;
3160 	if ((error = pcd->pcd_write_pmc(cpu, adjri, pm, v)) == 0) {
3161 		/* If a sampling mode PMC, reset stalled state. */
3162 		if (PMC_IS_SAMPLING_MODE(mode))
3163 			pm->pm_pcpu_state[cpu].pps_stalled = 0;
3164 
3165 		/* Indicate that we desire this to run. Start it. */
3166 		pm->pm_pcpu_state[cpu].pps_cpustate = 1;
3167 		error = pcd->pcd_start_pmc(cpu, adjri, pm);
3168 	}
3169 	critical_exit();
3170 
3171 	pmc_restore_cpu_binding(&pb);
3172 	return (error);
3173 }
3174 
3175 /*
3176  * Stop a PMC.
3177  */
3178 static int
3179 pmc_stop(struct pmc *pm)
3180 {
3181 	struct pmc_binding pb;
3182 	struct pmc_classdep *pcd;
3183 	struct pmc_owner *po;
3184 	int adjri, cpu, error, ri;
3185 
3186 	KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
3187 
3188 	PMCDBG3(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm, PMC_TO_MODE(pm),
3189 	    PMC_TO_ROWINDEX(pm));
3190 
3191 	pm->pm_state = PMC_STATE_STOPPED;
3192 
3193 	/*
3194 	 * If the PMC is a virtual mode one, changing the state to non-RUNNING
3195 	 * is enough to ensure that the PMC never gets scheduled.
3196 	 *
3197 	 * If this PMC is currently running on a CPU, then it will be handled
3198 	 * correctly at the time its target process is context switched out.
3199 	 */
3200 	if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
3201 		return (0);
3202 
3203 	/*
3204 	 * A system-mode PMC. Move to the CPU associated with this PMC, and
3205 	 * stop the hardware. We update the 'initial count' so that a
3206 	 * subsequent PMCSTART will resume counting from the current hardware
3207 	 * count.
3208 	 */
3209 	pmc_save_cpu_binding(&pb);
3210 
3211 	cpu = PMC_TO_CPU(pm);
3212 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
3213 	    ("[pmc,%d] illegal cpu=%d", __LINE__, cpu));
3214 	if (!pmc_cpu_is_active(cpu))
3215 		return (ENXIO);
3216 
3217 	pmc_select_cpu(cpu);
3218 
3219 	ri = PMC_TO_ROWINDEX(pm);
3220 	pcd = pmc_ri_to_classdep(md, ri, &adjri);
3221 
3222 	pm->pm_pcpu_state[cpu].pps_cpustate = 0;
3223 	critical_enter();
3224 	if ((error = pcd->pcd_stop_pmc(cpu, adjri, pm)) == 0) {
3225 		error = pcd->pcd_read_pmc(cpu, adjri, pm,
3226 		    &pm->pm_sc.pm_initial);
3227 	}
3228 	critical_exit();
3229 
3230 	pmc_restore_cpu_binding(&pb);
3231 
3232 	/* Remove this owner from the global list of SS PMC owners. */
3233 	po = pm->pm_owner;
3234 	if (PMC_TO_MODE(pm) == PMC_MODE_SS) {
3235 		po->po_sscount--;
3236 		if (po->po_sscount == 0) {
3237 			atomic_subtract_rel_int(&pmc_ss_count, 1);
3238 			CK_LIST_REMOVE(po, po_ssnext);
3239 			epoch_wait_preempt(global_epoch_preempt);
3240 			PMCDBG1(PMC,OPS,2,"po=%p removed from global list", po);
3241 		}
3242 	}
3243 
3244 	return (error);
3245 }
3246 
3247 static struct pmc_classdep *
3248 pmc_class_to_classdep(enum pmc_class class)
3249 {
3250 	int n;
3251 
3252 	for (n = 0; n < md->pmd_nclass; n++) {
3253 		if (md->pmd_classdep[n].pcd_class == class)
3254 			return (&md->pmd_classdep[n]);
3255 	}
3256 	return (NULL);
3257 }
3258 
3259 #if defined(HWPMC_DEBUG) && defined(KTR)
3260 static const char *pmc_op_to_name[] = {
3261 #undef	__PMC_OP
3262 #define	__PMC_OP(N, D)	#N ,
3263 	__PMC_OPS()
3264 	NULL
3265 };
3266 #endif
3267 
3268 /*
3269  * The syscall interface
3270  */
3271 
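/*
 * Take the exclusive pmc sx lock; bail out with the given return value
 * if the driver is being unloaded (pmc_hook has been cleared).
 */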
3272 #define	PMC_GET_SX_XLOCK(...) do {		\
3273 	sx_xlock(&pmc_sx);			\
3274 	if (pmc_hook == NULL) {			\
3275 		sx_xunlock(&pmc_sx);		\
3276 		return __VA_ARGS__;		\
3277 	}					\
3278 } while (0)
3279 
3280 #define	PMC_DOWNGRADE_SX() do {			\
3281 	sx_downgrade(&pmc_sx);			\
3282 	is_sx_downgraded = true;		\
3283 } while (0)
3284 
3285 /*
3286  * Main body of PMC_OP_PMCALLOCATE.
3287  */
3288 static int
3289 pmc_do_op_pmcallocate(struct thread *td, struct pmc_op_pmcallocate *pa)
3290 {
3291 	struct proc *p;
3292 	struct pmc *pmc;
3293 	struct pmc_binding pb;
3294 	struct pmc_classdep *pcd;
3295 	struct pmc_hw *phw;
3296 	enum pmc_mode mode;
3297 	enum pmc_class class;
3298 	uint32_t caps, flags;
3299 	u_int cpu;
3300 	int adjri, n;
3301 	int error;
3302 
3303 	class = pa->pm_class;
3304 	caps  = pa->pm_caps;
3305 	flags = pa->pm_flags;
3306 	mode  = pa->pm_mode;
3307 	cpu   = pa->pm_cpu;
3308 
3309 	p = td->td_proc;
3310 
3311 	/* Requested mode must exist. */
3312 	if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC &&
3313 	     mode != PMC_MODE_TS && mode != PMC_MODE_TC))
3314 		return (EINVAL);
3315 
3316 	/* Requested CPU must be valid. */
3317 	if (cpu != PMC_CPU_ANY && cpu >= pmc_cpu_max())
3318 		return (EINVAL);
3319 
3320 	/*
3321 	 * Virtual PMCs should only ask for a default CPU.
3322 	 * System mode PMCs need to specify a non-default CPU.
3323 	 */
3324 	if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != PMC_CPU_ANY) ||
3325 	    (PMC_IS_SYSTEM_MODE(mode) && cpu == PMC_CPU_ANY))
3326 		return (EINVAL);
3327 
3328 	/*
3329 	 * Check that an inactive CPU is not being asked for.
3330 	 */
3331 	if (PMC_IS_SYSTEM_MODE(mode) && !pmc_cpu_is_active(cpu))
3332 		return (ENXIO);
3333 
3334 	/*
3335 	 * Refuse an allocation for a system-wide PMC if this process has been
3336 	 * jailed, or if this process lacks super-user credentials and the
3337 	 * sysctl tunable 'security.bsd.unprivileged_syspmcs' is zero.
3338 	 */
3339 	if (PMC_IS_SYSTEM_MODE(mode)) {
3340 		if (jailed(td->td_ucred))
3341 			return (EPERM);
3342 		if (!pmc_unprivileged_syspmcs) {
3343 			error = priv_check(td, PRIV_PMC_SYSTEM);
3344 			if (error != 0)
3345 				return (error);
3346 		}
3347 	}
3348 
3349 	/*
3350 	 * Look for valid values for 'pm_flags'.
3351 	 */
3352 	if ((flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW |
3353 	    PMC_F_LOG_PROCEXIT | PMC_F_CALLCHAIN | PMC_F_USERCALLCHAIN |
3354 	    PMC_F_EV_PMU)) != 0)
3355 		return (EINVAL);
3356 
3357 	/* PMC_F_USERCALLCHAIN is only valid with PMC_F_CALLCHAIN. */
3358 	if ((flags & (PMC_F_CALLCHAIN | PMC_F_USERCALLCHAIN)) ==
3359 	    PMC_F_USERCALLCHAIN)
3360 		return (EINVAL);
3361 
3362 	/* PMC_F_USERCALLCHAIN is only valid for sampling mode. */
3363 	if ((flags & PMC_F_USERCALLCHAIN) != 0 && mode != PMC_MODE_TS &&
3364 	    mode != PMC_MODE_SS)
3365 		return (EINVAL);
3366 
3367 	/* Process logging options are not allowed for system PMCs. */
3368 	if (PMC_IS_SYSTEM_MODE(mode) &&
3369 	    (flags & (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT)) != 0)
3370 		return (EINVAL);
3371 
3372 	/*
3373 	 * All sampling mode PMCs need to be able to interrupt the CPU.
3374 	 */
3375 	if (PMC_IS_SAMPLING_MODE(mode))
3376 		caps |= PMC_CAP_INTERRUPT;
3377 
3378 	/* A valid class specifier should have been passed in. */
3379 	pcd = pmc_class_to_classdep(class);
3380 	if (pcd == NULL)
3381 		return (EINVAL);
3382 
3383 	/* The requested PMC capabilities should be feasible. */
3384 	if ((pcd->pcd_caps & caps) != caps)
3385 		return (EOPNOTSUPP);
3386 
3387 	PMCDBG4(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d", pa->pm_ev,
3388 	    caps, mode, cpu);
3389 
3390 	pmc = pmc_allocate_pmc_descriptor();
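	/* The row index is not known yet; use an invalid placeholder. */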
3391 	pmc->pm_id    = PMC_ID_MAKE_ID(cpu, pa->pm_mode, class, PMC_ID_INVALID);
3392 	pmc->pm_event = pa->pm_ev;
3393 	pmc->pm_state = PMC_STATE_FREE;
3394 	pmc->pm_caps  = caps;
3395 	pmc->pm_flags = flags;
3396 
3397 	/* XXX set lower bound on sampling for process counters */
3398 	if (PMC_IS_SAMPLING_MODE(mode)) {
3399 		/*
3400 		 * Don't permit requested sample rate to be less than
3401 		 * pmc_mincount.
3402 		 */
3403 		if (pa->pm_count < MAX(1, pmc_mincount))
3404 			log(LOG_WARNING, "pmcallocate: passed sample "
3405 			    "rate %ju - setting to %u\n",
3406 			    (uintmax_t)pa->pm_count,
3407 			    MAX(1, pmc_mincount));
3408 		pmc->pm_sc.pm_reloadcount = MAX(MAX(1, pmc_mincount),
3409 		    pa->pm_count);
3410 	} else
3411 		pmc->pm_sc.pm_initial = pa->pm_count;
3412 
3413 	/* switch thread to CPU 'cpu' */
3414 	pmc_save_cpu_binding(&pb);
3415 
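/*
 * A hardware row on 'cpu' may be used if it is currently unallocated,
 * or if the hardware marks it as shareable.
 */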
3416 #define	PMC_IS_SHAREABLE_PMC(cpu, n)				\
3417 	(pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state &		\
3418 	 PMC_PHW_FLAG_IS_SHAREABLE)
3419 #define	PMC_IS_UNALLOCATED(cpu, n)				\
3420 	(pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
3421 
3422 	if (PMC_IS_SYSTEM_MODE(mode)) {
3423 		pmc_select_cpu(cpu);
3424 		for (n = pcd->pcd_ri; n < md->pmd_npmc; n++) {
3425 			pcd = pmc_ri_to_classdep(md, n, &adjri);
3426 
3427 			if (!pmc_can_allocate_row(n, mode) ||
3428 			    !pmc_can_allocate_rowindex(p, n, cpu))
3429 				continue;
3430 			if (!PMC_IS_UNALLOCATED(cpu, n) &&
3431 			    !PMC_IS_SHAREABLE_PMC(cpu, n))
3432 				continue;
3433 
3434 			if (pcd->pcd_allocate_pmc(cpu, adjri, pmc, pa) == 0) {
3435 				/* Success. */
3436 				break;
3437 			}
3438 		}
3439 	} else {
3440 		/* Process virtual mode */
3441 		for (n = pcd->pcd_ri; n < md->pmd_npmc; n++) {
3442 			pcd = pmc_ri_to_classdep(md, n, &adjri);
3443 
3444 			if (!pmc_can_allocate_row(n, mode) ||
3445 			    !pmc_can_allocate_rowindex(p, n, PMC_CPU_ANY))
3446 				continue;
3447 
3448 			if (pcd->pcd_allocate_pmc(td->td_oncpu, adjri, pmc,
3449 			    pa) == 0) {
3450 				/* Success. */
3451 				break;
3452 			}
3453 		}
3454 	}
3455 
3456 #undef	PMC_IS_UNALLOCATED
3457 #undef	PMC_IS_SHAREABLE_PMC
3458 
3459 	pmc_restore_cpu_binding(&pb);
3460 
3461 	if (n == md->pmd_npmc) {
3462 		pmc_destroy_pmc_descriptor(pmc);
3463 		return (EINVAL);
3464 	}
3465 
3466 	/* Fill in the correct value in the ID field. */
3467 	pmc->pm_id = PMC_ID_MAKE_ID(cpu, mode, class, n);
3468 
3469 	PMCDBG5(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x",
3470 	    pmc->pm_event, class, mode, n, pmc->pm_id);
3471 
3472 	/* Process mode PMCs with logging enabled need log files. */
3473 	if ((pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW)) != 0)
3474 		pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
3475 
3476 	/* All system mode sampling PMCs require a log file. */
3477 	if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode))
3478 		pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
3479 
3480 	/*
3481 	 * Configure global pmc's immediately.
3482 	 */
3483 	if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) {
3484 		pmc_save_cpu_binding(&pb);
3485 		pmc_select_cpu(cpu);
3486 
3487 		phw = pmc_pcpu[cpu]->pc_hwpmcs[n];
3488 		pcd = pmc_ri_to_classdep(md, n, &adjri);
3489 
3490 		if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 ||
3491 		    (error = pcd->pcd_config_pmc(cpu, adjri, pmc)) != 0) {
3492 			(void)pcd->pcd_release_pmc(cpu, adjri, pmc);
3493 			pmc_destroy_pmc_descriptor(pmc);
3494 			pmc_restore_cpu_binding(&pb);
3495 			return (EPERM);
3496 		}
3497 
3498 		pmc_restore_cpu_binding(&pb);
3499 	}
3500 
3501 	pmc->pm_state = PMC_STATE_ALLOCATED;
3502 	pmc->pm_class = class;
3503 
3504 	/*
3505 	 * Mark row disposition.
3506 	 */
3507 	if (PMC_IS_SYSTEM_MODE(mode))
3508 		PMC_MARK_ROW_STANDALONE(n);
3509 	else
3510 		PMC_MARK_ROW_THREAD(n);
3511 
3512 	/*
3513 	 * Register this PMC with the current thread as its owner.
3514 	 */
3515 	error = pmc_register_owner(p, pmc);
3516 	if (error != 0) {
3517 		pmc_release_pmc_descriptor(pmc);
3518 		pmc_destroy_pmc_descriptor(pmc);
3519 		return (error);
3520 	}
3521 
3522 	/*
3523 	 * Return the allocated index.
3524 	 */
3525 	pa->pm_pmcid = pmc->pm_id;
3526 	return (0);
3527 }
3528 
3529 /*
3530  * Main body of PMC_OP_PMCATTACH.
3531  */
3532 static int
3533 pmc_do_op_pmcattach(struct thread *td, struct pmc_op_pmcattach a)
3534 {
3535 	struct pmc *pm;
3536 	struct proc *p;
3537 	int error;
3538 
3539 	sx_assert(&pmc_sx, SX_XLOCKED);
3540 
3541 	if (a.pm_pid < 0) {
3542 		return (EINVAL);
3543 	} else if (a.pm_pid == 0) {
3544 		a.pm_pid = td->td_proc->p_pid;
3545 	}
3546 
3547 	error = pmc_find_pmc(a.pm_pmc, &pm);
3548 	if (error != 0)
3549 		return (error);
3550 
3551 	if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm)))
3552 		return (EINVAL);
3553 
3554 	/* PMCs may be (re)attached only when allocated or stopped */
3555 	if (pm->pm_state == PMC_STATE_RUNNING) {
3556 		return (EBUSY);
3557 	} else if (pm->pm_state != PMC_STATE_ALLOCATED &&
3558 	    pm->pm_state != PMC_STATE_STOPPED) {
3559 		return (EINVAL);
3560 	}
3561 
3562 	/* lookup pid */
3563 	if ((p = pfind(a.pm_pid)) == NULL)
3564 		return (ESRCH);
3565 
3566 	/*
3567 	 * Ignore processes that are in the middle of exiting.
3568 	 */
3569 	if ((p->p_flag & P_WEXIT) != 0) {
3570 		PROC_UNLOCK(p);	/* pfind() returns a locked process */
3571 		return (ESRCH);
3572 	}
3573 
3574 	/*
3575 	 * We are allowed to attach a PMC to a process if we can debug it.
3576 	 */
3577 	error = p_candebug(curthread, p);
3578 
3579 	PROC_UNLOCK(p);
3580 
3581 	if (error == 0)
3582 		error = pmc_attach_process(p, pm);
3583 
3584 	return (error);
3585 }
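
/*
 * From userland this operation is normally reached through libpmc's
 * pmc_attach(3).  A hedged sketch of self-profiling with a counter in
 * process-virtual counting mode (error handling elided; the
 * "instructions" event name is illustrative and machine-dependent,
 * and older libpmc versions lack the final count argument to
 * pmc_allocate()):
 *
 *	pmc_id_t id;
 *
 *	pmc_init();
 *	pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY, &id, 0);
 *	pmc_attach(id, 0);	-- pm_pid == 0 means "attach to self"
 */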
3586 
3587 /*
3588  * Main body of PMC_OP_PMCDETACH.
3589  */
3590 static int
3591 pmc_do_op_pmcdetach(struct thread *td, struct pmc_op_pmcattach a)
3592 {
3593 	struct pmc *pm;
3594 	struct proc *p;
3595 	int error;
3596 
3597 	if (a.pm_pid < 0)
3598 		return (EINVAL);
3599 	else if (a.pm_pid == 0)
3600 		a.pm_pid = td->td_proc->p_pid;
3601 
3602 	error = pmc_find_pmc(a.pm_pmc, &pm);
3603 	if (error != 0)
3604 		return (error);
3605 
3606 	if ((p = pfind(a.pm_pid)) == NULL)
3607 		return (ESRCH);
3608 
3609 	/*
3610 	 * Treat processes that are in the process of exiting as if they were
3611 	 * not present.
3612 	 */
3613 	if ((p->p_flag & P_WEXIT) != 0) {
3614 		PROC_UNLOCK(p);
3615 		return (ESRCH);
3616 	}
3617 
3618 	PROC_UNLOCK(p);	/* pfind() returns a locked process */
3619 
3620 	if (error == 0)
3621 		error = pmc_detach_process(p, pm);
3622 
3623 	return (error);
3624 }
3625 
3626 /*
3627  * Main body of PMC_OP_PMCRELEASE.
3628  */
3629 static int
3630 pmc_do_op_pmcrelease(pmc_id_t pmcid)
3631 {
3632 	struct pmc_owner *po;
3633 	struct pmc *pm;
3634 	int error;
3635 
3636 	/*
3637 	 * Find PMC pointer for the named PMC.
3638 	 *
3639 	 * Use pmc_release_pmc_descriptor() to switch off the
3640 	 * PMC, remove all its target threads, and remove the
3641 	 * PMC from its owner's list.
3642 	 *
3643 	 * Remove the owner record if this is the last PMC
3644 	 * owned.
3645 	 *
3646 	 * Free up space.
3647 	 */
3648 	error = pmc_find_pmc(pmcid, &pm);
3649 	if (error != 0)
3650 		return (error);
3651 
3652 	po = pm->pm_owner;
3653 	pmc_release_pmc_descriptor(pm);
3654 	pmc_maybe_remove_owner(po);
3655 	pmc_destroy_pmc_descriptor(pm);
3656 
3657 	return (error);
3658 }
3659 
3660 /*
3661  * Main body of PMC_OP_PMCRW.
3662  */
3663 static int
3664 pmc_do_op_pmcrw(const struct pmc_op_pmcrw *prw, pmc_value_t *valp)
3665 {
3666 	struct pmc_binding pb;
3667 	struct pmc_classdep *pcd;
3668 	struct pmc *pm;
3669 	u_int cpu, ri, adjri;
3670 	int error;
3671 
3672 	PMCDBG2(PMC,OPS,1, "rw id=%d flags=0x%x", prw->pm_pmcid, prw->pm_flags);
3673 
3674 	/* Must have at least one flag set. */
3675 	if ((prw->pm_flags & (PMC_F_OLDVALUE | PMC_F_NEWVALUE)) == 0)
3676 		return (EINVAL);
3677 
3678 	/* Locate PMC descriptor. */
3679 	error = pmc_find_pmc(prw->pm_pmcid, &pm);
3680 	if (error != 0)
3681 		return (error);
3682 
3683 	/* The PMC must be in a readable state: allocated, stopped or running. */
3684 	if (pm->pm_state != PMC_STATE_ALLOCATED &&
3685 	    pm->pm_state != PMC_STATE_STOPPED &&
3686 	    pm->pm_state != PMC_STATE_RUNNING)
3687 		return (EINVAL);
3688 
3689 	/* Writing a new value is not permitted while the PMC is running. */
3690 	if (pm->pm_state == PMC_STATE_RUNNING &&
3691 	    (prw->pm_flags & PMC_F_NEWVALUE) != 0)
3692 		return (EBUSY);
3693 
3694 	if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
3695 		/*
3696 		 * If this PMC is attached to its owner (i.e., the process
3697 		 * requesting this operation) and is running, then attempt to
3698 		 * get an up-to-date reading from hardware for a READ. Writes
3699 		 * are only allowed when the PMC is stopped, so only update the
3700 		 * saved value field.
3701 		 *
3702 		 * If the PMC is not running, or is not attached to its owner,
3703 		 * read from or write to the saved value field.
3704 		 */
3705 
3706 		ri = PMC_TO_ROWINDEX(pm);
3707 		pcd = pmc_ri_to_classdep(md, ri, &adjri);
3708 
3709 		mtx_pool_lock_spin(pmc_mtxpool, pm);
3710 		cpu = curthread->td_oncpu;
3711 
3712 		if ((prw->pm_flags & PMC_F_OLDVALUE) != 0) {
3713 			if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) &&
3714 			    (pm->pm_state == PMC_STATE_RUNNING)) {
3715 				error = (*pcd->pcd_read_pmc)(cpu, adjri, pm,
3716 				    valp);
3717 			} else {
3718 				*valp = pm->pm_gv.pm_savedvalue;
3719 			}
3720 		}
3721 
3722 		if ((prw->pm_flags & PMC_F_NEWVALUE) != 0)
3723 			pm->pm_gv.pm_savedvalue = prw->pm_value;
3724 
3725 		mtx_pool_unlock_spin(pmc_mtxpool, pm);
3726 	} else { /* System mode PMCs */
3727 		cpu = PMC_TO_CPU(pm);
3728 		ri  = PMC_TO_ROWINDEX(pm);
3729 		pcd = pmc_ri_to_classdep(md, ri, &adjri);
3730 
3731 		if (!pmc_cpu_is_active(cpu))
3732 			return (ENXIO);
3733 
3734 		/* Move this thread to CPU 'cpu'. */
3735 		pmc_save_cpu_binding(&pb);
3736 		pmc_select_cpu(cpu);
3737 		critical_enter();
3738 
3739 		/* Save old value. */
3740 		if ((prw->pm_flags & PMC_F_OLDVALUE) != 0)
3741 			error = (*pcd->pcd_read_pmc)(cpu, adjri, pm, valp);
3742 
3743 		/* Write out new value. */
3744 		if (error == 0 && (prw->pm_flags & PMC_F_NEWVALUE) != 0)
3745 			error = (*pcd->pcd_write_pmc)(cpu, adjri, pm,
3746 			    prw->pm_value);
3747 
3748 		critical_exit();
3749 		pmc_restore_cpu_binding(&pb);
3750 		if (error != 0)
3751 			return (error);
3752 	}
3753 
3754 #ifdef HWPMC_DEBUG
3755 	if ((prw->pm_flags & PMC_F_NEWVALUE) != 0)
3756 		PMCDBG3(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
3757 		    ri, prw->pm_value, *valp);
3758 	else
3759 		PMCDBG2(PMC,OPS,2, "rw id=%d -> old %jx", ri, *valp);
3760 #endif
3761 	return (error);
3762 }
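
/*
 * The userland faces of this operation are pmc_read(3), pmc_write(3)
 * and pmc_rw(3).  A typical counting-mode measurement, sketched with
 * error handling elided:
 *
 *	pmc_value_t v;
 *
 *	pmc_start(id);
 *	... run the workload ...
 *	pmc_stop(id);
 *	pmc_read(id, &v);	-- PMC_F_OLDVALUE under the hood
 */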
3763 
3764 static int
3765 pmc_syscall_handler(struct thread *td, void *syscall_args)
3766 {
3767 	struct pmc_syscall_args *c;
3768 	void *pmclog_proc_handle;
3769 	void *arg;
3770 	int error, op;
3771 	bool is_sx_downgraded;
3772 
3773 	c = (struct pmc_syscall_args *)syscall_args;
3774 	op = c->pmop_code;
3775 	arg = c->pmop_data;
3776 
3777 	/* PMC isn't set up yet */
3778 	if (pmc_hook == NULL)
3779 		return (EINVAL);
3780 
3781 	if (op == PMC_OP_CONFIGURELOG) {
3782 		/*
3783 		 * We cannot create the logging process inside
3784 		 * pmclog_configure_log() because there is a LOR
3785 		 * between pmc_sx and process structure locks.
3786 		 * Instead, pre-create the process and ignite the loop
3787 		 * if everything is fine, otherwise direct the process
3788 		 * to exit.
3789 		 */
3790 		error = pmclog_proc_create(td, &pmclog_proc_handle);
3791 		if (error != 0)
3792 			goto done_syscall;
3793 	}
3794 
3795 	PMC_GET_SX_XLOCK(ENOSYS);
3796 	is_sx_downgraded = false;
3797 	PMCDBG3(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
3798 	    pmc_op_to_name[op], arg);
3799 
3800 	error = 0;
3801 	counter_u64_add(pmc_stats.pm_syscalls, 1);
3802 
3803 	switch (op) {
3804 
3805 
3806 	/*
3807 	 * Configure a log file.
3808 	 *
3809 	 * XXX This OP will be reworked.
3810 	 */
3811 
3812 	case PMC_OP_CONFIGURELOG:
3813 	{
3814 		struct proc *p;
3815 		struct pmc *pm;
3816 		struct pmc_owner *po;
3817 		struct pmc_op_configurelog cl;
3818 
3819 		if ((error = copyin(arg, &cl, sizeof(cl))) != 0) {
3820 			pmclog_proc_ignite(pmclog_proc_handle, NULL);
3821 			break;
3822 		}
3823 
3824 		/* No flags currently implemented */
3825 		if (cl.pm_flags != 0) {
3826 			pmclog_proc_ignite(pmclog_proc_handle, NULL);
3827 			error = EINVAL;
3828 			break;
3829 		}
3830 
3831 		/* mark this process as owning a log file */
3832 		p = td->td_proc;
3833 		if ((po = pmc_find_owner_descriptor(p)) == NULL)
3834 			if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
3835 				pmclog_proc_ignite(pmclog_proc_handle, NULL);
3836 				error = ENOMEM;
3837 				break;
3838 			}
3839 
3840 		/*
3841 		 * If a valid fd was passed in, try to configure that,
3842 		 * otherwise if 'fd' was less than zero and there was
3843 		 * a log file configured, flush its buffers and
3844 		 * de-configure it.
3845 		 */
3846 		if (cl.pm_logfd >= 0) {
3847 			error = pmclog_configure_log(md, po, cl.pm_logfd);
3848 			pmclog_proc_ignite(pmclog_proc_handle, error == 0 ?
3849 			    po : NULL);
3850 		} else if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
3851 			pmclog_proc_ignite(pmclog_proc_handle, NULL);
3852 			error = pmclog_close(po);
3853 			if (error == 0) {
3854 				LIST_FOREACH(pm, &po->po_pmcs, pm_next)
3855 				    if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
3856 					pm->pm_state == PMC_STATE_RUNNING)
3857 					    pmc_stop(pm);
3858 				error = pmclog_deconfigure_log(po);
3859 			}
3860 		} else {
3861 			pmclog_proc_ignite(pmclog_proc_handle, NULL);
3862 			error = EINVAL;
3863 		}
3864 	}
3865 	break;
3866 
3867 	/*
3868 	 * Flush a log file.
3869 	 */
3870 
3871 	case PMC_OP_FLUSHLOG:
3872 	{
3873 		struct pmc_owner *po;
3874 
3875 		sx_assert(&pmc_sx, SX_XLOCKED);
3876 
3877 		if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3878 			error = EINVAL;
3879 			break;
3880 		}
3881 
3882 		error = pmclog_flush(po, 0);
3883 	}
3884 	break;
3885 
3886 	/*
3887 	 * Close a log file.
3888 	 */
3889 
3890 	case PMC_OP_CLOSELOG:
3891 	{
3892 		struct pmc_owner *po;
3893 
3894 		sx_assert(&pmc_sx, SX_XLOCKED);
3895 
3896 		if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
3897 			error = EINVAL;
3898 			break;
3899 		}
3900 
3901 		error = pmclog_close(po);
3902 	}
3903 	break;
3904 
3905 	/*
3906 	 * Retrieve hardware configuration.
3907 	 */
3908 
3909 	case PMC_OP_GETCPUINFO:	/* CPU information */
3910 	{
3911 		struct pmc_op_getcpuinfo gci;
3912 		struct pmc_classinfo *pci;
3913 		struct pmc_classdep *pcd;
3914 		int cl;
3915 
3916 		memset(&gci, 0, sizeof(gci));
3917 		gci.pm_cputype = md->pmd_cputype;
3918 		gci.pm_ncpu    = pmc_cpu_max();
3919 		gci.pm_npmc    = md->pmd_npmc;
3920 		gci.pm_nclass  = md->pmd_nclass;
3921 		pci = gci.pm_classes;
3922 		pcd = md->pmd_classdep;
3923 		for (cl = 0; cl < md->pmd_nclass; cl++, pci++, pcd++) {
3924 			pci->pm_caps  = pcd->pcd_caps;
3925 			pci->pm_class = pcd->pcd_class;
3926 			pci->pm_width = pcd->pcd_width;
3927 			pci->pm_num   = pcd->pcd_num;
3928 		}
3929 		error = copyout(&gci, arg, sizeof(gci));
3930 	}
3931 	break;
3932 
3933 	/*
3934 	 * Retrieve soft events list.
3935 	 */
3936 	case PMC_OP_GETDYNEVENTINFO:
3937 	{
3938 		enum pmc_class			cl;
3939 		enum pmc_event			ev;
3940 		struct pmc_op_getdyneventinfo	*gei;
3941 		struct pmc_dyn_event_descr	dev;
3942 		struct pmc_soft			*ps;
3943 		uint32_t			nevent;
3944 
3945 		sx_assert(&pmc_sx, SX_LOCKED);
3946 
3947 		gei = (struct pmc_op_getdyneventinfo *) arg;
3948 
3949 		if ((error = copyin(&gei->pm_class, &cl, sizeof(cl))) != 0)
3950 			break;
3951 
3952 		/* Only SOFT class is dynamic. */
3953 		if (cl != PMC_CLASS_SOFT) {
3954 			error = EINVAL;
3955 			break;
3956 		}
3957 
3958 		nevent = 0;
3959 		for (ev = PMC_EV_SOFT_FIRST; (int)ev <= PMC_EV_SOFT_LAST; ev++) {
3960 			ps = pmc_soft_ev_acquire(ev);
3961 			if (ps == NULL)
3962 				continue;
3963 			bcopy(&ps->ps_ev, &dev, sizeof(dev));
3964 			pmc_soft_ev_release(ps);
3965 
3966 			error = copyout(&dev,
3967 			    &gei->pm_events[nevent],
3968 			    sizeof(struct pmc_dyn_event_descr));
3969 			if (error != 0)
3970 				break;
3971 			nevent++;
3972 		}
3973 		if (error != 0)
3974 			break;
3975 
3976 		error = copyout(&nevent, &gei->pm_nevent,
3977 		    sizeof(nevent));
3978 	}
3979 	break;
3980 
3981 	/*
3982 	 * Get module statistics
3983 	 */
3984 
3985 	case PMC_OP_GETDRIVERSTATS:
3986 	{
3987 		struct pmc_op_getdriverstats gms;
3988 #define CFETCH(a, b, field) a.field = counter_u64_fetch(b.field)
3989 		CFETCH(gms, pmc_stats, pm_intr_ignored);
3990 		CFETCH(gms, pmc_stats, pm_intr_processed);
3991 		CFETCH(gms, pmc_stats, pm_intr_bufferfull);
3992 		CFETCH(gms, pmc_stats, pm_syscalls);
3993 		CFETCH(gms, pmc_stats, pm_syscall_errors);
3994 		CFETCH(gms, pmc_stats, pm_buffer_requests);
3995 		CFETCH(gms, pmc_stats, pm_buffer_requests_failed);
3996 		CFETCH(gms, pmc_stats, pm_log_sweeps);
3997 #undef CFETCH
3998 		error = copyout(&gms, arg, sizeof(gms));
3999 	}
4000 	break;
4001 
4002 
4003 	/*
4004 	 * Retrieve module version number
4005 	 */
4006 
4007 	case PMC_OP_GETMODULEVERSION:
4008 	{
4009 		uint32_t cv, modv;
4010 
4011 		/* retrieve the client's idea of the ABI version */
4012 		if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0)
4013 			break;
4014 		/* don't service clients newer than our driver */
4015 		modv = PMC_VERSION;
4016 		if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) {
4017 			error = EPROGMISMATCH;
4018 			break;
4019 		}
4020 		error = copyout(&modv, arg, sizeof(modv));
4021 	}
4022 	break;
4023 
4024 
4025 	/*
4026 	 * Retrieve the state of all the PMCs on a given
4027 	 * CPU.
4028 	 */
4029 
4030 	case PMC_OP_GETPMCINFO:
4031 	{
4032 		int ari;
4033 		struct pmc *pm;
4034 		size_t pmcinfo_size;
4035 		uint32_t cpu, n, npmc;
4036 		struct pmc_owner *po;
4037 		struct pmc_binding pb;
4038 		struct pmc_classdep *pcd;
4039 		struct pmc_info *p, *pmcinfo;
4040 		struct pmc_op_getpmcinfo *gpi;
4041 
4042 		PMC_DOWNGRADE_SX();
4043 
4044 		gpi = (struct pmc_op_getpmcinfo *) arg;
4045 
4046 		if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
4047 			break;
4048 
4049 		if (cpu >= pmc_cpu_max()) {
4050 			error = EINVAL;
4051 			break;
4052 		}
4053 
4054 		if (!pmc_cpu_is_active(cpu)) {
4055 			error = ENXIO;
4056 			break;
4057 		}
4058 
4059 		/* switch to CPU 'cpu' */
4060 		pmc_save_cpu_binding(&pb);
4061 		pmc_select_cpu(cpu);
4062 
4063 		npmc = md->pmd_npmc;
4064 
4065 		pmcinfo_size = npmc * sizeof(struct pmc_info);
4066 		pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK | M_ZERO);
4067 
4068 		p = pmcinfo;
4069 
4070 		for (n = 0; n < md->pmd_npmc; n++, p++) {
4071 
4072 			pcd = pmc_ri_to_classdep(md, n, &ari);
4073 
4074 			KASSERT(pcd != NULL,
4075 			    ("[pmc,%d] null pcd ri=%d", __LINE__, n));
4076 
4077 			if ((error = pcd->pcd_describe(cpu, ari, p, &pm)) != 0)
4078 				break;
4079 
4080 			if (PMC_ROW_DISP_IS_STANDALONE(n))
4081 				p->pm_rowdisp = PMC_DISP_STANDALONE;
4082 			else if (PMC_ROW_DISP_IS_THREAD(n))
4083 				p->pm_rowdisp = PMC_DISP_THREAD;
4084 			else
4085 				p->pm_rowdisp = PMC_DISP_FREE;
4086 
4087 			p->pm_ownerpid = -1;
4088 
4089 			if (pm == NULL)	/* no PMC associated */
4090 				continue;
4091 
4092 			po = pm->pm_owner;
4093 
4094 			KASSERT(po->po_owner != NULL,
4095 			    ("[pmc,%d] pmc_owner had a null proc pointer",
4096 				__LINE__));
4097 
4098 			p->pm_ownerpid = po->po_owner->p_pid;
4099 			p->pm_mode     = PMC_TO_MODE(pm);
4100 			p->pm_event    = pm->pm_event;
4101 			p->pm_flags    = pm->pm_flags;
4102 
4103 			if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
4104 				p->pm_reloadcount =
4105 				    pm->pm_sc.pm_reloadcount;
4106 		}
4107 
4108 		pmc_restore_cpu_binding(&pb);
4109 
4110 		/* now copy out the PMC info collected */
4111 		if (error == 0)
4112 			error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
4113 
4114 		free(pmcinfo, M_PMC);
4115 	}
4116 	break;
4117 
4118 
4119 	/*
4120 	 * Set the administrative state of a PMC, i.e., whether
4121 	 * the PMC is to be used or not.
4122 	 */
4123 
4124 	case PMC_OP_PMCADMIN:
4125 	{
4126 		int cpu, ri;
4127 		enum pmc_state request;
4128 		struct pmc_cpu *pc;
4129 		struct pmc_hw *phw;
4130 		struct pmc_op_pmcadmin pma;
4131 		struct pmc_binding pb;
4132 
4133 		sx_assert(&pmc_sx, SX_XLOCKED);
4134 
4135 		KASSERT(td == curthread,
4136 		    ("[pmc,%d] td != curthread", __LINE__));
4137 
4138 		error = priv_check(td, PRIV_PMC_MANAGE);
4139 		if (error)
4140 			break;
4141 
4142 		if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
4143 			break;
4144 
4145 		cpu = pma.pm_cpu;
4146 
4147 		if (cpu < 0 || cpu >= (int) pmc_cpu_max()) {
4148 			error = EINVAL;
4149 			break;
4150 		}
4151 
4152 		if (!pmc_cpu_is_active(cpu)) {
4153 			error = ENXIO;
4154 			break;
4155 		}
4156 
4157 		request = pma.pm_state;
4158 
4159 		if (request != PMC_STATE_DISABLED &&
4160 		    request != PMC_STATE_FREE) {
4161 			error = EINVAL;
4162 			break;
4163 		}
4164 
4165 		ri = pma.pm_pmc; /* pmc id == row index */
4166 		if (ri < 0 || ri >= (int) md->pmd_npmc) {
4167 			error = EINVAL;
4168 			break;
4169 		}
4170 
4171 		/*
4172 		 * We can't disable a PMC with a row-index allocated
4173 		 * for process virtual PMCs.
4174 		 */
4175 
4176 		if (PMC_ROW_DISP_IS_THREAD(ri) &&
4177 		    request == PMC_STATE_DISABLED) {
4178 			error = EBUSY;
4179 			break;
4180 		}
4181 
4182 		/*
4183 		 * otherwise, this PMC on this CPU is either free or
4184 		 * in system-wide mode.
4185 		 */
4186 
4187 		pmc_save_cpu_binding(&pb);
4188 		pmc_select_cpu(cpu);
4189 
4190 		pc  = pmc_pcpu[cpu];
4191 		phw = pc->pc_hwpmcs[ri];
4192 
4193 		/*
4194 		 * XXX do we need some kind of 'forced' disable?
4195 		 */
4196 
4197 		if (phw->phw_pmc == NULL) {
4198 			if (request == PMC_STATE_DISABLED &&
4199 			    (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
4200 				phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
4201 				PMC_MARK_ROW_STANDALONE(ri);
4202 			} else if (request == PMC_STATE_FREE &&
4203 			    (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
4204 				phw->phw_state |=  PMC_PHW_FLAG_IS_ENABLED;
4205 				PMC_UNMARK_ROW_STANDALONE(ri);
4206 			}
4207 			/* other cases are a no-op */
4208 		} else
4209 			error = EBUSY;
4210 
4211 		pmc_restore_cpu_binding(&pb);
4212 	}
4213 	break;
4214 
4215 
4216 	/*
4217 	 * Allocate a PMC.
4218 	 */
4219 	case PMC_OP_PMCALLOCATE:
4220 	{
4221 		struct pmc_op_pmcallocate pa;
4222 
4223 		error = copyin(arg, &pa, sizeof(pa));
4224 		if (error != 0)
4225 			break;
4226 
4227 		error = pmc_do_op_pmcallocate(td, &pa);
4228 		if (error != 0)
4229 			break;
4230 
4231 		error = copyout(&pa, arg, sizeof(pa));
4232 	}
4233 	break;
4234 
4235 	/*
4236 	 * Attach a PMC to a process.
4237 	 */
4238 	case PMC_OP_PMCATTACH:
4239 	{
4240 		struct pmc_op_pmcattach a;
4241 
4242 		error = copyin(arg, &a, sizeof(a));
4243 		if (error != 0)
4244 			break;
4245 
4246 		error = pmc_do_op_pmcattach(td, a);
4247 	}
4248 	break;
4249 
4250 	/*
4251 	 * Detach an attached PMC from a process.
4252 	 */
4253 	case PMC_OP_PMCDETACH:
4254 	{
4255 		struct pmc_op_pmcattach a;
4256 
4257 		error = copyin(arg, &a, sizeof(a));
4258 		if (error != 0)
4259 			break;
4260 
4261 		error = pmc_do_op_pmcdetach(td, a);
4262 	}
4263 	break;
4264 
4265 
4266 	/*
4267 	 * Retrieve the MSR number associated with the counter
4268 	 * 'pmc_id'.  This allows processes to directly use RDPMC
4269 	 * instructions to read their PMCs, without the overhead of a
4270 	 * system call.
4271 	 */
4272 
4273 	case PMC_OP_PMCGETMSR:
4274 	{
4275 		int adjri, ri;
4276 		struct pmc *pm;
4277 		struct pmc_target *pt;
4278 		struct pmc_op_getmsr gm;
4279 		struct pmc_classdep *pcd;
4280 
4281 		PMC_DOWNGRADE_SX();
4282 
4283 		if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
4284 			break;
4285 
4286 		if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
4287 			break;
4288 
4289 		/*
4290 		 * The allocated PMC has to be a process virtual PMC,
4291 		 * i.e., of type MODE_T[CS].  Global PMCs can only be
4292 		 * read using the PMCREAD operation since they may be
4293 		 * allocated on a different CPU than the one we could
4294 		 * be running on at the time of the RDPMC instruction.
4295 		 *
4296 		 * The GETMSR operation is not allowed for PMCs that
4297 		 * are inherited across processes.
4298 		 */
4299 
4300 		if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) ||
4301 		    (pm->pm_flags & PMC_F_DESCENDANTS)) {
4302 			error = EINVAL;
4303 			break;
4304 		}
4305 
4306 		/*
4307 		 * It only makes sense to use a RDPMC (or its
4308 		 * equivalent instruction on non-x86 architectures) on
4309 		 * a process that has allocated and attached a PMC to
4310 		 * itself.  Conversely the PMC is only allowed to have
4311 		 * one process attached to it -- its owner.
4312 		 */
4313 
4314 		if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL ||
4315 		    LIST_NEXT(pt, pt_next) != NULL ||
4316 		    pt->pt_process->pp_proc != pm->pm_owner->po_owner) {
4317 			error = EINVAL;
4318 			break;
4319 		}
4320 
4321 		ri = PMC_TO_ROWINDEX(pm);
4322 		pcd = pmc_ri_to_classdep(md, ri, &adjri);
4323 
4324 		/* PMC class has no 'GETMSR' support */
4325 		if (pcd->pcd_get_msr == NULL) {
4326 			error = ENOSYS;
4327 			break;
4328 		}
4329 
4330 		if ((error = (*pcd->pcd_get_msr)(adjri, &gm.pm_msr)) != 0)
4331 			break;
4332 
4333 		if ((error = copyout(&gm, arg, sizeof(gm))) != 0)
4334 			break;
4335 
4336 		/*
4337 		 * Mark our process as using MSRs.  Update machine
4338 		 * state using a forced context switch.
4339 		 */
4340 
4341 		pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS;
4342 		pmc_force_context_switch();
4343 
4344 	}
4345 	break;
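
	/*
	 * With the MSR number returned in gm.pm_msr, the attached
	 * process can sample its own counter without a system call.
	 * On x86 this is the RDPMC instruction; an illustrative
	 * userland fragment (not part of this module), with 'msr'
	 * being the value obtained from PMC_OP_PMCGETMSR:
	 *
	 *	uint32_t lo, hi;
	 *	uint64_t value;
	 *
	 *	__asm __volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (msr));
	 *	value = ((uint64_t)hi << 32) | lo;
	 */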
4346 
4347 	/*
4348 	 * Release an allocated PMC.
4349 	 */
4350 	case PMC_OP_PMCRELEASE:
4351 	{
4352 		struct pmc_op_simple sp;
4353 
4354 		error = copyin(arg, &sp, sizeof(sp));
4355 		if (error != 0)
4356 			break;
4357 
4358 		error = pmc_do_op_pmcrelease(sp.pm_pmcid);
4359 	}
4360 	break;
4361 
4362 	/*
4363 	 * Read and/or write a PMC.
4364 	 */
4365 	case PMC_OP_PMCRW:
4366 	{
4367 		struct pmc_op_pmcrw prw;
4368 		struct pmc_op_pmcrw *pprw;
4369 		pmc_value_t oldvalue;
4370 
4371 		PMC_DOWNGRADE_SX();
4372 
4373 		error = copyin(arg, &prw, sizeof(prw));
4374 		if (error != 0)
4375 			break;
4376 
4377 		error = pmc_do_op_pmcrw(&prw, &oldvalue);
4378 		if (error != 0)
4379 			break;
4380 
4381 		/* Return old value if requested. */
4382 		if ((prw.pm_flags & PMC_F_OLDVALUE) != 0) {
4383 			pprw = arg;
4384 			error = copyout(&oldvalue, &pprw->pm_value,
4385 			    sizeof(prw.pm_value));
4386 		}
4387 	}
4388 	break;
4389 
4390 
4391 	/*
4392 	 * Set the sampling rate for a sampling mode PMC and the
4393 	 * initial count for a counting mode PMC.
4394 	 */
4395 
4396 	case PMC_OP_PMCSETCOUNT:
4397 	{
4398 		struct pmc *pm;
4399 		struct pmc_op_pmcsetcount sc;
4400 
4401 		PMC_DOWNGRADE_SX();
4402 
4403 		if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
4404 			break;
4405 
4406 		if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
4407 			break;
4408 
4409 		if (pm->pm_state == PMC_STATE_RUNNING) {
4410 			error = EBUSY;
4411 			break;
4412 		}
4413 
4414 		if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
4415 			/*
4416 			 * Don't permit requested sample rate to be
4417 			 * less than pmc_mincount.
4418 			 */
4419 			if (sc.pm_count < MAX(1, pmc_mincount))
4420 				log(LOG_WARNING, "pmcsetcount: passed sample "
4421 				    "rate %ju - setting to %u\n",
4422 				    (uintmax_t)sc.pm_count,
4423 				    MAX(1, pmc_mincount));
4424 			pm->pm_sc.pm_reloadcount = MAX(MAX(1, pmc_mincount),
4425 			    sc.pm_count);
4426 		} else
4427 			pm->pm_sc.pm_initial = sc.pm_count;
4428 	}
4429 	break;
4430 
4431 
4432 	/*
4433 	 * Start a PMC.
4434 	 */
4435 
4436 	case PMC_OP_PMCSTART:
4437 	{
4438 		pmc_id_t pmcid;
4439 		struct pmc *pm;
4440 		struct pmc_op_simple sp;
4441 
4442 		sx_assert(&pmc_sx, SX_XLOCKED);
4443 
4444 		if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
4445 			break;
4446 
4447 		pmcid = sp.pm_pmcid;
4448 
4449 		if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4450 			break;
4451 
4452 		KASSERT(pmcid == pm->pm_id,
4453 		    ("[pmc,%d] pmcid %x != id %x", __LINE__,
4454 			pm->pm_id, pmcid));
4455 
4456 		if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
4457 			break;
4458 		else if (pm->pm_state != PMC_STATE_STOPPED &&
4459 		    pm->pm_state != PMC_STATE_ALLOCATED) {
4460 			error = EINVAL;
4461 			break;
4462 		}
4463 
4464 		error = pmc_start(pm);
4465 	}
4466 	break;
4467 
4468 
4469 	/*
4470 	 * Stop a PMC.
4471 	 */
4472 
4473 	case PMC_OP_PMCSTOP:
4474 	{
4475 		pmc_id_t pmcid;
4476 		struct pmc *pm;
4477 		struct pmc_op_simple sp;
4478 
4479 		PMC_DOWNGRADE_SX();
4480 
4481 		if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
4482 			break;
4483 
4484 		pmcid = sp.pm_pmcid;
4485 
4486 		/*
4487 		 * Mark the PMC as inactive and invoke the MD stop
4488 		 * routines if needed.
4489 		 */
4490 
4491 		if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4492 			break;
4493 
4494 		KASSERT(pmcid == pm->pm_id,
4495 		    ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
4496 			pm->pm_id, pmcid));
4497 
4498 		if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
4499 			break;
4500 		else if (pm->pm_state != PMC_STATE_RUNNING) {
4501 			error = EINVAL;
4502 			break;
4503 		}
4504 
4505 		error = pmc_stop(pm);
4506 	}
4507 	break;
4508 
4509 
4510 	/*
4511 	 * Write a user supplied value to the log file.
4512 	 */
4513 
4514 	case PMC_OP_WRITELOG:
4515 	{
4516 		struct pmc_op_writelog wl;
4517 		struct pmc_owner *po;
4518 
4519 		PMC_DOWNGRADE_SX();
4520 
4521 		if ((error = copyin(arg, &wl, sizeof(wl))) != 0)
4522 			break;
4523 
4524 		if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
4525 			error = EINVAL;
4526 			break;
4527 		}
4528 
4529 		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
4530 			error = EINVAL;
4531 			break;
4532 		}
4533 
4534 		error = pmclog_process_userlog(po, &wl);
4535 	}
4536 	break;
4537 
4538 	/*
4539 	 * Get the PMC capabilities
4540 	 */
4541 
4542 	case PMC_OP_GETCAPS:
4543 	{
4544 		struct pmc_op_caps c;
4545 		struct pmc *pm;
4546 		struct pmc_classdep *pcd;
4547 		pmc_id_t pmcid;
4548 		int adjri, ri;
4549 
4550 		PMC_DOWNGRADE_SX();
4551 
4552 		if ((error = copyin(arg, &c, sizeof(c))) != 0)
4553 			break;
4554 
4555 		pmcid = c.pm_pmcid;
4556 
4557 		if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
4558 			break;
4559 
4560 		KASSERT(pmcid == pm->pm_id,
4561 		    ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
4562 			pm->pm_id, pmcid));
4563 
4564 		ri = PMC_TO_ROWINDEX(pm);
4565 		pcd = pmc_ri_to_classdep(md, ri, &adjri);
4566 
4567 		/*
4568 		 * If the PMC class has no GETCAPS method, return the class
4569 		 * capabilities; otherwise query the per-counter capabilities.
4570 		 */
4571 		if (pcd->pcd_get_caps == NULL) {
4572 			c.pm_caps = pcd->pcd_caps;
4573 		} else {
4574 			error = (*pcd->pcd_get_caps)(adjri, &c.pm_caps);
4575 			if (error != 0)
4576 				break;
4577 		}
4578 
4579 		if ((error = copyout(&c, arg, sizeof(c))) != 0)
4580 			break;
4581 	}
4582 	break;
4583 
4584 	default:
4585 		error = EINVAL;
4586 		break;
4587 	}
4588 
4589 	if (is_sx_downgraded)
4590 		sx_sunlock(&pmc_sx);
4591 	else
4592 		sx_xunlock(&pmc_sx);
4593 done_syscall:
4594 	if (error)
4595 		counter_u64_add(pmc_stats.pm_syscall_errors, 1);
4596 
4597 	return (error);
4598 }
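
/*
 * A note on the locking pattern above: every operation enters with
 * pmc_sx held exclusively via PMC_GET_SX_XLOCK(), and operations that
 * can tolerate a shared hold then call PMC_DOWNGRADE_SX(), which is
 * expected to be a thin wrapper of roughly this shape (a sketch,
 * assuming the definition earlier in this file):
 *
 *	sx_downgrade(&pmc_sx);
 *	is_sx_downgraded = true;
 *
 * This is why the common exit path chooses between sx_sunlock() and
 * sx_xunlock() based on the is_sx_downgraded flag.
 */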
4599 
4600 /*
4601  * Helper functions
4602  */
4603 
4604 /*
4605  * Mark the thread as needing callchain capture and post an AST.  The
4606  * actual callchain capture will be done in a context where it is safe
4607  * to take page faults.
4608  */
4609 static void
4610 pmc_post_callchain_callback(void)
4611 {
4612 	struct thread *td;
4613 
4614 	td = curthread;
4615 
4616 	/*
4617 	 * If there are multiple PMCs for the same interrupt, ignore the new post.
4618 	 */
4619 	if ((td->td_pflags & TDP_CALLCHAIN) != 0)
4620 		return;
4621 
4622 	/*
4623 	 * Mark this thread as needing callchain capture.
4624 	 * `td->td_pflags' will be safe to touch because this thread
4625 	 * was in user space when it was interrupted.
4626 	 */
4627 	td->td_pflags |= TDP_CALLCHAIN;
4628 
4629 	/*
4630 	 * Don't let this thread migrate between CPUs until callchain
4631 	 * capture completes.
4632 	 */
4633 	sched_pin();
4634 
4635 	return;
4636 }
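
/*
 * The TDP_CALLCHAIN flag and the sched_pin() above are consumed on
 * the return-to-userspace path: the AST handler is expected to invoke
 * the PMC_FN_USER_CALLCHAIN hook, which lands in
 * pmc_capture_user_callchain() below and pairs with the matching
 * sched_unpin().
 */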
4637 
4638 static void
4639 pmc_multipart_add(struct pmc_sample *ps, int type, int length)
4640 {
4641 	int i;
4642 	uint8_t *hdr;
4643 
4644 	MPASS(ps->ps_pc != NULL);
4645 	MPASS(ps->ps_nsamples_actual != 0);
4646 
4647 	hdr = (uint8_t *)ps->ps_pc;
4648 
4649 	for (i = 0; i < PMC_MULTIPART_HEADER_ENTRIES; i++) {
4650 		if (hdr[2 * i] == PMC_CC_MULTIPART_NONE) {
4651 			hdr[2 * i] = type;
4652 			hdr[2 * i + 1] = length;
4653 			ps->ps_nsamples_actual += length;
4654 			return;
4655 		}
4656 	}
4657 
4658 	KASSERT(false, ("Too many parts in the multipart header!"));
4659 }
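
/*
 * A sketch of the multipart header claimed above, as implied by the
 * code: the first callchain slot holds PMC_MULTIPART_HEADER_ENTRIES
 * (type, length) byte pairs,
 *
 *	ps_pc[0]:  | type0 | len0 | type1 | len1 | ...
 *
 * and pmc_multipart_add() claims the first pair whose type byte is
 * still PMC_CC_MULTIPART_NONE, accounting the part's length in
 * ps_nsamples_actual.
 */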
4660 
4661 static void
4662 pmc_multipart_copydata(struct pmc_sample *ps, struct pmc_multipart *mp)
4663 {
4664 	int i, scale;
4665 	uint64_t *ps_pc;
4666 
4667 	MPASS(ps->ps_pc != NULL);
4668 	MPASS(ps->ps_nsamples_actual != 0);
4669 
4670 	ps_pc = (uint64_t *)ps->ps_pc;
4671 
4672 	for (i = 0; i < mp->pl_length; i++)
4673 		ps_pc[i + 1] = mp->pl_mpdata[i];
4674 
4675 	scale = sizeof(uint64_t) / sizeof(uintptr_t);
4676 	pmc_multipart_add(ps, mp->pl_type, scale * mp->pl_length);
4677 }
4678 
4679 /*
4680  * Find a free slot in the per-cpu array of samples and capture the
4681  * current callchain there.  If a sample was successfully added, a bit
4682  * is set in mask 'pmc_cpumask' denoting that the DO_SAMPLES hook
4683  * needs to be invoked from the clock handler.
4684  *
4685  * This function is meant to be called from an NMI handler.  It cannot
4686  * use any of the locking primitives supplied by the OS.
4687  */
4688 static int
4689 pmc_add_sample(ring_type_t ring, struct pmc *pm, struct trapframe *tf,
4690     struct pmc_multipart *mp)
4691 {
4692 	struct pmc_sample *ps;
4693 	struct pmc_samplebuffer *psb;
4694 	struct thread *td;
4695 	int error, cpu, callchaindepth;
4696 	bool inuserspace;
4697 
4698 	error = 0;
4699 
4700 	/*
4701 	 * Allocate space for a sample buffer.
4702 	 */
4703 	cpu = curcpu;
4704 	psb = pmc_pcpu[cpu]->pc_sb[ring];
4705 	inuserspace = TRAPF_USERMODE(tf);
4706 	ps = PMC_PROD_SAMPLE(psb);
4707 	if (psb->ps_considx != psb->ps_prodidx &&
4708 		ps->ps_nsamples) {	/* in use, reader hasn't caught up */
4709 		pm->pm_pcpu_state[cpu].pps_stalled = 1;
4710 		counter_u64_add(pmc_stats.pm_intr_bufferfull, 1);
4711 		PMCDBG6(SAM,INT,1,"(spc) cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d",
4712 		    cpu, pm, tf, inuserspace,
4713 		    (int)(psb->ps_prodidx & pmc_sample_mask),
4714 		    (int)(psb->ps_considx & pmc_sample_mask));
4715 		callchaindepth = 1;
4716 		error = ENOMEM;
4717 		goto done;
4718 	}
4719 
4720 	/* Fill in entry. */
4721 	PMCDBG6(SAM,INT,1,"cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm, tf,
4722 	    inuserspace, (int)(psb->ps_prodidx & pmc_sample_mask),
4723 	    (int)(psb->ps_considx & pmc_sample_mask));
4724 
4725 	td = curthread;
4726 	ps->ps_pmc = pm;
4727 	ps->ps_td = td;
4728 	ps->ps_pid = td->td_proc->p_pid;
4729 	ps->ps_tid = td->td_tid;
4730 	ps->ps_tsc = pmc_rdtsc();
4731 	ps->ps_ticks = ticks;
4732 	ps->ps_cpu = cpu;
4733 	ps->ps_flags = inuserspace ? PMC_CC_F_USERSPACE : 0;
4734 	ps->ps_nsamples_actual = 0;
4735 
4736 	callchaindepth = (pm->pm_flags & PMC_F_CALLCHAIN) ?
4737 	    pmc_callchaindepth : 1;
4738 
4739 	MPASS(ps->ps_pc != NULL);
4740 
4741 	if (mp != NULL) {
4742 		/* Set multipart flag, clear header and copy data */
4743 		ps->ps_flags |= PMC_CC_F_MULTIPART;
4744 		ps->ps_pc[0] = 0;
4745 		ps->ps_nsamples_actual = 1;
4746 		pmc_multipart_copydata(ps, mp);
4747 	}
4748 
4749 	if (callchaindepth == 1) {
4750 		ps->ps_pc[ps->ps_nsamples_actual] = PMC_TRAPFRAME_TO_PC(tf);
4751 	} else {
4752 		/*
4753 		 * Kernel stack traversals can be done immediately, while we
4754 		 * defer to an AST for user space traversals.
4755 		 */
4756 		if (!inuserspace) {
4757 			callchaindepth = pmc_save_kernel_callchain(
4758 			    ps->ps_pc + ps->ps_nsamples_actual,
4759 			    callchaindepth - ps->ps_nsamples_actual, tf);
4760 			callchaindepth += ps->ps_nsamples_actual;
4761 		} else {
4762 			pmc_post_callchain_callback();
4763 			callchaindepth = PMC_USER_CALLCHAIN_PENDING;
4764 		}
4765 	}
4766 
4767 	ps->ps_nsamples = callchaindepth; /* mark entry as in-use */
4768 	if (ring == PMC_UR) {
4769 		ps->ps_nsamples_actual = ps->ps_nsamples;
4770 		ps->ps_nsamples = PMC_USER_CALLCHAIN_PENDING;
4771 	}
4772 
4773 	KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0,
4774 	    ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm,
4775 	    (uintmax_t)counter_u64_fetch(pm->pm_runcount)));
4776 
4777 	counter_u64_add(pm->pm_runcount, 1);	/* hold onto PMC */
4778 	/* increment write pointer */
4779 	psb->ps_prodidx++;
4780 done:
4781 	/* mark CPU as needing processing */
4782 	if (callchaindepth != PMC_USER_CALLCHAIN_PENDING)
4783 		DPCPU_SET(pmc_sampled, 1);
4784 
4785 	return (error);
4786 }
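
/*
 * The sample buffer written above behaves as a lock-free
 * single-producer/single-consumer ring: the NMI-context producer only
 * ever advances ps_prodidx, the hardclock-driven sweep in
 * pmc_process_samples() only ever advances ps_considx, and
 * ps_nsamples doubles as the per-entry "in use" marker.  In outline:
 *
 *	produce:  fill(ps); ps->ps_nsamples = depth; psb->ps_prodidx++;
 *	consume:  drain(ps); ps->ps_nsamples = 0;    psb->ps_considx++;
 */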
4787 
4788 /*
4789  * Interrupt processing.
4790  *
4791  * This function may be called from an NMI handler. It cannot use any of the
4792  * locking primitives supplied by the OS.
4793  */
4794 int
4795 pmc_process_interrupt_mp(int ring, struct pmc *pm, struct trapframe *tf,
4796     struct pmc_multipart *mp)
4797 {
4798 	struct thread *td;
4799 
4800 	td = curthread;
4801 	if ((pm->pm_flags & PMC_F_USERCALLCHAIN) &&
4802 	    (td->td_proc->p_flag & P_KPROC) == 0 && !TRAPF_USERMODE(tf)) {
4803 		atomic_add_int(&td->td_pmcpend, 1);
4804 		return (pmc_add_sample(PMC_UR, pm, tf, mp));
4805 	}
4806 	return (pmc_add_sample(ring, pm, tf, mp));
4807 }
4808 
4809 int
4810 pmc_process_interrupt(int ring, struct pmc *pm, struct trapframe *tf)
4811 {
4812 	return (pmc_process_interrupt_mp(ring, pm, tf, NULL));
4813 }
4814 
4815 /*
4816  * Capture a user call chain. This function will be called from ast()
4817  * before control returns to userland and before the process gets
4818  * rescheduled.
4819  */
4820 static void
4821 pmc_capture_user_callchain(int cpu, int ring, struct trapframe *tf)
4822 {
4823 	struct pmc *pm;
4824 	struct pmc_sample *ps;
4825 	struct pmc_samplebuffer *psb;
4826 	struct thread *td;
4827 	uint64_t considx, prodidx;
4828 	int nsamples, nrecords, pass, iter;
4829 	int start_ticks __diagused;
4830 
4831 	psb = pmc_pcpu[cpu]->pc_sb[ring];
4832 	td = curthread;
4833 	nrecords = INT_MAX;
4834 	pass = 0;
4835 	start_ticks = ticks;
4836 
4837 	KASSERT(td->td_pflags & TDP_CALLCHAIN,
4838 	    ("[pmc,%d] Retrieving callchain for thread that doesn't want it",
4839 	    __LINE__));
4840 restart:
4841 	if (ring == PMC_UR)
4842 		nrecords = atomic_readandclear_32(&td->td_pmcpend);
4843 
4844 	for (iter = 0, considx = psb->ps_considx, prodidx = psb->ps_prodidx;
4845 	    considx < prodidx && iter < pmc_nsamples; considx++, iter++) {
4846 		ps = PMC_CONS_SAMPLE_OFF(psb, considx);
4847 
4848 		/*
4849 		 * Iterate through all deferred callchain requests. Walk from
4850 		 * the current read pointer to the current write pointer.
4851 		 */
4852 #ifdef INVARIANTS
4853 		if (ps->ps_nsamples == PMC_SAMPLE_FREE) {
4854 			continue;
4855 		}
4856 #endif
4857 		if (ps->ps_td != td ||
4858 		    ps->ps_nsamples != PMC_USER_CALLCHAIN_PENDING ||
4859 		    ps->ps_pmc->pm_state != PMC_STATE_RUNNING)
4860 			continue;
4861 
4862 		KASSERT(ps->ps_cpu == cpu,
4863 		    ("[pmc,%d] cpu mismatch ps_cpu=%d pcpu=%d", __LINE__,
4864 		    ps->ps_cpu, PCPU_GET(cpuid)));
4865 
4866 		pm = ps->ps_pmc;
4867 		KASSERT(pm->pm_flags & PMC_F_CALLCHAIN,
4868 		    ("[pmc,%d] Retrieving callchain for PMC that doesn't "
4869 		    "want it", __LINE__));
4870 		KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
4871 		    ("[pmc,%d] runcount %ju", __LINE__,
4872 		    (uintmax_t)counter_u64_fetch(pm->pm_runcount)));
4873 
4874 		if (ring == PMC_UR) {
4875 			counter_u64_add(pmc_stats.pm_merges, 1);
4876 		}
4877 		nsamples = ps->ps_nsamples_actual;
4878 
4879 		/*
4880 		 * Retrieve the callchain and mark the sample buffer
4881 		 * as 'processable' by the timer tick sweep code.
4882 		 */
4883 		if (__predict_true(nsamples < pmc_callchaindepth - 1))
4884 			nsamples += pmc_save_user_callchain(ps->ps_pc + nsamples,
4885 			    pmc_callchaindepth - nsamples - 1, tf);
4886 
4887 		/*
4888 		 * We have to prevent hardclock from potentially overwriting
4889 		 * this sample between when we read the value and when we set
4890 		 * it.
4891 		 */
4892 		spinlock_enter();
4893 
4894 		/*
4895 		 * Verify that the sample hasn't been dropped in the meantime.
4896 		 */
4897 		if (ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING) {
4898 			ps->ps_nsamples = nsamples;
4899 			/*
4900 			 * If we couldn't get a sample, simply drop the
4901 			 * reference.
4902 			 */
4903 			if (nsamples == 0)
4904 				counter_u64_add(pm->pm_runcount, -1);
4905 		}
4906 		spinlock_exit();
4907 		if (nrecords-- == 1)
4908 			break;
4909 	}
4910 	if (__predict_false(ring == PMC_UR && td->td_pmcpend)) {
4911 		if (pass == 0) {
4912 			pass = 1;
4913 			goto restart;
4914 		}
4915 		/* only collect samples for this pass once */
4916 		td->td_pmcpend = 0;
4917 	}
4918 
4919 #ifdef INVARIANTS
4920 	if ((ticks - start_ticks) > hz)
4921 		log(LOG_ERR, "%s took %d ticks\n", __func__, (ticks - start_ticks));
4922 #endif
4923 	/* mark CPU as needing processing */
4924 	DPCPU_SET(pmc_sampled, 1);
4925 }
4926 
4927 /*
4928  * Process saved PC samples.
4929  */
4930 static void
4931 pmc_process_samples(int cpu, ring_type_t ring)
4932 {
4933 	struct pmc *pm;
4934 	struct thread *td;
4935 	struct pmc_owner *po;
4936 	struct pmc_sample *ps;
4937 	struct pmc_classdep *pcd;
4938 	struct pmc_samplebuffer *psb;
4939 	uint64_t delta __diagused;
4940 	int adjri, n;
4941 
4942 	KASSERT(PCPU_GET(cpuid) == cpu,
4943 	    ("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__,
4944 		PCPU_GET(cpuid), cpu));
4945 
4946 	psb = pmc_pcpu[cpu]->pc_sb[ring];
4947 	delta = psb->ps_prodidx - psb->ps_considx;
4948 	MPASS(delta <= pmc_nsamples);
4949 	MPASS(psb->ps_considx <= psb->ps_prodidx);
4950 	for (n = 0; psb->ps_considx < psb->ps_prodidx; psb->ps_considx++, n++) {
4951 		ps = PMC_CONS_SAMPLE(psb);
4952 
4953 		if (__predict_false(ps->ps_nsamples == PMC_SAMPLE_FREE))
4954 			continue;
4955 
4956 		/* skip non-running samples */
4957 		pm = ps->ps_pmc;
4958 		if (pm->pm_state != PMC_STATE_RUNNING)
4959 			goto entrydone;
4960 
4961 		KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
4962 		    ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm,
4963 		    (uintmax_t)counter_u64_fetch(pm->pm_runcount)));
4964 		KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
4965 		    ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__,
4966 		    pm, PMC_TO_MODE(pm)));
4967 
4968 		po = pm->pm_owner;
4969 
4970 		/* If there is a pending AST, wait for its completion. */
4971 		if (ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING) {
4972 			/*
4973 			 * If we've been waiting more than 1 tick to
4974 			 * collect a callchain for this record then
4975 			 * drop it and move on.
4976 			 */
4977 			if (ticks - ps->ps_ticks > 1) {
4978 				/*
4979 				 * Track how often we hit this as it will
4980 				 * preferentially lose user samples
4981 				 * for long running system calls.
4982 				 */
4983 				counter_u64_add(pmc_stats.pm_overwrites, 1);
4984 				goto entrydone;
4985 			}
4986 			/* Need a rescan at a later time. */
4987 			DPCPU_SET(pmc_sampled, 1);
4988 			break;
4989 		}
4990 
4991 		PMCDBG6(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu,
4992 		    pm, ps->ps_nsamples, ps->ps_flags,
4993 		    (int)(psb->ps_prodidx & pmc_sample_mask),
4994 		    (int)(psb->ps_considx & pmc_sample_mask));
4995 
4996 		/*
4997 		 * If this is a process-mode PMC that is attached to
4998 		 * its owner, and if the PC is in user mode, update
4999 		 * profiling statistics like timer-based profiling
5000 		 * would have done.
5001 		 *
5002 		 * Otherwise, this is either a sampling-mode PMC that
5003 		 * is attached to a different process than its owner,
5004 		 * or a system-wide sampling PMC. Dispatch a log
5005 		 * entry to the PMC's owner process.
5006 		 */
5007 		if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) {
5008 			if (ps->ps_flags & PMC_CC_F_USERSPACE) {
5009 				td = FIRST_THREAD_IN_PROC(po->po_owner);
5010 				addupc_intr(td, ps->ps_pc[0], 1);
5011 			}
5012 		} else
5013 			pmclog_process_callchain(pm, ps);
5014 
5015 entrydone:
5016 		ps->ps_nsamples = 0; /* mark entry as free */
5017 		KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
5018 		    ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm,
5019 		    (uintmax_t)counter_u64_fetch(pm->pm_runcount)));
5020 
5021 		counter_u64_add(pm->pm_runcount, -1);
5022 	}
5023 
5024 	counter_u64_add(pmc_stats.pm_log_sweeps, 1);
5025 
5026 	/* Do not re-enable stalled PMCs if we failed to process any samples */
5027 	if (n == 0)
5028 		return;
5029 
5030 	/*
5031 	 * Restart any stalled sampling PMCs on this CPU.
5032 	 *
5033 	 * If the NMI handler sets the pm_stalled field of a PMC after
5034 	 * the check below, we'll end up processing the stalled PMC at
5035 	 * the next hardclock tick.
5036 	 */
5037 	for (n = 0; n < md->pmd_npmc; n++) {
5038 		pcd = pmc_ri_to_classdep(md, n, &adjri);
5039 		KASSERT(pcd != NULL,
5040 		    ("[pmc,%d] null pcd ri=%d", __LINE__, n));
5041 		(void)(*pcd->pcd_get_config)(cpu, adjri, &pm);
5042 
5043 		if (pm == NULL ||				/* !cfg'ed */
5044 		    pm->pm_state != PMC_STATE_RUNNING ||	/* !active */
5045 		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) ||	/* !sampling */
5046 		    !pm->pm_pcpu_state[cpu].pps_cpustate ||	/* !desired */
5047 		    !pm->pm_pcpu_state[cpu].pps_stalled)	/* !stalled */
5048 			continue;
5049 
5050 		pm->pm_pcpu_state[cpu].pps_stalled = 0;
5051 		(void)(*pcd->pcd_start_pmc)(cpu, adjri, pm);
5052 	}
5053 }
5054 
5055 /*
5056  * Event handlers.
5057  */
5058 
5059 /*
5060  * Handle a process exit.
5061  *
5062  * Remove this process from all hash tables.  If this process
5063  * owned any PMCs, turn off those PMCs and deallocate them,
5064  * removing any associations with target processes.
5065  *
5066  * This function will be called by the last 'thread' of a
5067  * process.
5068  *
5069  * XXX This eventhandler gets called early in the exit process.
5070  * Consider using a 'hook' invocation from thread_exit() or equivalent
5071  * spot.  Another negative is that kse_exit doesn't seem to call
5072  * exit1() [??].
5073  */
5074 static void
5075 pmc_process_exit(void *arg __unused, struct proc *p)
5076 {
5077 	struct pmc *pm;
5078 	struct pmc_owner *po;
5079 	struct pmc_process *pp;
5080 	struct pmc_classdep *pcd;
5081 	pmc_value_t newvalue, tmp;
5082 	int ri, adjri, cpu;
5083 	bool is_using_hwpmcs;
5084 
5085 	PROC_LOCK(p);
5086 	is_using_hwpmcs = (p->p_flag & P_HWPMC) != 0;
5087 	PROC_UNLOCK(p);
5088 
5089 	/*
5090 	 * Log a sysexit event to all SS PMC owners.
5091 	 */
5092 	PMC_EPOCH_ENTER();
5093 	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) {
5094 		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0)
5095 			pmclog_process_sysexit(po, p->p_pid);
5096 	}
5097 	PMC_EPOCH_EXIT();
5098 
5099 	PMC_GET_SX_XLOCK();
5100 	PMCDBG3(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid,
5101 	    p->p_comm);
5102 
5103 	if (!is_using_hwpmcs)
5104 		goto out;
5105 
5106 	/*
5107 	 * Since this code is invoked by the last thread in an exiting process,
5108 	 * we would have context switched IN at some prior point. However, with
5109 	 * PREEMPTION, kernel mode context switches may happen any time, so we
5110 	 * want to disable a context switch OUT till we get any PMCs targeting
5111 	 * this process off the hardware.
5112 	 *
5113 	 * We also need to atomically remove this process' entry from our
5114 	 * target process hash table, using PMC_FLAG_REMOVE.
5115 	 */
5116 	PMCDBG3(PRC,EXT,1, "process-exit proc=%p (%d, %s)", p, p->p_pid,
5117 	    p->p_comm);
5118 
5119 	critical_enter(); /* no preemption */
5120 
5121 	cpu = curthread->td_oncpu;
5122 
5123 	pp = pmc_find_process_descriptor(p, PMC_FLAG_REMOVE);
5124 	if (pp == NULL) {
5125 		critical_exit();
5126 		goto out;
5127 	}
5128 
5129 	PMCDBG2(PRC,EXT,2, "process-exit proc=%p pmc-process=%p", p, pp);
5130 
5131 	/*
5132 	 * The exiting process could be the target of some PMCs which will be
5133 	 * running on the currently executing CPU.
5134 	 *
5135 	 * We need to turn these PMCs off like we would do at context switch
5136 	 * OUT time.
5137 	 */
5138 	for (ri = 0; ri < md->pmd_npmc; ri++) {
5139 		/*
5140 		 * Pick up the pmc pointer from hardware state similar to the
5141 		 * CSW_OUT code.
5142 		 */
5143 		pm = NULL;
5144 		pcd = pmc_ri_to_classdep(md, ri, &adjri);
5145 
5146 		(void)(*pcd->pcd_get_config)(cpu, adjri, &pm);
5147 
5148 		PMCDBG2(PRC,EXT,2, "ri=%d pm=%p", ri, pm);
5149 
5150 		if (pm == NULL || !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
5151 			continue;
5152 
5153 		PMCDBG4(PRC,EXT,2, "ppmcs[%d]=%p pm=%p state=%d", ri,
5154 		    pp->pp_pmcs[ri].pp_pmc, pm, pm->pm_state);
5155 
5156 		KASSERT(PMC_TO_ROWINDEX(pm) == ri,
5157 		    ("[pmc,%d] ri mismatch pmc(%d) ri(%d)", __LINE__,
5158 		    PMC_TO_ROWINDEX(pm), ri));
5159 		KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
5160 		    ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__, pm, ri,
5161 		    pp->pp_pmcs[ri].pp_pmc));
5162 		KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
5163 		    ("[pmc,%d] bad runcount ri %d rc %ju", __LINE__, ri,
5164 		    (uintmax_t)counter_u64_fetch(pm->pm_runcount)));
5165 
5166 		/*
5167 		 * Change desired state, and then stop if not stalled. This
5168 		 * two-step dance should avoid race conditions where an
5169 		 * interrupt re-enables the PMC after this code has already
5170 		 * checked the pm_stalled flag.
5171 		 */
5172 		if (pm->pm_pcpu_state[cpu].pps_cpustate) {
5173 			pm->pm_pcpu_state[cpu].pps_cpustate = 0;
5174 			if (!pm->pm_pcpu_state[cpu].pps_stalled) {
5175 				(void)pcd->pcd_stop_pmc(cpu, adjri, pm);
5176 
5177 				if (PMC_TO_MODE(pm) == PMC_MODE_TC) {
5178 					pcd->pcd_read_pmc(cpu, adjri, pm,
5179 					    &newvalue);
5180 					tmp = newvalue - PMC_PCPU_SAVED(cpu, ri);
5181 
5182 					mtx_pool_lock_spin(pmc_mtxpool, pm);
5183 					pm->pm_gv.pm_savedvalue += tmp;
5184 					pp->pp_pmcs[ri].pp_pmcval += tmp;
5185 					mtx_pool_unlock_spin(pmc_mtxpool, pm);
5186 				}
5187 			}
5188 		}
5189 
5190 		KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
5191 		    ("[pmc,%d] bad runcount for ri %d", __LINE__, ri));
5192 
5193 		counter_u64_add(pm->pm_runcount, -1);
5194 		(void)pcd->pcd_config_pmc(cpu, adjri, NULL);
5195 	}
5196 
5197 	/*
5198 	 * Inform the MD layer of this pseudo "context switch out".
5199 	 */
5200 	(void)md->pmd_switch_out(pmc_pcpu[cpu], pp);
5201 
5202 	critical_exit(); /* ok to be pre-empted now */
5203 
5204 	/*
5205 	 * Unlink this process from the PMCs that are targeting it. This will
5206 	 * send a signal to all PMC owners whose PMCs are orphaned.
5207 	 *
5208 	 * Log PMC value at exit time if requested.
5209 	 */
5210 	for (ri = 0; ri < md->pmd_npmc; ri++) {
5211 		if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
5212 			if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) != 0 &&
5213 			    PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm))) {
5214 				pmclog_process_procexit(pm, pp);
5215 			}
5216 			pmc_unlink_target_process(pm, pp);
5217 		}
5218 	}
5219 	free(pp, M_PMC);
5220 
5221 out:
5222 	/*
5223 	 * If the process owned PMCs, release them and free their memory.
5224 	 */
5225 	if ((po = pmc_find_owner_descriptor(p)) != NULL) {
5226 		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0)
5227 			pmclog_close(po);
5228 		pmc_remove_owner(po);
5229 		pmc_destroy_owner_descriptor(po);
5230 	}
5231 
5232 	sx_xunlock(&pmc_sx);
5233 }
5234 
5235 /*
5236  * Handle a process fork.
5237  *
5238  * If the parent process 'p1' is under HWPMC monitoring, then copy
5239  * over any attached PMCs that have 'do_descendants' semantics.
5240  */
5241 static void
5242 pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc,
5243     int flags __unused)
5244 {
5245 	struct pmc *pm;
5246 	struct pmc_owner *po;
5247 	struct pmc_process *ppnew, *ppold;
5248 	unsigned int ri;
5249 	bool is_using_hwpmcs, do_descendants;
5250 
5251 	PROC_LOCK(p1);
5252 	is_using_hwpmcs = (p1->p_flag & P_HWPMC) != 0;
5253 	PROC_UNLOCK(p1);
5254 
5255 	/*
5256 	 * If there are system-wide sampling PMCs active, we need to
5257 	 * log all fork events to their owner's logs.
5258 	 */
5259 	PMC_EPOCH_ENTER();
5260 	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) {
5261 		if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
5262 			pmclog_process_procfork(po, p1->p_pid, newproc->p_pid);
5263 			pmclog_process_proccreate(po, newproc, 1);
5264 		}
5265 	}
5266 	PMC_EPOCH_EXIT();
5267 
5268 	if (!is_using_hwpmcs)
5269 		return;
5270 
5271 	PMC_GET_SX_XLOCK();
5272 	PMCDBG4(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1,
5273 	    p1->p_pid, p1->p_comm, newproc);
5274 
5275 	/*
5276 	 * If the parent process (curthread->td_proc) is a
5277 	 * target of any PMCs, look for PMCs that are to be
5278 	 * inherited, and link these into the new process
5279 	 * descriptor.
5280 	 */
5281 	ppold = pmc_find_process_descriptor(curthread->td_proc, PMC_FLAG_NONE);
5282 	if (ppold == NULL)
5283 		goto done; /* nothing to do */
5284 
5285 	do_descendants = false;
5286 	for (ri = 0; ri < md->pmd_npmc; ri++) {
5287 		if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
5288 		    (pm->pm_flags & PMC_F_DESCENDANTS) != 0) {
5289 			do_descendants = true;
5290 			break;
5291 		}
5292 	}
5293 	if (!do_descendants) /* nothing to do */
5294 		goto done;
5295 
5296 	/*
5297 	 * Now mark the new process as being tracked by this driver.
5298 	 */
5299 	PROC_LOCK(newproc);
5300 	newproc->p_flag |= P_HWPMC;
5301 	PROC_UNLOCK(newproc);
5302 
5303 	/* Allocate a descriptor for the new process. */
5304 	ppnew = pmc_find_process_descriptor(newproc, PMC_FLAG_ALLOCATE);
5305 	if (ppnew == NULL)
5306 		goto done;
5307 
5308 	/*
5309 	 * Run through all PMCs that were targeting the old process
5310 	 * and which specified F_DESCENDANTS and attach them to the
5311 	 * new process.
5312 	 *
5313 	 * Log the fork event to all owners of PMCs attached to this
5314 	 * process, if not already logged.
5315 	 */
5316 	for (ri = 0; ri < md->pmd_npmc; ri++) {
5317 		if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
5318 		    (pm->pm_flags & PMC_F_DESCENDANTS) != 0) {
5319 			pmc_link_target_process(pm, ppnew);
5320 			po = pm->pm_owner;
5321 			if (po->po_sscount == 0 &&
5322 			    (po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) {
5323 				pmclog_process_procfork(po, p1->p_pid,
5324 				    newproc->p_pid);
5325 			}
5326 		}
5327 	}
5328 
5329 done:
5330 	sx_xunlock(&pmc_sx);
5331 }
5332 
5333 static void
5334 pmc_process_threadcreate(struct thread *td)
5335 {
5336 	struct pmc_owner *po;
5337 
5338 	PMC_EPOCH_ENTER();
5339 	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) {
5340 		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0)
5341 			pmclog_process_threadcreate(po, td, 1);
5342 	}
5343 	PMC_EPOCH_EXIT();
5344 }
5345 
5346 static void
5347 pmc_process_threadexit(struct thread *td)
5348 {
5349 	struct pmc_owner *po;
5350 
5351 	PMC_EPOCH_ENTER();
5352 	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) {
5353 		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0)
5354 			pmclog_process_threadexit(po, td);
5355 	}
5356 	PMC_EPOCH_EXIT();
5357 }
5358 
5359 static void
5360 pmc_process_proccreate(struct proc *p)
5361 {
5362 	struct pmc_owner *po;
5363 
5364 	PMC_EPOCH_ENTER();
5365 	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) {
5366 		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0)
5367 			pmclog_process_proccreate(po, p, 1 /* sync */);
5368 	}
5369 	PMC_EPOCH_EXIT();
5370 }
5371 
5372 static void
5373 pmc_process_allproc(struct pmc *pm)
5374 {
5375 	struct pmc_owner *po;
5376 	struct thread *td;
5377 	struct proc *p;
5378 
5379 	po = pm->pm_owner;
5380 	if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
5381 		return;
5382 
5383 	sx_slock(&allproc_lock);
5384 	FOREACH_PROC_IN_SYSTEM(p) {
5385 		pmclog_process_proccreate(po, p, 0 /* sync */);
5386 		PROC_LOCK(p);
5387 		FOREACH_THREAD_IN_PROC(p, td)
5388 			pmclog_process_threadcreate(po, td, 0 /* sync */);
5389 		PROC_UNLOCK(p);
5390 	}
5391 	sx_sunlock(&allproc_lock);
5392 	pmclog_flush(po, 0);
5393 }
5394 
5395 static void
5396 pmc_kld_load(void *arg __unused, linker_file_t lf)
5397 {
5398 	struct pmc_owner *po;
5399 
5400 	/*
5401 	 * Notify owners of system sampling PMCs about KLD operations.
5402 	 */
5403 	PMC_EPOCH_ENTER();
5404 	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) {
5405 		if (po->po_flags & PMC_PO_OWNS_LOGFILE)
5406 			pmclog_process_map_in(po, (pid_t) -1,
5407 			    (uintfptr_t) lf->address, lf->pathname);
5408 	}
5409 	PMC_EPOCH_EXIT();
5410 
5411 	/*
5412 	 * TODO: Notify owners of (all) process-sampling PMCs too.
5413 	 */
5414 }
5415 
5416 static void
5417 pmc_kld_unload(void *arg __unused, const char *filename __unused,
5418     caddr_t address, size_t size)
5419 {
5420 	struct pmc_owner *po;
5421 
5422 	PMC_EPOCH_ENTER();
5423 	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) {
5424 		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) {
5425 			pmclog_process_map_out(po, (pid_t)-1,
5426 			    (uintfptr_t)address, (uintfptr_t)address + size);
5427 		}
5428 	}
5429 	PMC_EPOCH_EXIT();
5430 
5431 	/*
5432 	 * TODO: Notify owners of process-sampling PMCs.
5433 	 */
5434 }
5435 
5436 /*
5437  * initialization
5438  */
5439 static const char *
5440 pmc_name_of_pmcclass(enum pmc_class class)
5441 {
5442 
5443 	switch (class) {
5444 #undef	__PMC_CLASS
5445 #define	__PMC_CLASS(S,V,D)						\
5446 	case PMC_CLASS_##S:						\
5447 		return #S;
5448 	__PMC_CLASSES();
5449 	default:
5450 		return ("<unknown>");
5451 	}
5452 }
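
/*
 * The switch above leans on the __PMC_CLASSES() X-macro: redefining
 * __PMC_CLASS() turns every entry of the class list into a case
 * label.  For a list entry of the form __PMC_CLASS(TSC, ...), the
 * expansion is simply:
 *
 *	case PMC_CLASS_TSC:
 *		return "TSC";
 */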
5453 
5454 /*
5455  * Base class initializer: allocate structure and set default methods.
5456  */
5457 struct pmc_mdep *
5458 pmc_mdep_alloc(int nclasses)
5459 {
5460 	struct pmc_mdep *md;
5461 	int n;
5462 
5463 	/* SOFT + md classes */
5464 	n = 1 + nclasses;
5465 	md = malloc(sizeof(struct pmc_mdep) + n * sizeof(struct pmc_classdep),
5466 	    M_PMC, M_WAITOK | M_ZERO);
5467 	md->pmd_nclass = n;
5468 
5469 	/* Default methods */
5470 	md->pmd_switch_in = generic_switch_in;
5471 	md->pmd_switch_out = generic_switch_out;
5472 
5473 	/* Add base class. */
5474 	pmc_soft_initialize(md);
5475 	return (md);
5476 }
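
/*
 * Layout note for the single allocation above: the pmc_classdep slots
 * follow struct pmc_mdep in one contiguous block, with the SOFT class
 * installed first by pmc_soft_initialize() and the machine-dependent
 * classes filling the remaining slots:
 *
 *	+-----------------+-------------+-------------+-----
 *	| struct pmc_mdep | classdep[0] | classdep[1] | ...
 *	+-----------------+-------------+-------------+-----
 *	                    SOFT          first MD class
 */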
5477 
5478 void
5479 pmc_mdep_free(struct pmc_mdep *md)
5480 {
5481 	pmc_soft_finalize(md);
5482 	free(md, M_PMC);
5483 }
5484 
5485 static int
5486 generic_switch_in(struct pmc_cpu *pc __unused, struct pmc_process *pp __unused)
5487 {
5488 
5489 	return (0);
5490 }
5491 
5492 static int
5493 generic_switch_out(struct pmc_cpu *pc __unused, struct pmc_process *pp __unused)
5494 {
5495 
5496 	return (0);
5497 }
5498 
5499 static struct pmc_mdep *
5500 pmc_generic_cpu_initialize(void)
5501 {
5502 	struct pmc_mdep *md;
5503 
5504 	md = pmc_mdep_alloc(0);
5505 
5506 	md->pmd_cputype = PMC_CPU_GENERIC;
5507 
5508 	return (md);
5509 }
5510 
5511 static void
5512 pmc_generic_cpu_finalize(struct pmc_mdep *md __unused)
5513 {
5514 
5515 }
5516 
5517 static int
5518 pmc_initialize(void)
5519 {
5520 	struct pcpu *pc;
5521 	struct pmc_binding pb;
5522 	struct pmc_classdep *pcd;
5523 	struct pmc_sample *ps;
5524 	struct pmc_samplebuffer *sb;
5525 	int c, cpu, error, n, ri;
5526 	u_int maxcpu, domain;
5527 
5528 	md = NULL;
5529 	error = 0;
5530 
5531 	pmc_stats.pm_intr_ignored = counter_u64_alloc(M_WAITOK);
5532 	pmc_stats.pm_intr_processed = counter_u64_alloc(M_WAITOK);
5533 	pmc_stats.pm_intr_bufferfull = counter_u64_alloc(M_WAITOK);
5534 	pmc_stats.pm_syscalls = counter_u64_alloc(M_WAITOK);
5535 	pmc_stats.pm_syscall_errors = counter_u64_alloc(M_WAITOK);
5536 	pmc_stats.pm_buffer_requests = counter_u64_alloc(M_WAITOK);
5537 	pmc_stats.pm_buffer_requests_failed = counter_u64_alloc(M_WAITOK);
5538 	pmc_stats.pm_log_sweeps = counter_u64_alloc(M_WAITOK);
5539 	pmc_stats.pm_merges = counter_u64_alloc(M_WAITOK);
5540 	pmc_stats.pm_overwrites = counter_u64_alloc(M_WAITOK);
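/*
 * These statistics are counter(9) per-CPU counters: increments are
 * lock-free per-CPU updates, and a consolidated value is obtained
 * with counter_u64_fetch().  For example, a sysctl handler might
 * read the interrupt count as:
 *
 *	uint64_t v = counter_u64_fetch(pmc_stats.pm_intr_processed);
 */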
5541 
5542 #ifdef HWPMC_DEBUG
5543 	/* parse debug flags first */
5544 	if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags",
5545 	    pmc_debugstr, sizeof(pmc_debugstr))) {
5546 		pmc_debugflags_parse(pmc_debugstr, pmc_debugstr +
5547 		    strlen(pmc_debugstr));
5548 	}
5549 #endif
5550 
5551 	PMCDBG1(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION);
5552 
5553 	/* check kernel version */
5554 	if (pmc_kernel_version != PMC_VERSION) {
5555 		if (pmc_kernel_version == 0)
5556 			printf("hwpmc: this kernel has not been compiled with "
5557 			    "'options HWPMC_HOOKS'.\n");
5558 		else
5559 			printf("hwpmc: kernel version (0x%x) does not match "
5560 			    "module version (0x%x).\n", pmc_kernel_version,
5561 			    PMC_VERSION);
5562 		return (EPROGMISMATCH);
5563 	}
5564 
5565 	/*
5566 	 * check sysctl parameters
5567 	 */
5568 	if (pmc_hashsize <= 0) {
5569 		printf("hwpmc: tunable \"hashsize\"=%d must be "
5570 		    "greater than zero.\n", pmc_hashsize);
5571 		pmc_hashsize = PMC_HASH_SIZE;
5572 	}
5573 
5574 	if (pmc_nsamples <= 0 || pmc_nsamples > 65535) {
5575 		printf("hwpmc: tunable \"nsamples\"=%d out of "
5576 		    "range - using %d.\n", pmc_nsamples, PMC_NSAMPLES);
5577 		pmc_nsamples = PMC_NSAMPLES;
5578 	}
5579 	pmc_sample_mask = pmc_nsamples - 1;
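/*
 * Note: using "nsamples - 1" as a bit mask assumes pmc_nsamples is a
 * power of two (the PMC_NSAMPLES default is).  With nsamples = 1024,
 * for instance, the mask is 0x3ff and a ring index reduces as
 * "idx & pmc_sample_mask"; a non-power-of-two tunable would yield a
 * mask with holes and silently skip buffer slots.
 */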
5580 
5581 	if (pmc_callchaindepth <= 0 ||
5582 	    pmc_callchaindepth > PMC_CALLCHAIN_DEPTH_MAX) {
5583 		printf("hwpmc: tunable \"callchaindepth\"=%d out of "
5584 		    "range - using %d.\n", pmc_callchaindepth,
5585 		    PMC_CALLCHAIN_DEPTH_MAX);
5586 		pmc_callchaindepth = PMC_CALLCHAIN_DEPTH_MAX;
5587 	}
5588 
5589 	md = pmc_md_initialize();
5590 	if (md == NULL) {
5591 		/* Default to generic CPU. */
5592 		md = pmc_generic_cpu_initialize();
5593 		if (md == NULL)
5594 			return (ENOSYS);
5595 	}
5596 
5597 	/*
5598 	 * Refresh classes base ri. Optional classes may come in different
5599 	 * order.
5600 	 */
5601 	for (ri = c = 0; c < md->pmd_nclass; c++) {
5602 		pcd = &md->pmd_classdep[c];
5603 		pcd->pcd_ri = ri;
5604 		ri += pcd->pcd_num;
5605 	}
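/*
 * Example of the resulting layout (numbers illustrative): if SOFT
 * provides 16 counters and one hardware class provides 4, then
 * classdep[0].pcd_ri == 0, classdep[1].pcd_ri == 16, and the global
 * row-index space 0..19 covers md->pmd_npmc == 20 counters.
 */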
5606 
5607 	KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1,
5608 	    ("[pmc,%d] no classes or pmcs", __LINE__));
5609 
5610 	/* Compute the map from row-indices to classdep pointers. */
5611 	pmc_rowindex_to_classdep = malloc(sizeof(struct pmc_classdep *) *
5612 	    md->pmd_npmc, M_PMC, M_WAITOK | M_ZERO);
5613 
5614 	for (n = 0; n < md->pmd_npmc; n++)
5615 		pmc_rowindex_to_classdep[n] = NULL;
5616 
5617 	for (ri = c = 0; c < md->pmd_nclass; c++) {
5618 		pcd = &md->pmd_classdep[c];
5619 		for (n = 0; n < pcd->pcd_num; n++, ri++)
5620 			pmc_rowindex_to_classdep[ri] = pcd;
5621 	}
5622 
5623 	KASSERT(ri == md->pmd_npmc,
5624 	    ("[pmc,%d] npmc miscomputed: ri=%d, md->npmc=%d", __LINE__,
5625 	    ri, md->pmd_npmc));
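/*
 * Once this mapping is built, pmc_rowindex_to_classdep[ri] answers
 * "which class owns global row ri" in constant time; in the layout
 * sketched above, rows 0..15 resolve to the SOFT classdep and rows
 * 16..19 to the hardware classdep.
 */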
5626 
5627 	maxcpu = pmc_cpu_max();
5628 
5629 	/* allocate space for the per-cpu array */
5630 	pmc_pcpu = malloc(maxcpu * sizeof(struct pmc_cpu *), M_PMC,
5631 	    M_WAITOK | M_ZERO);
5632 
5633 	/* per-cpu 'saved values' for managing process-mode PMCs */
5634 	pmc_pcpu_saved = malloc(sizeof(pmc_value_t) * maxcpu * md->pmd_npmc,
5635 	    M_PMC, M_WAITOK);
5636 
5637 	/* Perform CPU-dependent initialization. */
5638 	pmc_save_cpu_binding(&pb);
5639 	error = 0;
5640 	for (cpu = 0; error == 0 && cpu < maxcpu; cpu++) {
5641 		if (!pmc_cpu_is_active(cpu))
5642 			continue;
5643 		pmc_select_cpu(cpu);
5644 		pmc_pcpu[cpu] = malloc(sizeof(struct pmc_cpu) +
5645 		    md->pmd_npmc * sizeof(struct pmc_hw *), M_PMC,
5646 		    M_WAITOK | M_ZERO);
5647 		for (n = 0; error == 0 && n < md->pmd_nclass; n++)
5648 			if (md->pmd_classdep[n].pcd_num > 0)
5649 				error = md->pmd_classdep[n].pcd_pcpu_init(md,
5650 				    cpu);
5651 	}
5652 	pmc_restore_cpu_binding(&pb);
5653 
5654 	if (error != 0)
5655 		return (error);
5656 
5657 	/* allocate space for the sample array */
5658 	for (cpu = 0; cpu < maxcpu; cpu++) {
5659 		if (!pmc_cpu_is_active(cpu))
5660 			continue;
5661 		pc = pcpu_find(cpu);
5662 		domain = pc->pc_domain;
5663 		sb = malloc_domainset(sizeof(struct pmc_samplebuffer) +
5664 		    pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
5665 		    DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
5666 
5667 		KASSERT(pmc_pcpu[cpu] != NULL,
5668 		    ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
5669 
5670 		sb->ps_callchains = malloc_domainset(pmc_callchaindepth *
5671 		    pmc_nsamples * sizeof(uintptr_t), M_PMC,
5672 		    DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
5673 
5674 		for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
5675 			ps->ps_pc = sb->ps_callchains +
5676 			    (n * pmc_callchaindepth);
5677 
5678 		pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb;
5679 
5680 		sb = malloc_domainset(sizeof(struct pmc_samplebuffer) +
5681 		    pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
5682 		    DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
5683 
5684 		sb->ps_callchains = malloc_domainset(pmc_callchaindepth *
5685 		    pmc_nsamples * sizeof(uintptr_t), M_PMC,
5686 		    DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
5687 		for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
5688 			ps->ps_pc = sb->ps_callchains +
5689 			    (n * pmc_callchaindepth);
5690 
5691 		pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb;
5692 
5693 		sb = malloc_domainset(sizeof(struct pmc_samplebuffer) +
5694 		    pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
5695 		    DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
5696 		sb->ps_callchains = malloc_domainset(pmc_callchaindepth *
5697 		    pmc_nsamples * sizeof(uintptr_t), M_PMC,
5698 		    DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);
5699 		for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
5700 			ps->ps_pc = sb->ps_callchains + n * pmc_callchaindepth;
5701 
5702 		pmc_pcpu[cpu]->pc_sb[PMC_UR] = sb;
5703 	}
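/*
 * Shape of each of the three rings (PMC_HR, PMC_SR, PMC_UR) built
 * above, allocated from the CPU's own NUMA domain via
 * DOMAINSET_PREF() so that interrupt-time writes stay local:
 *
 *	samplebuffer: [ header | ps_samples[0 .. nsamples-1] ]
 *	callchains:   [ nsamples * callchaindepth uintptr_t's ]
 *
 * with sample n's ps_pc pointing at callchain slot
 * n * pmc_callchaindepth.
 */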
5704 
5705 	/* allocate space for the row disposition array */
5706 	pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,
5707 	    M_PMC, M_WAITOK | M_ZERO);
5708 
5709 	/* mark all PMCs as available */
5710 	for (n = 0; n < md->pmd_npmc; n++)
5711 		PMC_MARK_ROW_FREE(n);
5712 
5713 	/* allocate owner and process hash tables */
5714 	pmc_ownerhash = hashinit(pmc_hashsize, M_PMC,
5715 	    &pmc_ownerhashmask);
5716 
5717 	pmc_processhash = hashinit(pmc_hashsize, M_PMC,
5718 	    &pmc_processhashmask);
5719 	mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf",
5720 	    MTX_SPIN);
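/*
 * hashinit(9) sizes each table to a power of two and returns the
 * corresponding bucket mask, so the descriptor for a process 'p'
 * lives in a bucket chosen roughly as (sketch, using the pointer
 * hash helper defined earlier in this file):
 *
 *	ph = &pmc_processhash[PMC_HASH_PTR(p, pmc_processhashmask)];
 */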
5721 
5722 	CK_LIST_INIT(&pmc_ss_owners);
5723 	pmc_ss_count = 0;
5724 
5725 	/* allocate a pool of spin mutexes */
5726 	pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size,
5727 	    MTX_SPIN);
5728 
5729 	PMCDBG4(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
5730 	    "processhash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
5731 	    pmc_processhash, pmc_processhashmask);
5732 
5733 	/* Initialize a spin mutex for the thread free list. */
5734 	mtx_init(&pmc_threadfreelist_mtx, "pmc-threadfreelist", "pmc-leaf",
5735 	    MTX_SPIN);
5736 
5737 	/* Initialize the task to prune the thread free list. */
5738 	TASK_INIT(&free_task, 0, pmc_thread_descriptor_pool_free_task, NULL);
5739 
5740 	/* register process {exit,fork,exec} handlers */
5741 	pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
5742 	    pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
5743 	pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
5744 	    pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);
5745 
5746 	/* register kld event handlers */
5747 	pmc_kld_load_tag = EVENTHANDLER_REGISTER(kld_load, pmc_kld_load,
5748 	    NULL, EVENTHANDLER_PRI_ANY);
5749 	pmc_kld_unload_tag = EVENTHANDLER_REGISTER(kld_unload, pmc_kld_unload,
5750 	    NULL, EVENTHANDLER_PRI_ANY);
5751 
5752 	/* initialize logging */
5753 	pmclog_initialize();
5754 
5755 	/* set hook functions */
5756 	pmc_intr = md->pmd_intr;
5757 	wmb();
5758 	pmc_hook = pmc_hook_handler;
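/*
 * Publication order matters here: pmc_intr must be visible to other
 * CPUs before pmc_hook becomes non-NULL, since a non-NULL hook lets
 * threads enter the module and expect a usable interrupt handler.
 * The wmb() above is the store fence enforcing that order.
 */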
5759 
5760 	if (error == 0) {
5761 		printf(PMC_MODULE_NAME ":");
5762 		for (n = 0; n < md->pmd_nclass; n++) {
5763 			if (md->pmd_classdep[n].pcd_num == 0)
5764 				continue;
5765 			pcd = &md->pmd_classdep[n];
5766 			printf(" %s/%d/%d/0x%b",
5767 			    pmc_name_of_pmcclass(pcd->pcd_class),
5768 			    pcd->pcd_num,
5769 			    pcd->pcd_width,
5770 			    pcd->pcd_caps,
5771 			    "\20"
5772 			    "\1INT\2USR\3SYS\4EDG\5THR"
5773 			    "\6REA\7WRI\10INV\11QUA\12PRC"
5774 			    "\13TAG\14CSC");
5775 		}
5776 		printf("\n");
5777 	}
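/*
 * The banner printed above comes out as, e.g. (illustrative values):
 *
 *	hwpmc: SOFT/16/64/0x67<INT,USR,SYS,REA,WRI>
 *
 * i.e. class name / counter count / width in bits / capability mask,
 * with the mask decoded by the %b format string.
 */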
5778 
5779 	return (error);
5780 }
5781 
5782 /* prepare to be unloaded */
5783 static void
5784 pmc_cleanup(void)
5785 {
5786 	struct pmc_binding pb;
5787 	struct pmc_owner *po, *tmp;
5788 	struct pmc_ownerhash *ph;
5789 	struct pmc_processhash *prh __pmcdbg_used;
5790 	u_int maxcpu;
5791 	int cpu, c;
5792 
5793 	PMCDBG0(MOD,INI,0, "cleanup");
5794 
5795 	/* switch off sampling */
5796 	CPU_FOREACH(cpu)
5797 		DPCPU_ID_SET(cpu, pmc_sampled, 0);
5798 	pmc_intr = NULL;
5799 
5800 	sx_xlock(&pmc_sx);
5801 	if (pmc_hook == NULL) {	/* being unloaded already */
5802 		sx_xunlock(&pmc_sx);
5803 		return;
5804 	}
5805 
5806 	pmc_hook = NULL; /* prevent new threads from entering module */
5807 
5808 	/* deregister event handlers */
5809 	EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag);
5810 	EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag);
5811 	EVENTHANDLER_DEREGISTER(kld_load, pmc_kld_load_tag);
5812 	EVENTHANDLER_DEREGISTER(kld_unload, pmc_kld_unload_tag);
5813 
5814 	/* send SIGBUS to all owner threads, free up allocations */
5815 	if (pmc_ownerhash != NULL) {
5816 		for (ph = pmc_ownerhash;
5817 		     ph <= &pmc_ownerhash[pmc_ownerhashmask];
5818 		     ph++) {
5819 			LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
5820 				pmc_remove_owner(po);
5821 
5822 				PMCDBG3(MOD,INI,2,
5823 				    "cleanup signal proc=%p (%d, %s)",
5824 				    po->po_owner, po->po_owner->p_pid,
5825 				    po->po_owner->p_comm);
5826 
5827 				PROC_LOCK(po->po_owner);
5828 				kern_psignal(po->po_owner, SIGBUS);
5829 				PROC_UNLOCK(po->po_owner);
5830 
5831 				pmc_destroy_owner_descriptor(po);
5832 			}
5833 		}
5834 	}
5835 
5836 	/* reclaim allocated data structures */
5837 	taskqueue_drain(taskqueue_fast, &free_task);
5838 	mtx_destroy(&pmc_threadfreelist_mtx);
5839 	pmc_thread_descriptor_pool_drain();
5840 
5841 	if (pmc_mtxpool != NULL)
5842 		mtx_pool_destroy(&pmc_mtxpool);
5843 
5844 	mtx_destroy(&pmc_processhash_mtx);
5845 	if (pmc_processhash != NULL) {
5846 #ifdef HWPMC_DEBUG
5847 		struct pmc_process *pp;
5848 
5849 		PMCDBG0(MOD,INI,3, "destroy process hash");
5850 		for (prh = pmc_processhash;
5851 		     prh <= &pmc_processhash[pmc_processhashmask];
5852 		     prh++)
5853 			LIST_FOREACH(pp, prh, pp_next)
5854 			    PMCDBG1(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid);
5855 #endif
5856 
5857 		hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask);
5858 		pmc_processhash = NULL;
5859 	}
5860 
5861 	if (pmc_ownerhash != NULL) {
5862 		PMCDBG0(MOD,INI,3, "destroy owner hash");
5863 		hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask);
5864 		pmc_ownerhash = NULL;
5865 	}
5866 
5867 	KASSERT(CK_LIST_EMPTY(&pmc_ss_owners),
5868 	    ("[pmc,%d] Global SS owner list not empty", __LINE__));
5869 	KASSERT(pmc_ss_count == 0,
5870 	    ("[pmc,%d] Global SS count not zero", __LINE__));
5871 
5872 	/* do processor and pmc-class dependent cleanup */
5873 	maxcpu = pmc_cpu_max();
5874 
5875 	PMCDBG0(MOD,INI,3, "md cleanup");
5876 	if (md != NULL) {
5877 		pmc_save_cpu_binding(&pb);
5878 		for (cpu = 0; cpu < maxcpu; cpu++) {
5879 			PMCDBG2(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
5880 			    cpu, pmc_pcpu[cpu]);
5881 			if (!pmc_cpu_is_active(cpu) || pmc_pcpu[cpu] == NULL)
5882 				continue;
5883 
5884 			pmc_select_cpu(cpu);
5885 			for (c = 0; c < md->pmd_nclass; c++) {
5886 				if (md->pmd_classdep[c].pcd_num > 0) {
5887 					md->pmd_classdep[c].pcd_pcpu_fini(md,
5888 					    cpu);
5889 				}
5890 			}
5891 		}
5892 
5893 		if (md->pmd_cputype == PMC_CPU_GENERIC)
5894 			pmc_generic_cpu_finalize(md);
5895 		else
5896 			pmc_md_finalize(md);
5897 
5898 		pmc_mdep_free(md);
5899 		md = NULL;
5900 		pmc_restore_cpu_binding(&pb);
5901 	}
5902 
5903 	/* Free per-cpu descriptors. */
5904 	for (cpu = 0; cpu < maxcpu; cpu++) {
5905 		if (!pmc_cpu_is_active(cpu))
5906 			continue;
5907 		KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_HR] != NULL,
5908 		    ("[pmc,%d] Null hw cpu sample buffer cpu=%d", __LINE__,
5909 			cpu));
5910 		KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_SR] != NULL,
5911 		    ("[pmc,%d] Null sw cpu sample buffer cpu=%d", __LINE__,
5912 			cpu));
5913 		KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_UR] != NULL,
5914 		    ("[pmc,%d] Null userret cpu sample buffer cpu=%d", __LINE__,
5915 			cpu));
5916 		free(pmc_pcpu[cpu]->pc_sb[PMC_HR]->ps_callchains, M_PMC);
5917 		free(pmc_pcpu[cpu]->pc_sb[PMC_HR], M_PMC);
5918 		free(pmc_pcpu[cpu]->pc_sb[PMC_SR]->ps_callchains, M_PMC);
5919 		free(pmc_pcpu[cpu]->pc_sb[PMC_SR], M_PMC);
5920 		free(pmc_pcpu[cpu]->pc_sb[PMC_UR]->ps_callchains, M_PMC);
5921 		free(pmc_pcpu[cpu]->pc_sb[PMC_UR], M_PMC);
5922 		free(pmc_pcpu[cpu], M_PMC);
5923 	}
5924 
5925 	free(pmc_pcpu, M_PMC);
5926 	pmc_pcpu = NULL;
5927 
5928 	free(pmc_pcpu_saved, M_PMC);
5929 	pmc_pcpu_saved = NULL;
5930 
5931 	if (pmc_pmcdisp != NULL) {
5932 		free(pmc_pmcdisp, M_PMC);
5933 		pmc_pmcdisp = NULL;
5934 	}
5935 
5936 	if (pmc_rowindex_to_classdep != NULL) {
5937 		free(pmc_rowindex_to_classdep, M_PMC);
5938 		pmc_rowindex_to_classdep = NULL;
5939 	}
5940 
5941 	pmclog_shutdown();
5942 	counter_u64_free(pmc_stats.pm_intr_ignored);
5943 	counter_u64_free(pmc_stats.pm_intr_processed);
5944 	counter_u64_free(pmc_stats.pm_intr_bufferfull);
5945 	counter_u64_free(pmc_stats.pm_syscalls);
5946 	counter_u64_free(pmc_stats.pm_syscall_errors);
5947 	counter_u64_free(pmc_stats.pm_buffer_requests);
5948 	counter_u64_free(pmc_stats.pm_buffer_requests_failed);
5949 	counter_u64_free(pmc_stats.pm_log_sweeps);
5950 	counter_u64_free(pmc_stats.pm_merges);
5951 	counter_u64_free(pmc_stats.pm_overwrites);
5952 	sx_xunlock(&pmc_sx);	/* we are done */
5953 }
5954 
5955 /*
5956  * The function called at load/unload.
5957  */
5958 static int
5959 load(struct module *module __unused, int cmd, void *arg __unused)
5960 {
5961 	int error;
5962 
5963 	error = 0;
5964 
5965 	switch (cmd) {
5966 	case MOD_LOAD:
5967 		/* initialize the subsystem */
5968 		error = pmc_initialize();
5969 		if (error != 0)
5970 			break;
5971 		PMCDBG2(MOD,INI,1, "syscall=%d maxcpu=%d", pmc_syscall_num,
5972 		    pmc_cpu_max());
5973 		break;
5974 	case MOD_UNLOAD:
5975 	case MOD_SHUTDOWN:
5976 		pmc_cleanup();
5977 		PMCDBG0(MOD,INI,1, "unloaded");
5978 		break;
5979 	default:
5980 		error = EINVAL;
5981 		break;
5982 	}
5983 
5984 	return (error);
5985 }
5986
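/*
 * A handler like load() is typically wired up with a moduledata_t
 * and DECLARE_MODULE(); a hedged sketch of the boilerplate (the
 * SYSINIT ordering constants shown are illustrative):
 *
 *	static moduledata_t pmc_mod = {
 *		PMC_MODULE_NAME,
 *		load,
 *		NULL
 *	};
 *	DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_MIDDLE);
 *	MODULE_VERSION(pmc, PMC_VERSION);
 */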