xref: /freebsd/lib/libpmc/libpmc.c (revision 51e235148a4becba94e824a44bd69687644a7f56)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2003-2008 Joseph Koshy
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/module.h>
33 #include <sys/pmc.h>
34 #include <sys/syscall.h>
35 
36 #include <assert.h>
37 #include <ctype.h>
38 #include <errno.h>
39 #include <err.h>
40 #include <fcntl.h>
41 #include <pmc.h>
42 #include <stdio.h>
43 #include <stdlib.h>
44 #include <string.h>
45 #include <strings.h>
46 #include <sysexits.h>
47 #include <unistd.h>
48 
49 #include "libpmcinternal.h"
50 
/* Function prototypes */
/*
 * Per-class event-specifier parsers.  Each one consumes the qualifier
 * list following an event name and fills in the pmc_op_pmcallocate
 * request; all return 0 on success and -1 on a malformed specifier.
 */
#if defined(__amd64__) || defined(__i386__)
static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__arm__)
static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__aarch64__)
static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int cmn600_pmu_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int dmc620_pmu_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);

#if defined(__powerpc__)
static int powerpc_allocate_pmc(enum pmc_event _pe, char* ctrspec,
			     struct pmc_op_pmcallocate *_pmc_config);
#endif /* __powerpc__ */

/* Invoke the hwpmc system call; 'pmc_syscall' is filled in by pmc_init(). */
#define PMC_CALL(op, params)	syscall(pmc_syscall, (op), (params))
81 
/*
 * Event aliases provide a way for the user to ask for generic events
 * like "cache-misses", or "instructions-retired".  These aliases are
 * mapped to the appropriate canonical event descriptions using a
 * lookup table.
 */
struct pmc_event_alias {
	const char	*pm_alias;	/* user-visible alias name */
	const char	*pm_spec;	/* canonical event specifier */
};

/* Alias table for the current machine; NULL until one is selected. */
static const struct pmc_event_alias *pmc_mdep_event_aliases;
94 
/*
 * The pmc_event_descr structure maps symbolic names known to the user
 * to integer codes used by the PMC KLD.
 */
struct pmc_event_descr {
	const char	*pm_ev_name;	/* symbolic event name */
	enum pmc_event	pm_ev_code;	/* kernel event code */
};
103 
/*
 * The pmc_class_descr structure maps class name prefixes for
 * event names to event tables and other PMC class data.
 */
struct pmc_class_descr {
	const char	*pm_evc_name;	/* class prefix, e.g. "K8-" */
	size_t		pm_evc_name_size;	/* prefix length, sans NUL */
	enum pmc_class	pm_evc_class;	/* kernel PMC class code */
	const struct pmc_event_descr *pm_evc_event_table;
	size_t		pm_evc_event_table_size;
	/* class-specific event specifier parser */
	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
};
117 
/* Number of elements in a statically-sized array. */
#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
/* Number of entries in the event table named <N>_event_table. */
#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)

#undef	__PMC_EV
#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },

/*
 * PMC_CLASSDEP_TABLE(NAME, CLASS)
 *
 * Define a table mapping event names and aliases to HWPMC event IDs.
 */
#define	PMC_CLASSDEP_TABLE(N, C)				\
	static const struct pmc_event_descr N##_event_table[] =	\
	{							\
		__PMC_EV_##C()					\
	}

PMC_CLASSDEP_TABLE(iaf, IAF);
PMC_CLASSDEP_TABLE(k8, K8);
PMC_CLASSDEP_TABLE(armv7, ARMV7);
PMC_CLASSDEP_TABLE(armv8, ARMV8);
PMC_CLASSDEP_TABLE(cmn600_pmu, CMN600_PMU);
PMC_CLASSDEP_TABLE(dmc620_pmu_cd2, DMC620_PMU_CD2);
PMC_CLASSDEP_TABLE(dmc620_pmu_c, DMC620_PMU_C);
PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
PMC_CLASSDEP_TABLE(ppc970, PPC970);
PMC_CLASSDEP_TABLE(e500, E500);

/* Soft events are dynamic; this table is populated at run time. */
static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];
147 
#undef	__PMC_EV_ALIAS
#define	__PMC_EV_ALIAS(N,CODE) 	{ N, PMC_EV_##CODE },

/*
 * TODO: Factor out the __PMC_EV_ARMV7/8 list into a single separate table
 * rather than duplicating for each core.
 */

/* Cortex-A8: core-specific aliases, then the generic ARMv7 events. */
static const struct pmc_event_descr cortex_a8_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A8()
	__PMC_EV_ARMV7()
};

/* Cortex-A9: core-specific aliases, then the generic ARMv7 events. */
static const struct pmc_event_descr cortex_a9_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A9()
	__PMC_EV_ARMV7()
};

/* Cortex-A53: core-specific aliases, then the generic ARMv8 events. */
static const struct pmc_event_descr cortex_a53_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A53()
	__PMC_EV_ARMV8()
};

/* Cortex-A57: core-specific aliases, then the generic ARMv8 events. */
static const struct pmc_event_descr cortex_a57_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A57()
	__PMC_EV_ARMV8()
};

/* Cortex-A76: core-specific aliases, then the generic ARMv8 events. */
static const struct pmc_event_descr cortex_a76_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A76()
	__PMC_EV_ARMV8()
};

/* Timestamp counter. */
static const struct pmc_event_descr tsc_event_table[] =
{
	__PMC_EV_ALIAS_TSC()
};
190 
#undef	PMC_CLASS_TABLE_DESC
/*
 * PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)
 *
 * Define the class descriptor <NAME>_class_table_descr: its "CLASS-"
 * name prefix, the event table <EVENTS>_event_table, and the specifier
 * parser <ALLOCATOR>_allocate_pmc.
 */
#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
static const struct pmc_class_descr NAME##_class_table_descr =	\
	{							\
		.pm_evc_name  = #CLASS "-",			\
		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
		.pm_evc_class = PMC_CLASS_##CLASS ,		\
		.pm_evc_event_table = EVENTS##_event_table ,	\
		.pm_evc_event_table_size = 			\
			PMC_EVENT_TABLE_SIZE(EVENTS),		\
		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
	}

#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
#endif
#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
#endif
#if	defined(__arm__)
PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7);
PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7);
#endif
#if	defined(__aarch64__)
PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
PMC_CLASS_TABLE_DESC(cortex_a76, ARMV8, cortex_a76, arm64);
PMC_CLASS_TABLE_DESC(cmn600_pmu, CMN600_PMU, cmn600_pmu, cmn600_pmu);
PMC_CLASS_TABLE_DESC(dmc620_pmu_cd2, DMC620_PMU_CD2, dmc620_pmu_cd2, dmc620_pmu);
PMC_CLASS_TABLE_DESC(dmc620_pmu_c, DMC620_PMU_C, dmc620_pmu_c, dmc620_pmu);
#endif
#if defined(__powerpc__)
PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc);
#endif

/* SOFT class descriptor; its event table starts out empty here. */
static struct pmc_class_descr soft_class_table_descr =
{
	.pm_evc_name  = "SOFT-",
	.pm_evc_name_size = sizeof("SOFT-") - 1,
	.pm_evc_class = PMC_CLASS_SOFT,
	.pm_evc_event_table = NULL,
	.pm_evc_event_table_size = 0,
	.pm_evc_allocate_pmc = soft_allocate_pmc
};

#undef	PMC_CLASS_TABLE_DESC

/*
 * Class descriptors in effect for the running system; the table size
 * is the kernel-reported class count (cpu_info.pm_nclass).
 */
static const struct pmc_class_descr **pmc_class_table;
#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass
242 
/*
 * Mapping tables, mapping enumeration values to human readable
 * strings.
 */

/* PMC capability names. */
static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

struct pmc_class_map {
	enum pmc_class	pm_class;
	const char	*pm_name;
};

/* PMC class codes and their names. */
static const struct pmc_class_map pmc_class_names[] = {
#undef	__PMC_CLASS
#define __PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } ,
	__PMC_CLASSES()
};

struct pmc_cputype_map {
	enum pmc_cputype pm_cputype;
	const char	*pm_name;
};

/* CPU type codes and their names. */
static const struct pmc_cputype_map pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
	__PMC_CPUS()
};

/* PMC disposition names. */
static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

/* PMC mode names. */
static const char * pmc_mode_names[] = {
#undef  __PMC_MODE
#define __PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

/* PMC state names. */
static const char * pmc_state_names[] = {
#undef  __PMC_STATE
#define __PMC_STATE(S) #S ,
	__PMC_STATES()
};
293 
/*
 * Filled in by pmc_init().
 */
static int pmc_syscall = -1;	/* hwpmc syscall number; -1 until init */
static struct pmc_cpuinfo cpu_info;	/* CPU/PMC configuration info */
static struct pmc_op_getdyneventinfo soft_event_info;	/* dynamic soft events */

/* Event masks for events */
struct pmc_masks {
	const char	*pm_name;	/* mask keyword */
	const uint64_t	pm_value;	/* bit value(s) it selects */
};
#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
#define	NULLMASK	{ .pm_name = NULL }	/* table terminator */
308 
309 #if defined(__amd64__) || defined(__i386__)
310 static int
311 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
312 {
313 	const struct pmc_masks *pm;
314 	char *q, *r;
315 	int c;
316 
317 	if (pmask == NULL)	/* no mask keywords */
318 		return (-1);
319 	q = strchr(p, '=');	/* skip '=' */
320 	if (*++q == '\0')	/* no more data */
321 		return (-1);
322 	c = 0;			/* count of mask keywords seen */
323 	while ((r = strsep(&q, "+")) != NULL) {
324 		for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
325 		    pm++)
326 			;
327 		if (pm->pm_name == NULL) /* not found */
328 			return (-1);
329 		*evmask |= pm->pm_value;
330 		c++;
331 	}
332 	return (c);
333 }
334 #endif
335 
/* Case-insensitive exact and prefix keyword matchers. */
#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
/* Build one pmc_event_alias table entry. */
#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
339 
#if defined(__amd64__) || defined(__i386__)
/*
 * AMD K8 PMCs.
 *
 * Generic alias names mapped to K8 event specifiers.
 */

static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k8-dc-miss"),
	EV_ALIAS("ic-misses",		"k8-ic-miss"),
	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};
358 
#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables
 *
 * Each table maps the unit-mask keywords accepted for an event to the
 * corresponding bit; every table is NULLMASK-terminated.
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops,	0),
	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
	__K8MASK(store-pipe-excluding-junk-ops,	2),
	__K8MASK(add-pipe-junk-ops,		3),
	__K8MASK(multiply-pipe-junk-ops,	4),
	__K8MASK(store-pipe-junk-ops,		5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es,	0),
	__K8MASK(cs,	1),
	__K8MASK(ss,	2),
	__K8MASK(ds,	3),
	__K8MASK(fs,	4),
	__K8MASK(gs,	5),
	__K8MASK(hs,	6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions,	0),
	__K8MASK(cycles-in-request,	1),
	__K8MASK(cycles-to-complete,	2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid,	0),
	__K8MASK(shared,	1),
	__K8MASK(exclusive,	2),
	__K8MASK(owner,		3),
	__K8MASK(modified,	4),
	NULLMASK
};
405 
/* Unit-mask tables for the remaining DC and the BU events. */

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber,	0),
	__K8MASK(piggyback,	1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load,	0),
	__K8MASK(store,	1),
	__K8MASK(nta,	2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses,	0),
	__K8MASK(misses,	1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	__K8MASK(tag-snoop,	3),
	__K8MASK(cancelled,	4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim,	0),
	__K8MASK(victim-from-l2,	1),
	NULLMASK
};
452 
/* Unit-mask tables for the FR (instruction retirement) and NB events. */

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87,			0),
	__K8MASK(mmx-3dnow,		1),
	__K8MASK(packed-sse-sse2,	2),
	__K8MASK(scalar-sse-sse2,	3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0,		0),
	__K8MASK(low-op-pos-1,		1),
	__K8MASK(low-op-pos-2,		2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults,	0),
	__K8MASK(sse-retype-microfaults,	1),
	__K8MASK(sse-reclass-microfaults,	2),
	__K8MASK(sse-and-x87-microtraps,	3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit,	0),
	__K8MASK(page-miss,	1),
	__K8MASK(page-conflict,	2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround,		0),
	__K8MASK(read-to-write-turnaround,	1),
	__K8MASK(write-to-read-turnaround,	2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass,	0),
	__K8MASK(memory-controller-lo-pri-bypass,	1),
	__K8MASK(dram-controller-interface-bypass,	2),
	__K8MASK(dram-controller-queue-bypass,		3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte,	0),
	__K8MASK(nonpostwrszdword,	1),
	__K8MASK(postwrszbyte,		2),
	__K8MASK(postwrszdword,		3),
	__K8MASK(rdszbyte,		4),
	__K8MASK(rdszdword,		5),
	__K8MASK(rdmodwr,		6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss,		0),
	__K8MASK(probe-hit,		1),
	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command,	0),
	__K8MASK(data,	1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop,	3),
	NULLMASK
};

#undef	__K8MASK
535 
/* Qualifier keywords accepted in a K8 event specifier. */
#define	K8_KW_COUNT	"count"
#define	K8_KW_EDGE	"edge"
#define	K8_KW_INV	"inv"
#define	K8_KW_MASK	"mask"
#define	K8_KW_OS	"os"
#define	K8_KW_USR	"usr"
542 
/*
 * Parse a K8 event specifier: 'pe' is the resolved event code and
 * 'ctrspec' the comma-separated qualifier list that followed the event
 * name.  Recognized qualifiers: count=N, edge, inv, mask=KW[+KW...],
 * os, usr.
 *
 * Returns 0 on success with 'pmc_config' filled in, -1 on a malformed
 * specifier.
 */
static int
k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char		*e, *p, *q;
	int		n;
	uint32_t	count;
	uint64_t	evmask;
	const struct pmc_masks	*pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_amd.pm_amd_config = 0;

	pmask = NULL;
	evmask = 0;

#define	__K8SETMASK(M) pmask = k8_mask_##M

	/* setup parsing tables: select the unit-mask table for 'pe' */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
		__K8SETMASK(fdfo);
		break;
	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
		__K8SETMASK(lsrl);
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		__K8SETMASK(llo);
		break;
	case PMC_EV_K8_DC_REFILL_FROM_L2:
	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
	case PMC_EV_K8_DC_COPYBACK:
		__K8SETMASK(dc);
		break;
	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
		__K8SETMASK(dobee);
		break;
	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
		__K8SETMASK(ddpi);
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		__K8SETMASK(dabl);
		break;
	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
		__K8SETMASK(bilr);
		break;
	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
		__K8SETMASK(bfrlm);
		break;
	case PMC_EV_K8_BU_FILL_INTO_L2:
		__K8SETMASK(bfil);
		break;
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
		__K8SETMASK(frfi);
		break;
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
		__K8SETMASK(frfdoi);
		break;
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		__K8SETMASK(ffe);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
		__K8SETMASK(nmcpae);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
		__K8SETMASK(nmct);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
		__K8SETMASK(nmcbs);
		break;
	case PMC_EV_K8_NB_SIZED_COMMANDS:
		__K8SETMASK(nsc);
		break;
	case PMC_EV_K8_NB_PROBE_RESULT:
		__K8SETMASK(npr);
		break;
	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
		__K8SETMASK(nhbb);
		break;

	default:
		break;		/* no options defined */
	}

	/* parse the comma-separated qualifier list */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K8_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K8_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, K8_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, K8_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* other post processing */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		/* XXX only available in rev B and later */
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		/* XXX only available in rev C and later */
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		/* XXX CPU Rev A,B evmask is to be zero */
		if (evmask & (evmask - 1)) /* > 1 bit set */
			return (-1);
		if (evmask == 0) {
			evmask = 0x01; /* Rev C and later: #instrs */
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
		break;
	default:
		/* no mask given: default to all unit-mask bits set */
		if (evmask == 0 && pmask != NULL) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
	}

	/* fold the accumulated unit mask into the config word */
	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_amd.pm_amd_config =
		    AMD_PMC_TO_UNITMASK(evmask);

	return (0);
}
695 
696 #endif
697 
698 #if	defined(__i386__) || defined(__amd64__)
699 static int
700 tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
701     struct pmc_op_pmcallocate *pmc_config)
702 {
703 	if (pe != PMC_EV_TSC_TSC)
704 		return (-1);
705 
706 	/* TSC events must be unqualified. */
707 	if (ctrspec && *ctrspec != '\0')
708 		return (-1);
709 
710 	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
711 	pmc_config->pm_caps |= PMC_CAP_READ;
712 
713 	return (0);
714 }
715 #endif
716 
/*
 * Aliases for the generic (software) PMC class.
 * NOTE(review): "instructions" maps to "SOFT-CLOCK.HARD" here, which
 * looks questionable — verify against the soft event list before use.
 */
static struct pmc_event_alias generic_aliases[] = {
	EV_ALIAS("instructions",		"SOFT-CLOCK.HARD"),
	EV_ALIAS(NULL, NULL)
};
721 
722 static int
723 soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
724     struct pmc_op_pmcallocate *pmc_config)
725 {
726 	(void)ctrspec;
727 	(void)pmc_config;
728 
729 	if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
730 		return (-1);
731 
732 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
733 	return (0);
734 }
735 
736 #if	defined(__arm__)
/* Event aliases for the Cortex-A8 core. */
static struct pmc_event_alias cortex_a8_aliases[] = {
	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};

/* Event aliases for the Cortex-A9 core. */
static struct pmc_event_alias cortex_a9_aliases[] = {
	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};
749 };
750 
751 static int
752 armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
753     struct pmc_op_pmcallocate *pmc_config __unused)
754 {
755 	switch (pe) {
756 	default:
757 		break;
758 	}
759 
760 	return (0);
761 }
762 #endif
763 
764 #if	defined(__aarch64__)
765 static struct pmc_event_alias cortex_a53_aliases[] = {
766 	EV_ALIAS(NULL, NULL)
767 };
768 static struct pmc_event_alias cortex_a57_aliases[] = {
769 	EV_ALIAS(NULL, NULL)
770 };
771 static struct pmc_event_alias cortex_a76_aliases[] = {
772 	EV_ALIAS(NULL, NULL)
773 };
774 
775 static int
776 arm64_allocate_pmc(enum pmc_event pe, char *ctrspec,
777     struct pmc_op_pmcallocate *pmc_config)
778 {
779 	char *p;
780 
781 	while ((p = strsep(&ctrspec, ",")) != NULL) {
782 		if (KWMATCH(p, "os"))
783 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
784 		else if (KWMATCH(p, "usr"))
785 			pmc_config->pm_caps |= PMC_CAP_USER;
786 		else
787 			return (-1);
788 	}
789 
790 	return (0);
791 }
792 
793 static int
794 cmn600_pmu_allocate_pmc(enum pmc_event pe, char *ctrspec,
795     struct pmc_op_pmcallocate *pmc_config)
796 {
797 	uint32_t nodeid, occupancy, xpport, xpchannel;
798 	char *e, *p, *q;
799 	unsigned int i;
800 	char *xpport_names[] = { "East", "West", "North", "South", "devport0",
801 	    "devport1" };
802 	char *xpchannel_names[] = { "REQ", "RSP", "SNP", "DAT" };
803 
804 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
805 	pmc_config->pm_caps |= PMC_CAP_SYSTEM;
806 	pmc_config->pm_md.pm_cmn600.pma_cmn600_config = 0;
807 	/*
808 	 * CMN600 extra fields:
809 	 * * nodeid - node coordinates x[2-3],y[2-3],p[1],s[2]
810 	 * 		width of x and y fields depend on matrix size.
811 	 * * occupancy - numeric value to select desired filter.
812 	 * * xpport - East, West, North, South, devport0, devport1 (or 0, 1, ..., 5)
813 	 * * xpchannel - REQ, RSP, SNP, DAT (or 0, 1, 2, 3)
814 	 */
815 
816 	while ((p = strsep(&ctrspec, ",")) != NULL) {
817 		if (KWPREFIXMATCH(p, "nodeid=")) {
818 			q = strchr(p, '=');
819 			if (*++q == '\0') /* skip '=' */
820 				return (-1);
821 
822 			nodeid = strtol(q, &e, 0);
823 			if (e == q || *e != '\0')
824 				return (-1);
825 
826 			pmc_config->pm_md.pm_cmn600.pma_cmn600_nodeid |= nodeid;
827 
828 		} else if (KWPREFIXMATCH(p, "occupancy=")) {
829 			q = strchr(p, '=');
830 			if (*++q == '\0') /* skip '=' */
831 				return (-1);
832 
833 			occupancy = strtol(q, &e, 0);
834 			if (e == q || *e != '\0')
835 				return (-1);
836 
837 			pmc_config->pm_md.pm_cmn600.pma_cmn600_occupancy = occupancy;
838 		} else if (KWPREFIXMATCH(p, "xpport=")) {
839 			q = strchr(p, '=');
840 			if (*++q == '\0') /* skip '=' */
841 				return (-1);
842 
843 			xpport = strtol(q, &e, 0);
844 			if (e == q || *e != '\0') {
845 				for (i = 0; i < nitems(xpport_names); i++) {
846 					if (strcasecmp(xpport_names[i], q) == 0) {
847 						xpport = i;
848 						break;
849 					}
850 				}
851 				if (i == nitems(xpport_names))
852 					return (-1);
853 			}
854 
855 			pmc_config->pm_md.pm_cmn600.pma_cmn600_config |= xpport << 2;
856 		} else if (KWPREFIXMATCH(p, "xpchannel=")) {
857 			q = strchr(p, '=');
858 			if (*++q == '\0') /* skip '=' */
859 				return (-1);
860 
861 			xpchannel = strtol(q, &e, 0);
862 			if (e == q || *e != '\0') {
863 				for (i = 0; i < nitems(xpchannel_names); i++) {
864 					if (strcasecmp(xpchannel_names[i], q) == 0) {
865 						xpchannel = i;
866 						break;
867 					}
868 				}
869 				if (i == nitems(xpchannel_names))
870 					return (-1);
871 			}
872 
873 			pmc_config->pm_md.pm_cmn600.pma_cmn600_config |= xpchannel << 5;
874 		} else
875 			return (-1);
876 	}
877 
878 	return (0);
879 }
880 
881 static int
882 dmc620_pmu_allocate_pmc(enum pmc_event pe, char *ctrspec,
883     struct pmc_op_pmcallocate *pmc_config)
884 {
885 	char		*e, *p, *q;
886 	uint64_t	match, mask;
887 	uint32_t	count;
888 
889 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
890 	pmc_config->pm_caps |= PMC_CAP_SYSTEM;
891 	pmc_config->pm_md.pm_dmc620.pm_dmc620_config = 0;
892 
893 	while ((p = strsep(&ctrspec, ",")) != NULL) {
894 		if (KWPREFIXMATCH(p, "count=")) {
895 			q = strchr(p, '=');
896 			if (*++q == '\0') /* skip '=' */
897 				return (-1);
898 
899 			count = strtol(q, &e, 0);
900 			if (e == q || *e != '\0')
901 				return (-1);
902 
903 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
904 			pmc_config->pm_md.pm_dmc620.pm_dmc620_config |= count;
905 
906 		} else if (KWMATCH(p, "inv")) {
907 			pmc_config->pm_caps |= PMC_CAP_INVERT;
908 		} else if (KWPREFIXMATCH(p, "match=")) {
909 			match = strtol(q, &e, 0);
910 			if (e == q || *e != '\0')
911 				return (-1);
912 
913 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
914 			pmc_config->pm_md.pm_dmc620.pm_dmc620_match = match;
915 		} else if (KWPREFIXMATCH(p, "mask=")) {
916 			q = strchr(p, '=');
917 			if (*++q == '\0') /* skip '=' */
918 				return (-1);
919 
920 			mask = strtol(q, &e, 0);
921 			if (e == q || *e != '\0')
922 				return (-1);
923 
924 			pmc_config->pm_md.pm_dmc620.pm_dmc620_mask = mask;
925 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
926 		} else
927 			return (-1);
928 	}
929 
930 	return (0);
931 }
932 #endif
933 
934 #if defined(__powerpc__)
935 
/* Event aliases for the MPC7450 family. */
static struct pmc_event_alias ppc7450_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_COMPLETED"),
	EV_ALIAS("branches",		"BRANCHES_COMPLETED"),
	EV_ALIAS("branch-mispredicts",	"MISPREDICTED_BRANCHES"),
	EV_ALIAS(NULL, NULL)
};

/* Event aliases for the PPC970. */
static struct pmc_event_alias ppc970_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles",       "CYCLES"),
	EV_ALIAS(NULL, NULL)
};

/* Event aliases for the e500 core. */
static struct pmc_event_alias e500_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles",       "CYCLES"),
	EV_ALIAS(NULL, NULL)
};

/* Qualifier keywords accepted by powerpc_allocate_pmc(). */
#define	POWERPC_KW_OS		"os"
#define	POWERPC_KW_USR		"usr"
#define	POWERPC_KW_ANYTHREAD	"anythread"
958 
959 static int
960 powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
961 		     struct pmc_op_pmcallocate *pmc_config __unused)
962 {
963 	char *p;
964 
965 	(void) pe;
966 
967 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
968 
969 	while ((p = strsep(&ctrspec, ",")) != NULL) {
970 		if (KWMATCH(p, POWERPC_KW_OS))
971 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
972 		else if (KWMATCH(p, POWERPC_KW_USR))
973 			pmc_config->pm_caps |= PMC_CAP_USER;
974 		else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
975 			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
976 		else
977 			return (-1);
978 	}
979 
980 	return (0);
981 }
982 
983 #endif /* __powerpc__ */
984 
985 
986 /*
987  * Match an event name `name' with its canonical form.
988  *
989  * Matches are case insensitive and spaces, periods, underscores and
990  * hyphen characters are considered to match each other.
991  *
992  * Returns 1 for a match, 0 otherwise.
993  */
994 
static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	const unsigned char *np, *cp;
	int n, c;

	np = (const unsigned char *) name;
	cp = (const unsigned char *) canonicalname;

	while (*np != '\0' && *cp != '\0') {
		n = *np;
		c = *cp;

		/*
		 * Separator characters (space, '.', '_' and '-') are
		 * interchangeable; otherwise compare case-insensitively.
		 */
		if (!((n == ' ' || n == '_' || n == '-' || n == '.') &&
		    (c == ' ' || c == '_' || c == '-' || c == '.')) &&
		    toupper(n) != toupper(c))
			return (0);

		np++;
		cp++;
	}

	/* A match requires both names to be fully consumed. */
	return (*np == '\0' && *cp == '\0' ? 1 : 0);
}
1022 
1023 /*
1024  * Match an event name against all the event named supported by a
1025  * PMC class.
1026  *
1027  * Returns an event descriptor pointer on match or NULL otherwise.
1028  */
1029 static const struct pmc_event_descr *
1030 pmc_match_event_class(const char *name,
1031     const struct pmc_class_descr *pcd)
1032 {
1033 	size_t n;
1034 	const struct pmc_event_descr *ev;
1035 
1036 	ev = pcd->pm_evc_event_table;
1037 	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
1038 		if (pmc_match_event_name(name, ev->pm_ev_name))
1039 			return (ev);
1040 
1041 	return (NULL);
1042 }
1043 
1044 /*
1045  * API entry points
1046  */
1047 
1048 int
1049 pmc_allocate(const char *ctrspec, enum pmc_mode mode,
1050     uint32_t flags, int cpu, pmc_id_t *pmcid,
1051     uint64_t count)
1052 {
1053 	size_t n;
1054 	int retval;
1055 	char *r, *spec_copy;
1056 	const char *ctrname;
1057 	const struct pmc_event_descr *ev;
1058 	const struct pmc_event_alias *alias;
1059 	struct pmc_op_pmcallocate pmc_config;
1060 	const struct pmc_class_descr *pcd;
1061 
1062 	spec_copy = NULL;
1063 	retval    = -1;
1064 
1065 	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
1066 	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
1067 		errno = EINVAL;
1068 		goto out;
1069 	}
1070 	bzero(&pmc_config, sizeof(pmc_config));
1071 	pmc_config.pm_cpu   = cpu;
1072 	pmc_config.pm_mode  = mode;
1073 	pmc_config.pm_flags = flags;
1074 	pmc_config.pm_count = count;
1075 	if (PMC_IS_SAMPLING_MODE(mode))
1076 		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
1077 
1078 	/*
1079 	 * Try to pull the raw event ID directly from the pmu-events table. If
1080 	 * this is unsupported on the platform, or the event is not found,
1081 	 * continue with searching the regular event tables.
1082 	 */
1083 	r = spec_copy = strdup(ctrspec);
1084 	ctrname = strsep(&r, ",");
1085 	if (pmc_pmu_enabled()) {
1086 		if (pmc_pmu_pmcallocate(ctrname, &pmc_config) == 0) {
1087 			/*
1088 			 * XXX: pmclog_get_event exploits this to disambiguate
1089 			 *      PMU from PMC event codes in PMCALLOCATE events.
1090 			 */
1091 			assert(pmc_config.pm_ev < PMC_EVENT_FIRST);
1092 			goto found;
1093 		}
1094 
1095 		/* Otherwise, reset any changes */
1096 		pmc_config.pm_ev = 0;
1097 		pmc_config.pm_caps = 0;
1098 		pmc_config.pm_class = 0;
1099 	}
1100 	free(spec_copy);
1101 	spec_copy = NULL;
1102 
1103 	/* replace an event alias with the canonical event specifier */
1104 	if (pmc_mdep_event_aliases)
1105 		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
1106 			if (!strcasecmp(ctrspec, alias->pm_alias)) {
1107 				spec_copy = strdup(alias->pm_spec);
1108 				break;
1109 			}
1110 
1111 	if (spec_copy == NULL)
1112 		spec_copy = strdup(ctrspec);
1113 
1114 	r = spec_copy;
1115 	ctrname = strsep(&r, ",");
1116 
1117 	/*
1118 	 * If a explicit class prefix was given by the user, restrict the
1119 	 * search for the event to the specified PMC class.
1120 	 */
1121 	ev = NULL;
1122 	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
1123 		pcd = pmc_class_table[n];
1124 		if (pcd != NULL && strncasecmp(ctrname, pcd->pm_evc_name,
1125 		    pcd->pm_evc_name_size) == 0) {
1126 			if ((ev = pmc_match_event_class(ctrname +
1127 			    pcd->pm_evc_name_size, pcd)) == NULL) {
1128 				errno = EINVAL;
1129 				goto out;
1130 			}
1131 			break;
1132 		}
1133 	}
1134 
1135 	/*
1136 	 * Otherwise, search for this event in all compatible PMC
1137 	 * classes.
1138 	 */
1139 	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
1140 		pcd = pmc_class_table[n];
1141 		if (pcd != NULL)
1142 			ev = pmc_match_event_class(ctrname, pcd);
1143 	}
1144 
1145 	if (ev == NULL) {
1146 		errno = EINVAL;
1147 		goto out;
1148 	}
1149 
1150 	pmc_config.pm_ev    = ev->pm_ev_code;
1151 	pmc_config.pm_class = pcd->pm_evc_class;
1152 
1153  	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
1154 		errno = EINVAL;
1155 		goto out;
1156 	}
1157 
1158 found:
1159 	if (PMC_CALL(PMC_OP_PMCALLOCATE, &pmc_config) == 0) {
1160 		*pmcid = pmc_config.pm_pmcid;
1161 		retval = 0;
1162 	}
1163 out:
1164 	if (spec_copy)
1165 		free(spec_copy);
1166 
1167 	return (retval);
1168 }
1169 
1170 int
1171 pmc_attach(pmc_id_t pmc, pid_t pid)
1172 {
1173 	struct pmc_op_pmcattach pmc_attach_args;
1174 
1175 	pmc_attach_args.pm_pmc = pmc;
1176 	pmc_attach_args.pm_pid = pid;
1177 
1178 	return (PMC_CALL(PMC_OP_PMCATTACH, &pmc_attach_args));
1179 }
1180 
1181 int
1182 pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
1183 {
1184 	unsigned int i;
1185 	enum pmc_class cl;
1186 
1187 	cl = PMC_ID_TO_CLASS(pmcid);
1188 	for (i = 0; i < cpu_info.pm_nclass; i++)
1189 		if (cpu_info.pm_classes[i].pm_class == cl) {
1190 			*caps = cpu_info.pm_classes[i].pm_caps;
1191 			return (0);
1192 		}
1193 	errno = EINVAL;
1194 	return (-1);
1195 }
1196 
1197 int
1198 pmc_configure_logfile(int fd)
1199 {
1200 	struct pmc_op_configurelog cla;
1201 
1202 	cla.pm_flags = 0;
1203 	cla.pm_logfd = fd;
1204 	if (PMC_CALL(PMC_OP_CONFIGURELOG, &cla) < 0)
1205 		return (-1);
1206 	return (0);
1207 }
1208 
1209 int
1210 pmc_cpuinfo(const struct pmc_cpuinfo **pci)
1211 {
1212 	if (pmc_syscall == -1) {
1213 		errno = ENXIO;
1214 		return (-1);
1215 	}
1216 
1217 	*pci = &cpu_info;
1218 	return (0);
1219 }
1220 
1221 int
1222 pmc_detach(pmc_id_t pmc, pid_t pid)
1223 {
1224 	struct pmc_op_pmcattach pmc_detach_args;
1225 
1226 	pmc_detach_args.pm_pmc = pmc;
1227 	pmc_detach_args.pm_pid = pid;
1228 	return (PMC_CALL(PMC_OP_PMCDETACH, &pmc_detach_args));
1229 }
1230 
1231 int
1232 pmc_disable(int cpu, int pmc)
1233 {
1234 	struct pmc_op_pmcadmin ssa;
1235 
1236 	ssa.pm_cpu = cpu;
1237 	ssa.pm_pmc = pmc;
1238 	ssa.pm_state = PMC_STATE_DISABLED;
1239 	return (PMC_CALL(PMC_OP_PMCADMIN, &ssa));
1240 }
1241 
1242 int
1243 pmc_enable(int cpu, int pmc)
1244 {
1245 	struct pmc_op_pmcadmin ssa;
1246 
1247 	ssa.pm_cpu = cpu;
1248 	ssa.pm_pmc = pmc;
1249 	ssa.pm_state = PMC_STATE_FREE;
1250 	return (PMC_CALL(PMC_OP_PMCADMIN, &ssa));
1251 }
1252 
1253 /*
1254  * Return a list of events known to a given PMC class.  'cl' is the
1255  * PMC class identifier, 'eventnames' is the returned list of 'const
1256  * char *' pointers pointing to the names of the events. 'nevents' is
1257  * the number of event name pointers returned.
1258  *
1259  * The space for 'eventnames' is allocated using malloc(3).  The caller
1260  * is responsible for freeing this space when done.
1261  */
1262 int
1263 pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
1264     int *nevents)
1265 {
1266 	int count;
1267 	const char **names;
1268 	const struct pmc_event_descr *ev;
1269 
1270 	switch (cl)
1271 	{
1272 	case PMC_CLASS_IAF:
1273 		ev = iaf_event_table;
1274 		count = PMC_EVENT_TABLE_SIZE(iaf);
1275 		break;
1276 	case PMC_CLASS_TSC:
1277 		ev = tsc_event_table;
1278 		count = PMC_EVENT_TABLE_SIZE(tsc);
1279 		break;
1280 	case PMC_CLASS_K8:
1281 		ev = k8_event_table;
1282 		count = PMC_EVENT_TABLE_SIZE(k8);
1283 		break;
1284 	case PMC_CLASS_ARMV7:
1285 		switch (cpu_info.pm_cputype) {
1286 		default:
1287 		case PMC_CPU_ARMV7_CORTEX_A8:
1288 			ev = cortex_a8_event_table;
1289 			count = PMC_EVENT_TABLE_SIZE(cortex_a8);
1290 			break;
1291 		case PMC_CPU_ARMV7_CORTEX_A9:
1292 			ev = cortex_a9_event_table;
1293 			count = PMC_EVENT_TABLE_SIZE(cortex_a9);
1294 			break;
1295 		}
1296 		break;
1297 	case PMC_CLASS_ARMV8:
1298 		switch (cpu_info.pm_cputype) {
1299 		default:
1300 		case PMC_CPU_ARMV8_CORTEX_A53:
1301 			ev = cortex_a53_event_table;
1302 			count = PMC_EVENT_TABLE_SIZE(cortex_a53);
1303 			break;
1304 		case PMC_CPU_ARMV8_CORTEX_A57:
1305 			ev = cortex_a57_event_table;
1306 			count = PMC_EVENT_TABLE_SIZE(cortex_a57);
1307 			break;
1308 		case PMC_CPU_ARMV8_CORTEX_A76:
1309 			ev = cortex_a76_event_table;
1310 			count = PMC_EVENT_TABLE_SIZE(cortex_a76);
1311 			break;
1312 		}
1313 		break;
1314 	case PMC_CLASS_CMN600_PMU:
1315 		ev = cmn600_pmu_event_table;
1316 		count = PMC_EVENT_TABLE_SIZE(cmn600_pmu);
1317 		break;
1318 	case PMC_CLASS_DMC620_PMU_CD2:
1319 		ev = dmc620_pmu_cd2_event_table;
1320 		count = PMC_EVENT_TABLE_SIZE(dmc620_pmu_cd2);
1321 		break;
1322 	case PMC_CLASS_DMC620_PMU_C:
1323 		ev = dmc620_pmu_c_event_table;
1324 		count = PMC_EVENT_TABLE_SIZE(dmc620_pmu_c);
1325 		break;
1326 	case PMC_CLASS_PPC7450:
1327 		ev = ppc7450_event_table;
1328 		count = PMC_EVENT_TABLE_SIZE(ppc7450);
1329 		break;
1330 	case PMC_CLASS_PPC970:
1331 		ev = ppc970_event_table;
1332 		count = PMC_EVENT_TABLE_SIZE(ppc970);
1333 		break;
1334 	case PMC_CLASS_E500:
1335 		ev = e500_event_table;
1336 		count = PMC_EVENT_TABLE_SIZE(e500);
1337 		break;
1338 	case PMC_CLASS_SOFT:
1339 		ev = soft_event_table;
1340 		count = soft_event_info.pm_nevent;
1341 		break;
1342 	default:
1343 		errno = EINVAL;
1344 		return (-1);
1345 	}
1346 
1347 	if ((names = malloc(count * sizeof(const char *))) == NULL)
1348 		return (-1);
1349 
1350 	*eventnames = names;
1351 	*nevents = count;
1352 
1353 	for (;count--; ev++, names++)
1354 		*names = ev->pm_ev_name;
1355 
1356 	return (0);
1357 }
1358 
1359 int
1360 pmc_flush_logfile(void)
1361 {
1362 	return (PMC_CALL(PMC_OP_FLUSHLOG, 0));
1363 }
1364 
1365 int
1366 pmc_close_logfile(void)
1367 {
1368 	return (PMC_CALL(PMC_OP_CLOSELOG, 0));
1369 }
1370 
1371 int
1372 pmc_get_driver_stats(struct pmc_driverstats *ds)
1373 {
1374 	struct pmc_op_getdriverstats gms;
1375 
1376 	if (PMC_CALL(PMC_OP_GETDRIVERSTATS, &gms) < 0)
1377 		return (-1);
1378 
1379 	/* copy out fields in the current userland<->library interface */
1380 	ds->pm_intr_ignored    = gms.pm_intr_ignored;
1381 	ds->pm_intr_processed  = gms.pm_intr_processed;
1382 	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
1383 	ds->pm_syscalls        = gms.pm_syscalls;
1384 	ds->pm_syscall_errors  = gms.pm_syscall_errors;
1385 	ds->pm_buffer_requests = gms.pm_buffer_requests;
1386 	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
1387 	ds->pm_log_sweeps      = gms.pm_log_sweeps;
1388 	return (0);
1389 }
1390 
1391 int
1392 pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
1393 {
1394 	struct pmc_op_getmsr gm;
1395 
1396 	gm.pm_pmcid = pmc;
1397 	if (PMC_CALL(PMC_OP_PMCGETMSR, &gm) < 0)
1398 		return (-1);
1399 	*msr = gm.pm_msr;
1400 	return (0);
1401 }
1402 
/*
 * One-time library initialization.
 *
 * Locates the hwpmc(4) module's system call number, verifies the
 * kernel module's ABI major version against the library's compiled-in
 * PMC_VERSION, caches the kernel's CPU/class description in the
 * file-scope 'cpu_info', builds 'pmc_class_table' from the classes
 * the kernel reports, imports the dynamic "soft" event list, and
 * selects the machine-dependent event-alias table.
 *
 * Returns 0 on success.  On any failure -1 is returned and
 * 'pmc_syscall' is reset to -1 so other API entry points keep
 * failing with ENXIO; errno indicates the cause (EPROGMISMATCH for
 * an ABI mismatch, ENXIO for an unsupported CPU).
 */
int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(PMC_OP_GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno  = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	/* Cache the kernel's CPU and PMC-class description. */
	bzero(&op_cpu_info, sizeof(op_cpu_info));
	if (PMC_CALL(PMC_OP_GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
	for (n = 0; n < op_cpu_info.pm_nclass; n++)
		memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n],
		    sizeof(cpu_info.pm_classes[n]));

	/* Zero-filled: unused slots stay NULL for later scans. */
	pmc_class_table = calloc(PMC_CLASS_TABLE_SIZE,
	    sizeof(struct pmc_class_descr *));

	if (pmc_class_table == NULL)
		return (-1);

	/*
	 * Get soft events list.
	 */
	soft_event_info.pm_class = PMC_CLASS_SOFT;
	if (PMC_CALL(PMC_OP_GETDYNEVENTINFO, &soft_event_info) < 0)
		return (pmc_syscall = -1);

	/* Map soft events to static list. */
	for (n = 0; n < soft_event_info.pm_nevent; n++) {
		soft_event_table[n].pm_ev_name =
		    soft_event_info.pm_events[n].pm_ev_name;
		soft_event_table[n].pm_ev_code =
		    soft_event_info.pm_events[n].pm_ev_code;
	}
	soft_class_table_descr.pm_evc_event_table_size = \
	    soft_event_info.pm_nevent;
	soft_class_table_descr.pm_evc_event_table = \
	    soft_event_table;

	/*
	 * Fill in the class table.
	 */
	/* 'n' tracks the next free pmc_class_table slot. */
	n = 0;
	for (unsigned i = 0; i < PMC_CLASS_TABLE_SIZE; i++) {
		switch (cpu_info.pm_classes[i].pm_class) {
#if defined(__amd64__) || defined(__i386__)
		case PMC_CLASS_TSC:
			pmc_class_table[n++] = &tsc_class_table_descr;
			break;

		case PMC_CLASS_K8:
			pmc_class_table[n++] = &k8_class_table_descr;
			break;
#endif

		case PMC_CLASS_SOFT:
			pmc_class_table[n++] = &soft_class_table_descr;
			break;

#if defined(__arm__)
		case PMC_CLASS_ARMV7:
			switch (cpu_info.pm_cputype) {
			case PMC_CPU_ARMV7_CORTEX_A8:
				pmc_class_table[n++] =
				    &cortex_a8_class_table_descr;
				break;
			case PMC_CPU_ARMV7_CORTEX_A9:
				pmc_class_table[n++] =
				    &cortex_a9_class_table_descr;
				break;
			default:
				errno = ENXIO;
				return (pmc_syscall = -1);
			}
			break;
#endif

#if defined(__aarch64__)
		case PMC_CLASS_ARMV8:
			switch (cpu_info.pm_cputype) {
			case PMC_CPU_ARMV8_CORTEX_A53:
				pmc_class_table[n++] =
				    &cortex_a53_class_table_descr;
				break;
			case PMC_CPU_ARMV8_CORTEX_A57:
				pmc_class_table[n++] =
				    &cortex_a57_class_table_descr;
				break;
			case PMC_CPU_ARMV8_CORTEX_A76:
				pmc_class_table[n++] =
				    &cortex_a76_class_table_descr;
				break;
			default:
				errno = ENXIO;
				return (pmc_syscall = -1);
			}
			break;

		case PMC_CLASS_DMC620_PMU_CD2:
			pmc_class_table[n++] =
			    &dmc620_pmu_cd2_class_table_descr;
			break;

		case PMC_CLASS_DMC620_PMU_C:
			pmc_class_table[n++] = &dmc620_pmu_c_class_table_descr;
			break;

		case PMC_CLASS_CMN600_PMU:
			pmc_class_table[n++] = &cmn600_pmu_class_table_descr;
			break;
#endif

#if defined(__powerpc__)
		case PMC_CLASS_PPC7450:
			pmc_class_table[n++] = &ppc7450_class_table_descr;
			break;

		case PMC_CLASS_PPC970:
			pmc_class_table[n++] = &ppc970_class_table_descr;
			break;

		case PMC_CLASS_E500:
			pmc_class_table[n++] = &e500_class_table_descr;
			break;
#endif

		default:
			/* Unknown-to-this-build classes are skipped. */
#if defined(DEBUG)
			printf("pm_class: 0x%x\n",
			    cpu_info.pm_classes[i].pm_class);
#endif
			break;
		}
	}

#define	PMC_MDEP_INIT(C) pmc_mdep_event_aliases = C##_aliases

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		break;
#endif
	case PMC_CPU_GENERIC:
		PMC_MDEP_INIT(generic);
		break;
#if defined(__arm__)
	case PMC_CPU_ARMV7_CORTEX_A8:
		PMC_MDEP_INIT(cortex_a8);
		break;
	case PMC_CPU_ARMV7_CORTEX_A9:
		PMC_MDEP_INIT(cortex_a9);
		break;
#endif
#if defined(__aarch64__)
	case PMC_CPU_ARMV8_CORTEX_A53:
		PMC_MDEP_INIT(cortex_a53);
		break;
	case PMC_CPU_ARMV8_CORTEX_A57:
		PMC_MDEP_INIT(cortex_a57);
		break;
	case PMC_CPU_ARMV8_CORTEX_A76:
		PMC_MDEP_INIT(cortex_a76);
		break;
#endif
#if defined(__powerpc__)
	case PMC_CPU_PPC_7450:
		PMC_MDEP_INIT(ppc7450);
		break;
	case PMC_CPU_PPC_970:
		PMC_MDEP_INIT(ppc970);
		break;
	case PMC_CPU_PPC_E500:
		PMC_MDEP_INIT(e500);
		break;
#endif
	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about.  This shouldn't happen since the abi version check
		 * should have caught this.
		 */
#if defined(__amd64__) || defined(__i386__) || defined(__powerpc64__)
		break;
#endif
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}
1626 
1627 const char *
1628 pmc_name_of_capability(enum pmc_caps cap)
1629 {
1630 	int i;
1631 
1632 	/*
1633 	 * 'cap' should have a single bit set and should be in
1634 	 * range.
1635 	 */
1636 	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
1637 	    cap > PMC_CAP_LAST) {
1638 		errno = EINVAL;
1639 		return (NULL);
1640 	}
1641 
1642 	i = ffs(cap);
1643 	return (pmc_capability_names[i - 1]);
1644 }
1645 
1646 const char *
1647 pmc_name_of_class(enum pmc_class pc)
1648 {
1649 	size_t n;
1650 
1651 	for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++)
1652 		if (pc == pmc_class_names[n].pm_class)
1653 			return (pmc_class_names[n].pm_name);
1654 
1655 	errno = EINVAL;
1656 	return (NULL);
1657 }
1658 
1659 const char *
1660 pmc_name_of_cputype(enum pmc_cputype cp)
1661 {
1662 	size_t n;
1663 
1664 	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
1665 		if (cp == pmc_cputype_names[n].pm_cputype)
1666 			return (pmc_cputype_names[n].pm_name);
1667 
1668 	errno = EINVAL;
1669 	return (NULL);
1670 }
1671 
1672 const char *
1673 pmc_name_of_disposition(enum pmc_disp pd)
1674 {
1675 	if ((int) pd >= PMC_DISP_FIRST &&
1676 	    pd <= PMC_DISP_LAST)
1677 		return (pmc_disposition_names[pd]);
1678 
1679 	errno = EINVAL;
1680 	return (NULL);
1681 }
1682 
/*
 * Map event code 'pe' to its symbolic name.  Event code ranges that
 * are shared by several CPU models (ARMv7/ARMv8) are disambiguated
 * with the 'cpu' argument.  Returns the name, or NULL when the event
 * is unknown or 'cpu' does not select a table for the range.
 */
const char *
_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
{
	const struct pmc_event_descr *ev, *evfence;

	/* [ev, evfence) delimits the candidate table; empty if no match. */
	ev = evfence = NULL;
	if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
		ev = k8_event_table;
		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);

	} else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		case PMC_CPU_ARMV8_CORTEX_A76:
			ev = cortex_a76_event_table;
			evfence = cortex_a76_event_table + PMC_EVENT_TABLE_SIZE(cortex_a76);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_CMN600_PMU_FIRST &&
	    pe <= PMC_EV_CMN600_PMU_LAST) {
		ev = cmn600_pmu_event_table;
		evfence = cmn600_pmu_event_table +
		    PMC_EVENT_TABLE_SIZE(cmn600_pmu);
	} else if (pe >= PMC_EV_DMC620_PMU_CD2_FIRST &&
	    pe <= PMC_EV_DMC620_PMU_CD2_LAST) {
		ev = dmc620_pmu_cd2_event_table;
		evfence = dmc620_pmu_cd2_event_table +
		    PMC_EVENT_TABLE_SIZE(dmc620_pmu_cd2);
	} else if (pe >= PMC_EV_DMC620_PMU_C_FIRST &&
	    pe <= PMC_EV_DMC620_PMU_C_LAST) {
		ev = dmc620_pmu_c_event_table;
		evfence = dmc620_pmu_c_event_table +
		    PMC_EVENT_TABLE_SIZE(dmc620_pmu_c);
	} else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
		ev = ppc7450_event_table;
		evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
	} else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
		ev = ppc970_event_table;
		evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
	} else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) {
		ev = e500_event_table;
		evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500);
	} else if (pe == PMC_EV_TSC_TSC) {
		ev = tsc_event_table;
		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
	} else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
		/* Soft event table is populated dynamically by pmc_init(). */
		ev = soft_event_table;
		evfence = soft_event_table + soft_event_info.pm_nevent;
	}

	/* Linear scan of the selected table for the exact event code. */
	for (; ev != evfence; ev++)
		if (pe == ev->pm_ev_code)
			return (ev->pm_ev_name);

	return (NULL);
}
1761 
1762 const char *
1763 pmc_name_of_event(enum pmc_event pe)
1764 {
1765 	const char *n;
1766 
1767 	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
1768 		return (n);
1769 
1770 	errno = EINVAL;
1771 	return (NULL);
1772 }
1773 
1774 const char *
1775 pmc_name_of_mode(enum pmc_mode pm)
1776 {
1777 	if ((int) pm >= PMC_MODE_FIRST &&
1778 	    pm <= PMC_MODE_LAST)
1779 		return (pmc_mode_names[pm]);
1780 
1781 	errno = EINVAL;
1782 	return (NULL);
1783 }
1784 
1785 const char *
1786 pmc_name_of_state(enum pmc_state ps)
1787 {
1788 	if ((int) ps >= PMC_STATE_FIRST &&
1789 	    ps <= PMC_STATE_LAST)
1790 		return (pmc_state_names[ps]);
1791 
1792 	errno = EINVAL;
1793 	return (NULL);
1794 }
1795 
1796 int
1797 pmc_ncpu(void)
1798 {
1799 	if (pmc_syscall == -1) {
1800 		errno = ENXIO;
1801 		return (-1);
1802 	}
1803 
1804 	return (cpu_info.pm_ncpu);
1805 }
1806 
1807 int
1808 pmc_npmc(int cpu)
1809 {
1810 	if (pmc_syscall == -1) {
1811 		errno = ENXIO;
1812 		return (-1);
1813 	}
1814 
1815 	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
1816 		errno = EINVAL;
1817 		return (-1);
1818 	}
1819 
1820 	return (cpu_info.pm_npmc);
1821 }
1822 
1823 int
1824 pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
1825 {
1826 	int nbytes, npmc;
1827 	struct pmc_op_getpmcinfo *pmci;
1828 
1829 	if ((npmc = pmc_npmc(cpu)) < 0)
1830 		return (-1);
1831 
1832 	nbytes = sizeof(struct pmc_op_getpmcinfo) +
1833 	    npmc * sizeof(struct pmc_info);
1834 
1835 	if ((pmci = calloc(1, nbytes)) == NULL)
1836 		return (-1);
1837 
1838 	pmci->pm_cpu  = cpu;
1839 
1840 	if (PMC_CALL(PMC_OP_GETPMCINFO, pmci) < 0) {
1841 		free(pmci);
1842 		return (-1);
1843 	}
1844 
1845 	/* kernel<->library, library<->userland interfaces are identical */
1846 	*ppmci = (struct pmc_pmcinfo *) pmci;
1847 	return (0);
1848 }
1849 
1850 int
1851 pmc_read(pmc_id_t pmc, pmc_value_t *value)
1852 {
1853 	struct pmc_op_pmcrw pmc_read_op;
1854 
1855 	pmc_read_op.pm_pmcid = pmc;
1856 	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
1857 	pmc_read_op.pm_value = -1;
1858 
1859 	if (PMC_CALL(PMC_OP_PMCRW, &pmc_read_op) < 0)
1860 		return (-1);
1861 
1862 	*value = pmc_read_op.pm_value;
1863 	return (0);
1864 }
1865 
1866 int
1867 pmc_release(pmc_id_t pmc)
1868 {
1869 	struct pmc_op_simple	pmc_release_args;
1870 
1871 	pmc_release_args.pm_pmcid = pmc;
1872 	return (PMC_CALL(PMC_OP_PMCRELEASE, &pmc_release_args));
1873 }
1874 
1875 int
1876 pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
1877 {
1878 	struct pmc_op_pmcrw pmc_rw_op;
1879 
1880 	pmc_rw_op.pm_pmcid = pmc;
1881 	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
1882 	pmc_rw_op.pm_value = newvalue;
1883 
1884 	if (PMC_CALL(PMC_OP_PMCRW, &pmc_rw_op) < 0)
1885 		return (-1);
1886 
1887 	*oldvaluep = pmc_rw_op.pm_value;
1888 	return (0);
1889 }
1890 
1891 int
1892 pmc_set(pmc_id_t pmc, pmc_value_t value)
1893 {
1894 	struct pmc_op_pmcsetcount sc;
1895 
1896 	sc.pm_pmcid = pmc;
1897 	sc.pm_count = value;
1898 
1899 	if (PMC_CALL(PMC_OP_PMCSETCOUNT, &sc) < 0)
1900 		return (-1);
1901 	return (0);
1902 }
1903 
1904 int
1905 pmc_start(pmc_id_t pmc)
1906 {
1907 	struct pmc_op_simple	pmc_start_args;
1908 
1909 	pmc_start_args.pm_pmcid = pmc;
1910 	return (PMC_CALL(PMC_OP_PMCSTART, &pmc_start_args));
1911 }
1912 
1913 int
1914 pmc_stop(pmc_id_t pmc)
1915 {
1916 	struct pmc_op_simple	pmc_stop_args;
1917 
1918 	pmc_stop_args.pm_pmcid = pmc;
1919 	return (PMC_CALL(PMC_OP_PMCSTOP, &pmc_stop_args));
1920 }
1921 
1922 int
1923 pmc_width(pmc_id_t pmcid, uint32_t *width)
1924 {
1925 	unsigned int i;
1926 	enum pmc_class cl;
1927 
1928 	cl = PMC_ID_TO_CLASS(pmcid);
1929 	for (i = 0; i < cpu_info.pm_nclass; i++)
1930 		if (cpu_info.pm_classes[i].pm_class == cl) {
1931 			*width = cpu_info.pm_classes[i].pm_width;
1932 			return (0);
1933 		}
1934 	errno = EINVAL;
1935 	return (-1);
1936 }
1937 
1938 int
1939 pmc_write(pmc_id_t pmc, pmc_value_t value)
1940 {
1941 	struct pmc_op_pmcrw pmc_write_op;
1942 
1943 	pmc_write_op.pm_pmcid = pmc;
1944 	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
1945 	pmc_write_op.pm_value = value;
1946 	return (PMC_CALL(PMC_OP_PMCRW, &pmc_write_op));
1947 }
1948 
1949 int
1950 pmc_writelog(uint32_t userdata)
1951 {
1952 	struct pmc_op_writelog wl;
1953 
1954 	wl.pm_userdata = userdata;
1955 	return (PMC_CALL(PMC_OP_WRITELOG, &wl));
1956 }
1957