xref: /freebsd/lib/libpmc/libpmc.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2003-2008 Joseph Koshy
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/module.h>
33 #include <sys/pmc.h>
34 #include <sys/syscall.h>
35 
36 #include <ctype.h>
37 #include <errno.h>
38 #include <err.h>
39 #include <fcntl.h>
40 #include <pmc.h>
41 #include <stdio.h>
42 #include <stdlib.h>
43 #include <string.h>
44 #include <strings.h>
45 #include <sysexits.h>
46 #include <unistd.h>
47 
48 #include "libpmcinternal.h"
49 
50 /* Function prototypes */
51 #if defined(__amd64__) || defined(__i386__)
52 static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
53     struct pmc_op_pmcallocate *_pmc_config);
54 #endif
55 #if defined(__amd64__) || defined(__i386__)
56 static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
57     struct pmc_op_pmcallocate *_pmc_config);
58 #endif
59 #if defined(__arm__)
60 static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
61     struct pmc_op_pmcallocate *_pmc_config);
62 #endif
63 #if defined(__aarch64__)
64 static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
65     struct pmc_op_pmcallocate *_pmc_config);
66 static int cmn600_pmu_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
67     struct pmc_op_pmcallocate *_pmc_config);
68 static int dmc620_pmu_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
69     struct pmc_op_pmcallocate *_pmc_config);
70 #endif
71 static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
72     struct pmc_op_pmcallocate *_pmc_config);
73 
74 #if defined(__powerpc__)
75 static int powerpc_allocate_pmc(enum pmc_event _pe, char* ctrspec,
76 			     struct pmc_op_pmcallocate *_pmc_config);
77 #endif /* __powerpc__ */
78 
79 #define PMC_CALL(op, params)	syscall(pmc_syscall, (op), (params))
80 
81 /*
82  * Event aliases provide a way for the user to ask for generic events
83  * like "cache-misses", or "instructions-retired".  These aliases are
84  * mapped to the appropriate canonical event descriptions using a
85  * lookup table.
86  */
87 struct pmc_event_alias {
88 	const char	*pm_alias;
89 	const char	*pm_spec;
90 };
91 
92 static const struct pmc_event_alias *pmc_mdep_event_aliases;
93 
94 /*
95  * The pmc_event_descr structure maps symbolic names known to the user
96  * to integer codes used by the PMC KLD.
97  */
98 struct pmc_event_descr {
99 	const char	*pm_ev_name;
100 	enum pmc_event	pm_ev_code;
101 };
102 
103 /*
104  * The pmc_class_descr structure maps class name prefixes for
105  * event names to event tables and other PMC class data.
106  */
107 struct pmc_class_descr {
108 	const char	*pm_evc_name;
109 	size_t		pm_evc_name_size;
110 	enum pmc_class	pm_evc_class;
111 	const struct pmc_event_descr *pm_evc_event_table;
112 	size_t		pm_evc_event_table_size;
113 	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
114 			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
115 };
116 
117 #define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
118 #define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)
119 
120 #undef	__PMC_EV
121 #define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
122 
123 /*
124  * PMC_CLASSDEP_TABLE(NAME, CLASS)
125  *
126  * Define a table mapping event names and aliases to HWPMC event IDs.
127  */
128 #define	PMC_CLASSDEP_TABLE(N, C)				\
129 	static const struct pmc_event_descr N##_event_table[] =	\
130 	{							\
131 		__PMC_EV_##C()					\
132 	}
133 
134 PMC_CLASSDEP_TABLE(iaf, IAF);
135 PMC_CLASSDEP_TABLE(k8, K8);
136 PMC_CLASSDEP_TABLE(armv7, ARMV7);
137 PMC_CLASSDEP_TABLE(armv8, ARMV8);
138 PMC_CLASSDEP_TABLE(cmn600_pmu, CMN600_PMU);
139 PMC_CLASSDEP_TABLE(dmc620_pmu_cd2, DMC620_PMU_CD2);
140 PMC_CLASSDEP_TABLE(dmc620_pmu_c, DMC620_PMU_C);
141 PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
142 PMC_CLASSDEP_TABLE(ppc970, PPC970);
143 PMC_CLASSDEP_TABLE(e500, E500);
144 
145 static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];
146 
147 #undef	__PMC_EV_ALIAS
148 #define	__PMC_EV_ALIAS(N,CODE) 	{ N, PMC_EV_##CODE },
149 
150 /*
151  * TODO: Factor out the __PMC_EV_ARMV7/8 list into a single separate table
152  * rather than duplicating for each core.
153  */
154 
155 static const struct pmc_event_descr cortex_a8_event_table[] =
156 {
157 	__PMC_EV_ALIAS_ARMV7_CORTEX_A8()
158 	__PMC_EV_ARMV7()
159 };
160 
161 static const struct pmc_event_descr cortex_a9_event_table[] =
162 {
163 	__PMC_EV_ALIAS_ARMV7_CORTEX_A9()
164 	__PMC_EV_ARMV7()
165 };
166 
167 static const struct pmc_event_descr cortex_a53_event_table[] =
168 {
169 	__PMC_EV_ALIAS_ARMV8_CORTEX_A53()
170 	__PMC_EV_ARMV8()
171 };
172 
173 static const struct pmc_event_descr cortex_a57_event_table[] =
174 {
175 	__PMC_EV_ALIAS_ARMV8_CORTEX_A57()
176 	__PMC_EV_ARMV8()
177 };
178 
179 static const struct pmc_event_descr cortex_a76_event_table[] =
180 {
181 	__PMC_EV_ALIAS_ARMV8_CORTEX_A76()
182 	__PMC_EV_ARMV8()
183 };
184 
185 static const struct pmc_event_descr tsc_event_table[] =
186 {
187 	__PMC_EV_ALIAS_TSC()
188 };
189 
190 #undef	PMC_CLASS_TABLE_DESC
191 #define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
192 static const struct pmc_class_descr NAME##_class_table_descr =	\
193 	{							\
194 		.pm_evc_name  = #CLASS "-",			\
195 		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
196 		.pm_evc_class = PMC_CLASS_##CLASS ,		\
197 		.pm_evc_event_table = EVENTS##_event_table ,	\
198 		.pm_evc_event_table_size = 			\
199 			PMC_EVENT_TABLE_SIZE(EVENTS),		\
200 		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
201 	}
202 
203 #if	defined(__i386__) || defined(__amd64__)
204 PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
205 #endif
206 #if	defined(__i386__) || defined(__amd64__)
207 PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
208 #endif
209 #if	defined(__arm__)
210 PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7);
211 PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7);
212 #endif
213 #if	defined(__aarch64__)
214 PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
215 PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
216 PMC_CLASS_TABLE_DESC(cortex_a76, ARMV8, cortex_a76, arm64);
217 PMC_CLASS_TABLE_DESC(cmn600_pmu, CMN600_PMU, cmn600_pmu, cmn600_pmu);
218 PMC_CLASS_TABLE_DESC(dmc620_pmu_cd2, DMC620_PMU_CD2, dmc620_pmu_cd2, dmc620_pmu);
219 PMC_CLASS_TABLE_DESC(dmc620_pmu_c, DMC620_PMU_C, dmc620_pmu_c, dmc620_pmu);
220 #endif
221 #if defined(__powerpc__)
222 PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
223 PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
224 PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc);
225 #endif
226 
227 static struct pmc_class_descr soft_class_table_descr =
228 {
229 	.pm_evc_name  = "SOFT-",
230 	.pm_evc_name_size = sizeof("SOFT-") - 1,
231 	.pm_evc_class = PMC_CLASS_SOFT,
232 	.pm_evc_event_table = NULL,
233 	.pm_evc_event_table_size = 0,
234 	.pm_evc_allocate_pmc = soft_allocate_pmc
235 };
236 
237 #undef	PMC_CLASS_TABLE_DESC
238 
239 static const struct pmc_class_descr **pmc_class_table;
240 #define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass
241 
242 /*
243  * Mapping tables, mapping enumeration values to human readable
244  * strings.
245  */
246 
247 static const char * pmc_capability_names[] = {
248 #undef	__PMC_CAP
249 #define	__PMC_CAP(N,V,D)	#N ,
250 	__PMC_CAPS()
251 };
252 
253 struct pmc_class_map {
254 	enum pmc_class	pm_class;
255 	const char	*pm_name;
256 };
257 
258 static const struct pmc_class_map pmc_class_names[] = {
259 #undef	__PMC_CLASS
260 #define __PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } ,
261 	__PMC_CLASSES()
262 };
263 
264 struct pmc_cputype_map {
265 	enum pmc_cputype pm_cputype;
266 	const char	*pm_name;
267 };
268 
269 static const struct pmc_cputype_map pmc_cputype_names[] = {
270 #undef	__PMC_CPU
271 #define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
272 	__PMC_CPUS()
273 };
274 
275 static const char * pmc_disposition_names[] = {
276 #undef	__PMC_DISP
277 #define	__PMC_DISP(D)	#D ,
278 	__PMC_DISPOSITIONS()
279 };
280 
281 static const char * pmc_mode_names[] = {
282 #undef  __PMC_MODE
283 #define __PMC_MODE(M,N)	#M ,
284 	__PMC_MODES()
285 };
286 
287 static const char * pmc_state_names[] = {
288 #undef  __PMC_STATE
289 #define __PMC_STATE(S) #S ,
290 	__PMC_STATES()
291 };
292 
293 /*
294  * Filled in by pmc_init().
295  */
296 static int pmc_syscall = -1;
297 static struct pmc_cpuinfo cpu_info;
298 static struct pmc_op_getdyneventinfo soft_event_info;
299 
300 /* Event masks for events */
301 struct pmc_masks {
302 	const char	*pm_name;
303 	const uint64_t	pm_value;
304 };
305 #define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
306 #define	NULLMASK	{ .pm_name = NULL }
307 
308 #if defined(__amd64__) || defined(__i386__)
309 static int
310 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
311 {
312 	const struct pmc_masks *pm;
313 	char *q, *r;
314 	int c;
315 
316 	if (pmask == NULL)	/* no mask keywords */
317 		return (-1);
318 	q = strchr(p, '=');	/* skip '=' */
319 	if (*++q == '\0')	/* no more data */
320 		return (-1);
321 	c = 0;			/* count of mask keywords seen */
322 	while ((r = strsep(&q, "+")) != NULL) {
323 		for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
324 		    pm++)
325 			;
326 		if (pm->pm_name == NULL) /* not found */
327 			return (-1);
328 		*evmask |= pm->pm_value;
329 		c++;
330 	}
331 	return (c);
332 }
333 #endif
334 
335 #define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
336 #define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
337 #define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
338 
339 #if defined(__amd64__) || defined(__i386__)
340 /*
341  * AMD K8 PMCs.
342  *
343  */
344 
345 static struct pmc_event_alias k8_aliases[] = {
346 	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
347 	EV_ALIAS("branch-mispredicts",
348 	    "k8-fr-retired-taken-branches-mispredicted"),
349 	EV_ALIAS("cycles",		"tsc"),
350 	EV_ALIAS("dc-misses",		"k8-dc-miss"),
351 	EV_ALIAS("ic-misses",		"k8-ic-miss"),
352 	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
353 	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
354 	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
355 	EV_ALIAS(NULL, NULL)
356 };
357 
358 #define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))
359 
360 /*
361  * Parsing tables
362  */
363 
364 /* fp dispatched fpu ops */
365 static const struct pmc_masks k8_mask_fdfo[] = {
366 	__K8MASK(add-pipe-excluding-junk-ops,	0),
367 	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
368 	__K8MASK(store-pipe-excluding-junk-ops,	2),
369 	__K8MASK(add-pipe-junk-ops,		3),
370 	__K8MASK(multiply-pipe-junk-ops,	4),
371 	__K8MASK(store-pipe-junk-ops,		5),
372 	NULLMASK
373 };
374 
375 /* ls segment register loads */
376 static const struct pmc_masks k8_mask_lsrl[] = {
377 	__K8MASK(es,	0),
378 	__K8MASK(cs,	1),
379 	__K8MASK(ss,	2),
380 	__K8MASK(ds,	3),
381 	__K8MASK(fs,	4),
382 	__K8MASK(gs,	5),
383 	__K8MASK(hs,	6),
384 	NULLMASK
385 };
386 
387 /* ls locked operation */
388 static const struct pmc_masks k8_mask_llo[] = {
389 	__K8MASK(locked-instructions,	0),
390 	__K8MASK(cycles-in-request,	1),
391 	__K8MASK(cycles-to-complete,	2),
392 	NULLMASK
393 };
394 
395 /* dc refill from {l2,system} and dc copyback */
396 static const struct pmc_masks k8_mask_dc[] = {
397 	__K8MASK(invalid,	0),
398 	__K8MASK(shared,	1),
399 	__K8MASK(exclusive,	2),
400 	__K8MASK(owner,		3),
401 	__K8MASK(modified,	4),
402 	NULLMASK
403 };
404 
405 /* dc one bit ecc error */
406 static const struct pmc_masks k8_mask_dobee[] = {
407 	__K8MASK(scrubber,	0),
408 	__K8MASK(piggyback,	1),
409 	NULLMASK
410 };
411 
412 /* dc dispatched prefetch instructions */
413 static const struct pmc_masks k8_mask_ddpi[] = {
414 	__K8MASK(load,	0),
415 	__K8MASK(store,	1),
416 	__K8MASK(nta,	2),
417 	NULLMASK
418 };
419 
420 /* dc dcache accesses by locks */
421 static const struct pmc_masks k8_mask_dabl[] = {
422 	__K8MASK(accesses,	0),
423 	__K8MASK(misses,	1),
424 	NULLMASK
425 };
426 
427 /* bu internal l2 request */
428 static const struct pmc_masks k8_mask_bilr[] = {
429 	__K8MASK(ic-fill,	0),
430 	__K8MASK(dc-fill,	1),
431 	__K8MASK(tlb-reload,	2),
432 	__K8MASK(tag-snoop,	3),
433 	__K8MASK(cancelled,	4),
434 	NULLMASK
435 };
436 
437 /* bu fill request l2 miss */
438 static const struct pmc_masks k8_mask_bfrlm[] = {
439 	__K8MASK(ic-fill,	0),
440 	__K8MASK(dc-fill,	1),
441 	__K8MASK(tlb-reload,	2),
442 	NULLMASK
443 };
444 
445 /* bu fill into l2 */
446 static const struct pmc_masks k8_mask_bfil[] = {
447 	__K8MASK(dirty-l2-victim,	0),
448 	__K8MASK(victim-from-l2,	1),
449 	NULLMASK
450 };
451 
452 /* fr retired fpu instructions */
453 static const struct pmc_masks k8_mask_frfi[] = {
454 	__K8MASK(x87,			0),
455 	__K8MASK(mmx-3dnow,		1),
456 	__K8MASK(packed-sse-sse2,	2),
457 	__K8MASK(scalar-sse-sse2,	3),
458 	NULLMASK
459 };
460 
461 /* fr retired fastpath double op instructions */
462 static const struct pmc_masks k8_mask_frfdoi[] = {
463 	__K8MASK(low-op-pos-0,		0),
464 	__K8MASK(low-op-pos-1,		1),
465 	__K8MASK(low-op-pos-2,		2),
466 	NULLMASK
467 };
468 
469 /* fr fpu exceptions */
470 static const struct pmc_masks k8_mask_ffe[] = {
471 	__K8MASK(x87-reclass-microfaults,	0),
472 	__K8MASK(sse-retype-microfaults,	1),
473 	__K8MASK(sse-reclass-microfaults,	2),
474 	__K8MASK(sse-and-x87-microtraps,	3),
475 	NULLMASK
476 };
477 
478 /* nb memory controller page access event */
479 static const struct pmc_masks k8_mask_nmcpae[] = {
480 	__K8MASK(page-hit,	0),
481 	__K8MASK(page-miss,	1),
482 	__K8MASK(page-conflict,	2),
483 	NULLMASK
484 };
485 
486 /* nb memory controller turnaround */
487 static const struct pmc_masks k8_mask_nmct[] = {
488 	__K8MASK(dimm-turnaround,		0),
489 	__K8MASK(read-to-write-turnaround,	1),
490 	__K8MASK(write-to-read-turnaround,	2),
491 	NULLMASK
492 };
493 
494 /* nb memory controller bypass saturation */
495 static const struct pmc_masks k8_mask_nmcbs[] = {
496 	__K8MASK(memory-controller-hi-pri-bypass,	0),
497 	__K8MASK(memory-controller-lo-pri-bypass,	1),
498 	__K8MASK(dram-controller-interface-bypass,	2),
499 	__K8MASK(dram-controller-queue-bypass,		3),
500 	NULLMASK
501 };
502 
503 /* nb sized commands */
504 static const struct pmc_masks k8_mask_nsc[] = {
505 	__K8MASK(nonpostwrszbyte,	0),
506 	__K8MASK(nonpostwrszdword,	1),
507 	__K8MASK(postwrszbyte,		2),
508 	__K8MASK(postwrszdword,		3),
509 	__K8MASK(rdszbyte,		4),
510 	__K8MASK(rdszdword,		5),
511 	__K8MASK(rdmodwr,		6),
512 	NULLMASK
513 };
514 
515 /* nb probe result */
516 static const struct pmc_masks k8_mask_npr[] = {
517 	__K8MASK(probe-miss,		0),
518 	__K8MASK(probe-hit,		1),
519 	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
520 	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
521 	NULLMASK
522 };
523 
524 /* nb hypertransport bus bandwidth */
525 static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
526 	__K8MASK(command,	0),
527 	__K8MASK(data,	1),
528 	__K8MASK(buffer-release, 2),
529 	__K8MASK(nop,	3),
530 	NULLMASK
531 };
532 
533 #undef	__K8MASK
534 
535 #define	K8_KW_COUNT	"count"
536 #define	K8_KW_EDGE	"edge"
537 #define	K8_KW_INV	"inv"
538 #define	K8_KW_MASK	"mask"
539 #define	K8_KW_OS	"os"
540 #define	K8_KW_USR	"usr"
541 
542 static int
543 k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
544     struct pmc_op_pmcallocate *pmc_config)
545 {
546 	char		*e, *p, *q;
547 	int		n;
548 	uint32_t	count;
549 	uint64_t	evmask;
550 	const struct pmc_masks	*pm, *pmask;
551 
552 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
553 	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
554 
555 	pmask = NULL;
556 	evmask = 0;
557 
558 #define	__K8SETMASK(M) pmask = k8_mask_##M
559 
560 	/* setup parsing tables */
561 	switch (pe) {
562 	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
563 		__K8SETMASK(fdfo);
564 		break;
565 	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
566 		__K8SETMASK(lsrl);
567 		break;
568 	case PMC_EV_K8_LS_LOCKED_OPERATION:
569 		__K8SETMASK(llo);
570 		break;
571 	case PMC_EV_K8_DC_REFILL_FROM_L2:
572 	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
573 	case PMC_EV_K8_DC_COPYBACK:
574 		__K8SETMASK(dc);
575 		break;
576 	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
577 		__K8SETMASK(dobee);
578 		break;
579 	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
580 		__K8SETMASK(ddpi);
581 		break;
582 	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
583 		__K8SETMASK(dabl);
584 		break;
585 	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
586 		__K8SETMASK(bilr);
587 		break;
588 	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
589 		__K8SETMASK(bfrlm);
590 		break;
591 	case PMC_EV_K8_BU_FILL_INTO_L2:
592 		__K8SETMASK(bfil);
593 		break;
594 	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
595 		__K8SETMASK(frfi);
596 		break;
597 	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
598 		__K8SETMASK(frfdoi);
599 		break;
600 	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
601 		__K8SETMASK(ffe);
602 		break;
603 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
604 		__K8SETMASK(nmcpae);
605 		break;
606 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
607 		__K8SETMASK(nmct);
608 		break;
609 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
610 		__K8SETMASK(nmcbs);
611 		break;
612 	case PMC_EV_K8_NB_SIZED_COMMANDS:
613 		__K8SETMASK(nsc);
614 		break;
615 	case PMC_EV_K8_NB_PROBE_RESULT:
616 		__K8SETMASK(npr);
617 		break;
618 	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
619 	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
620 	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
621 		__K8SETMASK(nhbb);
622 		break;
623 
624 	default:
625 		break;		/* no options defined */
626 	}
627 
628 	while ((p = strsep(&ctrspec, ",")) != NULL) {
629 		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
630 			q = strchr(p, '=');
631 			if (*++q == '\0') /* skip '=' */
632 				return (-1);
633 
634 			count = strtol(q, &e, 0);
635 			if (e == q || *e != '\0')
636 				return (-1);
637 
638 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
639 			pmc_config->pm_md.pm_amd.pm_amd_config |=
640 			    AMD_PMC_TO_COUNTER(count);
641 
642 		} else if (KWMATCH(p, K8_KW_EDGE)) {
643 			pmc_config->pm_caps |= PMC_CAP_EDGE;
644 		} else if (KWMATCH(p, K8_KW_INV)) {
645 			pmc_config->pm_caps |= PMC_CAP_INVERT;
646 		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
647 			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
648 				return (-1);
649 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
650 		} else if (KWMATCH(p, K8_KW_OS)) {
651 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
652 		} else if (KWMATCH(p, K8_KW_USR)) {
653 			pmc_config->pm_caps |= PMC_CAP_USER;
654 		} else
655 			return (-1);
656 	}
657 
658 	/* other post processing */
659 	switch (pe) {
660 	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
661 	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
662 	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
663 	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
664 	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
665 	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
666 		/* XXX only available in rev B and later */
667 		break;
668 	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
669 		/* XXX only available in rev C and later */
670 		break;
671 	case PMC_EV_K8_LS_LOCKED_OPERATION:
672 		/* XXX CPU Rev A,B evmask is to be zero */
673 		if (evmask & (evmask - 1)) /* > 1 bit set */
674 			return (-1);
675 		if (evmask == 0) {
676 			evmask = 0x01; /* Rev C and later: #instrs */
677 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
678 		}
679 		break;
680 	default:
681 		if (evmask == 0 && pmask != NULL) {
682 			for (pm = pmask; pm->pm_name; pm++)
683 				evmask |= pm->pm_value;
684 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
685 		}
686 	}
687 
688 	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
689 		pmc_config->pm_md.pm_amd.pm_amd_config =
690 		    AMD_PMC_TO_UNITMASK(evmask);
691 
692 	return (0);
693 }
694 
695 #endif
696 
697 #if	defined(__i386__) || defined(__amd64__)
698 static int
699 tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
700     struct pmc_op_pmcallocate *pmc_config)
701 {
702 	if (pe != PMC_EV_TSC_TSC)
703 		return (-1);
704 
705 	/* TSC events must be unqualified. */
706 	if (ctrspec && *ctrspec != '\0')
707 		return (-1);
708 
709 	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
710 	pmc_config->pm_caps |= PMC_CAP_READ;
711 
712 	return (0);
713 }
714 #endif
715 
716 static struct pmc_event_alias generic_aliases[] = {
717 	EV_ALIAS("instructions",		"SOFT-CLOCK.HARD"),
718 	EV_ALIAS(NULL, NULL)
719 };
720 
721 static int
722 soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
723     struct pmc_op_pmcallocate *pmc_config)
724 {
725 	(void)ctrspec;
726 	(void)pmc_config;
727 
728 	if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
729 		return (-1);
730 
731 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
732 	return (0);
733 }
734 
735 #if	defined(__arm__)
736 static struct pmc_event_alias cortex_a8_aliases[] = {
737 	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
738 	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
739 	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
740 	EV_ALIAS(NULL, NULL)
741 };
742 
743 static struct pmc_event_alias cortex_a9_aliases[] = {
744 	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
745 	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
746 	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
747 	EV_ALIAS(NULL, NULL)
748 };
749 
750 static int
751 armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
752     struct pmc_op_pmcallocate *pmc_config __unused)
753 {
754 	switch (pe) {
755 	default:
756 		break;
757 	}
758 
759 	return (0);
760 }
761 #endif
762 
763 #if	defined(__aarch64__)
764 static struct pmc_event_alias cortex_a53_aliases[] = {
765 	EV_ALIAS(NULL, NULL)
766 };
767 static struct pmc_event_alias cortex_a57_aliases[] = {
768 	EV_ALIAS(NULL, NULL)
769 };
770 static struct pmc_event_alias cortex_a76_aliases[] = {
771 	EV_ALIAS(NULL, NULL)
772 };
773 
774 static int
775 arm64_allocate_pmc(enum pmc_event pe, char *ctrspec,
776     struct pmc_op_pmcallocate *pmc_config)
777 {
778 	char *p;
779 
780 	while ((p = strsep(&ctrspec, ",")) != NULL) {
781 		if (KWMATCH(p, "os"))
782 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
783 		else if (KWMATCH(p, "usr"))
784 			pmc_config->pm_caps |= PMC_CAP_USER;
785 		else
786 			return (-1);
787 	}
788 
789 	return (0);
790 }
791 
792 static int
793 cmn600_pmu_allocate_pmc(enum pmc_event pe, char *ctrspec,
794     struct pmc_op_pmcallocate *pmc_config)
795 {
796 	uint32_t nodeid, occupancy, xpport, xpchannel;
797 	char *e, *p, *q;
798 	unsigned int i;
799 	char *xpport_names[] = { "East", "West", "North", "South", "devport0",
800 	    "devport1" };
801 	char *xpchannel_names[] = { "REQ", "RSP", "SNP", "DAT" };
802 
803 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
804 	pmc_config->pm_caps |= PMC_CAP_SYSTEM;
805 	pmc_config->pm_md.pm_cmn600.pma_cmn600_config = 0;
806 	/*
807 	 * CMN600 extra fields:
808 	 * * nodeid - node coordinates x[2-3],y[2-3],p[1],s[2]
809 	 * 		width of x and y fields depend on matrix size.
810 	 * * occupancy - numeric value to select desired filter.
811 	 * * xpport - East, West, North, South, devport0, devport1 (or 0, 1, ..., 5)
812 	 * * xpchannel - REQ, RSP, SNP, DAT (or 0, 1, 2, 3)
813 	 */
814 
815 	while ((p = strsep(&ctrspec, ",")) != NULL) {
816 		if (KWPREFIXMATCH(p, "nodeid=")) {
817 			q = strchr(p, '=');
818 			if (*++q == '\0') /* skip '=' */
819 				return (-1);
820 
821 			nodeid = strtol(q, &e, 0);
822 			if (e == q || *e != '\0')
823 				return (-1);
824 
825 			pmc_config->pm_md.pm_cmn600.pma_cmn600_nodeid |= nodeid;
826 
827 		} else if (KWPREFIXMATCH(p, "occupancy=")) {
828 			q = strchr(p, '=');
829 			if (*++q == '\0') /* skip '=' */
830 				return (-1);
831 
832 			occupancy = strtol(q, &e, 0);
833 			if (e == q || *e != '\0')
834 				return (-1);
835 
836 			pmc_config->pm_md.pm_cmn600.pma_cmn600_occupancy = occupancy;
837 		} else if (KWPREFIXMATCH(p, "xpport=")) {
838 			q = strchr(p, '=');
839 			if (*++q == '\0') /* skip '=' */
840 				return (-1);
841 
842 			xpport = strtol(q, &e, 0);
843 			if (e == q || *e != '\0') {
844 				for (i = 0; i < nitems(xpport_names); i++) {
845 					if (strcasecmp(xpport_names[i], q) == 0) {
846 						xpport = i;
847 						break;
848 					}
849 				}
850 				if (i == nitems(xpport_names))
851 					return (-1);
852 			}
853 
854 			pmc_config->pm_md.pm_cmn600.pma_cmn600_config |= xpport << 2;
855 		} else if (KWPREFIXMATCH(p, "xpchannel=")) {
856 			q = strchr(p, '=');
857 			if (*++q == '\0') /* skip '=' */
858 				return (-1);
859 
860 			xpchannel = strtol(q, &e, 0);
861 			if (e == q || *e != '\0') {
862 				for (i = 0; i < nitems(xpchannel_names); i++) {
863 					if (strcasecmp(xpchannel_names[i], q) == 0) {
864 						xpchannel = i;
865 						break;
866 					}
867 				}
868 				if (i == nitems(xpchannel_names))
869 					return (-1);
870 			}
871 
872 			pmc_config->pm_md.pm_cmn600.pma_cmn600_config |= xpchannel << 5;
873 		} else
874 			return (-1);
875 	}
876 
877 	return (0);
878 }
879 
880 static int
881 dmc620_pmu_allocate_pmc(enum pmc_event pe, char *ctrspec,
882     struct pmc_op_pmcallocate *pmc_config)
883 {
884 	char		*e, *p, *q;
885 	uint64_t	match, mask;
886 	uint32_t	count;
887 
888 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
889 	pmc_config->pm_caps |= PMC_CAP_SYSTEM;
890 	pmc_config->pm_md.pm_dmc620.pm_dmc620_config = 0;
891 
892 	while ((p = strsep(&ctrspec, ",")) != NULL) {
893 		if (KWPREFIXMATCH(p, "count=")) {
894 			q = strchr(p, '=');
895 			if (*++q == '\0') /* skip '=' */
896 				return (-1);
897 
898 			count = strtol(q, &e, 0);
899 			if (e == q || *e != '\0')
900 				return (-1);
901 
902 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
903 			pmc_config->pm_md.pm_dmc620.pm_dmc620_config |= count;
904 
905 		} else if (KWMATCH(p, "inv")) {
906 			pmc_config->pm_caps |= PMC_CAP_INVERT;
907 		} else if (KWPREFIXMATCH(p, "match=")) {
908 			match = strtol(q, &e, 0);
909 			if (e == q || *e != '\0')
910 				return (-1);
911 
912 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
913 			pmc_config->pm_md.pm_dmc620.pm_dmc620_match = match;
914 		} else if (KWPREFIXMATCH(p, "mask=")) {
915 			q = strchr(p, '=');
916 			if (*++q == '\0') /* skip '=' */
917 				return (-1);
918 
919 			mask = strtol(q, &e, 0);
920 			if (e == q || *e != '\0')
921 				return (-1);
922 
923 			pmc_config->pm_md.pm_dmc620.pm_dmc620_mask = mask;
924 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
925 		} else
926 			return (-1);
927 	}
928 
929 	return (0);
930 }
931 #endif
932 
933 #if defined(__powerpc__)
934 
935 static struct pmc_event_alias ppc7450_aliases[] = {
936 	EV_ALIAS("instructions",	"INSTR_COMPLETED"),
937 	EV_ALIAS("branches",		"BRANCHES_COMPLETED"),
938 	EV_ALIAS("branch-mispredicts",	"MISPREDICTED_BRANCHES"),
939 	EV_ALIAS(NULL, NULL)
940 };
941 
942 static struct pmc_event_alias ppc970_aliases[] = {
943 	EV_ALIAS("instructions", "INSTR_COMPLETED"),
944 	EV_ALIAS("cycles",       "CYCLES"),
945 	EV_ALIAS(NULL, NULL)
946 };
947 
948 static struct pmc_event_alias e500_aliases[] = {
949 	EV_ALIAS("instructions", "INSTR_COMPLETED"),
950 	EV_ALIAS("cycles",       "CYCLES"),
951 	EV_ALIAS(NULL, NULL)
952 };
953 
954 #define	POWERPC_KW_OS		"os"
955 #define	POWERPC_KW_USR		"usr"
956 #define	POWERPC_KW_ANYTHREAD	"anythread"
957 
958 static int
959 powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
960 		     struct pmc_op_pmcallocate *pmc_config __unused)
961 {
962 	char *p;
963 
964 	(void) pe;
965 
966 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
967 
968 	while ((p = strsep(&ctrspec, ",")) != NULL) {
969 		if (KWMATCH(p, POWERPC_KW_OS))
970 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
971 		else if (KWMATCH(p, POWERPC_KW_USR))
972 			pmc_config->pm_caps |= PMC_CAP_USER;
973 		else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
974 			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
975 		else
976 			return (-1);
977 	}
978 
979 	return (0);
980 }
981 
982 #endif /* __powerpc__ */
983 
984 
985 /*
986  * Match an event name `name' with its canonical form.
987  *
988  * Matches are case insensitive and spaces, periods, underscores and
989  * hyphen characters are considered to match each other.
990  *
991  * Returns 1 for a match, 0 otherwise.
992  */
993 
994 static int
995 pmc_match_event_name(const char *name, const char *canonicalname)
996 {
997 	int cc, nc;
998 	const unsigned char *c, *n;
999 
1000 	c = (const unsigned char *) canonicalname;
1001 	n = (const unsigned char *) name;
1002 
1003 	for (; (nc = *n) && (cc = *c); n++, c++) {
1004 
1005 		if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
1006 		    (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
1007 			continue;
1008 
1009 		if (toupper(nc) == toupper(cc))
1010 			continue;
1011 
1012 
1013 		return (0);
1014 	}
1015 
1016 	if (*n == '\0' && *c == '\0')
1017 		return (1);
1018 
1019 	return (0);
1020 }
1021 
1022 /*
1023  * Match an event name against all the event named supported by a
1024  * PMC class.
1025  *
1026  * Returns an event descriptor pointer on match or NULL otherwise.
1027  */
1028 static const struct pmc_event_descr *
1029 pmc_match_event_class(const char *name,
1030     const struct pmc_class_descr *pcd)
1031 {
1032 	size_t n;
1033 	const struct pmc_event_descr *ev;
1034 
1035 	ev = pcd->pm_evc_event_table;
1036 	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
1037 		if (pmc_match_event_name(name, ev->pm_ev_name))
1038 			return (ev);
1039 
1040 	return (NULL);
1041 }
1042 
1043 /*
1044  * API entry points
1045  */
1046 
1047 int
1048 pmc_allocate(const char *ctrspec, enum pmc_mode mode,
1049     uint32_t flags, int cpu, pmc_id_t *pmcid,
1050     uint64_t count)
1051 {
1052 	size_t n;
1053 	int retval;
1054 	char *r, *spec_copy;
1055 	const char *ctrname;
1056 	const struct pmc_event_descr *ev;
1057 	const struct pmc_event_alias *alias;
1058 	struct pmc_op_pmcallocate pmc_config;
1059 	const struct pmc_class_descr *pcd;
1060 
1061 	spec_copy = NULL;
1062 	retval    = -1;
1063 
1064 	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
1065 	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
1066 		errno = EINVAL;
1067 		goto out;
1068 	}
1069 	bzero(&pmc_config, sizeof(pmc_config));
1070 	pmc_config.pm_cpu   = cpu;
1071 	pmc_config.pm_mode  = mode;
1072 	pmc_config.pm_flags = flags;
1073 	pmc_config.pm_count = count;
1074 	if (PMC_IS_SAMPLING_MODE(mode))
1075 		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
1076 
1077 	/*
1078 	 * Try to pull the raw event ID directly from the pmu-events table. If
1079 	 * this is unsupported on the platform, or the event is not found,
1080 	 * continue with searching the regular event tables.
1081 	 */
1082 	r = spec_copy = strdup(ctrspec);
1083 	ctrname = strsep(&r, ",");
1084 	if (pmc_pmu_enabled()) {
1085 		if (pmc_pmu_pmcallocate(ctrname, &pmc_config) == 0)
1086 			goto found;
1087 	}
1088 	free(spec_copy);
1089 	spec_copy = NULL;
1090 
1091 	/* replace an event alias with the canonical event specifier */
1092 	if (pmc_mdep_event_aliases)
1093 		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
1094 			if (!strcasecmp(ctrspec, alias->pm_alias)) {
1095 				spec_copy = strdup(alias->pm_spec);
1096 				break;
1097 			}
1098 
1099 	if (spec_copy == NULL)
1100 		spec_copy = strdup(ctrspec);
1101 
1102 	r = spec_copy;
1103 	ctrname = strsep(&r, ",");
1104 
1105 	/*
1106 	 * If a explicit class prefix was given by the user, restrict the
1107 	 * search for the event to the specified PMC class.
1108 	 */
1109 	ev = NULL;
1110 	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
1111 		pcd = pmc_class_table[n];
1112 		if (pcd != NULL && strncasecmp(ctrname, pcd->pm_evc_name,
1113 		    pcd->pm_evc_name_size) == 0) {
1114 			if ((ev = pmc_match_event_class(ctrname +
1115 			    pcd->pm_evc_name_size, pcd)) == NULL) {
1116 				errno = EINVAL;
1117 				goto out;
1118 			}
1119 			break;
1120 		}
1121 	}
1122 
1123 	/*
1124 	 * Otherwise, search for this event in all compatible PMC
1125 	 * classes.
1126 	 */
1127 	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
1128 		pcd = pmc_class_table[n];
1129 		if (pcd != NULL)
1130 			ev = pmc_match_event_class(ctrname, pcd);
1131 	}
1132 
1133 	if (ev == NULL) {
1134 		errno = EINVAL;
1135 		goto out;
1136 	}
1137 
1138 	pmc_config.pm_ev    = ev->pm_ev_code;
1139 	pmc_config.pm_class = pcd->pm_evc_class;
1140 
1141  	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
1142 		errno = EINVAL;
1143 		goto out;
1144 	}
1145 
1146 found:
1147 	if (PMC_CALL(PMC_OP_PMCALLOCATE, &pmc_config) == 0) {
1148 		*pmcid = pmc_config.pm_pmcid;
1149 		retval = 0;
1150 	}
1151 out:
1152 	if (spec_copy)
1153 		free(spec_copy);
1154 
1155 	return (retval);
1156 }
1157 
1158 int
1159 pmc_attach(pmc_id_t pmc, pid_t pid)
1160 {
1161 	struct pmc_op_pmcattach pmc_attach_args;
1162 
1163 	pmc_attach_args.pm_pmc = pmc;
1164 	pmc_attach_args.pm_pid = pid;
1165 
1166 	return (PMC_CALL(PMC_OP_PMCATTACH, &pmc_attach_args));
1167 }
1168 
1169 int
1170 pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
1171 {
1172 	unsigned int i;
1173 	enum pmc_class cl;
1174 
1175 	cl = PMC_ID_TO_CLASS(pmcid);
1176 	for (i = 0; i < cpu_info.pm_nclass; i++)
1177 		if (cpu_info.pm_classes[i].pm_class == cl) {
1178 			*caps = cpu_info.pm_classes[i].pm_caps;
1179 			return (0);
1180 		}
1181 	errno = EINVAL;
1182 	return (-1);
1183 }
1184 
1185 int
1186 pmc_configure_logfile(int fd)
1187 {
1188 	struct pmc_op_configurelog cla;
1189 
1190 	cla.pm_flags = 0;
1191 	cla.pm_logfd = fd;
1192 	if (PMC_CALL(PMC_OP_CONFIGURELOG, &cla) < 0)
1193 		return (-1);
1194 	return (0);
1195 }
1196 
1197 int
1198 pmc_cpuinfo(const struct pmc_cpuinfo **pci)
1199 {
1200 	if (pmc_syscall == -1) {
1201 		errno = ENXIO;
1202 		return (-1);
1203 	}
1204 
1205 	*pci = &cpu_info;
1206 	return (0);
1207 }
1208 
1209 int
1210 pmc_detach(pmc_id_t pmc, pid_t pid)
1211 {
1212 	struct pmc_op_pmcattach pmc_detach_args;
1213 
1214 	pmc_detach_args.pm_pmc = pmc;
1215 	pmc_detach_args.pm_pid = pid;
1216 	return (PMC_CALL(PMC_OP_PMCDETACH, &pmc_detach_args));
1217 }
1218 
1219 int
1220 pmc_disable(int cpu, int pmc)
1221 {
1222 	struct pmc_op_pmcadmin ssa;
1223 
1224 	ssa.pm_cpu = cpu;
1225 	ssa.pm_pmc = pmc;
1226 	ssa.pm_state = PMC_STATE_DISABLED;
1227 	return (PMC_CALL(PMC_OP_PMCADMIN, &ssa));
1228 }
1229 
1230 int
1231 pmc_enable(int cpu, int pmc)
1232 {
1233 	struct pmc_op_pmcadmin ssa;
1234 
1235 	ssa.pm_cpu = cpu;
1236 	ssa.pm_pmc = pmc;
1237 	ssa.pm_state = PMC_STATE_FREE;
1238 	return (PMC_CALL(PMC_OP_PMCADMIN, &ssa));
1239 }
1240 
1241 /*
1242  * Return a list of events known to a given PMC class.  'cl' is the
1243  * PMC class identifier, 'eventnames' is the returned list of 'const
1244  * char *' pointers pointing to the names of the events. 'nevents' is
1245  * the number of event name pointers returned.
1246  *
1247  * The space for 'eventnames' is allocated using malloc(3).  The caller
1248  * is responsible for freeing this space when done.
1249  */
1250 int
1251 pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
1252     int *nevents)
1253 {
1254 	int count;
1255 	const char **names;
1256 	const struct pmc_event_descr *ev;
1257 
1258 	switch (cl)
1259 	{
1260 	case PMC_CLASS_IAF:
1261 		ev = iaf_event_table;
1262 		count = PMC_EVENT_TABLE_SIZE(iaf);
1263 		break;
1264 	case PMC_CLASS_TSC:
1265 		ev = tsc_event_table;
1266 		count = PMC_EVENT_TABLE_SIZE(tsc);
1267 		break;
1268 	case PMC_CLASS_K8:
1269 		ev = k8_event_table;
1270 		count = PMC_EVENT_TABLE_SIZE(k8);
1271 		break;
1272 	case PMC_CLASS_ARMV7:
1273 		switch (cpu_info.pm_cputype) {
1274 		default:
1275 		case PMC_CPU_ARMV7_CORTEX_A8:
1276 			ev = cortex_a8_event_table;
1277 			count = PMC_EVENT_TABLE_SIZE(cortex_a8);
1278 			break;
1279 		case PMC_CPU_ARMV7_CORTEX_A9:
1280 			ev = cortex_a9_event_table;
1281 			count = PMC_EVENT_TABLE_SIZE(cortex_a9);
1282 			break;
1283 		}
1284 		break;
1285 	case PMC_CLASS_ARMV8:
1286 		switch (cpu_info.pm_cputype) {
1287 		default:
1288 		case PMC_CPU_ARMV8_CORTEX_A53:
1289 			ev = cortex_a53_event_table;
1290 			count = PMC_EVENT_TABLE_SIZE(cortex_a53);
1291 			break;
1292 		case PMC_CPU_ARMV8_CORTEX_A57:
1293 			ev = cortex_a57_event_table;
1294 			count = PMC_EVENT_TABLE_SIZE(cortex_a57);
1295 			break;
1296 		case PMC_CPU_ARMV8_CORTEX_A76:
1297 			ev = cortex_a76_event_table;
1298 			count = PMC_EVENT_TABLE_SIZE(cortex_a76);
1299 			break;
1300 		}
1301 		break;
1302 	case PMC_CLASS_CMN600_PMU:
1303 		ev = cmn600_pmu_event_table;
1304 		count = PMC_EVENT_TABLE_SIZE(cmn600_pmu);
1305 		break;
1306 	case PMC_CLASS_DMC620_PMU_CD2:
1307 		ev = dmc620_pmu_cd2_event_table;
1308 		count = PMC_EVENT_TABLE_SIZE(dmc620_pmu_cd2);
1309 		break;
1310 	case PMC_CLASS_DMC620_PMU_C:
1311 		ev = dmc620_pmu_c_event_table;
1312 		count = PMC_EVENT_TABLE_SIZE(dmc620_pmu_c);
1313 		break;
1314 	case PMC_CLASS_PPC7450:
1315 		ev = ppc7450_event_table;
1316 		count = PMC_EVENT_TABLE_SIZE(ppc7450);
1317 		break;
1318 	case PMC_CLASS_PPC970:
1319 		ev = ppc970_event_table;
1320 		count = PMC_EVENT_TABLE_SIZE(ppc970);
1321 		break;
1322 	case PMC_CLASS_E500:
1323 		ev = e500_event_table;
1324 		count = PMC_EVENT_TABLE_SIZE(e500);
1325 		break;
1326 	case PMC_CLASS_SOFT:
1327 		ev = soft_event_table;
1328 		count = soft_event_info.pm_nevent;
1329 		break;
1330 	default:
1331 		errno = EINVAL;
1332 		return (-1);
1333 	}
1334 
1335 	if ((names = malloc(count * sizeof(const char *))) == NULL)
1336 		return (-1);
1337 
1338 	*eventnames = names;
1339 	*nevents = count;
1340 
1341 	for (;count--; ev++, names++)
1342 		*names = ev->pm_ev_name;
1343 
1344 	return (0);
1345 }
1346 
1347 int
1348 pmc_flush_logfile(void)
1349 {
1350 	return (PMC_CALL(PMC_OP_FLUSHLOG, 0));
1351 }
1352 
1353 int
1354 pmc_close_logfile(void)
1355 {
1356 	return (PMC_CALL(PMC_OP_CLOSELOG, 0));
1357 }
1358 
1359 int
1360 pmc_get_driver_stats(struct pmc_driverstats *ds)
1361 {
1362 	struct pmc_op_getdriverstats gms;
1363 
1364 	if (PMC_CALL(PMC_OP_GETDRIVERSTATS, &gms) < 0)
1365 		return (-1);
1366 
1367 	/* copy out fields in the current userland<->library interface */
1368 	ds->pm_intr_ignored    = gms.pm_intr_ignored;
1369 	ds->pm_intr_processed  = gms.pm_intr_processed;
1370 	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
1371 	ds->pm_syscalls        = gms.pm_syscalls;
1372 	ds->pm_syscall_errors  = gms.pm_syscall_errors;
1373 	ds->pm_buffer_requests = gms.pm_buffer_requests;
1374 	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
1375 	ds->pm_log_sweeps      = gms.pm_log_sweeps;
1376 	return (0);
1377 }
1378 
1379 int
1380 pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
1381 {
1382 	struct pmc_op_getmsr gm;
1383 
1384 	gm.pm_pmcid = pmc;
1385 	if (PMC_CALL(PMC_OP_PMCGETMSR, &gm) < 0)
1386 		return (-1);
1387 	*msr = gm.pm_msr;
1388 	return (0);
1389 }
1390 
1391 int
1392 pmc_init(void)
1393 {
1394 	int error, pmc_mod_id;
1395 	unsigned int n;
1396 	uint32_t abi_version;
1397 	struct module_stat pmc_modstat;
1398 	struct pmc_op_getcpuinfo op_cpu_info;
1399 
1400 	if (pmc_syscall != -1) /* already inited */
1401 		return (0);
1402 
1403 	/* retrieve the system call number from the KLD */
1404 	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
1405 		return (-1);
1406 
1407 	pmc_modstat.version = sizeof(struct module_stat);
1408 	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
1409 		return (-1);
1410 
1411 	pmc_syscall = pmc_modstat.data.intval;
1412 
1413 	/* check the kernel module's ABI against our compiled-in version */
1414 	abi_version = PMC_VERSION;
1415 	if (PMC_CALL(PMC_OP_GETMODULEVERSION, &abi_version) < 0)
1416 		return (pmc_syscall = -1);
1417 
1418 	/* ignore patch & minor numbers for the comparison */
1419 	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
1420 		errno  = EPROGMISMATCH;
1421 		return (pmc_syscall = -1);
1422 	}
1423 
1424 	bzero(&op_cpu_info, sizeof(op_cpu_info));
1425 	if (PMC_CALL(PMC_OP_GETCPUINFO, &op_cpu_info) < 0)
1426 		return (pmc_syscall = -1);
1427 
1428 	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
1429 	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
1430 	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
1431 	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
1432 	for (n = 0; n < op_cpu_info.pm_nclass; n++)
1433 		memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n],
1434 		    sizeof(cpu_info.pm_classes[n]));
1435 
1436 	pmc_class_table = calloc(PMC_CLASS_TABLE_SIZE,
1437 	    sizeof(struct pmc_class_descr *));
1438 
1439 	if (pmc_class_table == NULL)
1440 		return (-1);
1441 
1442 	/*
1443 	 * Get soft events list.
1444 	 */
1445 	soft_event_info.pm_class = PMC_CLASS_SOFT;
1446 	if (PMC_CALL(PMC_OP_GETDYNEVENTINFO, &soft_event_info) < 0)
1447 		return (pmc_syscall = -1);
1448 
1449 	/* Map soft events to static list. */
1450 	for (n = 0; n < soft_event_info.pm_nevent; n++) {
1451 		soft_event_table[n].pm_ev_name =
1452 		    soft_event_info.pm_events[n].pm_ev_name;
1453 		soft_event_table[n].pm_ev_code =
1454 		    soft_event_info.pm_events[n].pm_ev_code;
1455 	}
1456 	soft_class_table_descr.pm_evc_event_table_size = \
1457 	    soft_event_info.pm_nevent;
1458 	soft_class_table_descr.pm_evc_event_table = \
1459 	    soft_event_table;
1460 
1461 	/*
1462 	 * Fill in the class table.
1463 	 */
1464 	n = 0;
1465 	for (unsigned i = 0; i < PMC_CLASS_TABLE_SIZE; i++) {
1466 		switch (cpu_info.pm_classes[i].pm_class) {
1467 #if defined(__amd64__) || defined(__i386__)
1468 		case PMC_CLASS_TSC:
1469 			pmc_class_table[n++] = &tsc_class_table_descr;
1470 			break;
1471 
1472 		case PMC_CLASS_K8:
1473 			pmc_class_table[n++] = &k8_class_table_descr;
1474 			break;
1475 #endif
1476 
1477 		case PMC_CLASS_SOFT:
1478 			pmc_class_table[n++] = &soft_class_table_descr;
1479 			break;
1480 
1481 #if defined(__arm__)
1482 		case PMC_CLASS_ARMV7:
1483 			switch (cpu_info.pm_cputype) {
1484 			case PMC_CPU_ARMV7_CORTEX_A8:
1485 				pmc_class_table[n++] =
1486 				    &cortex_a8_class_table_descr;
1487 				break;
1488 			case PMC_CPU_ARMV7_CORTEX_A9:
1489 				pmc_class_table[n++] =
1490 				    &cortex_a9_class_table_descr;
1491 				break;
1492 			default:
1493 				errno = ENXIO;
1494 				return (pmc_syscall = -1);
1495 			}
1496 			break;
1497 #endif
1498 
1499 #if defined(__aarch64__)
1500 		case PMC_CLASS_ARMV8:
1501 			switch (cpu_info.pm_cputype) {
1502 			case PMC_CPU_ARMV8_CORTEX_A53:
1503 				pmc_class_table[n++] =
1504 				    &cortex_a53_class_table_descr;
1505 				break;
1506 			case PMC_CPU_ARMV8_CORTEX_A57:
1507 				pmc_class_table[n++] =
1508 				    &cortex_a57_class_table_descr;
1509 				break;
1510 			case PMC_CPU_ARMV8_CORTEX_A76:
1511 				pmc_class_table[n++] =
1512 				    &cortex_a76_class_table_descr;
1513 				break;
1514 			default:
1515 				errno = ENXIO;
1516 				return (pmc_syscall = -1);
1517 			}
1518 			break;
1519 
1520 		case PMC_CLASS_DMC620_PMU_CD2:
1521 			pmc_class_table[n++] =
1522 			    &dmc620_pmu_cd2_class_table_descr;
1523 			break;
1524 
1525 		case PMC_CLASS_DMC620_PMU_C:
1526 			pmc_class_table[n++] = &dmc620_pmu_c_class_table_descr;
1527 			break;
1528 
1529 		case PMC_CLASS_CMN600_PMU:
1530 			pmc_class_table[n++] = &cmn600_pmu_class_table_descr;
1531 			break;
1532 #endif
1533 
1534 #if defined(__powerpc__)
1535 		case PMC_CLASS_PPC7450:
1536 			pmc_class_table[n++] = &ppc7450_class_table_descr;
1537 			break;
1538 
1539 		case PMC_CLASS_PPC970:
1540 			pmc_class_table[n++] = &ppc970_class_table_descr;
1541 			break;
1542 
1543 		case PMC_CLASS_E500:
1544 			pmc_class_table[n++] = &e500_class_table_descr;
1545 			break;
1546 #endif
1547 
1548 		default:
1549 #if defined(DEBUG)
1550 			printf("pm_class: 0x%x\n",
1551 			    cpu_info.pm_classes[i].pm_class);
1552 #endif
1553 			break;
1554 		}
1555 	}
1556 
1557 #define	PMC_MDEP_INIT(C) pmc_mdep_event_aliases = C##_aliases
1558 
1559 	/* Configure the event name parser. */
1560 	switch (cpu_info.pm_cputype) {
1561 #if defined(__amd64__) || defined(__i386__)
1562 	case PMC_CPU_AMD_K8:
1563 		PMC_MDEP_INIT(k8);
1564 		break;
1565 #endif
1566 	case PMC_CPU_GENERIC:
1567 		PMC_MDEP_INIT(generic);
1568 		break;
1569 #if defined(__arm__)
1570 	case PMC_CPU_ARMV7_CORTEX_A8:
1571 		PMC_MDEP_INIT(cortex_a8);
1572 		break;
1573 	case PMC_CPU_ARMV7_CORTEX_A9:
1574 		PMC_MDEP_INIT(cortex_a9);
1575 		break;
1576 #endif
1577 #if defined(__aarch64__)
1578 	case PMC_CPU_ARMV8_CORTEX_A53:
1579 		PMC_MDEP_INIT(cortex_a53);
1580 		break;
1581 	case PMC_CPU_ARMV8_CORTEX_A57:
1582 		PMC_MDEP_INIT(cortex_a57);
1583 		break;
1584 	case PMC_CPU_ARMV8_CORTEX_A76:
1585 		PMC_MDEP_INIT(cortex_a76);
1586 		break;
1587 #endif
1588 #if defined(__powerpc__)
1589 	case PMC_CPU_PPC_7450:
1590 		PMC_MDEP_INIT(ppc7450);
1591 		break;
1592 	case PMC_CPU_PPC_970:
1593 		PMC_MDEP_INIT(ppc970);
1594 		break;
1595 	case PMC_CPU_PPC_E500:
1596 		PMC_MDEP_INIT(e500);
1597 		break;
1598 #endif
1599 	default:
1600 		/*
1601 		 * Some kind of CPU this version of the library knows nothing
1602 		 * about.  This shouldn't happen since the abi version check
1603 		 * should have caught this.
1604 		 */
1605 #if defined(__amd64__) || defined(__i386__) || defined(__powerpc64__)
1606 		break;
1607 #endif
1608 		errno = ENXIO;
1609 		return (pmc_syscall = -1);
1610 	}
1611 
1612 	return (0);
1613 }
1614 
1615 const char *
1616 pmc_name_of_capability(enum pmc_caps cap)
1617 {
1618 	int i;
1619 
1620 	/*
1621 	 * 'cap' should have a single bit set and should be in
1622 	 * range.
1623 	 */
1624 	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
1625 	    cap > PMC_CAP_LAST) {
1626 		errno = EINVAL;
1627 		return (NULL);
1628 	}
1629 
1630 	i = ffs(cap);
1631 	return (pmc_capability_names[i - 1]);
1632 }
1633 
1634 const char *
1635 pmc_name_of_class(enum pmc_class pc)
1636 {
1637 	size_t n;
1638 
1639 	for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++)
1640 		if (pc == pmc_class_names[n].pm_class)
1641 			return (pmc_class_names[n].pm_name);
1642 
1643 	errno = EINVAL;
1644 	return (NULL);
1645 }
1646 
1647 const char *
1648 pmc_name_of_cputype(enum pmc_cputype cp)
1649 {
1650 	size_t n;
1651 
1652 	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
1653 		if (cp == pmc_cputype_names[n].pm_cputype)
1654 			return (pmc_cputype_names[n].pm_name);
1655 
1656 	errno = EINVAL;
1657 	return (NULL);
1658 }
1659 
1660 const char *
1661 pmc_name_of_disposition(enum pmc_disp pd)
1662 {
1663 	if ((int) pd >= PMC_DISP_FIRST &&
1664 	    pd <= PMC_DISP_LAST)
1665 		return (pmc_disposition_names[pd]);
1666 
1667 	errno = EINVAL;
1668 	return (NULL);
1669 }
1670 
1671 const char *
1672 _pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
1673 {
1674 	const struct pmc_event_descr *ev, *evfence;
1675 
1676 	ev = evfence = NULL;
1677 	if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
1678 		ev = k8_event_table;
1679 		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
1680 
1681 	} else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
1682 		switch (cpu) {
1683 		case PMC_CPU_ARMV7_CORTEX_A8:
1684 			ev = cortex_a8_event_table;
1685 			evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8);
1686 			break;
1687 		case PMC_CPU_ARMV7_CORTEX_A9:
1688 			ev = cortex_a9_event_table;
1689 			evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9);
1690 			break;
1691 		default:	/* Unknown CPU type. */
1692 			break;
1693 		}
1694 	} else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
1695 		switch (cpu) {
1696 		case PMC_CPU_ARMV8_CORTEX_A53:
1697 			ev = cortex_a53_event_table;
1698 			evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
1699 			break;
1700 		case PMC_CPU_ARMV8_CORTEX_A57:
1701 			ev = cortex_a57_event_table;
1702 			evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
1703 			break;
1704 		case PMC_CPU_ARMV8_CORTEX_A76:
1705 			ev = cortex_a76_event_table;
1706 			evfence = cortex_a76_event_table + PMC_EVENT_TABLE_SIZE(cortex_a76);
1707 			break;
1708 		default:	/* Unknown CPU type. */
1709 			break;
1710 		}
1711 	} else if (pe >= PMC_EV_CMN600_PMU_FIRST &&
1712 	    pe <= PMC_EV_CMN600_PMU_LAST) {
1713 		ev = cmn600_pmu_event_table;
1714 		evfence = cmn600_pmu_event_table +
1715 		    PMC_EVENT_TABLE_SIZE(cmn600_pmu);
1716 	} else if (pe >= PMC_EV_DMC620_PMU_CD2_FIRST &&
1717 	    pe <= PMC_EV_DMC620_PMU_CD2_LAST) {
1718 		ev = dmc620_pmu_cd2_event_table;
1719 		evfence = dmc620_pmu_cd2_event_table +
1720 		    PMC_EVENT_TABLE_SIZE(dmc620_pmu_cd2);
1721 	} else if (pe >= PMC_EV_DMC620_PMU_C_FIRST &&
1722 	    pe <= PMC_EV_DMC620_PMU_C_LAST) {
1723 		ev = dmc620_pmu_c_event_table;
1724 		evfence = dmc620_pmu_c_event_table +
1725 		    PMC_EVENT_TABLE_SIZE(dmc620_pmu_c);
1726 	} else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
1727 		ev = ppc7450_event_table;
1728 		evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
1729 	} else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
1730 		ev = ppc970_event_table;
1731 		evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
1732 	} else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) {
1733 		ev = e500_event_table;
1734 		evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500);
1735 	} else if (pe == PMC_EV_TSC_TSC) {
1736 		ev = tsc_event_table;
1737 		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
1738 	} else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
1739 		ev = soft_event_table;
1740 		evfence = soft_event_table + soft_event_info.pm_nevent;
1741 	}
1742 
1743 	for (; ev != evfence; ev++)
1744 		if (pe == ev->pm_ev_code)
1745 			return (ev->pm_ev_name);
1746 
1747 	return (NULL);
1748 }
1749 
1750 const char *
1751 pmc_name_of_event(enum pmc_event pe)
1752 {
1753 	const char *n;
1754 
1755 	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
1756 		return (n);
1757 
1758 	errno = EINVAL;
1759 	return (NULL);
1760 }
1761 
1762 const char *
1763 pmc_name_of_mode(enum pmc_mode pm)
1764 {
1765 	if ((int) pm >= PMC_MODE_FIRST &&
1766 	    pm <= PMC_MODE_LAST)
1767 		return (pmc_mode_names[pm]);
1768 
1769 	errno = EINVAL;
1770 	return (NULL);
1771 }
1772 
1773 const char *
1774 pmc_name_of_state(enum pmc_state ps)
1775 {
1776 	if ((int) ps >= PMC_STATE_FIRST &&
1777 	    ps <= PMC_STATE_LAST)
1778 		return (pmc_state_names[ps]);
1779 
1780 	errno = EINVAL;
1781 	return (NULL);
1782 }
1783 
1784 int
1785 pmc_ncpu(void)
1786 {
1787 	if (pmc_syscall == -1) {
1788 		errno = ENXIO;
1789 		return (-1);
1790 	}
1791 
1792 	return (cpu_info.pm_ncpu);
1793 }
1794 
1795 int
1796 pmc_npmc(int cpu)
1797 {
1798 	if (pmc_syscall == -1) {
1799 		errno = ENXIO;
1800 		return (-1);
1801 	}
1802 
1803 	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
1804 		errno = EINVAL;
1805 		return (-1);
1806 	}
1807 
1808 	return (cpu_info.pm_npmc);
1809 }
1810 
1811 int
1812 pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
1813 {
1814 	int nbytes, npmc;
1815 	struct pmc_op_getpmcinfo *pmci;
1816 
1817 	if ((npmc = pmc_npmc(cpu)) < 0)
1818 		return (-1);
1819 
1820 	nbytes = sizeof(struct pmc_op_getpmcinfo) +
1821 	    npmc * sizeof(struct pmc_info);
1822 
1823 	if ((pmci = calloc(1, nbytes)) == NULL)
1824 		return (-1);
1825 
1826 	pmci->pm_cpu  = cpu;
1827 
1828 	if (PMC_CALL(PMC_OP_GETPMCINFO, pmci) < 0) {
1829 		free(pmci);
1830 		return (-1);
1831 	}
1832 
1833 	/* kernel<->library, library<->userland interfaces are identical */
1834 	*ppmci = (struct pmc_pmcinfo *) pmci;
1835 	return (0);
1836 }
1837 
1838 int
1839 pmc_read(pmc_id_t pmc, pmc_value_t *value)
1840 {
1841 	struct pmc_op_pmcrw pmc_read_op;
1842 
1843 	pmc_read_op.pm_pmcid = pmc;
1844 	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
1845 	pmc_read_op.pm_value = -1;
1846 
1847 	if (PMC_CALL(PMC_OP_PMCRW, &pmc_read_op) < 0)
1848 		return (-1);
1849 
1850 	*value = pmc_read_op.pm_value;
1851 	return (0);
1852 }
1853 
1854 int
1855 pmc_release(pmc_id_t pmc)
1856 {
1857 	struct pmc_op_simple	pmc_release_args;
1858 
1859 	pmc_release_args.pm_pmcid = pmc;
1860 	return (PMC_CALL(PMC_OP_PMCRELEASE, &pmc_release_args));
1861 }
1862 
1863 int
1864 pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
1865 {
1866 	struct pmc_op_pmcrw pmc_rw_op;
1867 
1868 	pmc_rw_op.pm_pmcid = pmc;
1869 	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
1870 	pmc_rw_op.pm_value = newvalue;
1871 
1872 	if (PMC_CALL(PMC_OP_PMCRW, &pmc_rw_op) < 0)
1873 		return (-1);
1874 
1875 	*oldvaluep = pmc_rw_op.pm_value;
1876 	return (0);
1877 }
1878 
1879 int
1880 pmc_set(pmc_id_t pmc, pmc_value_t value)
1881 {
1882 	struct pmc_op_pmcsetcount sc;
1883 
1884 	sc.pm_pmcid = pmc;
1885 	sc.pm_count = value;
1886 
1887 	if (PMC_CALL(PMC_OP_PMCSETCOUNT, &sc) < 0)
1888 		return (-1);
1889 	return (0);
1890 }
1891 
1892 int
1893 pmc_start(pmc_id_t pmc)
1894 {
1895 	struct pmc_op_simple	pmc_start_args;
1896 
1897 	pmc_start_args.pm_pmcid = pmc;
1898 	return (PMC_CALL(PMC_OP_PMCSTART, &pmc_start_args));
1899 }
1900 
1901 int
1902 pmc_stop(pmc_id_t pmc)
1903 {
1904 	struct pmc_op_simple	pmc_stop_args;
1905 
1906 	pmc_stop_args.pm_pmcid = pmc;
1907 	return (PMC_CALL(PMC_OP_PMCSTOP, &pmc_stop_args));
1908 }
1909 
1910 int
1911 pmc_width(pmc_id_t pmcid, uint32_t *width)
1912 {
1913 	unsigned int i;
1914 	enum pmc_class cl;
1915 
1916 	cl = PMC_ID_TO_CLASS(pmcid);
1917 	for (i = 0; i < cpu_info.pm_nclass; i++)
1918 		if (cpu_info.pm_classes[i].pm_class == cl) {
1919 			*width = cpu_info.pm_classes[i].pm_width;
1920 			return (0);
1921 		}
1922 	errno = EINVAL;
1923 	return (-1);
1924 }
1925 
1926 int
1927 pmc_write(pmc_id_t pmc, pmc_value_t value)
1928 {
1929 	struct pmc_op_pmcrw pmc_write_op;
1930 
1931 	pmc_write_op.pm_pmcid = pmc;
1932 	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
1933 	pmc_write_op.pm_value = value;
1934 	return (PMC_CALL(PMC_OP_PMCRW, &pmc_write_op));
1935 }
1936 
1937 int
1938 pmc_writelog(uint32_t userdata)
1939 {
1940 	struct pmc_op_writelog wl;
1941 
1942 	wl.pm_userdata = userdata;
1943 	return (PMC_CALL(PMC_OP_WRITELOG, &wl));
1944 }
1945