xref: /freebsd/lib/libpmc/libpmc.c (revision 41059135ce931c0f1014a999ffabc6bc470ce856)
1 /*-
2  * Copyright (c) 2003-2008 Joseph Koshy
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/module.h>
33 #include <sys/pmc.h>
34 #include <sys/syscall.h>
35 
36 #include <ctype.h>
37 #include <errno.h>
38 #include <fcntl.h>
39 #include <pmc.h>
40 #include <stdio.h>
41 #include <stdlib.h>
42 #include <string.h>
43 #include <strings.h>
44 #include <unistd.h>
45 
46 #include "libpmcinternal.h"
47 
/*
 * Function prototypes.
 *
 * One event-specifier parser ("allocate_pmc" routine) exists per PMC
 * class; each is compiled in only on the architectures that can host
 * that class of counter.  The SOFT class parser is architecture
 * independent and always present.
 */
#if defined(__i386__)
static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
/* Intel fixed-function, programmable, and uncore counters. */
static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int ucf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int ucp_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
/* AMD K8 and Intel Pentium 4. */
static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__i386__)
static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__arm__)
#if defined(__XSCALE__)
static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__aarch64__)
static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__mips__)
static int mips_allocate_pmc(enum pmc_event _pe, char* ctrspec,
			     struct pmc_op_pmcallocate *_pmc_config);
#endif /* __mips__ */
static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);

#if defined(__powerpc__)
static int powerpc_allocate_pmc(enum pmc_event _pe, char* ctrspec,
			     struct pmc_op_pmcallocate *_pmc_config);
#endif /* __powerpc__ */
100 
/*
 * Invoke a hwpmc(4) kernel operation through the syscall slot that
 * pmc_init() discovered and cached in 'pmc_syscall'.
 */
#define PMC_CALL(cmd, params)				\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))
103 
104 /*
105  * Event aliases provide a way for the user to ask for generic events
106  * like "cache-misses", or "instructions-retired".  These aliases are
107  * mapped to the appropriate canonical event descriptions using a
108  * lookup table.
109  */
struct pmc_event_alias {
	const char	*pm_alias;	/* user-visible alias, e.g. "cycles" */
	const char	*pm_spec;	/* canonical event specifier it expands to */
};

/* Alias table for the current CPU; selected at pmc_init() time. */
static const struct pmc_event_alias *pmc_mdep_event_aliases;
116 
117 /*
118  * The pmc_event_descr structure maps symbolic names known to the user
119  * to integer codes used by the PMC KLD.
120  */
struct pmc_event_descr {
	const char	*pm_ev_name;	/* symbolic event name */
	enum pmc_event	pm_ev_code;	/* kernel (PMC KLD) event code */
};
125 
126 /*
127  * The pmc_class_descr structure maps class name prefixes for
128  * event names to event tables and other PMC class data.
129  */
struct pmc_class_descr {
	const char	*pm_evc_name;		/* class prefix, e.g. "IAP-" */
	size_t		pm_evc_name_size;	/* strlen of the prefix */
	enum pmc_class	pm_evc_class;		/* PMC_CLASS_* value */
	const struct pmc_event_descr *pm_evc_event_table; /* events in class */
	size_t		pm_evc_event_table_size; /* number of table entries */
	/* class-specific event specifier parser */
	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
};
139 
/* Number of elements in a statically sized array. */
#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
/* Size of the event table generated by PMC_CLASSDEP_TABLE(N, ...). */
#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)

/* Expand each __PMC_EV(CLASS, NAME) into a { name, code } initializer. */
#undef	__PMC_EV
#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },

/*
 * PMC_CLASSDEP_TABLE(NAME, CLASS)
 *
 * Define a table mapping event names and aliases to HWPMC event IDs.
 */
#define	PMC_CLASSDEP_TABLE(N, C)				\
	static const struct pmc_event_descr N##_event_table[] =	\
	{							\
		__PMC_EV_##C()					\
	}
156 
/* Per-class event tables, generated from the __PMC_EV_* lists in pmc_events.h. */
PMC_CLASSDEP_TABLE(iaf, IAF);
PMC_CLASSDEP_TABLE(k7, K7);
PMC_CLASSDEP_TABLE(k8, K8);
PMC_CLASSDEP_TABLE(p4, P4);
PMC_CLASSDEP_TABLE(p5, P5);
PMC_CLASSDEP_TABLE(p6, P6);
PMC_CLASSDEP_TABLE(xscale, XSCALE);
PMC_CLASSDEP_TABLE(armv7, ARMV7);
PMC_CLASSDEP_TABLE(armv8, ARMV8);
PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
PMC_CLASSDEP_TABLE(mips74k, MIPS74K);
PMC_CLASSDEP_TABLE(octeon, OCTEON);
PMC_CLASSDEP_TABLE(ucf, UCF);
PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
PMC_CLASSDEP_TABLE(ppc970, PPC970);
PMC_CLASSDEP_TABLE(e500, E500);

/* Software events are dynamic; this table is filled in at pmc_init() time. */
static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];
175 
/* Expand each __PMC_EV_ALIAS(NAME, CODE) into a { name, code } initializer. */
#undef	__PMC_EV_ALIAS
#define	__PMC_EV_ALIAS(N,CODE) 	{ N, PMC_EV_##CODE },

/* Per-CPU-model event tables built from alias lists in pmc_events.h. */
static const struct pmc_event_descr atom_event_table[] =
{
	__PMC_EV_ALIAS_ATOM()
};

static const struct pmc_event_descr atom_silvermont_event_table[] =
{
	__PMC_EV_ALIAS_ATOM_SILVERMONT()
};

static const struct pmc_event_descr core_event_table[] =
{
	__PMC_EV_ALIAS_CORE()
};


static const struct pmc_event_descr core2_event_table[] =
{
	__PMC_EV_ALIAS_CORE2()
};

static const struct pmc_event_descr corei7_event_table[] =
{
	__PMC_EV_ALIAS_COREI7()
};

/* Nehalem-EX deliberately reuses the Core i7 event list. */
static const struct pmc_event_descr nehalem_ex_event_table[] =
{
	__PMC_EV_ALIAS_COREI7()
};

static const struct pmc_event_descr haswell_event_table[] =
{
	__PMC_EV_ALIAS_HASWELL()
};

static const struct pmc_event_descr haswell_xeon_event_table[] =
{
	__PMC_EV_ALIAS_HASWELL_XEON()
};

static const struct pmc_event_descr broadwell_event_table[] =
{
	__PMC_EV_ALIAS_BROADWELL()
};

static const struct pmc_event_descr broadwell_xeon_event_table[] =
{
	__PMC_EV_ALIAS_BROADWELL_XEON()
};

static const struct pmc_event_descr skylake_event_table[] =
{
	__PMC_EV_ALIAS_SKYLAKE()
};

static const struct pmc_event_descr ivybridge_event_table[] =
{
	__PMC_EV_ALIAS_IVYBRIDGE()
};

static const struct pmc_event_descr ivybridge_xeon_event_table[] =
{
	__PMC_EV_ALIAS_IVYBRIDGE_XEON()
};

static const struct pmc_event_descr sandybridge_event_table[] =
{
	__PMC_EV_ALIAS_SANDYBRIDGE()
};

static const struct pmc_event_descr sandybridge_xeon_event_table[] =
{
	__PMC_EV_ALIAS_SANDYBRIDGE_XEON()
};

static const struct pmc_event_descr westmere_event_table[] =
{
	__PMC_EV_ALIAS_WESTMERE()
};

/* Westmere-EX deliberately reuses the Westmere event list. */
static const struct pmc_event_descr westmere_ex_event_table[] =
{
	__PMC_EV_ALIAS_WESTMERE()
};

/* Uncore (UCP) event tables. */
static const struct pmc_event_descr corei7uc_event_table[] =
{
	__PMC_EV_ALIAS_COREI7UC()
};

static const struct pmc_event_descr haswelluc_event_table[] =
{
	__PMC_EV_ALIAS_HASWELLUC()
};

static const struct pmc_event_descr broadwelluc_event_table[] =
{
	__PMC_EV_ALIAS_BROADWELLUC()
};

static const struct pmc_event_descr sandybridgeuc_event_table[] =
{
	__PMC_EV_ALIAS_SANDYBRIDGEUC()
};

static const struct pmc_event_descr westmereuc_event_table[] =
{
	__PMC_EV_ALIAS_WESTMEREUC()
};

/* ARM Cortex event tables. */
static const struct pmc_event_descr cortex_a8_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A8()
};

static const struct pmc_event_descr cortex_a9_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A9()
};

static const struct pmc_event_descr cortex_a53_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A53()
};

static const struct pmc_event_descr cortex_a57_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A57()
};
309 
310 /*
311  * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
312  *
313  * Map a CPU to the PMC classes it supports.
314  */
#define	PMC_MDEP_TABLE(N,C,...)				\
	static const enum pmc_class N##_pmc_classes[] = {	\
		PMC_CLASS_##C, __VA_ARGS__			\
	}

/*
 * CPU-model to PMC-class mappings.  The first class listed is the
 * primary (model-specific) class; the remainder are the additional
 * classes (SOFT, fixed-function, TSC, uncore) the CPU also supports.
 */
PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(atom_silvermont, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(core, IAP, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(corei7, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(nehalem_ex, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(haswell, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(haswell_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(broadwell, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(broadwell_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(skylake, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(ivybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(ivybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(sandybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(sandybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(westmere, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(westmere_ex, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(k7, K7, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p4, P4, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p5, P5, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p6, P6, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE);
PMC_MDEP_TABLE(cortex_a8, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
PMC_MDEP_TABLE(cortex_a9, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(cortex_a57, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
PMC_MDEP_TABLE(mips74k, MIPS74K, PMC_CLASS_SOFT, PMC_CLASS_MIPS74K);
PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450, PMC_CLASS_TSC);
PMC_MDEP_TABLE(ppc970, PPC970, PMC_CLASS_SOFT, PMC_CLASS_PPC970, PMC_CLASS_TSC);
PMC_MDEP_TABLE(e500, E500, PMC_CLASS_SOFT, PMC_CLASS_E500, PMC_CLASS_TSC);
PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT);

static const struct pmc_event_descr tsc_event_table[] =
{
	__PMC_EV_TSC()
};
359 
/*
 * PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)
 *
 * Instantiate a class descriptor tying the "CLASS-" name prefix to the
 * EVENTS##_event_table and the ALLOCATOR##_allocate_pmc parser.
 */
#undef	PMC_CLASS_TABLE_DESC
#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
static const struct pmc_class_descr NAME##_class_table_descr =	\
	{							\
		.pm_evc_name  = #CLASS "-",			\
		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
		.pm_evc_class = PMC_CLASS_##CLASS ,		\
		.pm_evc_event_table = EVENTS##_event_table ,	\
		.pm_evc_event_table_size = 			\
			PMC_EVENT_TABLE_SIZE(EVENTS),		\
		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
	}

#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
PMC_CLASS_TABLE_DESC(atom_silvermont, IAP, atom_silvermont, iap);
PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap);
PMC_CLASS_TABLE_DESC(nehalem_ex, IAP, nehalem_ex, iap);
PMC_CLASS_TABLE_DESC(haswell, IAP, haswell, iap);
PMC_CLASS_TABLE_DESC(haswell_xeon, IAP, haswell_xeon, iap);
PMC_CLASS_TABLE_DESC(broadwell, IAP, broadwell, iap);
PMC_CLASS_TABLE_DESC(broadwell_xeon, IAP, broadwell_xeon, iap);
PMC_CLASS_TABLE_DESC(skylake, IAP, skylake, iap);
PMC_CLASS_TABLE_DESC(ivybridge, IAP, ivybridge, iap);
PMC_CLASS_TABLE_DESC(ivybridge_xeon, IAP, ivybridge_xeon, iap);
PMC_CLASS_TABLE_DESC(sandybridge, IAP, sandybridge, iap);
PMC_CLASS_TABLE_DESC(sandybridge_xeon, IAP, sandybridge_xeon, iap);
PMC_CLASS_TABLE_DESC(westmere, IAP, westmere, iap);
PMC_CLASS_TABLE_DESC(westmere_ex, IAP, westmere_ex, iap);
PMC_CLASS_TABLE_DESC(ucf, UCF, ucf, ucf);
PMC_CLASS_TABLE_DESC(corei7uc, UCP, corei7uc, ucp);
PMC_CLASS_TABLE_DESC(haswelluc, UCP, haswelluc, ucp);
PMC_CLASS_TABLE_DESC(broadwelluc, UCP, broadwelluc, ucp);
PMC_CLASS_TABLE_DESC(sandybridgeuc, UCP, sandybridgeuc, ucp);
PMC_CLASS_TABLE_DESC(westmereuc, UCP, westmereuc, ucp);
#endif
#if	defined(__i386__)
PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
#endif
#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
#endif
#if	defined(__i386__)
PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
#endif
#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
#endif
#if	defined(__arm__)
#if	defined(__XSCALE__)
PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
#endif
PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7);
PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7);
#endif
#if	defined(__aarch64__)
PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
#endif
#if defined(__mips__)
PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips);
PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips);
#endif /* __mips__ */
#if defined(__powerpc__)
PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc);
#endif
434 
/*
 * The SOFT class descriptor is mutable: its event table and size are
 * filled in at pmc_init() time from the kernel's dynamic event list.
 */
static struct pmc_class_descr soft_class_table_descr =
{
	.pm_evc_name  = "SOFT-",
	.pm_evc_name_size = sizeof("SOFT-") - 1,
	.pm_evc_class = PMC_CLASS_SOFT,
	.pm_evc_event_table = NULL,
	.pm_evc_event_table_size = 0,
	.pm_evc_allocate_pmc = soft_allocate_pmc
};

#undef	PMC_CLASS_TABLE_DESC

/* Per-CPU array of class descriptors, allocated by pmc_init(). */
static const struct pmc_class_descr **pmc_class_table;
#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass

/* PMC classes supported by the current CPU, selected at pmc_init() time. */
static const enum pmc_class *pmc_mdep_class_list;
static size_t pmc_mdep_class_list_size;
452 
453 /*
454  * Mapping tables, mapping enumeration values to human readable
455  * strings.
456  */
457 
/* Capability bit names, indexed by capability ordinal. */
static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

struct pmc_class_map {
	enum pmc_class	pm_class;	/* PMC_CLASS_* value */
	const char	*pm_name;	/* printable class name */
};

static const struct pmc_class_map pmc_class_names[] = {
#undef	__PMC_CLASS
#define __PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } ,
	__PMC_CLASSES()
};

struct pmc_cputype_map {
	enum pmc_cputype pm_cputype;	/* PMC_CPU_* value */
	const char	*pm_name;	/* printable CPU type name */
};

static const struct pmc_cputype_map pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
	__PMC_CPUS()
};

/* Disposition names, indexed by enum pmc_disp. */
static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

/* Mode names, indexed by enum pmc_mode. */
static const char * pmc_mode_names[] = {
#undef  __PMC_MODE
#define __PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

/* State names, indexed by enum pmc_state. */
static const char * pmc_state_names[] = {
#undef  __PMC_STATE
#define __PMC_STATE(S) #S ,
	__PMC_STATES()
};
503 
504 /*
505  * Filled in by pmc_init().
506  */
static int pmc_syscall = -1;	/* hwpmc syscall number; -1 until pmc_init() */
static struct pmc_cpuinfo cpu_info;	/* CPU description from the kernel */
static struct pmc_op_getdyneventinfo soft_event_info; /* dynamic SOFT events */

/* Event masks for events */
struct pmc_masks {
	const char	*pm_name;	/* mask keyword; NULL terminates table */
	const uint64_t	pm_value;	/* bits OR-ed in when keyword matches */
};
#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
#define	NULLMASK	{ .pm_name = NULL }
518 
519 #if defined(__amd64__) || defined(__i386__)
520 static int
521 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
522 {
523 	const struct pmc_masks *pm;
524 	char *q, *r;
525 	int c;
526 
527 	if (pmask == NULL)	/* no mask keywords */
528 		return (-1);
529 	q = strchr(p, '=');	/* skip '=' */
530 	if (*++q == '\0')	/* no more data */
531 		return (-1);
532 	c = 0;			/* count of mask keywords seen */
533 	while ((r = strsep(&q, "+")) != NULL) {
534 		for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
535 		    pm++)
536 			;
537 		if (pm->pm_name == NULL) /* not found */
538 			return (-1);
539 		*evmask |= pm->pm_value;
540 		c++;
541 	}
542 	return (c);
543 }
544 #endif
545 
/* Exact and prefix keyword matches (case insensitive), and alias helper. */
#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
549 
550 #if defined(__i386__)
551 
552 /*
553  * AMD K7 (Athlon) CPUs.
554  */
555 
/* Generic alias -> canonical event mappings for AMD K7. */
static struct pmc_event_alias k7_aliases[] = {
	EV_ALIAS("branches",		"k7-retired-branches"),
	EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k7-dc-misses"),
	EV_ALIAS("ic-misses",		"k7-ic-misses"),
	EV_ALIAS("instructions",	"k7-retired-instructions"),
	EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
	EV_ALIAS(NULL, NULL)
};

/* Qualifier keywords accepted by k7_allocate_pmc(). */
#define	K7_KW_COUNT	"count"
#define	K7_KW_EDGE	"edge"
#define	K7_KW_INV	"inv"
#define	K7_KW_OS	"os"
#define	K7_KW_UNITMASK	"unitmask"
#define	K7_KW_USR	"usr"
573 
/*
 * Parse a K7 event specifier of the form "event,qual1,qual2,...",
 * filling in the AMD-specific and generic fields of '*pmc_config'.
 * 'ctrspec' is consumed destructively by strsep().
 *
 * Returns 0 on success, -1 on a malformed or inapplicable qualifier.
 */
static int
k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char		*e, *p, *q;
	int		c, has_unitmask;
	uint32_t	count, unitmask;

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	/*
	 * Only the DC refill/writeback events take a MOESI unitmask;
	 * default to all five cache states when none is specified.
	 */
	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
	    pe == PMC_EV_K7_DC_WRITEBACKS) {
		has_unitmask = 1;
		unitmask = AMD_PMC_UNITMASK_MOESI;
	} else
		unitmask = has_unitmask = 0;

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K7_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K7_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, K7_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
			/* reject a unitmask on events that don't take one */
			if (has_unitmask == 0)
				return (-1);
			unitmask = 0;	/* explicit mask replaces the default */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			/* accept any '+'-separated mix of m/o/e/s/i letters */
			while ((c = tolower(*q++)) != 0)
				if (c == 'm')
					unitmask |= AMD_PMC_UNITMASK_M;
				else if (c == 'o')
					unitmask |= AMD_PMC_UNITMASK_O;
				else if (c == 'e')
					unitmask |= AMD_PMC_UNITMASK_E;
				else if (c == 's')
					unitmask |= AMD_PMC_UNITMASK_S;
				else if (c == 'i')
					unitmask |= AMD_PMC_UNITMASK_I;
				else if (c == '+')
					continue;
				else
					return (-1);

			if (unitmask == 0)
				return (-1);

		} else if (KWMATCH(p, K7_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	if (has_unitmask) {
		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		pmc_config->pm_md.pm_amd.pm_amd_config |=
		    AMD_PMC_TO_UNITMASK(unitmask);
	}

	return (0);

}
655 
656 #endif
657 
658 #if defined(__amd64__) || defined(__i386__)
659 
660 /*
661  * Intel Core (Family 6, Model E) PMCs.
662  */
663 
static struct pmc_event_alias core_aliases[] = {
	EV_ALIAS("branches",		"iap-br-instr-ret"),
	EV_ALIAS("branch-mispredicts",	"iap-br-mispred-ret"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-icache-misses"),
	EV_ALIAS("instructions",	"iap-instr-ret"),
	EV_ALIAS("interrupts",		"iap-core-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"iap-unhalted-core-cycles"),
	EV_ALIAS(NULL, NULL)
};

/*
 * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
 * and Atom (Family 6, model 1CH) PMCs.
 *
 * We map aliases to events on the fixed-function counters if these
 * are present.  Note that not all CPUs in this family contain fixed-function
 * counters.
 */

/* Used when fixed-function (IAF) counters are available. */
static struct pmc_event_alias core2_aliases[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iaf-instr-retired.any"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iaf-cpu-clk-unhalted.core"),
	EV_ALIAS(NULL, NULL)
};

/* Fallback aliases using only programmable (IAP) counters. */
static struct pmc_event_alias core2_aliases_without_iaf[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iap-inst-retired.any_p"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iap-cpu-clk-unhalted.core_p"),
	EV_ALIAS(NULL, NULL)
};

/* Later CPU models share the Core2 alias tables verbatim. */
#define	atom_aliases			core2_aliases
#define	atom_aliases_without_iaf	core2_aliases_without_iaf
#define	atom_silvermont_aliases		core2_aliases
#define	atom_silvermont_aliases_without_iaf	core2_aliases_without_iaf
#define corei7_aliases			core2_aliases
#define corei7_aliases_without_iaf	core2_aliases_without_iaf
#define nehalem_ex_aliases		core2_aliases
#define nehalem_ex_aliases_without_iaf	core2_aliases_without_iaf
#define haswell_aliases			core2_aliases
#define haswell_aliases_without_iaf	core2_aliases_without_iaf
#define haswell_xeon_aliases			core2_aliases
#define haswell_xeon_aliases_without_iaf	core2_aliases_without_iaf
#define broadwell_aliases			core2_aliases
#define broadwell_aliases_without_iaf	core2_aliases_without_iaf
#define broadwell_xeon_aliases			core2_aliases
#define broadwell_xeon_aliases_without_iaf	core2_aliases_without_iaf
#define skylake_aliases			core2_aliases
#define skylake_aliases_without_iaf	core2_aliases_without_iaf
#define ivybridge_aliases		core2_aliases
#define ivybridge_aliases_without_iaf	core2_aliases_without_iaf
#define ivybridge_xeon_aliases		core2_aliases
#define ivybridge_xeon_aliases_without_iaf	core2_aliases_without_iaf
#define sandybridge_aliases		core2_aliases
#define sandybridge_aliases_without_iaf	core2_aliases_without_iaf
#define sandybridge_xeon_aliases	core2_aliases
#define sandybridge_xeon_aliases_without_iaf	core2_aliases_without_iaf
#define westmere_aliases		core2_aliases
#define westmere_aliases_without_iaf	core2_aliases_without_iaf
#define westmere_ex_aliases		core2_aliases
#define westmere_ex_aliases_without_iaf	core2_aliases_without_iaf

/* Qualifier keywords accepted by iaf_allocate_pmc(). */
#define	IAF_KW_OS		"os"
#define	IAF_KW_USR		"usr"
#define	IAF_KW_ANYTHREAD	"anythread"
740 
741 /*
742  * Parse an event specifier for Intel fixed function counters.
743  */
744 static int
745 iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
746     struct pmc_op_pmcallocate *pmc_config)
747 {
748 	char *p;
749 
750 	(void) pe;
751 
752 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
753 	pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;
754 
755 	while ((p = strsep(&ctrspec, ",")) != NULL) {
756 		if (KWMATCH(p, IAF_KW_OS))
757 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
758 		else if (KWMATCH(p, IAF_KW_USR))
759 			pmc_config->pm_caps |= PMC_CAP_USER;
760 		else if (KWMATCH(p, IAF_KW_ANYTHREAD))
761 			pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
762 		else
763 			return (-1);
764 	}
765 
766 	return (0);
767 }
768 
769 /*
770  * Core/Core2 support.
771  */
772 
/* Qualifier keywords accepted by iap_allocate_pmc(). */
#define	IAP_KW_AGENT		"agent"
#define	IAP_KW_ANYTHREAD	"anythread"
#define	IAP_KW_CACHESTATE	"cachestate"
#define	IAP_KW_CMASK		"cmask"
#define	IAP_KW_CORE		"core"
#define	IAP_KW_EDGE		"edge"
#define	IAP_KW_INV		"inv"
#define	IAP_KW_OS		"os"
#define	IAP_KW_PREFETCH		"prefetch"
#define	IAP_KW_SNOOPRESPONSE	"snoopresponse"
#define	IAP_KW_SNOOPTYPE	"snooptype"
#define	IAP_KW_TRANSITION	"trans"
#define	IAP_KW_USR		"usr"
#define	IAP_KW_RSP		"rsp"
787 
/* "core=" qualifier: which core(s) to count for. */
static struct pmc_masks iap_core_mask[] = {
	PMCMASK(all,	(0x3 << 14)),
	PMCMASK(this,	(0x1 << 14)),
	NULLMASK
};

/* "agent=" qualifier: bus agent selection. */
static struct pmc_masks iap_agent_mask[] = {
	PMCMASK(this,	0),
	PMCMASK(any,	(0x1 << 13)),
	NULLMASK
};

/* "prefetch=" qualifier: include/exclude hardware prefetches. */
static struct pmc_masks iap_prefetch_mask[] = {
	PMCMASK(both,		(0x3 << 12)),
	PMCMASK(only,		(0x1 << 12)),
	PMCMASK(exclude,	0),
	NULLMASK
};

/* "cachestate=" qualifier: MESI cache states to count. */
static struct pmc_masks iap_cachestate_mask[] = {
	PMCMASK(i,		(1 <<  8)),
	PMCMASK(s,		(1 <<  9)),
	PMCMASK(e,		(1 << 10)),
	PMCMASK(m,		(1 << 11)),
	NULLMASK
};

/* "snoopresponse=" qualifier. */
static struct pmc_masks iap_snoopresponse_mask[] = {
	PMCMASK(clean,		(1 << 8)),
	PMCMASK(hit,		(1 << 9)),
	PMCMASK(hitm,		(1 << 11)),
	NULLMASK
};

/* "snooptype=" qualifier. */
static struct pmc_masks iap_snooptype_mask[] = {
	PMCMASK(cmp2s,		(1 << 8)),
	PMCMASK(cmp2i,		(1 << 9)),
	NULLMASK
};

/* "trans=" qualifier: transition type. */
static struct pmc_masks iap_transition_mask[] = {
	PMCMASK(any,		0x00),
	PMCMASK(frequency,	0x10),
	NULLMASK
};

/* "rsp=" (offcore response) bits for Core i7 and Westmere. */
static struct pmc_masks iap_rsp_mask_i7_wm[] = {
	PMCMASK(DMND_DATA_RD,		(1 <<  0)),
	PMCMASK(DMND_RFO,		(1 <<  1)),
	PMCMASK(DMND_IFETCH,		(1 <<  2)),
	PMCMASK(WB,			(1 <<  3)),
	PMCMASK(PF_DATA_RD,		(1 <<  4)),
	PMCMASK(PF_RFO,			(1 <<  5)),
	PMCMASK(PF_IFETCH,		(1 <<  6)),
	PMCMASK(OTHER,			(1 <<  7)),
	PMCMASK(UNCORE_HIT,		(1 <<  8)),
	PMCMASK(OTHER_CORE_HIT_SNP,	(1 <<  9)),
	PMCMASK(OTHER_CORE_HITM,	(1 << 10)),
	PMCMASK(REMOTE_CACHE_FWD,	(1 << 12)),
	PMCMASK(REMOTE_DRAM,		(1 << 13)),
	PMCMASK(LOCAL_DRAM,		(1 << 14)),
	PMCMASK(NON_DRAM,		(1 << 15)),
	NULLMASK
};

/* "rsp=" bits for Sandy Bridge, Sandy Bridge Xeon and Ivy Bridge. */
static struct pmc_masks iap_rsp_mask_sb_sbx_ib[] = {
	PMCMASK(REQ_DMND_DATA_RD,	(1ULL <<  0)),
	PMCMASK(REQ_DMND_RFO,		(1ULL <<  1)),
	PMCMASK(REQ_DMND_IFETCH,	(1ULL <<  2)),
	PMCMASK(REQ_WB,			(1ULL <<  3)),
	PMCMASK(REQ_PF_DATA_RD,		(1ULL <<  4)),
	PMCMASK(REQ_PF_RFO,		(1ULL <<  5)),
	PMCMASK(REQ_PF_IFETCH,		(1ULL <<  6)),
	PMCMASK(REQ_PF_LLC_DATA_RD,	(1ULL <<  7)),
	PMCMASK(REQ_PF_LLC_RFO,		(1ULL <<  8)),
	PMCMASK(REQ_PF_LLC_IFETCH,	(1ULL <<  9)),
	PMCMASK(REQ_BUS_LOCKS,		(1ULL << 10)),
	PMCMASK(REQ_STRM_ST,		(1ULL << 11)),
	PMCMASK(REQ_OTHER,		(1ULL << 15)),
	PMCMASK(RES_ANY,		(1ULL << 16)),
	PMCMASK(RES_SUPPLIER_SUPP,	(1ULL << 17)),
	PMCMASK(RES_SUPPLIER_LLC_HITM,	(1ULL << 18)),
	PMCMASK(RES_SUPPLIER_LLC_HITE,	(1ULL << 19)),
	PMCMASK(RES_SUPPLIER_LLC_HITS,	(1ULL << 20)),
	PMCMASK(RES_SUPPLIER_LLC_HITF,	(1ULL << 21)),
	PMCMASK(RES_SUPPLIER_LOCAL,	(1ULL << 22)),
	PMCMASK(RES_SNOOP_SNP_NONE,	(1ULL << 31)),
	PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
	PMCMASK(RES_SNOOP_SNP_MISS,	(1ULL << 33)),
	PMCMASK(RES_SNOOP_HIT_NO_FWD,	(1ULL << 34)),
	PMCMASK(RES_SNOOP_HIT_FWD,	(1ULL << 35)),
	PMCMASK(RES_SNOOP_HITM,		(1ULL << 36)),
	PMCMASK(RES_NON_DRAM,		(1ULL << 37)),
	NULLMASK
};

/* Broadwell is defined to use the same mask as Haswell */
static struct pmc_masks iap_rsp_mask_haswell[] = {
	PMCMASK(REQ_DMND_DATA_RD,	(1ULL <<  0)),
	PMCMASK(REQ_DMND_RFO,		(1ULL <<  1)),
	PMCMASK(REQ_DMND_IFETCH,	(1ULL <<  2)),
	PMCMASK(REQ_PF_DATA_RD,		(1ULL <<  4)),
	PMCMASK(REQ_PF_RFO,		(1ULL <<  5)),
	PMCMASK(REQ_PF_IFETCH,		(1ULL <<  6)),
	PMCMASK(REQ_OTHER,		(1ULL << 15)),
	PMCMASK(RES_ANY,		(1ULL << 16)),
	PMCMASK(RES_SUPPLIER_SUPP,	(1ULL << 17)),
	PMCMASK(RES_SUPPLIER_LLC_HITM,	(1ULL << 18)),
	PMCMASK(RES_SUPPLIER_LLC_HITE,	(1ULL << 19)),
	PMCMASK(RES_SUPPLIER_LLC_HITS,	(1ULL << 20)),
	PMCMASK(RES_SUPPLIER_LLC_HITF,	(1ULL << 21)),
	PMCMASK(RES_SUPPLIER_LOCAL,	(1ULL << 22)),
	/*
	 * For processor type 06_45H 22 is L4_HIT_LOCAL_L4
	 * and 23, 24 and 25 are also defined.
	 */
	PMCMASK(RES_SNOOP_SNP_NONE,	(1ULL << 31)),
	PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
	PMCMASK(RES_SNOOP_SNP_MISS,	(1ULL << 33)),
	PMCMASK(RES_SNOOP_HIT_NO_FWD,	(1ULL << 34)),
	PMCMASK(RES_SNOOP_HIT_FWD,	(1ULL << 35)),
	PMCMASK(RES_SNOOP_HITM,		(1ULL << 36)),
	PMCMASK(RES_NON_DRAM,		(1ULL << 37)),
	NULLMASK
};

/* "rsp=" bits for Skylake. */
static struct pmc_masks iap_rsp_mask_skylake[] = {
	PMCMASK(REQ_DMND_DATA_RD,	(1ULL <<  0)),
	PMCMASK(REQ_DMND_RFO,		(1ULL <<  1)),
	PMCMASK(REQ_DMND_IFETCH,	(1ULL <<  2)),
	PMCMASK(REQ_PF_DATA_RD,		(1ULL <<  7)),
	PMCMASK(REQ_PF_RFO,		(1ULL <<  8)),
	PMCMASK(REQ_STRM_ST,		(1ULL << 11)),
	PMCMASK(REQ_OTHER,		(1ULL << 15)),
	PMCMASK(RES_ANY,		(1ULL << 16)),
	PMCMASK(RES_SUPPLIER_SUPP,	(1ULL << 17)),
	PMCMASK(RES_SUPPLIER_LLC_HITM,	(1ULL << 18)),
	PMCMASK(RES_SUPPLIER_LLC_HITE,	(1ULL << 19)),
	PMCMASK(RES_SUPPLIER_LLC_HITS,	(1ULL << 20)),
	PMCMASK(RES_SUPPLIER_L4_HIT,	(1ULL << 22)),
	PMCMASK(RES_SUPPLIER_DRAM,	(1ULL << 26)),
	PMCMASK(RES_SUPPLIER_SPL_HIT,	(1ULL << 30)),
	PMCMASK(RES_SNOOP_SNP_NONE,	(1ULL << 31)),
	PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
	PMCMASK(RES_SNOOP_SNP_MISS,	(1ULL << 33)),
	PMCMASK(RES_SNOOP_HIT_NO_FWD,	(1ULL << 34)),
	PMCMASK(RES_SNOOP_HIT_FWD,	(1ULL << 35)),
	PMCMASK(RES_SNOOP_HITM,		(1ULL << 36)),
	PMCMASK(RES_NON_DRAM,		(1ULL << 37)),
	NULLMASK
};
939 
940 
/*
 * Allocate an Intel architectural-performance ("IAP") PMC.
 *
 * Parses the comma-separated qualifier list in 'ctrspec' (cmask=, edge,
 * inv, os, usr, anythread, core=, agent=, prefetch=, cachestate=, plus
 * CPU-model-specific transition=/snoopresponse=/snooptype=/rsp= keywords)
 * and fills in pmc_config->pm_md.pm_iap.  Event 'pe' is only consulted
 * at the end to supply a default cachestate for events that need one.
 *
 * Returns 0 on success, -1 on a parse error.  The 'ctrspec' string is
 * consumed (modified in place) by strsep().
 */
static int
iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint64_t cachestate, evmask, rsp;
	int count, n;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
	    PMC_CAP_QUALIFIER);
	pmc_config->pm_md.pm_iap.pm_iap_config = 0;

	cachestate = evmask = rsp = 0;

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {

		n = 0;
		if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_iap.pm_iap_config |=
			    IAP_CMASK(count);
		} else if (KWMATCH(p, IAP_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, IAP_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, IAP_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, IAP_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
			pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
		} else if (KWPREFIXMATCH(p, IAP_KW_CORE "=")) {
			/* core=/agent=/prefetch= must match exactly one keyword. */
			n = pmc_parse_mask(iap_core_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_AGENT "=")) {
			n = pmc_parse_mask(iap_agent_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_PREFETCH "=")) {
			n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_CACHESTATE "=")) {
			/*
			 * A cachestate may legally combine several bits, so
			 * only n < 0 (checked below) is an error here.
			 */
			n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
		/* The remaining keywords are valid only on specific CPU models. */
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
		    KWPREFIXMATCH(p, IAP_KW_TRANSITION "=")) {
			n = pmc_parse_mask(iap_transition_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM_SILVERMONT ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME) {
			if (KWPREFIXMATCH(p, IAP_KW_SNOOPRESPONSE "=")) {
				n = pmc_parse_mask(iap_snoopresponse_mask, p,
				    &evmask);
			} else if (KWPREFIXMATCH(p, IAP_KW_SNOOPTYPE "=")) {
				n = pmc_parse_mask(iap_snooptype_mask, p,
				    &evmask);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_COREI7 ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_NEHALEM_EX ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE_EX) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_i7_wm, p, &rsp);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE_XEON ||
			cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE ||
			cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE_XEON ) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_sb_sbx_ib, p, &rsp);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_HASWELL ||
			cpu_info.pm_cputype == PMC_CPU_INTEL_HASWELL_XEON) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_haswell, p, &rsp);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_BROADWELL ||
			cpu_info.pm_cputype == PMC_CPU_INTEL_BROADWELL_XEON) {
			/* Broadwell is defined to use same mask as haswell */
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_haswell, p, &rsp);
			} else
				return (-1);

		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SKYLAKE) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_skylake, p, &rsp);
			} else
				return (-1);

		} else
			return (-1);

		if (n < 0)	/* Parsing failed. */
			return (-1);
	}

	pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;

	/*
	 * If the event requires a 'cachestate' qualifier but was not
	 * specified by the user, use a sensible default.
	 */
	switch (pe) {
	case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
	case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_32H: /* Core */
	case PMC_EV_IAP_EVENT_40H: /* Core */
	case PMC_EV_IAP_EVENT_41H: /* Core */
	case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
		/* Default: all four MESI cachestate bits. */
		if (cachestate == 0)
			cachestate = (0xF << 8);
		break;
	case PMC_EV_IAP_EVENT_77H: /* Atom */
		/* IAP_EVENT_77H only accepts a cachestate qualifier on the
		 * Atom processor
		 */
		if(cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM && cachestate == 0)
			cachestate = (0xF << 8);
	    break;
	default:
		break;
	}

	pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
	pmc_config->pm_md.pm_iap.pm_iap_rsp = rsp;

	return (0);
}
1089 
1090 /*
1091  * Intel Uncore.
1092  */
1093 
1094 static int
1095 ucf_allocate_pmc(enum pmc_event pe, char *ctrspec,
1096     struct pmc_op_pmcallocate *pmc_config)
1097 {
1098 	(void) pe;
1099 	(void) ctrspec;
1100 
1101 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1102 	pmc_config->pm_md.pm_ucf.pm_ucf_flags = 0;
1103 
1104 	return (0);
1105 }
1106 
1107 #define	UCP_KW_CMASK		"cmask"
1108 #define	UCP_KW_EDGE		"edge"
1109 #define	UCP_KW_INV		"inv"
1110 
1111 static int
1112 ucp_allocate_pmc(enum pmc_event pe, char *ctrspec,
1113     struct pmc_op_pmcallocate *pmc_config)
1114 {
1115 	char *e, *p, *q;
1116 	int count, n;
1117 
1118 	(void) pe;
1119 
1120 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
1121 	    PMC_CAP_QUALIFIER);
1122 	pmc_config->pm_md.pm_ucp.pm_ucp_config = 0;
1123 
1124 	/* Parse additional modifiers if present */
1125 	while ((p = strsep(&ctrspec, ",")) != NULL) {
1126 
1127 		n = 0;
1128 		if (KWPREFIXMATCH(p, UCP_KW_CMASK "=")) {
1129 			q = strchr(p, '=');
1130 			if (*++q == '\0') /* skip '=' */
1131 				return (-1);
1132 			count = strtol(q, &e, 0);
1133 			if (e == q || *e != '\0')
1134 				return (-1);
1135 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1136 			pmc_config->pm_md.pm_ucp.pm_ucp_config |=
1137 			    UCP_CMASK(count);
1138 		} else if (KWMATCH(p, UCP_KW_EDGE)) {
1139 			pmc_config->pm_caps |= PMC_CAP_EDGE;
1140 		} else if (KWMATCH(p, UCP_KW_INV)) {
1141 			pmc_config->pm_caps |= PMC_CAP_INVERT;
1142 		} else
1143 			return (-1);
1144 
1145 		if (n < 0)	/* Parsing failed. */
1146 			return (-1);
1147 	}
1148 
1149 	return (0);
1150 }
1151 
1152 /*
1153  * AMD K8 PMCs.
1154  *
1155  * These are very similar to AMD K7 PMCs, but support more kinds of
1156  * events.
1157  */
1158 
/* Canonical event-name aliases for AMD K8 PMCs. */
static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k8-dc-miss"),
	EV_ALIAS("ic-misses",		"k8-ic-miss"),
	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};
1171 
1172 #define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))
1173 
1174 /*
1175  * Parsing tables
1176  */
1177 
/*
 * K8 "mask=" keyword tables, selected per-event by __K8SETMASK() in
 * k8_allocate_pmc() below.  Each __K8MASK(name, V) entry maps keyword
 * 'name' to bit (1 << V) of the event's unit mask.
 */
/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops,	0),
	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
	__K8MASK(store-pipe-excluding-junk-ops,	2),
	__K8MASK(add-pipe-junk-ops,		3),
	__K8MASK(multiply-pipe-junk-ops,	4),
	__K8MASK(store-pipe-junk-ops,		5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es,	0),
	__K8MASK(cs,	1),
	__K8MASK(ss,	2),
	__K8MASK(ds,	3),
	__K8MASK(fs,	4),
	__K8MASK(gs,	5),
	__K8MASK(hs,	6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions,	0),
	__K8MASK(cycles-in-request,	1),
	__K8MASK(cycles-to-complete,	2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid,	0),
	__K8MASK(shared,	1),
	__K8MASK(exclusive,	2),
	__K8MASK(owner,		3),
	__K8MASK(modified,	4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber,	0),
	__K8MASK(piggyback,	1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load,	0),
	__K8MASK(store,	1),
	__K8MASK(nta,	2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses,	0),
	__K8MASK(misses,	1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	__K8MASK(tag-snoop,	3),
	__K8MASK(cancelled,	4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim,	0),
	__K8MASK(victim-from-l2,	1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87,			0),
	__K8MASK(mmx-3dnow,		1),
	__K8MASK(packed-sse-sse2,	2),
	__K8MASK(scalar-sse-sse2,	3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0,		0),
	__K8MASK(low-op-pos-1,		1),
	__K8MASK(low-op-pos-2,		2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults,	0),
	__K8MASK(sse-retype-microfaults,	1),
	__K8MASK(sse-reclass-microfaults,	2),
	__K8MASK(sse-and-x87-microtraps,	3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit,	0),
	__K8MASK(page-miss,	1),
	__K8MASK(page-conflict,	2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround,		0),
	__K8MASK(read-to-write-turnaround,	1),
	__K8MASK(write-to-read-turnaround,	2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass,	0),
	__K8MASK(memory-controller-lo-pri-bypass,	1),
	__K8MASK(dram-controller-interface-bypass,	2),
	__K8MASK(dram-controller-queue-bypass,		3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte,	0),
	__K8MASK(nonpostwrszdword,	1),
	__K8MASK(postwrszbyte,		2),
	__K8MASK(postwrszdword,		3),
	__K8MASK(rdszbyte,		4),
	__K8MASK(rdszdword,		5),
	__K8MASK(rdmodwr,		6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss,		0),
	__K8MASK(probe-hit,		1),
	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command,	0),
	__K8MASK(data,	1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop,	3),
	NULLMASK
};
1346 
1347 #undef	__K8MASK
1348 
1349 #define	K8_KW_COUNT	"count"
1350 #define	K8_KW_EDGE	"edge"
1351 #define	K8_KW_INV	"inv"
1352 #define	K8_KW_MASK	"mask"
1353 #define	K8_KW_OS	"os"
1354 #define	K8_KW_USR	"usr"
1355 
1356 static int
1357 k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
1358     struct pmc_op_pmcallocate *pmc_config)
1359 {
1360 	char		*e, *p, *q;
1361 	int		n;
1362 	uint32_t	count;
1363 	uint64_t	evmask;
1364 	const struct pmc_masks	*pm, *pmask;
1365 
1366 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1367 	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
1368 
1369 	pmask = NULL;
1370 	evmask = 0;
1371 
1372 #define	__K8SETMASK(M) pmask = k8_mask_##M
1373 
1374 	/* setup parsing tables */
1375 	switch (pe) {
1376 	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1377 		__K8SETMASK(fdfo);
1378 		break;
1379 	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
1380 		__K8SETMASK(lsrl);
1381 		break;
1382 	case PMC_EV_K8_LS_LOCKED_OPERATION:
1383 		__K8SETMASK(llo);
1384 		break;
1385 	case PMC_EV_K8_DC_REFILL_FROM_L2:
1386 	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
1387 	case PMC_EV_K8_DC_COPYBACK:
1388 		__K8SETMASK(dc);
1389 		break;
1390 	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
1391 		__K8SETMASK(dobee);
1392 		break;
1393 	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
1394 		__K8SETMASK(ddpi);
1395 		break;
1396 	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1397 		__K8SETMASK(dabl);
1398 		break;
1399 	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
1400 		__K8SETMASK(bilr);
1401 		break;
1402 	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
1403 		__K8SETMASK(bfrlm);
1404 		break;
1405 	case PMC_EV_K8_BU_FILL_INTO_L2:
1406 		__K8SETMASK(bfil);
1407 		break;
1408 	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1409 		__K8SETMASK(frfi);
1410 		break;
1411 	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1412 		__K8SETMASK(frfdoi);
1413 		break;
1414 	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1415 		__K8SETMASK(ffe);
1416 		break;
1417 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
1418 		__K8SETMASK(nmcpae);
1419 		break;
1420 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
1421 		__K8SETMASK(nmct);
1422 		break;
1423 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
1424 		__K8SETMASK(nmcbs);
1425 		break;
1426 	case PMC_EV_K8_NB_SIZED_COMMANDS:
1427 		__K8SETMASK(nsc);
1428 		break;
1429 	case PMC_EV_K8_NB_PROBE_RESULT:
1430 		__K8SETMASK(npr);
1431 		break;
1432 	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
1433 	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
1434 	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
1435 		__K8SETMASK(nhbb);
1436 		break;
1437 
1438 	default:
1439 		break;		/* no options defined */
1440 	}
1441 
1442 	while ((p = strsep(&ctrspec, ",")) != NULL) {
1443 		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
1444 			q = strchr(p, '=');
1445 			if (*++q == '\0') /* skip '=' */
1446 				return (-1);
1447 
1448 			count = strtol(q, &e, 0);
1449 			if (e == q || *e != '\0')
1450 				return (-1);
1451 
1452 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1453 			pmc_config->pm_md.pm_amd.pm_amd_config |=
1454 			    AMD_PMC_TO_COUNTER(count);
1455 
1456 		} else if (KWMATCH(p, K8_KW_EDGE)) {
1457 			pmc_config->pm_caps |= PMC_CAP_EDGE;
1458 		} else if (KWMATCH(p, K8_KW_INV)) {
1459 			pmc_config->pm_caps |= PMC_CAP_INVERT;
1460 		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
1461 			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1462 				return (-1);
1463 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1464 		} else if (KWMATCH(p, K8_KW_OS)) {
1465 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1466 		} else if (KWMATCH(p, K8_KW_USR)) {
1467 			pmc_config->pm_caps |= PMC_CAP_USER;
1468 		} else
1469 			return (-1);
1470 	}
1471 
1472 	/* other post processing */
1473 	switch (pe) {
1474 	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1475 	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
1476 	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
1477 	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1478 	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1479 	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1480 		/* XXX only available in rev B and later */
1481 		break;
1482 	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1483 		/* XXX only available in rev C and later */
1484 		break;
1485 	case PMC_EV_K8_LS_LOCKED_OPERATION:
1486 		/* XXX CPU Rev A,B evmask is to be zero */
1487 		if (evmask & (evmask - 1)) /* > 1 bit set */
1488 			return (-1);
1489 		if (evmask == 0) {
1490 			evmask = 0x01; /* Rev C and later: #instrs */
1491 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1492 		}
1493 		break;
1494 	default:
1495 		if (evmask == 0 && pmask != NULL) {
1496 			for (pm = pmask; pm->pm_name; pm++)
1497 				evmask |= pm->pm_value;
1498 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1499 		}
1500 	}
1501 
1502 	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1503 		pmc_config->pm_md.pm_amd.pm_amd_config =
1504 		    AMD_PMC_TO_UNITMASK(evmask);
1505 
1506 	return (0);
1507 }
1508 
1509 #endif
1510 
1511 #if defined(__amd64__) || defined(__i386__)
1512 
1513 /*
1514  * Intel P4 PMCs
1515  */
1516 
/* Canonical event-name aliases for Intel P4 PMCs. */
static struct pmc_event_alias p4_aliases[] = {
	EV_ALIAS("branches",		"p4-branch-retired,mask=mmtp+mmtm"),
	EV_ALIAS("branch-mispredicts",	"p4-mispred-branch-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("instructions",
	    "p4-instr-retired,mask=nbogusntag+nbogustag"),
	EV_ALIAS("unhalted-cycles",	"p4-global-power-events"),
	EV_ALIAS(NULL, NULL)
};
1526 
1527 #define	P4_KW_ACTIVE	"active"
1528 #define	P4_KW_ACTIVE_ANY "any"
1529 #define	P4_KW_ACTIVE_BOTH "both"
1530 #define	P4_KW_ACTIVE_NONE "none"
1531 #define	P4_KW_ACTIVE_SINGLE "single"
1532 #define	P4_KW_BUSREQTYPE "busreqtype"
1533 #define	P4_KW_CASCADE	"cascade"
1534 #define	P4_KW_EDGE	"edge"
1535 #define	P4_KW_INV	"complement"
1536 #define	P4_KW_OS	"os"
1537 #define	P4_KW_MASK	"mask"
1538 #define	P4_KW_PRECISE	"precise"
1539 #define	P4_KW_TAG	"tag"
1540 #define	P4_KW_THRESHOLD	"threshold"
1541 #define	P4_KW_USR	"usr"
1542 
1543 #define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))
1544 
/*
 * P4 "mask=" keyword tables, selected per-event by __P4SETMASK() in
 * p4_allocate_pmc() below.  Each __P4MASK(name, V) entry maps keyword
 * 'name' to bit (1 << V).
 */
static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
	__P4MASK(dd, 0),
	__P4MASK(db, 1),
	__P4MASK(di, 2),
	__P4MASK(bd, 3),
	__P4MASK(bb, 4),
	__P4MASK(bi, 5),
	__P4MASK(id, 6),
	__P4MASK(ib, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
	__P4MASK(tcmiss, 0),
	NULLMASK,
};

static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
	__P4MASK(hit, 0),
	__P4MASK(miss, 1),
	__P4MASK(hit-uc, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
	__P4MASK(st-rb-full, 2),
	__P4MASK(64k-conf, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
	__P4MASK(lsc, 0),
	__P4MASK(ssc, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
	__P4MASK(split-ld, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
	__P4MASK(split-st, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
	__P4MASK(no-sta, 1),
	__P4MASK(no-std, 3),
	__P4MASK(partial-data, 4),
	__P4MASK(unalgn-addr, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
	__P4MASK(dtmiss, 0),
	__P4MASK(itmiss, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
	__P4MASK(rd-2ndl-hits, 0),
	__P4MASK(rd-2ndl-hite, 1),
	__P4MASK(rd-2ndl-hitm, 2),
	__P4MASK(rd-3rdl-hits, 3),
	__P4MASK(rd-3rdl-hite, 4),
	__P4MASK(rd-3rdl-hitm, 5),
	__P4MASK(rd-2ndl-miss, 8),
	__P4MASK(rd-3rdl-miss, 9),
	__P4MASK(wr-2ndl-miss, 10),
	NULLMASK
};

static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
	__P4MASK(drdy-drv, 0),
	__P4MASK(drdy-own, 1),
	__P4MASK(drdy-other, 2),
	__P4MASK(dbsy-drv, 3),
	__P4MASK(dbsy-own, 4),
	__P4MASK(dbsy-other, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
	__P4MASK(req-type0, 0),
	__P4MASK(req-type1, 1),
	__P4MASK(req-len0, 2),
	__P4MASK(req-len1, 3),
	__P4MASK(req-io-type, 5),
	__P4MASK(req-lock-type, 6),
	__P4MASK(req-cache-type, 7),
	__P4MASK(req-split-type, 8),
	__P4MASK(req-dem-type, 9),
	__P4MASK(req-ord-type, 10),
	__P4MASK(mem-type0, 11),
	__P4MASK(mem-type1, 12),
	__P4MASK(mem-type2, 13),
	NULLMASK
};

static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
	__P4MASK(allp0, 3),
	__P4MASK(allp2, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
	__P4MASK(running, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
	__P4MASK(cisc, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
	__P4MASK(from-tc-build, 0),
	__P4MASK(from-tc-deliver, 1),
	__P4MASK(from-rom, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_rmbt[] = {
	/* retired mispred branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	__P4MASK(return, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};
1744 
1745 static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
1746 	__P4MASK(conditional, 1),
1747 	__P4MASK(call, 2),
1748 	__P4MASK(retired, 3),
1749 	__P4MASK(indirect, 4),
1750 	NULLMASK
1751 };
1752 
/* Remaining P4 "mask=" keyword tables (see __P4MASK above). */
static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
	__P4MASK(sbfull, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
	__P4MASK(wcb-evicts, 0),
	__P4MASK(wcb-full-evict, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_fee[] = { /* front end event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ee[] = { /* execution event */
	__P4MASK(nbogus0, 0),
	__P4MASK(nbogus1, 1),
	__P4MASK(nbogus2, 2),
	__P4MASK(nbogus3, 3),
	__P4MASK(bogus0, 4),
	__P4MASK(bogus1, 5),
	__P4MASK(bogus2, 6),
	__P4MASK(bogus3, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_re[] = { /* replay event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
	__P4MASK(nbogusntag, 0),
	__P4MASK(nbogustag, 1),
	__P4MASK(bogusntag, 2),
	__P4MASK(bogustag, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ut[] = { /* uop type */
	__P4MASK(tagloads, 1),
	__P4MASK(tagstores, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_br[] = { /* branch retired */
	__P4MASK(mmnp, 0),
	__P4MASK(mmnm, 1),
	__P4MASK(mmtp, 2),
	__P4MASK(mmtm, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
	__P4MASK(nbogus, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
	__P4MASK(fpsu, 0),
	__P4MASK(fpso, 1),
	__P4MASK(poao, 2),
	__P4MASK(poau, 3),
	__P4MASK(prea, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
	__P4MASK(clear, 0),
	__P4MASK(moclear, 2),
	__P4MASK(smclear, 3),
	NULLMASK
};
1836 
1837 /* P4 event parser */
1838 static int
1839 p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1840     struct pmc_op_pmcallocate *pmc_config)
1841 {
1842 
1843 	char	*e, *p, *q;
1844 	int	count, has_tag, has_busreqtype, n;
1845 	uint32_t cccractivemask;
1846 	uint64_t evmask;
1847 	const struct pmc_masks *pm, *pmask;
1848 
1849 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1850 	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1851 	    pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1852 
1853 	pmask   = NULL;
1854 	evmask  = 0;
1855 	cccractivemask = 0x3;
1856 	has_tag = has_busreqtype = 0;
1857 
1858 #define	__P4SETMASK(M) do {				\
1859 	pmask = p4_mask_##M;				\
1860 } while (0)
1861 
1862 	switch (pe) {
1863 	case PMC_EV_P4_TC_DELIVER_MODE:
1864 		__P4SETMASK(tcdm);
1865 		break;
1866 	case PMC_EV_P4_BPU_FETCH_REQUEST:
1867 		__P4SETMASK(bfr);
1868 		break;
1869 	case PMC_EV_P4_ITLB_REFERENCE:
1870 		__P4SETMASK(ir);
1871 		break;
1872 	case PMC_EV_P4_MEMORY_CANCEL:
1873 		__P4SETMASK(memcan);
1874 		break;
1875 	case PMC_EV_P4_MEMORY_COMPLETE:
1876 		__P4SETMASK(memcomp);
1877 		break;
1878 	case PMC_EV_P4_LOAD_PORT_REPLAY:
1879 		__P4SETMASK(lpr);
1880 		break;
1881 	case PMC_EV_P4_STORE_PORT_REPLAY:
1882 		__P4SETMASK(spr);
1883 		break;
1884 	case PMC_EV_P4_MOB_LOAD_REPLAY:
1885 		__P4SETMASK(mlr);
1886 		break;
1887 	case PMC_EV_P4_PAGE_WALK_TYPE:
1888 		__P4SETMASK(pwt);
1889 		break;
1890 	case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1891 		__P4SETMASK(bcr);
1892 		break;
1893 	case PMC_EV_P4_IOQ_ALLOCATION:
1894 		__P4SETMASK(ia);
1895 		has_busreqtype = 1;
1896 		break;
1897 	case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1898 		__P4SETMASK(iae);
1899 		has_busreqtype = 1;
1900 		break;
1901 	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1902 		__P4SETMASK(fda);
1903 		break;
1904 	case PMC_EV_P4_BSQ_ALLOCATION:
1905 		__P4SETMASK(ba);
1906 		break;
1907 	case PMC_EV_P4_SSE_INPUT_ASSIST:
1908 		__P4SETMASK(sia);
1909 		break;
1910 	case PMC_EV_P4_PACKED_SP_UOP:
1911 		__P4SETMASK(psu);
1912 		break;
1913 	case PMC_EV_P4_PACKED_DP_UOP:
1914 		__P4SETMASK(pdu);
1915 		break;
1916 	case PMC_EV_P4_SCALAR_SP_UOP:
1917 		__P4SETMASK(ssu);
1918 		break;
1919 	case PMC_EV_P4_SCALAR_DP_UOP:
1920 		__P4SETMASK(sdu);
1921 		break;
1922 	case PMC_EV_P4_64BIT_MMX_UOP:
1923 		__P4SETMASK(64bmu);
1924 		break;
1925 	case PMC_EV_P4_128BIT_MMX_UOP:
1926 		__P4SETMASK(128bmu);
1927 		break;
1928 	case PMC_EV_P4_X87_FP_UOP:
1929 		__P4SETMASK(xfu);
1930 		break;
1931 	case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1932 		__P4SETMASK(xsmu);
1933 		break;
1934 	case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1935 		__P4SETMASK(gpe);
1936 		break;
1937 	case PMC_EV_P4_TC_MS_XFER:
1938 		__P4SETMASK(tmx);
1939 		break;
1940 	case PMC_EV_P4_UOP_QUEUE_WRITES:
1941 		__P4SETMASK(uqw);
1942 		break;
1943 	case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1944 		__P4SETMASK(rmbt);
1945 		break;
1946 	case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1947 		__P4SETMASK(rbt);
1948 		break;
1949 	case PMC_EV_P4_RESOURCE_STALL:
1950 		__P4SETMASK(rs);
1951 		break;
1952 	case PMC_EV_P4_WC_BUFFER:
1953 		__P4SETMASK(wb);
1954 		break;
1955 	case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1956 	case PMC_EV_P4_B2B_CYCLES:
1957 	case PMC_EV_P4_BNR:
1958 	case PMC_EV_P4_SNOOP:
1959 	case PMC_EV_P4_RESPONSE:
1960 		break;
1961 	case PMC_EV_P4_FRONT_END_EVENT:
1962 		__P4SETMASK(fee);
1963 		break;
1964 	case PMC_EV_P4_EXECUTION_EVENT:
1965 		__P4SETMASK(ee);
1966 		break;
1967 	case PMC_EV_P4_REPLAY_EVENT:
1968 		__P4SETMASK(re);
1969 		break;
1970 	case PMC_EV_P4_INSTR_RETIRED:
1971 		__P4SETMASK(insret);
1972 		break;
1973 	case PMC_EV_P4_UOPS_RETIRED:
1974 		__P4SETMASK(ur);
1975 		break;
1976 	case PMC_EV_P4_UOP_TYPE:
1977 		__P4SETMASK(ut);
1978 		break;
1979 	case PMC_EV_P4_BRANCH_RETIRED:
1980 		__P4SETMASK(br);
1981 		break;
1982 	case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1983 		__P4SETMASK(mbr);
1984 		break;
1985 	case PMC_EV_P4_X87_ASSIST:
1986 		__P4SETMASK(xa);
1987 		break;
1988 	case PMC_EV_P4_MACHINE_CLEAR:
1989 		__P4SETMASK(machclr);
1990 		break;
1991 	default:
1992 		return (-1);
1993 	}
1994 
1995 	/* process additional flags */
1996 	while ((p = strsep(&ctrspec, ",")) != NULL) {
1997 		if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
1998 			q = strchr(p, '=');
1999 			if (*++q == '\0') /* skip '=' */
2000 				return (-1);
2001 
2002 			if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0)
2003 				cccractivemask = 0x0;
2004 			else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0)
2005 				cccractivemask = 0x1;
2006 			else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0)
2007 				cccractivemask = 0x2;
2008 			else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0)
2009 				cccractivemask = 0x3;
2010 			else
2011 				return (-1);
2012 
2013 		} else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
2014 			if (has_busreqtype == 0)
2015 				return (-1);
2016 
2017 			q = strchr(p, '=');
2018 			if (*++q == '\0') /* skip '=' */
2019 				return (-1);
2020 
2021 			count = strtol(q, &e, 0);
2022 			if (e == q || *e != '\0')
2023 				return (-1);
2024 			evmask = (evmask & ~0x1F) | (count & 0x1F);
2025 		} else if (KWMATCH(p, P4_KW_CASCADE))
2026 			pmc_config->pm_caps |= PMC_CAP_CASCADE;
2027 		else if (KWMATCH(p, P4_KW_EDGE))
2028 			pmc_config->pm_caps |= PMC_CAP_EDGE;
2029 		else if (KWMATCH(p, P4_KW_INV))
2030 			pmc_config->pm_caps |= PMC_CAP_INVERT;
2031 		else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
2032 			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
2033 				return (-1);
2034 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2035 		} else if (KWMATCH(p, P4_KW_OS))
2036 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2037 		else if (KWMATCH(p, P4_KW_PRECISE))
2038 			pmc_config->pm_caps |= PMC_CAP_PRECISE;
2039 		else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
2040 			if (has_tag == 0)
2041 				return (-1);
2042 
2043 			q = strchr(p, '=');
2044 			if (*++q == '\0') /* skip '=' */
2045 				return (-1);
2046 
2047 			count = strtol(q, &e, 0);
2048 			if (e == q || *e != '\0')
2049 				return (-1);
2050 
2051 			pmc_config->pm_caps |= PMC_CAP_TAGGING;
2052 			pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
2053 			    P4_ESCR_TO_TAG_VALUE(count);
2054 		} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
2055 			q = strchr(p, '=');
2056 			if (*++q == '\0') /* skip '=' */
2057 				return (-1);
2058 
2059 			count = strtol(q, &e, 0);
2060 			if (e == q || *e != '\0')
2061 				return (-1);
2062 
2063 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
2064 			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
2065 			    ~P4_CCCR_THRESHOLD_MASK;
2066 			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
2067 			    P4_CCCR_TO_THRESHOLD(count);
2068 		} else if (KWMATCH(p, P4_KW_USR))
2069 			pmc_config->pm_caps |= PMC_CAP_USER;
2070 		else
2071 			return (-1);
2072 	}
2073 
2074 	/* other post processing */
2075 	if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
2076 	    pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
2077 	    pe == PMC_EV_P4_BSQ_ALLOCATION)
2078 		pmc_config->pm_caps |= PMC_CAP_EDGE;
2079 
2080 	/* fill in thread activity mask */
2081 	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
2082 	    P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
2083 
2084 	if (evmask)
2085 		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2086 
2087 	switch (pe) {
2088 	case PMC_EV_P4_FSB_DATA_ACTIVITY:
2089 		if ((evmask & 0x06) == 0x06 ||
2090 		    (evmask & 0x18) == 0x18)
2091 			return (-1); /* can't have own+other bits together */
2092 		if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
2093 			evmask = 0x1D;
2094 		break;
2095 	case PMC_EV_P4_MACHINE_CLEAR:
2096 		/* only one bit is allowed to be set */
2097 		if ((evmask & (evmask - 1)) != 0)
2098 			return (-1);
2099 		if (evmask == 0) {
2100 			evmask = 0x1;	/* 'CLEAR' */
2101 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2102 		}
2103 		break;
2104 	default:
2105 		if (evmask == 0 && pmask) {
2106 			for (pm = pmask; pm->pm_name; pm++)
2107 				evmask |= pm->pm_value;
2108 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2109 		}
2110 	}
2111 
2112 	pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
2113 	    P4_ESCR_TO_EVENT_MASK(evmask);
2114 
2115 	return (0);
2116 }
2117 
2118 #endif
2119 
2120 #if defined(__i386__)
2121 
2122 /*
2123  * Pentium style PMCs
2124  */
2125 
/* Convenience aliases mapped to canonical Pentium (P5) event names. */
static struct pmc_event_alias p5_aliases[] = {
	EV_ALIAS("branches",		"p5-taken-branches"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p5-data-read-miss-or-write-miss"),
	EV_ALIAS("ic-misses",		"p5-code-cache-miss"),
	EV_ALIAS("instructions",	"p5-instructions-executed"),
	EV_ALIAS("interrupts",		"p5-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",
	    "p5-number-of-cycles-not-in-halt-state"),
	EV_ALIAS(NULL, NULL)
};
2137 
2138 static int
2139 p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
2140     struct pmc_op_pmcallocate *pmc_config)
2141 {
2142 	return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */
2143 }
2144 
2145 /*
2146  * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
2147  * and Pentium M CPUs.
2148  */
2149 
/* Convenience aliases mapped to canonical P6 family event names. */
static struct pmc_event_alias p6_aliases[] = {
	EV_ALIAS("branches",		"p6-br-inst-retired"),
	EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p6-dcu-lines-in"),
	EV_ALIAS("ic-misses",		"p6-ifu-fetch-miss"),
	EV_ALIAS("instructions",	"p6-inst-retired"),
	EV_ALIAS("interrupts",		"p6-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"p6-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};
2161 
/* Qualifier keywords recognized in a P6 event specification. */
#define	P6_KW_CMASK	"cmask"
#define	P6_KW_EDGE	"edge"
#define	P6_KW_INV	"inv"
#define	P6_KW_OS	"os"
#define	P6_KW_UMASK	"umask"
#define	P6_KW_USR	"usr"
2168 
/* MESI cache-line state qualifiers. */
static struct pmc_masks p6_mask_mesi[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	NULLMASK
};
2176 
/*
 * MESI qualifiers combined with hw/nonhw/both bits; selected for some
 * Pentium M L2 events (see p6_allocate_pmc()).
 */
static struct pmc_masks p6_mask_mesihw[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};
2187 
/* hw/nonhw/both qualifiers (Pentium M L2_M_LINES_OUTM event). */
static struct pmc_masks p6_mask_hw[] = {
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};
2194 
/* 'self' versus 'any' bus-agent qualifiers for bus events. */
static struct pmc_masks p6_mask_any[] = {
	PMCMASK(self,	0x00),
	PMCMASK(any,	0x20),
	NULLMASK
};
2200 
/* Prefetch-hint qualifiers for the KNI (SSE) prefetch events. */
static struct pmc_masks p6_mask_ekp[] = {
	PMCMASK(nta,	0x00),
	PMCMASK(t1,	0x01),
	PMCMASK(t2,	0x02),
	PMCMASK(wos,	0x03),
	NULLMASK
};
2208 
/* packed-and-scalar vs. scalar qualifiers for KNI instruction events. */
static struct pmc_masks p6_mask_pps[] = {
	PMCMASK(packed-and-scalar, 0x00),
	PMCMASK(scalar,	0x01),
	NULLMASK
};
2214 
/* MMX instruction-type qualifiers (MMX_INSTR_TYPE_EXEC event). */
static struct pmc_masks p6_mask_mite[] = {
	PMCMASK(packed-multiply,	 0x01),
	PMCMASK(packed-shift,		0x02),
	PMCMASK(pack,			0x04),
	PMCMASK(unpack,			0x08),
	PMCMASK(packed-logical,		0x10),
	PMCMASK(packed-arithmetic,	0x20),
	NULLMASK
};
2224 
/* Direction qualifiers for FP <-> MMX transition events. */
static struct pmc_masks p6_mask_fmt[] = {
	PMCMASK(mmxtofp,	0x00),
	PMCMASK(fptommx,	0x01),
	NULLMASK
};
2230 
/* Segment-register qualifiers for the segment rename events. */
static struct pmc_masks p6_mask_sr[] = {
	PMCMASK(es,	0x01),
	PMCMASK(ds,	0x02),
	PMCMASK(fs,	0x04),
	PMCMASK(gs,	0x08),
	NULLMASK
};
2238 
/* Qualifiers for the EMON_EST_TRANS event. */
static struct pmc_masks p6_mask_eet[] = {
	PMCMASK(all,	0x00),
	PMCMASK(freq,	0x02),
	NULLMASK
};
2244 
/* Qualifiers for the EMON_FUSED_UOPS_RET event. */
static struct pmc_masks p6_mask_efur[] = {
	PMCMASK(all,	0x00),
	PMCMASK(loadop,	0x01),
	PMCMASK(stdsta,	0x02),
	NULLMASK
};
2251 
/* SSE/SSE2 instruction-retired qualifiers. */
static struct pmc_masks p6_mask_essir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-packed-single-scalar-single, 0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};
2259 
/* SSE/SSE2 computational-instruction-retired qualifiers. */
static struct pmc_masks p6_mask_esscir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-scalar-single,	0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};
2267 
/*
 * P6 event parser.
 *
 * Parse a P6-family (Pentium Pro/II/III, Pentium M) event
 * specification.  'pe' is the event being allocated, 'ctrspec' the
 * comma-separated qualifier list that followed the event name, and
 * 'pmc_config' the allocation request being filled in.
 *
 * Returns 0 on success or -1 on a parse error.
 */
static int
p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint64_t evmask;
	int count, n;
	const struct pmc_masks *pm, *pmask;

	/* Every P6 counter supports read and write. */
	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;

	evmask = 0;

/* Select the table of "umask=" qualifiers valid for event group 'M'. */
#define	P6MASKSET(M)	pmask = p6_mask_ ## M

	/* Associate the event with its set of valid umask qualifiers. */
	switch(pe) {
	case PMC_EV_P6_L2_IFETCH:	P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:
		P6MASKSET(any);	break;
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:
		P6MASKSET(ekp); break;
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
		P6MASKSET(pps);	break;
	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
		P6MASKSET(mite); break;
	case PMC_EV_P6_FP_MMX_TRANS:
		P6MASKSET(fmt);	break;
	case PMC_EV_P6_SEG_RENAME_STALLS:
	case PMC_EV_P6_SEG_REG_RENAMES:
		P6MASKSET(sr);	break;
	case PMC_EV_P6_EMON_EST_TRANS:
		P6MASKSET(eet);	break;
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
		P6MASKSET(efur); break;
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
		P6MASKSET(essir); break;
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
		P6MASKSET(esscir); break;
	default:
		/* Event takes no umask qualifiers. */
		pmask = NULL;
		break;
	}

	/* Pentium M PMCs have a few events with different semantics */
	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
		if (pe == PMC_EV_P6_L2_LD ||
		    pe == PMC_EV_P6_L2_LINES_IN ||
		    pe == PMC_EV_P6_L2_LINES_OUT)
			P6MASKSET(mesihw);
		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
			P6MASKSET(hw);
	}

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
			/* "cmask=<n>": event count threshold. */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
			    P6_EVSEL_TO_CMASK(count);
		} else if (KWMATCH(p, P6_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, P6_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, P6_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
			/* "umask=<m>[+<m>...]": unit mask qualifiers. */
			evmask = 0;
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
			     pe == PMC_EV_P6_BUS_TRANS_IO ||
			     pe == PMC_EV_P6_BUS_TRANS_P ||
			     pe == PMC_EV_P6_BUS_TRANS_WB ||
			     pe == PMC_EV_P6_EMON_EST_TRANS ||
			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
			     pe == PMC_EV_P6_FP_MMX_TRANS)
			    && (n > 1))	/* Only one mask keyword is allowed. */
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P6_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* post processing */
	switch (pe) {

		/*
		 * The following events default to an evmask of 0
		 */

		/* default => 'self' */
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:

		/* default => 'nta' */
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:

		/* default => 'packed and scalar' */
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:

		/* default => 'mmx to fp transitions' */
	case PMC_EV_P6_FP_MMX_TRANS:

		/* default => 'SSE Packed Single' */
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:

		/* default => 'all fused micro-ops' */
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:

		/* default => 'all transitions' */
	case PMC_EV_P6_EMON_EST_TRANS:
		break;

	case PMC_EV_P6_MMX_UOPS_EXEC:
		evmask = 0x0F;		/* only value allowed */
		break;

	default:
		/*
		 * For all other events, set the default event mask
		 * to a logical OR of all the allowed event mask bits.
		 */
		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return (0);
}
2464 
2465 #endif
2466 
2467 #if	defined(__i386__) || defined(__amd64__)
2468 static int
2469 tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
2470     struct pmc_op_pmcallocate *pmc_config)
2471 {
2472 	if (pe != PMC_EV_TSC_TSC)
2473 		return (-1);
2474 
2475 	/* TSC events must be unqualified. */
2476 	if (ctrspec && *ctrspec != '\0')
2477 		return (-1);
2478 
2479 	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
2480 	pmc_config->pm_caps |= PMC_CAP_READ;
2481 
2482 	return (0);
2483 }
2484 #endif
2485 
/* Aliases used with the generic (software events only) class table. */
static struct pmc_event_alias generic_aliases[] = {
	EV_ALIAS("instructions",		"SOFT-CLOCK.HARD"),
	EV_ALIAS(NULL, NULL)
};
2490 
2491 static int
2492 soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
2493     struct pmc_op_pmcallocate *pmc_config)
2494 {
2495 	(void)ctrspec;
2496 	(void)pmc_config;
2497 
2498 	if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
2499 		return (-1);
2500 
2501 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2502 	return (0);
2503 }
2504 
2505 #if	defined(__arm__)
2506 #if	defined(__XSCALE__)
2507 
/* Convenience aliases for Intel XScale events. */
static struct pmc_event_alias xscale_aliases[] = {
	EV_ALIAS("branches",		"BRANCH_RETIRED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS("dc-misses",		"DC_MISS"),
	EV_ALIAS("ic-misses",		"IC_MISS"),
	EV_ALIAS("instructions",	"INSTR_RETIRED"),
	EV_ALIAS(NULL, NULL)
};
2516 static int
2517 xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2518     struct pmc_op_pmcallocate *pmc_config __unused)
2519 {
2520 	switch (pe) {
2521 	default:
2522 		break;
2523 	}
2524 
2525 	return (0);
2526 }
2527 #endif
2528 
/* Convenience aliases for ARM Cortex-A8 events. */
static struct pmc_event_alias cortex_a8_aliases[] = {
	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};
2535 
/* Convenience aliases for ARM Cortex-A9 events. */
static struct pmc_event_alias cortex_a9_aliases[] = {
	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};
2542 
2543 static int
2544 armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2545     struct pmc_op_pmcallocate *pmc_config __unused)
2546 {
2547 	switch (pe) {
2548 	default:
2549 		break;
2550 	}
2551 
2552 	return (0);
2553 }
2554 #endif
2555 
2556 #if	defined(__aarch64__)
/* No convenience aliases defined for Cortex-A53 yet. */
static struct pmc_event_alias cortex_a53_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
/* No convenience aliases defined for Cortex-A57 yet. */
static struct pmc_event_alias cortex_a57_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
2563 static int
2564 arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2565     struct pmc_op_pmcallocate *pmc_config __unused)
2566 {
2567 	switch (pe) {
2568 	default:
2569 		break;
2570 	}
2571 
2572 	return (0);
2573 }
2574 #endif
2575 
2576 #if defined(__mips__)
2577 
/* Convenience aliases for MIPS 24K events. */
static struct pmc_event_alias mips24k_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS("branches",		"BRANCH_COMPLETED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS(NULL, NULL)
};
2584 
/* Convenience aliases for MIPS 74K events. */
static struct pmc_event_alias mips74k_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS("branches",		"BRANCH_INSNS"),
	EV_ALIAS("branch-mispredicts",	"MISPREDICTED_BRANCH_INSNS"),
	EV_ALIAS(NULL, NULL)
};
2591 
/* Convenience aliases for Cavium Octeon events. */
static struct pmc_event_alias octeon_aliases[] = {
	EV_ALIAS("instructions",	"RET"),
	EV_ALIAS("branches",		"BR"),
	EV_ALIAS("branch-mispredicts",	"BRMIS"),
	EV_ALIAS(NULL, NULL)
};
2598 
/* Qualifier keywords recognized in a MIPS event specification. */
#define	MIPS_KW_OS		"os"
#define	MIPS_KW_USR		"usr"
#define	MIPS_KW_ANYTHREAD	"anythread"
2602 
2603 static int
2604 mips_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2605 		  struct pmc_op_pmcallocate *pmc_config __unused)
2606 {
2607 	char *p;
2608 
2609 	(void) pe;
2610 
2611 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2612 
2613 	while ((p = strsep(&ctrspec, ",")) != NULL) {
2614 		if (KWMATCH(p, MIPS_KW_OS))
2615 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2616 		else if (KWMATCH(p, MIPS_KW_USR))
2617 			pmc_config->pm_caps |= PMC_CAP_USER;
2618 		else if (KWMATCH(p, MIPS_KW_ANYTHREAD))
2619 			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
2620 		else
2621 			return (-1);
2622 	}
2623 
2624 	return (0);
2625 }
2626 
2627 #endif /* __mips__ */
2628 
2629 #if defined(__powerpc__)
2630 
/* Convenience aliases for PowerPC 7450 events. */
static struct pmc_event_alias ppc7450_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_COMPLETED"),
	EV_ALIAS("branches",		"BRANCHES_COMPLETED"),
	EV_ALIAS("branch-mispredicts",	"MISPREDICTED_BRANCHES"),
	EV_ALIAS(NULL, NULL)
};
2637 
/* Convenience aliases for PowerPC 970 events. */
static struct pmc_event_alias ppc970_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles",       "CYCLES"),
	EV_ALIAS(NULL, NULL)
};
2643 
/* Convenience aliases for Freescale e500 events. */
static struct pmc_event_alias e500_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles",       "CYCLES"),
	EV_ALIAS(NULL, NULL)
};
2649 
/* Qualifier keywords recognized in a PowerPC event specification. */
#define	POWERPC_KW_OS		"os"
#define	POWERPC_KW_USR		"usr"
#define	POWERPC_KW_ANYTHREAD	"anythread"
2653 
2654 static int
2655 powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2656 		     struct pmc_op_pmcallocate *pmc_config __unused)
2657 {
2658 	char *p;
2659 
2660 	(void) pe;
2661 
2662 	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2663 
2664 	while ((p = strsep(&ctrspec, ",")) != NULL) {
2665 		if (KWMATCH(p, POWERPC_KW_OS))
2666 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2667 		else if (KWMATCH(p, POWERPC_KW_USR))
2668 			pmc_config->pm_caps |= PMC_CAP_USER;
2669 		else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
2670 			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
2671 		else
2672 			return (-1);
2673 	}
2674 
2675 	return (0);
2676 }
2677 
2678 #endif /* __powerpc__ */
2679 
2680 
2681 /*
2682  * Match an event name `name' with its canonical form.
2683  *
2684  * Matches are case insensitive and spaces, periods, underscores and
2685  * hyphen characters are considered to match each other.
2686  *
2687  * Returns 1 for a match, 0 otherwise.
2688  */
2689 
2690 static int
2691 pmc_match_event_name(const char *name, const char *canonicalname)
2692 {
2693 	int cc, nc;
2694 	const unsigned char *c, *n;
2695 
2696 	c = (const unsigned char *) canonicalname;
2697 	n = (const unsigned char *) name;
2698 
2699 	for (; (nc = *n) && (cc = *c); n++, c++) {
2700 
2701 		if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
2702 		    (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
2703 			continue;
2704 
2705 		if (toupper(nc) == toupper(cc))
2706 			continue;
2707 
2708 
2709 		return (0);
2710 	}
2711 
2712 	if (*n == '\0' && *c == '\0')
2713 		return (1);
2714 
2715 	return (0);
2716 }
2717 
2718 /*
2719  * Match an event name against all the event named supported by a
2720  * PMC class.
2721  *
2722  * Returns an event descriptor pointer on match or NULL otherwise.
2723  */
2724 static const struct pmc_event_descr *
2725 pmc_match_event_class(const char *name,
2726     const struct pmc_class_descr *pcd)
2727 {
2728 	size_t n;
2729 	const struct pmc_event_descr *ev;
2730 
2731 	ev = pcd->pm_evc_event_table;
2732 	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
2733 		if (pmc_match_event_name(name, ev->pm_ev_name))
2734 			return (ev);
2735 
2736 	return (NULL);
2737 }
2738 
2739 static int
2740 pmc_mdep_is_compatible_class(enum pmc_class pc)
2741 {
2742 	size_t n;
2743 
2744 	for (n = 0; n < pmc_mdep_class_list_size; n++)
2745 		if (pmc_mdep_class_list[n] == pc)
2746 			return (1);
2747 	return (0);
2748 }
2749 
2750 /*
2751  * API entry points
2752  */
2753 
2754 int
2755 pmc_allocate(const char *ctrspec, enum pmc_mode mode,
2756     uint32_t flags, int cpu, pmc_id_t *pmcid)
2757 {
2758 	size_t n;
2759 	int retval;
2760 	char *r, *spec_copy;
2761 	const char *ctrname;
2762 	const struct pmc_event_descr *ev;
2763 	const struct pmc_event_alias *alias;
2764 	struct pmc_op_pmcallocate pmc_config;
2765 	const struct pmc_class_descr *pcd;
2766 
2767 	spec_copy = NULL;
2768 	retval    = -1;
2769 
2770 	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
2771 	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
2772 		errno = EINVAL;
2773 		goto out;
2774 	}
2775 
2776 	/* replace an event alias with the canonical event specifier */
2777 	if (pmc_mdep_event_aliases)
2778 		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
2779 			if (!strcasecmp(ctrspec, alias->pm_alias)) {
2780 				spec_copy = strdup(alias->pm_spec);
2781 				break;
2782 			}
2783 
2784 	if (spec_copy == NULL)
2785 		spec_copy = strdup(ctrspec);
2786 
2787 	r = spec_copy;
2788 	ctrname = strsep(&r, ",");
2789 
2790 	/*
2791 	 * If a explicit class prefix was given by the user, restrict the
2792 	 * search for the event to the specified PMC class.
2793 	 */
2794 	ev = NULL;
2795 	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
2796 		pcd = pmc_class_table[n];
2797 		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
2798 		    strncasecmp(ctrname, pcd->pm_evc_name,
2799 				pcd->pm_evc_name_size) == 0) {
2800 			if ((ev = pmc_match_event_class(ctrname +
2801 			    pcd->pm_evc_name_size, pcd)) == NULL) {
2802 				errno = EINVAL;
2803 				goto out;
2804 			}
2805 			break;
2806 		}
2807 	}
2808 
2809 	/*
2810 	 * Otherwise, search for this event in all compatible PMC
2811 	 * classes.
2812 	 */
2813 	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
2814 		pcd = pmc_class_table[n];
2815 		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
2816 			ev = pmc_match_event_class(ctrname, pcd);
2817 	}
2818 
2819 	if (ev == NULL) {
2820 		errno = EINVAL;
2821 		goto out;
2822 	}
2823 
2824 	bzero(&pmc_config, sizeof(pmc_config));
2825 	pmc_config.pm_ev    = ev->pm_ev_code;
2826 	pmc_config.pm_class = pcd->pm_evc_class;
2827 	pmc_config.pm_cpu   = cpu;
2828 	pmc_config.pm_mode  = mode;
2829 	pmc_config.pm_flags = flags;
2830 
2831 	if (PMC_IS_SAMPLING_MODE(mode))
2832 		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
2833 
2834  	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
2835 		errno = EINVAL;
2836 		goto out;
2837 	}
2838 
2839 	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
2840 		goto out;
2841 
2842 	*pmcid = pmc_config.pm_pmcid;
2843 
2844 	retval = 0;
2845 
2846  out:
2847 	if (spec_copy)
2848 		free(spec_copy);
2849 
2850 	return (retval);
2851 }
2852 
2853 int
2854 pmc_attach(pmc_id_t pmc, pid_t pid)
2855 {
2856 	struct pmc_op_pmcattach pmc_attach_args;
2857 
2858 	pmc_attach_args.pm_pmc = pmc;
2859 	pmc_attach_args.pm_pid = pid;
2860 
2861 	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
2862 }
2863 
2864 int
2865 pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
2866 {
2867 	unsigned int i;
2868 	enum pmc_class cl;
2869 
2870 	cl = PMC_ID_TO_CLASS(pmcid);
2871 	for (i = 0; i < cpu_info.pm_nclass; i++)
2872 		if (cpu_info.pm_classes[i].pm_class == cl) {
2873 			*caps = cpu_info.pm_classes[i].pm_caps;
2874 			return (0);
2875 		}
2876 	errno = EINVAL;
2877 	return (-1);
2878 }
2879 
2880 int
2881 pmc_configure_logfile(int fd)
2882 {
2883 	struct pmc_op_configurelog cla;
2884 
2885 	cla.pm_logfd = fd;
2886 	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
2887 		return (-1);
2888 	return (0);
2889 }
2890 
/*
 * Return a pointer to the library's cached CPU information in '*pci'.
 * Fails with ENXIO while the library is uninitialized (pmc_syscall is
 * still -1).
 */
int
pmc_cpuinfo(const struct pmc_cpuinfo **pci)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	*pci = &cpu_info;
	return (0);
}
2902 
2903 int
2904 pmc_detach(pmc_id_t pmc, pid_t pid)
2905 {
2906 	struct pmc_op_pmcattach pmc_detach_args;
2907 
2908 	pmc_detach_args.pm_pmc = pmc;
2909 	pmc_detach_args.pm_pid = pid;
2910 	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
2911 }
2912 
2913 int
2914 pmc_disable(int cpu, int pmc)
2915 {
2916 	struct pmc_op_pmcadmin ssa;
2917 
2918 	ssa.pm_cpu = cpu;
2919 	ssa.pm_pmc = pmc;
2920 	ssa.pm_state = PMC_STATE_DISABLED;
2921 	return (PMC_CALL(PMCADMIN, &ssa));
2922 }
2923 
2924 int
2925 pmc_enable(int cpu, int pmc)
2926 {
2927 	struct pmc_op_pmcadmin ssa;
2928 
2929 	ssa.pm_cpu = cpu;
2930 	ssa.pm_pmc = pmc;
2931 	ssa.pm_state = PMC_STATE_FREE;
2932 	return (PMC_CALL(PMCADMIN, &ssa));
2933 }
2934 
2935 /*
2936  * Return a list of events known to a given PMC class.  'cl' is the
2937  * PMC class identifier, 'eventnames' is the returned list of 'const
2938  * char *' pointers pointing to the names of the events. 'nevents' is
2939  * the number of event name pointers returned.
2940  *
2941  * The space for 'eventnames' is allocated using malloc(3).  The caller
2942  * is responsible for freeing this space when done.
2943  */
2944 int
2945 pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
2946     int *nevents)
2947 {
2948 	int count;
2949 	const char **names;
2950 	const struct pmc_event_descr *ev;
2951 
2952 	switch (cl)
2953 	{
2954 	case PMC_CLASS_IAF:
2955 		ev = iaf_event_table;
2956 		count = PMC_EVENT_TABLE_SIZE(iaf);
2957 		break;
2958 	case PMC_CLASS_IAP:
2959 		/*
2960 		 * Return the most appropriate set of event name
2961 		 * spellings for the current CPU.
2962 		 */
2963 		switch (cpu_info.pm_cputype) {
2964 		default:
2965 		case PMC_CPU_INTEL_ATOM:
2966 			ev = atom_event_table;
2967 			count = PMC_EVENT_TABLE_SIZE(atom);
2968 			break;
2969 		case PMC_CPU_INTEL_ATOM_SILVERMONT:
2970 			ev = atom_silvermont_event_table;
2971 			count = PMC_EVENT_TABLE_SIZE(atom_silvermont);
2972 			break;
2973 		case PMC_CPU_INTEL_CORE:
2974 			ev = core_event_table;
2975 			count = PMC_EVENT_TABLE_SIZE(core);
2976 			break;
2977 		case PMC_CPU_INTEL_CORE2:
2978 		case PMC_CPU_INTEL_CORE2EXTREME:
2979 			ev = core2_event_table;
2980 			count = PMC_EVENT_TABLE_SIZE(core2);
2981 			break;
2982 		case PMC_CPU_INTEL_COREI7:
2983 			ev = corei7_event_table;
2984 			count = PMC_EVENT_TABLE_SIZE(corei7);
2985 			break;
2986 		case PMC_CPU_INTEL_NEHALEM_EX:
2987 			ev = nehalem_ex_event_table;
2988 			count = PMC_EVENT_TABLE_SIZE(nehalem_ex);
2989 			break;
2990 		case PMC_CPU_INTEL_HASWELL:
2991 			ev = haswell_event_table;
2992 			count = PMC_EVENT_TABLE_SIZE(haswell);
2993 			break;
2994 		case PMC_CPU_INTEL_HASWELL_XEON:
2995 			ev = haswell_xeon_event_table;
2996 			count = PMC_EVENT_TABLE_SIZE(haswell_xeon);
2997 			break;
2998 		case PMC_CPU_INTEL_BROADWELL:
2999 			ev = broadwell_event_table;
3000 			count = PMC_EVENT_TABLE_SIZE(broadwell);
3001 			break;
3002 		case PMC_CPU_INTEL_BROADWELL_XEON:
3003 			ev = broadwell_xeon_event_table;
3004 			count = PMC_EVENT_TABLE_SIZE(broadwell_xeon);
3005 			break;
3006 		case PMC_CPU_INTEL_SKYLAKE:
3007 			ev = skylake_event_table;
3008 			count = PMC_EVENT_TABLE_SIZE(skylake);
3009 			break;
3010 		case PMC_CPU_INTEL_IVYBRIDGE:
3011 			ev = ivybridge_event_table;
3012 			count = PMC_EVENT_TABLE_SIZE(ivybridge);
3013 			break;
3014 		case PMC_CPU_INTEL_IVYBRIDGE_XEON:
3015 			ev = ivybridge_xeon_event_table;
3016 			count = PMC_EVENT_TABLE_SIZE(ivybridge_xeon);
3017 			break;
3018 		case PMC_CPU_INTEL_SANDYBRIDGE:
3019 			ev = sandybridge_event_table;
3020 			count = PMC_EVENT_TABLE_SIZE(sandybridge);
3021 			break;
3022 		case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
3023 			ev = sandybridge_xeon_event_table;
3024 			count = PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
3025 			break;
3026 		case PMC_CPU_INTEL_WESTMERE:
3027 			ev = westmere_event_table;
3028 			count = PMC_EVENT_TABLE_SIZE(westmere);
3029 			break;
3030 		case PMC_CPU_INTEL_WESTMERE_EX:
3031 			ev = westmere_ex_event_table;
3032 			count = PMC_EVENT_TABLE_SIZE(westmere_ex);
3033 			break;
3034 		}
3035 		break;
3036 	case PMC_CLASS_UCF:
3037 		ev = ucf_event_table;
3038 		count = PMC_EVENT_TABLE_SIZE(ucf);
3039 		break;
3040 	case PMC_CLASS_UCP:
3041 		/*
3042 		 * Return the most appropriate set of event name
3043 		 * spellings for the current CPU.
3044 		 */
3045 		switch (cpu_info.pm_cputype) {
3046 		default:
3047 		case PMC_CPU_INTEL_COREI7:
3048 			ev = corei7uc_event_table;
3049 			count = PMC_EVENT_TABLE_SIZE(corei7uc);
3050 			break;
3051 		case PMC_CPU_INTEL_HASWELL:
3052 			ev = haswelluc_event_table;
3053 			count = PMC_EVENT_TABLE_SIZE(haswelluc);
3054 			break;
3055 		case PMC_CPU_INTEL_BROADWELL:
3056 			ev = broadwelluc_event_table;
3057 			count = PMC_EVENT_TABLE_SIZE(broadwelluc);
3058 			break;
3059 		case PMC_CPU_INTEL_SANDYBRIDGE:
3060 			ev = sandybridgeuc_event_table;
3061 			count = PMC_EVENT_TABLE_SIZE(sandybridgeuc);
3062 			break;
3063 		case PMC_CPU_INTEL_WESTMERE:
3064 			ev = westmereuc_event_table;
3065 			count = PMC_EVENT_TABLE_SIZE(westmereuc);
3066 			break;
3067 		}
3068 		break;
3069 	case PMC_CLASS_TSC:
3070 		ev = tsc_event_table;
3071 		count = PMC_EVENT_TABLE_SIZE(tsc);
3072 		break;
3073 	case PMC_CLASS_K7:
3074 		ev = k7_event_table;
3075 		count = PMC_EVENT_TABLE_SIZE(k7);
3076 		break;
3077 	case PMC_CLASS_K8:
3078 		ev = k8_event_table;
3079 		count = PMC_EVENT_TABLE_SIZE(k8);
3080 		break;
3081 	case PMC_CLASS_P4:
3082 		ev = p4_event_table;
3083 		count = PMC_EVENT_TABLE_SIZE(p4);
3084 		break;
3085 	case PMC_CLASS_P5:
3086 		ev = p5_event_table;
3087 		count = PMC_EVENT_TABLE_SIZE(p5);
3088 		break;
3089 	case PMC_CLASS_P6:
3090 		ev = p6_event_table;
3091 		count = PMC_EVENT_TABLE_SIZE(p6);
3092 		break;
3093 	case PMC_CLASS_XSCALE:
3094 		ev = xscale_event_table;
3095 		count = PMC_EVENT_TABLE_SIZE(xscale);
3096 		break;
3097 	case PMC_CLASS_ARMV7:
3098 		switch (cpu_info.pm_cputype) {
3099 		default:
3100 		case PMC_CPU_ARMV7_CORTEX_A8:
3101 			ev = cortex_a8_event_table;
3102 			count = PMC_EVENT_TABLE_SIZE(cortex_a8);
3103 			break;
3104 		case PMC_CPU_ARMV7_CORTEX_A9:
3105 			ev = cortex_a9_event_table;
3106 			count = PMC_EVENT_TABLE_SIZE(cortex_a9);
3107 			break;
3108 		}
3109 		break;
3110 	case PMC_CLASS_ARMV8:
3111 		switch (cpu_info.pm_cputype) {
3112 		default:
3113 		case PMC_CPU_ARMV8_CORTEX_A53:
3114 			ev = cortex_a53_event_table;
3115 			count = PMC_EVENT_TABLE_SIZE(cortex_a53);
3116 			break;
3117 		case PMC_CPU_ARMV8_CORTEX_A57:
3118 			ev = cortex_a57_event_table;
3119 			count = PMC_EVENT_TABLE_SIZE(cortex_a57);
3120 			break;
3121 		}
3122 		break;
3123 	case PMC_CLASS_MIPS24K:
3124 		ev = mips24k_event_table;
3125 		count = PMC_EVENT_TABLE_SIZE(mips24k);
3126 		break;
3127 	case PMC_CLASS_MIPS74K:
3128 		ev = mips74k_event_table;
3129 		count = PMC_EVENT_TABLE_SIZE(mips74k);
3130 		break;
3131 	case PMC_CLASS_OCTEON:
3132 		ev = octeon_event_table;
3133 		count = PMC_EVENT_TABLE_SIZE(octeon);
3134 		break;
3135 	case PMC_CLASS_PPC7450:
3136 		ev = ppc7450_event_table;
3137 		count = PMC_EVENT_TABLE_SIZE(ppc7450);
3138 		break;
3139 	case PMC_CLASS_PPC970:
3140 		ev = ppc970_event_table;
3141 		count = PMC_EVENT_TABLE_SIZE(ppc970);
3142 		break;
3143 	case PMC_CLASS_E500:
3144 		ev = e500_event_table;
3145 		count = PMC_EVENT_TABLE_SIZE(e500);
3146 		break;
3147 	case PMC_CLASS_SOFT:
3148 		ev = soft_event_table;
3149 		count = soft_event_info.pm_nevent;
3150 		break;
3151 	default:
3152 		errno = EINVAL;
3153 		return (-1);
3154 	}
3155 
3156 	if ((names = malloc(count * sizeof(const char *))) == NULL)
3157 		return (-1);
3158 
3159 	*eventnames = names;
3160 	*nevents = count;
3161 
3162 	for (;count--; ev++, names++)
3163 		*names = ev->pm_ev_name;
3164 
3165 	return (0);
3166 }
3167 
3168 int
3169 pmc_flush_logfile(void)
3170 {
3171 	return (PMC_CALL(FLUSHLOG,0));
3172 }
3173 
3174 int
3175 pmc_close_logfile(void)
3176 {
3177 	return (PMC_CALL(CLOSELOG,0));
3178 }
3179 
3180 int
3181 pmc_get_driver_stats(struct pmc_driverstats *ds)
3182 {
3183 	struct pmc_op_getdriverstats gms;
3184 
3185 	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
3186 		return (-1);
3187 
3188 	/* copy out fields in the current userland<->library interface */
3189 	ds->pm_intr_ignored    = gms.pm_intr_ignored;
3190 	ds->pm_intr_processed  = gms.pm_intr_processed;
3191 	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
3192 	ds->pm_syscalls        = gms.pm_syscalls;
3193 	ds->pm_syscall_errors  = gms.pm_syscall_errors;
3194 	ds->pm_buffer_requests = gms.pm_buffer_requests;
3195 	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
3196 	ds->pm_log_sweeps      = gms.pm_log_sweeps;
3197 	return (0);
3198 }
3199 
3200 int
3201 pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
3202 {
3203 	struct pmc_op_getmsr gm;
3204 
3205 	gm.pm_pmcid = pmc;
3206 	if (PMC_CALL(PMCGETMSR, &gm) < 0)
3207 		return (-1);
3208 	*msr = gm.pm_msr;
3209 	return (0);
3210 }
3211 
/*
 * Initialize the library.
 *
 * Locates the kernel PMC module and caches its system call number,
 * verifies that the module's ABI major version matches the library's
 * compiled-in PMC_VERSION, snapshots the kernel's CPU description into
 * the global 'cpu_info', and builds the per-CPU event-name parsing
 * tables ('pmc_class_table', 'pmc_mdep_*') used by the rest of the
 * library.
 *
 * Returns 0 on success.  On failure returns -1 with errno set and, once
 * the module has been found, also resets 'pmc_syscall' to -1 so that
 * later API calls fail with ENXIO rather than using a stale state.
 */
int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;
#if defined(__amd64__) || defined(__i386__)
	int cpu_has_iaf_counters;
	unsigned int t;
#endif

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno  = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	/* Cache the kernel's view of the CPU and its PMC classes. */
	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
	for (n = 0; n < cpu_info.pm_nclass; n++)
		cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];

	pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
	    sizeof(struct pmc_class_descr *));

	if (pmc_class_table == NULL)
		return (-1);

	/* Start with an empty class table; entries are filled in below. */
	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
		pmc_class_table[n] = NULL;

	/*
	 * Get soft events list.
	 */
	soft_event_info.pm_class = PMC_CLASS_SOFT;
	if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0)
		return (pmc_syscall = -1);

	/* Map soft events to static list. */
	for (n = 0; n < soft_event_info.pm_nevent; n++) {
		soft_event_table[n].pm_ev_name =
		    soft_event_info.pm_events[n].pm_ev_name;
		soft_event_table[n].pm_ev_code =
		    soft_event_info.pm_events[n].pm_ev_code;
	}
	soft_class_table_descr.pm_evc_event_table_size = \
	    soft_event_info.pm_nevent;
	soft_class_table_descr.pm_evc_event_table = \
	    soft_event_table;

	/*
	 * Fill in the class table.
	 */
	n = 0;

	/* Fill soft events information. */
	pmc_class_table[n++] = &soft_class_table_descr;
#if defined(__amd64__) || defined(__i386__)
	if (cpu_info.pm_cputype != PMC_CPU_GENERIC)
		pmc_class_table[n++] = &tsc_class_table_descr;

	/*
	 * Check if this CPU has fixed function counters.
	 */
	cpu_has_iaf_counters = 0;
	for (t = 0; t < cpu_info.pm_nclass; t++)
		if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF &&
		    cpu_info.pm_classes[t].pm_num > 0)
			cpu_has_iaf_counters = 1;
#endif

	/*
	 * PMC_MDEP_INIT(C) points the machine-dependent alias and class
	 * lists at the tables generated for CPU family 'C'.
	 */
#define	PMC_MDEP_INIT(C) do {					\
		pmc_mdep_event_aliases    = C##_aliases;	\
		pmc_mdep_class_list  = C##_pmc_classes;		\
		pmc_mdep_class_list_size =			\
		    PMC_TABLE_SIZE(C##_pmc_classes);		\
	} while (0)

	/*
	 * Intel-V2 variant: additionally registers the fixed-function
	 * (IAF) class descriptor and, on CPUs lacking fixed-function
	 * counters, substitutes the alias table without IAF spellings.
	 */
#define	PMC_MDEP_INIT_INTEL_V2(C) do {					\
		PMC_MDEP_INIT(C);					\
		pmc_class_table[n++] = &iaf_class_table_descr;		\
		if (!cpu_has_iaf_counters) 				\
			pmc_mdep_event_aliases =			\
				C##_aliases_without_iaf;		\
		pmc_class_table[n] = &C##_class_table_descr;		\
	} while (0)

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__i386__)
	case PMC_CPU_AMD_K7:
		PMC_MDEP_INIT(k7);
		pmc_class_table[n] = &k7_class_table_descr;
		break;
	case PMC_CPU_INTEL_P5:
		PMC_MDEP_INIT(p5);
		pmc_class_table[n]  = &p5_class_table_descr;
		break;
	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
	case PMC_CPU_INTEL_PIII:
	case PMC_CPU_INTEL_PM:
		PMC_MDEP_INIT(p6);
		pmc_class_table[n] = &p6_class_table_descr;
		break;
#endif
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		pmc_class_table[n] = &k8_class_table_descr;
		break;
	case PMC_CPU_INTEL_ATOM:
		PMC_MDEP_INIT_INTEL_V2(atom);
		break;
	case PMC_CPU_INTEL_ATOM_SILVERMONT:
		PMC_MDEP_INIT_INTEL_V2(atom_silvermont);
		break;
	case PMC_CPU_INTEL_CORE:
		PMC_MDEP_INIT(core);
		pmc_class_table[n] = &core_class_table_descr;
		break;
	case PMC_CPU_INTEL_CORE2:
	case PMC_CPU_INTEL_CORE2EXTREME:
		PMC_MDEP_INIT_INTEL_V2(core2);
		break;
	case PMC_CPU_INTEL_COREI7:
		/* CPUs with uncore support also get the UCF/UCP classes. */
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &corei7uc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(corei7);
		break;
	case PMC_CPU_INTEL_NEHALEM_EX:
		PMC_MDEP_INIT_INTEL_V2(nehalem_ex);
		break;
	case PMC_CPU_INTEL_HASWELL:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &haswelluc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(haswell);
		break;
	case PMC_CPU_INTEL_HASWELL_XEON:
		PMC_MDEP_INIT_INTEL_V2(haswell_xeon);
		break;
	case PMC_CPU_INTEL_BROADWELL:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &broadwelluc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(broadwell);
		break;
	case PMC_CPU_INTEL_BROADWELL_XEON:
		PMC_MDEP_INIT_INTEL_V2(broadwell_xeon);
		break;
	case PMC_CPU_INTEL_SKYLAKE:
		PMC_MDEP_INIT_INTEL_V2(skylake);
		break;
	case PMC_CPU_INTEL_IVYBRIDGE:
		PMC_MDEP_INIT_INTEL_V2(ivybridge);
		break;
	case PMC_CPU_INTEL_IVYBRIDGE_XEON:
		PMC_MDEP_INIT_INTEL_V2(ivybridge_xeon);
		break;
	case PMC_CPU_INTEL_SANDYBRIDGE:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &sandybridgeuc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(sandybridge);
		break;
	case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
		PMC_MDEP_INIT_INTEL_V2(sandybridge_xeon);
		break;
	case PMC_CPU_INTEL_WESTMERE:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &westmereuc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(westmere);
		break;
	case PMC_CPU_INTEL_WESTMERE_EX:
		PMC_MDEP_INIT_INTEL_V2(westmere_ex);
		break;
	case PMC_CPU_INTEL_PIV:
		PMC_MDEP_INIT(p4);
		pmc_class_table[n] = &p4_class_table_descr;
		break;
#endif
	case PMC_CPU_GENERIC:
		PMC_MDEP_INIT(generic);
		break;
#if defined(__arm__)
#if defined(__XSCALE__)
	case PMC_CPU_INTEL_XSCALE:
		PMC_MDEP_INIT(xscale);
		pmc_class_table[n] = &xscale_class_table_descr;
		break;
#endif
	case PMC_CPU_ARMV7_CORTEX_A8:
		PMC_MDEP_INIT(cortex_a8);
		pmc_class_table[n] = &cortex_a8_class_table_descr;
		break;
	case PMC_CPU_ARMV7_CORTEX_A9:
		PMC_MDEP_INIT(cortex_a9);
		pmc_class_table[n] = &cortex_a9_class_table_descr;
		break;
#endif
#if defined(__aarch64__)
	case PMC_CPU_ARMV8_CORTEX_A53:
		PMC_MDEP_INIT(cortex_a53);
		pmc_class_table[n] = &cortex_a53_class_table_descr;
		break;
	case PMC_CPU_ARMV8_CORTEX_A57:
		PMC_MDEP_INIT(cortex_a57);
		pmc_class_table[n] = &cortex_a57_class_table_descr;
		break;
#endif
#if defined(__mips__)
	case PMC_CPU_MIPS_24K:
		PMC_MDEP_INIT(mips24k);
		pmc_class_table[n] = &mips24k_class_table_descr;
		break;
	case PMC_CPU_MIPS_74K:
		PMC_MDEP_INIT(mips74k);
		pmc_class_table[n] = &mips74k_class_table_descr;
		break;
	case PMC_CPU_MIPS_OCTEON:
		PMC_MDEP_INIT(octeon);
		pmc_class_table[n] = &octeon_class_table_descr;
		break;
#endif /* __mips__ */
#if defined(__powerpc__)
	case PMC_CPU_PPC_7450:
		PMC_MDEP_INIT(ppc7450);
		pmc_class_table[n] = &ppc7450_class_table_descr;
		break;
	case PMC_CPU_PPC_970:
		PMC_MDEP_INIT(ppc970);
		pmc_class_table[n] = &ppc970_class_table_descr;
		break;
	case PMC_CPU_PPC_E500:
		PMC_MDEP_INIT(e500);
		pmc_class_table[n] = &e500_class_table_descr;
		break;
#endif
	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about.  This shouldn't happen since the abi version check
		 * should have caught this.
		 */
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}
3485 
3486 const char *
3487 pmc_name_of_capability(enum pmc_caps cap)
3488 {
3489 	int i;
3490 
3491 	/*
3492 	 * 'cap' should have a single bit set and should be in
3493 	 * range.
3494 	 */
3495 	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
3496 	    cap > PMC_CAP_LAST) {
3497 		errno = EINVAL;
3498 		return (NULL);
3499 	}
3500 
3501 	i = ffs(cap);
3502 	return (pmc_capability_names[i - 1]);
3503 }
3504 
3505 const char *
3506 pmc_name_of_class(enum pmc_class pc)
3507 {
3508 	size_t n;
3509 
3510 	for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++)
3511 		if (pc == pmc_class_names[n].pm_class)
3512 			return (pmc_class_names[n].pm_name);
3513 
3514 	errno = EINVAL;
3515 	return (NULL);
3516 }
3517 
3518 const char *
3519 pmc_name_of_cputype(enum pmc_cputype cp)
3520 {
3521 	size_t n;
3522 
3523 	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
3524 		if (cp == pmc_cputype_names[n].pm_cputype)
3525 			return (pmc_cputype_names[n].pm_name);
3526 
3527 	errno = EINVAL;
3528 	return (NULL);
3529 }
3530 
3531 const char *
3532 pmc_name_of_disposition(enum pmc_disp pd)
3533 {
3534 	if ((int) pd >= PMC_DISP_FIRST &&
3535 	    pd <= PMC_DISP_LAST)
3536 		return (pmc_disposition_names[pd]);
3537 
3538 	errno = EINVAL;
3539 	return (NULL);
3540 }
3541 
/*
 * Internal helper: return the symbolic name of event code 'pe' using
 * the event-name spellings appropriate for CPU type 'cpu', or NULL if
 * the event is not known for that CPU.
 *
 * The event code's numeric range selects the event table to search;
 * 'evfence' points one past the table's last entry.  Does not set
 * errno itself; the public wrapper pmc_name_of_event() does that.
 */
const char *
_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
{
	const struct pmc_event_descr *ev, *evfence;

	/* If no range matches below, ev == evfence and we return NULL. */
	ev = evfence = NULL;
	if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) {
		/* Intel fixed-function counter events. */
		ev = iaf_event_table;
		evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf);
	} else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
		/* Intel programmable events: table depends on the CPU. */
		switch (cpu) {
		case PMC_CPU_INTEL_ATOM:
			ev = atom_event_table;
			evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
			break;
		case PMC_CPU_INTEL_ATOM_SILVERMONT:
			ev = atom_silvermont_event_table;
			evfence = atom_silvermont_event_table +
			    PMC_EVENT_TABLE_SIZE(atom_silvermont);
			break;
		case PMC_CPU_INTEL_CORE:
			ev = core_event_table;
			evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
			break;
		case PMC_CPU_INTEL_CORE2:
		case PMC_CPU_INTEL_CORE2EXTREME:
			ev = core2_event_table;
			evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
			break;
		case PMC_CPU_INTEL_COREI7:
			ev = corei7_event_table;
			evfence = corei7_event_table + PMC_EVENT_TABLE_SIZE(corei7);
			break;
		case PMC_CPU_INTEL_NEHALEM_EX:
			ev = nehalem_ex_event_table;
			evfence = nehalem_ex_event_table +
			    PMC_EVENT_TABLE_SIZE(nehalem_ex);
			break;
		case PMC_CPU_INTEL_HASWELL:
			ev = haswell_event_table;
			evfence = haswell_event_table + PMC_EVENT_TABLE_SIZE(haswell);
			break;
		case PMC_CPU_INTEL_HASWELL_XEON:
			ev = haswell_xeon_event_table;
			evfence = haswell_xeon_event_table + PMC_EVENT_TABLE_SIZE(haswell_xeon);
			break;
		case PMC_CPU_INTEL_BROADWELL:
			ev = broadwell_event_table;
			evfence = broadwell_event_table + PMC_EVENT_TABLE_SIZE(broadwell);
			break;
		case PMC_CPU_INTEL_BROADWELL_XEON:
			ev = broadwell_xeon_event_table;
			evfence = broadwell_xeon_event_table + PMC_EVENT_TABLE_SIZE(broadwell_xeon);
			break;
		case PMC_CPU_INTEL_SKYLAKE:
			ev = skylake_event_table;
			evfence = skylake_event_table + PMC_EVENT_TABLE_SIZE(skylake);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE:
			ev = ivybridge_event_table;
			evfence = ivybridge_event_table + PMC_EVENT_TABLE_SIZE(ivybridge);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE_XEON:
			ev = ivybridge_xeon_event_table;
			evfence = ivybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(ivybridge_xeon);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridge_event_table;
			evfence = sandybridge_event_table + PMC_EVENT_TABLE_SIZE(sandybridge);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
			ev = sandybridge_xeon_event_table;
			evfence = sandybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmere_event_table;
			evfence = westmere_event_table + PMC_EVENT_TABLE_SIZE(westmere);
			break;
		case PMC_CPU_INTEL_WESTMERE_EX:
			ev = westmere_ex_event_table;
			evfence = westmere_ex_event_table +
			    PMC_EVENT_TABLE_SIZE(westmere_ex);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_UCF_FIRST && pe <= PMC_EV_UCF_LAST) {
		/* Intel uncore fixed-function events. */
		ev = ucf_event_table;
		evfence = ucf_event_table + PMC_EVENT_TABLE_SIZE(ucf);
	} else if (pe >= PMC_EV_UCP_FIRST && pe <= PMC_EV_UCP_LAST) {
		/* Intel uncore programmable events: per-CPU tables. */
		switch (cpu) {
		case PMC_CPU_INTEL_COREI7:
			ev = corei7uc_event_table;
			evfence = corei7uc_event_table + PMC_EVENT_TABLE_SIZE(corei7uc);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridgeuc_event_table;
			evfence = sandybridgeuc_event_table + PMC_EVENT_TABLE_SIZE(sandybridgeuc);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmereuc_event_table;
			evfence = westmereuc_event_table + PMC_EVENT_TABLE_SIZE(westmereuc);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
		ev = k7_event_table;
		evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
	} else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
		ev = k8_event_table;
		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
	} else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
		ev = p4_event_table;
		evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
	} else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
		ev = p5_event_table;
		evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
	} else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
		ev = p6_event_table;
		evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
	} else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
		ev = xscale_event_table;
		evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
	} else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
		/* ARMv7: per-core tables. */
		switch (cpu) {
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
		/* ARMv8: per-core tables. */
		switch (cpu) {
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
		ev = mips24k_event_table;
		evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
	} else if (pe >= PMC_EV_MIPS74K_FIRST && pe <= PMC_EV_MIPS74K_LAST) {
		ev = mips74k_event_table;
		evfence = mips74k_event_table + PMC_EVENT_TABLE_SIZE(mips74k);
	} else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) {
		ev = octeon_event_table;
		evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon);
	} else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
		ev = ppc7450_event_table;
		evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
	} else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
		ev = ppc970_event_table;
		evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
	} else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) {
		ev = e500_event_table;
		evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500);
	} else if (pe == PMC_EV_TSC_TSC) {
		ev = tsc_event_table;
		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
	} else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
		/* Soft events: table size is discovered at pmc_init() time. */
		ev = soft_event_table;
		evfence = soft_event_table + soft_event_info.pm_nevent;
	}

	/* Linear scan of the selected table for a matching event code. */
	for (; ev != evfence; ev++)
		if (pe == ev->pm_ev_code)
			return (ev->pm_ev_name);

	return (NULL);
}
3724 
3725 const char *
3726 pmc_name_of_event(enum pmc_event pe)
3727 {
3728 	const char *n;
3729 
3730 	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
3731 		return (n);
3732 
3733 	errno = EINVAL;
3734 	return (NULL);
3735 }
3736 
3737 const char *
3738 pmc_name_of_mode(enum pmc_mode pm)
3739 {
3740 	if ((int) pm >= PMC_MODE_FIRST &&
3741 	    pm <= PMC_MODE_LAST)
3742 		return (pmc_mode_names[pm]);
3743 
3744 	errno = EINVAL;
3745 	return (NULL);
3746 }
3747 
3748 const char *
3749 pmc_name_of_state(enum pmc_state ps)
3750 {
3751 	if ((int) ps >= PMC_STATE_FIRST &&
3752 	    ps <= PMC_STATE_LAST)
3753 		return (pmc_state_names[ps]);
3754 
3755 	errno = EINVAL;
3756 	return (NULL);
3757 }
3758 
3759 int
3760 pmc_ncpu(void)
3761 {
3762 	if (pmc_syscall == -1) {
3763 		errno = ENXIO;
3764 		return (-1);
3765 	}
3766 
3767 	return (cpu_info.pm_ncpu);
3768 }
3769 
3770 int
3771 pmc_npmc(int cpu)
3772 {
3773 	if (pmc_syscall == -1) {
3774 		errno = ENXIO;
3775 		return (-1);
3776 	}
3777 
3778 	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
3779 		errno = EINVAL;
3780 		return (-1);
3781 	}
3782 
3783 	return (cpu_info.pm_npmc);
3784 }
3785 
3786 int
3787 pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
3788 {
3789 	int nbytes, npmc;
3790 	struct pmc_op_getpmcinfo *pmci;
3791 
3792 	if ((npmc = pmc_npmc(cpu)) < 0)
3793 		return (-1);
3794 
3795 	nbytes = sizeof(struct pmc_op_getpmcinfo) +
3796 	    npmc * sizeof(struct pmc_info);
3797 
3798 	if ((pmci = calloc(1, nbytes)) == NULL)
3799 		return (-1);
3800 
3801 	pmci->pm_cpu  = cpu;
3802 
3803 	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
3804 		free(pmci);
3805 		return (-1);
3806 	}
3807 
3808 	/* kernel<->library, library<->userland interfaces are identical */
3809 	*ppmci = (struct pmc_pmcinfo *) pmci;
3810 	return (0);
3811 }
3812 
3813 int
3814 pmc_read(pmc_id_t pmc, pmc_value_t *value)
3815 {
3816 	struct pmc_op_pmcrw pmc_read_op;
3817 
3818 	pmc_read_op.pm_pmcid = pmc;
3819 	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
3820 	pmc_read_op.pm_value = -1;
3821 
3822 	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
3823 		return (-1);
3824 
3825 	*value = pmc_read_op.pm_value;
3826 	return (0);
3827 }
3828 
3829 int
3830 pmc_release(pmc_id_t pmc)
3831 {
3832 	struct pmc_op_simple	pmc_release_args;
3833 
3834 	pmc_release_args.pm_pmcid = pmc;
3835 	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
3836 }
3837 
3838 int
3839 pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
3840 {
3841 	struct pmc_op_pmcrw pmc_rw_op;
3842 
3843 	pmc_rw_op.pm_pmcid = pmc;
3844 	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
3845 	pmc_rw_op.pm_value = newvalue;
3846 
3847 	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
3848 		return (-1);
3849 
3850 	*oldvaluep = pmc_rw_op.pm_value;
3851 	return (0);
3852 }
3853 
3854 int
3855 pmc_set(pmc_id_t pmc, pmc_value_t value)
3856 {
3857 	struct pmc_op_pmcsetcount sc;
3858 
3859 	sc.pm_pmcid = pmc;
3860 	sc.pm_count = value;
3861 
3862 	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
3863 		return (-1);
3864 	return (0);
3865 }
3866 
3867 int
3868 pmc_start(pmc_id_t pmc)
3869 {
3870 	struct pmc_op_simple	pmc_start_args;
3871 
3872 	pmc_start_args.pm_pmcid = pmc;
3873 	return (PMC_CALL(PMCSTART, &pmc_start_args));
3874 }
3875 
3876 int
3877 pmc_stop(pmc_id_t pmc)
3878 {
3879 	struct pmc_op_simple	pmc_stop_args;
3880 
3881 	pmc_stop_args.pm_pmcid = pmc;
3882 	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
3883 }
3884 
3885 int
3886 pmc_width(pmc_id_t pmcid, uint32_t *width)
3887 {
3888 	unsigned int i;
3889 	enum pmc_class cl;
3890 
3891 	cl = PMC_ID_TO_CLASS(pmcid);
3892 	for (i = 0; i < cpu_info.pm_nclass; i++)
3893 		if (cpu_info.pm_classes[i].pm_class == cl) {
3894 			*width = cpu_info.pm_classes[i].pm_width;
3895 			return (0);
3896 		}
3897 	errno = EINVAL;
3898 	return (-1);
3899 }
3900 
3901 int
3902 pmc_write(pmc_id_t pmc, pmc_value_t value)
3903 {
3904 	struct pmc_op_pmcrw pmc_write_op;
3905 
3906 	pmc_write_op.pm_pmcid = pmc;
3907 	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
3908 	pmc_write_op.pm_value = value;
3909 	return (PMC_CALL(PMCRW, &pmc_write_op));
3910 }
3911 
3912 int
3913 pmc_writelog(uint32_t userdata)
3914 {
3915 	struct pmc_op_writelog wl;
3916 
3917 	wl.pm_userdata = userdata;
3918 	return (PMC_CALL(WRITELOG, &wl));
3919 }
3920