xref: /freebsd/lib/libpmc/libpmc.c (revision 2be1a816b9ff69588e55be0a84cbe2a31efc0f2f)
1 /*-
2  * Copyright (c) 2003-2008 Joseph Koshy
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/types.h>
31 #include <sys/module.h>
32 #include <sys/pmc.h>
33 #include <sys/syscall.h>
34 
35 #include <ctype.h>
36 #include <errno.h>
37 #include <fcntl.h>
38 #include <pmc.h>
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <string.h>
42 #include <strings.h>
43 #include <unistd.h>
44 
45 /* Function prototypes */
46 #if defined(__i386__)
47 static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
48     struct pmc_op_pmcallocate *_pmc_config);
49 #endif
50 #if defined(__amd64__) || defined(__i386__)
51 static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
52     struct pmc_op_pmcallocate *_pmc_config);
53 static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
54     struct pmc_op_pmcallocate *_pmc_config);
55 #endif
56 #if defined(__i386__)
57 static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
58     struct pmc_op_pmcallocate *_pmc_config);
59 static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
60     struct pmc_op_pmcallocate *_pmc_config);
61 #endif
62 
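/*
 * Invoke an operation of the hwpmc(4) kernel module via its
 * dynamically assigned system call number; 'pmc_syscall' is looked
 * up by pmc_init() below.
 */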
63 #define PMC_CALL(cmd, params)				\
64 	syscall(pmc_syscall, PMC_OP_##cmd, (params))
65 
66 /*
67  * Event aliases provide a way for the user to ask for generic events
68  * like "cache-misses" or "instructions-retired".  These aliases are
69  * mapped to the appropriate canonical event descriptions using a
70  * lookup table.
71  */
72 struct pmc_event_alias {
73 	const char	*pm_alias;
74 	const char	*pm_spec;
75 };
76 
77 static const struct pmc_event_alias *pmc_mdep_event_aliases;
78 
79 /*
80  * The pmc_event_descr table maps symbolic names known to the user
81  * to integer codes used by the PMC KLD.
82  */
83 struct pmc_event_descr {
84 	const char	*pm_ev_name;
85 	enum pmc_event	pm_ev_code;
86 	enum pmc_class	pm_ev_class;
87 };
88 
89 static const struct pmc_event_descr
90 pmc_event_table[] =
91 {
92 #undef  __PMC_EV
93 #define	__PMC_EV(C,N,EV) { #EV, PMC_EV_ ## C ## _ ## N, PMC_CLASS_ ## C },
94 	__PMC_EVENTS()
95 };
96 
97 /*
98  * Mapping tables that convert enumeration values to human-readable
99  * strings.
100  */
101 
102 static const char * pmc_capability_names[] = {
103 #undef	__PMC_CAP
104 #define	__PMC_CAP(N,V,D)	#N ,
105 	__PMC_CAPS()
106 };
107 
108 static const char * pmc_class_names[] = {
109 #undef	__PMC_CLASS
110 #define __PMC_CLASS(C)	#C ,
111 	__PMC_CLASSES()
112 };
113 
114 static const char * pmc_cputype_names[] = {
115 #undef	__PMC_CPU
116 #define	__PMC_CPU(S, D) #S ,
117 	__PMC_CPUS()
118 };
119 
120 static const char * pmc_disposition_names[] = {
121 #undef	__PMC_DISP
122 #define	__PMC_DISP(D)	#D ,
123 	__PMC_DISPOSITIONS()
124 };
125 
126 static const char * pmc_mode_names[] = {
127 #undef  __PMC_MODE
128 #define __PMC_MODE(M,N)	#M ,
129 	__PMC_MODES()
130 };
131 
132 static const char * pmc_state_names[] = {
133 #undef  __PMC_STATE
134 #define __PMC_STATE(S) #S ,
135 	__PMC_STATES()
136 };
137 
138 static int pmc_syscall = -1;		/* filled in by pmc_init() */
139 
140 static struct pmc_cpuinfo cpu_info;	/* filled in by pmc_init() */
141 
142 
143 /* Architecture dependent event parsing */
144 static int (*pmc_mdep_allocate_pmc)(enum pmc_event _pe, char *_ctrspec,
145     struct pmc_op_pmcallocate *_pmc_config);
146 
147 /* Event masks for events */
148 struct pmc_masks {
149 	const char	*pm_name;
150 	const uint32_t	pm_value;
151 };
152 #define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
153 #define	NULLMASK	PMCMASK(NULL,0)
154 
155 #if defined(__amd64__) || defined(__i386__)
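/*
 * Parse a "<keyword>=<mask1>+<mask2>+..." qualifier against the mask
 * table 'pmask', OR-ing the value of every recognized mask keyword
 * into *evmask.  Returns the number of keywords matched, or -1 on
 * error.
 *
 * Illustrative example: parsing "mask=ic-fill+dc-fill" against the
 * k8_mask_bilr table defined below sets bits 0 and 1 in *evmask and
 * returns 2.
 */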
156 static int
157 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask)
158 {
159 	const struct pmc_masks *pm;
160 	char *q, *r;
161 	int c;
162 
163 	if (pmask == NULL)	/* no mask keywords */
164 		return (-1);
165 	q = strchr(p, '='); 	/* skip '=' */
166 	if (*++q == '\0')	/* no more data */
167 		return (-1);
168 	c = 0;			/* count of mask keywords seen */
169 	while ((r = strsep(&q, "+")) != NULL) {
170 		for (pm = pmask; pm->pm_name && strcmp(r, pm->pm_name); pm++)
171 			;
172 		if (pm->pm_name == NULL) /* not found */
173 			return (-1);
174 		*evmask |= pm->pm_value;
175 		c++;
176 	}
177 	return (c);
178 }
179 #endif
180 
181 #define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
182 #define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
183 #define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
184 
185 #if defined(__i386__)
186 
187 /*
188  * AMD K7 (Athlon) CPUs.
189  */
190 
191 static struct pmc_event_alias k7_aliases[] = {
192 	EV_ALIAS("branches",		"k7-retired-branches"),
193 	EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
194 	EV_ALIAS("cycles",		"tsc"),
195 	EV_ALIAS("dc-misses",		"k7-dc-misses"),
196 	EV_ALIAS("ic-misses",		"k7-ic-misses"),
197 	EV_ALIAS("instructions",	"k7-retired-instructions"),
198 	EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
199 	EV_ALIAS(NULL, NULL)
200 };
201 
202 #define	K7_KW_COUNT	"count"
203 #define	K7_KW_EDGE	"edge"
204 #define	K7_KW_INV	"inv"
205 #define	K7_KW_OS	"os"
206 #define	K7_KW_UNITMASK	"unitmask"
207 #define	K7_KW_USR	"usr"
208 
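/*
 * Illustrative K7 counter specifications accepted by this parser
 * (examples only; event and mask names are those listed in hwpmc(4)):
 *
 *	"k7-dc-refills-from-l2,unitmask=mo,os"
 *		refills of M or O state lines, kernel mode only
 *	"k7-retired-instructions,usr"
 *		instructions retired in user mode
 */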
209 static int
210 k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
211     struct pmc_op_pmcallocate *pmc_config)
212 {
213 	char 		*e, *p, *q;
214 	int 		c, has_unitmask;
215 	uint32_t	count, unitmask;
216 
217 	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
218 	pmc_config->pm_caps |= PMC_CAP_READ;
219 
220 	if (pe == PMC_EV_TSC_TSC) {
221 		/* TSC events must be unqualified. */
222 		if (ctrspec && *ctrspec != '\0')
223 			return (-1);
224 		return (0);
225 	}
226 
227 	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
228 	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
229 	    pe == PMC_EV_K7_DC_WRITEBACKS) {
230 		has_unitmask = 1;
231 		unitmask = AMD_PMC_UNITMASK_MOESI;
232 	} else
233 		unitmask = has_unitmask = 0;
234 
235 	pmc_config->pm_caps |= PMC_CAP_WRITE;
236 
237 	while ((p = strsep(&ctrspec, ",")) != NULL) {
238 		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
239 			q = strchr(p, '=');
240 			if (*++q == '\0') /* skip '=' */
241 				return (-1);
242 
243 			count = strtol(q, &e, 0);
244 			if (e == q || *e != '\0')
245 				return (-1);
246 
247 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
248 			pmc_config->pm_md.pm_amd.pm_amd_config |=
249 			    AMD_PMC_TO_COUNTER(count);
250 
251 		} else if (KWMATCH(p, K7_KW_EDGE)) {
252 			pmc_config->pm_caps |= PMC_CAP_EDGE;
253 		} else if (KWMATCH(p, K7_KW_INV)) {
254 			pmc_config->pm_caps |= PMC_CAP_INVERT;
255 		} else if (KWMATCH(p, K7_KW_OS)) {
256 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
257 		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
258 			if (has_unitmask == 0)
259 				return (-1);
260 			unitmask = 0;
261 			q = strchr(p, '=');
262 			if (*++q == '\0') /* skip '=' */
263 				return (-1);
264 
265 			while ((c = tolower(*q++)) != 0)
266 				if (c == 'm')
267 					unitmask |= AMD_PMC_UNITMASK_M;
268 				else if (c == 'o')
269 					unitmask |= AMD_PMC_UNITMASK_O;
270 				else if (c == 'e')
271 					unitmask |= AMD_PMC_UNITMASK_E;
272 				else if (c == 's')
273 					unitmask |= AMD_PMC_UNITMASK_S;
274 				else if (c == 'i')
275 					unitmask |= AMD_PMC_UNITMASK_I;
276 				else if (c == '+')
277 					continue;
278 				else
279 					return (-1);
280 
281 			if (unitmask == 0)
282 				return (-1);
283 
284 		} else if (KWMATCH(p, K7_KW_USR)) {
285 			pmc_config->pm_caps |= PMC_CAP_USER;
286 		} else
287 			return (-1);
288 	}
289 
290 	if (has_unitmask) {
291 		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
292 		pmc_config->pm_md.pm_amd.pm_amd_config |=
293 		    AMD_PMC_TO_UNITMASK(unitmask);
294 	}
295 
296 	return (0);
297 
298 }
299 
300 #endif
301 
302 #if defined(__amd64__) || defined(__i386__)
303 
304 /*
305  * AMD K8 PMCs.
306  *
307  * These are very similar to AMD K7 PMCs, but support more kinds of
308  * events.
309  */
310 
311 static struct pmc_event_alias k8_aliases[] = {
312 	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
313 	EV_ALIAS("branch-mispredicts",
314 	    "k8-fr-retired-taken-branches-mispredicted"),
315 	EV_ALIAS("cycles",		"tsc"),
316 	EV_ALIAS("dc-misses",		"k8-dc-miss"),
317 	EV_ALIAS("ic-misses",		"k8-ic-miss"),
318 	EV_ALIAS("instructions", 	"k8-fr-retired-x86-instructions"),
319 	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
320 	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
321 	EV_ALIAS(NULL, NULL)
322 };
323 
324 #define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))
325 
326 /*
327  * Parsing tables
328  */
329 
330 /* fp dispatched fpu ops */
331 static const struct pmc_masks k8_mask_fdfo[] = {
332 	__K8MASK(add-pipe-excluding-junk-ops,	0),
333 	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
334 	__K8MASK(store-pipe-excluding-junk-ops,	2),
335 	__K8MASK(add-pipe-junk-ops,		3),
336 	__K8MASK(multiply-pipe-junk-ops,	4),
337 	__K8MASK(store-pipe-junk-ops,		5),
338 	NULLMASK
339 };
340 
341 /* ls segment register loads */
342 static const struct pmc_masks k8_mask_lsrl[] = {
343 	__K8MASK(es,	0),
344 	__K8MASK(cs,	1),
345 	__K8MASK(ss,	2),
346 	__K8MASK(ds,	3),
347 	__K8MASK(fs,	4),
348 	__K8MASK(gs,	5),
349 	__K8MASK(hs,	6),
350 	NULLMASK
351 };
352 
353 /* ls locked operation */
354 static const struct pmc_masks k8_mask_llo[] = {
355 	__K8MASK(locked-instructions,	0),
356 	__K8MASK(cycles-in-request,	1),
357 	__K8MASK(cycles-to-complete,	2),
358 	NULLMASK
359 };
360 
361 /* dc refill from {l2,system} and dc copyback */
362 static const struct pmc_masks k8_mask_dc[] = {
363 	__K8MASK(invalid,	0),
364 	__K8MASK(shared,	1),
365 	__K8MASK(exclusive,	2),
366 	__K8MASK(owner,		3),
367 	__K8MASK(modified,	4),
368 	NULLMASK
369 };
370 
371 /* dc one bit ecc error */
372 static const struct pmc_masks k8_mask_dobee[] = {
373 	__K8MASK(scrubber,	0),
374 	__K8MASK(piggyback,	1),
375 	NULLMASK
376 };
377 
378 /* dc dispatched prefetch instructions */
379 static const struct pmc_masks k8_mask_ddpi[] = {
380 	__K8MASK(load,	0),
381 	__K8MASK(store,	1),
382 	__K8MASK(nta,	2),
383 	NULLMASK
384 };
385 
386 /* dc dcache accesses by locks */
387 static const struct pmc_masks k8_mask_dabl[] = {
388 	__K8MASK(accesses,	0),
389 	__K8MASK(misses,	1),
390 	NULLMASK
391 };
392 
393 /* bu internal l2 request */
394 static const struct pmc_masks k8_mask_bilr[] = {
395 	__K8MASK(ic-fill,	0),
396 	__K8MASK(dc-fill,	1),
397 	__K8MASK(tlb-reload,	2),
398 	__K8MASK(tag-snoop,	3),
399 	__K8MASK(cancelled,	4),
400 	NULLMASK
401 };
402 
403 /* bu fill request l2 miss */
404 static const struct pmc_masks k8_mask_bfrlm[] = {
405 	__K8MASK(ic-fill,	0),
406 	__K8MASK(dc-fill,	1),
407 	__K8MASK(tlb-reload,	2),
408 	NULLMASK
409 };
410 
411 /* bu fill into l2 */
412 static const struct pmc_masks k8_mask_bfil[] = {
413 	__K8MASK(dirty-l2-victim,	0),
414 	__K8MASK(victim-from-l2,	1),
415 	NULLMASK
416 };
417 
418 /* fr retired fpu instructions */
419 static const struct pmc_masks k8_mask_frfi[] = {
420 	__K8MASK(x87,			0),
421 	__K8MASK(mmx-3dnow,		1),
422 	__K8MASK(packed-sse-sse2,	2),
423 	__K8MASK(scalar-sse-sse2,	3),
424 	NULLMASK
425 };
426 
427 /* fr retired fastpath double op instructions */
428 static const struct pmc_masks k8_mask_frfdoi[] = {
429 	__K8MASK(low-op-pos-0,		0),
430 	__K8MASK(low-op-pos-1,		1),
431 	__K8MASK(low-op-pos-2,		2),
432 	NULLMASK
433 };
434 
435 /* fr fpu exceptions */
436 static const struct pmc_masks k8_mask_ffe[] = {
437 	__K8MASK(x87-reclass-microfaults,	0),
438 	__K8MASK(sse-retype-microfaults,	1),
439 	__K8MASK(sse-reclass-microfaults,	2),
440 	__K8MASK(sse-and-x87-microtraps,	3),
441 	NULLMASK
442 };
443 
444 /* nb memory controller page access event */
445 static const struct pmc_masks k8_mask_nmcpae[] = {
446 	__K8MASK(page-hit,	0),
447 	__K8MASK(page-miss,	1),
448 	__K8MASK(page-conflict,	2),
449 	NULLMASK
450 };
451 
452 /* nb memory controller turnaround */
453 static const struct pmc_masks k8_mask_nmct[] = {
454 	__K8MASK(dimm-turnaround,		0),
455 	__K8MASK(read-to-write-turnaround,	1),
456 	__K8MASK(write-to-read-turnaround,	2),
457 	NULLMASK
458 };
459 
460 /* nb memory controller bypass saturation */
461 static const struct pmc_masks k8_mask_nmcbs[] = {
462 	__K8MASK(memory-controller-hi-pri-bypass,	0),
463 	__K8MASK(memory-controller-lo-pri-bypass,	1),
464 	__K8MASK(dram-controller-interface-bypass,	2),
465 	__K8MASK(dram-controller-queue-bypass,		3),
466 	NULLMASK
467 };
468 
469 /* nb sized commands */
470 static const struct pmc_masks k8_mask_nsc[] = {
471 	__K8MASK(nonpostwrszbyte,	0),
472 	__K8MASK(nonpostwrszdword,	1),
473 	__K8MASK(postwrszbyte,		2),
474 	__K8MASK(postwrszdword,		3),
475 	__K8MASK(rdszbyte,		4),
476 	__K8MASK(rdszdword,		5),
477 	__K8MASK(rdmodwr,		6),
478 	NULLMASK
479 };
480 
481 /* nb probe result */
482 static const struct pmc_masks k8_mask_npr[] = {
483 	__K8MASK(probe-miss,		0),
484 	__K8MASK(probe-hit,		1),
485 	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
486 	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
487 	NULLMASK
488 };
489 
490 /* nb hypertransport bus bandwidth */
491 static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
492 	__K8MASK(command,	0),
493 	__K8MASK(data, 	1),
494 	__K8MASK(buffer-release, 2),
495 	__K8MASK(nop,	3),
496 	NULLMASK
497 };
498 
499 #undef	__K8MASK
500 
501 #define	K8_KW_COUNT	"count"
502 #define	K8_KW_EDGE	"edge"
503 #define	K8_KW_INV	"inv"
504 #define	K8_KW_MASK	"mask"
505 #define	K8_KW_OS	"os"
506 #define	K8_KW_USR	"usr"
507 
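/*
 * Illustrative K8 counter specifications accepted by this parser
 * (examples only; see hwpmc(4) for the complete event list):
 *
 *	"k8-dc-refill-from-l2,mask=exclusive+shared"
 *		L2 refills of E or S state lines
 *	"k8-fr-retired-x86-instructions,usr"
 *		x86 instructions retired in user mode
 */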
508 static int
509 k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
510     struct pmc_op_pmcallocate *pmc_config)
511 {
512 	char 		*e, *p, *q;
513 	int 		n;
514 	uint32_t	count, evmask;
515 	const struct pmc_masks	*pm, *pmask;
516 
517 	pmc_config->pm_caps |= PMC_CAP_READ;
518 	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
519 
520 	if (pe == PMC_EV_TSC_TSC) {
521 		/* TSC events must be unqualified. */
522 		if (ctrspec && *ctrspec != '\0')
523 			return (-1);
524 		return (0);
525 	}
526 
527 	pmask = NULL;
528 	evmask = 0;
529 
530 #define	__K8SETMASK(M) pmask = k8_mask_##M
531 
532 	/* setup parsing tables */
533 	switch (pe) {
534 	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
535 		__K8SETMASK(fdfo);
536 		break;
537 	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
538 		__K8SETMASK(lsrl);
539 		break;
540 	case PMC_EV_K8_LS_LOCKED_OPERATION:
541 		__K8SETMASK(llo);
542 		break;
543 	case PMC_EV_K8_DC_REFILL_FROM_L2:
544 	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
545 	case PMC_EV_K8_DC_COPYBACK:
546 		__K8SETMASK(dc);
547 		break;
548 	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
549 		__K8SETMASK(dobee);
550 		break;
551 	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
552 		__K8SETMASK(ddpi);
553 		break;
554 	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
555 		__K8SETMASK(dabl);
556 		break;
557 	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
558 		__K8SETMASK(bilr);
559 		break;
560 	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
561 		__K8SETMASK(bfrlm);
562 		break;
563 	case PMC_EV_K8_BU_FILL_INTO_L2:
564 		__K8SETMASK(bfil);
565 		break;
566 	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
567 		__K8SETMASK(frfi);
568 		break;
569 	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
570 		__K8SETMASK(frfdoi);
571 		break;
572 	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
573 		__K8SETMASK(ffe);
574 		break;
575 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
576 		__K8SETMASK(nmcpae);
577 		break;
578 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
579 		__K8SETMASK(nmct);
580 		break;
581 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
582 		__K8SETMASK(nmcbs);
583 		break;
584 	case PMC_EV_K8_NB_SIZED_COMMANDS:
585 		__K8SETMASK(nsc);
586 		break;
587 	case PMC_EV_K8_NB_PROBE_RESULT:
588 		__K8SETMASK(npr);
589 		break;
590 	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
591 	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
592 	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
593 		__K8SETMASK(nhbb);
594 		break;
595 
596 	default:
597 		break;		/* no options defined */
598 	}
599 
600 	pmc_config->pm_caps |= PMC_CAP_WRITE;
601 
602 	while ((p = strsep(&ctrspec, ",")) != NULL) {
603 		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
604 			q = strchr(p, '=');
605 			if (*++q == '\0') /* skip '=' */
606 				return (-1);
607 
608 			count = strtol(q, &e, 0);
609 			if (e == q || *e != '\0')
610 				return (-1);
611 
612 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
613 			pmc_config->pm_md.pm_amd.pm_amd_config |=
614 			    AMD_PMC_TO_COUNTER(count);
615 
616 		} else if (KWMATCH(p, K8_KW_EDGE)) {
617 			pmc_config->pm_caps |= PMC_CAP_EDGE;
618 		} else if (KWMATCH(p, K8_KW_INV)) {
619 			pmc_config->pm_caps |= PMC_CAP_INVERT;
620 		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
621 			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
622 				return (-1);
623 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
624 		} else if (KWMATCH(p, K8_KW_OS)) {
625 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
626 		} else if (KWMATCH(p, K8_KW_USR)) {
627 			pmc_config->pm_caps |= PMC_CAP_USER;
628 		} else
629 			return (-1);
630 	}
631 
632 	/* other post processing */
633 	switch (pe) {
634 	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
635 	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
636 	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
637 	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
638 	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
639 	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
640 		/* XXX only available in rev B and later */
641 		break;
642 	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
643 		/* XXX only available in rev C and later */
644 		break;
645 	case PMC_EV_K8_LS_LOCKED_OPERATION:
646 		/* XXX on CPU revisions A and B, evmask must be zero */
647 		if (evmask & (evmask - 1)) /* > 1 bit set */
648 			return (-1);
649 		if (evmask == 0) {
650 			evmask = 0x01; /* Rev C and later: #instrs */
651 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
652 		}
653 		break;
654 	default:
655 		if (evmask == 0 && pmask != NULL) {
656 			for (pm = pmask; pm->pm_name; pm++)
657 				evmask |= pm->pm_value;
658 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
659 		}
660 	}
661 
662 	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
663 		pmc_config->pm_md.pm_amd.pm_amd_config =
664 		    AMD_PMC_TO_UNITMASK(evmask);
665 
666 	return (0);
667 }
668 
669 #endif
670 
671 #if defined(__amd64__) || defined(__i386__)
672 
673 /*
674  * Intel P4 PMCs
675  */
676 
677 static struct pmc_event_alias p4_aliases[] = {
678 	EV_ALIAS("branches",		"p4-branch-retired,mask=mmtp+mmtm"),
679 	EV_ALIAS("branch-mispredicts",	"p4-mispred-branch-retired"),
680 	EV_ALIAS("cycles",		"tsc"),
681 	EV_ALIAS("instructions",
682 	    "p4-instr-retired,mask=nbogusntag+nbogustag"),
683 	EV_ALIAS("unhalted-cycles",	"p4-global-power-events"),
684 	EV_ALIAS(NULL, NULL)
685 };
686 
687 #define	P4_KW_ACTIVE	"active"
688 #define	P4_KW_ACTIVE_ANY "any"
689 #define	P4_KW_ACTIVE_BOTH "both"
690 #define	P4_KW_ACTIVE_NONE "none"
691 #define	P4_KW_ACTIVE_SINGLE "single"
692 #define	P4_KW_BUSREQTYPE "busreqtype"
693 #define	P4_KW_CASCADE	"cascade"
694 #define	P4_KW_EDGE	"edge"
695 #define	P4_KW_INV	"complement"
696 #define	P4_KW_OS	"os"
697 #define	P4_KW_MASK	"mask"
698 #define	P4_KW_PRECISE	"precise"
699 #define	P4_KW_TAG	"tag"
700 #define	P4_KW_THRESHOLD	"threshold"
701 #define	P4_KW_USR	"usr"
702 
703 #define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))
704 
705 static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
706 	__P4MASK(dd, 0),
707 	__P4MASK(db, 1),
708 	__P4MASK(di, 2),
709 	__P4MASK(bd, 3),
710 	__P4MASK(bb, 4),
711 	__P4MASK(bi, 5),
712 	__P4MASK(id, 6),
713 	__P4MASK(ib, 7),
714 	NULLMASK
715 };
716 
717 static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
718 	__P4MASK(tcmiss, 0),
719 	NULLMASK,
720 };
721 
722 static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
723 	__P4MASK(hit, 0),
724 	__P4MASK(miss, 1),
725 	__P4MASK(hit-uc, 2),
726 	NULLMASK
727 };
728 
729 static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
730 	__P4MASK(st-rb-full, 2),
731 	__P4MASK(64k-conf, 3),
732 	NULLMASK
733 };
734 
735 static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
736 	__P4MASK(lsc, 0),
737 	__P4MASK(ssc, 1),
738 	NULLMASK
739 };
740 
741 static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
742 	__P4MASK(split-ld, 1),
743 	NULLMASK
744 };
745 
746 static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
747 	__P4MASK(split-st, 1),
748 	NULLMASK
749 };
750 
751 static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
752 	__P4MASK(no-sta, 1),
753 	__P4MASK(no-std, 3),
754 	__P4MASK(partial-data, 4),
755 	__P4MASK(unalgn-addr, 5),
756 	NULLMASK
757 };
758 
759 static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
760 	__P4MASK(dtmiss, 0),
761 	__P4MASK(itmiss, 1),
762 	NULLMASK
763 };
764 
765 static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
766 	__P4MASK(rd-2ndl-hits, 0),
767 	__P4MASK(rd-2ndl-hite, 1),
768 	__P4MASK(rd-2ndl-hitm, 2),
769 	__P4MASK(rd-3rdl-hits, 3),
770 	__P4MASK(rd-3rdl-hite, 4),
771 	__P4MASK(rd-3rdl-hitm, 5),
772 	__P4MASK(rd-2ndl-miss, 8),
773 	__P4MASK(rd-3rdl-miss, 9),
774 	__P4MASK(wr-2ndl-miss, 10),
775 	NULLMASK
776 };
777 
778 static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
779 	__P4MASK(all-read, 5),
780 	__P4MASK(all-write, 6),
781 	__P4MASK(mem-uc, 7),
782 	__P4MASK(mem-wc, 8),
783 	__P4MASK(mem-wt, 9),
784 	__P4MASK(mem-wp, 10),
785 	__P4MASK(mem-wb, 11),
786 	__P4MASK(own, 13),
787 	__P4MASK(other, 14),
788 	__P4MASK(prefetch, 15),
789 	NULLMASK
790 };
791 
792 static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
793 	__P4MASK(all-read, 5),
794 	__P4MASK(all-write, 6),
795 	__P4MASK(mem-uc, 7),
796 	__P4MASK(mem-wc, 8),
797 	__P4MASK(mem-wt, 9),
798 	__P4MASK(mem-wp, 10),
799 	__P4MASK(mem-wb, 11),
800 	__P4MASK(own, 13),
801 	__P4MASK(other, 14),
802 	__P4MASK(prefetch, 15),
803 	NULLMASK
804 };
805 
806 static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
807 	__P4MASK(drdy-drv, 0),
808 	__P4MASK(drdy-own, 1),
809 	__P4MASK(drdy-other, 2),
810 	__P4MASK(dbsy-drv, 3),
811 	__P4MASK(dbsy-own, 4),
812 	__P4MASK(dbsy-other, 5),
813 	NULLMASK
814 };
815 
816 static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
817 	__P4MASK(req-type0, 0),
818 	__P4MASK(req-type1, 1),
819 	__P4MASK(req-len0, 2),
820 	__P4MASK(req-len1, 3),
821 	__P4MASK(req-io-type, 5),
822 	__P4MASK(req-lock-type, 6),
823 	__P4MASK(req-cache-type, 7),
824 	__P4MASK(req-split-type, 8),
825 	__P4MASK(req-dem-type, 9),
826 	__P4MASK(req-ord-type, 10),
827 	__P4MASK(mem-type0, 11),
828 	__P4MASK(mem-type1, 12),
829 	__P4MASK(mem-type2, 13),
830 	NULLMASK
831 };
832 
833 static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
834 	__P4MASK(all, 15),
835 	NULLMASK
836 };
837 
838 static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
839 	__P4MASK(all, 15),
840 	NULLMASK
841 };
842 
843 static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
844 	__P4MASK(all, 15),
845 	NULLMASK
846 };
847 
848 static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
849 	__P4MASK(all, 15),
850 	NULLMASK
851 };
852 
853 static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
854 	__P4MASK(all, 15),
855 	NULLMASK
856 };
857 
858 static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
859 	__P4MASK(all, 15),
860 	NULLMASK
861 };
862 
863 static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
864 	__P4MASK(all, 15),
865 	NULLMASK
866 };
867 
868 static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
869 	__P4MASK(all, 15),
870 	NULLMASK
871 };
872 
873 static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
874 	__P4MASK(allp0, 3),
875 	__P4MASK(allp2, 4),
876 	NULLMASK
877 };
878 
879 static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
880 	__P4MASK(running, 0),
881 	NULLMASK
882 };
883 
884 static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
885 	__P4MASK(cisc, 0),
886 	NULLMASK
887 };
888 
889 static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
890 	__P4MASK(from-tc-build, 0),
891 	__P4MASK(from-tc-deliver, 1),
892 	__P4MASK(from-rom, 2),
893 	NULLMASK
894 };
895 
896 static const struct pmc_masks p4_mask_rmbt[] = {
897 	/* retired mispred branch type */
898 	__P4MASK(conditional, 1),
899 	__P4MASK(call, 2),
900 	__P4MASK(return, 3),
901 	__P4MASK(indirect, 4),
902 	NULLMASK
903 };
904 
905 static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
906 	__P4MASK(conditional, 1),
907 	__P4MASK(call, 2),
908 	__P4MASK(return, 3),
909 	__P4MASK(indirect, 4),
910 	NULLMASK
911 };
912 
913 static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
914 	__P4MASK(sbfull, 5),
915 	NULLMASK
916 };
917 
918 static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
919 	__P4MASK(wcb-evicts, 0),
920 	__P4MASK(wcb-full-evict, 1),
921 	NULLMASK
922 };
923 
924 static const struct pmc_masks p4_mask_fee[] = { /* front end event */
925 	__P4MASK(nbogus, 0),
926 	__P4MASK(bogus, 1),
927 	NULLMASK
928 };
929 
930 static const struct pmc_masks p4_mask_ee[] = { /* execution event */
931 	__P4MASK(nbogus0, 0),
932 	__P4MASK(nbogus1, 1),
933 	__P4MASK(nbogus2, 2),
934 	__P4MASK(nbogus3, 3),
935 	__P4MASK(bogus0, 4),
936 	__P4MASK(bogus1, 5),
937 	__P4MASK(bogus2, 6),
938 	__P4MASK(bogus3, 7),
939 	NULLMASK
940 };
941 
942 static const struct pmc_masks p4_mask_re[] = { /* replay event */
943 	__P4MASK(nbogus, 0),
944 	__P4MASK(bogus, 1),
945 	NULLMASK
946 };
947 
948 static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
949 	__P4MASK(nbogusntag, 0),
950 	__P4MASK(nbogustag, 1),
951 	__P4MASK(bogusntag, 2),
952 	__P4MASK(bogustag, 3),
953 	NULLMASK
954 };
955 
956 static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
957 	__P4MASK(nbogus, 0),
958 	__P4MASK(bogus, 1),
959 	NULLMASK
960 };
961 
962 static const struct pmc_masks p4_mask_ut[] = { /* uop type */
963 	__P4MASK(tagloads, 1),
964 	__P4MASK(tagstores, 2),
965 	NULLMASK
966 };
967 
968 static const struct pmc_masks p4_mask_br[] = { /* branch retired */
969 	__P4MASK(mmnp, 0),
970 	__P4MASK(mmnm, 1),
971 	__P4MASK(mmtp, 2),
972 	__P4MASK(mmtm, 3),
973 	NULLMASK
974 };
975 
976 static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
977 	__P4MASK(nbogus, 0),
978 	NULLMASK
979 };
980 
981 static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
982 	__P4MASK(fpsu, 0),
983 	__P4MASK(fpso, 1),
984 	__P4MASK(poao, 2),
985 	__P4MASK(poau, 3),
986 	__P4MASK(prea, 4),
987 	NULLMASK
988 };
989 
990 static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
991 	__P4MASK(clear, 0),
992 	__P4MASK(moclear, 2),
993 	__P4MASK(smclear, 3),
994 	NULLMASK
995 };
996 
997 /* P4 event parser */
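/*
 * Illustrative P4 counter specifications accepted by this parser
 * (examples only):
 *
 *	"p4-branch-retired,mask=mmtp+mmtm"
 *		retired taken branches, predicted and mispredicted
 *	"p4-global-power-events,active=any,usr"
 *		cycles during which the processor is running, counted
 *		regardless of which logical CPU is active, user mode
 */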
998 static int
999 p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1000     struct pmc_op_pmcallocate *pmc_config)
1001 {
1002 
1003 	char	*e, *p, *q;
1004 	int	count, has_tag, has_busreqtype, n;
1005 	uint32_t evmask, cccractivemask;
1006 	const struct pmc_masks *pm, *pmask;
1007 
1008 	pmc_config->pm_caps |= PMC_CAP_READ;
1009 	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1010 	    pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1011 
1012 	if (pe == PMC_EV_TSC_TSC) {
1013 		/* TSC must not be further qualified */
1014 		if (ctrspec && *ctrspec != '\0')
1015 			return (-1);
1016 		return (0);
1017 	}
1018 
1019 	pmask   = NULL;
1020 	evmask  = 0;
1021 	cccractivemask = 0x3;
1022 	has_tag = has_busreqtype = 0;
1023 	pmc_config->pm_caps |= PMC_CAP_WRITE;
1024 
1025 #define	__P4SETMASK(M) do {				\
1026 	pmask = p4_mask_##M; 				\
1027 } while (0)
1028 
1029 	switch (pe) {
1030 	case PMC_EV_P4_TC_DELIVER_MODE:
1031 		__P4SETMASK(tcdm);
1032 		break;
1033 	case PMC_EV_P4_BPU_FETCH_REQUEST:
1034 		__P4SETMASK(bfr);
1035 		break;
1036 	case PMC_EV_P4_ITLB_REFERENCE:
1037 		__P4SETMASK(ir);
1038 		break;
1039 	case PMC_EV_P4_MEMORY_CANCEL:
1040 		__P4SETMASK(memcan);
1041 		break;
1042 	case PMC_EV_P4_MEMORY_COMPLETE:
1043 		__P4SETMASK(memcomp);
1044 		break;
1045 	case PMC_EV_P4_LOAD_PORT_REPLAY:
1046 		__P4SETMASK(lpr);
1047 		break;
1048 	case PMC_EV_P4_STORE_PORT_REPLAY:
1049 		__P4SETMASK(spr);
1050 		break;
1051 	case PMC_EV_P4_MOB_LOAD_REPLAY:
1052 		__P4SETMASK(mlr);
1053 		break;
1054 	case PMC_EV_P4_PAGE_WALK_TYPE:
1055 		__P4SETMASK(pwt);
1056 		break;
1057 	case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1058 		__P4SETMASK(bcr);
1059 		break;
1060 	case PMC_EV_P4_IOQ_ALLOCATION:
1061 		__P4SETMASK(ia);
1062 		has_busreqtype = 1;
1063 		break;
1064 	case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1065 		__P4SETMASK(iae);
1066 		has_busreqtype = 1;
1067 		break;
1068 	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1069 		__P4SETMASK(fda);
1070 		break;
1071 	case PMC_EV_P4_BSQ_ALLOCATION:
1072 		__P4SETMASK(ba);
1073 		break;
1074 	case PMC_EV_P4_SSE_INPUT_ASSIST:
1075 		__P4SETMASK(sia);
1076 		break;
1077 	case PMC_EV_P4_PACKED_SP_UOP:
1078 		__P4SETMASK(psu);
1079 		break;
1080 	case PMC_EV_P4_PACKED_DP_UOP:
1081 		__P4SETMASK(pdu);
1082 		break;
1083 	case PMC_EV_P4_SCALAR_SP_UOP:
1084 		__P4SETMASK(ssu);
1085 		break;
1086 	case PMC_EV_P4_SCALAR_DP_UOP:
1087 		__P4SETMASK(sdu);
1088 		break;
1089 	case PMC_EV_P4_64BIT_MMX_UOP:
1090 		__P4SETMASK(64bmu);
1091 		break;
1092 	case PMC_EV_P4_128BIT_MMX_UOP:
1093 		__P4SETMASK(128bmu);
1094 		break;
1095 	case PMC_EV_P4_X87_FP_UOP:
1096 		__P4SETMASK(xfu);
1097 		break;
1098 	case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1099 		__P4SETMASK(xsmu);
1100 		break;
1101 	case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1102 		__P4SETMASK(gpe);
1103 		break;
1104 	case PMC_EV_P4_TC_MS_XFER:
1105 		__P4SETMASK(tmx);
1106 		break;
1107 	case PMC_EV_P4_UOP_QUEUE_WRITES:
1108 		__P4SETMASK(uqw);
1109 		break;
1110 	case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1111 		__P4SETMASK(rmbt);
1112 		break;
1113 	case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1114 		__P4SETMASK(rbt);
1115 		break;
1116 	case PMC_EV_P4_RESOURCE_STALL:
1117 		__P4SETMASK(rs);
1118 		break;
1119 	case PMC_EV_P4_WC_BUFFER:
1120 		__P4SETMASK(wb);
1121 		break;
1122 	case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1123 	case PMC_EV_P4_B2B_CYCLES:
1124 	case PMC_EV_P4_BNR:
1125 	case PMC_EV_P4_SNOOP:
1126 	case PMC_EV_P4_RESPONSE:
1127 		break;
1128 	case PMC_EV_P4_FRONT_END_EVENT:
1129 		__P4SETMASK(fee);
1130 		break;
1131 	case PMC_EV_P4_EXECUTION_EVENT:
1132 		__P4SETMASK(ee);
1133 		break;
1134 	case PMC_EV_P4_REPLAY_EVENT:
1135 		__P4SETMASK(re);
1136 		break;
1137 	case PMC_EV_P4_INSTR_RETIRED:
1138 		__P4SETMASK(insret);
1139 		break;
1140 	case PMC_EV_P4_UOPS_RETIRED:
1141 		__P4SETMASK(ur);
1142 		break;
1143 	case PMC_EV_P4_UOP_TYPE:
1144 		__P4SETMASK(ut);
1145 		break;
1146 	case PMC_EV_P4_BRANCH_RETIRED:
1147 		__P4SETMASK(br);
1148 		break;
1149 	case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1150 		__P4SETMASK(mbr);
1151 		break;
1152 	case PMC_EV_P4_X87_ASSIST:
1153 		__P4SETMASK(xa);
1154 		break;
1155 	case PMC_EV_P4_MACHINE_CLEAR:
1156 		__P4SETMASK(machclr);
1157 		break;
1158 	default:
1159 		return (-1);
1160 	}
1161 
1162 	/* process additional flags */
1163 	while ((p = strsep(&ctrspec, ",")) != NULL) {
1164 		if (KWPREFIXMATCH(p, P4_KW_ACTIVE "=")) {
1165 			q = strchr(p, '=');
1166 			if (*++q == '\0') /* skip '=' */
1167 				return (-1);
1168 
1169 			if (strcmp(q, P4_KW_ACTIVE_NONE) == 0)
1170 				cccractivemask = 0x0;
1171 			else if (strcmp(q, P4_KW_ACTIVE_SINGLE) == 0)
1172 				cccractivemask = 0x1;
1173 			else if (strcmp(q, P4_KW_ACTIVE_BOTH) == 0)
1174 				cccractivemask = 0x2;
1175 			else if (strcmp(q, P4_KW_ACTIVE_ANY) == 0)
1176 				cccractivemask = 0x3;
1177 			else
1178 				return (-1);
1179 
1180 		} else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE "=")) {
1181 			if (has_busreqtype == 0)
1182 				return (-1);
1183 
1184 			q = strchr(p, '=');
1185 			if (*++q == '\0') /* skip '=' */
1186 				return (-1);
1187 
1188 			count = strtol(q, &e, 0);
1189 			if (e == q || *e != '\0')
1190 				return (-1);
1191 			evmask = (evmask & ~0x1F) | (count & 0x1F);
1192 		} else if (KWMATCH(p, P4_KW_CASCADE))
1193 			pmc_config->pm_caps |= PMC_CAP_CASCADE;
1194 		else if (KWMATCH(p, P4_KW_EDGE))
1195 			pmc_config->pm_caps |= PMC_CAP_EDGE;
1196 		else if (KWMATCH(p, P4_KW_INV))
1197 			pmc_config->pm_caps |= PMC_CAP_INVERT;
1198 		else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
1199 			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1200 				return (-1);
1201 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1202 		} else if (KWMATCH(p, P4_KW_OS))
1203 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1204 		else if (KWMATCH(p, P4_KW_PRECISE))
1205 			pmc_config->pm_caps |= PMC_CAP_PRECISE;
1206 		else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
1207 			if (has_tag == 0)
1208 				return (-1);
1209 
1210 			q = strchr(p, '=');
1211 			if (*++q == '\0') /* skip '=' */
1212 				return (-1);
1213 
1214 			count = strtol(q, &e, 0);
1215 			if (e == q || *e != '\0')
1216 				return (-1);
1217 
1218 			pmc_config->pm_caps |= PMC_CAP_TAGGING;
1219 			pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
1220 			    P4_ESCR_TO_TAG_VALUE(count);
1221 		} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
1222 			q = strchr(p, '=');
1223 			if (*++q == '\0') /* skip '=' */
1224 				return (-1);
1225 
1226 			count = strtol(q, &e, 0);
1227 			if (e == q || *e != '\0')
1228 				return (-1);
1229 
1230 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1231 			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
1232 			    ~P4_CCCR_THRESHOLD_MASK;
1233 			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1234 			    P4_CCCR_TO_THRESHOLD(count);
1235 		} else if (KWMATCH(p, P4_KW_USR))
1236 			pmc_config->pm_caps |= PMC_CAP_USER;
1237 		else
1238 			return (-1);
1239 	}
1240 
1241 	/* other post processing */
1242 	if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
1243 	    pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
1244 	    pe == PMC_EV_P4_BSQ_ALLOCATION)
1245 		pmc_config->pm_caps |= PMC_CAP_EDGE;
1246 
1247 	/* fill in thread activity mask */
1248 	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1249 	    P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
1250 
1251 	if (evmask)
1252 		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1253 
1254 	switch (pe) {
1255 	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1256 		if ((evmask & 0x06) == 0x06 ||
1257 		    (evmask & 0x18) == 0x18)
1258 			return (-1); /* can't have own+other bits together */
1259 		if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
1260 			evmask = 0x1D;
1261 		break;
1262 	case PMC_EV_P4_MACHINE_CLEAR:
1263 		/* only one bit is allowed to be set */
1264 		if ((evmask & (evmask - 1)) != 0)
1265 			return (-1);
1266 		if (evmask == 0) {
1267 			evmask = 0x1; 	/* 'CLEAR' */
1268 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1269 		}
1270 		break;
1271 	default:
1272 		if (evmask == 0 && pmask) {
1273 			for (pm = pmask; pm->pm_name; pm++)
1274 				evmask |= pm->pm_value;
1275 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1276 		}
1277 	}
1278 
1279 	pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
1280 	    P4_ESCR_TO_EVENT_MASK(evmask);
1281 
1282 	return (0);
1283 }
1284 
1285 #endif
1286 
1287 #if defined(__i386__)
1288 
1289 /*
1290  * Pentium style PMCs
1291  */
1292 
1293 static struct pmc_event_alias p5_aliases[] = {
1294 	EV_ALIAS("cycles", "tsc"),
1295 	EV_ALIAS(NULL, NULL)
1296 };
1297 
1298 static int
1299 p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
1300     struct pmc_op_pmcallocate *pmc_config)
1301 {
1302 	(void) pe; (void) ctrspec; (void) pmc_config; return (-1); /* shut up gcc */
1303 }
1304 
1305 /*
1306  * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
1307  * and Pentium M CPUs.
1308  */
1309 
1310 static struct pmc_event_alias p6_aliases[] = {
1311 	EV_ALIAS("branches",		"p6-br-inst-retired"),
1312 	EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
1313 	EV_ALIAS("cycles",		"tsc"),
1314 	EV_ALIAS("dc-misses",		"p6-dcu-lines-in"),
1315 	EV_ALIAS("ic-misses",		"p6-ifu-fetch-miss"),
1316 	EV_ALIAS("instructions",	"p6-inst-retired"),
1317 	EV_ALIAS("interrupts",		"p6-hw-int-rx"),
1318 	EV_ALIAS("unhalted-cycles",	"p6-cpu-clk-unhalted"),
1319 	EV_ALIAS(NULL, NULL)
1320 };
1321 
1322 #define	P6_KW_CMASK	"cmask"
1323 #define	P6_KW_EDGE	"edge"
1324 #define	P6_KW_INV	"inv"
1325 #define	P6_KW_OS	"os"
1326 #define	P6_KW_UMASK	"umask"
1327 #define	P6_KW_USR	"usr"
1328 
1329 static struct pmc_masks p6_mask_mesi[] = {
1330 	PMCMASK(m,	0x01),
1331 	PMCMASK(e,	0x02),
1332 	PMCMASK(s,	0x04),
1333 	PMCMASK(i,	0x08),
1334 	NULLMASK
1335 };
1336 
1337 static struct pmc_masks p6_mask_mesihw[] = {
1338 	PMCMASK(m,	0x01),
1339 	PMCMASK(e,	0x02),
1340 	PMCMASK(s,	0x04),
1341 	PMCMASK(i,	0x08),
1342 	PMCMASK(nonhw,	0x00),
1343 	PMCMASK(hw,	0x10),
1344 	PMCMASK(both,	0x30),
1345 	NULLMASK
1346 };
1347 
1348 static struct pmc_masks p6_mask_hw[] = {
1349 	PMCMASK(nonhw,	0x00),
1350 	PMCMASK(hw,	0x10),
1351 	PMCMASK(both,	0x30),
1352 	NULLMASK
1353 };
1354 
1355 static struct pmc_masks p6_mask_any[] = {
1356 	PMCMASK(self,	0x00),
1357 	PMCMASK(any,	0x20),
1358 	NULLMASK
1359 };
1360 
1361 static struct pmc_masks p6_mask_ekp[] = {
1362 	PMCMASK(nta,	0x00),
1363 	PMCMASK(t1,	0x01),
1364 	PMCMASK(t2,	0x02),
1365 	PMCMASK(wos,	0x03),
1366 	NULLMASK
1367 };
1368 
1369 static struct pmc_masks p6_mask_pps[] = {
1370 	PMCMASK(packed-and-scalar, 0x00),
1371 	PMCMASK(scalar,	0x01),
1372 	NULLMASK
1373 };
1374 
1375 static struct pmc_masks p6_mask_mite[] = {
1376 	PMCMASK(packed-multiply,	 0x01),
1377 	PMCMASK(packed-shift,		0x02),
1378 	PMCMASK(pack,			0x04),
1379 	PMCMASK(unpack,			0x08),
1380 	PMCMASK(packed-logical,		0x10),
1381 	PMCMASK(packed-arithmetic,	0x20),
1382 	NULLMASK
1383 };
1384 
1385 static struct pmc_masks p6_mask_fmt[] = {
1386 	PMCMASK(mmxtofp,	0x00),
1387 	PMCMASK(fptommx,	0x01),
1388 	NULLMASK
1389 };
1390 
1391 static struct pmc_masks p6_mask_sr[] = {
1392 	PMCMASK(es,	0x01),
1393 	PMCMASK(ds,	0x02),
1394 	PMCMASK(fs,	0x04),
1395 	PMCMASK(gs,	0x08),
1396 	NULLMASK
1397 };
1398 
1399 static struct pmc_masks p6_mask_eet[] = {
1400 	PMCMASK(all,	0x00),
1401 	PMCMASK(freq,	0x02),
1402 	NULLMASK
1403 };
1404 
1405 static struct pmc_masks p6_mask_efur[] = {
1406 	PMCMASK(all,	0x00),
1407 	PMCMASK(loadop,	0x01),
1408 	PMCMASK(stdsta,	0x02),
1409 	NULLMASK
1410 };
1411 
1412 static struct pmc_masks p6_mask_essir[] = {
1413 	PMCMASK(sse-packed-single,	0x00),
1414 	PMCMASK(sse-packed-single-scalar-single, 0x01),
1415 	PMCMASK(sse2-packed-double,	0x02),
1416 	PMCMASK(sse2-scalar-double,	0x03),
1417 	NULLMASK
1418 };
1419 
1420 static struct pmc_masks p6_mask_esscir[] = {
1421 	PMCMASK(sse-packed-single,	0x00),
1422 	PMCMASK(sse-scalar-single,	0x01),
1423 	PMCMASK(sse2-packed-double,	0x02),
1424 	PMCMASK(sse2-scalar-double,	0x03),
1425 	NULLMASK
1426 };
1427 
1428 /* P6 event parser */
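/*
 * Illustrative P6 counter specifications accepted by this parser
 * (examples only):
 *
 *	"p6-l2-ld,umask=m+e"
 *		L2 data loads of lines in M or E state
 *	"p6-inst-retired,cmask=2,usr"
 *		cycles retiring two or more instructions, user mode
 */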
1429 static int
1430 p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
1431     struct pmc_op_pmcallocate *pmc_config)
1432 {
1433 	char *e, *p, *q;
1434 	uint32_t evmask;
1435 	int count, n;
1436 	const struct pmc_masks *pm, *pmask;
1437 
1438 	pmc_config->pm_caps |= PMC_CAP_READ;
1439 	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;
1440 
1441 	if (pe == PMC_EV_TSC_TSC) {
1442 		if (ctrspec && *ctrspec != '\0')
1443 			return (-1);
1444 		return (0);
1445 	}
1446 
1447 	pmc_config->pm_caps |= PMC_CAP_WRITE;
1448 	evmask = 0;
1449 
1450 #define	P6MASKSET(M)	pmask = p6_mask_ ## M
1451 
1452 	switch(pe) {
1453 	case PMC_EV_P6_L2_IFETCH: 	P6MASKSET(mesi); break;
1454 	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
1455 	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
1456 	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
1457 	case PMC_EV_P6_BUS_DRDY_CLOCKS:
1458 	case PMC_EV_P6_BUS_LOCK_CLOCKS:
1459 	case PMC_EV_P6_BUS_TRAN_BRD:
1460 	case PMC_EV_P6_BUS_TRAN_RFO:
1461 	case PMC_EV_P6_BUS_TRANS_WB:
1462 	case PMC_EV_P6_BUS_TRAN_IFETCH:
1463 	case PMC_EV_P6_BUS_TRAN_INVAL:
1464 	case PMC_EV_P6_BUS_TRAN_PWR:
1465 	case PMC_EV_P6_BUS_TRANS_P:
1466 	case PMC_EV_P6_BUS_TRANS_IO:
1467 	case PMC_EV_P6_BUS_TRAN_DEF:
1468 	case PMC_EV_P6_BUS_TRAN_BURST:
1469 	case PMC_EV_P6_BUS_TRAN_ANY:
1470 	case PMC_EV_P6_BUS_TRAN_MEM:
1471 		P6MASKSET(any);	break;
1472 	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
1473 	case PMC_EV_P6_EMON_KNI_PREF_MISS:
1474 		P6MASKSET(ekp); break;
1475 	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
1476 	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
1477 		P6MASKSET(pps);	break;
1478 	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
1479 		P6MASKSET(mite); break;
1480 	case PMC_EV_P6_FP_MMX_TRANS:
1481 		P6MASKSET(fmt);	break;
1482 	case PMC_EV_P6_SEG_RENAME_STALLS:
1483 	case PMC_EV_P6_SEG_REG_RENAMES:
1484 		P6MASKSET(sr);	break;
1485 	case PMC_EV_P6_EMON_EST_TRANS:
1486 		P6MASKSET(eet);	break;
1487 	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
1488 		P6MASKSET(efur); break;
1489 	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
1490 		P6MASKSET(essir); break;
1491 	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
1492 		P6MASKSET(esscir); break;
1493 	default:
1494 		pmask = NULL;
1495 		break;
1496 	}
1497 
1498 	/* Pentium M PMCs have a few events with different semantics */
1499 	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
1500 		if (pe == PMC_EV_P6_L2_LD ||
1501 		    pe == PMC_EV_P6_L2_LINES_IN ||
1502 		    pe == PMC_EV_P6_L2_LINES_OUT)
1503 			P6MASKSET(mesihw);
1504 		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
1505 			P6MASKSET(hw);
1506 	}
1507 
1508 	/* Parse additional modifiers if present */
1509 	while ((p = strsep(&ctrspec, ",")) != NULL) {
1510 		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
1511 			q = strchr(p, '=');
1512 			if (*++q == '\0') /* skip '=' */
1513 				return (-1);
1514 			count = strtol(q, &e, 0);
1515 			if (e == q || *e != '\0')
1516 				return (-1);
1517 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1518 			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
1519 			    P6_EVSEL_TO_CMASK(count);
1520 		} else if (KWMATCH(p, P6_KW_EDGE)) {
1521 			pmc_config->pm_caps |= PMC_CAP_EDGE;
1522 		} else if (KWMATCH(p, P6_KW_INV)) {
1523 			pmc_config->pm_caps |= PMC_CAP_INVERT;
1524 		} else if (KWMATCH(p, P6_KW_OS)) {
1525 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1526 		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
1527 			evmask = 0;
1528 			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1529 				return (-1);
1530 			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
1531 			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
1532 			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
1533 			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
1534 			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
1535 			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
1536 			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
1537 			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
1538 			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
1539 			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
1540 			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
1541 			     pe == PMC_EV_P6_BUS_TRANS_IO ||
1542 			     pe == PMC_EV_P6_BUS_TRANS_P ||
1543 			     pe == PMC_EV_P6_BUS_TRANS_WB ||
1544 			     pe == PMC_EV_P6_EMON_EST_TRANS ||
1545 			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
1546 			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
1547 			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
1548 			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
1549 			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
1550 			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
1551 			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
1552 			     pe == PMC_EV_P6_FP_MMX_TRANS)
1553 			    && (n > 1))	/* Only one mask keyword is allowed. */
1554 				return (-1);
1555 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1556 		} else if (KWMATCH(p, P6_KW_USR)) {
1557 			pmc_config->pm_caps |= PMC_CAP_USER;
1558 		} else
1559 			return (-1);
1560 	}
1561 
1562 	/* post processing */
1563 	switch (pe) {
1564 
1565 		/*
1566 		 * The following events default to an evmask of 0
1567 		 */
1568 
1569 		/* default => 'self' */
1570 	case PMC_EV_P6_BUS_DRDY_CLOCKS:
1571 	case PMC_EV_P6_BUS_LOCK_CLOCKS:
1572 	case PMC_EV_P6_BUS_TRAN_BRD:
1573 	case PMC_EV_P6_BUS_TRAN_RFO:
1574 	case PMC_EV_P6_BUS_TRANS_WB:
1575 	case PMC_EV_P6_BUS_TRAN_IFETCH:
1576 	case PMC_EV_P6_BUS_TRAN_INVAL:
1577 	case PMC_EV_P6_BUS_TRAN_PWR:
1578 	case PMC_EV_P6_BUS_TRANS_P:
1579 	case PMC_EV_P6_BUS_TRANS_IO:
1580 	case PMC_EV_P6_BUS_TRAN_DEF:
1581 	case PMC_EV_P6_BUS_TRAN_BURST:
1582 	case PMC_EV_P6_BUS_TRAN_ANY:
1583 	case PMC_EV_P6_BUS_TRAN_MEM:
1584 
1585 		/* default => 'nta' */
1586 	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
1587 	case PMC_EV_P6_EMON_KNI_PREF_MISS:
1588 
1589 		/* default => 'packed and scalar' */
1590 	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
1591 	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
1592 
1593 		/* default => 'mmx to fp transitions' */
1594 	case PMC_EV_P6_FP_MMX_TRANS:
1595 
1596 		/* default => 'SSE Packed Single' */
1597 	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
1598 	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
1599 
1600 		/* default => 'all fused micro-ops' */
1601 	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
1602 
1603 		/* default => 'all transitions' */
1604 	case PMC_EV_P6_EMON_EST_TRANS:
1605 		break;
1606 
1607 	case PMC_EV_P6_MMX_UOPS_EXEC:
1608 		evmask = 0x0F;		/* only value allowed */
1609 		break;
1610 
1611 	default:
1612 		/*
1613 		 * For all other events, set the default event mask
1614 		 * to a logical OR of all the allowed event mask bits.
1615 		 */
1616 		if (evmask == 0 && pmask) {
1617 			for (pm = pmask; pm->pm_name; pm++)
1618 				evmask |= pm->pm_value;
1619 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1620 		}
1621 
1622 		break;
1623 	}
1624 
1625 	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1626 		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
1627 		    P6_EVSEL_TO_UMASK(evmask);
1628 
1629 	return (0);
1630 }
1631 
1632 #endif
1633 
1634 /*
1635  * API entry points
1636  */
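/*
 * Typical calling sequence for a process-virtual counting PMC, as a
 * minimal sketch (error checking omitted; PMC_CPU_ANY is assumed to
 * be the "don't care" CPU value accepted for process-mode PMCs):
 *
 *	pmc_id_t pmcid;
 *	pmc_value_t v;
 *
 *	pmc_init();
 *	pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY, &pmcid);
 *	pmc_attach(pmcid, getpid());
 *	pmc_start(pmcid);
 *	(run the code being measured)
 *	pmc_stop(pmcid);
 *	pmc_read(pmcid, &v);
 *	pmc_release(pmcid);
 */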
1637 
1638 int
1639 pmc_allocate(const char *ctrspec, enum pmc_mode mode,
1640     uint32_t flags, int cpu, pmc_id_t *pmcid)
1641 {
1642 	int retval;
1643 	enum pmc_event pe;
1644 	char *r, *spec_copy;
1645 	const char *ctrname;
1646 	const struct pmc_event_alias *p;
1647 	struct pmc_op_pmcallocate pmc_config;
1648 
1649 	spec_copy = NULL;
1650 	retval    = -1;
1651 
1652 	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
1653 	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
1654 		errno = EINVAL;
1655 		goto out;
1656 	}
1657 
1658 	/* replace an event alias with the canonical event specifier */
1659 	if (pmc_mdep_event_aliases)
1660 		for (p = pmc_mdep_event_aliases; p->pm_alias; p++)
1661 			if (!strcmp(ctrspec, p->pm_alias)) {
1662 				spec_copy = strdup(p->pm_spec);
1663 				break;
1664 			}
1665 
1666 	if (spec_copy == NULL)
1667 		spec_copy = strdup(ctrspec);
1668 
1669 	r = spec_copy;
1670 	ctrname = strsep(&r, ",");
1671 
1672 	/* look for the given counter name */
1673 	for (pe = PMC_EVENT_FIRST; pe < (PMC_EVENT_LAST+1); pe++)
1674 		if (!strcmp(ctrname, pmc_event_table[pe].pm_ev_name))
1675 			break;
1676 
1677 	if (pe > PMC_EVENT_LAST) {
1678 		errno = EINVAL;
1679 		goto out;
1680 	}
1681 
1682 	bzero(&pmc_config, sizeof(pmc_config));
1683 	pmc_config.pm_ev    = pmc_event_table[pe].pm_ev_code;
1684 	pmc_config.pm_class = pmc_event_table[pe].pm_ev_class;
1685 	pmc_config.pm_cpu   = cpu;
1686 	pmc_config.pm_mode  = mode;
1687 	pmc_config.pm_flags = flags;
1688 
1689 	if (PMC_IS_SAMPLING_MODE(mode))
1690 		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
1691 
1692 	if (pmc_mdep_allocate_pmc(pe, r, &pmc_config) < 0) {
1693 		errno = EINVAL;
1694 		goto out;
1695 	}
1696 
1697 	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
1698 		goto out;
1699 
1700 	*pmcid = pmc_config.pm_pmcid;
1701 
1702 	retval = 0;
1703 
1704  out:
1705 	if (spec_copy)
1706 		free(spec_copy);
1707 
1708 	return (retval);
1709 }
1710 
1711 int
1712 pmc_attach(pmc_id_t pmc, pid_t pid)
1713 {
1714 	struct pmc_op_pmcattach pmc_attach_args;
1715 
1716 	pmc_attach_args.pm_pmc = pmc;
1717 	pmc_attach_args.pm_pid = pid;
1718 
1719 	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
1720 }
1721 
1722 int
1723 pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
1724 {
1725 	unsigned int i;
1726 	enum pmc_class cl;
1727 
1728 	cl = PMC_ID_TO_CLASS(pmcid);
1729 	for (i = 0; i < cpu_info.pm_nclass; i++)
1730 		if (cpu_info.pm_classes[i].pm_class == cl) {
1731 			*caps = cpu_info.pm_classes[i].pm_caps;
1732 			return (0);
1733 		}
1734 	errno = EINVAL;
1735 	return (-1);
1736 }
1737 
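/*
 * Direct hwpmc(4) log output to file descriptor 'fd'.
 */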
1738 int
1739 pmc_configure_logfile(int fd)
1740 {
1741 	struct pmc_op_configurelog cla;
1742 
1743 	cla.pm_logfd = fd;
1744 	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
1745 		return (-1);
1746 	return (0);
1747 }
1748 
1749 int
1750 pmc_cpuinfo(const struct pmc_cpuinfo **pci)
1751 {
1752 	if (pmc_syscall == -1) {
1753 		errno = ENXIO;
1754 		return (-1);
1755 	}
1756 
1757 	*pci = &cpu_info;
1758 	return (0);
1759 }
1760 
1761 int
1762 pmc_detach(pmc_id_t pmc, pid_t pid)
1763 {
1764 	struct pmc_op_pmcattach pmc_detach_args;
1765 
1766 	pmc_detach_args.pm_pmc = pmc;
1767 	pmc_detach_args.pm_pid = pid;
1768 	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
1769 }
1770 
1771 int
1772 pmc_disable(int cpu, int pmc)
1773 {
1774 	struct pmc_op_pmcadmin ssa;
1775 
1776 	ssa.pm_cpu = cpu;
1777 	ssa.pm_pmc = pmc;
1778 	ssa.pm_state = PMC_STATE_DISABLED;
1779 	return (PMC_CALL(PMCADMIN, &ssa));
1780 }
1781 
1782 int
1783 pmc_enable(int cpu, int pmc)
1784 {
1785 	struct pmc_op_pmcadmin ssa;
1786 
1787 	ssa.pm_cpu = cpu;
1788 	ssa.pm_pmc = pmc;
1789 	ssa.pm_state = PMC_STATE_FREE;
1790 	return (PMC_CALL(PMCADMIN, &ssa));
1791 }
1792 
1793 /*
1794  * Return a list of events known to a given PMC class.  'cl' is the
1795  * PMC class identifier, 'eventnames' is the returned list of 'const
1796  * char *' pointers pointing to the names of the events. 'nevents' is
1797  * the number of event name pointers returned.
1798  *
1799  * The space for 'eventnames' is allocated using malloc(3).  The caller
1800  * is responsible for freeing this space when done.
1801  */
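/*
 * Example use (illustrative only):
 *
 *	const char **names;
 *	int i, nevents;
 *
 *	if (pmc_event_names_of_class(PMC_CLASS_TSC, &names, &nevents) == 0) {
 *		for (i = 0; i < nevents; i++)
 *			printf("%s\n", names[i]);
 *		free(names);
 *	}
 */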
1802 int
1803 pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
1804     int *nevents)
1805 {
1806 	int count;
1807 	const char **names;
1808 	const struct pmc_event_descr *ev;
1809 
1810 	switch (cl)
1811 	{
1812 	case PMC_CLASS_TSC:
1813 		ev = &pmc_event_table[PMC_EV_TSC_TSC];
1814 		count = 1;
1815 		break;
1816 	case PMC_CLASS_K7:
1817 		ev = &pmc_event_table[PMC_EV_K7_FIRST];
1818 		count = PMC_EV_K7_LAST - PMC_EV_K7_FIRST + 1;
1819 		break;
1820 	case PMC_CLASS_K8:
1821 		ev = &pmc_event_table[PMC_EV_K8_FIRST];
1822 		count = PMC_EV_K8_LAST - PMC_EV_K8_FIRST + 1;
1823 		break;
1824 	case PMC_CLASS_P5:
1825 		ev = &pmc_event_table[PMC_EV_P5_FIRST];
1826 		count = PMC_EV_P5_LAST - PMC_EV_P5_FIRST + 1;
1827 		break;
1828 	case PMC_CLASS_P6:
1829 		ev = &pmc_event_table[PMC_EV_P6_FIRST];
1830 		count = PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1;
1831 		break;
1832 	case PMC_CLASS_P4:
1833 		ev = &pmc_event_table[PMC_EV_P4_FIRST];
1834 		count = PMC_EV_P4_LAST - PMC_EV_P4_FIRST + 1;
1835 		break;
1836 	default:
1837 		errno = EINVAL;
1838 		return (-1);
1839 	}
1840 
1841 	if ((names = malloc(count * sizeof(const char *))) == NULL)
1842 		return (-1);
1843 
1844 	*eventnames = names;
1845 	*nevents = count;
1846 
1847 	for (;count--; ev++, names++)
1848 		*names = ev->pm_ev_name;
1849 	return (0);
1850 }
1851 
1852 int
1853 pmc_flush_logfile(void)
1854 {
1855 	return (PMC_CALL(FLUSHLOG,0));
1856 }
1857 
1858 int
1859 pmc_get_driver_stats(struct pmc_driverstats *ds)
1860 {
1861 	struct pmc_op_getdriverstats gms;
1862 
1863 	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
1864 		return (-1);
1865 
1866 	/* copy out fields in the current userland<->library interface */
1867 	ds->pm_intr_ignored    = gms.pm_intr_ignored;
1868 	ds->pm_intr_processed  = gms.pm_intr_processed;
1869 	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
1870 	ds->pm_syscalls        = gms.pm_syscalls;
1871 	ds->pm_syscall_errors  = gms.pm_syscall_errors;
1872 	ds->pm_buffer_requests = gms.pm_buffer_requests;
1873 	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
1874 	ds->pm_log_sweeps      = gms.pm_log_sweeps;
1875 	return (0);
1876 }
1877 
1878 int
1879 pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
1880 {
1881 	struct pmc_op_getmsr gm;
1882 
1883 	gm.pm_pmcid = pmc;
1884 	if (PMC_CALL(PMCGETMSR, &gm) < 0)
1885 		return (-1);
1886 	*msr = gm.pm_msr;
1887 	return (0);
1888 }
1889 
1890 int
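/*
 * Library initialization: locate the hwpmc(4) module, retrieve its
 * system call number, verify that the kernel's ABI major version
 * matches the one this library was built against, cache the CPU
 * description and select the machine dependent event parser.  Needs
 * to be called before any of the other functions in this library.
 */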
1891 pmc_init(void)
1892 {
1893 	int error, pmc_mod_id;
1894 	unsigned int n;
1895 	uint32_t abi_version;
1896 	struct module_stat pmc_modstat;
1897 	struct pmc_op_getcpuinfo op_cpu_info;
1898 
1899 	if (pmc_syscall != -1) /* already inited */
1900 		return (0);
1901 
1902 	/* retrieve the system call number from the KLD */
1903 	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
1904 		return (-1);
1905 
1906 	pmc_modstat.version = sizeof(struct module_stat);
1907 	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
1908 		return (-1);
1909 
1910 	pmc_syscall = pmc_modstat.data.intval;
1911 
1912 	/* check the kernel module's ABI against our compiled-in version */
1913 	abi_version = PMC_VERSION;
1914 	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
1915 		return (pmc_syscall = -1);
1916 
1917 	/* ignore patch & minor numbers for the comparison */
1918 	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
1919 		errno  = EPROGMISMATCH;
1920 		return (pmc_syscall = -1);
1921 	}
1922 
1923 	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
1924 		return (pmc_syscall = -1);
1925 
1926 	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
1927 	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
1928 	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
1929 	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
1930 	for (n = 0; n < cpu_info.pm_nclass; n++)
1931 		cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];
1932 
1933 	/* set parser pointer */
1934 	switch (cpu_info.pm_cputype) {
1935 #if defined(__i386__)
1936 	case PMC_CPU_AMD_K7:
1937 		pmc_mdep_event_aliases = k7_aliases;
1938 		pmc_mdep_allocate_pmc = k7_allocate_pmc;
1939 		break;
1940 	case PMC_CPU_INTEL_P5:
1941 		pmc_mdep_event_aliases = p5_aliases;
1942 		pmc_mdep_allocate_pmc = p5_allocate_pmc;
1943 		break;
1944 	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
1945 	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
1946 	case PMC_CPU_INTEL_PIII:
1947 	case PMC_CPU_INTEL_PM:
1948 		pmc_mdep_event_aliases = p6_aliases;
1949 		pmc_mdep_allocate_pmc = p6_allocate_pmc;
1950 		break;
1951 #endif
1952 #if defined(__amd64__) || defined(__i386__)
1953 	case PMC_CPU_INTEL_PIV:
1954 		pmc_mdep_event_aliases = p4_aliases;
1955 		pmc_mdep_allocate_pmc = p4_allocate_pmc;
1956 		break;
1957 	case PMC_CPU_AMD_K8:
1958 		pmc_mdep_event_aliases = k8_aliases;
1959 		pmc_mdep_allocate_pmc = k8_allocate_pmc;
1960 		break;
1961 #endif
1962 
1963 	default:
1964 		/*
1965 		 * A CPU type this version of the library knows nothing
1966 		 * about.  This shouldn't happen, since the ABI version
1967 		 * check above should have caught the mismatch.
1968 		 */
1969 		errno = ENXIO;
1970 		return (pmc_syscall = -1);
1971 	}
1972 
1973 	return (0);
1974 }
1975 
1976 const char *
1977 pmc_name_of_capability(enum pmc_caps cap)
1978 {
1979 	int i;
1980 
1981 	/*
1982 	 * 'cap' should have a single bit set and should be in
1983 	 * range.
1984 	 */
1985 	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
1986 	    cap > PMC_CAP_LAST) {
1987 		errno = EINVAL;
1988 		return (NULL);
1989 	}
1990 
1991 	i = ffs(cap);
1992 	return (pmc_capability_names[i - 1]);
1993 }
1994 
1995 const char *
1996 pmc_name_of_class(enum pmc_class pc)
1997 {
1998 	if ((int) pc >= PMC_CLASS_FIRST &&
1999 	    pc <= PMC_CLASS_LAST)
2000 		return (pmc_class_names[pc]);
2001 
2002 	errno = EINVAL;
2003 	return (NULL);
2004 }
2005 
2006 const char *
2007 pmc_name_of_cputype(enum pmc_cputype cp)
2008 {
2009 	if ((int) cp >= PMC_CPU_FIRST &&
2010 	    cp <= PMC_CPU_LAST)
2011 		return (pmc_cputype_names[cp]);
2012 	errno = EINVAL;
2013 	return (NULL);
2014 }
2015 
2016 const char *
2017 pmc_name_of_disposition(enum pmc_disp pd)
2018 {
2019 	if ((int) pd >= PMC_DISP_FIRST &&
2020 	    pd <= PMC_DISP_LAST)
2021 		return (pmc_disposition_names[pd]);
2022 
2023 	errno = EINVAL;
2024 	return (NULL);
2025 }
2026 
2027 const char *
2028 pmc_name_of_event(enum pmc_event pe)
2029 {
2030 	if ((int) pe >= PMC_EVENT_FIRST &&
2031 	    pe <= PMC_EVENT_LAST)
2032 		return (pmc_event_table[pe].pm_ev_name);
2033 
2034 	errno = EINVAL;
2035 	return (NULL);
2036 }
2037 
2038 const char *
2039 pmc_name_of_mode(enum pmc_mode pm)
2040 {
2041 	if ((int) pm >= PMC_MODE_FIRST &&
2042 	    pm <= PMC_MODE_LAST)
2043 		return (pmc_mode_names[pm]);
2044 
2045 	errno = EINVAL;
2046 	return (NULL);
2047 }
2048 
2049 const char *
2050 pmc_name_of_state(enum pmc_state ps)
2051 {
2052 	if ((int) ps >= PMC_STATE_FIRST &&
2053 	    ps <= PMC_STATE_LAST)
2054 		return (pmc_state_names[ps]);
2055 
2056 	errno = EINVAL;
2057 	return (NULL);
2058 }
2059 
2060 int
2061 pmc_ncpu(void)
2062 {
2063 	if (pmc_syscall == -1) {
2064 		errno = ENXIO;
2065 		return (-1);
2066 	}
2067 
2068 	return (cpu_info.pm_ncpu);
2069 }
2070 
2071 int
2072 pmc_npmc(int cpu)
2073 {
2074 	if (pmc_syscall == -1) {
2075 		errno = ENXIO;
2076 		return (-1);
2077 	}
2078 
2079 	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
2080 		errno = EINVAL;
2081 		return (-1);
2082 	}
2083 
2084 	return (cpu_info.pm_npmc);
2085 }
2086 
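/*
 * Return descriptions of the PMCs on CPU 'cpu'.  The returned
 * structure is allocated with calloc(3); the caller should free(3)
 * it when it is no longer needed.
 */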
2087 int
2088 pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
2089 {
2090 	int nbytes, npmc;
2091 	struct pmc_op_getpmcinfo *pmci;
2092 
2093 	if ((npmc = pmc_npmc(cpu)) < 0)
2094 		return (-1);
2095 
2096 	nbytes = sizeof(struct pmc_op_getpmcinfo) +
2097 	    npmc * sizeof(struct pmc_info);
2098 
2099 	if ((pmci = calloc(1, nbytes)) == NULL)
2100 		return (-1);
2101 
2102 	pmci->pm_cpu  = cpu;
2103 
2104 	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
2105 		free(pmci);
2106 		return (-1);
2107 	}
2108 
2109 	/* kernel<->library, library<->userland interfaces are identical */
2110 	*ppmci = (struct pmc_pmcinfo *) pmci;
2111 	return (0);
2112 }
2113 
2114 int
2115 pmc_read(pmc_id_t pmc, pmc_value_t *value)
2116 {
2117 	struct pmc_op_pmcrw pmc_read_op;
2118 
2119 	pmc_read_op.pm_pmcid = pmc;
2120 	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
2121 	pmc_read_op.pm_value = -1;
2122 
2123 	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
2124 		return (-1);
2125 
2126 	*value = pmc_read_op.pm_value;
2127 	return (0);
2128 }
2129 
2130 int
2131 pmc_release(pmc_id_t pmc)
2132 {
2133 	struct pmc_op_simple	pmc_release_args;
2134 
2135 	pmc_release_args.pm_pmcid = pmc;
2136 	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
2137 }
2138 
2139 int
2140 pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
2141 {
2142 	struct pmc_op_pmcrw pmc_rw_op;
2143 
2144 	pmc_rw_op.pm_pmcid = pmc;
2145 	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
2146 	pmc_rw_op.pm_value = newvalue;
2147 
2148 	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
2149 		return (-1);
2150 
2151 	*oldvaluep = pmc_rw_op.pm_value;
2152 	return (0);
2153 }
2154 
2155 int
2156 pmc_set(pmc_id_t pmc, pmc_value_t value)
2157 {
2158 	struct pmc_op_pmcsetcount sc;
2159 
2160 	sc.pm_pmcid = pmc;
2161 	sc.pm_count = value;
2162 
2163 	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
2164 		return (-1);
2165 	return (0);
2166 }
2167 
2168 int
2169 pmc_start(pmc_id_t pmc)
2170 {
2171 	struct pmc_op_simple	pmc_start_args;
2172 
2173 	pmc_start_args.pm_pmcid = pmc;
2174 	return (PMC_CALL(PMCSTART, &pmc_start_args));
2175 }
2176 
2177 int
2178 pmc_stop(pmc_id_t pmc)
2179 {
2180 	struct pmc_op_simple	pmc_stop_args;
2181 
2182 	pmc_stop_args.pm_pmcid = pmc;
2183 	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
2184 }
2185 
2186 int
2187 pmc_width(pmc_id_t pmcid, uint32_t *width)
2188 {
2189 	unsigned int i;
2190 	enum pmc_class cl;
2191 
2192 	cl = PMC_ID_TO_CLASS(pmcid);
2193 	for (i = 0; i < cpu_info.pm_nclass; i++)
2194 		if (cpu_info.pm_classes[i].pm_class == cl) {
2195 			*width = cpu_info.pm_classes[i].pm_width;
2196 			return (0);
2197 		}
2198 	errno = EINVAL;
2199 	return (-1);
2200 }
2201 
2202 int
2203 pmc_write(pmc_id_t pmc, pmc_value_t value)
2204 {
2205 	struct pmc_op_pmcrw pmc_write_op;
2206 
2207 	pmc_write_op.pm_pmcid = pmc;
2208 	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
2209 	pmc_write_op.pm_value = value;
2210 	return (PMC_CALL(PMCRW, &pmc_write_op));
2211 }
2212 
2213 int
2214 pmc_writelog(uint32_t userdata)
2215 {
2216 	struct pmc_op_writelog wl;
2217 
2218 	wl.pm_userdata = userdata;
2219 	return (PMC_CALL(WRITELOG, &wl));
2220 }
2221