xref: /freebsd/lib/libpmc/libpmc.c (revision ebccf1e3a6b11b97cbf5f813dd76636e892a9035)
1 /*-
2  * Copyright (c) 2003,2004 Joseph Koshy
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/types.h>
33 #include <sys/module.h>
34 #include <sys/pmc.h>
35 #include <sys/syscall.h>
36 
37 #include <machine/pmc_mdep.h>
38 
39 #include <ctype.h>
40 #include <errno.h>
41 #include <fcntl.h>
42 #include <pmc.h>
43 #include <stdio.h>
44 #include <stdlib.h>
45 #include <string.h>
46 #include <strings.h>
47 #include <unistd.h>
48 
49 /* Function prototypes */
50 #if	__i386__
51 static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
52     struct pmc_op_pmcallocate *_pmc_config);
53 static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
54     struct pmc_op_pmcallocate *_pmc_config);
55 static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
56     struct pmc_op_pmcallocate *_pmc_config);
57 static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
58     struct pmc_op_pmcallocate *_pmc_config);
59 #elif	__amd64__
60 static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
61     struct pmc_op_pmcallocate *_pmc_config);
62 #endif
63 
64 #define PMC_CALL(cmd, params)				\
65 	syscall(pmc_syscall, PMC_OP_##cmd, (params))
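/*
 * For reference, PMC_CALL(PMCSTART, &args) expands to
 * syscall(pmc_syscall, PMC_OP_PMCSTART, &args); every library entry
 * point below reaches the kernel through this macro once pmc_init()
 * has looked up the module's system call number.
 */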
66 
67 /*
68  * Event aliases provide a way for the user to ask for generic events
69  * like "cache-misses", or "instructions-retired".  These aliases are
70  * mapped to the appropriate canonical event descriptions using a
71  * lookup table.
72  */
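/*
 * For example, with the K7 alias table below in effect, the alias
 * "branch-mispredicts" expands to the canonical specification
 * "k7-retired-branches-mispredicted".
 */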
73 
74 struct pmc_event_alias {
75 	const char	*pm_alias;
76 	const char	*pm_spec;
77 };
78 
79 static const struct pmc_event_alias *pmc_mdep_event_aliases;
80 
81 /*
82  * The pmc_event_descr table maps symbolic names known to the user
83  * to integer codes used by the PMC KLD.
84  */
85 
86 struct pmc_event_descr {
87 	const char	*pm_ev_name;
88 	enum pmc_event	pm_ev_code;
89 	enum pmc_class	pm_ev_class;
90 };
91 
92 static const struct pmc_event_descr
93 pmc_event_table[] =
94 {
95 #undef  __PMC_EV
96 #define	__PMC_EV(C,N,EV) { #EV, PMC_EV_ ## C ## _ ## N, PMC_CLASS_ ## C },
97 	__PMC_EVENTS()
98 };
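/*
 * As an illustration, an entry such as __PMC_EV(K7, DC_MISSES, k7-dc-misses)
 * (the actual event list lives in <sys/pmc.h>) expands here to
 * { "k7-dc-misses", PMC_EV_K7_DC_MISSES, PMC_CLASS_K7 }.
 */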
99 
100 /*
101  * Mapping tables, mapping enumeration values to human readable
102  * strings.
103  */
104 
105 static const char * pmc_capability_names[] = {
106 #undef	__PMC_CAP
107 #define	__PMC_CAP(N,V,D)	#N ,
108 	__PMC_CAPS()
109 };
110 
111 static const char * pmc_class_names[] = {
112 #undef	__PMC_CLASS
113 #define __PMC_CLASS(C)	#C ,
114 	__PMC_CLASSES()
115 };
116 
117 static const char * pmc_cputype_names[] = {
118 #undef	__PMC_CPU
119 #define	__PMC_CPU(S, D) #S ,
120 	__PMC_CPUS()
121 };
122 
123 static const char * pmc_disposition_names[] = {
124 #undef	__PMC_DISP
125 #define	__PMC_DISP(D)	#D ,
126 	__PMC_DISPOSITIONS()
127 };
128 
129 static const char * pmc_mode_names[] = {
130 #undef  __PMC_MODE
131 #define __PMC_MODE(M,N)	#M ,
132 	__PMC_MODES()
133 };
134 
135 static const char * pmc_state_names[] = {
136 #undef  __PMC_STATE
137 #define __PMC_STATE(S) #S ,
138 	__PMC_STATES()
139 };
140 
141 static int pmc_syscall = -1;		/* filled in by pmc_init() */
142 
143 struct pmc_op_getcpuinfo cpu_info;	/* filled in by pmc_init() */
144 
145 /* Architecture dependent event parsing */
146 static int (*pmc_mdep_allocate_pmc)(enum pmc_event _pe, char *_ctrspec,
147     struct pmc_op_pmcallocate *_pmc_config);
148 
149 /* Event masks for events */
150 struct pmc_masks {
151 	const char	*pm_name;
152 	const uint32_t	pm_value;
153 };
154 #define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
155 #define	NULLMASK	{ .pm_name = NULL }
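/*
 * For example, PMCMASK(tcmiss, (1 << 0)) expands to
 * { .pm_name = "tcmiss", .pm_value = (1 << 0) }; NULLMASK supplies the
 * null pm_name that terminates each mask table below.
 */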
156 
157 static int
158 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask)
159 {
160 	const struct pmc_masks *pm;
161 	char *q, *r;
162 	int c;
163 
164 	if (pmask == NULL)	/* no mask keywords */
165 		return -1;
166 	q = strchr(p, '='); 	/* skip '=' */
167 	if (*++q == '\0')	/* no more data */
168 		return -1;
169 	c = 0;			/* count of mask keywords seen */
170 	while ((r = strsep(&q, "+")) != NULL) {
171 		for (pm = pmask; pm->pm_name && strcmp(r, pm->pm_name); pm++)
172 			;
173 		if (pm->pm_name == NULL) /* not found */
174 			return -1;
175 		*evmask |= pm->pm_value;
176 		c++;
177 	}
178 	return c;
179 }
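/*
 * Example: for a mask table with entries "nbogus" (bit 0) and "bogus"
 * (bit 1), parsing the keyword "mask=nbogus+bogus" ORs the value 0x3
 * into *evmask and returns 2.  Callers invoke this only after a
 * KWPREFIXMATCH() against a keyword ending in "=", so the strchr()
 * above always finds the '=' separator.
 */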
180 
181 #define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
182 #define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
183 #define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
184 
185 #if	__i386__
186 
187 /*
188  * AMD K7 (Athlon) CPUs.
189  */
190 
191 static struct pmc_event_alias k7_aliases[] = {
192 EV_ALIAS("branches",		"k7-retired-branches"),
193 EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
194 EV_ALIAS("cycles",		"tsc"),
195 EV_ALIAS("dc-misses",		"k7-dc-misses,mask=moesi"),
196 EV_ALIAS("ic-misses",		"k7-ic-misses"),
197 EV_ALIAS("instructions",	"k7-retired-instructions"),
198 EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
199 EV_ALIAS(NULL, NULL)
200 };
201 
202 #define	K7_KW_COUNT	"count"
203 #define	K7_KW_EDGE	"edge"
204 #define	K7_KW_INV	"inv"
205 #define	K7_KW_OS	"os"
206 #define	K7_KW_UNITMASK	"unitmask"
207 #define	K7_KW_USR	"usr"
208 
209 static int
210 k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
211     struct pmc_op_pmcallocate *pmc_config)
212 {
213 	char 		*e, *p, *q;
214 	int 		c, has_unitmask;
215 	uint32_t	count, unitmask;
216 
217 	pmc_config->pm_amd_config = 0;
218 	pmc_config->pm_caps |= PMC_CAP_READ;
219 
220 	if (pe == PMC_EV_TSC_TSC) {
221 		/* TSC events must be unqualified. */
222 		if (ctrspec && *ctrspec != '\0')
223 			return -1;
224 		return 0;
225 	}
226 
227 	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
228 	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
229 	    pe == PMC_EV_K7_DC_WRITEBACKS) {
230 		has_unitmask = 1;
231 		unitmask = K7_PMC_UNITMASK_MOESI;
232 	} else
233 		unitmask = has_unitmask = 0;
234 
235 	pmc_config->pm_caps |= PMC_CAP_WRITE;
236 
237 	while ((p = strsep(&ctrspec, ",")) != NULL) {
238 		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
239 			q = strchr(p, '=');
240 			if (*++q == '\0') /* skip '=' */
241 				return -1;
242 
243 			count = strtol(q, &e, 0);
244 			if (e == q || *e != '\0')
245 				return -1;
246 
247 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
248 			pmc_config->pm_amd_config |= K7_PMC_TO_COUNTER(count);
249 
250 		} else if (KWMATCH(p, K7_KW_EDGE)) {
251 			pmc_config->pm_caps |= PMC_CAP_EDGE;
252 		} else if (KWMATCH(p, K7_KW_INV)) {
253 			pmc_config->pm_caps |= PMC_CAP_INVERT;
254 		} else if (KWMATCH(p, K7_KW_OS)) {
255 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
256 		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
257 			if (has_unitmask == 0)
258 				return -1;
259 			unitmask = 0;
260 			q = strchr(p, '=');
261 			if (*++q == '\0') /* skip '=' */
262 				return -1;
263 
264 			while ((c = tolower(*q++)) != 0)
265 				if (c == 'm')
266 					unitmask |= K7_PMC_UNITMASK_M;
267 				else if (c == 'o')
268 					unitmask |= K7_PMC_UNITMASK_O;
269 				else if (c == 'e')
270 					unitmask |= K7_PMC_UNITMASK_E;
271 				else if (c == 's')
272 					unitmask |= K7_PMC_UNITMASK_S;
273 				else if (c == 'i')
274 					unitmask |= K7_PMC_UNITMASK_I;
275 				else if (c == '+')
276 					continue;
277 				else
278 					return -1;
279 
280 			if (unitmask == 0)
281 				return -1;
282 
283 		} else if (KWMATCH(p, K7_KW_USR)) {
284 			pmc_config->pm_caps |= PMC_CAP_USER;
285 		} else
286 			return -1;
287 	}
288 
289 	if (has_unitmask) {
290 		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
291 		pmc_config->pm_amd_config |=
292 		    K7_PMC_TO_UNITMASK(unitmask);
293 	}
294 
295 	return 0;
296 
297 }
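/*
 * Example (event name and values illustrative): a specification such as
 * "k7-dc-refills-from-l2,unitmask=m+o+e,count=2,os,usr" would select
 * the MOESI states M, O and E, set a threshold of 2, and count in both
 * kernel and user mode; the "unitmask" keyword is accepted only for the
 * three events that take one.
 */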
298 
299 /*
300  * Intel P4 PMCs
301  */
302 
303 static struct pmc_event_alias p4_aliases[] = {
304 	EV_ALIAS("cycles", "tsc"),
305 	EV_ALIAS(NULL, NULL)
306 };
307 
308 #define	P4_KW_ACTIVE	"active"
309 #define	P4_KW_ACTIVE_ANY "any"
310 #define	P4_KW_ACTIVE_BOTH "both"
311 #define	P4_KW_ACTIVE_NONE "none"
312 #define	P4_KW_ACTIVE_SINGLE "single"
313 #define	P4_KW_BUSREQTYPE "busreqtype"
314 #define	P4_KW_CASCADE	"cascade"
315 #define	P4_KW_EDGE	"edge"
316 #define	P4_KW_INV	"complement"
317 #define	P4_KW_OS	"os"
318 #define	P4_KW_MASK	"mask"
319 #define	P4_KW_PRECISE	"precise"
320 #define	P4_KW_TAG	"tag"
321 #define	P4_KW_THRESHOLD	"threshold"
322 #define	P4_KW_USR	"usr"
323 
324 #define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))
325 
326 static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
327 	__P4MASK(dd, 0),
328 	__P4MASK(db, 1),
329 	__P4MASK(di, 2),
330 	__P4MASK(bd, 3),
331 	__P4MASK(bb, 4),
332 	__P4MASK(bi, 5),
333 	__P4MASK(id, 6),
334 	__P4MASK(ib, 7),
335 	NULLMASK
336 };
337 
338 static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
339 	__P4MASK(tcmiss, 0),
340 	NULLMASK,
341 };
342 
343 static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
344 	__P4MASK(hit, 0),
345 	__P4MASK(miss, 1),
346 	__P4MASK(hit-uc, 2),
347 	NULLMASK
348 };
349 
350 static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
351 	__P4MASK(st-rb-full, 2),
352 	__P4MASK(64k-conf, 3),
353 	NULLMASK
354 };
355 
356 static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
357 	__P4MASK(lsc, 0),
358 	__P4MASK(ssc, 1),
359 	NULLMASK
360 };
361 
362 static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
363 	__P4MASK(split-ld, 1),
364 	NULLMASK
365 };
366 
367 static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
368 	__P4MASK(split-st, 1),
369 	NULLMASK
370 };
371 
372 static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
373 	__P4MASK(no-sta, 1),
374 	__P4MASK(no-std, 3),
375 	__P4MASK(partial-data, 4),
376 	__P4MASK(unalgn-addr, 5),
377 	NULLMASK
378 };
379 
380 static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
381 	__P4MASK(dtmiss, 0),
382 	__P4MASK(itmiss, 1),
383 	NULLMASK
384 };
385 
386 static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
387 	__P4MASK(rd-2ndl-hits, 0),
388 	__P4MASK(rd-2ndl-hite, 1),
389 	__P4MASK(rd-2ndl-hitm, 2),
390 	__P4MASK(rd-3rdl-hits, 3),
391 	__P4MASK(rd-3rdl-hite, 4),
392 	__P4MASK(rd-3rdl-hitm, 5),
393 	__P4MASK(rd-2ndl-miss, 8),
394 	__P4MASK(rd-3rdl-miss, 9),
395 	__P4MASK(wr-2ndl-miss, 10),
396 	NULLMASK
397 };
398 
399 static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
400 	__P4MASK(all-read, 5),
401 	__P4MASK(all-write, 6),
402 	__P4MASK(mem-uc, 7),
403 	__P4MASK(mem-wc, 8),
404 	__P4MASK(mem-wt, 9),
405 	__P4MASK(mem-wp, 10),
406 	__P4MASK(mem-wb, 11),
407 	__P4MASK(own, 13),
408 	__P4MASK(other, 14),
409 	__P4MASK(prefetch, 15),
410 	NULLMASK
411 };
412 
413 static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
414 	__P4MASK(all-read, 5),
415 	__P4MASK(all-write, 6),
416 	__P4MASK(mem-uc, 7),
417 	__P4MASK(mem-wc, 8),
418 	__P4MASK(mem-wt, 9),
419 	__P4MASK(mem-wp, 10),
420 	__P4MASK(mem-wb, 11),
421 	__P4MASK(own, 13),
422 	__P4MASK(other, 14),
423 	__P4MASK(prefetch, 15),
424 	NULLMASK
425 };
426 
427 static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
428 	__P4MASK(drdy-drv, 0),
429 	__P4MASK(drdy-own, 1),
430 	__P4MASK(drdy-other, 2),
431 	__P4MASK(dbsy-drv, 3),
432 	__P4MASK(dbsy-own, 4),
433 	__P4MASK(dbsy-other, 5),
434 	NULLMASK
435 };
436 
437 static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
438 	__P4MASK(req-type0, 0),
439 	__P4MASK(req-type1, 1),
440 	__P4MASK(req-len0, 2),
441 	__P4MASK(req-len1, 3),
442 	__P4MASK(req-io-type, 5),
443 	__P4MASK(req-lock-type, 6),
444 	__P4MASK(req-cache-type, 7),
445 	__P4MASK(req-split-type, 8),
446 	__P4MASK(req-dem-type, 9),
447 	__P4MASK(req-ord-type, 10),
448 	__P4MASK(mem-type0, 11),
449 	__P4MASK(mem-type1, 12),
450 	__P4MASK(mem-type2, 13),
451 	NULLMASK
452 };
453 
454 static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
455 	__P4MASK(all, 15),
456 	NULLMASK
457 };
458 
459 static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
460 	__P4MASK(all, 15),
461 	NULLMASK
462 };
463 
464 static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
465 	__P4MASK(all, 15),
466 	NULLMASK
467 };
468 
469 static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
470 	__P4MASK(all, 15),
471 	NULLMASK
472 };
473 
474 static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
475 	__P4MASK(all, 15),
476 	NULLMASK
477 };
478 
479 static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
480 	__P4MASK(all, 15),
481 	NULLMASK
482 };
483 
484 static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
485 	__P4MASK(all, 15),
486 	NULLMASK
487 };
488 
489 static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
490 	__P4MASK(all, 15),
491 	NULLMASK
492 };
493 
494 static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
495 	__P4MASK(allp0, 3),
496 	__P4MASK(allp2, 4),
497 	NULLMASK
498 };
499 
500 static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
501 	__P4MASK(running, 0),
502 	NULLMASK
503 };
504 
505 static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
506 	__P4MASK(cisc, 0),
507 	NULLMASK
508 };
509 
510 static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
511 	__P4MASK(from-tc-build, 0),
512 	__P4MASK(from-tc-deliver, 1),
513 	__P4MASK(from-rom, 2),
514 	NULLMASK
515 };
516 
517 static const struct pmc_masks p4_mask_rmbt[] = { /* retired mispred branch type */
518 	__P4MASK(conditional, 1),
519 	__P4MASK(call, 2),
520 	__P4MASK(return, 3),
521 	__P4MASK(indirect, 4),
522 	NULLMASK
523 };
524 
525 static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
526 	__P4MASK(conditional, 1),
527 	__P4MASK(call, 2),
528 	__P4MASK(retired, 3),
529 	__P4MASK(indirect, 4),
530 	NULLMASK
531 };
532 
533 static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
534 	__P4MASK(sbfull, 5),
535 	NULLMASK
536 };
537 
538 static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
539 	__P4MASK(wcb-evicts, 0),
540 	__P4MASK(wcb-full-evict, 1),
541 	NULLMASK
542 };
543 
544 static const struct pmc_masks p4_mask_fee[] = { /* front end event */
545 	__P4MASK(nbogus, 0),
546 	__P4MASK(bogus, 1),
547 	NULLMASK
548 };
549 
550 static const struct pmc_masks p4_mask_ee[] = { /* execution event */
551 	__P4MASK(nbogus0, 0),
552 	__P4MASK(nbogus1, 1),
553 	__P4MASK(nbogus2, 2),
554 	__P4MASK(nbogus3, 3),
555 	__P4MASK(bogus0, 4),
556 	__P4MASK(bogus1, 5),
557 	__P4MASK(bogus2, 6),
558 	__P4MASK(bogus3, 7),
559 	NULLMASK
560 };
561 
562 static const struct pmc_masks p4_mask_re[] = { /* replay event */
563 	__P4MASK(nbogus, 0),
564 	__P4MASK(bogus, 1),
565 	NULLMASK
566 };
567 
568 static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
569 	__P4MASK(nbogusntag, 0),
570 	__P4MASK(nbogustag, 1),
571 	__P4MASK(bogusntag, 2),
572 	__P4MASK(bogustag, 3),
573 	NULLMASK
574 };
575 
576 static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
577 	__P4MASK(nbogus, 0),
578 	__P4MASK(bogus, 1),
579 	NULLMASK
580 };
581 
582 static const struct pmc_masks p4_mask_ut[] = { /* uop type */
583 	__P4MASK(tagloads, 1),
584 	__P4MASK(tagstores, 2),
585 	NULLMASK
586 };
587 
588 static const struct pmc_masks p4_mask_br[] = { /* branch retired */
589 	__P4MASK(mmnp, 0),
590 	__P4MASK(mmnm, 1),
591 	__P4MASK(mmtp, 2),
592 	__P4MASK(mmtm, 3),
593 	NULLMASK
594 };
595 
596 static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
597 	__P4MASK(nbogus, 0),
598 	NULLMASK
599 };
600 
601 static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
602 	__P4MASK(fpsu, 0),
603 	__P4MASK(fpso, 1),
604 	__P4MASK(poao, 2),
605 	__P4MASK(poau, 3),
606 	__P4MASK(prea, 4),
607 	NULLMASK
608 };
609 
610 static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
611 	__P4MASK(clear, 0),
612 	__P4MASK(moclear, 2),
613 	__P4MASK(smclear, 3),
614 	NULLMASK
615 };
616 
617 /* P4 event parser */
618 static int
619 p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
620     struct pmc_op_pmcallocate *pmc_config)
621 {
622 
623 	char	*e, *p, *q;
624 	int	count, has_tag, has_busreqtype, n;
625 	uint32_t evmask, cccractivemask;
626 	const struct pmc_masks *pm, *pmask;
627 
628 	pmc_config->pm_caps |= PMC_CAP_READ;
629 	pmc_config->pm_p4_cccrconfig = pmc_config->pm_p4_escrconfig = 0;
630 
631 	if (pe == PMC_EV_TSC_TSC) {
632 		/* TSC must not be further qualified */
633 		if (ctrspec && *ctrspec != '\0')
634 			return -1;
635 		return 0;
636 	}
637 
638 	pmask   = NULL;
639 	evmask  = 0;
640 	cccractivemask = 0x3;
641 	has_tag = has_busreqtype = 0;
642 	pmc_config->pm_caps |= PMC_CAP_WRITE;
643 
644 #define	__P4SETMASK(M) do {				\
645 	pmask = p4_mask_##M; 				\
646 } while (0)
647 
648 	switch (pe) {
649 	case PMC_EV_P4_TC_DELIVER_MODE:
650 		__P4SETMASK(tcdm);
651 		break;
652 	case PMC_EV_P4_BPU_FETCH_REQUEST:
653 		__P4SETMASK(bfr);
654 		break;
655 	case PMC_EV_P4_ITLB_REFERENCE:
656 		__P4SETMASK(ir);
657 		break;
658 	case PMC_EV_P4_MEMORY_CANCEL:
659 		__P4SETMASK(memcan);
660 		break;
661 	case PMC_EV_P4_MEMORY_COMPLETE:
662 		__P4SETMASK(memcomp);
663 		break;
664 	case PMC_EV_P4_LOAD_PORT_REPLAY:
665 		__P4SETMASK(lpr);
666 		break;
667 	case PMC_EV_P4_STORE_PORT_REPLAY:
668 		__P4SETMASK(spr);
669 		break;
670 	case PMC_EV_P4_MOB_LOAD_REPLAY:
671 		__P4SETMASK(mlr);
672 		break;
673 	case PMC_EV_P4_PAGE_WALK_TYPE:
674 		__P4SETMASK(pwt);
675 		break;
676 	case PMC_EV_P4_BSQ_CACHE_REFERENCE:
677 		__P4SETMASK(bcr);
678 		break;
679 	case PMC_EV_P4_IOQ_ALLOCATION:
680 		__P4SETMASK(ia);
681 		has_busreqtype = 1;
682 		break;
683 	case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
684 		__P4SETMASK(iae);
685 		has_busreqtype = 1;
686 		break;
687 	case PMC_EV_P4_FSB_DATA_ACTIVITY:
688 		__P4SETMASK(fda);
689 		break;
690 	case PMC_EV_P4_BSQ_ALLOCATION:
691 		__P4SETMASK(ba);
692 		break;
693 	case PMC_EV_P4_SSE_INPUT_ASSIST:
694 		__P4SETMASK(sia);
695 		break;
696 	case PMC_EV_P4_PACKED_SP_UOP:
697 		__P4SETMASK(psu);
698 		break;
699 	case PMC_EV_P4_PACKED_DP_UOP:
700 		__P4SETMASK(pdu);
701 		break;
702 	case PMC_EV_P4_SCALAR_SP_UOP:
703 		__P4SETMASK(ssu);
704 		break;
705 	case PMC_EV_P4_SCALAR_DP_UOP:
706 		__P4SETMASK(sdu);
707 		break;
708 	case PMC_EV_P4_64BIT_MMX_UOP:
709 		__P4SETMASK(64bmu);
710 		break;
711 	case PMC_EV_P4_128BIT_MMX_UOP:
712 		__P4SETMASK(128bmu);
713 		break;
714 	case PMC_EV_P4_X87_FP_UOP:
715 		__P4SETMASK(xfu);
716 		break;
717 	case PMC_EV_P4_X87_SIMD_MOVES_UOP:
718 		__P4SETMASK(xsmu);
719 		break;
720 	case PMC_EV_P4_GLOBAL_POWER_EVENTS:
721 		__P4SETMASK(gpe);
722 		break;
723 	case PMC_EV_P4_TC_MS_XFER:
724 		__P4SETMASK(tmx);
725 		break;
726 	case PMC_EV_P4_UOP_QUEUE_WRITES:
727 		__P4SETMASK(uqw);
728 		break;
729 	case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
730 		__P4SETMASK(rmbt);
731 		break;
732 	case PMC_EV_P4_RETIRED_BRANCH_TYPE:
733 		__P4SETMASK(rbt);
734 		break;
735 	case PMC_EV_P4_RESOURCE_STALL:
736 		__P4SETMASK(rs);
737 		break;
738 	case PMC_EV_P4_WC_BUFFER:
739 		__P4SETMASK(wb);
740 		break;
741 	case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
742 	case PMC_EV_P4_B2B_CYCLES:
743 	case PMC_EV_P4_BNR:
744 	case PMC_EV_P4_SNOOP:
745 	case PMC_EV_P4_RESPONSE:
746 		break;
747 	case PMC_EV_P4_FRONT_END_EVENT:
748 		__P4SETMASK(fee);
749 		break;
750 	case PMC_EV_P4_EXECUTION_EVENT:
751 		__P4SETMASK(ee);
752 		break;
753 	case PMC_EV_P4_REPLAY_EVENT:
754 		__P4SETMASK(re);
755 		break;
756 	case PMC_EV_P4_INSTR_RETIRED:
757 		__P4SETMASK(insret);
758 		break;
759 	case PMC_EV_P4_UOPS_RETIRED:
760 		__P4SETMASK(ur);
761 		break;
762 	case PMC_EV_P4_UOP_TYPE:
763 		__P4SETMASK(ut);
764 		break;
765 	case PMC_EV_P4_BRANCH_RETIRED:
766 		__P4SETMASK(br);
767 		break;
768 	case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
769 		__P4SETMASK(mbr);
770 		break;
771 	case PMC_EV_P4_X87_ASSIST:
772 		__P4SETMASK(xa);
773 		break;
774 	case PMC_EV_P4_MACHINE_CLEAR:
775 		__P4SETMASK(machclr);
776 		break;
777 	default:
778 		return -1;
779 	}
780 
781 	/* process additional flags */
782 	while ((p = strsep(&ctrspec, ",")) != NULL) {
783 		if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
784 			q = strchr(p, '=');
785 			if (*++q == '\0') /* skip '=' */
786 				return -1;
787 
788 			if (strcmp(q, P4_KW_ACTIVE_NONE) == 0)
789 				cccractivemask = 0x0;
790 			else if (strcmp(q, P4_KW_ACTIVE_SINGLE) == 0)
791 				cccractivemask = 0x1;
792 			else if (strcmp(q, P4_KW_ACTIVE_BOTH) == 0)
793 				cccractivemask = 0x2;
794 			else if (strcmp(q, P4_KW_ACTIVE_ANY) == 0)
795 				cccractivemask = 0x3;
796 			else
797 				return -1;
798 
799 		} else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
800 			if (has_busreqtype == 0)
801 				return -1;
802 
803 			q = strchr(p, '=');
804 			if (*++q == '\0') /* skip '=' */
805 				return -1;
806 
807 			count = strtol(q, &e, 0);
808 			if (e == q || *e != '\0')
809 				return -1;
810 			evmask = (evmask & ~0x1F) | (count & 0x1F);
811 		} else if (KWMATCH(p, P4_KW_CASCADE))
812 			pmc_config->pm_caps |= PMC_CAP_CASCADE;
813 		else if (KWMATCH(p, P4_KW_EDGE))
814 			pmc_config->pm_caps |= PMC_CAP_EDGE;
815 		else if (KWMATCH(p, P4_KW_INV))
816 			pmc_config->pm_caps |= PMC_CAP_INVERT;
817 		else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
818 			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
819 				return -1;
820 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
821 		} else if (KWMATCH(p, P4_KW_OS))
822 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
823 		else if (KWMATCH(p, P4_KW_PRECISE))
824 			pmc_config->pm_caps |= PMC_CAP_PRECISE;
825 		else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
826 			if (has_tag == 0)
827 				return -1;
828 
829 			q = strchr(p, '=');
830 			if (*++q == '\0') /* skip '=' */
831 				return -1;
832 
833 			count = strtol(q, &e, 0);
834 			if (e == q || *e != '\0')
835 				return -1;
836 
837 			pmc_config->pm_caps |= PMC_CAP_TAGGING;
838 			pmc_config->pm_p4_escrconfig |=
839 			    P4_ESCR_TO_TAG_VALUE(count);
840 		} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
841 			q = strchr(p, '=');
842 			if (*++q == '\0') /* skip '=' */
843 				return -1;
844 
845 			count = strtol(q, &e, 0);
846 			if (e == q || *e != '\0')
847 				return -1;
848 
849 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
850 			pmc_config->pm_p4_cccrconfig &= ~P4_CCCR_THRESHOLD_MASK;
851 			pmc_config->pm_p4_cccrconfig |= P4_CCCR_TO_THRESHOLD(count);
852 		} else if (KWMATCH(p, P4_KW_USR))
853 			pmc_config->pm_caps |= PMC_CAP_USER;
854 		else
855 			return -1;
856 	}
857 
858 	/* other post processing */
859 	if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
860 	    pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
861 	    pe == PMC_EV_P4_BSQ_ALLOCATION)
862 		pmc_config->pm_caps |= PMC_CAP_EDGE;
863 
864 	/* fill in thread activity mask */
865 	pmc_config->pm_p4_cccrconfig |=
866 	    P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
867 
868 	if (evmask)
869 		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
870 
871 	switch (pe) {
872 	case PMC_EV_P4_FSB_DATA_ACTIVITY:
873 		if ((evmask & 0x06) == 0x06 ||
874 		    (evmask & 0x18) == 0x18)
875 			return -1; /* can't have own+other bits together */
876 		if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
877 			evmask = 0x1D;
878 		break;
879 	case PMC_EV_P4_MACHINE_CLEAR:
880 		/* only one bit is allowed to be set */
881 		if ((evmask & (evmask - 1)) != 0)
882 			return -1;
883 		if (evmask == 0) {
884 			evmask = 0x1; 	/* 'CLEAR' */
885 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
886 		}
887 		break;
888 	default:
889 		if (evmask == 0 && pmask) {
890 			for (pm = pmask; pm->pm_name; pm++)
891 				evmask |= pm->pm_value;
892 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
893 		}
894 	}
895 
896 	pmc_config->pm_p4_escrconfig = P4_ESCR_TO_EVENT_MASK(evmask);
897 
898 	return 0;
899 }
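/*
 * Example (event name illustrative): a specification such as
 * "p4-global-power-events,mask=running,usr,os,active=any" selects the
 * 'running' ESCR event mask, counts in both user and kernel mode, and
 * programs the CCCR to count while either logical CPU is active.
 */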
900 
901 /*
902  * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
903  * and Pentium M CPUs.
904  */
905 
906 static struct pmc_event_alias p6_aliases[] = {
907 EV_ALIAS("branches",		"p6-br-inst-retired"),
908 EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
909 EV_ALIAS("cycles",		"tsc"),
910 EV_ALIAS("instructions",	"p6-inst-retired"),
911 EV_ALIAS("interrupts",		"p6-hw-int-rx"),
912 EV_ALIAS(NULL, NULL)
913 };
914 
915 #define	P6_KW_CMASK	"cmask"
916 #define	P6_KW_EDGE	"edge"
917 #define	P6_KW_INV	"inv"
918 #define	P6_KW_OS	"os"
919 #define	P6_KW_UMASK	"umask"
920 #define	P6_KW_USR	"usr"
921 
922 static struct pmc_masks p6_mask_mesi[] = {
923 	PMCMASK(m,	0x01),
924 	PMCMASK(e,	0x02),
925 	PMCMASK(s,	0x04),
926 	PMCMASK(i,	0x08),
927 	NULLMASK
928 };
929 
930 static struct pmc_masks p6_mask_mesihw[] = {
931 	PMCMASK(m,	0x01),
932 	PMCMASK(e,	0x02),
933 	PMCMASK(s,	0x04),
934 	PMCMASK(i,	0x08),
935 	PMCMASK(nonhw,	0x00),
936 	PMCMASK(hw,	0x10),
937 	PMCMASK(both,	0x30),
938 	NULLMASK
939 };
940 
941 static struct pmc_masks p6_mask_hw[] = {
942 	PMCMASK(nonhw,	0x00),
943 	PMCMASK(hw,	0x10),
944 	PMCMASK(both,	0x30),
945 	NULLMASK
946 };
947 
948 static struct pmc_masks p6_mask_any[] = {
949 	PMCMASK(self,	0x00),
950 	PMCMASK(any,	0x20),
951 	NULLMASK
952 };
953 
954 static struct pmc_masks p6_mask_ekp[] = {
955 	PMCMASK(nta,	0x00),
956 	PMCMASK(t1,	0x01),
957 	PMCMASK(t2,	0x02),
958 	PMCMASK(wos,	0x03),
959 	NULLMASK
960 };
961 
962 static struct pmc_masks p6_mask_pps[] = {
963 	PMCMASK(packed-and-scalar, 0x00),
964 	PMCMASK(scalar,	0x01),
965 	NULLMASK
966 };
967 
968 static struct pmc_masks p6_mask_mite[] = {
969 	PMCMASK(packed-multiply,	 0x01),
970 	PMCMASK(packed-shift,		0x02),
971 	PMCMASK(pack,			0x04),
972 	PMCMASK(unpack,			0x08),
973 	PMCMASK(packed-logical,		0x10),
974 	PMCMASK(packed-arithmetic,	0x20),
975 	NULLMASK
976 };
977 
978 static struct pmc_masks p6_mask_fmt[] = {
979 	PMCMASK(mmxtofp,	0x00),
980 	PMCMASK(fptommx,	0x01),
981 	NULLMASK
982 };
983 
984 static struct pmc_masks p6_mask_sr[] = {
985 	PMCMASK(es,	0x01),
986 	PMCMASK(ds,	0x02),
987 	PMCMASK(fs,	0x04),
988 	PMCMASK(gs,	0x08),
989 	NULLMASK
990 };
991 
992 static struct pmc_masks p6_mask_eet[] = {
993 	PMCMASK(all,	0x00),
994 	PMCMASK(freq,	0x02),
995 	NULLMASK
996 };
997 
998 static struct pmc_masks p6_mask_efur[] = {
999 	PMCMASK(all,	0x00),
1000 	PMCMASK(loadop,	0x01),
1001 	PMCMASK(stdsta,	0x02),
1002 	NULLMASK
1003 };
1004 
1005 static struct pmc_masks p6_mask_essir[] = {
1006 	PMCMASK(sse-packed-single,	0x00),
1007 	PMCMASK(sse-packed-single-scalar-single, 0x01),
1008 	PMCMASK(sse2-packed-double,	0x02),
1009 	PMCMASK(sse2-scalar-double,	0x03),
1010 	NULLMASK
1011 };
1012 
1013 static struct pmc_masks p6_mask_esscir[] = {
1014 	PMCMASK(sse-packed-single,	0x00),
1015 	PMCMASK(sse-scalar-single,	0x01),
1016 	PMCMASK(sse2-packed-double,	0x02),
1017 	PMCMASK(sse2-scalar-double,	0x03),
1018 	NULLMASK
1019 };
1020 
1021 /* P6 event parser */
1022 static int
1023 p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
1024     struct pmc_op_pmcallocate *pmc_config)
1025 {
1026 	char *e, *p, *q;
1027 	uint32_t evmask;
1028 	int count, n;
1029 	const struct pmc_masks *pm, *pmask;
1030 
1031 	pmc_config->pm_caps |= PMC_CAP_READ;
1032 	pmc_config->pm_p6_config = 0;
1033 
1034 	if (pe == PMC_EV_TSC_TSC) {
1035 		if (ctrspec && *ctrspec != '\0')
1036 			return -1;
1037 		return 0;
1038 	}
1039 
1040 	pmc_config->pm_caps |= PMC_CAP_WRITE;
1041 	evmask = 0;
1042 
1043 #define	P6MASKSET(M)	pmask = p6_mask_ ## M
1044 
1045 	switch(pe) {
1046 	case PMC_EV_P6_L2_IFETCH: 	P6MASKSET(mesi); break;
1047 	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
1048 	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
1049 	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
1050 	case PMC_EV_P6_BUS_DRDY_CLOCKS:
1051 	case PMC_EV_P6_BUS_LOCK_CLOCKS:
1052 	case PMC_EV_P6_BUS_TRAN_BRD:
1053 	case PMC_EV_P6_BUS_TRAN_RFO:
1054 	case PMC_EV_P6_BUS_TRANS_WB:
1055 	case PMC_EV_P6_BUS_TRAN_IFETCH:
1056 	case PMC_EV_P6_BUS_TRAN_INVAL:
1057 	case PMC_EV_P6_BUS_TRAN_PWR:
1058 	case PMC_EV_P6_BUS_TRANS_P:
1059 	case PMC_EV_P6_BUS_TRANS_IO:
1060 	case PMC_EV_P6_BUS_TRAN_DEF:
1061 	case PMC_EV_P6_BUS_TRAN_BURST:
1062 	case PMC_EV_P6_BUS_TRAN_ANY:
1063 	case PMC_EV_P6_BUS_TRAN_MEM:
1064 		P6MASKSET(any);	break;
1065 	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
1066 	case PMC_EV_P6_EMON_KNI_PREF_MISS:
1067 		P6MASKSET(ekp); break;
1068 	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
1069 	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
1070 		P6MASKSET(pps);	break;
1071 	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
1072 		P6MASKSET(mite); break;
1073 	case PMC_EV_P6_FP_MMX_TRANS:
1074 		P6MASKSET(fmt);	break;
1075 	case PMC_EV_P6_SEG_RENAME_STALLS:
1076 	case PMC_EV_P6_SEG_REG_RENAMES:
1077 		P6MASKSET(sr);	break;
1078 	case PMC_EV_P6_EMON_EST_TRANS:
1079 		P6MASKSET(eet);	break;
1080 	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
1081 		P6MASKSET(efur); break;
1082 	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
1083 		P6MASKSET(essir); break;
1084 	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
1085 		P6MASKSET(esscir); break;
1086 	default:
1087 		pmask = NULL;
1088 		break;
1089 	}
1090 
1091 	/* Pentium M PMCs have a few events with different semantics */
1092 	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
1093 		if (pe == PMC_EV_P6_L2_LD ||
1094 		    pe == PMC_EV_P6_L2_LINES_IN ||
1095 		    pe == PMC_EV_P6_L2_LINES_OUT)
1096 			P6MASKSET(mesihw);
1097 		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
1098 			P6MASKSET(hw);
1099 	}
1100 
1101 	/* Parse additional modifiers if present */
1102 	while ((p = strsep(&ctrspec, ",")) != NULL) {
1103 		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
1104 			q = strchr(p, '=');
1105 			if (*++q == '\0') /* skip '=' */
1106 				return -1;
1107 			count = strtol(q, &e, 0);
1108 			if (e == q || *e != '\0')
1109 				return -1;
1110 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1111 			pmc_config->pm_p6_config |= P6_EVSEL_TO_CMASK(count);
1112 		} else if (KWMATCH(p, P6_KW_EDGE)) {
1113 			pmc_config->pm_caps |= PMC_CAP_EDGE;
1114 		} else if (KWMATCH(p, P6_KW_INV)) {
1115 			pmc_config->pm_caps |= PMC_CAP_INVERT;
1116 		} else if (KWMATCH(p, P6_KW_OS)) {
1117 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1118 		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
1119 			evmask = 0;
1120 			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1121 				return -1;
1122 			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
1123 			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
1124 			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
1125 			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
1126 			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
1127 			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
1128 			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
1129 			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
1130 			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
1131 			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
1132 			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
1133 			     pe == PMC_EV_P6_BUS_TRANS_IO ||
1134 			     pe == PMC_EV_P6_BUS_TRANS_P ||
1135 			     pe == PMC_EV_P6_BUS_TRANS_WB ||
1136 			     pe == PMC_EV_P6_EMON_EST_TRANS ||
1137 			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
1138 			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
1139 			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
1140 			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
1141 			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
1142 			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
1143 			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
1144 			     pe == PMC_EV_P6_FP_MMX_TRANS)
1145 			    && (n > 1))
1146 				return -1; /* only one mask keyword allowed */
1147 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1148 		} else if (KWMATCH(p, P6_KW_USR)) {
1149 			pmc_config->pm_caps |= PMC_CAP_USER;
1150 		} else
1151 			return -1;
1152 	}
1153 
1154 	/* post processing */
1155 	switch (pe) {
1156 
1157 		/*
1158 		 * The following events default to an evmask of 0
1159 		 */
1160 
1161 		/* default => 'self' */
1162 	case PMC_EV_P6_BUS_DRDY_CLOCKS:
1163 	case PMC_EV_P6_BUS_LOCK_CLOCKS:
1164 	case PMC_EV_P6_BUS_TRAN_BRD:
1165 	case PMC_EV_P6_BUS_TRAN_RFO:
1166 	case PMC_EV_P6_BUS_TRANS_WB:
1167 	case PMC_EV_P6_BUS_TRAN_IFETCH:
1168 	case PMC_EV_P6_BUS_TRAN_INVAL:
1169 	case PMC_EV_P6_BUS_TRAN_PWR:
1170 	case PMC_EV_P6_BUS_TRANS_P:
1171 	case PMC_EV_P6_BUS_TRANS_IO:
1172 	case PMC_EV_P6_BUS_TRAN_DEF:
1173 	case PMC_EV_P6_BUS_TRAN_BURST:
1174 	case PMC_EV_P6_BUS_TRAN_ANY:
1175 	case PMC_EV_P6_BUS_TRAN_MEM:
1176 
1177 		/* default => 'nta' */
1178 	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
1179 	case PMC_EV_P6_EMON_KNI_PREF_MISS:
1180 
1181 		/* default => 'packed and scalar' */
1182 	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
1183 	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
1184 
1185 		/* default => 'mmx to fp transitions' */
1186 	case PMC_EV_P6_FP_MMX_TRANS:
1187 
1188 		/* default => 'SSE Packed Single' */
1189 	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
1190 	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
1191 
1192 		/* default => 'all fused micro-ops' */
1193 	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
1194 
1195 		/* default => 'all transitions' */
1196 	case PMC_EV_P6_EMON_EST_TRANS:
1197 		break;
1198 
1199 	case PMC_EV_P6_MMX_UOPS_EXEC:
1200 		evmask = 0x0F;		/* only value allowed */
1201 		break;
1202 
1203 	default:
1204 
1205 		/*
1206 		 * For all other events, set the default event mask
1207 		 * to a logical OR of all the allowed event mask bits.
1208 		 */
1209 
1210 		if (evmask == 0 && pmask) {
1211 			for (pm = pmask; pm->pm_name; pm++)
1212 				evmask |= pm->pm_value;
1213 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1214 		}
1215 
1216 		break;
1217 	}
1218 
1219 	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1220 		pmc_config->pm_p6_config |= P6_EVSEL_TO_UMASK(evmask);
1221 
1222 	return 0;
1223 }
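/*
 * Example (event name illustrative): "p6-l2-ld,umask=m+e+s,cmask=2,usr"
 * counts L2 data loads of lines in the M, E or S MESI states, applies a
 * counter mask of 2, and restricts counting to user mode.
 */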
1224 
1225 /*
1226  * Pentium style PMCs
1227  */
1228 
1229 static struct pmc_event_alias p5_aliases[] = {
1230 	EV_ALIAS("cycles", "tsc"),
1231 	EV_ALIAS(NULL, NULL)
1232 };
1233 
1234 static int
1235 p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
1236     struct pmc_op_pmcallocate *pmc_config)
1237 {
1238 	(void) pe; (void) ctrspec; (void) pmc_config; return -1; /* P5 PMCs unsupported */
1239 }
1240 
1241 #elif	__amd64__
1242 
1243 /*
1244  * AMD K8 PMCs.
1245  *
1246  * These are very similar to AMD K7 PMCs, but support more kinds of
1247  * events.
1248  */
1249 
1250 static struct pmc_event_alias k8_aliases[] = {
1251 	EV_ALIAS("cycles", "tsc"),
1252 	EV_ALIAS(NULL, NULL)
1253 };
1254 
1255 #define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))
1256 
1257 /*
1258  * Parsing tables
1259  */
1260 
1261 /* fp dispatched fpu ops */
1262 static const struct pmc_masks k8_mask_fdfo[] = {
1263 	__K8MASK(add-pipe-excluding-junk-ops,	0),
1264 	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
1265 	__K8MASK(store-pipe-excluding-junk-ops,	2),
1266 	__K8MASK(add-pipe-junk-ops,		3),
1267 	__K8MASK(multiply-pipe-junk-ops,	4),
1268 	__K8MASK(store-pipe-junk-ops,		5),
1269 	NULLMASK
1270 };
1271 
1272 /* ls segment register loads */
1273 static const struct pmc_masks k8_mask_lsrl[] = {
1274 	__K8MASK(es,	0),
1275 	__K8MASK(cs,	1),
1276 	__K8MASK(ss,	2),
1277 	__K8MASK(ds,	3),
1278 	__K8MASK(fs,	4),
1279 	__K8MASK(gs,	5),
1280 	__K8MASK(hs,	6),
1281 	NULLMASK
1282 };
1283 
1284 /* ls locked operation */
1285 static const struct pmc_masks k8_mask_llo[] = {
1286 	__K8MASK(locked-instructions,	0),
1287 	__K8MASK(cycles-in-request,	1),
1288 	__K8MASK(cycles-to-complete,	2),
1289 	NULLMASK
1290 };
1291 
1292 /* dc refill from {l2,system} and dc copyback */
1293 static const struct pmc_masks k8_mask_dc[] = {
1294 	__K8MASK(invalid,	0),
1295 	__K8MASK(shared,	1),
1296 	__K8MASK(exclusive,	2),
1297 	__K8MASK(owner,		3),
1298 	__K8MASK(modified,	4),
1299 	NULLMASK
1300 };
1301 
1302 /* dc one bit ecc error */
1303 static const struct pmc_masks k8_mask_dobee[] = {
1304 	__K8MASK(scrubber,	0),
1305 	__K8MASK(piggyback,	1),
1306 	NULLMASK
1307 };
1308 
1309 /* dc dispatched prefetch instructions */
1310 static const struct pmc_masks k8_mask_ddpi[] = {
1311 	__K8MASK(load,	0),
1312 	__K8MASK(store,	1),
1313 	__K8MASK(nta,	2),
1314 	NULLMASK
1315 };
1316 
1317 /* dc dcache accesses by locks */
1318 static const struct pmc_masks k8_mask_dabl[] = {
1319 	__K8MASK(accesses,	0),
1320 	__K8MASK(misses,	1),
1321 	NULLMASK
1322 };
1323 
1324 /* bu internal l2 request */
1325 static const struct pmc_masks k8_mask_bilr[] = {
1326 	__K8MASK(ic-fill,	0),
1327 	__K8MASK(dc-fill,	1),
1328 	__K8MASK(tlb-reload,	2),
1329 	__K8MASK(tag-snoop,	3),
1330 	__K8MASK(cancelled,	4),
1331 	NULLMASK
1332 };
1333 
1334 /* bu fill request l2 miss */
1335 static const struct pmc_masks k8_mask_bfrlm[] = {
1336 	__K8MASK(ic-fill,	0),
1337 	__K8MASK(dc-fill,	1),
1338 	__K8MASK(tlb-reload,	2),
1339 	NULLMASK
1340 };
1341 
1342 /* bu fill into l2 */
1343 static const struct pmc_masks k8_mask_bfil[] = {
1344 	__K8MASK(dirty-l2-victim,	0),
1345 	__K8MASK(victim-from-l2,	1),
1346 	NULLMASK
1347 };
1348 
1349 /* fr retired fpu instructions */
1350 static const struct pmc_masks k8_mask_frfi[] = {
1351 	__K8MASK(x87,			0),
1352 	__K8MASK(mmx-3dnow,		1),
1353 	__K8MASK(packed-sse-sse2,	2),
1354 	__K8MASK(scalar-sse-sse2,	3),
1355 	NULLMASK
1356 };
1357 
1358 /* fr retired fastpath double op instructions */
1359 static const struct pmc_masks k8_mask_frfdoi[] = {
1360 	__K8MASK(low-op-pos-0,		0),
1361 	__K8MASK(low-op-pos-1,		1),
1362 	__K8MASK(low-op-pos-2,		2),
1363 	NULLMASK
1364 };
1365 
1366 /* fr fpu exceptions */
1367 static const struct pmc_masks k8_mask_ffe[] = {
1368 	__K8MASK(x87-reclass-microfaults,	0),
1369 	__K8MASK(sse-retype-microfaults,	1),
1370 	__K8MASK(sse-reclass-microfaults,	2),
1371 	__K8MASK(sse-and-x87-microtraps,	3),
1372 	NULLMASK
1373 };
1374 
1375 /* nb memory controller page access event */
1376 static const struct pmc_masks k8_mask_nmcpae[] = {
1377 	__K8MASK(page-hit,	0),
1378 	__K8MASK(page-miss,	1),
1379 	__K8MASK(page-conflict,	2),
1380 	NULLMASK
1381 };
1382 
1383 /* nb memory controller turnaround */
1384 static const struct pmc_masks k8_mask_nmct[] = {
1385 	__K8MASK(dimm-turnaround,		0),
1386 	__K8MASK(read-to-write-turnaround,	1),
1387 	__K8MASK(write-to-read-turnaround,	2),
1388 	NULLMASK
1389 };
1390 
1391 /* nb memory controller bypass saturation */
1392 static const struct pmc_masks k8_mask_nmcbs[] = {
1393 	__K8MASK(memory-controller-hi-pri-bypass,	0),
1394 	__K8MASK(memory-controller-lo-pri-bypass,	1),
1395 	__K8MASK(dram-controller-interface-bypass,	2),
1396 	__K8MASK(dram-controller-queue-bypass,		3),
1397 	NULLMASK
1398 };
1399 
1400 /* nb sized commands */
1401 static const struct pmc_masks k8_mask_nsc[] = {
1402 	__K8MASK(nonpostwrszbyte,	0),
1403 	__K8MASK(nonpostwrszdword,	1),
1404 	__K8MASK(postwrszbyte,		2),
1405 	__K8MASK(postwrszdword,		3),
1406 	__K8MASK(rdszbyte,		4),
1407 	__K8MASK(rdszdword,		5),
1408 	__K8MASK(rdmodwr,		6),
1409 	NULLMASK
1410 };
1411 
1412 /* nb probe result */
1413 static const struct pmc_masks k8_mask_npr[] = {
1414 	__K8MASK(probe-miss,		0),
1415 	__K8MASK(probe-hit,		1),
1416 	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
1417 	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
1418 	NULLMASK
1419 };
1420 
1421 /* nb hypertransport bus bandwidth */
1422 static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
1423 	__K8MASK(command,	0),
1424 	__K8MASK(data, 	1),
1425 	__K8MASK(buffer-release, 2),
1426 	__K8MASK(nop,	3),
1427 	NULLMASK
1428 };
1429 
1430 #undef	__K8MASK
1431 
1432 #define	K8_KW_COUNT	"count"
1433 #define	K8_KW_EDGE	"edge"
1434 #define	K8_KW_INV	"inv"
1435 #define	K8_KW_MASK	"mask"
1436 #define	K8_KW_OS	"os"
1437 #define	K8_KW_USR	"usr"
1438 
1439 static int
1440 k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
1441     struct pmc_op_pmcallocate *pmc_config)
1442 {
1443 	char 		*e, *p, *q;
1444 	int 		n;
1445 	uint32_t	count, evmask;
1446 	const struct pmc_masks	*pm, *pmask;
1447 
1448 	pmc_config->pm_caps |= PMC_CAP_READ;
1449 	pmc_config->pm_amd_config = 0;
1450 
1451 	if (pe == PMC_EV_TSC_TSC) {
1452 		/* TSC events must be unqualified. */
1453 		if (ctrspec && *ctrspec != '\0')
1454 			return -1;
1455 		return 0;
1456 	}
1457 
1458 	pmask = NULL;
1459 	evmask = 0;
1460 
1461 #define	__K8SETMASK(M) pmask = k8_mask_##M
1462 
1463 	/* setup parsing tables */
1464 	switch (pe) {
1465 	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1466 		__K8SETMASK(fdfo);
1467 		break;
1468 	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
1469 		__K8SETMASK(lsrl);
1470 		break;
1471 	case PMC_EV_K8_LS_LOCKED_OPERATION:
1472 		__K8SETMASK(llo);
1473 		break;
1474 	case PMC_EV_K8_DC_REFILL_FROM_L2:
1475 	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
1476 	case PMC_EV_K8_DC_COPYBACK:
1477 		__K8SETMASK(dc);
1478 		break;
1479 	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
1480 		__K8SETMASK(dobee);
1481 		break;
1482 	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
1483 		__K8SETMASK(ddpi);
1484 		break;
1485 	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1486 		__K8SETMASK(dabl);
1487 		break;
1488 	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
1489 		__K8SETMASK(bilr);
1490 		break;
1491 	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
1492 		__K8SETMASK(bfrlm);
1493 		break;
1494 	case PMC_EV_K8_BU_FILL_INTO_L2:
1495 		__K8SETMASK(bfil);
1496 		break;
1497 	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1498 		__K8SETMASK(frfi);
1499 		break;
1500 	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1501 		__K8SETMASK(frfdoi);
1502 		break;
1503 	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1504 		__K8SETMASK(ffe);
1505 		break;
1506 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
1507 		__K8SETMASK(nmcpae);
1508 		break;
1509 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
1510 		__K8SETMASK(nmct);
1511 		break;
1512 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
1513 		__K8SETMASK(nmcbs);
1514 		break;
1515 	case PMC_EV_K8_NB_SIZED_COMMANDS:
1516 		__K8SETMASK(nsc);
1517 		break;
1518 	case PMC_EV_K8_NB_PROBE_RESULT:
1519 		__K8SETMASK(npr);
1520 		break;
1521 	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
1522 	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
1523 	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
1524 		__K8SETMASK(nhbb);
1525 		break;
1526 
1527 	default:
1528 		break;		/* no options defined */
1529 	}
1530 
1531 	pmc_config->pm_caps |= PMC_CAP_WRITE;
1532 
1533 	while ((p = strsep(&ctrspec, ",")) != NULL) {
1534 		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
1535 			q = strchr(p, '=');
1536 			if (*++q == '\0') /* skip '=' */
1537 				return -1;
1538 
1539 			count = strtol(q, &e, 0);
1540 			if (e == q || *e != '\0')
1541 				return -1;
1542 
1543 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1544 			pmc_config->pm_amd_config |= K8_PMC_TO_COUNTER(count);
1545 
1546 		} else if (KWMATCH(p, K8_KW_EDGE)) {
1547 			pmc_config->pm_caps |= PMC_CAP_EDGE;
1548 		} else if (KWMATCH(p, K8_KW_INV)) {
1549 			pmc_config->pm_caps |= PMC_CAP_INVERT;
1550 		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
1551 			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1552 				return -1;
1553 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1554 		} else if (KWMATCH(p, K8_KW_OS)) {
1555 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1556 		} else if (KWMATCH(p, K8_KW_USR)) {
1557 			pmc_config->pm_caps |= PMC_CAP_USER;
1558 		} else
1559 			return -1;
1560 	}
1561 
1562 	/* other post processing */
1563 
1564 	switch (pe) {
1565 	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1566 	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
1567 	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
1568 	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1569 	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1570 	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1571 		/* XXX only available in rev B and later */
1572 		break;
1573 	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1574 		/* XXX only available in rev C and later */
1575 		break;
1576 	case PMC_EV_K8_LS_LOCKED_OPERATION:
1577 		/* XXX on CPU revisions A and B the event mask must be zero */
1578 		if (evmask & (evmask - 1)) /* > 1 bit set */
1579 			return -1;
1580 		if (evmask == 0) {
1581 			evmask = 0x01; /* Rev C and later: #instrs */
1582 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1583 		}
1584 		break;
1585 	default:
1586 		if (evmask == 0 && pmask != NULL) {
1587 			for (pm = pmask; pm->pm_name; pm++)
1588 				evmask |= pm->pm_value;
1589 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1590 		}
1591 	}
1592 
1593 	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1594 		pmc_config->pm_amd_config = K8_PMC_TO_UNITMASK(evmask);
1595 
1596 	return 0;
1597 }
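/*
 * Example (event name illustrative): "k8-dc-refill-from-l2,mask=exclusive+shared,os"
 * counts L2-to-L1 data cache refills of lines in the exclusive or shared
 * MOESI states, in kernel mode only.
 */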
1598 #endif
1599 
1600 /*
1601  * API entry points
1602  */
1603 
1604 int
1605 pmc_init(void)
1606 {
1607 	int error, pmc_mod_id;
1608 	uint32_t abi_version;
1609 	struct module_stat pmc_modstat;
1610 
1611 	if (pmc_syscall != -1) /* already inited */
1612 		return 0;
1613 
1614 	/* retrieve the system call number from the KLD */
1615 	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
1616 		return -1;
1617 
1618 	pmc_modstat.version = sizeof(struct module_stat);
1619 	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
1620 		return -1;
1621 
1622 	pmc_syscall = pmc_modstat.data.intval;
1623 
1624 	/* check ABI version against compiled-in version */
1625 	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
1626 		return (pmc_syscall = -1);
1627 
1628 	/* ignore patch numbers for the comparison */
1629 	if ((abi_version & 0xFFFF0000) != (PMC_VERSION & 0xFFFF0000)) {
1630 		errno  = EPROGMISMATCH;
1631 		return (pmc_syscall = -1);
1632 	}
1633 
1634 	if (PMC_CALL(GETCPUINFO, &cpu_info) < 0)
1635 		return (pmc_syscall = -1);
1636 
1637 	/* set parser pointer */
1638 	switch (cpu_info.pm_cputype) {
1639 #if	__i386__
1640 	case PMC_CPU_AMD_K7:
1641 		pmc_mdep_event_aliases = k7_aliases;
1642 		pmc_mdep_allocate_pmc = k7_allocate_pmc;
1643 		break;
1644 	case PMC_CPU_INTEL_P5:
1645 		pmc_mdep_event_aliases = p5_aliases;
1646 		pmc_mdep_allocate_pmc = p5_allocate_pmc;
1647 		break;
1648 	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
1649 	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
1650 	case PMC_CPU_INTEL_PIII:
1651 	case PMC_CPU_INTEL_PM:
1652 		pmc_mdep_event_aliases = p6_aliases;
1653 		pmc_mdep_allocate_pmc = p6_allocate_pmc;
1654 		break;
1655 	case PMC_CPU_INTEL_PIV:
1656 		pmc_mdep_event_aliases = p4_aliases;
1657 		pmc_mdep_allocate_pmc = p4_allocate_pmc;
1658 		break;
1659 #elif	__amd64__
1660 	case PMC_CPU_AMD_K8:
1661 		pmc_mdep_event_aliases = k8_aliases;
1662 		pmc_mdep_allocate_pmc = k8_allocate_pmc;
1663 		break;
1664 #endif
1665 
1666 	default:
1667 		/*
1668 		 * A CPU type that this version of the library knows nothing
1669 		 * about.  This shouldn't happen, since the ABI version check
1670 		 * above should have caught it.
1671 		 */
1672 		errno = ENXIO;
1673 		return (pmc_syscall = -1);
1674 	}
1675 
1676 	return 0;
1677 }
1678 
1679 int
1680 pmc_allocate(const char *ctrspec, enum pmc_mode mode,
1681     uint32_t flags, int cpu, pmc_id_t *pmcid)
1682 {
1683 	int retval;
1684 	enum pmc_event pe;
1685 	char *r, *spec_copy;
1686 	const char *ctrname;
1687 	const struct pmc_event_alias *p;
1688 	struct pmc_op_pmcallocate pmc_config;
1689 
1690 	spec_copy = NULL;
1691 	retval    = -1;
1692 
1693 	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
1694 	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
1695 		errno = EINVAL;
1696 		goto out;
1697 	}
1698 
1699 	/* replace an event alias with the canonical event specifier */
1700 	if (pmc_mdep_event_aliases)
1701 		for (p = pmc_mdep_event_aliases; p->pm_alias; p++)
1702 			if (!strcmp(ctrspec, p->pm_alias)) {
1703 				spec_copy = strdup(p->pm_spec);
1704 				break;
1705 			}
1706 
1707 	if (spec_copy == NULL)
1708 		spec_copy = strdup(ctrspec);
1709 
1710 	r = spec_copy;
1711 	ctrname = strsep(&r, ",");
1712 
1713 	/* look for the given counter name */
1714 
1715 	for (pe = PMC_EVENT_FIRST; pe < (PMC_EVENT_LAST+1); pe++)
1716 		if (!strcmp(ctrname, pmc_event_table[pe].pm_ev_name))
1717 			break;
1718 
1719 	if (pe > PMC_EVENT_LAST) {
1720 		errno = EINVAL;
1721 		goto out;
1722 	}
1723 
1724 	bzero(&pmc_config, sizeof(pmc_config));
1725 	pmc_config.pm_ev    = pmc_event_table[pe].pm_ev_code;
1726 	pmc_config.pm_class = pmc_event_table[pe].pm_ev_class;
1727 	pmc_config.pm_cpu   = cpu;
1728 	pmc_config.pm_mode  = mode;
1729 	pmc_config.pm_flags = flags;
1730 
1731 	if (PMC_IS_SAMPLING_MODE(mode))
1732 		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
1733 
1734 	if (pmc_mdep_allocate_pmc(pe, r, &pmc_config) < 0) {
1735 		errno = EINVAL;
1736 		goto out;
1737 	}
1738 
1739 	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
1740 		goto out;
1741 
1742 	*pmcid = pmc_config.pm_pmcid;
1743 
1744 	retval = 0;
1745 
1746  out:
1747 	if (spec_copy)
1748 		free(spec_copy);
1749 
1750 	return retval;
1751 }
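/*
 * Typical usage (a sketch only; error checking omitted, event and CPU
 * number chosen for illustration):
 *
 *	pmc_id_t pmcid;
 *	pmc_value_t v;
 *
 *	pmc_init();
 *	pmc_allocate("cycles", PMC_MODE_SC, 0, 0, &pmcid);
 *	pmc_start(pmcid);
 *	... run the workload to be measured ...
 *	pmc_read(pmcid, &v);
 *	pmc_stop(pmcid);
 *	pmc_release(pmcid);
 *
 * For the process-virtual modes (PMC_MODE_TC/PMC_MODE_TS), the PMC is
 * attached to a target process with pmc_attach() below.
 */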
1752 
1753 int
1754 pmc_attach(pmc_id_t pmc, pid_t pid)
1755 {
1756 	struct pmc_op_pmcattach pmc_attach_args;
1757 
1758 	pmc_attach_args.pm_pmc = pmc;
1759 	pmc_attach_args.pm_pid = pid;
1760 
1761 	return PMC_CALL(PMCATTACH, &pmc_attach_args);
1762 }
1763 
1764 int
1765 pmc_detach(pmc_id_t pmc, pid_t pid)
1766 {
1767 	struct pmc_op_pmcattach pmc_detach_args;
1768 
1769 	pmc_detach_args.pm_pmc = pmc;
1770 	pmc_detach_args.pm_pid = pid;
1771 
1772 	return PMC_CALL(PMCDETACH, &pmc_detach_args);
1773 }
1774 
1775 int
1776 pmc_release(pmc_id_t pmc)
1777 {
1778 	struct pmc_op_simple	pmc_release_args;
1779 
1780 	pmc_release_args.pm_pmcid = pmc;
1781 
1782 	return PMC_CALL(PMCRELEASE, &pmc_release_args);
1783 }
1784 
1785 int
1786 pmc_start(pmc_id_t pmc)
1787 {
1788 	struct pmc_op_simple	pmc_start_args;
1789 
1790 	pmc_start_args.pm_pmcid = pmc;
1791 	return PMC_CALL(PMCSTART, &pmc_start_args);
1792 }
1793 
1794 int
1795 pmc_stop(pmc_id_t pmc)
1796 {
1797 	struct pmc_op_simple	pmc_stop_args;
1798 
1799 	pmc_stop_args.pm_pmcid = pmc;
1800 	return PMC_CALL(PMCSTOP, &pmc_stop_args);
1801 }
1802 
1803 int
1804 pmc_read(pmc_id_t pmc, pmc_value_t *value)
1805 {
1806 	struct pmc_op_pmcrw pmc_read_op;
1807 
1808 	pmc_read_op.pm_pmcid = pmc;
1809 	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
1810 	pmc_read_op.pm_value = -1;
1811 
1812 	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
1813 		return -1;
1814 
1815 	*value = pmc_read_op.pm_value;
1816 
1817 	return 0;
1818 }
1819 
1820 int
1821 pmc_write(pmc_id_t pmc, pmc_value_t value)
1822 {
1823 	struct pmc_op_pmcrw pmc_write_op;
1824 
1825 	pmc_write_op.pm_pmcid = pmc;
1826 	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
1827 	pmc_write_op.pm_value = value;
1828 
1829 	return PMC_CALL(PMCRW, &pmc_write_op);
1830 }
1831 
1832 int
1833 pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
1834 {
1835 	struct pmc_op_pmcrw pmc_rw_op;
1836 
1837 	pmc_rw_op.pm_pmcid = pmc;
1838 	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
1839 	pmc_rw_op.pm_value = newvalue;
1840 
1841 	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
1842 		return -1;
1843 
1844 	*oldvaluep = pmc_rw_op.pm_value;
1845 
1846 	return 0;
1847 }
1848 
1849 int
1850 pmc_set(pmc_id_t pmc, pmc_value_t value)
1851 {
1852 	struct pmc_op_pmcsetcount sc;
1853 
1854 	sc.pm_pmcid = pmc;
1855 	sc.pm_count = value;
1856 
1857 	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
1858 		return -1;
1859 
1860 	return 0;
1861 
1862 }
1863 
1864 int
1865 pmc_configure_logfile(int fd)
1866 {
1867 	struct pmc_op_configurelog cla;
1868 
1869 	cla.pm_logfd = fd;
1870 	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
1871 		return -1;
1872 
1873 	return 0;
1874 }
1875 
1876 int
1877 pmc_get_driver_stats(struct pmc_op_getdriverstats *gms)
1878 {
1879 	return PMC_CALL(GETDRIVERSTATS, gms);
1880 }
1881 
1882 int
1883 pmc_ncpu(void)
1884 {
1885 	if (pmc_syscall == -1) {
1886 		errno = ENXIO;
1887 		return -1;
1888 	}
1889 
1890 	return cpu_info.pm_ncpu;
1891 }
1892 
1893 int
1894 pmc_npmc(int cpu)
1895 {
1896 	if (pmc_syscall == -1) {
1897 		errno = ENXIO;
1898 		return -1;
1899 	}
1900 
1901 	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
1902 		errno = EINVAL;
1903 		return -1;
1904 	}
1905 
1906 	return cpu_info.pm_npmc;
1907 }
1908 
1909 int
1910 pmc_enable(int cpu, int pmc)
1911 {
1912 	struct pmc_op_pmcadmin ssa;
1913 
1914 	ssa.pm_cpu = cpu;
1915 	ssa.pm_pmc = pmc;
1916 	ssa.pm_state = PMC_STATE_FREE;
1917 	return PMC_CALL(PMCADMIN, &ssa);
1918 }
1919 
1920 int
1921 pmc_disable(int cpu, int pmc)
1922 {
1923 	struct pmc_op_pmcadmin ssa;
1924 
1925 	ssa.pm_cpu = cpu;
1926 	ssa.pm_pmc = pmc;
1927 	ssa.pm_state = PMC_STATE_DISABLED;
1928 	return PMC_CALL(PMCADMIN, &ssa);
1929 }
1930 
1931 
1932 int
1933 pmc_pmcinfo(int cpu, struct pmc_op_getpmcinfo **ppmci)
1934 {
1935 	int nbytes, npmc, saved_errno;
1936 	struct pmc_op_getpmcinfo *pmci;
1937 
1938 	if ((npmc = pmc_npmc(cpu)) < 0)
1939 		return -1;
1940 
1941 	nbytes = sizeof(struct pmc_op_getpmcinfo) +
1942 	    npmc * sizeof(struct pmc_info);
1943 
1944 	if ((pmci = calloc(1, nbytes)) == NULL)
1945 		return -1;
1946 
1947 	pmci->pm_cpu  = cpu;
1948 
1949 	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
1950 		saved_errno = errno;
1951 		free(pmci);
1952 		errno = saved_errno;
1953 		return -1;
1954 	}
1955 
1956 	*ppmci = pmci;
1957 	return 0;
1958 }
1959 
1960 int
1961 pmc_cpuinfo(const struct pmc_op_getcpuinfo **pci)
1962 {
1963 	if (pmc_syscall == -1) {
1964 		errno = ENXIO;
1965 		return -1;
1966 	}
1967 
1968 	*pci = &cpu_info;
1969 	return 0;
1970 }
1971 
1972 const char *
1973 pmc_name_of_cputype(enum pmc_cputype cp)
1974 {
1975 	if ((int) cp >= PMC_CPU_FIRST &&
1976 	    cp <= PMC_CPU_LAST)
1977 		return pmc_cputype_names[cp];
1978 	errno = EINVAL;
1979 	return NULL;
1980 }
1981 
1982 const char *
1983 pmc_name_of_class(enum pmc_class pc)
1984 {
1985 	if ((int) pc >= PMC_CLASS_FIRST &&
1986 	    pc <= PMC_CLASS_LAST)
1987 		return pmc_class_names[pc];
1988 
1989 	errno = EINVAL;
1990 	return NULL;
1991 }
1992 
1993 const char *
1994 pmc_name_of_mode(enum pmc_mode pm)
1995 {
1996 	if ((int) pm >= PMC_MODE_FIRST &&
1997 	    pm <= PMC_MODE_LAST)
1998 		return pmc_mode_names[pm];
1999 
2000 	errno = EINVAL;
2001 	return NULL;
2002 }
2003 
2004 const char *
2005 pmc_name_of_event(enum pmc_event pe)
2006 {
2007 	if ((int) pe >= PMC_EVENT_FIRST &&
2008 	    pe <= PMC_EVENT_LAST)
2009 		return pmc_event_table[pe].pm_ev_name;
2010 
2011 	errno = EINVAL;
2012 	return NULL;
2013 }
2014 
2015 const char *
2016 pmc_name_of_state(enum pmc_state ps)
2017 {
2018 	if ((int) ps >= PMC_STATE_FIRST &&
2019 	    ps <= PMC_STATE_LAST)
2020 		return pmc_state_names[ps];
2021 
2022 	errno = EINVAL;
2023 	return NULL;
2024 }
2025 
2026 const char *
2027 pmc_name_of_disposition(enum pmc_disp pd)
2028 {
2029 	if ((int) pd >= PMC_DISP_FIRST &&
2030 	    pd <= PMC_DISP_LAST)
2031 		return pmc_disposition_names[pd];
2032 
2033 	errno = EINVAL;
2034 	return NULL;
2035 }
2036 
2037 const char *
2038 pmc_name_of_capability(enum pmc_caps cap)
2039 {
2040 	int i;
2041 
2042 	/*
2043 	 * 'cap' should have a single bit set and should be in
2044 	 * range.
2045 	 */
2046 
2047 	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
2048 	    cap > PMC_CAP_LAST) {
2049 		errno = EINVAL;
2050 		return NULL;
2051 	}
2052 
2053 	i = ffs(cap);
2054 
2055 	return pmc_capability_names[i - 1];
2056 }
2057 
2058 /*
2059  * Return the list of events known to a given PMC class.  'cl' is the
2060  * PMC class identifier, 'eventnames' is filled in with a list of
2061  * 'const char *' pointers to the event names, and 'nevents' with the
2062  * number of event names returned.
2063  *
2064  * The space for 'eventnames' is allocated using malloc(3).  The caller
2065  * is responsible for freeing this space when done.
2066  */
2067 
2068 int
2069 pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
2070     int *nevents)
2071 {
2072 	int count;
2073 	const char **names;
2074 	const struct pmc_event_descr *ev;
2075 
2076 	switch (cl)
2077 	{
2078 	case PMC_CLASS_TSC:
2079 		ev = &pmc_event_table[PMC_EV_TSC_TSC];
2080 		count = 1;
2081 		break;
2082 	case PMC_CLASS_K7:
2083 		ev = &pmc_event_table[PMC_EV_K7_FIRST];
2084 		count = PMC_EV_K7_LAST - PMC_EV_K7_FIRST + 1;
2085 		break;
2086 	case PMC_CLASS_K8:
2087 		ev = &pmc_event_table[PMC_EV_K8_FIRST];
2088 		count = PMC_EV_K8_LAST - PMC_EV_K8_FIRST + 1;
2089 		break;
2090 	case PMC_CLASS_P5:
2091 		ev = &pmc_event_table[PMC_EV_P5_FIRST];
2092 		count = PMC_EV_P5_LAST - PMC_EV_P5_FIRST + 1;
2093 		break;
2094 	case PMC_CLASS_P6:
2095 		ev = &pmc_event_table[PMC_EV_P6_FIRST];
2096 		count = PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1;
2097 		break;
2098 	case PMC_CLASS_P4:
2099 		ev = &pmc_event_table[PMC_EV_P4_FIRST];
2100 		count = PMC_EV_P4_LAST - PMC_EV_P4_FIRST + 1;
2101 		break;
2102 	default:
2103 		errno = EINVAL;
2104 		return -1;
2105 	}
2106 
2107 	if ((names = malloc(count * sizeof(const char *))) == NULL)
2108 		return -1;
2109 
2110 	*eventnames = names;
2111 	*nevents = count;
2112 
2113 	for (;count--; ev++, names++)
2114 		*names = ev->pm_ev_name;
2115 	return 0;
2116 }
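/*
 * Example (a sketch):
 *
 *	const char **names;
 *	int i, nev;
 *
 *	if (pmc_event_names_of_class(PMC_CLASS_TSC, &names, &nev) == 0) {
 *		for (i = 0; i < nev; i++)
 *			printf("%s\n", names[i]);
 *		free(names);
 *	}
 */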
2117 
2118 /*
2119  * Architecture specific APIs
2120  */
2121 
2122 #if	__i386__ || __amd64__
2123 
2124 int
2125 pmc_x86_get_msr(pmc_id_t pmc, uint32_t *msr)
2126 {
2127 	struct pmc_op_x86_getmsr gm;
2128 
2129 	gm.pm_pmcid = pmc;
2130 	if (PMC_CALL(PMCX86GETMSR, &gm) < 0)
2131 		return -1;
2132 	*msr = gm.pm_msr;
2133 	return 0;
2134 }
2135 
2136 #endif
2137