xref: /freebsd/lib/libpmc/libpmc.c (revision d056fa046c6a91b90cd98165face0e42a33a5173)
1 /*-
2  * Copyright (c) 2003-2006 Joseph Koshy
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/types.h>
31 #include <sys/module.h>
32 #include <sys/pmc.h>
33 #include <sys/syscall.h>
34 
35 #include <ctype.h>
36 #include <errno.h>
37 #include <fcntl.h>
38 #include <pmc.h>
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <string.h>
42 #include <strings.h>
43 #include <unistd.h>
44 
/*
 * Function prototypes.
 *
 * One event-specifier parser exists per supported CPU family; the
 * appropriate one is selected at pmc_init() time and invoked through
 * the pmc_mdep_allocate_pmc function pointer.
 */
#if defined(__i386__)
static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__i386__)
static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif

/*
 * Issue a PMC operation to the kernel via the hooked system call.
 * 'pmc_syscall' holds the syscall number discovered by pmc_init().
 */
#define PMC_CALL(cmd, params)				\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))
65 
/*
 * Event aliases provide a way for the user to ask for generic events
 * like "cache-misses", or "instructions-retired".  These aliases are
 * mapped to the appropriate canonical event descriptions using a
 * lookup table.
 */

struct pmc_event_alias {
	const char	*pm_alias;	/* user-visible alias name */
	const char	*pm_spec;	/* canonical event specification */
};

/* Alias table for the CPU type detected by pmc_init(). */
static const struct pmc_event_alias *pmc_mdep_event_aliases;

/*
 * The pmc_event_descr table maps symbolic names known to the user
 * to integer codes used by the PMC KLD.
 */

struct pmc_event_descr {
	const char	*pm_ev_name;	/* symbolic event name */
	enum pmc_event	pm_ev_code;	/* kernel event code */
	enum pmc_class	pm_ev_class;	/* PMC class of the event */
};

/* Expanded from the master event list in <sys/pmc.h>. */
static const struct pmc_event_descr
pmc_event_table[] =
{
#undef  __PMC_EV
#define	__PMC_EV(C,N,EV) { #EV, PMC_EV_ ## C ## _ ## N, PMC_CLASS_ ## C },
	__PMC_EVENTS()
};

/*
 * Mapping tables, mapping enumeration values to human readable
 * strings.  Each is generated from the corresponding master list in
 * <sys/pmc.h>, so the indices match the enumeration values.
 */

static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

static const char * pmc_class_names[] = {
#undef	__PMC_CLASS
#define __PMC_CLASS(C)	#C ,
	__PMC_CLASSES()
};

static const char * pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, D) #S ,
	__PMC_CPUS()
};

static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

static const char * pmc_mode_names[] = {
#undef  __PMC_MODE
#define __PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

static const char * pmc_state_names[] = {
#undef  __PMC_STATE
#define __PMC_STATE(S) #S ,
	__PMC_STATES()
};

static int pmc_syscall = -1;		/* filled in by pmc_init() */

static struct pmc_cpuinfo cpu_info;	/* filled in by pmc_init() */


/* Architecture dependent event parsing */
static int (*pmc_mdep_allocate_pmc)(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);

/* Event masks for events */
struct pmc_masks {
	const char	*pm_name;	/* mask keyword */
	const uint32_t	pm_value;	/* bit(s) to OR into the event mask */
};
#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
#define	NULLMASK	PMCMASK(NULL,0)	/* mask table terminator */
156 
157 #if defined(__amd64__) || defined(__i386__)
158 static int
159 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask)
160 {
161 	const struct pmc_masks *pm;
162 	char *q, *r;
163 	int c;
164 
165 	if (pmask == NULL)	/* no mask keywords */
166 		return -1;
167 	q = strchr(p, '='); 	/* skip '=' */
168 	if (*++q == '\0')	/* no more data */
169 		return -1;
170 	c = 0;			/* count of mask keywords seen */
171 	while ((r = strsep(&q, "+")) != NULL) {
172 		for (pm = pmask; pm->pm_name && strcmp(r, pm->pm_name); pm++)
173 			;
174 		if (pm->pm_name == NULL) /* not found */
175 			return -1;
176 		*evmask |= pm->pm_value;
177 		c++;
178 	}
179 	return c;
180 }
181 #endif
182 
/* Case-insensitive match of a complete keyword. */
#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
/* Case-insensitive match of a keyword prefix, e.g. "count=". */
#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
/* Build one pmc_event_alias table entry. */
#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
186 
#if defined(__i386__)

/*
 * AMD K7 (Athlon) CPUs.
 */

/* Generic alias -> canonical K7 event specification mappings. */
static struct pmc_event_alias k7_aliases[] = {
	EV_ALIAS("branches",		"k7-retired-branches"),
	EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k7-dc-misses,mask=moesi"),
	EV_ALIAS("ic-misses",		"k7-ic-misses"),
	EV_ALIAS("instructions",	"k7-retired-instructions"),
	EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
	EV_ALIAS(NULL, NULL)
};

/* Qualifier keywords accepted in a K7 event specifier. */
#define	K7_KW_COUNT	"count"
#define	K7_KW_EDGE	"edge"
#define	K7_KW_INV	"inv"
#define	K7_KW_OS	"os"
#define	K7_KW_UNITMASK	"unitmask"
#define	K7_KW_USR	"usr"
210 
/*
 * Parse the event specifier for an AMD K7 PMC and fill in the
 * allocation request 'pmc_config'.
 *
 * 'ctrspec' is a comma-separated list of qualifiers: "count=N",
 * "edge", "inv", "os", "unitmask=<MOESI letters>" and "usr".  Note
 * that strsep() consumes 'ctrspec' destructively.
 *
 * Returns 0 on success, -1 if the specification is malformed.
 */
static int
k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char 		*e, *p, *q;
	int 		c, has_unitmask;
	uint32_t	count, unitmask;

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= PMC_CAP_READ;

	if (pe == PMC_EV_TSC_TSC) {
		/* TSC events must be unqualified. */
		if (ctrspec && *ctrspec != '\0')
			return -1;
		return 0;
	}

	/*
	 * Only the data-cache refill/writeback events take a MOESI
	 * unit mask; they default to all five cache-state bits set.
	 */
	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
	    pe == PMC_EV_K7_DC_WRITEBACKS) {
		has_unitmask = 1;
		unitmask = AMD_PMC_UNITMASK_MOESI;
	} else
		unitmask = has_unitmask = 0;

	pmc_config->pm_caps |= PMC_CAP_WRITE;

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return -1;

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return -1;

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K7_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K7_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, K7_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
			if (has_unitmask == 0)
				return -1;
			unitmask = 0;
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return -1;

			/* parse a '+'-separated list of m/o/e/s/i letters */
			while ((c = tolower(*q++)) != 0)
				if (c == 'm')
					unitmask |= AMD_PMC_UNITMASK_M;
				else if (c == 'o')
					unitmask |= AMD_PMC_UNITMASK_O;
				else if (c == 'e')
					unitmask |= AMD_PMC_UNITMASK_E;
				else if (c == 's')
					unitmask |= AMD_PMC_UNITMASK_S;
				else if (c == 'i')
					unitmask |= AMD_PMC_UNITMASK_I;
				else if (c == '+')
					continue;
				else
					return -1;

			/* at least one cache-state bit must be requested */
			if (unitmask == 0)
				return -1;

		} else if (KWMATCH(p, K7_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return -1;
	}

	if (has_unitmask) {
		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		pmc_config->pm_md.pm_amd.pm_amd_config |=
		    AMD_PMC_TO_UNITMASK(unitmask);
	}

	return 0;

}
301 
302 #endif
303 
#if defined(__amd64__) || defined(__i386__)

/*
 * AMD K8 PMCs.
 *
 * These are very similar to AMD K7 PMCs, but support more kinds of
 * events.
 */

/* Generic alias -> canonical K8 event specification mappings. */
static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k8-dc-miss"),
	EV_ALIAS("ic-misses",		"k8-ic-miss"),
	EV_ALIAS("instructions", 	"k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};
325 
/*
 * Build one K8 unit-mask table entry; 'V' is the bit number within
 * the event's unit mask field.  __K8MASK stringizes the bare keyword
 * token (which may contain '-' characters).
 */
#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables: one table of "mask=" keywords per K8 event that
 * accepts a unit mask qualifier.
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops,	0),
	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
	__K8MASK(store-pipe-excluding-junk-ops,	2),
	__K8MASK(add-pipe-junk-ops,		3),
	__K8MASK(multiply-pipe-junk-ops,	4),
	__K8MASK(store-pipe-junk-ops,		5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es,	0),
	__K8MASK(cs,	1),
	__K8MASK(ss,	2),
	__K8MASK(ds,	3),
	__K8MASK(fs,	4),
	__K8MASK(gs,	5),
	__K8MASK(hs,	6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions,	0),
	__K8MASK(cycles-in-request,	1),
	__K8MASK(cycles-to-complete,	2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback (MOESI cache states) */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid,	0),
	__K8MASK(shared,	1),
	__K8MASK(exclusive,	2),
	__K8MASK(owner,		3),
	__K8MASK(modified,	4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber,	0),
	__K8MASK(piggyback,	1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load,	0),
	__K8MASK(store,	1),
	__K8MASK(nta,	2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses,	0),
	__K8MASK(misses,	1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	__K8MASK(tag-snoop,	3),
	__K8MASK(cancelled,	4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim,	0),
	__K8MASK(victim-from-l2,	1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87,			0),
	__K8MASK(mmx-3dnow,		1),
	__K8MASK(packed-sse-sse2,	2),
	__K8MASK(scalar-sse-sse2,	3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0,		0),
	__K8MASK(low-op-pos-1,		1),
	__K8MASK(low-op-pos-2,		2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults,	0),
	__K8MASK(sse-retype-microfaults,	1),
	__K8MASK(sse-reclass-microfaults,	2),
	__K8MASK(sse-and-x87-microtraps,	3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit,	0),
	__K8MASK(page-miss,	1),
	__K8MASK(page-conflict,	2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround,		0),
	__K8MASK(read-to-write-turnaround,	1),
	__K8MASK(write-to-read-turnaround,	2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass,	0),
	__K8MASK(memory-controller-lo-pri-bypass,	1),
	__K8MASK(dram-controller-interface-bypass,	2),
	__K8MASK(dram-controller-queue-bypass,		3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte,	0),
	__K8MASK(nonpostwrszdword,	1),
	__K8MASK(postwrszbyte,		2),
	__K8MASK(postwrszdword,		3),
	__K8MASK(rdszbyte,		4),
	__K8MASK(rdszdword,		5),
	__K8MASK(rdmodwr,		6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss,		0),
	__K8MASK(probe-hit,		1),
	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command,	0),
	__K8MASK(data, 	1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop,	3),
	NULLMASK
};

#undef	__K8MASK

/* Qualifier keywords accepted in a K8 event specifier. */
#define	K8_KW_COUNT	"count"
#define	K8_KW_EDGE	"edge"
#define	K8_KW_INV	"inv"
#define	K8_KW_MASK	"mask"
#define	K8_KW_OS	"os"
#define	K8_KW_USR	"usr"
509 
510 static int
511 k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
512     struct pmc_op_pmcallocate *pmc_config)
513 {
514 	char 		*e, *p, *q;
515 	int 		n;
516 	uint32_t	count, evmask;
517 	const struct pmc_masks	*pm, *pmask;
518 
519 	pmc_config->pm_caps |= PMC_CAP_READ;
520 	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
521 
522 	if (pe == PMC_EV_TSC_TSC) {
523 		/* TSC events must be unqualified. */
524 		if (ctrspec && *ctrspec != '\0')
525 			return -1;
526 		return 0;
527 	}
528 
529 	pmask = NULL;
530 	evmask = 0;
531 
532 #define	__K8SETMASK(M) pmask = k8_mask_##M
533 
534 	/* setup parsing tables */
535 	switch (pe) {
536 	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
537 		__K8SETMASK(fdfo);
538 		break;
539 	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
540 		__K8SETMASK(lsrl);
541 		break;
542 	case PMC_EV_K8_LS_LOCKED_OPERATION:
543 		__K8SETMASK(llo);
544 		break;
545 	case PMC_EV_K8_DC_REFILL_FROM_L2:
546 	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
547 	case PMC_EV_K8_DC_COPYBACK:
548 		__K8SETMASK(dc);
549 		break;
550 	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
551 		__K8SETMASK(dobee);
552 		break;
553 	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
554 		__K8SETMASK(ddpi);
555 		break;
556 	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
557 		__K8SETMASK(dabl);
558 		break;
559 	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
560 		__K8SETMASK(bilr);
561 		break;
562 	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
563 		__K8SETMASK(bfrlm);
564 		break;
565 	case PMC_EV_K8_BU_FILL_INTO_L2:
566 		__K8SETMASK(bfil);
567 		break;
568 	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
569 		__K8SETMASK(frfi);
570 		break;
571 	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
572 		__K8SETMASK(frfdoi);
573 		break;
574 	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
575 		__K8SETMASK(ffe);
576 		break;
577 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
578 		__K8SETMASK(nmcpae);
579 		break;
580 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
581 		__K8SETMASK(nmct);
582 		break;
583 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
584 		__K8SETMASK(nmcbs);
585 		break;
586 	case PMC_EV_K8_NB_SIZED_COMMANDS:
587 		__K8SETMASK(nsc);
588 		break;
589 	case PMC_EV_K8_NB_PROBE_RESULT:
590 		__K8SETMASK(npr);
591 		break;
592 	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
593 	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
594 	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
595 		__K8SETMASK(nhbb);
596 		break;
597 
598 	default:
599 		break;		/* no options defined */
600 	}
601 
602 	pmc_config->pm_caps |= PMC_CAP_WRITE;
603 
604 	while ((p = strsep(&ctrspec, ",")) != NULL) {
605 		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
606 			q = strchr(p, '=');
607 			if (*++q == '\0') /* skip '=' */
608 				return -1;
609 
610 			count = strtol(q, &e, 0);
611 			if (e == q || *e != '\0')
612 				return -1;
613 
614 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
615 			pmc_config->pm_md.pm_amd.pm_amd_config |=
616 			    AMD_PMC_TO_COUNTER(count);
617 
618 		} else if (KWMATCH(p, K8_KW_EDGE)) {
619 			pmc_config->pm_caps |= PMC_CAP_EDGE;
620 		} else if (KWMATCH(p, K8_KW_INV)) {
621 			pmc_config->pm_caps |= PMC_CAP_INVERT;
622 		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
623 			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
624 				return -1;
625 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
626 		} else if (KWMATCH(p, K8_KW_OS)) {
627 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
628 		} else if (KWMATCH(p, K8_KW_USR)) {
629 			pmc_config->pm_caps |= PMC_CAP_USER;
630 		} else
631 			return -1;
632 	}
633 
634 	/* other post processing */
635 
636 	switch (pe) {
637 	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
638 	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
639 	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
640 	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
641 	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
642 	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
643 		/* XXX only available in rev B and later */
644 		break;
645 	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
646 		/* XXX only available in rev C and later */
647 		break;
648 	case PMC_EV_K8_LS_LOCKED_OPERATION:
649 		/* XXX CPU Rev A,B evmask is to be zero */
650 		if (evmask & (evmask - 1)) /* > 1 bit set */
651 			return -1;
652 		if (evmask == 0) {
653 			evmask = 0x01; /* Rev C and later: #instrs */
654 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
655 		}
656 		break;
657 	default:
658 		if (evmask == 0 && pmask != NULL) {
659 			for (pm = pmask; pm->pm_name; pm++)
660 				evmask |= pm->pm_value;
661 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
662 		}
663 	}
664 
665 	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
666 		pmc_config->pm_md.pm_amd.pm_amd_config =
667 		    AMD_PMC_TO_UNITMASK(evmask);
668 
669 	return 0;
670 }
671 
672 #endif
673 
#if defined(__amd64__) || defined(__i386__)

/*
 * Intel P4 PMCs
 */

/* Generic alias -> canonical P4 event specification mappings. */
static struct pmc_event_alias p4_aliases[] = {
	EV_ALIAS("branches",		"p4-branch-retired,mask=mmtp+mmtm"),
	EV_ALIAS("branch-mispredicts",	"p4-mispred-branch-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("instructions",
	    "p4-instr-retired,mask=nbogusntag+nbogustag"),
	EV_ALIAS("unhalted-cycles",	"p4-global-power-events"),
	EV_ALIAS(NULL, NULL)
};

/* Qualifier keywords accepted in a P4 event specifier. */
#define	P4_KW_ACTIVE	"active"
#define	P4_KW_ACTIVE_ANY "any"
#define	P4_KW_ACTIVE_BOTH "both"
#define	P4_KW_ACTIVE_NONE "none"
#define	P4_KW_ACTIVE_SINGLE "single"
#define	P4_KW_BUSREQTYPE "busreqtype"
#define	P4_KW_CASCADE	"cascade"
#define	P4_KW_EDGE	"edge"
#define	P4_KW_INV	"complement"
#define	P4_KW_OS	"os"
#define	P4_KW_MASK	"mask"
#define	P4_KW_PRECISE	"precise"
#define	P4_KW_TAG	"tag"
#define	P4_KW_THRESHOLD	"threshold"
#define	P4_KW_USR	"usr"
705 
/*
 * Build one P4 event-mask table entry; 'V' is the bit number within
 * the event's ESCR event mask field.
 */
#define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))

static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
	__P4MASK(dd, 0),
	__P4MASK(db, 1),
	__P4MASK(di, 2),
	__P4MASK(bd, 3),
	__P4MASK(bb, 4),
	__P4MASK(bi, 5),
	__P4MASK(id, 6),
	__P4MASK(ib, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
	__P4MASK(tcmiss, 0),
	NULLMASK,
};

static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
	__P4MASK(hit, 0),
	__P4MASK(miss, 1),
	__P4MASK(hit-uc, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
	__P4MASK(st-rb-full, 2),
	__P4MASK(64k-conf, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
	__P4MASK(lsc, 0),
	__P4MASK(ssc, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
	__P4MASK(split-ld, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
	__P4MASK(split-st, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
	__P4MASK(no-sta, 1),
	__P4MASK(no-std, 3),
	__P4MASK(partial-data, 4),
	__P4MASK(unalgn-addr, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
	__P4MASK(dtmiss, 0),
	__P4MASK(itmiss, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
	__P4MASK(rd-2ndl-hits, 0),
	__P4MASK(rd-2ndl-hite, 1),
	__P4MASK(rd-2ndl-hitm, 2),
	__P4MASK(rd-3rdl-hits, 3),
	__P4MASK(rd-3rdl-hite, 4),
	__P4MASK(rd-3rdl-hitm, 5),
	__P4MASK(rd-2ndl-miss, 8),
	__P4MASK(rd-3rdl-miss, 9),
	__P4MASK(wr-2ndl-miss, 10),
	NULLMASK
};

static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

/* same bit layout as p4_mask_ia; kept separate per-event for clarity */
static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
	__P4MASK(drdy-drv, 0),
	__P4MASK(drdy-own, 1),
	__P4MASK(drdy-other, 2),
	__P4MASK(dbsy-drv, 3),
	__P4MASK(dbsy-own, 4),
	__P4MASK(dbsy-other, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
	__P4MASK(req-type0, 0),
	__P4MASK(req-type1, 1),
	__P4MASK(req-len0, 2),
	__P4MASK(req-len1, 3),
	__P4MASK(req-io-type, 5),
	__P4MASK(req-lock-type, 6),
	__P4MASK(req-cache-type, 7),
	__P4MASK(req-split-type, 8),
	__P4MASK(req-dem-type, 9),
	__P4MASK(req-ord-type, 10),
	__P4MASK(mem-type0, 11),
	__P4MASK(mem-type1, 12),
	__P4MASK(mem-type2, 13),
	NULLMASK
};

static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
	__P4MASK(allp0, 3),
	__P4MASK(allp2, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
	__P4MASK(running, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
	__P4MASK(cisc, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
	__P4MASK(from-tc-build, 0),
	__P4MASK(from-tc-deliver, 1),
	__P4MASK(from-rom, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_rmbt[] = {
	/* retired mispred branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	__P4MASK(return, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	/*
	 * NOTE(review): bit 3 is spelled "retired" here but "return"
	 * in p4_mask_rmbt above — presumably the same branch-type bit;
	 * the keyword is user-visible, so confirm before renaming.
	 */
	__P4MASK(retired, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
	__P4MASK(sbfull, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
	__P4MASK(wcb-evicts, 0),
	__P4MASK(wcb-full-evict, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_fee[] = { /* front end event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ee[] = { /* execution event */
	__P4MASK(nbogus0, 0),
	__P4MASK(nbogus1, 1),
	__P4MASK(nbogus2, 2),
	__P4MASK(nbogus3, 3),
	__P4MASK(bogus0, 4),
	__P4MASK(bogus1, 5),
	__P4MASK(bogus2, 6),
	__P4MASK(bogus3, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_re[] = { /* replay event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
	__P4MASK(nbogusntag, 0),
	__P4MASK(nbogustag, 1),
	__P4MASK(bogusntag, 2),
	__P4MASK(bogustag, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ut[] = { /* uop type */
	__P4MASK(tagloads, 1),
	__P4MASK(tagstores, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_br[] = { /* branch retired */
	__P4MASK(mmnp, 0),
	__P4MASK(mmnm, 1),
	__P4MASK(mmtp, 2),
	__P4MASK(mmtm, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
	__P4MASK(nbogus, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
	__P4MASK(fpsu, 0),
	__P4MASK(fpso, 1),
	__P4MASK(poao, 2),
	__P4MASK(poau, 3),
	__P4MASK(prea, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
	__P4MASK(clear, 0),
	__P4MASK(moclear, 2),
	__P4MASK(smclear, 3),
	NULLMASK
};
998 };
999 
1000 /* P4 event parser */
1001 static int
1002 p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1003     struct pmc_op_pmcallocate *pmc_config)
1004 {
1005 
1006 	char	*e, *p, *q;
1007 	int	count, has_tag, has_busreqtype, n;
1008 	uint32_t evmask, cccractivemask;
1009 	const struct pmc_masks *pm, *pmask;
1010 
1011 	pmc_config->pm_caps |= PMC_CAP_READ;
1012 	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1013 	    pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1014 
1015 	if (pe == PMC_EV_TSC_TSC) {
1016 		/* TSC must not be further qualified */
1017 		if (ctrspec && *ctrspec != '\0')
1018 			return -1;
1019 		return 0;
1020 	}
1021 
1022 	pmask   = NULL;
1023 	evmask  = 0;
1024 	cccractivemask = 0x3;
1025 	has_tag = has_busreqtype = 0;
1026 	pmc_config->pm_caps |= PMC_CAP_WRITE;
1027 
1028 #define	__P4SETMASK(M) do {				\
1029 	pmask = p4_mask_##M; 				\
1030 } while (0)
1031 
1032 	switch (pe) {
1033 	case PMC_EV_P4_TC_DELIVER_MODE:
1034 		__P4SETMASK(tcdm);
1035 		break;
1036 	case PMC_EV_P4_BPU_FETCH_REQUEST:
1037 		__P4SETMASK(bfr);
1038 		break;
1039 	case PMC_EV_P4_ITLB_REFERENCE:
1040 		__P4SETMASK(ir);
1041 		break;
1042 	case PMC_EV_P4_MEMORY_CANCEL:
1043 		__P4SETMASK(memcan);
1044 		break;
1045 	case PMC_EV_P4_MEMORY_COMPLETE:
1046 		__P4SETMASK(memcomp);
1047 		break;
1048 	case PMC_EV_P4_LOAD_PORT_REPLAY:
1049 		__P4SETMASK(lpr);
1050 		break;
1051 	case PMC_EV_P4_STORE_PORT_REPLAY:
1052 		__P4SETMASK(spr);
1053 		break;
1054 	case PMC_EV_P4_MOB_LOAD_REPLAY:
1055 		__P4SETMASK(mlr);
1056 		break;
1057 	case PMC_EV_P4_PAGE_WALK_TYPE:
1058 		__P4SETMASK(pwt);
1059 		break;
1060 	case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1061 		__P4SETMASK(bcr);
1062 		break;
1063 	case PMC_EV_P4_IOQ_ALLOCATION:
1064 		__P4SETMASK(ia);
1065 		has_busreqtype = 1;
1066 		break;
1067 	case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1068 		__P4SETMASK(iae);
1069 		has_busreqtype = 1;
1070 		break;
1071 	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1072 		__P4SETMASK(fda);
1073 		break;
1074 	case PMC_EV_P4_BSQ_ALLOCATION:
1075 		__P4SETMASK(ba);
1076 		break;
1077 	case PMC_EV_P4_SSE_INPUT_ASSIST:
1078 		__P4SETMASK(sia);
1079 		break;
1080 	case PMC_EV_P4_PACKED_SP_UOP:
1081 		__P4SETMASK(psu);
1082 		break;
1083 	case PMC_EV_P4_PACKED_DP_UOP:
1084 		__P4SETMASK(pdu);
1085 		break;
1086 	case PMC_EV_P4_SCALAR_SP_UOP:
1087 		__P4SETMASK(ssu);
1088 		break;
1089 	case PMC_EV_P4_SCALAR_DP_UOP:
1090 		__P4SETMASK(sdu);
1091 		break;
1092 	case PMC_EV_P4_64BIT_MMX_UOP:
1093 		__P4SETMASK(64bmu);
1094 		break;
1095 	case PMC_EV_P4_128BIT_MMX_UOP:
1096 		__P4SETMASK(128bmu);
1097 		break;
1098 	case PMC_EV_P4_X87_FP_UOP:
1099 		__P4SETMASK(xfu);
1100 		break;
1101 	case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1102 		__P4SETMASK(xsmu);
1103 		break;
1104 	case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1105 		__P4SETMASK(gpe);
1106 		break;
1107 	case PMC_EV_P4_TC_MS_XFER:
1108 		__P4SETMASK(tmx);
1109 		break;
1110 	case PMC_EV_P4_UOP_QUEUE_WRITES:
1111 		__P4SETMASK(uqw);
1112 		break;
1113 	case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1114 		__P4SETMASK(rmbt);
1115 		break;
1116 	case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1117 		__P4SETMASK(rbt);
1118 		break;
1119 	case PMC_EV_P4_RESOURCE_STALL:
1120 		__P4SETMASK(rs);
1121 		break;
1122 	case PMC_EV_P4_WC_BUFFER:
1123 		__P4SETMASK(wb);
1124 		break;
1125 	case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1126 	case PMC_EV_P4_B2B_CYCLES:
1127 	case PMC_EV_P4_BNR:
1128 	case PMC_EV_P4_SNOOP:
1129 	case PMC_EV_P4_RESPONSE:
1130 		break;
1131 	case PMC_EV_P4_FRONT_END_EVENT:
1132 		__P4SETMASK(fee);
1133 		break;
1134 	case PMC_EV_P4_EXECUTION_EVENT:
1135 		__P4SETMASK(ee);
1136 		break;
1137 	case PMC_EV_P4_REPLAY_EVENT:
1138 		__P4SETMASK(re);
1139 		break;
1140 	case PMC_EV_P4_INSTR_RETIRED:
1141 		__P4SETMASK(insret);
1142 		break;
1143 	case PMC_EV_P4_UOPS_RETIRED:
1144 		__P4SETMASK(ur);
1145 		break;
1146 	case PMC_EV_P4_UOP_TYPE:
1147 		__P4SETMASK(ut);
1148 		break;
1149 	case PMC_EV_P4_BRANCH_RETIRED:
1150 		__P4SETMASK(br);
1151 		break;
1152 	case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1153 		__P4SETMASK(mbr);
1154 		break;
1155 	case PMC_EV_P4_X87_ASSIST:
1156 		__P4SETMASK(xa);
1157 		break;
1158 	case PMC_EV_P4_MACHINE_CLEAR:
1159 		__P4SETMASK(machclr);
1160 		break;
1161 	default:
1162 		return -1;
1163 	}
1164 
1165 	/* process additional flags */
1166 	while ((p = strsep(&ctrspec, ",")) != NULL) {
1167 		if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
1168 			q = strchr(p, '=');
1169 			if (*++q == '\0') /* skip '=' */
1170 				return -1;
1171 
1172 			if (strcmp(q, P4_KW_ACTIVE_NONE) == 0)
1173 				cccractivemask = 0x0;
1174 			else if (strcmp(q, P4_KW_ACTIVE_SINGLE) == 0)
1175 				cccractivemask = 0x1;
1176 			else if (strcmp(q, P4_KW_ACTIVE_BOTH) == 0)
1177 				cccractivemask = 0x2;
1178 			else if (strcmp(q, P4_KW_ACTIVE_ANY) == 0)
1179 				cccractivemask = 0x3;
1180 			else
1181 				return -1;
1182 
1183 		} else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
1184 			if (has_busreqtype == 0)
1185 				return -1;
1186 
1187 			q = strchr(p, '=');
1188 			if (*++q == '\0') /* skip '=' */
1189 				return -1;
1190 
1191 			count = strtol(q, &e, 0);
1192 			if (e == q || *e != '\0')
1193 				return -1;
1194 			evmask = (evmask & ~0x1F) | (count & 0x1F);
1195 		} else if (KWMATCH(p, P4_KW_CASCADE))
1196 			pmc_config->pm_caps |= PMC_CAP_CASCADE;
1197 		else if (KWMATCH(p, P4_KW_EDGE))
1198 			pmc_config->pm_caps |= PMC_CAP_EDGE;
1199 		else if (KWMATCH(p, P4_KW_INV))
1200 			pmc_config->pm_caps |= PMC_CAP_INVERT;
1201 		else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
1202 			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1203 				return -1;
1204 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1205 		} else if (KWMATCH(p, P4_KW_OS))
1206 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1207 		else if (KWMATCH(p, P4_KW_PRECISE))
1208 			pmc_config->pm_caps |= PMC_CAP_PRECISE;
1209 		else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
1210 			if (has_tag == 0)
1211 				return -1;
1212 
1213 			q = strchr(p, '=');
1214 			if (*++q == '\0') /* skip '=' */
1215 				return -1;
1216 
1217 			count = strtol(q, &e, 0);
1218 			if (e == q || *e != '\0')
1219 				return -1;
1220 
1221 			pmc_config->pm_caps |= PMC_CAP_TAGGING;
1222 			pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
1223 			    P4_ESCR_TO_TAG_VALUE(count);
1224 		} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
1225 			q = strchr(p, '=');
1226 			if (*++q == '\0') /* skip '=' */
1227 				return -1;
1228 
1229 			count = strtol(q, &e, 0);
1230 			if (e == q || *e != '\0')
1231 				return -1;
1232 
1233 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1234 			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
1235 			    ~P4_CCCR_THRESHOLD_MASK;
1236 			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1237 			    P4_CCCR_TO_THRESHOLD(count);
1238 		} else if (KWMATCH(p, P4_KW_USR))
1239 			pmc_config->pm_caps |= PMC_CAP_USER;
1240 		else
1241 			return -1;
1242 	}
1243 
1244 	/* other post processing */
1245 	if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
1246 	    pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
1247 	    pe == PMC_EV_P4_BSQ_ALLOCATION)
1248 		pmc_config->pm_caps |= PMC_CAP_EDGE;
1249 
1250 	/* fill in thread activity mask */
1251 	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1252 	    P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
1253 
1254 	if (evmask)
1255 		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1256 
1257 	switch (pe) {
1258 	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1259 		if ((evmask & 0x06) == 0x06 ||
1260 		    (evmask & 0x18) == 0x18)
1261 			return -1; /* can't have own+other bits together */
1262 		if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
1263 			evmask = 0x1D;
1264 		break;
1265 	case PMC_EV_P4_MACHINE_CLEAR:
1266 		/* only one bit is allowed to be set */
1267 		if ((evmask & (evmask - 1)) != 0)
1268 			return -1;
1269 		if (evmask == 0) {
1270 			evmask = 0x1; 	/* 'CLEAR' */
1271 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1272 		}
1273 		break;
1274 	default:
1275 		if (evmask == 0 && pmask) {
1276 			for (pm = pmask; pm->pm_name; pm++)
1277 				evmask |= pm->pm_value;
1278 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1279 		}
1280 	}
1281 
1282 	pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
1283 	    P4_ESCR_TO_EVENT_MASK(evmask);
1284 
1285 	return 0;
1286 }
1287 
1288 #endif
1289 
1290 #if defined(__i386__)
1291 
1292 /*
1293  * Pentium style PMCs
1294  */
1295 
/* Event-name aliases accepted for Pentium (P5) class CPUs. */
static struct pmc_event_alias p5_aliases[] = {
	EV_ALIAS("cycles", "tsc"),
	EV_ALIAS(NULL, NULL)
};
1300 
1301 static int
1302 p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
1303     struct pmc_op_pmcallocate *pmc_config)
1304 {
1305 	return -1 || pe || ctrspec || pmc_config; /* shut up gcc */
1306 }
1307 
1308 /*
1309  * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
1310  * and Pentium M CPUs.
1311  */
1312 
/* Event-name aliases accepted for P6 class CPUs (PPro/PII/PIII/PM). */
static struct pmc_event_alias p6_aliases[] = {
	EV_ALIAS("branches",		"p6-br-inst-retired"),
	EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p6-dcu-lines-in"),
	EV_ALIAS("ic-misses",		"p6-ifu-ifetch-miss"),
	EV_ALIAS("instructions",	"p6-inst-retired"),
	EV_ALIAS("interrupts",		"p6-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"p6-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};
1324 
/* Keywords recognized in a P6 event specifier string. */
#define	P6_KW_CMASK	"cmask"
#define	P6_KW_EDGE	"edge"
#define	P6_KW_INV	"inv"
#define	P6_KW_OS	"os"
#define	P6_KW_UMASK	"umask"
#define	P6_KW_USR	"usr"
1331 
/* MESI cache-line state qualifiers. */
static struct pmc_masks p6_mask_mesi[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	NULLMASK
};
1339 
/* MESI qualifiers plus hw/nonhw prefetch bits (Pentium M variants). */
static struct pmc_masks p6_mask_mesihw[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};
1350 
/* hw/nonhw prefetch qualifiers (Pentium M). */
static struct pmc_masks p6_mask_hw[] = {
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};
1357 
/* self/any agent qualifiers for bus transaction events. */
static struct pmc_masks p6_mask_any[] = {
	PMCMASK(self,	0x00),
	PMCMASK(any,	0x20),
	NULLMASK
};
1363 
/* Prefetch-type qualifiers for KNI (SSE) prefetch events. */
static struct pmc_masks p6_mask_ekp[] = {
	PMCMASK(nta,	0x00),
	PMCMASK(t1,	0x01),
	PMCMASK(t2,	0x02),
	PMCMASK(wos,	0x03),
	NULLMASK
};
1371 
/* packed-and-scalar vs. scalar qualifiers for KNI instruction events. */
static struct pmc_masks p6_mask_pps[] = {
	PMCMASK(packed-and-scalar, 0x00),
	PMCMASK(scalar,	0x01),
	NULLMASK
};
1377 
/* MMX instruction-type qualifiers for MMX_INSTR_TYPE_EXEC. */
static struct pmc_masks p6_mask_mite[] = {
	PMCMASK(packed-multiply,	 0x01),
	PMCMASK(packed-shift,		0x02),
	PMCMASK(pack,			0x04),
	PMCMASK(unpack,			0x08),
	PMCMASK(packed-logical,		0x10),
	PMCMASK(packed-arithmetic,	0x20),
	NULLMASK
};
1387 
/* Transition-direction qualifiers for FP_MMX_TRANS. */
static struct pmc_masks p6_mask_fmt[] = {
	PMCMASK(mmxtofp,	0x00),
	PMCMASK(fptommx,	0x01),
	NULLMASK
};
1393 
/* Segment-register qualifiers for segment rename events. */
static struct pmc_masks p6_mask_sr[] = {
	PMCMASK(es,	0x01),
	PMCMASK(ds,	0x02),
	PMCMASK(fs,	0x04),
	PMCMASK(gs,	0x08),
	NULLMASK
};
1401 
/* Qualifiers for EMON_EST_TRANS (Enhanced SpeedStep transitions). */
static struct pmc_masks p6_mask_eet[] = {
	PMCMASK(all,	0x00),
	PMCMASK(freq,	0x02),
	NULLMASK
};
1407 
/* Qualifiers for EMON_FUSED_UOPS_RET (fused micro-op retirement). */
static struct pmc_masks p6_mask_efur[] = {
	PMCMASK(all,	0x00),
	PMCMASK(loadop,	0x01),
	PMCMASK(stdsta,	0x02),
	NULLMASK
};
1414 
/* SSE/SSE2 instruction-type qualifiers for EMON_SSE_SSE2_INST_RETIRED. */
static struct pmc_masks p6_mask_essir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-packed-single-scalar-single, 0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};
1422 
/* SSE/SSE2 qualifiers for EMON_SSE_SSE2_COMP_INST_RETIRED. */
static struct pmc_masks p6_mask_esscir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-scalar-single,	0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};
1430 
/*
 * P6 event parser.
 *
 * Parse the remainder of an event specifier 'ctrspec' for P6-class
 * event 'pe' and fill in the allocation request 'pmc_config'.
 * Recognized comma-separated modifiers are "cmask=N", "edge", "inv",
 * "os", "umask=M1[+M2...]" and "usr".
 *
 * Returns 0 on success, -1 on a malformed specifier.
 */
static int
p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint32_t evmask;
	int count, n;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= PMC_CAP_READ;
	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;

	/* The TSC is read-only and accepts no modifiers at all. */
	if (pe == PMC_EV_TSC_TSC) {
		if (ctrspec && *ctrspec != '\0')
			return -1;
		return 0;
	}

	pmc_config->pm_caps |= PMC_CAP_WRITE;
	evmask = 0;

	/* Select the unit-mask table appropriate for event 'pe'. */
#define	P6MASKSET(M)	pmask = p6_mask_ ## M

	switch(pe) {
	case PMC_EV_P6_L2_IFETCH: 	P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:
		P6MASKSET(any);	break;
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:
		P6MASKSET(ekp); break;
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
		P6MASKSET(pps);	break;
	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
		P6MASKSET(mite); break;
	case PMC_EV_P6_FP_MMX_TRANS:
		P6MASKSET(fmt);	break;
	case PMC_EV_P6_SEG_RENAME_STALLS:
	case PMC_EV_P6_SEG_REG_RENAMES:
		P6MASKSET(sr);	break;
	case PMC_EV_P6_EMON_EST_TRANS:
		P6MASKSET(eet);	break;
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
		P6MASKSET(efur); break;
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
		P6MASKSET(essir); break;
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
		P6MASKSET(esscir); break;
	default:
		pmask = NULL;	/* event takes no unit mask */
		break;
	}

	/* Pentium M PMCs have a few events with different semantics */
	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
		if (pe == PMC_EV_P6_L2_LD ||
		    pe == PMC_EV_P6_L2_LINES_IN ||
		    pe == PMC_EV_P6_L2_LINES_OUT)
			P6MASKSET(mesihw);
		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
			P6MASKSET(hw);
	}

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
			/* "cmask=N": event threshold count */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return -1;
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return -1;
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
			    P6_EVSEL_TO_CMASK(count);
		} else if (KWMATCH(p, P6_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, P6_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, P6_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
			evmask = 0;	/* the last "umask=" keyword wins */
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return -1;
			/*
			 * Events whose unit mask encodes a selection
			 * (rather than a set of OR-able bits) accept
			 * exactly one mask keyword.
			 */
			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
			     pe == PMC_EV_P6_BUS_TRANS_IO ||
			     pe == PMC_EV_P6_BUS_TRANS_P ||
			     pe == PMC_EV_P6_BUS_TRANS_WB ||
			     pe == PMC_EV_P6_EMON_EST_TRANS ||
			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
			     pe == PMC_EV_P6_FP_MMX_TRANS)
			    && (n > 1))
				return -1; /* only one mask keyword allowed */
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P6_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return -1;
	}

	/* post processing */
	switch (pe) {

		/*
		 * The following events default to an evmask of 0
		 */

		/* default => 'self' */
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:

		/* default => 'nta' */
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:

		/* default => 'packed and scalar' */
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:

		/* default => 'mmx to fp transitions' */
	case PMC_EV_P6_FP_MMX_TRANS:

		/* default => 'SSE Packed Single' */
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:

		/* default => 'all fused micro-ops' */
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:

		/* default => 'all transitions' */
	case PMC_EV_P6_EMON_EST_TRANS:
		break;

	case PMC_EV_P6_MMX_UOPS_EXEC:
		evmask = 0x0F;		/* only value allowed */
		break;

	default:

		/*
		 * For all other events, set the default event mask
		 * to a logical OR of all the allowed event mask bits.
		 */

		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	/* The unit mask is applied only when a qualifier is in effect. */
	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return 0;
}
1636 
1637 #endif
1638 
1639 /*
1640  * API entry points
1641  */
1642 
1643 
1644 int
1645 pmc_allocate(const char *ctrspec, enum pmc_mode mode,
1646     uint32_t flags, int cpu, pmc_id_t *pmcid)
1647 {
1648 	int retval;
1649 	enum pmc_event pe;
1650 	char *r, *spec_copy;
1651 	const char *ctrname;
1652 	const struct pmc_event_alias *p;
1653 	struct pmc_op_pmcallocate pmc_config;
1654 
1655 	spec_copy = NULL;
1656 	retval    = -1;
1657 
1658 	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
1659 	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
1660 		errno = EINVAL;
1661 		goto out;
1662 	}
1663 
1664 	/* replace an event alias with the canonical event specifier */
1665 	if (pmc_mdep_event_aliases)
1666 		for (p = pmc_mdep_event_aliases; p->pm_alias; p++)
1667 			if (!strcmp(ctrspec, p->pm_alias)) {
1668 				spec_copy = strdup(p->pm_spec);
1669 				break;
1670 			}
1671 
1672 	if (spec_copy == NULL)
1673 		spec_copy = strdup(ctrspec);
1674 
1675 	r = spec_copy;
1676 	ctrname = strsep(&r, ",");
1677 
1678 	/* look for the given counter name */
1679 
1680 	for (pe = PMC_EVENT_FIRST; pe < (PMC_EVENT_LAST+1); pe++)
1681 		if (!strcmp(ctrname, pmc_event_table[pe].pm_ev_name))
1682 			break;
1683 
1684 	if (pe > PMC_EVENT_LAST) {
1685 		errno = EINVAL;
1686 		goto out;
1687 	}
1688 
1689 	bzero(&pmc_config, sizeof(pmc_config));
1690 	pmc_config.pm_ev    = pmc_event_table[pe].pm_ev_code;
1691 	pmc_config.pm_class = pmc_event_table[pe].pm_ev_class;
1692 	pmc_config.pm_cpu   = cpu;
1693 	pmc_config.pm_mode  = mode;
1694 	pmc_config.pm_flags = flags;
1695 
1696 	if (PMC_IS_SAMPLING_MODE(mode))
1697 		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
1698 
1699 	if (pmc_mdep_allocate_pmc(pe, r, &pmc_config) < 0) {
1700 		errno = EINVAL;
1701 		goto out;
1702 	}
1703 
1704 	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
1705 		goto out;
1706 
1707 	*pmcid = pmc_config.pm_pmcid;
1708 
1709 	retval = 0;
1710 
1711  out:
1712 	if (spec_copy)
1713 		free(spec_copy);
1714 
1715 	return retval;
1716 }
1717 
1718 int
1719 pmc_attach(pmc_id_t pmc, pid_t pid)
1720 {
1721 	struct pmc_op_pmcattach pmc_attach_args;
1722 
1723 	pmc_attach_args.pm_pmc = pmc;
1724 	pmc_attach_args.pm_pid = pid;
1725 
1726 	return PMC_CALL(PMCATTACH, &pmc_attach_args);
1727 }
1728 
1729 int
1730 pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
1731 {
1732 	unsigned int i;
1733 	enum pmc_class cl;
1734 
1735 	cl = PMC_ID_TO_CLASS(pmcid);
1736 	for (i = 0; i < cpu_info.pm_nclass; i++)
1737 		if (cpu_info.pm_classes[i].pm_class == cl) {
1738 			*caps = cpu_info.pm_classes[i].pm_caps;
1739 			return 0;
1740 		}
1741 	return EINVAL;
1742 }
1743 
1744 int
1745 pmc_configure_logfile(int fd)
1746 {
1747 	struct pmc_op_configurelog cla;
1748 
1749 	cla.pm_logfd = fd;
1750 	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
1751 		return -1;
1752 	return 0;
1753 }
1754 
1755 int
1756 pmc_cpuinfo(const struct pmc_cpuinfo **pci)
1757 {
1758 	if (pmc_syscall == -1) {
1759 		errno = ENXIO;
1760 		return -1;
1761 	}
1762 
1763 	*pci = &cpu_info;
1764 	return 0;
1765 }
1766 
1767 int
1768 pmc_detach(pmc_id_t pmc, pid_t pid)
1769 {
1770 	struct pmc_op_pmcattach pmc_detach_args;
1771 
1772 	pmc_detach_args.pm_pmc = pmc;
1773 	pmc_detach_args.pm_pid = pid;
1774 
1775 	return PMC_CALL(PMCDETACH, &pmc_detach_args);
1776 }
1777 
1778 int
1779 pmc_disable(int cpu, int pmc)
1780 {
1781 	struct pmc_op_pmcadmin ssa;
1782 
1783 	ssa.pm_cpu = cpu;
1784 	ssa.pm_pmc = pmc;
1785 	ssa.pm_state = PMC_STATE_DISABLED;
1786 	return PMC_CALL(PMCADMIN, &ssa);
1787 }
1788 
1789 int
1790 pmc_enable(int cpu, int pmc)
1791 {
1792 	struct pmc_op_pmcadmin ssa;
1793 
1794 	ssa.pm_cpu = cpu;
1795 	ssa.pm_pmc = pmc;
1796 	ssa.pm_state = PMC_STATE_FREE;
1797 	return PMC_CALL(PMCADMIN, &ssa);
1798 }
1799 
1800 /*
1801  * Return a list of events known to a given PMC class.  'cl' is the
1802  * PMC class identifier, 'eventnames' is the returned list of 'const
1803  * char *' pointers pointing to the names of the events. 'nevents' is
1804  * the number of event name pointers returned.
1805  *
1806  * The space for 'eventnames' is allocated using malloc(3).  The caller
1807  * is responsible for freeing this space when done.
1808  */
1809 
1810 int
1811 pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
1812     int *nevents)
1813 {
1814 	int count;
1815 	const char **names;
1816 	const struct pmc_event_descr *ev;
1817 
1818 	switch (cl)
1819 	{
1820 	case PMC_CLASS_TSC:
1821 		ev = &pmc_event_table[PMC_EV_TSC_TSC];
1822 		count = 1;
1823 		break;
1824 	case PMC_CLASS_K7:
1825 		ev = &pmc_event_table[PMC_EV_K7_FIRST];
1826 		count = PMC_EV_K7_LAST - PMC_EV_K7_FIRST + 1;
1827 		break;
1828 	case PMC_CLASS_K8:
1829 		ev = &pmc_event_table[PMC_EV_K8_FIRST];
1830 		count = PMC_EV_K8_LAST - PMC_EV_K8_FIRST + 1;
1831 		break;
1832 	case PMC_CLASS_P5:
1833 		ev = &pmc_event_table[PMC_EV_P5_FIRST];
1834 		count = PMC_EV_P5_LAST - PMC_EV_P5_FIRST + 1;
1835 		break;
1836 	case PMC_CLASS_P6:
1837 		ev = &pmc_event_table[PMC_EV_P6_FIRST];
1838 		count = PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1;
1839 		break;
1840 	case PMC_CLASS_P4:
1841 		ev = &pmc_event_table[PMC_EV_P4_FIRST];
1842 		count = PMC_EV_P4_LAST - PMC_EV_P4_FIRST + 1;
1843 		break;
1844 	default:
1845 		errno = EINVAL;
1846 		return -1;
1847 	}
1848 
1849 	if ((names = malloc(count * sizeof(const char *))) == NULL)
1850 		return -1;
1851 
1852 	*eventnames = names;
1853 	*nevents = count;
1854 
1855 	for (;count--; ev++, names++)
1856 		*names = ev->pm_ev_name;
1857 	return 0;
1858 }
1859 
1860 int
1861 pmc_flush_logfile(void)
1862 {
1863 	return PMC_CALL(FLUSHLOG,0);
1864 }
1865 
1866 int
1867 pmc_get_driver_stats(struct pmc_driverstats *ds)
1868 {
1869 	struct pmc_op_getdriverstats gms;
1870 
1871 	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
1872 		return -1;
1873 
1874 	/* copy out fields in the current userland<->library interface */
1875 	ds->pm_intr_ignored    = gms.pm_intr_ignored;
1876 	ds->pm_intr_processed  = gms.pm_intr_processed;
1877 	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
1878 	ds->pm_syscalls        = gms.pm_syscalls;
1879 	ds->pm_syscall_errors  = gms.pm_syscall_errors;
1880 	ds->pm_buffer_requests = gms.pm_buffer_requests;
1881 	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
1882 	ds->pm_log_sweeps      = gms.pm_log_sweeps;
1883 
1884 	return 0;
1885 }
1886 
1887 int
1888 pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
1889 {
1890 	struct pmc_op_getmsr gm;
1891 
1892 	gm.pm_pmcid = pmc;
1893 	if (PMC_CALL(PMCGETMSR, &gm) < 0)
1894 		return -1;
1895 	*msr = gm.pm_msr;
1896 	return 0;
1897 }
1898 
/*
 * Initialize the library: locate the hwpmc(4) module's syscall number,
 * verify the kernel ABI version, cache CPU information and select the
 * machine dependent event parser.  Returns 0 on success, -1 with errno
 * set on failure.  Safe to call more than once.
 */
int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;

	if (pmc_syscall != -1) /* already inited */
		return 0;

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return -1;

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return -1;

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno  = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	/* cache CPU information for the query functions below */
	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
	for (n = 0; n < cpu_info.pm_nclass; n++)
		cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];

	/* set parser pointer */
	switch (cpu_info.pm_cputype) {
#if defined(__i386__)
	case PMC_CPU_AMD_K7:
		pmc_mdep_event_aliases = k7_aliases;
		pmc_mdep_allocate_pmc = k7_allocate_pmc;
		break;
	case PMC_CPU_INTEL_P5:
		pmc_mdep_event_aliases = p5_aliases;
		pmc_mdep_allocate_pmc = p5_allocate_pmc;
		break;
	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
	case PMC_CPU_INTEL_PIII:
	case PMC_CPU_INTEL_PM:
		pmc_mdep_event_aliases = p6_aliases;
		pmc_mdep_allocate_pmc = p6_allocate_pmc;
		break;
#endif
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_INTEL_PIV:
		pmc_mdep_event_aliases = p4_aliases;
		pmc_mdep_allocate_pmc = p4_allocate_pmc;
		break;
	case PMC_CPU_AMD_K8:
		pmc_mdep_event_aliases = k8_aliases;
		pmc_mdep_allocate_pmc = k8_allocate_pmc;
		break;
#endif

	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about.  This shouldn't happen since the abi version check
		 * should have caught this.
		 */
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return 0;
}
1984 
1985 const char *
1986 pmc_name_of_capability(enum pmc_caps cap)
1987 {
1988 	int i;
1989 
1990 	/*
1991 	 * 'cap' should have a single bit set and should be in
1992 	 * range.
1993 	 */
1994 
1995 	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
1996 	    cap > PMC_CAP_LAST) {
1997 		errno = EINVAL;
1998 		return NULL;
1999 	}
2000 
2001 	i = ffs(cap);
2002 
2003 	return pmc_capability_names[i - 1];
2004 }
2005 
2006 const char *
2007 pmc_name_of_class(enum pmc_class pc)
2008 {
2009 	if ((int) pc >= PMC_CLASS_FIRST &&
2010 	    pc <= PMC_CLASS_LAST)
2011 		return pmc_class_names[pc];
2012 
2013 	errno = EINVAL;
2014 	return NULL;
2015 }
2016 
2017 const char *
2018 pmc_name_of_cputype(enum pmc_cputype cp)
2019 {
2020 	if ((int) cp >= PMC_CPU_FIRST &&
2021 	    cp <= PMC_CPU_LAST)
2022 		return pmc_cputype_names[cp];
2023 	errno = EINVAL;
2024 	return NULL;
2025 }
2026 
2027 const char *
2028 pmc_name_of_disposition(enum pmc_disp pd)
2029 {
2030 	if ((int) pd >= PMC_DISP_FIRST &&
2031 	    pd <= PMC_DISP_LAST)
2032 		return pmc_disposition_names[pd];
2033 
2034 	errno = EINVAL;
2035 	return NULL;
2036 }
2037 
2038 const char *
2039 pmc_name_of_event(enum pmc_event pe)
2040 {
2041 	if ((int) pe >= PMC_EVENT_FIRST &&
2042 	    pe <= PMC_EVENT_LAST)
2043 		return pmc_event_table[pe].pm_ev_name;
2044 
2045 	errno = EINVAL;
2046 	return NULL;
2047 }
2048 
2049 const char *
2050 pmc_name_of_mode(enum pmc_mode pm)
2051 {
2052 	if ((int) pm >= PMC_MODE_FIRST &&
2053 	    pm <= PMC_MODE_LAST)
2054 		return pmc_mode_names[pm];
2055 
2056 	errno = EINVAL;
2057 	return NULL;
2058 }
2059 
2060 const char *
2061 pmc_name_of_state(enum pmc_state ps)
2062 {
2063 	if ((int) ps >= PMC_STATE_FIRST &&
2064 	    ps <= PMC_STATE_LAST)
2065 		return pmc_state_names[ps];
2066 
2067 	errno = EINVAL;
2068 	return NULL;
2069 }
2070 
2071 int
2072 pmc_ncpu(void)
2073 {
2074 	if (pmc_syscall == -1) {
2075 		errno = ENXIO;
2076 		return -1;
2077 	}
2078 
2079 	return cpu_info.pm_ncpu;
2080 }
2081 
2082 int
2083 pmc_npmc(int cpu)
2084 {
2085 	if (pmc_syscall == -1) {
2086 		errno = ENXIO;
2087 		return -1;
2088 	}
2089 
2090 	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
2091 		errno = EINVAL;
2092 		return -1;
2093 	}
2094 
2095 	return cpu_info.pm_npmc;
2096 }
2097 
2098 int
2099 pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
2100 {
2101 	int nbytes, npmc;
2102 	struct pmc_op_getpmcinfo *pmci;
2103 
2104 	if ((npmc = pmc_npmc(cpu)) < 0)
2105 		return -1;
2106 
2107 	nbytes = sizeof(struct pmc_op_getpmcinfo) +
2108 	    npmc * sizeof(struct pmc_info);
2109 
2110 	if ((pmci = calloc(1, nbytes)) == NULL)
2111 		return -1;
2112 
2113 	pmci->pm_cpu  = cpu;
2114 
2115 	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
2116 		free(pmci);
2117 		return -1;
2118 	}
2119 
2120 	/* kernel<->library, library<->userland interfaces are identical */
2121 	*ppmci = (struct pmc_pmcinfo *) pmci;
2122 
2123 	return 0;
2124 }
2125 
2126 int
2127 pmc_read(pmc_id_t pmc, pmc_value_t *value)
2128 {
2129 	struct pmc_op_pmcrw pmc_read_op;
2130 
2131 	pmc_read_op.pm_pmcid = pmc;
2132 	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
2133 	pmc_read_op.pm_value = -1;
2134 
2135 	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
2136 		return -1;
2137 
2138 	*value = pmc_read_op.pm_value;
2139 
2140 	return 0;
2141 }
2142 
2143 int
2144 pmc_release(pmc_id_t pmc)
2145 {
2146 	struct pmc_op_simple	pmc_release_args;
2147 
2148 	pmc_release_args.pm_pmcid = pmc;
2149 
2150 	return PMC_CALL(PMCRELEASE, &pmc_release_args);
2151 }
2152 
2153 int
2154 pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
2155 {
2156 	struct pmc_op_pmcrw pmc_rw_op;
2157 
2158 	pmc_rw_op.pm_pmcid = pmc;
2159 	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
2160 	pmc_rw_op.pm_value = newvalue;
2161 
2162 	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
2163 		return -1;
2164 
2165 	*oldvaluep = pmc_rw_op.pm_value;
2166 
2167 	return 0;
2168 }
2169 
2170 int
2171 pmc_set(pmc_id_t pmc, pmc_value_t value)
2172 {
2173 	struct pmc_op_pmcsetcount sc;
2174 
2175 	sc.pm_pmcid = pmc;
2176 	sc.pm_count = value;
2177 
2178 	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
2179 		return -1;
2180 
2181 	return 0;
2182 
2183 }
2184 
2185 int
2186 pmc_start(pmc_id_t pmc)
2187 {
2188 	struct pmc_op_simple	pmc_start_args;
2189 
2190 	pmc_start_args.pm_pmcid = pmc;
2191 	return PMC_CALL(PMCSTART, &pmc_start_args);
2192 }
2193 
2194 int
2195 pmc_stop(pmc_id_t pmc)
2196 {
2197 	struct pmc_op_simple	pmc_stop_args;
2198 
2199 	pmc_stop_args.pm_pmcid = pmc;
2200 	return PMC_CALL(PMCSTOP, &pmc_stop_args);
2201 }
2202 
2203 int
2204 pmc_width(pmc_id_t pmcid, uint32_t *width)
2205 {
2206 	unsigned int i;
2207 	enum pmc_class cl;
2208 
2209 	cl = PMC_ID_TO_CLASS(pmcid);
2210 	for (i = 0; i < cpu_info.pm_nclass; i++)
2211 		if (cpu_info.pm_classes[i].pm_class == cl) {
2212 			*width = cpu_info.pm_classes[i].pm_width;
2213 			return 0;
2214 		}
2215 	return EINVAL;
2216 }
2217 
2218 int
2219 pmc_write(pmc_id_t pmc, pmc_value_t value)
2220 {
2221 	struct pmc_op_pmcrw pmc_write_op;
2222 
2223 	pmc_write_op.pm_pmcid = pmc;
2224 	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
2225 	pmc_write_op.pm_value = value;
2226 
2227 	return PMC_CALL(PMCRW, &pmc_write_op);
2228 }
2229 
2230 int
2231 pmc_writelog(uint32_t userdata)
2232 {
2233 	struct pmc_op_writelog wl;
2234 
2235 	wl.pm_userdata = userdata;
2236 	return PMC_CALL(WRITELOG, &wl);
2237 }
2238