xref: /freebsd/lib/libpmc/libpmc.c (revision 13014ca04aad1931d41958b56f71a2c65b9a7a2c)
1 /*-
2  * Copyright (c) 2003-2008 Joseph Koshy
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/types.h>
31 #include <sys/module.h>
32 #include <sys/pmc.h>
33 #include <sys/syscall.h>
34 
35 #include <ctype.h>
36 #include <errno.h>
37 #include <fcntl.h>
38 #include <pmc.h>
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <string.h>
42 #include <strings.h>
43 #include <unistd.h>
44 
45 /* Function prototypes */
46 #if defined(__i386__)
47 static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
48     struct pmc_op_pmcallocate *_pmc_config);
49 #endif
50 #if defined(__amd64__) || defined(__i386__)
51 static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
52     struct pmc_op_pmcallocate *_pmc_config);
53 static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
54     struct pmc_op_pmcallocate *_pmc_config);
55 #endif
56 #if defined(__i386__)
57 static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
58     struct pmc_op_pmcallocate *_pmc_config);
59 static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
60     struct pmc_op_pmcallocate *_pmc_config);
61 #endif
62 
/*
 * Issue a PMC operation via the single hwpmc(4) system call; 'pmc_syscall'
 * is the syscall number discovered at runtime by pmc_init().
 */
#define PMC_CALL(cmd, params)				\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))
65 
66 /*
67  * Event aliases provide a way for the user to ask for generic events
68  * like "cache-misses", or "instructions-retired".  These aliases are
69  * mapped to the appropriate canonical event descriptions using a
70  * lookup table.
71  */
struct pmc_event_alias {
	const char	*pm_alias;	/* user-visible alias, e.g. "cycles" */
	const char	*pm_spec;	/* canonical event specification string */
};
76 
77 static const struct pmc_event_alias *pmc_mdep_event_aliases;
78 
79 /*
80  * The pmc_event_descr table maps symbolic names known to the user
81  * to integer codes used by the PMC KLD.
82  */
struct pmc_event_descr {
	const char	*pm_ev_name;	/* symbolic event name */
	enum pmc_event	pm_ev_code;	/* event code used by the PMC KLD */
	enum pmc_class	pm_ev_class;	/* PMC class the event belongs to */
};
88 
/*
 * Expand the __PMC_EVENTS() X-macro from <sys/pmc.h> into one
 * { "NAME", code, class } descriptor per known event.
 */
static const struct pmc_event_descr
pmc_event_table[] =
{
#undef  __PMC_EV
#define	__PMC_EV(C,N,EV) { #EV, PMC_EV_ ## C ## _ ## N, PMC_CLASS_ ## C },
	__PMC_EVENTS()
};
96 
97 /*
98  * Mapping tables, mapping enumeration values to human readable
99  * strings.
100  */
101 
/* Capability names; index order follows the __PMC_CAPS() expansion. */
static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

/* PMC class names; index order follows the __PMC_CLASSES() expansion. */
static const char * pmc_class_names[] = {
#undef	__PMC_CLASS
#define __PMC_CLASS(C)	#C ,
	__PMC_CLASSES()
};

/* CPU type names; index order follows the __PMC_CPUS() expansion. */
static const char * pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, D) #S ,
	__PMC_CPUS()
};

/* PMC row-disposition names; follows the __PMC_DISPOSITIONS() expansion. */
static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

/* PMC mode names; index order follows the __PMC_MODES() expansion. */
static const char * pmc_mode_names[] = {
#undef  __PMC_MODE
#define __PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

/* PMC state names; index order follows the __PMC_STATES() expansion. */
static const char * pmc_state_names[] = {
#undef  __PMC_STATE
#define __PMC_STATE(S) #S ,
	__PMC_STATES()
};
137 
static int pmc_syscall = -1;		/* hwpmc syscall number; filled in by pmc_init() */

static struct pmc_cpuinfo cpu_info;	/* CPU/PMC description; filled in by pmc_init() */


/*
 * Architecture dependent event parsing hook: set by pmc_init() to the
 * *_allocate_pmc() routine matching the detected CPU.
 */
static int (*pmc_mdep_allocate_pmc)(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
146 
147 /* Event masks for events */
/* Event masks for events: one keyword name per qualifier bit value. */
struct pmc_masks {
	const char	*pm_name;	/* keyword, NULL terminates a table */
	const uint32_t	pm_value;	/* bit(s) OR-ed in when keyword matches */
};
#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
#define	NULLMASK	PMCMASK(NULL,0)
154 
155 #if defined(__amd64__) || defined(__i386__)
156 static int
157 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask)
158 {
159 	const struct pmc_masks *pm;
160 	char *q, *r;
161 	int c;
162 
163 	if (pmask == NULL)	/* no mask keywords */
164 		return (-1);
165 	q = strchr(p, '=');	/* skip '=' */
166 	if (*++q == '\0')	/* no more data */
167 		return (-1);
168 	c = 0;			/* count of mask keywords seen */
169 	while ((r = strsep(&q, "+")) != NULL) {
170 		for (pm = pmask; pm->pm_name && strcmp(r, pm->pm_name); pm++)
171 			;
172 		if (pm->pm_name == NULL) /* not found */
173 			return (-1);
174 		*evmask |= pm->pm_value;
175 		c++;
176 	}
177 	return (c);
178 }
179 #endif
180 
/* Keyword helpers: exact match, prefix match (e.g. "count="), alias entry. */
#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
184 
185 #if defined(__i386__)
186 
187 /*
188  * AMD K7 (Athlon) CPUs.
189  */
190 
/* Generic alias -> K7 event-spec map; NULL entry terminates the table. */
static struct pmc_event_alias k7_aliases[] = {
	EV_ALIAS("branches",		"k7-retired-branches"),
	EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k7-dc-misses"),
	EV_ALIAS("ic-misses",		"k7-ic-misses"),
	EV_ALIAS("instructions",	"k7-retired-instructions"),
	EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
	EV_ALIAS(NULL, NULL)
};

/* Qualifier keywords accepted by k7_allocate_pmc(). */
#define	K7_KW_COUNT	"count"
#define	K7_KW_EDGE	"edge"
#define	K7_KW_INV	"inv"
#define	K7_KW_OS	"os"
#define	K7_KW_UNITMASK	"unitmask"
#define	K7_KW_USR	"usr"
208 
/*
 * Parse a comma-separated qualifier string 'ctrspec' for an AMD K7 event
 * 'pe' and fill in 'pmc_config'.  Recognized keywords: "count=N" (event
 * threshold), "edge", "inv", "os", "usr" and — for the three MOESI-
 * qualified cache events only — "unitmask=[moesi]+".  'ctrspec' is
 * consumed destructively by strsep().  Returns 0 on success, -1 on a
 * parse error.
 */
static int
k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char		*e, *p, *q;
	int		c, has_unitmask;
	uint32_t	count, unitmask;

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= PMC_CAP_READ;

	if (pe == PMC_EV_TSC_TSC) {
		/* TSC events must be unqualified. */
		if (ctrspec && *ctrspec != '\0')
			return (-1);
		return (0);
	}

	/*
	 * Only the DC refill/writeback events take a MOESI unit mask;
	 * default to all MOESI states unless the user narrows it below.
	 */
	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
	    pe == PMC_EV_K7_DC_WRITEBACKS) {
		has_unitmask = 1;
		unitmask = AMD_PMC_UNITMASK_MOESI;
	} else
		unitmask = has_unitmask = 0;

	pmc_config->pm_caps |= PMC_CAP_WRITE;

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K7_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K7_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, K7_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
			if (has_unitmask == 0)
				return (-1);
			unitmask = 0;	/* user-specified mask replaces default */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			/* Accept any '+'-separated mix of 'm','o','e','s','i'. */
			while ((c = tolower(*q++)) != 0)
				if (c == 'm')
					unitmask |= AMD_PMC_UNITMASK_M;
				else if (c == 'o')
					unitmask |= AMD_PMC_UNITMASK_O;
				else if (c == 'e')
					unitmask |= AMD_PMC_UNITMASK_E;
				else if (c == 's')
					unitmask |= AMD_PMC_UNITMASK_S;
				else if (c == 'i')
					unitmask |= AMD_PMC_UNITMASK_I;
				else if (c == '+')
					continue;
				else
					return (-1);

			if (unitmask == 0)
				return (-1);

		} else if (KWMATCH(p, K7_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* OR the (default or user supplied) unit mask into the config word. */
	if (has_unitmask) {
		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		pmc_config->pm_md.pm_amd.pm_amd_config |=
		    AMD_PMC_TO_UNITMASK(unitmask);
	}

	return (0);

}
299 
300 #endif
301 
302 #if defined(__amd64__) || defined(__i386__)
303 
304 /*
305  * AMD K8 PMCs.
306  *
307  * These are very similar to AMD K7 PMCs, but support more kinds of
308  * events.
309  */
310 
/* Generic alias -> K8 event-spec map; NULL entry terminates the table. */
static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k8-dc-miss"),
	EV_ALIAS("ic-misses",		"k8-ic-miss"),
	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};
323 
/*
 * __K8MASK(N,V) stringifies keyword N (so hyphenated names are legal)
 * and maps it to unit-mask bit V via PMCMASK.
 */
#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables: one keyword table per qualifiable K8 event, consumed
 * by pmc_parse_mask() from k8_allocate_pmc().
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops,	0),
	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
	__K8MASK(store-pipe-excluding-junk-ops,	2),
	__K8MASK(add-pipe-junk-ops,		3),
	__K8MASK(multiply-pipe-junk-ops,	4),
	__K8MASK(store-pipe-junk-ops,		5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es,	0),
	__K8MASK(cs,	1),
	__K8MASK(ss,	2),
	__K8MASK(ds,	3),
	__K8MASK(fs,	4),
	__K8MASK(gs,	5),
	__K8MASK(hs,	6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions,	0),
	__K8MASK(cycles-in-request,	1),
	__K8MASK(cycles-to-complete,	2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid,	0),
	__K8MASK(shared,	1),
	__K8MASK(exclusive,	2),
	__K8MASK(owner,		3),
	__K8MASK(modified,	4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber,	0),
	__K8MASK(piggyback,	1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load,	0),
	__K8MASK(store,	1),
	__K8MASK(nta,	2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses,	0),
	__K8MASK(misses,	1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	__K8MASK(tag-snoop,	3),
	__K8MASK(cancelled,	4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim,	0),
	__K8MASK(victim-from-l2,	1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87,			0),
	__K8MASK(mmx-3dnow,		1),
	__K8MASK(packed-sse-sse2,	2),
	__K8MASK(scalar-sse-sse2,	3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0,		0),
	__K8MASK(low-op-pos-1,		1),
	__K8MASK(low-op-pos-2,		2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults,	0),
	__K8MASK(sse-retype-microfaults,	1),
	__K8MASK(sse-reclass-microfaults,	2),
	__K8MASK(sse-and-x87-microtraps,	3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit,	0),
	__K8MASK(page-miss,	1),
	__K8MASK(page-conflict,	2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround,		0),
	__K8MASK(read-to-write-turnaround,	1),
	__K8MASK(write-to-read-turnaround,	2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass,	0),
	__K8MASK(memory-controller-lo-pri-bypass,	1),
	__K8MASK(dram-controller-interface-bypass,	2),
	__K8MASK(dram-controller-queue-bypass,		3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte,	0),
	__K8MASK(nonpostwrszdword,	1),
	__K8MASK(postwrszbyte,		2),
	__K8MASK(postwrszdword,		3),
	__K8MASK(rdszbyte,		4),
	__K8MASK(rdszdword,		5),
	__K8MASK(rdmodwr,		6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss,		0),
	__K8MASK(probe-hit,		1),
	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command,	0),
	__K8MASK(data,	1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop,	3),
	NULLMASK
};

#undef	__K8MASK

/* Qualifier keywords accepted by k8_allocate_pmc(). */
#define	K8_KW_COUNT	"count"
#define	K8_KW_EDGE	"edge"
#define	K8_KW_INV	"inv"
#define	K8_KW_MASK	"mask"
#define	K8_KW_OS	"os"
#define	K8_KW_USR	"usr"
507 
508 static int
509 k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
510     struct pmc_op_pmcallocate *pmc_config)
511 {
512 	char		*e, *p, *q;
513 	int		n;
514 	uint32_t	count, evmask;
515 	const struct pmc_masks	*pm, *pmask;
516 
517 	pmc_config->pm_caps |= PMC_CAP_READ;
518 	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
519 
520 	if (pe == PMC_EV_TSC_TSC) {
521 		/* TSC events must be unqualified. */
522 		if (ctrspec && *ctrspec != '\0')
523 			return (-1);
524 		return (0);
525 	}
526 
527 	pmask = NULL;
528 	evmask = 0;
529 
530 #define	__K8SETMASK(M) pmask = k8_mask_##M
531 
532 	/* setup parsing tables */
533 	switch (pe) {
534 	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
535 		__K8SETMASK(fdfo);
536 		break;
537 	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
538 		__K8SETMASK(lsrl);
539 		break;
540 	case PMC_EV_K8_LS_LOCKED_OPERATION:
541 		__K8SETMASK(llo);
542 		break;
543 	case PMC_EV_K8_DC_REFILL_FROM_L2:
544 	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
545 	case PMC_EV_K8_DC_COPYBACK:
546 		__K8SETMASK(dc);
547 		break;
548 	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
549 		__K8SETMASK(dobee);
550 		break;
551 	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
552 		__K8SETMASK(ddpi);
553 		break;
554 	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
555 		__K8SETMASK(dabl);
556 		break;
557 	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
558 		__K8SETMASK(bilr);
559 		break;
560 	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
561 		__K8SETMASK(bfrlm);
562 		break;
563 	case PMC_EV_K8_BU_FILL_INTO_L2:
564 		__K8SETMASK(bfil);
565 		break;
566 	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
567 		__K8SETMASK(frfi);
568 		break;
569 	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
570 		__K8SETMASK(frfdoi);
571 		break;
572 	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
573 		__K8SETMASK(ffe);
574 		break;
575 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
576 		__K8SETMASK(nmcpae);
577 		break;
578 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
579 		__K8SETMASK(nmct);
580 		break;
581 	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
582 		__K8SETMASK(nmcbs);
583 		break;
584 	case PMC_EV_K8_NB_SIZED_COMMANDS:
585 		__K8SETMASK(nsc);
586 		break;
587 	case PMC_EV_K8_NB_PROBE_RESULT:
588 		__K8SETMASK(npr);
589 		break;
590 	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
591 	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
592 	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
593 		__K8SETMASK(nhbb);
594 		break;
595 
596 	default:
597 		break;		/* no options defined */
598 	}
599 
600 	pmc_config->pm_caps |= PMC_CAP_WRITE;
601 
602 	while ((p = strsep(&ctrspec, ",")) != NULL) {
603 		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
604 			q = strchr(p, '=');
605 			if (*++q == '\0') /* skip '=' */
606 				return (-1);
607 
608 			count = strtol(q, &e, 0);
609 			if (e == q || *e != '\0')
610 				return (-1);
611 
612 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
613 			pmc_config->pm_md.pm_amd.pm_amd_config |=
614 			    AMD_PMC_TO_COUNTER(count);
615 
616 		} else if (KWMATCH(p, K8_KW_EDGE)) {
617 			pmc_config->pm_caps |= PMC_CAP_EDGE;
618 		} else if (KWMATCH(p, K8_KW_INV)) {
619 			pmc_config->pm_caps |= PMC_CAP_INVERT;
620 		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
621 			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
622 				return (-1);
623 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
624 		} else if (KWMATCH(p, K8_KW_OS)) {
625 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
626 		} else if (KWMATCH(p, K8_KW_USR)) {
627 			pmc_config->pm_caps |= PMC_CAP_USER;
628 		} else
629 			return (-1);
630 	}
631 
632 	/* other post processing */
633 	switch (pe) {
634 	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
635 	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
636 	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
637 	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
638 	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
639 	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
640 		/* XXX only available in rev B and later */
641 		break;
642 	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
643 		/* XXX only available in rev C and later */
644 		break;
645 	case PMC_EV_K8_LS_LOCKED_OPERATION:
646 		/* XXX CPU Rev A,B evmask is to be zero */
647 		if (evmask & (evmask - 1)) /* > 1 bit set */
648 			return (-1);
649 		if (evmask == 0) {
650 			evmask = 0x01; /* Rev C and later: #instrs */
651 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
652 		}
653 		break;
654 	default:
655 		if (evmask == 0 && pmask != NULL) {
656 			for (pm = pmask; pm->pm_name; pm++)
657 				evmask |= pm->pm_value;
658 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
659 		}
660 	}
661 
662 	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
663 		pmc_config->pm_md.pm_amd.pm_amd_config =
664 		    AMD_PMC_TO_UNITMASK(evmask);
665 
666 	return (0);
667 }
668 
669 #endif
670 
671 #if defined(__amd64__) || defined(__i386__)
672 
673 /*
674  * Intel P4 PMCs
675  */
676 
/* Generic alias -> P4 event-spec map; NULL entry terminates the table. */
static struct pmc_event_alias p4_aliases[] = {
	EV_ALIAS("branches",		"p4-branch-retired,mask=mmtp+mmtm"),
	EV_ALIAS("branch-mispredicts",	"p4-mispred-branch-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("instructions",
	    "p4-instr-retired,mask=nbogusntag+nbogustag"),
	EV_ALIAS("unhalted-cycles",	"p4-global-power-events"),
	EV_ALIAS(NULL, NULL)
};

/* Qualifier keywords accepted by p4_allocate_pmc(). */
#define	P4_KW_ACTIVE	"active"
#define	P4_KW_ACTIVE_ANY "any"
#define	P4_KW_ACTIVE_BOTH "both"
#define	P4_KW_ACTIVE_NONE "none"
#define	P4_KW_ACTIVE_SINGLE "single"
#define	P4_KW_BUSREQTYPE "busreqtype"
#define	P4_KW_CASCADE	"cascade"
#define	P4_KW_EDGE	"edge"
#define	P4_KW_INV	"complement"
#define	P4_KW_OS	"os"
#define	P4_KW_MASK	"mask"
#define	P4_KW_PRECISE	"precise"
#define	P4_KW_TAG	"tag"
#define	P4_KW_THRESHOLD	"threshold"
#define	P4_KW_USR	"usr"
702 
/* __P4MASK(N,V) stringifies keyword N and maps it to ESCR event-mask bit V. */
#define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))

static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
	__P4MASK(dd, 0),
	__P4MASK(db, 1),
	__P4MASK(di, 2),
	__P4MASK(bd, 3),
	__P4MASK(bb, 4),
	__P4MASK(bi, 5),
	__P4MASK(id, 6),
	__P4MASK(ib, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
	__P4MASK(tcmiss, 0),
	NULLMASK,
};

static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
	__P4MASK(hit, 0),
	__P4MASK(miss, 1),
	__P4MASK(hit-uc, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
	__P4MASK(st-rb-full, 2),
	__P4MASK(64k-conf, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
	__P4MASK(lsc, 0),
	__P4MASK(ssc, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
	__P4MASK(split-ld, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
	__P4MASK(split-st, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
	__P4MASK(no-sta, 1),
	__P4MASK(no-std, 3),
	__P4MASK(partial-data, 4),
	__P4MASK(unalgn-addr, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
	__P4MASK(dtmiss, 0),
	__P4MASK(itmiss, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
	__P4MASK(rd-2ndl-hits, 0),
	__P4MASK(rd-2ndl-hite, 1),
	__P4MASK(rd-2ndl-hitm, 2),
	__P4MASK(rd-3rdl-hits, 3),
	__P4MASK(rd-3rdl-hite, 4),
	__P4MASK(rd-3rdl-hitm, 5),
	__P4MASK(rd-2ndl-miss, 8),
	__P4MASK(rd-3rdl-miss, 9),
	__P4MASK(wr-2ndl-miss, 10),
	NULLMASK
};

static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
	__P4MASK(drdy-drv, 0),
	__P4MASK(drdy-own, 1),
	__P4MASK(drdy-other, 2),
	__P4MASK(dbsy-drv, 3),
	__P4MASK(dbsy-own, 4),
	__P4MASK(dbsy-other, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
	__P4MASK(req-type0, 0),
	__P4MASK(req-type1, 1),
	__P4MASK(req-len0, 2),
	__P4MASK(req-len1, 3),
	__P4MASK(req-io-type, 5),
	__P4MASK(req-lock-type, 6),
	__P4MASK(req-cache-type, 7),
	__P4MASK(req-split-type, 8),
	__P4MASK(req-dem-type, 9),
	__P4MASK(req-ord-type, 10),
	__P4MASK(mem-type0, 11),
	__P4MASK(mem-type1, 12),
	__P4MASK(mem-type2, 13),
	NULLMASK
};

static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
	__P4MASK(allp0, 3),
	__P4MASK(allp2, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
	__P4MASK(running, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
	__P4MASK(cisc, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
	__P4MASK(from-tc-build, 0),
	__P4MASK(from-tc-deliver, 1),
	__P4MASK(from-rom, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_rmbt[] = {
	/* retired mispred branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	__P4MASK(return, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	/* NOTE(review): 'retired' here vs 'return' in p4_mask_rmbt — confirm intended keyword. */
	__P4MASK(retired, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
	__P4MASK(sbfull, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
	__P4MASK(wcb-evicts, 0),
	__P4MASK(wcb-full-evict, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_fee[] = { /* front end event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ee[] = { /* execution event */
	__P4MASK(nbogus0, 0),
	__P4MASK(nbogus1, 1),
	__P4MASK(nbogus2, 2),
	__P4MASK(nbogus3, 3),
	__P4MASK(bogus0, 4),
	__P4MASK(bogus1, 5),
	__P4MASK(bogus2, 6),
	__P4MASK(bogus3, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_re[] = { /* replay event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
	__P4MASK(nbogusntag, 0),
	__P4MASK(nbogustag, 1),
	__P4MASK(bogusntag, 2),
	__P4MASK(bogustag, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ut[] = { /* uop type */
	__P4MASK(tagloads, 1),
	__P4MASK(tagstores, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_br[] = { /* branch retired */
	__P4MASK(mmnp, 0),
	__P4MASK(mmnm, 1),
	__P4MASK(mmtp, 2),
	__P4MASK(mmtm, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
	__P4MASK(nbogus, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
	__P4MASK(fpsu, 0),
	__P4MASK(fpso, 1),
	__P4MASK(poao, 2),
	__P4MASK(poau, 3),
	__P4MASK(prea, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
	__P4MASK(clear, 0),
	__P4MASK(moclear, 2),
	__P4MASK(smclear, 3),
	NULLMASK
};
996 
997 /* P4 event parser */
998 static int
999 p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1000     struct pmc_op_pmcallocate *pmc_config)
1001 {
1002 
1003 	char	*e, *p, *q;
1004 	int	count, has_tag, has_busreqtype, n;
1005 	uint32_t evmask, cccractivemask;
1006 	const struct pmc_masks *pm, *pmask;
1007 
1008 	pmc_config->pm_caps |= PMC_CAP_READ;
1009 	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1010 	    pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1011 
1012 	if (pe == PMC_EV_TSC_TSC) {
1013 		/* TSC must not be further qualified */
1014 		if (ctrspec && *ctrspec != '\0')
1015 			return (-1);
1016 		return (0);
1017 	}
1018 
1019 	pmask   = NULL;
1020 	evmask  = 0;
1021 	cccractivemask = 0x3;
1022 	has_tag = has_busreqtype = 0;
1023 	pmc_config->pm_caps |= PMC_CAP_WRITE;
1024 
1025 #define	__P4SETMASK(M) do {				\
1026 	pmask = p4_mask_##M;				\
1027 } while (0)
1028 
1029 	switch (pe) {
1030 	case PMC_EV_P4_TC_DELIVER_MODE:
1031 		__P4SETMASK(tcdm);
1032 		break;
1033 	case PMC_EV_P4_BPU_FETCH_REQUEST:
1034 		__P4SETMASK(bfr);
1035 		break;
1036 	case PMC_EV_P4_ITLB_REFERENCE:
1037 		__P4SETMASK(ir);
1038 		break;
1039 	case PMC_EV_P4_MEMORY_CANCEL:
1040 		__P4SETMASK(memcan);
1041 		break;
1042 	case PMC_EV_P4_MEMORY_COMPLETE:
1043 		__P4SETMASK(memcomp);
1044 		break;
1045 	case PMC_EV_P4_LOAD_PORT_REPLAY:
1046 		__P4SETMASK(lpr);
1047 		break;
1048 	case PMC_EV_P4_STORE_PORT_REPLAY:
1049 		__P4SETMASK(spr);
1050 		break;
1051 	case PMC_EV_P4_MOB_LOAD_REPLAY:
1052 		__P4SETMASK(mlr);
1053 		break;
1054 	case PMC_EV_P4_PAGE_WALK_TYPE:
1055 		__P4SETMASK(pwt);
1056 		break;
1057 	case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1058 		__P4SETMASK(bcr);
1059 		break;
1060 	case PMC_EV_P4_IOQ_ALLOCATION:
1061 		__P4SETMASK(ia);
1062 		has_busreqtype = 1;
1063 		break;
1064 	case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1065 		__P4SETMASK(iae);
1066 		has_busreqtype = 1;
1067 		break;
1068 	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1069 		__P4SETMASK(fda);
1070 		break;
1071 	case PMC_EV_P4_BSQ_ALLOCATION:
1072 		__P4SETMASK(ba);
1073 		break;
1074 	case PMC_EV_P4_SSE_INPUT_ASSIST:
1075 		__P4SETMASK(sia);
1076 		break;
1077 	case PMC_EV_P4_PACKED_SP_UOP:
1078 		__P4SETMASK(psu);
1079 		break;
1080 	case PMC_EV_P4_PACKED_DP_UOP:
1081 		__P4SETMASK(pdu);
1082 		break;
1083 	case PMC_EV_P4_SCALAR_SP_UOP:
1084 		__P4SETMASK(ssu);
1085 		break;
1086 	case PMC_EV_P4_SCALAR_DP_UOP:
1087 		__P4SETMASK(sdu);
1088 		break;
1089 	case PMC_EV_P4_64BIT_MMX_UOP:
1090 		__P4SETMASK(64bmu);
1091 		break;
1092 	case PMC_EV_P4_128BIT_MMX_UOP:
1093 		__P4SETMASK(128bmu);
1094 		break;
1095 	case PMC_EV_P4_X87_FP_UOP:
1096 		__P4SETMASK(xfu);
1097 		break;
1098 	case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1099 		__P4SETMASK(xsmu);
1100 		break;
1101 	case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1102 		__P4SETMASK(gpe);
1103 		break;
1104 	case PMC_EV_P4_TC_MS_XFER:
1105 		__P4SETMASK(tmx);
1106 		break;
1107 	case PMC_EV_P4_UOP_QUEUE_WRITES:
1108 		__P4SETMASK(uqw);
1109 		break;
1110 	case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1111 		__P4SETMASK(rmbt);
1112 		break;
1113 	case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1114 		__P4SETMASK(rbt);
1115 		break;
1116 	case PMC_EV_P4_RESOURCE_STALL:
1117 		__P4SETMASK(rs);
1118 		break;
1119 	case PMC_EV_P4_WC_BUFFER:
1120 		__P4SETMASK(wb);
1121 		break;
1122 	case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1123 	case PMC_EV_P4_B2B_CYCLES:
1124 	case PMC_EV_P4_BNR:
1125 	case PMC_EV_P4_SNOOP:
1126 	case PMC_EV_P4_RESPONSE:
1127 		break;
1128 	case PMC_EV_P4_FRONT_END_EVENT:
1129 		__P4SETMASK(fee);
1130 		break;
1131 	case PMC_EV_P4_EXECUTION_EVENT:
1132 		__P4SETMASK(ee);
1133 		break;
1134 	case PMC_EV_P4_REPLAY_EVENT:
1135 		__P4SETMASK(re);
1136 		break;
1137 	case PMC_EV_P4_INSTR_RETIRED:
1138 		__P4SETMASK(insret);
1139 		break;
1140 	case PMC_EV_P4_UOPS_RETIRED:
1141 		__P4SETMASK(ur);
1142 		break;
1143 	case PMC_EV_P4_UOP_TYPE:
1144 		__P4SETMASK(ut);
1145 		break;
1146 	case PMC_EV_P4_BRANCH_RETIRED:
1147 		__P4SETMASK(br);
1148 		break;
1149 	case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1150 		__P4SETMASK(mbr);
1151 		break;
1152 	case PMC_EV_P4_X87_ASSIST:
1153 		__P4SETMASK(xa);
1154 		break;
1155 	case PMC_EV_P4_MACHINE_CLEAR:
1156 		__P4SETMASK(machclr);
1157 		break;
1158 	default:
1159 		return (-1);
1160 	}
1161 
1162 	/* process additional flags */
1163 	while ((p = strsep(&ctrspec, ",")) != NULL) {
1164 		if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
1165 			q = strchr(p, '=');
1166 			if (*++q == '\0') /* skip '=' */
1167 				return (-1);
1168 
1169 			if (strcmp(q, P4_KW_ACTIVE_NONE) == 0)
1170 				cccractivemask = 0x0;
1171 			else if (strcmp(q, P4_KW_ACTIVE_SINGLE) == 0)
1172 				cccractivemask = 0x1;
1173 			else if (strcmp(q, P4_KW_ACTIVE_BOTH) == 0)
1174 				cccractivemask = 0x2;
1175 			else if (strcmp(q, P4_KW_ACTIVE_ANY) == 0)
1176 				cccractivemask = 0x3;
1177 			else
1178 				return (-1);
1179 
1180 		} else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
1181 			if (has_busreqtype == 0)
1182 				return (-1);
1183 
1184 			q = strchr(p, '=');
1185 			if (*++q == '\0') /* skip '=' */
1186 				return (-1);
1187 
1188 			count = strtol(q, &e, 0);
1189 			if (e == q || *e != '\0')
1190 				return (-1);
1191 			evmask = (evmask & ~0x1F) | (count & 0x1F);
1192 		} else if (KWMATCH(p, P4_KW_CASCADE))
1193 			pmc_config->pm_caps |= PMC_CAP_CASCADE;
1194 		else if (KWMATCH(p, P4_KW_EDGE))
1195 			pmc_config->pm_caps |= PMC_CAP_EDGE;
1196 		else if (KWMATCH(p, P4_KW_INV))
1197 			pmc_config->pm_caps |= PMC_CAP_INVERT;
1198 		else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
1199 			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1200 				return (-1);
1201 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1202 		} else if (KWMATCH(p, P4_KW_OS))
1203 			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1204 		else if (KWMATCH(p, P4_KW_PRECISE))
1205 			pmc_config->pm_caps |= PMC_CAP_PRECISE;
1206 		else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
1207 			if (has_tag == 0)
1208 				return (-1);
1209 
1210 			q = strchr(p, '=');
1211 			if (*++q == '\0') /* skip '=' */
1212 				return (-1);
1213 
1214 			count = strtol(q, &e, 0);
1215 			if (e == q || *e != '\0')
1216 				return (-1);
1217 
1218 			pmc_config->pm_caps |= PMC_CAP_TAGGING;
1219 			pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
1220 			    P4_ESCR_TO_TAG_VALUE(count);
1221 		} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
1222 			q = strchr(p, '=');
1223 			if (*++q == '\0') /* skip '=' */
1224 				return (-1);
1225 
1226 			count = strtol(q, &e, 0);
1227 			if (e == q || *e != '\0')
1228 				return (-1);
1229 
1230 			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1231 			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
1232 			    ~P4_CCCR_THRESHOLD_MASK;
1233 			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1234 			    P4_CCCR_TO_THRESHOLD(count);
1235 		} else if (KWMATCH(p, P4_KW_USR))
1236 			pmc_config->pm_caps |= PMC_CAP_USER;
1237 		else
1238 			return (-1);
1239 	}
1240 
1241 	/* other post processing */
1242 	if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
1243 	    pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
1244 	    pe == PMC_EV_P4_BSQ_ALLOCATION)
1245 		pmc_config->pm_caps |= PMC_CAP_EDGE;
1246 
1247 	/* fill in thread activity mask */
1248 	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1249 	    P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
1250 
1251 	if (evmask)
1252 		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1253 
1254 	switch (pe) {
1255 	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1256 		if ((evmask & 0x06) == 0x06 ||
1257 		    (evmask & 0x18) == 0x18)
1258 			return (-1); /* can't have own+other bits together */
1259 		if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
1260 			evmask = 0x1D;
1261 		break;
1262 	case PMC_EV_P4_MACHINE_CLEAR:
1263 		/* only one bit is allowed to be set */
1264 		if ((evmask & (evmask - 1)) != 0)
1265 			return (-1);
1266 		if (evmask == 0) {
1267 			evmask = 0x1;	/* 'CLEAR' */
1268 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1269 		}
1270 		break;
1271 	default:
1272 		if (evmask == 0 && pmask) {
1273 			for (pm = pmask; pm->pm_name; pm++)
1274 				evmask |= pm->pm_value;
1275 			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1276 		}
1277 	}
1278 
1279 	pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
1280 	    P4_ESCR_TO_EVENT_MASK(evmask);
1281 
1282 	return (0);
1283 }
1284 
1285 #endif
1286 
1287 #if defined(__i386__)
1288 
1289 /*
1290  * Pentium style PMCs
1291  */
1292 
/*
 * Canonical event-name aliases for Pentium (P5) class PMCs.  "cycles"
 * maps to the TSC.  The table is terminated by the EV_ALIAS(NULL, NULL)
 * sentinel; pmc_allocate() stops at the first matching alias.
 */
static struct pmc_event_alias p5_aliases[] = {
	EV_ALIAS("branches",		"p5-taken-branches"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p5-data-read-miss-or-write-miss"),
	EV_ALIAS("ic-misses",		"p5-code-cache-miss"),
	EV_ALIAS("instructions",	"p5-instructions-executed"),
	EV_ALIAS("interrupts",		"p5-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",
	    "p5-number-of-cycles-not-in-halt-state"),
	EV_ALIAS(NULL, NULL)
};
1304 
1305 static int
1306 p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
1307     struct pmc_op_pmcallocate *pmc_config)
1308 {
1309 	return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */
1310 }
1311 
1312 /*
1313  * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
1314  * and Pentium M CPUs.
1315  */
1316 
/*
 * Canonical event-name aliases for P6-class (P6/PII/PIII/Pentium M)
 * PMCs.  Terminated by the EV_ALIAS(NULL, NULL) sentinel.
 */
static struct pmc_event_alias p6_aliases[] = {
	EV_ALIAS("branches",		"p6-br-inst-retired"),
	EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p6-dcu-lines-in"),
	EV_ALIAS("ic-misses",		"p6-ifu-fetch-miss"),
	EV_ALIAS("instructions",	"p6-inst-retired"),
	EV_ALIAS("interrupts",		"p6-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"p6-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};
1328 
/*
 * Keywords accepted as ","-separated modifiers in a P6 event
 * specifier; parsed by p6_allocate_pmc() below.
 */
#define	P6_KW_CMASK	"cmask"
#define	P6_KW_EDGE	"edge"
#define	P6_KW_INV	"inv"
#define	P6_KW_OS	"os"
#define	P6_KW_UMASK	"umask"
#define	P6_KW_USR	"usr"
1335 
/* Cache-line state qualifiers (m/e/s/i) for L2 cache events. */
static struct pmc_masks p6_mask_mesi[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	NULLMASK
};

/* MESI plus hardware-prefetch qualifiers (Pentium M L2 line events). */
static struct pmc_masks p6_mask_mesihw[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};

/* Hardware-prefetch-only qualifiers (Pentium M). */
static struct pmc_masks p6_mask_hw[] = {
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};

/* Bus agent qualifier: count 'self'-initiated or 'any' transactions. */
static struct pmc_masks p6_mask_any[] = {
	PMCMASK(self,	0x00),
	PMCMASK(any,	0x20),
	NULLMASK
};

/* KNI (SSE) prefetch hint qualifiers. */
static struct pmc_masks p6_mask_ekp[] = {
	PMCMASK(nta,	0x00),
	PMCMASK(t1,	0x01),
	PMCMASK(t2,	0x02),
	PMCMASK(wos,	0x03),
	NULLMASK
};

/* KNI instruction-type qualifiers. */
static struct pmc_masks p6_mask_pps[] = {
	PMCMASK(packed-and-scalar, 0x00),
	PMCMASK(scalar,	0x01),
	NULLMASK
};

/* MMX instruction-type qualifiers. */
static struct pmc_masks p6_mask_mite[] = {
	PMCMASK(packed-multiply,	 0x01),
	PMCMASK(packed-shift,		0x02),
	PMCMASK(pack,			0x04),
	PMCMASK(unpack,			0x08),
	PMCMASK(packed-logical,		0x10),
	PMCMASK(packed-arithmetic,	0x20),
	NULLMASK
};

/* FP <-> MMX transition direction qualifiers. */
static struct pmc_masks p6_mask_fmt[] = {
	PMCMASK(mmxtofp,	0x00),
	PMCMASK(fptommx,	0x01),
	NULLMASK
};

/* Segment-register qualifiers for segment rename events. */
static struct pmc_masks p6_mask_sr[] = {
	PMCMASK(es,	0x01),
	PMCMASK(ds,	0x02),
	PMCMASK(fs,	0x04),
	PMCMASK(gs,	0x08),
	NULLMASK
};

/* EST (Enhanced SpeedStep) transition qualifiers. */
static struct pmc_masks p6_mask_eet[] = {
	PMCMASK(all,	0x00),
	PMCMASK(freq,	0x02),
	NULLMASK
};

/* Fused micro-op qualifiers. */
static struct pmc_masks p6_mask_efur[] = {
	PMCMASK(all,	0x00),
	PMCMASK(loadop,	0x01),
	PMCMASK(stdsta,	0x02),
	NULLMASK
};

/* SSE/SSE2 instruction-retired qualifiers. */
static struct pmc_masks p6_mask_essir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-packed-single-scalar-single, 0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};

/* SSE/SSE2 computational instruction-retired qualifiers. */
static struct pmc_masks p6_mask_esscir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-scalar-single,	0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};
1434 
/*
 * P6 event parser.
 *
 * Parse a P6-class (P6/PII/PIII/Pentium M) event specifier 'ctrspec'
 * for event 'pe' and fill in the capability bits and the
 * machine-dependent 'pm_ppro_config' field of 'pmc_config'.  Returns
 * 0 on success, -1 if the specifier is malformed or inconsistent with
 * the event.
 */
static int
p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint32_t evmask;
	int count, n;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= PMC_CAP_READ;
	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;

	/* The TSC is fixed-function; it accepts no modifiers. */
	if (pe == PMC_EV_TSC_TSC) {
		if (ctrspec && *ctrspec != '\0')
			return (-1);
		return (0);
	}

	pmc_config->pm_caps |= PMC_CAP_WRITE;
	evmask = 0;

#define	P6MASKSET(M)	pmask = p6_mask_ ## M

	/* Select the table of valid "umask=" qualifiers for this event. */
	switch(pe) {
	case PMC_EV_P6_L2_IFETCH:	P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:
		P6MASKSET(any);	break;
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:
		P6MASKSET(ekp); break;
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
		P6MASKSET(pps);	break;
	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
		P6MASKSET(mite); break;
	case PMC_EV_P6_FP_MMX_TRANS:
		P6MASKSET(fmt);	break;
	case PMC_EV_P6_SEG_RENAME_STALLS:
	case PMC_EV_P6_SEG_REG_RENAMES:
		P6MASKSET(sr);	break;
	case PMC_EV_P6_EMON_EST_TRANS:
		P6MASKSET(eet);	break;
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
		P6MASKSET(efur); break;
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
		P6MASKSET(essir); break;
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
		P6MASKSET(esscir); break;
	default:
		pmask = NULL;	/* event takes no "umask=" qualifier */
		break;
	}

	/* Pentium M PMCs have a few events with different semantics */
	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
		if (pe == PMC_EV_P6_L2_LD ||
		    pe == PMC_EV_P6_L2_LINES_IN ||
		    pe == PMC_EV_P6_L2_LINES_OUT)
			P6MASKSET(mesihw);
		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
			P6MASKSET(hw);
	}

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
			    P6_EVSEL_TO_CMASK(count);
		} else if (KWMATCH(p, P6_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, P6_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, P6_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
			evmask = 0;
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			/*
			 * The events listed below accept at most one
			 * "umask=" mask keyword.
			 */
			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
			     pe == PMC_EV_P6_BUS_TRANS_IO ||
			     pe == PMC_EV_P6_BUS_TRANS_P ||
			     pe == PMC_EV_P6_BUS_TRANS_WB ||
			     pe == PMC_EV_P6_EMON_EST_TRANS ||
			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
			     pe == PMC_EV_P6_FP_MMX_TRANS)
			    && (n > 1))	/* Only one mask keyword is allowed. */
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P6_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* post processing */
	switch (pe) {

		/*
		 * The following events default to an evmask of 0
		 */

		/* default => 'self' */
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:

		/* default => 'nta' */
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:

		/* default => 'packed and scalar' */
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:

		/* default => 'mmx to fp transitions' */
	case PMC_EV_P6_FP_MMX_TRANS:

		/* default => 'SSE Packed Single' */
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:

		/* default => 'all fused micro-ops' */
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:

		/* default => 'all transitions' */
	case PMC_EV_P6_EMON_EST_TRANS:
		break;

	case PMC_EV_P6_MMX_UOPS_EXEC:
		evmask = 0x0F;		/* only value allowed */
		break;

	default:
		/*
		 * For all other events, set the default event mask
		 * to a logical OR of all the allowed event mask bits.
		 */
		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	/* encode the final event mask into the EVSEL umask field */
	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return (0);
}
1638 
1639 #endif
1640 
1641 /*
1642  * API entry points
1643  */
1644 
1645 int
1646 pmc_allocate(const char *ctrspec, enum pmc_mode mode,
1647     uint32_t flags, int cpu, pmc_id_t *pmcid)
1648 {
1649 	int retval;
1650 	enum pmc_event pe;
1651 	char *r, *spec_copy;
1652 	const char *ctrname;
1653 	const struct pmc_event_alias *p;
1654 	struct pmc_op_pmcallocate pmc_config;
1655 
1656 	spec_copy = NULL;
1657 	retval    = -1;
1658 
1659 	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
1660 	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
1661 		errno = EINVAL;
1662 		goto out;
1663 	}
1664 
1665 	/* replace an event alias with the canonical event specifier */
1666 	if (pmc_mdep_event_aliases)
1667 		for (p = pmc_mdep_event_aliases; p->pm_alias; p++)
1668 			if (!strcmp(ctrspec, p->pm_alias)) {
1669 				spec_copy = strdup(p->pm_spec);
1670 				break;
1671 			}
1672 
1673 	if (spec_copy == NULL)
1674 		spec_copy = strdup(ctrspec);
1675 
1676 	r = spec_copy;
1677 	ctrname = strsep(&r, ",");
1678 
1679 	/* look for the given counter name */
1680 	for (pe = PMC_EVENT_FIRST; pe < (PMC_EVENT_LAST+1); pe++)
1681 		if (!strcmp(ctrname, pmc_event_table[pe].pm_ev_name))
1682 			break;
1683 
1684 	if (pe > PMC_EVENT_LAST) {
1685 		errno = EINVAL;
1686 		goto out;
1687 	}
1688 
1689 	bzero(&pmc_config, sizeof(pmc_config));
1690 	pmc_config.pm_ev    = pmc_event_table[pe].pm_ev_code;
1691 	pmc_config.pm_class = pmc_event_table[pe].pm_ev_class;
1692 	pmc_config.pm_cpu   = cpu;
1693 	pmc_config.pm_mode  = mode;
1694 	pmc_config.pm_flags = flags;
1695 
1696 	if (PMC_IS_SAMPLING_MODE(mode))
1697 		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
1698 
1699 	if (pmc_mdep_allocate_pmc(pe, r, &pmc_config) < 0) {
1700 		errno = EINVAL;
1701 		goto out;
1702 	}
1703 
1704 	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
1705 		goto out;
1706 
1707 	*pmcid = pmc_config.pm_pmcid;
1708 
1709 	retval = 0;
1710 
1711  out:
1712 	if (spec_copy)
1713 		free(spec_copy);
1714 
1715 	return (retval);
1716 }
1717 
1718 int
1719 pmc_attach(pmc_id_t pmc, pid_t pid)
1720 {
1721 	struct pmc_op_pmcattach pmc_attach_args;
1722 
1723 	pmc_attach_args.pm_pmc = pmc;
1724 	pmc_attach_args.pm_pid = pid;
1725 
1726 	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
1727 }
1728 
1729 int
1730 pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
1731 {
1732 	unsigned int i;
1733 	enum pmc_class cl;
1734 
1735 	cl = PMC_ID_TO_CLASS(pmcid);
1736 	for (i = 0; i < cpu_info.pm_nclass; i++)
1737 		if (cpu_info.pm_classes[i].pm_class == cl) {
1738 			*caps = cpu_info.pm_classes[i].pm_caps;
1739 			return (0);
1740 		}
1741 	errno = EINVAL;
1742 	return (-1);
1743 }
1744 
1745 int
1746 pmc_configure_logfile(int fd)
1747 {
1748 	struct pmc_op_configurelog cla;
1749 
1750 	cla.pm_logfd = fd;
1751 	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
1752 		return (-1);
1753 	return (0);
1754 }
1755 
1756 int
1757 pmc_cpuinfo(const struct pmc_cpuinfo **pci)
1758 {
1759 	if (pmc_syscall == -1) {
1760 		errno = ENXIO;
1761 		return (-1);
1762 	}
1763 
1764 	*pci = &cpu_info;
1765 	return (0);
1766 }
1767 
1768 int
1769 pmc_detach(pmc_id_t pmc, pid_t pid)
1770 {
1771 	struct pmc_op_pmcattach pmc_detach_args;
1772 
1773 	pmc_detach_args.pm_pmc = pmc;
1774 	pmc_detach_args.pm_pid = pid;
1775 	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
1776 }
1777 
1778 int
1779 pmc_disable(int cpu, int pmc)
1780 {
1781 	struct pmc_op_pmcadmin ssa;
1782 
1783 	ssa.pm_cpu = cpu;
1784 	ssa.pm_pmc = pmc;
1785 	ssa.pm_state = PMC_STATE_DISABLED;
1786 	return (PMC_CALL(PMCADMIN, &ssa));
1787 }
1788 
1789 int
1790 pmc_enable(int cpu, int pmc)
1791 {
1792 	struct pmc_op_pmcadmin ssa;
1793 
1794 	ssa.pm_cpu = cpu;
1795 	ssa.pm_pmc = pmc;
1796 	ssa.pm_state = PMC_STATE_FREE;
1797 	return (PMC_CALL(PMCADMIN, &ssa));
1798 }
1799 
1800 /*
1801  * Return a list of events known to a given PMC class.  'cl' is the
1802  * PMC class identifier, 'eventnames' is the returned list of 'const
1803  * char *' pointers pointing to the names of the events. 'nevents' is
1804  * the number of event name pointers returned.
1805  *
1806  * The space for 'eventnames' is allocated using malloc(3).  The caller
1807  * is responsible for freeing this space when done.
1808  */
1809 int
1810 pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
1811     int *nevents)
1812 {
1813 	int count;
1814 	const char **names;
1815 	const struct pmc_event_descr *ev;
1816 
1817 	switch (cl)
1818 	{
1819 	case PMC_CLASS_TSC:
1820 		ev = &pmc_event_table[PMC_EV_TSC_TSC];
1821 		count = 1;
1822 		break;
1823 	case PMC_CLASS_K7:
1824 		ev = &pmc_event_table[PMC_EV_K7_FIRST];
1825 		count = PMC_EV_K7_LAST - PMC_EV_K7_FIRST + 1;
1826 		break;
1827 	case PMC_CLASS_K8:
1828 		ev = &pmc_event_table[PMC_EV_K8_FIRST];
1829 		count = PMC_EV_K8_LAST - PMC_EV_K8_FIRST + 1;
1830 		break;
1831 	case PMC_CLASS_P5:
1832 		ev = &pmc_event_table[PMC_EV_P5_FIRST];
1833 		count = PMC_EV_P5_LAST - PMC_EV_P5_FIRST + 1;
1834 		break;
1835 	case PMC_CLASS_P6:
1836 		ev = &pmc_event_table[PMC_EV_P6_FIRST];
1837 		count = PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1;
1838 		break;
1839 	case PMC_CLASS_P4:
1840 		ev = &pmc_event_table[PMC_EV_P4_FIRST];
1841 		count = PMC_EV_P4_LAST - PMC_EV_P4_FIRST + 1;
1842 		break;
1843 	default:
1844 		errno = EINVAL;
1845 		return (-1);
1846 	}
1847 
1848 	if ((names = malloc(count * sizeof(const char *))) == NULL)
1849 		return (-1);
1850 
1851 	*eventnames = names;
1852 	*nevents = count;
1853 
1854 	for (;count--; ev++, names++)
1855 		*names = ev->pm_ev_name;
1856 	return (0);
1857 }
1858 
1859 int
1860 pmc_flush_logfile(void)
1861 {
1862 	return (PMC_CALL(FLUSHLOG,0));
1863 }
1864 
1865 int
1866 pmc_get_driver_stats(struct pmc_driverstats *ds)
1867 {
1868 	struct pmc_op_getdriverstats gms;
1869 
1870 	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
1871 		return (-1);
1872 
1873 	/* copy out fields in the current userland<->library interface */
1874 	ds->pm_intr_ignored    = gms.pm_intr_ignored;
1875 	ds->pm_intr_processed  = gms.pm_intr_processed;
1876 	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
1877 	ds->pm_syscalls        = gms.pm_syscalls;
1878 	ds->pm_syscall_errors  = gms.pm_syscall_errors;
1879 	ds->pm_buffer_requests = gms.pm_buffer_requests;
1880 	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
1881 	ds->pm_log_sweeps      = gms.pm_log_sweeps;
1882 	return (0);
1883 }
1884 
1885 int
1886 pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
1887 {
1888 	struct pmc_op_getmsr gm;
1889 
1890 	gm.pm_pmcid = pmc;
1891 	if (PMC_CALL(PMCGETMSR, &gm) < 0)
1892 		return (-1);
1893 	*msr = gm.pm_msr;
1894 	return (0);
1895 }
1896 
/*
 * Initialize the library: look up the pmc module's system call number
 * via modfind()/modstat(), verify the kernel module's ABI major
 * version against the library's compiled-in PMC_VERSION, cache CPU
 * information and select the machine-dependent event parser.  Returns
 * 0 on success; on failure returns -1 and resets pmc_syscall to -1 so
 * query functions (e.g. pmc_ncpu()) subsequently fail with ENXIO.
 */
int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno  = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	/* cache CPU information for the query functions */
	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
	for (n = 0; n < cpu_info.pm_nclass; n++)
		cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];

	/* set parser pointer */
	switch (cpu_info.pm_cputype) {
#if defined(__i386__)
	case PMC_CPU_AMD_K7:
		pmc_mdep_event_aliases = k7_aliases;
		pmc_mdep_allocate_pmc = k7_allocate_pmc;
		break;
	case PMC_CPU_INTEL_P5:
		pmc_mdep_event_aliases = p5_aliases;
		pmc_mdep_allocate_pmc = p5_allocate_pmc;
		break;
	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
	case PMC_CPU_INTEL_PIII:
	case PMC_CPU_INTEL_PM:
		pmc_mdep_event_aliases = p6_aliases;
		pmc_mdep_allocate_pmc = p6_allocate_pmc;
		break;
#endif
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_INTEL_PIV:
		pmc_mdep_event_aliases = p4_aliases;
		pmc_mdep_allocate_pmc = p4_allocate_pmc;
		break;
	case PMC_CPU_AMD_K8:
		pmc_mdep_event_aliases = k8_aliases;
		pmc_mdep_allocate_pmc = k8_allocate_pmc;
		break;
#endif

	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about.  This shouldn't happen since the abi version check
		 * should have caught this.
		 */
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}
1982 
1983 const char *
1984 pmc_name_of_capability(enum pmc_caps cap)
1985 {
1986 	int i;
1987 
1988 	/*
1989 	 * 'cap' should have a single bit set and should be in
1990 	 * range.
1991 	 */
1992 	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
1993 	    cap > PMC_CAP_LAST) {
1994 		errno = EINVAL;
1995 		return (NULL);
1996 	}
1997 
1998 	i = ffs(cap);
1999 	return (pmc_capability_names[i - 1]);
2000 }
2001 
2002 const char *
2003 pmc_name_of_class(enum pmc_class pc)
2004 {
2005 	if ((int) pc >= PMC_CLASS_FIRST &&
2006 	    pc <= PMC_CLASS_LAST)
2007 		return (pmc_class_names[pc]);
2008 
2009 	errno = EINVAL;
2010 	return (NULL);
2011 }
2012 
2013 const char *
2014 pmc_name_of_cputype(enum pmc_cputype cp)
2015 {
2016 	if ((int) cp >= PMC_CPU_FIRST &&
2017 	    cp <= PMC_CPU_LAST)
2018 		return (pmc_cputype_names[cp]);
2019 	errno = EINVAL;
2020 	return (NULL);
2021 }
2022 
2023 const char *
2024 pmc_name_of_disposition(enum pmc_disp pd)
2025 {
2026 	if ((int) pd >= PMC_DISP_FIRST &&
2027 	    pd <= PMC_DISP_LAST)
2028 		return (pmc_disposition_names[pd]);
2029 
2030 	errno = EINVAL;
2031 	return (NULL);
2032 }
2033 
2034 const char *
2035 pmc_name_of_event(enum pmc_event pe)
2036 {
2037 	if ((int) pe >= PMC_EVENT_FIRST &&
2038 	    pe <= PMC_EVENT_LAST)
2039 		return (pmc_event_table[pe].pm_ev_name);
2040 
2041 	errno = EINVAL;
2042 	return (NULL);
2043 }
2044 
2045 const char *
2046 pmc_name_of_mode(enum pmc_mode pm)
2047 {
2048 	if ((int) pm >= PMC_MODE_FIRST &&
2049 	    pm <= PMC_MODE_LAST)
2050 		return (pmc_mode_names[pm]);
2051 
2052 	errno = EINVAL;
2053 	return (NULL);
2054 }
2055 
2056 const char *
2057 pmc_name_of_state(enum pmc_state ps)
2058 {
2059 	if ((int) ps >= PMC_STATE_FIRST &&
2060 	    ps <= PMC_STATE_LAST)
2061 		return (pmc_state_names[ps]);
2062 
2063 	errno = EINVAL;
2064 	return (NULL);
2065 }
2066 
2067 int
2068 pmc_ncpu(void)
2069 {
2070 	if (pmc_syscall == -1) {
2071 		errno = ENXIO;
2072 		return (-1);
2073 	}
2074 
2075 	return (cpu_info.pm_ncpu);
2076 }
2077 
2078 int
2079 pmc_npmc(int cpu)
2080 {
2081 	if (pmc_syscall == -1) {
2082 		errno = ENXIO;
2083 		return (-1);
2084 	}
2085 
2086 	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
2087 		errno = EINVAL;
2088 		return (-1);
2089 	}
2090 
2091 	return (cpu_info.pm_npmc);
2092 }
2093 
2094 int
2095 pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
2096 {
2097 	int nbytes, npmc;
2098 	struct pmc_op_getpmcinfo *pmci;
2099 
2100 	if ((npmc = pmc_npmc(cpu)) < 0)
2101 		return (-1);
2102 
2103 	nbytes = sizeof(struct pmc_op_getpmcinfo) +
2104 	    npmc * sizeof(struct pmc_info);
2105 
2106 	if ((pmci = calloc(1, nbytes)) == NULL)
2107 		return (-1);
2108 
2109 	pmci->pm_cpu  = cpu;
2110 
2111 	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
2112 		free(pmci);
2113 		return (-1);
2114 	}
2115 
2116 	/* kernel<->library, library<->userland interfaces are identical */
2117 	*ppmci = (struct pmc_pmcinfo *) pmci;
2118 	return (0);
2119 }
2120 
2121 int
2122 pmc_read(pmc_id_t pmc, pmc_value_t *value)
2123 {
2124 	struct pmc_op_pmcrw pmc_read_op;
2125 
2126 	pmc_read_op.pm_pmcid = pmc;
2127 	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
2128 	pmc_read_op.pm_value = -1;
2129 
2130 	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
2131 		return (-1);
2132 
2133 	*value = pmc_read_op.pm_value;
2134 	return (0);
2135 }
2136 
2137 int
2138 pmc_release(pmc_id_t pmc)
2139 {
2140 	struct pmc_op_simple	pmc_release_args;
2141 
2142 	pmc_release_args.pm_pmcid = pmc;
2143 	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
2144 }
2145 
2146 int
2147 pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
2148 {
2149 	struct pmc_op_pmcrw pmc_rw_op;
2150 
2151 	pmc_rw_op.pm_pmcid = pmc;
2152 	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
2153 	pmc_rw_op.pm_value = newvalue;
2154 
2155 	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
2156 		return (-1);
2157 
2158 	*oldvaluep = pmc_rw_op.pm_value;
2159 	return (0);
2160 }
2161 
2162 int
2163 pmc_set(pmc_id_t pmc, pmc_value_t value)
2164 {
2165 	struct pmc_op_pmcsetcount sc;
2166 
2167 	sc.pm_pmcid = pmc;
2168 	sc.pm_count = value;
2169 
2170 	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
2171 		return (-1);
2172 	return (0);
2173 }
2174 
2175 int
2176 pmc_start(pmc_id_t pmc)
2177 {
2178 	struct pmc_op_simple	pmc_start_args;
2179 
2180 	pmc_start_args.pm_pmcid = pmc;
2181 	return (PMC_CALL(PMCSTART, &pmc_start_args));
2182 }
2183 
2184 int
2185 pmc_stop(pmc_id_t pmc)
2186 {
2187 	struct pmc_op_simple	pmc_stop_args;
2188 
2189 	pmc_stop_args.pm_pmcid = pmc;
2190 	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
2191 }
2192 
2193 int
2194 pmc_width(pmc_id_t pmcid, uint32_t *width)
2195 {
2196 	unsigned int i;
2197 	enum pmc_class cl;
2198 
2199 	cl = PMC_ID_TO_CLASS(pmcid);
2200 	for (i = 0; i < cpu_info.pm_nclass; i++)
2201 		if (cpu_info.pm_classes[i].pm_class == cl) {
2202 			*width = cpu_info.pm_classes[i].pm_width;
2203 			return (0);
2204 		}
2205 	errno = EINVAL;
2206 	return (-1);
2207 }
2208 
2209 int
2210 pmc_write(pmc_id_t pmc, pmc_value_t value)
2211 {
2212 	struct pmc_op_pmcrw pmc_write_op;
2213 
2214 	pmc_write_op.pm_pmcid = pmc;
2215 	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
2216 	pmc_write_op.pm_value = value;
2217 	return (PMC_CALL(PMCRW, &pmc_write_op));
2218 }
2219 
2220 int
2221 pmc_writelog(uint32_t userdata)
2222 {
2223 	struct pmc_op_writelog wl;
2224 
2225 	wl.pm_userdata = userdata;
2226 	return (PMC_CALL(WRITELOG, &wl));
2227 }
2228