1 /*- 2 * Copyright (c) 2003-2008 Joseph Koshy 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/types.h> 31 #include <sys/param.h> 32 #include <sys/module.h> 33 #include <sys/pmc.h> 34 #include <sys/syscall.h> 35 36 #include <ctype.h> 37 #include <errno.h> 38 #include <fcntl.h> 39 #include <pmc.h> 40 #include <stdio.h> 41 #include <stdlib.h> 42 #include <string.h> 43 #include <strings.h> 44 #include <unistd.h> 45 46 #include "libpmcinternal.h" 47 48 /* Function prototypes */ 49 #if defined(__i386__) 50 static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 51 struct pmc_op_pmcallocate *_pmc_config); 52 #endif 53 #if defined(__amd64__) || defined(__i386__) 54 static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 55 struct pmc_op_pmcallocate *_pmc_config); 56 static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 57 struct pmc_op_pmcallocate *_pmc_config); 58 static int ucf_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 59 struct pmc_op_pmcallocate *_pmc_config); 60 static int ucp_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 61 struct pmc_op_pmcallocate *_pmc_config); 62 static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 63 struct pmc_op_pmcallocate *_pmc_config); 64 static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 65 struct pmc_op_pmcallocate *_pmc_config); 66 #endif 67 #if defined(__i386__) 68 static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 69 struct pmc_op_pmcallocate *_pmc_config); 70 static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 71 struct pmc_op_pmcallocate *_pmc_config); 72 #endif 73 #if defined(__amd64__) || defined(__i386__) 74 static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 75 struct pmc_op_pmcallocate *_pmc_config); 76 #endif 77 #if defined(__XSCALE__) 78 static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 79 struct pmc_op_pmcallocate *_pmc_config); 80 #endif 81 #if defined(__mips__) 82 static int mips_allocate_pmc(enum pmc_event _pe, char* ctrspec, 83 
struct pmc_op_pmcallocate *_pmc_config); 84 #endif /* __mips__ */ 85 static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 86 struct pmc_op_pmcallocate *_pmc_config); 87 88 #if defined(__powerpc__) 89 static int powerpc_allocate_pmc(enum pmc_event _pe, char* ctrspec, 90 struct pmc_op_pmcallocate *_pmc_config); 91 #endif /* __powerpc__ */ 92 93 #define PMC_CALL(cmd, params) \ 94 syscall(pmc_syscall, PMC_OP_##cmd, (params)) 95 96 /* 97 * Event aliases provide a way for the user to ask for generic events 98 * like "cache-misses", or "instructions-retired". These aliases are 99 * mapped to the appropriate canonical event descriptions using a 100 * lookup table. 101 */ 102 struct pmc_event_alias { 103 const char *pm_alias; 104 const char *pm_spec; 105 }; 106 107 static const struct pmc_event_alias *pmc_mdep_event_aliases; 108 109 /* 110 * The pmc_event_descr structure maps symbolic names known to the user 111 * to integer codes used by the PMC KLD. 112 */ 113 struct pmc_event_descr { 114 const char *pm_ev_name; 115 enum pmc_event pm_ev_code; 116 }; 117 118 /* 119 * The pmc_class_descr structure maps class name prefixes for 120 * event names to event tables and other PMC class data. 121 */ 122 struct pmc_class_descr { 123 const char *pm_evc_name; 124 size_t pm_evc_name_size; 125 enum pmc_class pm_evc_class; 126 const struct pmc_event_descr *pm_evc_event_table; 127 size_t pm_evc_event_table_size; 128 int (*pm_evc_allocate_pmc)(enum pmc_event _pe, 129 char *_ctrspec, struct pmc_op_pmcallocate *_pa); 130 }; 131 132 #define PMC_TABLE_SIZE(N) (sizeof(N)/sizeof(N[0])) 133 #define PMC_EVENT_TABLE_SIZE(N) PMC_TABLE_SIZE(N##_event_table) 134 135 #undef __PMC_EV 136 #define __PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N }, 137 138 /* 139 * PMC_CLASSDEP_TABLE(NAME, CLASS) 140 * 141 * Define a table mapping event names and aliases to HWPMC event IDs. 
142 */ 143 #define PMC_CLASSDEP_TABLE(N, C) \ 144 static const struct pmc_event_descr N##_event_table[] = \ 145 { \ 146 __PMC_EV_##C() \ 147 } 148 149 PMC_CLASSDEP_TABLE(iaf, IAF); 150 PMC_CLASSDEP_TABLE(k7, K7); 151 PMC_CLASSDEP_TABLE(k8, K8); 152 PMC_CLASSDEP_TABLE(p4, P4); 153 PMC_CLASSDEP_TABLE(p5, P5); 154 PMC_CLASSDEP_TABLE(p6, P6); 155 PMC_CLASSDEP_TABLE(xscale, XSCALE); 156 PMC_CLASSDEP_TABLE(mips24k, MIPS24K); 157 PMC_CLASSDEP_TABLE(octeon, OCTEON); 158 PMC_CLASSDEP_TABLE(ucf, UCF); 159 PMC_CLASSDEP_TABLE(ppc7450, PPC7450); 160 PMC_CLASSDEP_TABLE(ppc970, PPC970); 161 162 static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT]; 163 164 #undef __PMC_EV_ALIAS 165 #define __PMC_EV_ALIAS(N,CODE) { N, PMC_EV_##CODE }, 166 167 static const struct pmc_event_descr atom_event_table[] = 168 { 169 __PMC_EV_ALIAS_ATOM() 170 }; 171 172 static const struct pmc_event_descr core_event_table[] = 173 { 174 __PMC_EV_ALIAS_CORE() 175 }; 176 177 178 static const struct pmc_event_descr core2_event_table[] = 179 { 180 __PMC_EV_ALIAS_CORE2() 181 }; 182 183 static const struct pmc_event_descr corei7_event_table[] = 184 { 185 __PMC_EV_ALIAS_COREI7() 186 }; 187 188 static const struct pmc_event_descr haswell_event_table[] = 189 { 190 __PMC_EV_ALIAS_HASWELL() 191 }; 192 193 static const struct pmc_event_descr ivybridge_event_table[] = 194 { 195 __PMC_EV_ALIAS_IVYBRIDGE() 196 }; 197 198 static const struct pmc_event_descr ivybridge_xeon_event_table[] = 199 { 200 __PMC_EV_ALIAS_IVYBRIDGE_XEON() 201 }; 202 203 static const struct pmc_event_descr sandybridge_event_table[] = 204 { 205 __PMC_EV_ALIAS_SANDYBRIDGE() 206 }; 207 208 static const struct pmc_event_descr sandybridge_xeon_event_table[] = 209 { 210 __PMC_EV_ALIAS_SANDYBRIDGE_XEON() 211 }; 212 213 static const struct pmc_event_descr westmere_event_table[] = 214 { 215 __PMC_EV_ALIAS_WESTMERE() 216 }; 217 218 static const struct pmc_event_descr corei7uc_event_table[] = 219 { 220 __PMC_EV_ALIAS_COREI7UC() 221 }; 222 223 static 
const struct pmc_event_descr haswelluc_event_table[] = 224 { 225 __PMC_EV_ALIAS_HASWELLUC() 226 }; 227 228 static const struct pmc_event_descr sandybridgeuc_event_table[] = 229 { 230 __PMC_EV_ALIAS_SANDYBRIDGEUC() 231 }; 232 233 static const struct pmc_event_descr westmereuc_event_table[] = 234 { 235 __PMC_EV_ALIAS_WESTMEREUC() 236 }; 237 238 /* 239 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...) 240 * 241 * Map a CPU to the PMC classes it supports. 242 */ 243 #define PMC_MDEP_TABLE(N,C,...) \ 244 static const enum pmc_class N##_pmc_classes[] = { \ 245 PMC_CLASS_##C, __VA_ARGS__ \ 246 } 247 248 PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC); 249 PMC_MDEP_TABLE(core, IAP, PMC_CLASS_SOFT, PMC_CLASS_TSC); 250 PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC); 251 PMC_MDEP_TABLE(corei7, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP); 252 PMC_MDEP_TABLE(haswell, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP); 253 PMC_MDEP_TABLE(ivybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC); 254 PMC_MDEP_TABLE(ivybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC); 255 PMC_MDEP_TABLE(sandybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP); 256 PMC_MDEP_TABLE(sandybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC); 257 PMC_MDEP_TABLE(westmere, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP); 258 PMC_MDEP_TABLE(k7, K7, PMC_CLASS_SOFT, PMC_CLASS_TSC); 259 PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC); 260 PMC_MDEP_TABLE(p4, P4, PMC_CLASS_SOFT, PMC_CLASS_TSC); 261 PMC_MDEP_TABLE(p5, P5, PMC_CLASS_SOFT, PMC_CLASS_TSC); 262 PMC_MDEP_TABLE(p6, P6, PMC_CLASS_SOFT, PMC_CLASS_TSC); 263 PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE); 264 PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K); 265 PMC_MDEP_TABLE(octeon, 
OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON); 266 PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450); 267 PMC_MDEP_TABLE(ppc970, PPC970, PMC_CLASS_SOFT, PMC_CLASS_PPC970); 268 PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT); 269 270 static const struct pmc_event_descr tsc_event_table[] = 271 { 272 __PMC_EV_TSC() 273 }; 274 275 #undef PMC_CLASS_TABLE_DESC 276 #define PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR) \ 277 static const struct pmc_class_descr NAME##_class_table_descr = \ 278 { \ 279 .pm_evc_name = #CLASS "-", \ 280 .pm_evc_name_size = sizeof(#CLASS "-") - 1, \ 281 .pm_evc_class = PMC_CLASS_##CLASS , \ 282 .pm_evc_event_table = EVENTS##_event_table , \ 283 .pm_evc_event_table_size = \ 284 PMC_EVENT_TABLE_SIZE(EVENTS), \ 285 .pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc \ 286 } 287 288 #if defined(__i386__) || defined(__amd64__) 289 PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf); 290 PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap); 291 PMC_CLASS_TABLE_DESC(core, IAP, core, iap); 292 PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap); 293 PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap); 294 PMC_CLASS_TABLE_DESC(haswell, IAP, haswell, iap); 295 PMC_CLASS_TABLE_DESC(ivybridge, IAP, ivybridge, iap); 296 PMC_CLASS_TABLE_DESC(ivybridge_xeon, IAP, ivybridge_xeon, iap); 297 PMC_CLASS_TABLE_DESC(sandybridge, IAP, sandybridge, iap); 298 PMC_CLASS_TABLE_DESC(sandybridge_xeon, IAP, sandybridge_xeon, iap); 299 PMC_CLASS_TABLE_DESC(westmere, IAP, westmere, iap); 300 PMC_CLASS_TABLE_DESC(ucf, UCF, ucf, ucf); 301 PMC_CLASS_TABLE_DESC(corei7uc, UCP, corei7uc, ucp); 302 PMC_CLASS_TABLE_DESC(haswelluc, UCP, haswelluc, ucp); 303 PMC_CLASS_TABLE_DESC(sandybridgeuc, UCP, sandybridgeuc, ucp); 304 PMC_CLASS_TABLE_DESC(westmereuc, UCP, westmereuc, ucp); 305 #endif 306 #if defined(__i386__) 307 PMC_CLASS_TABLE_DESC(k7, K7, k7, k7); 308 #endif 309 #if defined(__i386__) || defined(__amd64__) 310 PMC_CLASS_TABLE_DESC(k8, K8, k8, k8); 311 PMC_CLASS_TABLE_DESC(p4, P4, p4, p4); 312 
#endif 313 #if defined(__i386__) 314 PMC_CLASS_TABLE_DESC(p5, P5, p5, p5); 315 PMC_CLASS_TABLE_DESC(p6, P6, p6, p6); 316 #endif 317 #if defined(__i386__) || defined(__amd64__) 318 PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc); 319 #endif 320 #if defined(__XSCALE__) 321 PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale); 322 #endif 323 #if defined(__mips__) 324 PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips); 325 PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips); 326 #endif /* __mips__ */ 327 #if defined(__powerpc__) 328 PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc); 329 PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc); 330 #endif 331 332 static struct pmc_class_descr soft_class_table_descr = 333 { 334 .pm_evc_name = "SOFT-", 335 .pm_evc_name_size = sizeof("SOFT-") - 1, 336 .pm_evc_class = PMC_CLASS_SOFT, 337 .pm_evc_event_table = NULL, 338 .pm_evc_event_table_size = 0, 339 .pm_evc_allocate_pmc = soft_allocate_pmc 340 }; 341 342 #undef PMC_CLASS_TABLE_DESC 343 344 static const struct pmc_class_descr **pmc_class_table; 345 #define PMC_CLASS_TABLE_SIZE cpu_info.pm_nclass 346 347 static const enum pmc_class *pmc_mdep_class_list; 348 static size_t pmc_mdep_class_list_size; 349 350 /* 351 * Mapping tables, mapping enumeration values to human readable 352 * strings. 
 */

/*
 * Capability bit names, in __PMC_CAPS() declaration order; indexed by
 * bit position when decoding a capability mask.
 */
static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

/* PMC class names, in __PMC_CLASSES() declaration order. */
static const char * pmc_class_names[] = {
#undef	__PMC_CLASS
#define	__PMC_CLASS(C)	#C ,
	__PMC_CLASSES()
};

/* Maps a CPU type enumeration value to its symbolic name. */
struct pmc_cputype_map {
	enum pmc_cputype pm_cputype;
	const char	*pm_name;
};

static const struct pmc_cputype_map pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
	__PMC_CPUS()
};

/* PMC row-disposition names, in __PMC_DISPOSITIONS() order. */
static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

/* PMC mode names, in __PMC_MODES() order. */
static const char * pmc_mode_names[] = {
#undef	__PMC_MODE
#define	__PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

/* PMC state names, in __PMC_STATES() order. */
static const char * pmc_state_names[] = {
#undef	__PMC_STATE
#define	__PMC_STATE(S)	#S ,
	__PMC_STATES()
};

/*
 * Filled in by pmc_init().
398 */ 399 static int pmc_syscall = -1; 400 static struct pmc_cpuinfo cpu_info; 401 static struct pmc_op_getdyneventinfo soft_event_info; 402 403 /* Event masks for events */ 404 struct pmc_masks { 405 const char *pm_name; 406 const uint64_t pm_value; 407 }; 408 #define PMCMASK(N,V) { .pm_name = #N, .pm_value = (V) } 409 #define NULLMASK { .pm_name = NULL } 410 411 #if defined(__amd64__) || defined(__i386__) 412 static int 413 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask) 414 { 415 const struct pmc_masks *pm; 416 char *q, *r; 417 int c; 418 419 if (pmask == NULL) /* no mask keywords */ 420 return (-1); 421 q = strchr(p, '='); /* skip '=' */ 422 if (*++q == '\0') /* no more data */ 423 return (-1); 424 c = 0; /* count of mask keywords seen */ 425 while ((r = strsep(&q, "+")) != NULL) { 426 for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name); 427 pm++) 428 ; 429 if (pm->pm_name == NULL) /* not found */ 430 return (-1); 431 *evmask |= pm->pm_value; 432 c++; 433 } 434 return (c); 435 } 436 #endif 437 438 #define KWMATCH(p,kw) (strcasecmp((p), (kw)) == 0) 439 #define KWPREFIXMATCH(p,kw) (strncasecmp((p), (kw), sizeof((kw)) - 1) == 0) 440 #define EV_ALIAS(N,S) { .pm_alias = N, .pm_spec = S } 441 442 #if defined(__i386__) 443 444 /* 445 * AMD K7 (Athlon) CPUs. 
 */

/* User-friendly aliases for common K7 events; "cycles" maps to the TSC. */
static struct pmc_event_alias k7_aliases[] = {
	EV_ALIAS("branches",		"k7-retired-branches"),
	EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k7-dc-misses"),
	EV_ALIAS("ic-misses",		"k7-ic-misses"),
	EV_ALIAS("instructions",	"k7-retired-instructions"),
	EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
	EV_ALIAS(NULL, NULL)
};

/* Event specifier keywords recognized on K7. */
#define	K7_KW_COUNT	"count"
#define	K7_KW_EDGE	"edge"
#define	K7_KW_INV	"inv"
#define	K7_KW_OS	"os"
#define	K7_KW_UNITMASK	"unitmask"
#define	K7_KW_USR	"usr"

/*
 * Parse a K7 event specifier: a comma-separated list of the keywords
 * above, filling in pmc_config's AMD MD fields and capability bits.
 * Returns 0 on success, -1 on a malformed specifier.
 */
static int
k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char		*e, *p, *q;
	int		c, has_unitmask;
	uint32_t	count, unitmask;

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	/*
	 * Only the DC refill/writeback events take a MOESI unit mask;
	 * default to all five cache states if the user gives none.
	 */
	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
	    pe == PMC_EV_K7_DC_WRITEBACKS) {
		has_unitmask = 1;
		unitmask = AMD_PMC_UNITMASK_MOESI;
	} else
		unitmask = has_unitmask = 0;

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K7_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K7_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, K7_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
			if (has_unitmask == 0)	/* event takes no unitmask */
				return (-1);
			unitmask = 0;	/* user list replaces the default */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			/* Accept any of "moesi" separated by '+'. */
			while ((c = tolower(*q++)) != 0)
				if (c == 'm')
					unitmask |= AMD_PMC_UNITMASK_M;
				else if (c == 'o')
					unitmask |= AMD_PMC_UNITMASK_O;
				else if (c == 'e')
					unitmask |= AMD_PMC_UNITMASK_E;
				else if (c == 's')
					unitmask |= AMD_PMC_UNITMASK_S;
				else if (c == 'i')
					unitmask |= AMD_PMC_UNITMASK_I;
				else if (c == '+')
					continue;
				else
					return (-1);

			if (unitmask == 0)	/* empty mask list */
				return (-1);

		} else if (KWMATCH(p, K7_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	if (has_unitmask) {
		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		pmc_config->pm_md.pm_amd.pm_amd_config |=
		    AMD_PMC_TO_UNITMASK(unitmask);
	}

	return (0);

}

#endif

#if defined(__amd64__) || defined(__i386__)

/*
 * Intel Core (Family 6, Model E) PMCs.
 */

static struct pmc_event_alias core_aliases[] = {
	EV_ALIAS("branches",		"iap-br-instr-ret"),
	EV_ALIAS("branch-mispredicts",	"iap-br-mispred-ret"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-icache-misses"),
	EV_ALIAS("instructions",	"iap-instr-ret"),
	EV_ALIAS("interrupts",		"iap-core-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"iap-unhalted-core-cycles"),
	EV_ALIAS(NULL, NULL)
};

/*
 * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
 * and Atom (Family 6, model 1CH) PMCs.
 *
 * We map aliases to events on the fixed-function counters if these
 * are present. Note that not all CPUs in this family contain fixed-function
 * counters.
 */

/* Aliases used when fixed-function (IAF) counters are available. */
static struct pmc_event_alias core2_aliases[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iaf-instr-retired.any"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iaf-cpu-clk-unhalted.core"),
	EV_ALIAS(NULL, NULL)
};

/* Fallback aliases mapping everything to programmable (IAP) counters. */
static struct pmc_event_alias core2_aliases_without_iaf[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iap-inst-retired.any_p"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iap-cpu-clk-unhalted.core_p"),
	EV_ALIAS(NULL, NULL)
};

/* Later Intel cores reuse the Core2 alias tables unchanged. */
#define	atom_aliases			core2_aliases
#define	atom_aliases_without_iaf	core2_aliases_without_iaf
#define	corei7_aliases			core2_aliases
#define	corei7_aliases_without_iaf	core2_aliases_without_iaf
#define	haswell_aliases			core2_aliases
#define	haswell_aliases_without_iaf	core2_aliases_without_iaf
#define	ivybridge_aliases		core2_aliases
#define	ivybridge_aliases_without_iaf	core2_aliases_without_iaf
#define	ivybridge_xeon_aliases		core2_aliases
#define	ivybridge_xeon_aliases_without_iaf core2_aliases_without_iaf
#define	sandybridge_aliases		core2_aliases
#define	sandybridge_aliases_without_iaf	core2_aliases_without_iaf
#define	sandybridge_xeon_aliases	core2_aliases
#define	sandybridge_xeon_aliases_without_iaf core2_aliases_without_iaf
#define	westmere_aliases		core2_aliases
#define	westmere_aliases_without_iaf	core2_aliases_without_iaf

/* Event specifier keywords for fixed-function counters. */
#define	IAF_KW_OS		"os"
#define	IAF_KW_USR		"usr"
#define	IAF_KW_ANYTHREAD	"anythread"

/*
 * Parse an event specifier for Intel fixed function counters.
 * Returns 0 on success, -1 on an unrecognized keyword.
 */
static int
iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, IAF_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, IAF_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, IAF_KW_ANYTHREAD))
			pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
		else
			return (-1);
	}

	return (0);
}

/*
 * Core/Core2 support.
 */

/* Event specifier keywords for programmable (IAP) counters. */
#define	IAP_KW_AGENT		"agent"
#define	IAP_KW_ANYTHREAD	"anythread"
#define	IAP_KW_CACHESTATE	"cachestate"
#define	IAP_KW_CMASK		"cmask"
#define	IAP_KW_CORE		"core"
#define	IAP_KW_EDGE		"edge"
#define	IAP_KW_INV		"inv"
#define	IAP_KW_OS		"os"
#define	IAP_KW_PREFETCH		"prefetch"
#define	IAP_KW_SNOOPRESPONSE	"snoopresponse"
#define	IAP_KW_SNOOPTYPE	"snooptype"
#define	IAP_KW_TRANSITION	"trans"
#define	IAP_KW_USR		"usr"
#define	IAP_KW_RSP		"rsp"

/*
 * Qualifier value tables for pmc_parse_mask().  The shift positions
 * follow the event-select MSR bit layout for each qualifier.
 */
static struct pmc_masks iap_core_mask[] = {
	PMCMASK(all,	(0x3 << 14)),
	PMCMASK(this,	(0x1 << 14)),
	NULLMASK
};

static struct pmc_masks iap_agent_mask[] = {
	PMCMASK(this,	0),
	PMCMASK(any,	(0x1 << 13)),
	NULLMASK
};

static struct pmc_masks iap_prefetch_mask[] = {
	PMCMASK(both,		(0x3 << 12)),
	PMCMASK(only,		(0x1 << 12)),
	PMCMASK(exclude,	0),
	NULLMASK
};

/* MESI cache-state selection bits. */
static struct pmc_masks iap_cachestate_mask[] = {
	PMCMASK(i,	(1 <<  8)),
	PMCMASK(s,	(1 <<  9)),
	PMCMASK(e,	(1 << 10)),
	PMCMASK(m,	(1 << 11)),
	NULLMASK
};

static struct pmc_masks iap_snoopresponse_mask[] = {
	PMCMASK(clean,	(1 << 8)),
	PMCMASK(hit,	(1 << 9)),
	PMCMASK(hitm,	(1 << 11)),
	NULLMASK
};

static struct pmc_masks iap_snooptype_mask[] = {
	PMCMASK(cmp2s,	(1 << 8)),
	PMCMASK(cmp2i,	(1 << 9)),
	NULLMASK
};

static struct pmc_masks iap_transition_mask[] = {
	PMCMASK(any,		0x00),
	PMCMASK(frequency,	0x10),
	NULLMASK
};

/* OFFCORE_RESPONSE bits for Core i7 / Westmere. */
static struct pmc_masks iap_rsp_mask_i7_wm[] = {
	PMCMASK(DMND_DATA_RD,		(1 <<  0)),
	PMCMASK(DMND_RFO,		(1 <<  1)),
	PMCMASK(DMND_IFETCH,		(1 <<  2)),
	PMCMASK(WB,			(1 <<  3)),
	PMCMASK(PF_DATA_RD,		(1 <<  4)),
	PMCMASK(PF_RFO,			(1 <<  5)),
	PMCMASK(PF_IFETCH,		(1 <<  6)),
	PMCMASK(OTHER,			(1 <<  7)),
	PMCMASK(UNCORE_HIT,		(1 <<  8)),
	PMCMASK(OTHER_CORE_HIT_SNP,	(1 <<  9)),
	PMCMASK(OTHER_CORE_HITM,	(1 << 10)),
	PMCMASK(REMOTE_CACHE_FWD,	(1 << 12)),
	PMCMASK(REMOTE_DRAM,		(1 << 13)),
	PMCMASK(LOCAL_DRAM,		(1 << 14)),
	PMCMASK(NON_DRAM,		(1 << 15)),
	NULLMASK
};

/* OFFCORE_RESPONSE bits for Sandy Bridge (and Xeon) / Ivy Bridge. */
static struct pmc_masks iap_rsp_mask_sb_sbx_ib[] = {
	PMCMASK(REQ_DMND_DATA_RD,	(1ULL <<  0)),
	PMCMASK(REQ_DMND_RFO,		(1ULL <<  1)),
	PMCMASK(REQ_DMND_IFETCH,	(1ULL <<  2)),
	PMCMASK(REQ_WB,			(1ULL <<  3)),
	PMCMASK(REQ_PF_DATA_RD,		(1ULL <<  4)),
	PMCMASK(REQ_PF_RFO,		(1ULL <<  5)),
	PMCMASK(REQ_PF_IFETCH,		(1ULL <<  6)),
	PMCMASK(REQ_PF_LLC_DATA_RD,	(1ULL <<  7)),
	PMCMASK(REQ_PF_LLC_RFO,		(1ULL <<  8)),
	PMCMASK(REQ_PF_LLC_IFETCH,	(1ULL <<  9)),
	PMCMASK(REQ_BUS_LOCKS,		(1ULL << 10)),
	PMCMASK(REQ_STRM_ST,		(1ULL << 11)),
	PMCMASK(REQ_OTHER,		(1ULL << 15)),
	PMCMASK(RES_ANY,		(1ULL << 16)),
	PMCMASK(RES_SUPPLIER_SUPP,	(1ULL << 17)),
	PMCMASK(RES_SUPPLIER_LLC_HITM,	(1ULL << 18)),
	PMCMASK(RES_SUPPLIER_LLC_HITE,	(1ULL << 19)),
	PMCMASK(RES_SUPPLIER_LLC_HITS,	(1ULL << 20)),
	PMCMASK(RES_SUPPLIER_LLC_HITF,	(1ULL << 21)),
	PMCMASK(RES_SUPPLIER_LOCAL,	(1ULL << 22)),
	PMCMASK(RES_SNOOP_SNP_NONE,	(1ULL << 31)),
	PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
	PMCMASK(RES_SNOOP_SNP_MISS,	(1ULL << 33)),
	PMCMASK(RES_SNOOP_HIT_NO_FWD,	(1ULL << 34)),
	PMCMASK(RES_SNOOP_HIT_FWD,	(1ULL << 35)),
	PMCMASK(RES_SNOOP_HITM,		(1ULL << 36)),
	PMCMASK(RES_NON_DRAM,		(1ULL << 37)),
	NULLMASK
};

/* OFFCORE_RESPONSE bits for Haswell. */
static struct pmc_masks iap_rsp_mask_haswell[] = {
	PMCMASK(REQ_DMND_DATA_RD,	(1ULL <<  0)),
	PMCMASK(REQ_DMND_RFO,		(1ULL <<  1)),
	PMCMASK(REQ_DMND_IFETCH,	(1ULL <<  2)),
	PMCMASK(REQ_PF_DATA_RD,		(1ULL <<  4)),
	PMCMASK(REQ_PF_RFO,		(1ULL <<  5)),
	PMCMASK(REQ_PF_IFETCH,		(1ULL <<  6)),
	PMCMASK(REQ_OTHER,		(1ULL << 15)),
	PMCMASK(RES_ANY,		(1ULL << 16)),
	PMCMASK(RES_SUPPLIER_SUPP,	(1ULL << 17)),
	PMCMASK(RES_SUPPLIER_LLC_HITM,	(1ULL << 18)),
	PMCMASK(RES_SUPPLIER_LLC_HITE,	(1ULL << 19)),
	PMCMASK(RES_SUPPLIER_LLC_HITS,	(1ULL << 20)),
	PMCMASK(RES_SUPPLIER_LLC_HITF,	(1ULL << 21)),
	PMCMASK(RES_SUPPLIER_LOCAL,	(1ULL << 22)),
	PMCMASK(RES_SNOOP_SNP_NONE,	(1ULL << 31)),
	PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
	PMCMASK(RES_SNOOP_SNP_MISS,	(1ULL << 33)),
	PMCMASK(RES_SNOOP_HIT_NO_FWD,	(1ULL << 34)),
	PMCMASK(RES_SNOOP_HIT_FWD,	(1ULL << 35)),
	PMCMASK(RES_SNOOP_HITM,		(1ULL << 36)),
	PMCMASK(RES_NON_DRAM,		(1ULL << 37)),
	NULLMASK
};

/*
 * Parse an event specifier for Intel programmable (IAP) counters.
 * Which qualifier keywords are accepted depends on cpu_info.pm_cputype;
 * qualifier bits accumulate into the IAP config and rsp MD fields.
 * Returns 0 on success, -1 on a malformed or inapplicable specifier.
 */
static int
iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint64_t cachestate, evmask, rsp;
	int count, n;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
	    PMC_CAP_QUALIFIER);
	pmc_config->pm_md.pm_iap.pm_iap_config = 0;

	cachestate = evmask = rsp = 0;

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {

		n = 0;
		if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_iap.pm_iap_config |=
			    IAP_CMASK(count);
		} else if (KWMATCH(p, IAP_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, IAP_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, IAP_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, IAP_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
			pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
		} else if (KWPREFIXMATCH(p, IAP_KW_CORE "=")) {
			n = pmc_parse_mask(iap_core_mask, p, &evmask);
			if (n != 1)	/* exactly one core scope allowed */
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_AGENT "=")) {
			n = pmc_parse_mask(iap_agent_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_PREFETCH "=")) {
			n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_CACHESTATE "=")) {
			n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
		    KWPREFIXMATCH(p, IAP_KW_TRANSITION "=")) {
			n = pmc_parse_mask(iap_transition_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME) {
			if (KWPREFIXMATCH(p, IAP_KW_SNOOPRESPONSE "=")) {
				n = pmc_parse_mask(iap_snoopresponse_mask, p,
				    &evmask);
			} else if (KWPREFIXMATCH(p, IAP_KW_SNOOPTYPE "=")) {
				n = pmc_parse_mask(iap_snooptype_mask, p,
				    &evmask);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_COREI7 ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_i7_wm, p, &rsp);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE_XEON ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE_XEON ) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_sb_sbx_ib, p, &rsp);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_HASWELL) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_haswell, p, &rsp);
			} else
				return (-1);
		} else
			return (-1);

		if (n < 0)	/* Parsing failed. */
			return (-1);
	}

	pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;

	/*
	 * If the event requires a 'cachestate' qualifier but was not
	 * specified by the user, use a sensible default.
	 */
	switch (pe) {
	case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
	case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_32H: /* Core */
	case PMC_EV_IAP_EVENT_40H: /* Core */
	case PMC_EV_IAP_EVENT_41H: /* Core */
	case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
		if (cachestate == 0)
			cachestate = (0xF << 8);	/* all of MESI */
		break;
	case PMC_EV_IAP_EVENT_77H: /* Atom */
		/* IAP_EVENT_77H only accepts a cachestate qualifier on the
		 * Atom processor
		 */
		if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM && cachestate == 0)
			cachestate = (0xF << 8);
		break;
	default:
		break;
	}

	pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
	pmc_config->pm_md.pm_iap.pm_iap_rsp = rsp;

	return (0);
}

/*
 * Intel Uncore.
 */

/*
 * Parse an event specifier for the uncore fixed-function counter.
 * No qualifiers are supported; the argument strings are ignored.
 * Always succeeds.
 */
static int
ucf_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	(void) pe;
	(void) ctrspec;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_ucf.pm_ucf_flags = 0;

	return (0);
}

/* Event specifier keywords for uncore programmable (UCP) counters. */
#define	UCP_KW_CMASK	"cmask"
#define	UCP_KW_EDGE	"edge"
#define	UCP_KW_INV	"inv"

/*
 * Parse an event specifier for uncore programmable counters:
 * cmask=<n>, edge and inv qualifiers only.
 * Returns 0 on success, -1 on a malformed specifier.
 */
static int
ucp_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	int count, n;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
	    PMC_CAP_QUALIFIER);
	pmc_config->pm_md.pm_ucp.pm_ucp_config = 0;

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {

		/* n mirrors the iap parser's structure; no keyword here
		 * sets it negative, so the check below never fires. */
		n = 0;
		if (KWPREFIXMATCH(p, UCP_KW_CMASK "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ucp.pm_ucp_config |=
			    UCP_CMASK(count);
		} else if (KWMATCH(p, UCP_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, UCP_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else
			return (-1);

		if (n < 0)	/* Parsing failed. */
			return (-1);
	}

	return (0);
}

/*
 * AMD K8 PMCs.
 *
 * These are very similar to AMD K7 PMCs, but support more kinds of
 * events.
 */

/* User-friendly aliases for common K8 events; "cycles" maps to the TSC. */
static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k8-dc-miss"),
	EV_ALIAS("ic-misses",		"k8-ic-miss"),
	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};

/* K8 unit-mask entry: bit position V in the event's unit mask field. */
#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops,		0),
	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
	__K8MASK(store-pipe-excluding-junk-ops,		2),
	__K8MASK(add-pipe-junk-ops,			3),
	__K8MASK(multiply-pipe-junk-ops,		4),
	__K8MASK(store-pipe-junk-ops,			5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es,	0),
	__K8MASK(cs,	1),
	__K8MASK(ss,	2),
	__K8MASK(ds,	3),
	__K8MASK(fs,	4),
	__K8MASK(gs,	5),
	__K8MASK(hs,	6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions,	0),
	__K8MASK(cycles-in-request,	1),
	__K8MASK(cycles-to-complete,	2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid,	0),
	__K8MASK(shared,	1),
	__K8MASK(exclusive,	2),
	__K8MASK(owner,		3),
	__K8MASK(modified,	4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber,	0),
	__K8MASK(piggyback,	1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
1055 static const struct pmc_masks k8_mask_ddpi[] = { 1056 __K8MASK(load, 0), 1057 __K8MASK(store, 1), 1058 __K8MASK(nta, 2), 1059 NULLMASK 1060 }; 1061 1062 /* dc dcache accesses by locks */ 1063 static const struct pmc_masks k8_mask_dabl[] = { 1064 __K8MASK(accesses, 0), 1065 __K8MASK(misses, 1), 1066 NULLMASK 1067 }; 1068 1069 /* bu internal l2 request */ 1070 static const struct pmc_masks k8_mask_bilr[] = { 1071 __K8MASK(ic-fill, 0), 1072 __K8MASK(dc-fill, 1), 1073 __K8MASK(tlb-reload, 2), 1074 __K8MASK(tag-snoop, 3), 1075 __K8MASK(cancelled, 4), 1076 NULLMASK 1077 }; 1078 1079 /* bu fill request l2 miss */ 1080 static const struct pmc_masks k8_mask_bfrlm[] = { 1081 __K8MASK(ic-fill, 0), 1082 __K8MASK(dc-fill, 1), 1083 __K8MASK(tlb-reload, 2), 1084 NULLMASK 1085 }; 1086 1087 /* bu fill into l2 */ 1088 static const struct pmc_masks k8_mask_bfil[] = { 1089 __K8MASK(dirty-l2-victim, 0), 1090 __K8MASK(victim-from-l2, 1), 1091 NULLMASK 1092 }; 1093 1094 /* fr retired fpu instructions */ 1095 static const struct pmc_masks k8_mask_frfi[] = { 1096 __K8MASK(x87, 0), 1097 __K8MASK(mmx-3dnow, 1), 1098 __K8MASK(packed-sse-sse2, 2), 1099 __K8MASK(scalar-sse-sse2, 3), 1100 NULLMASK 1101 }; 1102 1103 /* fr retired fastpath double op instructions */ 1104 static const struct pmc_masks k8_mask_frfdoi[] = { 1105 __K8MASK(low-op-pos-0, 0), 1106 __K8MASK(low-op-pos-1, 1), 1107 __K8MASK(low-op-pos-2, 2), 1108 NULLMASK 1109 }; 1110 1111 /* fr fpu exceptions */ 1112 static const struct pmc_masks k8_mask_ffe[] = { 1113 __K8MASK(x87-reclass-microfaults, 0), 1114 __K8MASK(sse-retype-microfaults, 1), 1115 __K8MASK(sse-reclass-microfaults, 2), 1116 __K8MASK(sse-and-x87-microtraps, 3), 1117 NULLMASK 1118 }; 1119 1120 /* nb memory controller page access event */ 1121 static const struct pmc_masks k8_mask_nmcpae[] = { 1122 __K8MASK(page-hit, 0), 1123 __K8MASK(page-miss, 1), 1124 __K8MASK(page-conflict, 2), 1125 NULLMASK 1126 }; 1127 1128 /* nb memory controller turnaround */ 1129 static const 
struct pmc_masks k8_mask_nmct[] = { 1130 __K8MASK(dimm-turnaround, 0), 1131 __K8MASK(read-to-write-turnaround, 1), 1132 __K8MASK(write-to-read-turnaround, 2), 1133 NULLMASK 1134 }; 1135 1136 /* nb memory controller bypass saturation */ 1137 static const struct pmc_masks k8_mask_nmcbs[] = { 1138 __K8MASK(memory-controller-hi-pri-bypass, 0), 1139 __K8MASK(memory-controller-lo-pri-bypass, 1), 1140 __K8MASK(dram-controller-interface-bypass, 2), 1141 __K8MASK(dram-controller-queue-bypass, 3), 1142 NULLMASK 1143 }; 1144 1145 /* nb sized commands */ 1146 static const struct pmc_masks k8_mask_nsc[] = { 1147 __K8MASK(nonpostwrszbyte, 0), 1148 __K8MASK(nonpostwrszdword, 1), 1149 __K8MASK(postwrszbyte, 2), 1150 __K8MASK(postwrszdword, 3), 1151 __K8MASK(rdszbyte, 4), 1152 __K8MASK(rdszdword, 5), 1153 __K8MASK(rdmodwr, 6), 1154 NULLMASK 1155 }; 1156 1157 /* nb probe result */ 1158 static const struct pmc_masks k8_mask_npr[] = { 1159 __K8MASK(probe-miss, 0), 1160 __K8MASK(probe-hit, 1), 1161 __K8MASK(probe-hit-dirty-no-memory-cancel, 2), 1162 __K8MASK(probe-hit-dirty-with-memory-cancel, 3), 1163 NULLMASK 1164 }; 1165 1166 /* nb hypertransport bus bandwidth */ 1167 static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */ 1168 __K8MASK(command, 0), 1169 __K8MASK(data, 1), 1170 __K8MASK(buffer-release, 2), 1171 __K8MASK(nop, 3), 1172 NULLMASK 1173 }; 1174 1175 #undef __K8MASK 1176 1177 #define K8_KW_COUNT "count" 1178 #define K8_KW_EDGE "edge" 1179 #define K8_KW_INV "inv" 1180 #define K8_KW_MASK "mask" 1181 #define K8_KW_OS "os" 1182 #define K8_KW_USR "usr" 1183 1184 static int 1185 k8_allocate_pmc(enum pmc_event pe, char *ctrspec, 1186 struct pmc_op_pmcallocate *pmc_config) 1187 { 1188 char *e, *p, *q; 1189 int n; 1190 uint32_t count; 1191 uint64_t evmask; 1192 const struct pmc_masks *pm, *pmask; 1193 1194 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 1195 pmc_config->pm_md.pm_amd.pm_amd_config = 0; 1196 1197 pmask = NULL; 1198 evmask = 0; 1199 1200 #define 
__K8SETMASK(M) pmask = k8_mask_##M 1201 1202 /* setup parsing tables */ 1203 switch (pe) { 1204 case PMC_EV_K8_FP_DISPATCHED_FPU_OPS: 1205 __K8SETMASK(fdfo); 1206 break; 1207 case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD: 1208 __K8SETMASK(lsrl); 1209 break; 1210 case PMC_EV_K8_LS_LOCKED_OPERATION: 1211 __K8SETMASK(llo); 1212 break; 1213 case PMC_EV_K8_DC_REFILL_FROM_L2: 1214 case PMC_EV_K8_DC_REFILL_FROM_SYSTEM: 1215 case PMC_EV_K8_DC_COPYBACK: 1216 __K8SETMASK(dc); 1217 break; 1218 case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR: 1219 __K8SETMASK(dobee); 1220 break; 1221 case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS: 1222 __K8SETMASK(ddpi); 1223 break; 1224 case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS: 1225 __K8SETMASK(dabl); 1226 break; 1227 case PMC_EV_K8_BU_INTERNAL_L2_REQUEST: 1228 __K8SETMASK(bilr); 1229 break; 1230 case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS: 1231 __K8SETMASK(bfrlm); 1232 break; 1233 case PMC_EV_K8_BU_FILL_INTO_L2: 1234 __K8SETMASK(bfil); 1235 break; 1236 case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS: 1237 __K8SETMASK(frfi); 1238 break; 1239 case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS: 1240 __K8SETMASK(frfdoi); 1241 break; 1242 case PMC_EV_K8_FR_FPU_EXCEPTIONS: 1243 __K8SETMASK(ffe); 1244 break; 1245 case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT: 1246 __K8SETMASK(nmcpae); 1247 break; 1248 case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND: 1249 __K8SETMASK(nmct); 1250 break; 1251 case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION: 1252 __K8SETMASK(nmcbs); 1253 break; 1254 case PMC_EV_K8_NB_SIZED_COMMANDS: 1255 __K8SETMASK(nsc); 1256 break; 1257 case PMC_EV_K8_NB_PROBE_RESULT: 1258 __K8SETMASK(npr); 1259 break; 1260 case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH: 1261 case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH: 1262 case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH: 1263 __K8SETMASK(nhbb); 1264 break; 1265 1266 default: 1267 break; /* no options defined */ 1268 } 1269 1270 while ((p = strsep(&ctrspec, ",")) != NULL) { 1271 if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) { 1272 q = 
strchr(p, '='); 1273 if (*++q == '\0') /* skip '=' */ 1274 return (-1); 1275 1276 count = strtol(q, &e, 0); 1277 if (e == q || *e != '\0') 1278 return (-1); 1279 1280 pmc_config->pm_caps |= PMC_CAP_THRESHOLD; 1281 pmc_config->pm_md.pm_amd.pm_amd_config |= 1282 AMD_PMC_TO_COUNTER(count); 1283 1284 } else if (KWMATCH(p, K8_KW_EDGE)) { 1285 pmc_config->pm_caps |= PMC_CAP_EDGE; 1286 } else if (KWMATCH(p, K8_KW_INV)) { 1287 pmc_config->pm_caps |= PMC_CAP_INVERT; 1288 } else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) { 1289 if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0) 1290 return (-1); 1291 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 1292 } else if (KWMATCH(p, K8_KW_OS)) { 1293 pmc_config->pm_caps |= PMC_CAP_SYSTEM; 1294 } else if (KWMATCH(p, K8_KW_USR)) { 1295 pmc_config->pm_caps |= PMC_CAP_USER; 1296 } else 1297 return (-1); 1298 } 1299 1300 /* other post processing */ 1301 switch (pe) { 1302 case PMC_EV_K8_FP_DISPATCHED_FPU_OPS: 1303 case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED: 1304 case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS: 1305 case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS: 1306 case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS: 1307 case PMC_EV_K8_FR_FPU_EXCEPTIONS: 1308 /* XXX only available in rev B and later */ 1309 break; 1310 case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS: 1311 /* XXX only available in rev C and later */ 1312 break; 1313 case PMC_EV_K8_LS_LOCKED_OPERATION: 1314 /* XXX CPU Rev A,B evmask is to be zero */ 1315 if (evmask & (evmask - 1)) /* > 1 bit set */ 1316 return (-1); 1317 if (evmask == 0) { 1318 evmask = 0x01; /* Rev C and later: #instrs */ 1319 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 1320 } 1321 break; 1322 default: 1323 if (evmask == 0 && pmask != NULL) { 1324 for (pm = pmask; pm->pm_name; pm++) 1325 evmask |= pm->pm_value; 1326 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 1327 } 1328 } 1329 1330 if (pmc_config->pm_caps & PMC_CAP_QUALIFIER) 1331 pmc_config->pm_md.pm_amd.pm_amd_config = 1332 AMD_PMC_TO_UNITMASK(evmask); 1333 1334 
return (0); 1335 } 1336 1337 #endif 1338 1339 #if defined(__amd64__) || defined(__i386__) 1340 1341 /* 1342 * Intel P4 PMCs 1343 */ 1344 1345 static struct pmc_event_alias p4_aliases[] = { 1346 EV_ALIAS("branches", "p4-branch-retired,mask=mmtp+mmtm"), 1347 EV_ALIAS("branch-mispredicts", "p4-mispred-branch-retired"), 1348 EV_ALIAS("cycles", "tsc"), 1349 EV_ALIAS("instructions", 1350 "p4-instr-retired,mask=nbogusntag+nbogustag"), 1351 EV_ALIAS("unhalted-cycles", "p4-global-power-events"), 1352 EV_ALIAS(NULL, NULL) 1353 }; 1354 1355 #define P4_KW_ACTIVE "active" 1356 #define P4_KW_ACTIVE_ANY "any" 1357 #define P4_KW_ACTIVE_BOTH "both" 1358 #define P4_KW_ACTIVE_NONE "none" 1359 #define P4_KW_ACTIVE_SINGLE "single" 1360 #define P4_KW_BUSREQTYPE "busreqtype" 1361 #define P4_KW_CASCADE "cascade" 1362 #define P4_KW_EDGE "edge" 1363 #define P4_KW_INV "complement" 1364 #define P4_KW_OS "os" 1365 #define P4_KW_MASK "mask" 1366 #define P4_KW_PRECISE "precise" 1367 #define P4_KW_TAG "tag" 1368 #define P4_KW_THRESHOLD "threshold" 1369 #define P4_KW_USR "usr" 1370 1371 #define __P4MASK(N,V) PMCMASK(N, (1 << (V))) 1372 1373 static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */ 1374 __P4MASK(dd, 0), 1375 __P4MASK(db, 1), 1376 __P4MASK(di, 2), 1377 __P4MASK(bd, 3), 1378 __P4MASK(bb, 4), 1379 __P4MASK(bi, 5), 1380 __P4MASK(id, 6), 1381 __P4MASK(ib, 7), 1382 NULLMASK 1383 }; 1384 1385 static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */ 1386 __P4MASK(tcmiss, 0), 1387 NULLMASK, 1388 }; 1389 1390 static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */ 1391 __P4MASK(hit, 0), 1392 __P4MASK(miss, 1), 1393 __P4MASK(hit-uc, 2), 1394 NULLMASK 1395 }; 1396 1397 static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */ 1398 __P4MASK(st-rb-full, 2), 1399 __P4MASK(64k-conf, 3), 1400 NULLMASK 1401 }; 1402 1403 static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */ 1404 __P4MASK(lsc, 0), 1405 __P4MASK(ssc, 1), 1406 
NULLMASK 1407 }; 1408 1409 static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */ 1410 __P4MASK(split-ld, 1), 1411 NULLMASK 1412 }; 1413 1414 static const struct pmc_masks p4_mask_spr[] = { /* store port replay */ 1415 __P4MASK(split-st, 1), 1416 NULLMASK 1417 }; 1418 1419 static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */ 1420 __P4MASK(no-sta, 1), 1421 __P4MASK(no-std, 3), 1422 __P4MASK(partial-data, 4), 1423 __P4MASK(unalgn-addr, 5), 1424 NULLMASK 1425 }; 1426 1427 static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */ 1428 __P4MASK(dtmiss, 0), 1429 __P4MASK(itmiss, 1), 1430 NULLMASK 1431 }; 1432 1433 static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */ 1434 __P4MASK(rd-2ndl-hits, 0), 1435 __P4MASK(rd-2ndl-hite, 1), 1436 __P4MASK(rd-2ndl-hitm, 2), 1437 __P4MASK(rd-3rdl-hits, 3), 1438 __P4MASK(rd-3rdl-hite, 4), 1439 __P4MASK(rd-3rdl-hitm, 5), 1440 __P4MASK(rd-2ndl-miss, 8), 1441 __P4MASK(rd-3rdl-miss, 9), 1442 __P4MASK(wr-2ndl-miss, 10), 1443 NULLMASK 1444 }; 1445 1446 static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */ 1447 __P4MASK(all-read, 5), 1448 __P4MASK(all-write, 6), 1449 __P4MASK(mem-uc, 7), 1450 __P4MASK(mem-wc, 8), 1451 __P4MASK(mem-wt, 9), 1452 __P4MASK(mem-wp, 10), 1453 __P4MASK(mem-wb, 11), 1454 __P4MASK(own, 13), 1455 __P4MASK(other, 14), 1456 __P4MASK(prefetch, 15), 1457 NULLMASK 1458 }; 1459 1460 static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */ 1461 __P4MASK(all-read, 5), 1462 __P4MASK(all-write, 6), 1463 __P4MASK(mem-uc, 7), 1464 __P4MASK(mem-wc, 8), 1465 __P4MASK(mem-wt, 9), 1466 __P4MASK(mem-wp, 10), 1467 __P4MASK(mem-wb, 11), 1468 __P4MASK(own, 13), 1469 __P4MASK(other, 14), 1470 __P4MASK(prefetch, 15), 1471 NULLMASK 1472 }; 1473 1474 static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */ 1475 __P4MASK(drdy-drv, 0), 1476 __P4MASK(drdy-own, 1), 1477 __P4MASK(drdy-other, 2), 1478 __P4MASK(dbsy-drv, 3), 1479 
__P4MASK(dbsy-own, 4), 1480 __P4MASK(dbsy-other, 5), 1481 NULLMASK 1482 }; 1483 1484 static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */ 1485 __P4MASK(req-type0, 0), 1486 __P4MASK(req-type1, 1), 1487 __P4MASK(req-len0, 2), 1488 __P4MASK(req-len1, 3), 1489 __P4MASK(req-io-type, 5), 1490 __P4MASK(req-lock-type, 6), 1491 __P4MASK(req-cache-type, 7), 1492 __P4MASK(req-split-type, 8), 1493 __P4MASK(req-dem-type, 9), 1494 __P4MASK(req-ord-type, 10), 1495 __P4MASK(mem-type0, 11), 1496 __P4MASK(mem-type1, 12), 1497 __P4MASK(mem-type2, 13), 1498 NULLMASK 1499 }; 1500 1501 static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */ 1502 __P4MASK(all, 15), 1503 NULLMASK 1504 }; 1505 1506 static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */ 1507 __P4MASK(all, 15), 1508 NULLMASK 1509 }; 1510 1511 static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */ 1512 __P4MASK(all, 15), 1513 NULLMASK 1514 }; 1515 1516 static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */ 1517 __P4MASK(all, 15), 1518 NULLMASK 1519 }; 1520 1521 static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */ 1522 __P4MASK(all, 15), 1523 NULLMASK 1524 }; 1525 1526 static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */ 1527 __P4MASK(all, 15), 1528 NULLMASK 1529 }; 1530 1531 static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */ 1532 __P4MASK(all, 15), 1533 NULLMASK 1534 }; 1535 1536 static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */ 1537 __P4MASK(all, 15), 1538 NULLMASK 1539 }; 1540 1541 static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */ 1542 __P4MASK(allp0, 3), 1543 __P4MASK(allp2, 4), 1544 NULLMASK 1545 }; 1546 1547 static const struct pmc_masks p4_mask_gpe[] = { /* global power events */ 1548 __P4MASK(running, 0), 1549 NULLMASK 1550 }; 1551 1552 static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */ 1553 __P4MASK(cisc, 0), 1554 NULLMASK 1555 }; 1556 1557 
static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */ 1558 __P4MASK(from-tc-build, 0), 1559 __P4MASK(from-tc-deliver, 1), 1560 __P4MASK(from-rom, 2), 1561 NULLMASK 1562 }; 1563 1564 static const struct pmc_masks p4_mask_rmbt[] = { 1565 /* retired mispred branch type */ 1566 __P4MASK(conditional, 1), 1567 __P4MASK(call, 2), 1568 __P4MASK(return, 3), 1569 __P4MASK(indirect, 4), 1570 NULLMASK 1571 }; 1572 1573 static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */ 1574 __P4MASK(conditional, 1), 1575 __P4MASK(call, 2), 1576 __P4MASK(retired, 3), 1577 __P4MASK(indirect, 4), 1578 NULLMASK 1579 }; 1580 1581 static const struct pmc_masks p4_mask_rs[] = { /* resource stall */ 1582 __P4MASK(sbfull, 5), 1583 NULLMASK 1584 }; 1585 1586 static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */ 1587 __P4MASK(wcb-evicts, 0), 1588 __P4MASK(wcb-full-evict, 1), 1589 NULLMASK 1590 }; 1591 1592 static const struct pmc_masks p4_mask_fee[] = { /* front end event */ 1593 __P4MASK(nbogus, 0), 1594 __P4MASK(bogus, 1), 1595 NULLMASK 1596 }; 1597 1598 static const struct pmc_masks p4_mask_ee[] = { /* execution event */ 1599 __P4MASK(nbogus0, 0), 1600 __P4MASK(nbogus1, 1), 1601 __P4MASK(nbogus2, 2), 1602 __P4MASK(nbogus3, 3), 1603 __P4MASK(bogus0, 4), 1604 __P4MASK(bogus1, 5), 1605 __P4MASK(bogus2, 6), 1606 __P4MASK(bogus3, 7), 1607 NULLMASK 1608 }; 1609 1610 static const struct pmc_masks p4_mask_re[] = { /* replay event */ 1611 __P4MASK(nbogus, 0), 1612 __P4MASK(bogus, 1), 1613 NULLMASK 1614 }; 1615 1616 static const struct pmc_masks p4_mask_insret[] = { /* instr retired */ 1617 __P4MASK(nbogusntag, 0), 1618 __P4MASK(nbogustag, 1), 1619 __P4MASK(bogusntag, 2), 1620 __P4MASK(bogustag, 3), 1621 NULLMASK 1622 }; 1623 1624 static const struct pmc_masks p4_mask_ur[] = { /* uops retired */ 1625 __P4MASK(nbogus, 0), 1626 __P4MASK(bogus, 1), 1627 NULLMASK 1628 }; 1629 1630 static const struct pmc_masks p4_mask_ut[] = { /* uop type */ 1631 __P4MASK(tagloads, 
1), 1632 __P4MASK(tagstores, 2), 1633 NULLMASK 1634 }; 1635 1636 static const struct pmc_masks p4_mask_br[] = { /* branch retired */ 1637 __P4MASK(mmnp, 0), 1638 __P4MASK(mmnm, 1), 1639 __P4MASK(mmtp, 2), 1640 __P4MASK(mmtm, 3), 1641 NULLMASK 1642 }; 1643 1644 static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */ 1645 __P4MASK(nbogus, 0), 1646 NULLMASK 1647 }; 1648 1649 static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */ 1650 __P4MASK(fpsu, 0), 1651 __P4MASK(fpso, 1), 1652 __P4MASK(poao, 2), 1653 __P4MASK(poau, 3), 1654 __P4MASK(prea, 4), 1655 NULLMASK 1656 }; 1657 1658 static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */ 1659 __P4MASK(clear, 0), 1660 __P4MASK(moclear, 2), 1661 __P4MASK(smclear, 3), 1662 NULLMASK 1663 }; 1664 1665 /* P4 event parser */ 1666 static int 1667 p4_allocate_pmc(enum pmc_event pe, char *ctrspec, 1668 struct pmc_op_pmcallocate *pmc_config) 1669 { 1670 1671 char *e, *p, *q; 1672 int count, has_tag, has_busreqtype, n; 1673 uint32_t cccractivemask; 1674 uint64_t evmask; 1675 const struct pmc_masks *pm, *pmask; 1676 1677 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 1678 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig = 1679 pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0; 1680 1681 pmask = NULL; 1682 evmask = 0; 1683 cccractivemask = 0x3; 1684 has_tag = has_busreqtype = 0; 1685 1686 #define __P4SETMASK(M) do { \ 1687 pmask = p4_mask_##M; \ 1688 } while (0) 1689 1690 switch (pe) { 1691 case PMC_EV_P4_TC_DELIVER_MODE: 1692 __P4SETMASK(tcdm); 1693 break; 1694 case PMC_EV_P4_BPU_FETCH_REQUEST: 1695 __P4SETMASK(bfr); 1696 break; 1697 case PMC_EV_P4_ITLB_REFERENCE: 1698 __P4SETMASK(ir); 1699 break; 1700 case PMC_EV_P4_MEMORY_CANCEL: 1701 __P4SETMASK(memcan); 1702 break; 1703 case PMC_EV_P4_MEMORY_COMPLETE: 1704 __P4SETMASK(memcomp); 1705 break; 1706 case PMC_EV_P4_LOAD_PORT_REPLAY: 1707 __P4SETMASK(lpr); 1708 break; 1709 case PMC_EV_P4_STORE_PORT_REPLAY: 1710 __P4SETMASK(spr); 1711 break; 1712 
case PMC_EV_P4_MOB_LOAD_REPLAY: 1713 __P4SETMASK(mlr); 1714 break; 1715 case PMC_EV_P4_PAGE_WALK_TYPE: 1716 __P4SETMASK(pwt); 1717 break; 1718 case PMC_EV_P4_BSQ_CACHE_REFERENCE: 1719 __P4SETMASK(bcr); 1720 break; 1721 case PMC_EV_P4_IOQ_ALLOCATION: 1722 __P4SETMASK(ia); 1723 has_busreqtype = 1; 1724 break; 1725 case PMC_EV_P4_IOQ_ACTIVE_ENTRIES: 1726 __P4SETMASK(iae); 1727 has_busreqtype = 1; 1728 break; 1729 case PMC_EV_P4_FSB_DATA_ACTIVITY: 1730 __P4SETMASK(fda); 1731 break; 1732 case PMC_EV_P4_BSQ_ALLOCATION: 1733 __P4SETMASK(ba); 1734 break; 1735 case PMC_EV_P4_SSE_INPUT_ASSIST: 1736 __P4SETMASK(sia); 1737 break; 1738 case PMC_EV_P4_PACKED_SP_UOP: 1739 __P4SETMASK(psu); 1740 break; 1741 case PMC_EV_P4_PACKED_DP_UOP: 1742 __P4SETMASK(pdu); 1743 break; 1744 case PMC_EV_P4_SCALAR_SP_UOP: 1745 __P4SETMASK(ssu); 1746 break; 1747 case PMC_EV_P4_SCALAR_DP_UOP: 1748 __P4SETMASK(sdu); 1749 break; 1750 case PMC_EV_P4_64BIT_MMX_UOP: 1751 __P4SETMASK(64bmu); 1752 break; 1753 case PMC_EV_P4_128BIT_MMX_UOP: 1754 __P4SETMASK(128bmu); 1755 break; 1756 case PMC_EV_P4_X87_FP_UOP: 1757 __P4SETMASK(xfu); 1758 break; 1759 case PMC_EV_P4_X87_SIMD_MOVES_UOP: 1760 __P4SETMASK(xsmu); 1761 break; 1762 case PMC_EV_P4_GLOBAL_POWER_EVENTS: 1763 __P4SETMASK(gpe); 1764 break; 1765 case PMC_EV_P4_TC_MS_XFER: 1766 __P4SETMASK(tmx); 1767 break; 1768 case PMC_EV_P4_UOP_QUEUE_WRITES: 1769 __P4SETMASK(uqw); 1770 break; 1771 case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE: 1772 __P4SETMASK(rmbt); 1773 break; 1774 case PMC_EV_P4_RETIRED_BRANCH_TYPE: 1775 __P4SETMASK(rbt); 1776 break; 1777 case PMC_EV_P4_RESOURCE_STALL: 1778 __P4SETMASK(rs); 1779 break; 1780 case PMC_EV_P4_WC_BUFFER: 1781 __P4SETMASK(wb); 1782 break; 1783 case PMC_EV_P4_BSQ_ACTIVE_ENTRIES: 1784 case PMC_EV_P4_B2B_CYCLES: 1785 case PMC_EV_P4_BNR: 1786 case PMC_EV_P4_SNOOP: 1787 case PMC_EV_P4_RESPONSE: 1788 break; 1789 case PMC_EV_P4_FRONT_END_EVENT: 1790 __P4SETMASK(fee); 1791 break; 1792 case PMC_EV_P4_EXECUTION_EVENT: 1793 
__P4SETMASK(ee); 1794 break; 1795 case PMC_EV_P4_REPLAY_EVENT: 1796 __P4SETMASK(re); 1797 break; 1798 case PMC_EV_P4_INSTR_RETIRED: 1799 __P4SETMASK(insret); 1800 break; 1801 case PMC_EV_P4_UOPS_RETIRED: 1802 __P4SETMASK(ur); 1803 break; 1804 case PMC_EV_P4_UOP_TYPE: 1805 __P4SETMASK(ut); 1806 break; 1807 case PMC_EV_P4_BRANCH_RETIRED: 1808 __P4SETMASK(br); 1809 break; 1810 case PMC_EV_P4_MISPRED_BRANCH_RETIRED: 1811 __P4SETMASK(mbr); 1812 break; 1813 case PMC_EV_P4_X87_ASSIST: 1814 __P4SETMASK(xa); 1815 break; 1816 case PMC_EV_P4_MACHINE_CLEAR: 1817 __P4SETMASK(machclr); 1818 break; 1819 default: 1820 return (-1); 1821 } 1822 1823 /* process additional flags */ 1824 while ((p = strsep(&ctrspec, ",")) != NULL) { 1825 if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) { 1826 q = strchr(p, '='); 1827 if (*++q == '\0') /* skip '=' */ 1828 return (-1); 1829 1830 if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0) 1831 cccractivemask = 0x0; 1832 else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0) 1833 cccractivemask = 0x1; 1834 else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0) 1835 cccractivemask = 0x2; 1836 else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0) 1837 cccractivemask = 0x3; 1838 else 1839 return (-1); 1840 1841 } else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) { 1842 if (has_busreqtype == 0) 1843 return (-1); 1844 1845 q = strchr(p, '='); 1846 if (*++q == '\0') /* skip '=' */ 1847 return (-1); 1848 1849 count = strtol(q, &e, 0); 1850 if (e == q || *e != '\0') 1851 return (-1); 1852 evmask = (evmask & ~0x1F) | (count & 0x1F); 1853 } else if (KWMATCH(p, P4_KW_CASCADE)) 1854 pmc_config->pm_caps |= PMC_CAP_CASCADE; 1855 else if (KWMATCH(p, P4_KW_EDGE)) 1856 pmc_config->pm_caps |= PMC_CAP_EDGE; 1857 else if (KWMATCH(p, P4_KW_INV)) 1858 pmc_config->pm_caps |= PMC_CAP_INVERT; 1859 else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) { 1860 if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0) 1861 return (-1); 1862 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 1863 } else if (KWMATCH(p, P4_KW_OS)) 1864 
pmc_config->pm_caps |= PMC_CAP_SYSTEM; 1865 else if (KWMATCH(p, P4_KW_PRECISE)) 1866 pmc_config->pm_caps |= PMC_CAP_PRECISE; 1867 else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) { 1868 if (has_tag == 0) 1869 return (-1); 1870 1871 q = strchr(p, '='); 1872 if (*++q == '\0') /* skip '=' */ 1873 return (-1); 1874 1875 count = strtol(q, &e, 0); 1876 if (e == q || *e != '\0') 1877 return (-1); 1878 1879 pmc_config->pm_caps |= PMC_CAP_TAGGING; 1880 pmc_config->pm_md.pm_p4.pm_p4_escrconfig |= 1881 P4_ESCR_TO_TAG_VALUE(count); 1882 } else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) { 1883 q = strchr(p, '='); 1884 if (*++q == '\0') /* skip '=' */ 1885 return (-1); 1886 1887 count = strtol(q, &e, 0); 1888 if (e == q || *e != '\0') 1889 return (-1); 1890 1891 pmc_config->pm_caps |= PMC_CAP_THRESHOLD; 1892 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &= 1893 ~P4_CCCR_THRESHOLD_MASK; 1894 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |= 1895 P4_CCCR_TO_THRESHOLD(count); 1896 } else if (KWMATCH(p, P4_KW_USR)) 1897 pmc_config->pm_caps |= PMC_CAP_USER; 1898 else 1899 return (-1); 1900 } 1901 1902 /* other post processing */ 1903 if (pe == PMC_EV_P4_IOQ_ALLOCATION || 1904 pe == PMC_EV_P4_FSB_DATA_ACTIVITY || 1905 pe == PMC_EV_P4_BSQ_ALLOCATION) 1906 pmc_config->pm_caps |= PMC_CAP_EDGE; 1907 1908 /* fill in thread activity mask */ 1909 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |= 1910 P4_CCCR_TO_ACTIVE_THREAD(cccractivemask); 1911 1912 if (evmask) 1913 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 1914 1915 switch (pe) { 1916 case PMC_EV_P4_FSB_DATA_ACTIVITY: 1917 if ((evmask & 0x06) == 0x06 || 1918 (evmask & 0x18) == 0x18) 1919 return (-1); /* can't have own+other bits together */ 1920 if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */ 1921 evmask = 0x1D; 1922 break; 1923 case PMC_EV_P4_MACHINE_CLEAR: 1924 /* only one bit is allowed to be set */ 1925 if ((evmask & (evmask - 1)) != 0) 1926 return (-1); 1927 if (evmask == 0) { 1928 evmask = 0x1; /* 'CLEAR' */ 1929 pmc_config->pm_caps |= 
PMC_CAP_QUALIFIER; 1930 } 1931 break; 1932 default: 1933 if (evmask == 0 && pmask) { 1934 for (pm = pmask; pm->pm_name; pm++) 1935 evmask |= pm->pm_value; 1936 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 1937 } 1938 } 1939 1940 pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 1941 P4_ESCR_TO_EVENT_MASK(evmask); 1942 1943 return (0); 1944 } 1945 1946 #endif 1947 1948 #if defined(__i386__) 1949 1950 /* 1951 * Pentium style PMCs 1952 */ 1953 1954 static struct pmc_event_alias p5_aliases[] = { 1955 EV_ALIAS("branches", "p5-taken-branches"), 1956 EV_ALIAS("cycles", "tsc"), 1957 EV_ALIAS("dc-misses", "p5-data-read-miss-or-write-miss"), 1958 EV_ALIAS("ic-misses", "p5-code-cache-miss"), 1959 EV_ALIAS("instructions", "p5-instructions-executed"), 1960 EV_ALIAS("interrupts", "p5-hardware-interrupts"), 1961 EV_ALIAS("unhalted-cycles", 1962 "p5-number-of-cycles-not-in-halt-state"), 1963 EV_ALIAS(NULL, NULL) 1964 }; 1965 1966 static int 1967 p5_allocate_pmc(enum pmc_event pe, char *ctrspec, 1968 struct pmc_op_pmcallocate *pmc_config) 1969 { 1970 return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */ 1971 } 1972 1973 /* 1974 * Pentium Pro style PMCs. These PMCs are found in Pentium II, Pentium III, 1975 * and Pentium M CPUs. 
1976 */ 1977 1978 static struct pmc_event_alias p6_aliases[] = { 1979 EV_ALIAS("branches", "p6-br-inst-retired"), 1980 EV_ALIAS("branch-mispredicts", "p6-br-miss-pred-retired"), 1981 EV_ALIAS("cycles", "tsc"), 1982 EV_ALIAS("dc-misses", "p6-dcu-lines-in"), 1983 EV_ALIAS("ic-misses", "p6-ifu-fetch-miss"), 1984 EV_ALIAS("instructions", "p6-inst-retired"), 1985 EV_ALIAS("interrupts", "p6-hw-int-rx"), 1986 EV_ALIAS("unhalted-cycles", "p6-cpu-clk-unhalted"), 1987 EV_ALIAS(NULL, NULL) 1988 }; 1989 1990 #define P6_KW_CMASK "cmask" 1991 #define P6_KW_EDGE "edge" 1992 #define P6_KW_INV "inv" 1993 #define P6_KW_OS "os" 1994 #define P6_KW_UMASK "umask" 1995 #define P6_KW_USR "usr" 1996 1997 static struct pmc_masks p6_mask_mesi[] = { 1998 PMCMASK(m, 0x01), 1999 PMCMASK(e, 0x02), 2000 PMCMASK(s, 0x04), 2001 PMCMASK(i, 0x08), 2002 NULLMASK 2003 }; 2004 2005 static struct pmc_masks p6_mask_mesihw[] = { 2006 PMCMASK(m, 0x01), 2007 PMCMASK(e, 0x02), 2008 PMCMASK(s, 0x04), 2009 PMCMASK(i, 0x08), 2010 PMCMASK(nonhw, 0x00), 2011 PMCMASK(hw, 0x10), 2012 PMCMASK(both, 0x30), 2013 NULLMASK 2014 }; 2015 2016 static struct pmc_masks p6_mask_hw[] = { 2017 PMCMASK(nonhw, 0x00), 2018 PMCMASK(hw, 0x10), 2019 PMCMASK(both, 0x30), 2020 NULLMASK 2021 }; 2022 2023 static struct pmc_masks p6_mask_any[] = { 2024 PMCMASK(self, 0x00), 2025 PMCMASK(any, 0x20), 2026 NULLMASK 2027 }; 2028 2029 static struct pmc_masks p6_mask_ekp[] = { 2030 PMCMASK(nta, 0x00), 2031 PMCMASK(t1, 0x01), 2032 PMCMASK(t2, 0x02), 2033 PMCMASK(wos, 0x03), 2034 NULLMASK 2035 }; 2036 2037 static struct pmc_masks p6_mask_pps[] = { 2038 PMCMASK(packed-and-scalar, 0x00), 2039 PMCMASK(scalar, 0x01), 2040 NULLMASK 2041 }; 2042 2043 static struct pmc_masks p6_mask_mite[] = { 2044 PMCMASK(packed-multiply, 0x01), 2045 PMCMASK(packed-shift, 0x02), 2046 PMCMASK(pack, 0x04), 2047 PMCMASK(unpack, 0x08), 2048 PMCMASK(packed-logical, 0x10), 2049 PMCMASK(packed-arithmetic, 0x20), 2050 NULLMASK 2051 }; 2052 2053 static struct pmc_masks 
p6_mask_fmt[] = { 2054 PMCMASK(mmxtofp, 0x00), 2055 PMCMASK(fptommx, 0x01), 2056 NULLMASK 2057 }; 2058 2059 static struct pmc_masks p6_mask_sr[] = { 2060 PMCMASK(es, 0x01), 2061 PMCMASK(ds, 0x02), 2062 PMCMASK(fs, 0x04), 2063 PMCMASK(gs, 0x08), 2064 NULLMASK 2065 }; 2066 2067 static struct pmc_masks p6_mask_eet[] = { 2068 PMCMASK(all, 0x00), 2069 PMCMASK(freq, 0x02), 2070 NULLMASK 2071 }; 2072 2073 static struct pmc_masks p6_mask_efur[] = { 2074 PMCMASK(all, 0x00), 2075 PMCMASK(loadop, 0x01), 2076 PMCMASK(stdsta, 0x02), 2077 NULLMASK 2078 }; 2079 2080 static struct pmc_masks p6_mask_essir[] = { 2081 PMCMASK(sse-packed-single, 0x00), 2082 PMCMASK(sse-packed-single-scalar-single, 0x01), 2083 PMCMASK(sse2-packed-double, 0x02), 2084 PMCMASK(sse2-scalar-double, 0x03), 2085 NULLMASK 2086 }; 2087 2088 static struct pmc_masks p6_mask_esscir[] = { 2089 PMCMASK(sse-packed-single, 0x00), 2090 PMCMASK(sse-scalar-single, 0x01), 2091 PMCMASK(sse2-packed-double, 0x02), 2092 PMCMASK(sse2-scalar-double, 0x03), 2093 NULLMASK 2094 }; 2095 2096 /* P6 event parser */ 2097 static int 2098 p6_allocate_pmc(enum pmc_event pe, char *ctrspec, 2099 struct pmc_op_pmcallocate *pmc_config) 2100 { 2101 char *e, *p, *q; 2102 uint64_t evmask; 2103 int count, n; 2104 const struct pmc_masks *pm, *pmask; 2105 2106 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 2107 pmc_config->pm_md.pm_ppro.pm_ppro_config = 0; 2108 2109 evmask = 0; 2110 2111 #define P6MASKSET(M) pmask = p6_mask_ ## M 2112 2113 switch(pe) { 2114 case PMC_EV_P6_L2_IFETCH: P6MASKSET(mesi); break; 2115 case PMC_EV_P6_L2_LD: P6MASKSET(mesi); break; 2116 case PMC_EV_P6_L2_ST: P6MASKSET(mesi); break; 2117 case PMC_EV_P6_L2_RQSTS: P6MASKSET(mesi); break; 2118 case PMC_EV_P6_BUS_DRDY_CLOCKS: 2119 case PMC_EV_P6_BUS_LOCK_CLOCKS: 2120 case PMC_EV_P6_BUS_TRAN_BRD: 2121 case PMC_EV_P6_BUS_TRAN_RFO: 2122 case PMC_EV_P6_BUS_TRANS_WB: 2123 case PMC_EV_P6_BUS_TRAN_IFETCH: 2124 case PMC_EV_P6_BUS_TRAN_INVAL: 2125 case PMC_EV_P6_BUS_TRAN_PWR: 
2126 case PMC_EV_P6_BUS_TRANS_P: 2127 case PMC_EV_P6_BUS_TRANS_IO: 2128 case PMC_EV_P6_BUS_TRAN_DEF: 2129 case PMC_EV_P6_BUS_TRAN_BURST: 2130 case PMC_EV_P6_BUS_TRAN_ANY: 2131 case PMC_EV_P6_BUS_TRAN_MEM: 2132 P6MASKSET(any); break; 2133 case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED: 2134 case PMC_EV_P6_EMON_KNI_PREF_MISS: 2135 P6MASKSET(ekp); break; 2136 case PMC_EV_P6_EMON_KNI_INST_RETIRED: 2137 case PMC_EV_P6_EMON_KNI_COMP_INST_RET: 2138 P6MASKSET(pps); break; 2139 case PMC_EV_P6_MMX_INSTR_TYPE_EXEC: 2140 P6MASKSET(mite); break; 2141 case PMC_EV_P6_FP_MMX_TRANS: 2142 P6MASKSET(fmt); break; 2143 case PMC_EV_P6_SEG_RENAME_STALLS: 2144 case PMC_EV_P6_SEG_REG_RENAMES: 2145 P6MASKSET(sr); break; 2146 case PMC_EV_P6_EMON_EST_TRANS: 2147 P6MASKSET(eet); break; 2148 case PMC_EV_P6_EMON_FUSED_UOPS_RET: 2149 P6MASKSET(efur); break; 2150 case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED: 2151 P6MASKSET(essir); break; 2152 case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED: 2153 P6MASKSET(esscir); break; 2154 default: 2155 pmask = NULL; 2156 break; 2157 } 2158 2159 /* Pentium M PMCs have a few events with different semantics */ 2160 if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) { 2161 if (pe == PMC_EV_P6_L2_LD || 2162 pe == PMC_EV_P6_L2_LINES_IN || 2163 pe == PMC_EV_P6_L2_LINES_OUT) 2164 P6MASKSET(mesihw); 2165 else if (pe == PMC_EV_P6_L2_M_LINES_OUTM) 2166 P6MASKSET(hw); 2167 } 2168 2169 /* Parse additional modifiers if present */ 2170 while ((p = strsep(&ctrspec, ",")) != NULL) { 2171 if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) { 2172 q = strchr(p, '='); 2173 if (*++q == '\0') /* skip '=' */ 2174 return (-1); 2175 count = strtol(q, &e, 0); 2176 if (e == q || *e != '\0') 2177 return (-1); 2178 pmc_config->pm_caps |= PMC_CAP_THRESHOLD; 2179 pmc_config->pm_md.pm_ppro.pm_ppro_config |= 2180 P6_EVSEL_TO_CMASK(count); 2181 } else if (KWMATCH(p, P6_KW_EDGE)) { 2182 pmc_config->pm_caps |= PMC_CAP_EDGE; 2183 } else if (KWMATCH(p, P6_KW_INV)) { 2184 pmc_config->pm_caps |= PMC_CAP_INVERT; 2185 } else if 
(KWMATCH(p, P6_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
			evmask = 0;
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
			    pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
			    pe == PMC_EV_P6_BUS_TRAN_BRD ||
			    pe == PMC_EV_P6_BUS_TRAN_RFO ||
			    pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
			    pe == PMC_EV_P6_BUS_TRAN_INVAL ||
			    pe == PMC_EV_P6_BUS_TRAN_PWR ||
			    pe == PMC_EV_P6_BUS_TRAN_DEF ||
			    pe == PMC_EV_P6_BUS_TRAN_BURST ||
			    pe == PMC_EV_P6_BUS_TRAN_ANY ||
			    pe == PMC_EV_P6_BUS_TRAN_MEM ||
			    pe == PMC_EV_P6_BUS_TRANS_IO ||
			    pe == PMC_EV_P6_BUS_TRANS_P ||
			    pe == PMC_EV_P6_BUS_TRANS_WB ||
			    pe == PMC_EV_P6_EMON_EST_TRANS ||
			    pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
			    pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
			    pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
			    pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
			    pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
			    pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
			    pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
			    pe == PMC_EV_P6_FP_MMX_TRANS)
			    && (n > 1))	/* Only one mask keyword is allowed. */
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P6_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* post processing */
	switch (pe) {

	/*
	 * The following events default to an evmask of 0
	 */

	/* default => 'self' */
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:

	/* default => 'nta' */
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:

	/* default => 'packed and scalar' */
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:

	/* default => 'mmx to fp transitions' */
	case PMC_EV_P6_FP_MMX_TRANS:

	/* default => 'SSE Packed Single' */
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:

	/* default => 'all fused micro-ops' */
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:

	/* default => 'all transitions' */
	case PMC_EV_P6_EMON_EST_TRANS:
		break;

	case PMC_EV_P6_MMX_UOPS_EXEC:
		evmask = 0x0F;		/* only value allowed */
		break;

	default:
		/*
		 * For all other events, set the default event mask
		 * to a logical OR of all the allowed event mask bits.
		 */
		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	/* Fold the accumulated unit mask into the event-select register. */
	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return (0);
}

#endif

#if defined(__i386__) || defined(__amd64__)
/*
 * TSC "events": the only valid event is the TSC itself and it accepts
 * no qualifiers.  TSC counters are read-only.
 */
static int
tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	if (pe != PMC_EV_TSC_TSC)
		return (-1);

	/* TSC events must be unqualified. */
	if (ctrspec && *ctrspec != '\0')
		return (-1);

	/*
	 * NOTE(review): clears the AMD md-config word; presumably the
	 * TSC class shares this layout — confirm against sys/pmc.h.
	 */
	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= PMC_CAP_READ;

	return (0);
}
#endif

/* Aliases available when no machine-dependent table applies. */
static struct pmc_event_alias generic_aliases[] = {
	EV_ALIAS("instructions", "SOFT-CLOCK.HARD"),
	EV_ALIAS(NULL, NULL)
};

/*
 * Allocate a software (SOFT class) PMC.  Only validates that the
 * event code lies in the soft event range; no qualifiers are parsed.
 */
static int
soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	/* NOTE(review): pmc_config is cast to void yet used below. */
	(void)ctrspec;
	(void)pmc_config;

	if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
		return (-1);

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	return (0);
}

#if defined(__XSCALE__)

static struct pmc_event_alias xscale_aliases[] = {
	EV_ALIAS("branches", "BRANCH_RETIRED"),
	EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"),
	EV_ALIAS("dc-misses", "DC_MISS"),
	EV_ALIAS("ic-misses", "IC_MISS"),
	EV_ALIAS("instructions", "INSTR_RETIRED"),
	EV_ALIAS(NULL, NULL)
};

/*
 * XScale PMCs take no event-specific qualifiers; every event is
 * accepted as-is.
 */
static int
xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

#if defined(__mips__)

static struct pmc_event_alias
mips24k_aliases[] = {
	EV_ALIAS("instructions", "INSTR_EXECUTED"),
	EV_ALIAS("branches", "BRANCH_COMPLETED"),
	EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias octeon_aliases[] = {
	EV_ALIAS("instructions", "RET"),
	EV_ALIAS("branches", "BR"),
	EV_ALIAS("branch-mispredicts", "BRMIS"),
	EV_ALIAS(NULL, NULL)
};

/* Qualifier keywords accepted by the MIPS event specifier parser. */
#define	MIPS_KW_OS		"os"
#define	MIPS_KW_USR		"usr"
#define	MIPS_KW_ANYTHREAD	"anythread"

/*
 * Allocate a MIPS PMC.  Parses the comma-separated qualifier list in
 * 'ctrspec' for the os/usr/anythread keywords and sets the matching
 * capability bits; any other keyword is an error.
 *
 * NOTE(review): both parameters are marked __unused but are in fact
 * used ('ctrspec' via strsep, 'pmc_config' throughout).
 */
static int
mips_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, MIPS_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, MIPS_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, MIPS_KW_ANYTHREAD))
			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
		else
			return (-1);
	}

	return (0);
}

#endif /* __mips__ */

#if defined(__powerpc__)

static struct pmc_event_alias ppc7450_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("branches", "BRANCHES_COMPLETED"),
	EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCHES"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias ppc970_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles", "CYCLES"),
	EV_ALIAS(NULL, NULL)
};

/* Qualifier keywords accepted by the PowerPC event specifier parser. */
#define	POWERPC_KW_OS		"os"
#define	POWERPC_KW_USR		"usr"
#define	POWERPC_KW_ANYTHREAD	"anythread"

/*
 * Allocate a PowerPC PMC.  Same qualifier handling as the MIPS
 * allocator above: os/usr/anythread map to capability bits.
 *
 * NOTE(review): parameters marked __unused are actually used.
 */
static int
powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, POWERPC_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, POWERPC_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
		else
			return (-1);
	}

	return (0);
}

#endif /* __powerpc__ */


/*
 * Match an event name `name' with its canonical form.
 *
 * Matches are case insensitive and spaces, periods, underscores and
 * hyphen characters are considered to match each other.
 *
 * Returns 1 for a match, 0 otherwise.
 */

static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	int cc, nc;
	const unsigned char *c, *n;

	/* unsigned char: keep toupper() well-defined for 8-bit input */
	c = (const unsigned char *) canonicalname;
	n = (const unsigned char *) name;

	for (; (nc = *n) && (cc = *c); n++, c++) {

		/* separator characters all match one another */
		if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
		    (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
			continue;

		if (toupper(nc) == toupper(cc))
			continue;


		return (0);
	}

	/* a match requires both strings to be fully consumed */
	if (*n == '\0' && *c == '\0')
		return (1);

	return (0);
}

/*
 * Match an event name against all the event names supported by a
 * PMC class.
 *
 * Returns an event descriptor pointer on match or NULL otherwise.
 */
static const struct pmc_event_descr *
pmc_match_event_class(const char *name,
    const struct pmc_class_descr *pcd)
{
	size_t n;
	const struct pmc_event_descr *ev;

	ev = pcd->pm_evc_event_table;
	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
		if (pmc_match_event_name(name, ev->pm_ev_name))
			return (ev);

	return (NULL);
}

/*
 * Check whether PMC class 'pc' is supported by the current
 * machine-dependent configuration (pmc_mdep_class_list).
 */
static int
pmc_mdep_is_compatible_class(enum pmc_class pc)
{
	size_t n;

	for (n = 0; n < pmc_mdep_class_list_size; n++)
		if (pmc_mdep_class_list[n] == pc)
			return (1);
	return (0);
}

/*
 * API entry points
 */

/*
 * Allocate a PMC for event specifier 'ctrspec' in mode 'mode' on CPU
 * 'cpu'.  On success the new PMC id is stored in '*pmcid' and 0 is
 * returned; on failure -1 is returned with errno set.
 */
int
pmc_allocate(const char *ctrspec, enum pmc_mode mode,
    uint32_t flags, int cpu, pmc_id_t *pmcid)
{
	size_t n;
	int retval;
	char *r, *spec_copy;
	const char *ctrname;
	const struct pmc_event_descr *ev;
	const struct pmc_event_alias *alias;
	struct pmc_op_pmcallocate pmc_config;
	const struct pmc_class_descr *pcd;

	spec_copy = NULL;
	retval = -1;

	/* only the four counting/sampling modes may be allocated */
	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
		errno = EINVAL;
		goto out;
	}

	/* replace an event alias with the canonical event specifier */
	if (pmc_mdep_event_aliases)
		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
			if (!strcasecmp(ctrspec, alias->pm_alias)) {
				spec_copy = strdup(alias->pm_spec);
				break;
			}

	/* NOTE(review): strdup() failure is not checked here. */
	if (spec_copy == NULL)
		spec_copy = strdup(ctrspec);

	/* split off the event name; 'r' keeps the qualifier tail */
	r = spec_copy;
	ctrname = strsep(&r, ",");

	/*
	 * If an explicit class prefix was given by the user, restrict the
	 * search for the event to the specified PMC class.
	 */
	ev = NULL;
	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
		    strncasecmp(ctrname, pcd->pm_evc_name,
				pcd->pm_evc_name_size) == 0) {
			if ((ev = pmc_match_event_class(ctrname +
			    pcd->pm_evc_name_size, pcd)) == NULL) {
				errno = EINVAL;
				goto out;
			}
			break;
		}
	}

	/*
	 * Otherwise, search for this event in all compatible PMC
	 * classes.
	 */
	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
			ev = pmc_match_event_class(ctrname, pcd);
	}

	if (ev == NULL) {
		errno = EINVAL;
		goto out;
	}

	bzero(&pmc_config, sizeof(pmc_config));
	pmc_config.pm_ev = ev->pm_ev_code;
	pmc_config.pm_class = pcd->pm_evc_class;
	pmc_config.pm_cpu = cpu;
	pmc_config.pm_mode = mode;
	pmc_config.pm_flags = flags;

	if (PMC_IS_SAMPLING_MODE(mode))
		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;

	/* let the class-specific allocator parse the qualifier tail */
	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
		errno = EINVAL;
		goto out;
	}

	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
		goto out;

	*pmcid = pmc_config.pm_pmcid;

	retval = 0;

 out:
	if (spec_copy)
		free(spec_copy);

	return (retval);
}

/* Attach PMC 'pmc' to process 'pid'. */
int
pmc_attach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_attach_args;

	pmc_attach_args.pm_pmc = pmc;
	pmc_attach_args.pm_pid = pid;

	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
}

/*
 * Retrieve the capability bits of the class that PMC 'pmcid' belongs
 * to, into '*caps'.
 */
int
pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*caps = cpu_info.pm_classes[i].pm_caps;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

/* Direct log output for the driver to file descriptor 'fd'. */
int
pmc_configure_logfile(int fd)
{
	struct pmc_op_configurelog cla;

	cla.pm_logfd = fd;
	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
		return (-1);
	return (0);
}

/* Return a pointer to the library's cached CPU information. */
int
pmc_cpuinfo(const struct pmc_cpuinfo **pci)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	*pci = &cpu_info;
	return (0);
}

/* Detach PMC 'pmc' from process 'pid'. */
int
pmc_detach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_detach_args;

	pmc_detach_args.pm_pmc = pmc;
	pmc_detach_args.pm_pid = pid;
	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
}

/* Administratively disable PMC row 'pmc' on CPU 'cpu'. */
int
pmc_disable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_DISABLED;
	return (PMC_CALL(PMCADMIN, &ssa));
}

/* Re-enable PMC row 'pmc' on CPU 'cpu' (mark it free). */
int
pmc_enable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_FREE;
	return (PMC_CALL(PMCADMIN, &ssa));
}

/*
 * Return a list of events known to a given PMC class. 'cl' is the
 * PMC class identifier, 'eventnames' is the returned list of 'const
 * char *' pointers pointing to the names of the events. 'nevents' is
 * the number of event name pointers returned.
 *
 * The space for 'eventnames' is allocated using malloc(3). The caller
 * is responsible for freeing this space when done.
 */
int
pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
    int *nevents)
{
	int count;
	const char **names;
	const struct pmc_event_descr *ev;

	switch (cl)
	{
	case PMC_CLASS_IAF:
		ev = iaf_event_table;
		count = PMC_EVENT_TABLE_SIZE(iaf);
		break;
	case PMC_CLASS_IAP:
		/*
		 * Return the most appropriate set of event name
		 * spellings for the current CPU.
		 */
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_INTEL_ATOM:
			ev = atom_event_table;
			count = PMC_EVENT_TABLE_SIZE(atom);
			break;
		case PMC_CPU_INTEL_CORE:
			ev = core_event_table;
			count = PMC_EVENT_TABLE_SIZE(core);
			break;
		case PMC_CPU_INTEL_CORE2:
		case PMC_CPU_INTEL_CORE2EXTREME:
			ev = core2_event_table;
			count = PMC_EVENT_TABLE_SIZE(core2);
			break;
		case PMC_CPU_INTEL_COREI7:
			ev = corei7_event_table;
			count = PMC_EVENT_TABLE_SIZE(corei7);
			break;
		case PMC_CPU_INTEL_HASWELL:
			ev = haswell_event_table;
			count = PMC_EVENT_TABLE_SIZE(haswell);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE:
			ev = ivybridge_event_table;
			count = PMC_EVENT_TABLE_SIZE(ivybridge);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE_XEON:
			ev = ivybridge_xeon_event_table;
			count = PMC_EVENT_TABLE_SIZE(ivybridge_xeon);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridge_event_table;
			count = PMC_EVENT_TABLE_SIZE(sandybridge);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
			ev = sandybridge_xeon_event_table;
			count = PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmere_event_table;
			count = PMC_EVENT_TABLE_SIZE(westmere);
			break;
		}
		break;
	case PMC_CLASS_UCF:
		ev = ucf_event_table;
		count = PMC_EVENT_TABLE_SIZE(ucf);
		break;
	case PMC_CLASS_UCP:
		/*
		 * Return the most appropriate set of event name
		 * spellings for the current CPU.
		 */
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_INTEL_COREI7:
			ev = corei7uc_event_table;
			count = PMC_EVENT_TABLE_SIZE(corei7uc);
			break;
		case PMC_CPU_INTEL_HASWELL:
			ev = haswelluc_event_table;
			count = PMC_EVENT_TABLE_SIZE(haswelluc);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridgeuc_event_table;
			count = PMC_EVENT_TABLE_SIZE(sandybridgeuc);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmereuc_event_table;
			count = PMC_EVENT_TABLE_SIZE(westmereuc);
			break;
		}
		break;
	case PMC_CLASS_TSC:
		ev = tsc_event_table;
		count = PMC_EVENT_TABLE_SIZE(tsc);
		break;
	case PMC_CLASS_K7:
		ev = k7_event_table;
		count = PMC_EVENT_TABLE_SIZE(k7);
		break;
	case PMC_CLASS_K8:
		ev = k8_event_table;
		count = PMC_EVENT_TABLE_SIZE(k8);
		break;
	case PMC_CLASS_P4:
		ev = p4_event_table;
		count = PMC_EVENT_TABLE_SIZE(p4);
		break;
	case PMC_CLASS_P5:
		ev = p5_event_table;
		count = PMC_EVENT_TABLE_SIZE(p5);
		break;
	case PMC_CLASS_P6:
		ev = p6_event_table;
		count = PMC_EVENT_TABLE_SIZE(p6);
		break;
	case PMC_CLASS_XSCALE:
		ev = xscale_event_table;
		count = PMC_EVENT_TABLE_SIZE(xscale);
		break;
	case PMC_CLASS_MIPS24K:
		ev = mips24k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips24k);
		break;
	case PMC_CLASS_OCTEON:
		ev = octeon_event_table;
		count = PMC_EVENT_TABLE_SIZE(octeon);
		break;
	case PMC_CLASS_PPC7450:
		ev = ppc7450_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc7450);
		break;
	case PMC_CLASS_PPC970:
		ev = ppc970_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc970);
		break;
	case PMC_CLASS_SOFT:
		/* soft event count is discovered at runtime in pmc_init() */
		ev = soft_event_table;
		count = soft_event_info.pm_nevent;
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	if ((names = malloc(count * sizeof(const char *))) == NULL)
		return (-1);

	*eventnames = names;
	*nevents = count;

	for (;count--; ev++, names++)
		*names = ev->pm_ev_name;

	return (0);
}

/* Ask the driver to flush buffered log data to the log file. */
int
pmc_flush_logfile(void)
{
	return (PMC_CALL(FLUSHLOG,0));
}

/* Close the driver's log file. */
int
pmc_close_logfile(void)
{
	return (PMC_CALL(CLOSELOG,0));
}

/* Copy the driver's statistics into '*ds'. */
int
pmc_get_driver_stats(struct pmc_driverstats *ds)
{
	struct pmc_op_getdriverstats gms;

	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
		return (-1);

	/* copy out fields in the current userland<->library interface */
	ds->pm_intr_ignored = gms.pm_intr_ignored;
	ds->pm_intr_processed = gms.pm_intr_processed;
	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
	ds->pm_syscalls = gms.pm_syscalls;
	ds->pm_syscall_errors = gms.pm_syscall_errors;
	ds->pm_buffer_requests = gms.pm_buffer_requests;
	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
	ds->pm_log_sweeps = gms.pm_log_sweeps;
	return (0);
}

/* Retrieve the MSR number usable for reading PMC 'pmc' from userland. */
int
pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
{
	struct pmc_op_getmsr gm;

	gm.pm_pmcid = pmc;
	if (PMC_CALL(PMCGETMSR, &gm) < 0)
		return (-1);
	*msr = gm.pm_msr;
	return (0);
}

/*
 * Initialize the library: locate the hwpmc(4) module's syscall
 * number, verify the ABI version, fetch CPU information and build the
 * class/alias tables used by the event name parser.  Returns 0 on
 * success, -1 (with pmc_syscall reset to -1) on failure.
 */
int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;
#if defined(__amd64__) || defined(__i386__)
	int cpu_has_iaf_counters;
	unsigned int t;
#endif

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall
= pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	/* cache CPU information for later queries */
	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass = op_cpu_info.pm_nclass;
	for (n = 0; n < cpu_info.pm_nclass; n++)
		cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];

	pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
	    sizeof(struct pmc_class_descr *));

	if (pmc_class_table == NULL)
		return (-1);

	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
		pmc_class_table[n] = NULL;

	/*
	 * Get soft events list.
	 */
	soft_event_info.pm_class = PMC_CLASS_SOFT;
	if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0)
		return (pmc_syscall = -1);

	/* Map soft events to static list. */
	for (n = 0; n < soft_event_info.pm_nevent; n++) {
		soft_event_table[n].pm_ev_name =
		    soft_event_info.pm_events[n].pm_ev_name;
		soft_event_table[n].pm_ev_code =
		    soft_event_info.pm_events[n].pm_ev_code;
	}
	soft_class_table_descr.pm_evc_event_table_size = \
	    soft_event_info.pm_nevent;
	soft_class_table_descr.pm_evc_event_table = \
	    soft_event_table;

	/*
	 * Fill in the class table.
	 */
	n = 0;

	/* Fill soft events information. */
	pmc_class_table[n++] = &soft_class_table_descr;
#if defined(__amd64__) || defined(__i386__)
	if (cpu_info.pm_cputype != PMC_CPU_GENERIC)
		pmc_class_table[n++] = &tsc_class_table_descr;

	/*
	 * Check if this CPU has fixed function counters.
	 */
	cpu_has_iaf_counters = 0;
	for (t = 0; t < cpu_info.pm_nclass; t++)
		if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF &&
		    cpu_info.pm_classes[t].pm_num > 0)
			cpu_has_iaf_counters = 1;
#endif

/* Install the MD alias table and compatible-class list for CPU family C. */
#define	PMC_MDEP_INIT(C) do {					\
		pmc_mdep_event_aliases = C##_aliases;		\
		pmc_mdep_class_list = C##_pmc_classes;		\
		pmc_mdep_class_list_size =			\
		    PMC_TABLE_SIZE(C##_pmc_classes);		\
	} while (0)

/*
 * As PMC_MDEP_INIT, but also register the fixed-function (IAF) class
 * and fall back to the no-IAF alias table when the CPU lacks fixed
 * counters.
 */
#define	PMC_MDEP_INIT_INTEL_V2(C) do {					\
		PMC_MDEP_INIT(C);					\
		pmc_class_table[n++] = &iaf_class_table_descr;		\
		if (!cpu_has_iaf_counters)				\
			pmc_mdep_event_aliases =			\
			    C##_aliases_without_iaf;			\
		pmc_class_table[n] = &C##_class_table_descr;		\
	} while (0)

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__i386__)
	case PMC_CPU_AMD_K7:
		PMC_MDEP_INIT(k7);
		pmc_class_table[n] = &k7_class_table_descr;
		break;
	case PMC_CPU_INTEL_P5:
		PMC_MDEP_INIT(p5);
		pmc_class_table[n] = &p5_class_table_descr;
		break;
	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
	case PMC_CPU_INTEL_PIII:
	case PMC_CPU_INTEL_PM:
		PMC_MDEP_INIT(p6);
		pmc_class_table[n] = &p6_class_table_descr;
		break;
#endif
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		pmc_class_table[n] = &k8_class_table_descr;
		break;
	case PMC_CPU_INTEL_ATOM:
		PMC_MDEP_INIT_INTEL_V2(atom);
		break;
	case PMC_CPU_INTEL_CORE:
		PMC_MDEP_INIT(core);
		pmc_class_table[n] = &core_class_table_descr;
		break;
	case PMC_CPU_INTEL_CORE2:
	case PMC_CPU_INTEL_CORE2EXTREME:
		PMC_MDEP_INIT_INTEL_V2(core2);
		break;
	case PMC_CPU_INTEL_COREI7:
		/* uncore fixed + programmable classes first */
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &corei7uc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(corei7);
		break;
	case PMC_CPU_INTEL_HASWELL:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &haswelluc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(haswell);
		break;
	case PMC_CPU_INTEL_IVYBRIDGE:
		PMC_MDEP_INIT_INTEL_V2(ivybridge);
		break;
	case PMC_CPU_INTEL_IVYBRIDGE_XEON:
		PMC_MDEP_INIT_INTEL_V2(ivybridge_xeon);
		break;
	case PMC_CPU_INTEL_SANDYBRIDGE:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &sandybridgeuc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(sandybridge);
		break;
	case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
		PMC_MDEP_INIT_INTEL_V2(sandybridge_xeon);
		break;
	case PMC_CPU_INTEL_WESTMERE:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &westmereuc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(westmere);
		break;
	case PMC_CPU_INTEL_PIV:
		PMC_MDEP_INIT(p4);
		pmc_class_table[n] = &p4_class_table_descr;
		break;
#endif
	case PMC_CPU_GENERIC:
		PMC_MDEP_INIT(generic);
		break;
#if defined(__XSCALE__)
	case PMC_CPU_INTEL_XSCALE:
		PMC_MDEP_INIT(xscale);
		pmc_class_table[n] = &xscale_class_table_descr;
		break;
#endif
#if defined(__mips__)
	case PMC_CPU_MIPS_24K:
		PMC_MDEP_INIT(mips24k);
		pmc_class_table[n] = &mips24k_class_table_descr;
		break;
	case PMC_CPU_MIPS_OCTEON:
		PMC_MDEP_INIT(octeon);
		pmc_class_table[n] = &octeon_class_table_descr;
		break;
#endif /* __mips__ */
#if defined(__powerpc__)
	case PMC_CPU_PPC_7450:
		PMC_MDEP_INIT(ppc7450);
		pmc_class_table[n] = &ppc7450_class_table_descr;
		break;
	case PMC_CPU_PPC_970:
		PMC_MDEP_INIT(ppc970);
		pmc_class_table[n] = &ppc970_class_table_descr;
		break;
#endif
	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about. This shouldn't happen since the abi version check
		 * should have caught this.
		 */
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}

/*
 * Return the symbolic name of capability bit 'cap', or NULL with
 * errno set to EINVAL when 'cap' is out of range or has more than
 * one bit set.
 */
const char *
pmc_name_of_capability(enum pmc_caps cap)
{
	int i;

	/*
	 * 'cap' should have a single bit set and should be in
	 * range.
3144 */ 3145 if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST || 3146 cap > PMC_CAP_LAST) { 3147 errno = EINVAL; 3148 return (NULL); 3149 } 3150 3151 i = ffs(cap); 3152 return (pmc_capability_names[i - 1]); 3153 } 3154 3155 const char * 3156 pmc_name_of_class(enum pmc_class pc) 3157 { 3158 if ((int) pc >= PMC_CLASS_FIRST && 3159 pc <= PMC_CLASS_LAST) 3160 return (pmc_class_names[pc]); 3161 3162 errno = EINVAL; 3163 return (NULL); 3164 } 3165 3166 const char * 3167 pmc_name_of_cputype(enum pmc_cputype cp) 3168 { 3169 size_t n; 3170 3171 for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++) 3172 if (cp == pmc_cputype_names[n].pm_cputype) 3173 return (pmc_cputype_names[n].pm_name); 3174 3175 errno = EINVAL; 3176 return (NULL); 3177 } 3178 3179 const char * 3180 pmc_name_of_disposition(enum pmc_disp pd) 3181 { 3182 if ((int) pd >= PMC_DISP_FIRST && 3183 pd <= PMC_DISP_LAST) 3184 return (pmc_disposition_names[pd]); 3185 3186 errno = EINVAL; 3187 return (NULL); 3188 } 3189 3190 const char * 3191 _pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu) 3192 { 3193 const struct pmc_event_descr *ev, *evfence; 3194 3195 ev = evfence = NULL; 3196 if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) { 3197 ev = iaf_event_table; 3198 evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf); 3199 } else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) { 3200 switch (cpu) { 3201 case PMC_CPU_INTEL_ATOM: 3202 ev = atom_event_table; 3203 evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom); 3204 break; 3205 case PMC_CPU_INTEL_CORE: 3206 ev = core_event_table; 3207 evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core); 3208 break; 3209 case PMC_CPU_INTEL_CORE2: 3210 case PMC_CPU_INTEL_CORE2EXTREME: 3211 ev = core2_event_table; 3212 evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2); 3213 break; 3214 case PMC_CPU_INTEL_COREI7: 3215 ev = corei7_event_table; 3216 evfence = corei7_event_table + PMC_EVENT_TABLE_SIZE(corei7); 3217 break; 3218 case PMC_CPU_INTEL_HASWELL: 3219 
ev = haswell_event_table; 3220 evfence = haswell_event_table + PMC_EVENT_TABLE_SIZE(haswell); 3221 break; 3222 case PMC_CPU_INTEL_IVYBRIDGE: 3223 ev = ivybridge_event_table; 3224 evfence = ivybridge_event_table + PMC_EVENT_TABLE_SIZE(ivybridge); 3225 break; 3226 case PMC_CPU_INTEL_IVYBRIDGE_XEON: 3227 ev = ivybridge_xeon_event_table; 3228 evfence = ivybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(ivybridge_xeon); 3229 break; 3230 case PMC_CPU_INTEL_SANDYBRIDGE: 3231 ev = sandybridge_event_table; 3232 evfence = sandybridge_event_table + PMC_EVENT_TABLE_SIZE(sandybridge); 3233 break; 3234 case PMC_CPU_INTEL_SANDYBRIDGE_XEON: 3235 ev = sandybridge_xeon_event_table; 3236 evfence = sandybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(sandybridge_xeon); 3237 break; 3238 case PMC_CPU_INTEL_WESTMERE: 3239 ev = westmere_event_table; 3240 evfence = westmere_event_table + PMC_EVENT_TABLE_SIZE(westmere); 3241 break; 3242 default: /* Unknown CPU type. */ 3243 break; 3244 } 3245 } else if (pe >= PMC_EV_UCF_FIRST && pe <= PMC_EV_UCF_LAST) { 3246 ev = ucf_event_table; 3247 evfence = ucf_event_table + PMC_EVENT_TABLE_SIZE(ucf); 3248 } else if (pe >= PMC_EV_UCP_FIRST && pe <= PMC_EV_UCP_LAST) { 3249 switch (cpu) { 3250 case PMC_CPU_INTEL_COREI7: 3251 ev = corei7uc_event_table; 3252 evfence = corei7uc_event_table + PMC_EVENT_TABLE_SIZE(corei7uc); 3253 break; 3254 case PMC_CPU_INTEL_SANDYBRIDGE: 3255 ev = sandybridgeuc_event_table; 3256 evfence = sandybridgeuc_event_table + PMC_EVENT_TABLE_SIZE(sandybridgeuc); 3257 break; 3258 case PMC_CPU_INTEL_WESTMERE: 3259 ev = westmereuc_event_table; 3260 evfence = westmereuc_event_table + PMC_EVENT_TABLE_SIZE(westmereuc); 3261 break; 3262 default: /* Unknown CPU type. 
*/ 3263 break; 3264 } 3265 } else if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) { 3266 ev = k7_event_table; 3267 evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7); 3268 } else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) { 3269 ev = k8_event_table; 3270 evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8); 3271 } else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) { 3272 ev = p4_event_table; 3273 evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4); 3274 } else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) { 3275 ev = p5_event_table; 3276 evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5); 3277 } else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) { 3278 ev = p6_event_table; 3279 evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6); 3280 } else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) { 3281 ev = xscale_event_table; 3282 evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale); 3283 } else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) { 3284 ev = mips24k_event_table; 3285 evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k); 3286 } else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) { 3287 ev = octeon_event_table; 3288 evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon); 3289 } else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) { 3290 ev = ppc7450_event_table; 3291 evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450); 3292 } else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) { 3293 ev = ppc970_event_table; 3294 evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970); 3295 } else if (pe == PMC_EV_TSC_TSC) { 3296 ev = tsc_event_table; 3297 evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc); 3298 } else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) { 3299 ev = soft_event_table; 3300 evfence = soft_event_table + soft_event_info.pm_nevent; 3301 } 3302 3303 for (; ev != evfence; ev++) 3304 if (pe == ev->pm_ev_code) 3305 
return (ev->pm_ev_name); 3306 3307 return (NULL); 3308 } 3309 3310 const char * 3311 pmc_name_of_event(enum pmc_event pe) 3312 { 3313 const char *n; 3314 3315 if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL) 3316 return (n); 3317 3318 errno = EINVAL; 3319 return (NULL); 3320 } 3321 3322 const char * 3323 pmc_name_of_mode(enum pmc_mode pm) 3324 { 3325 if ((int) pm >= PMC_MODE_FIRST && 3326 pm <= PMC_MODE_LAST) 3327 return (pmc_mode_names[pm]); 3328 3329 errno = EINVAL; 3330 return (NULL); 3331 } 3332 3333 const char * 3334 pmc_name_of_state(enum pmc_state ps) 3335 { 3336 if ((int) ps >= PMC_STATE_FIRST && 3337 ps <= PMC_STATE_LAST) 3338 return (pmc_state_names[ps]); 3339 3340 errno = EINVAL; 3341 return (NULL); 3342 } 3343 3344 int 3345 pmc_ncpu(void) 3346 { 3347 if (pmc_syscall == -1) { 3348 errno = ENXIO; 3349 return (-1); 3350 } 3351 3352 return (cpu_info.pm_ncpu); 3353 } 3354 3355 int 3356 pmc_npmc(int cpu) 3357 { 3358 if (pmc_syscall == -1) { 3359 errno = ENXIO; 3360 return (-1); 3361 } 3362 3363 if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) { 3364 errno = EINVAL; 3365 return (-1); 3366 } 3367 3368 return (cpu_info.pm_npmc); 3369 } 3370 3371 int 3372 pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci) 3373 { 3374 int nbytes, npmc; 3375 struct pmc_op_getpmcinfo *pmci; 3376 3377 if ((npmc = pmc_npmc(cpu)) < 0) 3378 return (-1); 3379 3380 nbytes = sizeof(struct pmc_op_getpmcinfo) + 3381 npmc * sizeof(struct pmc_info); 3382 3383 if ((pmci = calloc(1, nbytes)) == NULL) 3384 return (-1); 3385 3386 pmci->pm_cpu = cpu; 3387 3388 if (PMC_CALL(GETPMCINFO, pmci) < 0) { 3389 free(pmci); 3390 return (-1); 3391 } 3392 3393 /* kernel<->library, library<->userland interfaces are identical */ 3394 *ppmci = (struct pmc_pmcinfo *) pmci; 3395 return (0); 3396 } 3397 3398 int 3399 pmc_read(pmc_id_t pmc, pmc_value_t *value) 3400 { 3401 struct pmc_op_pmcrw pmc_read_op; 3402 3403 pmc_read_op.pm_pmcid = pmc; 3404 pmc_read_op.pm_flags = PMC_F_OLDVALUE; 3405 
pmc_read_op.pm_value = -1;

	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
		return (-1);

	*value = pmc_read_op.pm_value;
	return (0);
}

/* Release PMC 'pmc' back to the system. */
int
pmc_release(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_release_args;

	pmc_release_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
}

/*
 * Atomically write 'newvalue' to PMC 'pmc' and return its previous
 * value in '*oldvaluep'.
 */
int
pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
{
	struct pmc_op_pmcrw pmc_rw_op;

	pmc_rw_op.pm_pmcid = pmc;
	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
	pmc_rw_op.pm_value = newvalue;

	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
		return (-1);

	*oldvaluep = pmc_rw_op.pm_value;
	return (0);
}

/* Set the initial (reload) count of PMC 'pmc'. */
int
pmc_set(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcsetcount sc;

	sc.pm_pmcid = pmc;
	sc.pm_count = value;

	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
		return (-1);
	return (0);
}

/* Start PMC 'pmc' counting. */
int
pmc_start(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_start_args;

	pmc_start_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTART, &pmc_start_args));
}

/* Stop PMC 'pmc'. */
int
pmc_stop(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_stop_args;

	pmc_stop_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
}

/* Retrieve the counter width (in bits) of the class of PMC 'pmcid'. */
int
pmc_width(pmc_id_t pmcid, uint32_t *width)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*width = cpu_info.pm_classes[i].pm_width;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

/* Write 'value' to PMC 'pmc'. */
int
pmc_write(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcrw pmc_write_op;

	pmc_write_op.pm_pmcid = pmc;
	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
	pmc_write_op.pm_value = value;
	return (PMC_CALL(PMCRW, &pmc_write_op));
}

/* Append a user log record carrying 'userdata' to the log file. */
int
pmc_writelog(uint32_t userdata)
{
	struct pmc_op_writelog wl;

	wl.pm_userdata = userdata;
	return (PMC_CALL(WRITELOG, &wl));
}