1 /*- 2 * Copyright (c) 2003-2008 Joseph Koshy 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/pmc.h>
#include <sys/syscall.h>

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <pmc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>

#include "libpmcinternal.h"

/*
 * Function prototypes.
 *
 * Each PMC class has an allocation parser that translates the textual
 * part of a counter specification into the kernel's
 * struct pmc_op_pmcallocate.  Only the parsers relevant to the target
 * architecture are compiled in.
 */
#if defined(__i386__)
static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int ucf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int ucp_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__i386__)
static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__XSCALE__)
static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__mips__)
static int mips_allocate_pmc(enum pmc_event _pe, char* ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif /* __mips__ */
static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);

#if defined(__powerpc__)
static int ppc7450_allocate_pmc(enum pmc_event _pe, char* ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif /* __powerpc__ */

/* Invoke the hwpmc(4) system call for operation 'cmd'. */
#define PMC_CALL(cmd, params)				\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))

/*
 * Event aliases provide a way for the user to ask for generic events
 * like "cache-misses", or "instructions-retired".  These aliases are
 * mapped to the appropriate canonical event descriptions using a
 * lookup table.
 */
struct pmc_event_alias {
	const char	*pm_alias;	/* generic, user-visible name */
	const char	*pm_spec;	/* canonical event specification */
};

/* Set at initialization to the alias table for the running CPU. */
static const struct pmc_event_alias *pmc_mdep_event_aliases;

/*
 * The pmc_event_descr structure maps symbolic names known to the user
 * to integer codes used by the PMC KLD.
 */
struct pmc_event_descr {
	const char	*pm_ev_name;
	enum pmc_event	pm_ev_code;
};

/*
 * The pmc_class_descr structure maps class name prefixes for
 * event names to event tables and other PMC class data.
 */
struct pmc_class_descr {
	const char	*pm_evc_name;		/* class prefix, e.g. "IAP-" */
	size_t		pm_evc_name_size;	/* prefix length, sans NUL */
	enum pmc_class	pm_evc_class;
	const struct pmc_event_descr *pm_evc_event_table;
	size_t		pm_evc_event_table_size;
	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
};

#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)

#undef	__PMC_EV
#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },

/*
 * PMC_CLASSDEP_TABLE(NAME, CLASS)
 *
 * Define a table mapping event names and aliases to HWPMC event IDs.
 */
#define	PMC_CLASSDEP_TABLE(N, C)					\
	static const struct pmc_event_descr N##_event_table[] =		\
	{								\
		__PMC_EV_##C()						\
	}

PMC_CLASSDEP_TABLE(iaf, IAF);
PMC_CLASSDEP_TABLE(k7, K7);
PMC_CLASSDEP_TABLE(k8, K8);
PMC_CLASSDEP_TABLE(p4, P4);
PMC_CLASSDEP_TABLE(p5, P5);
PMC_CLASSDEP_TABLE(p6, P6);
PMC_CLASSDEP_TABLE(xscale, XSCALE);
PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
PMC_CLASSDEP_TABLE(octeon, OCTEON);
PMC_CLASSDEP_TABLE(ucf, UCF);
PMC_CLASSDEP_TABLE(ppc7450, PPC7450);

/* Software (dynamic) events; the table contents are filled in at runtime. */
static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];

#undef	__PMC_EV_ALIAS
#define	__PMC_EV_ALIAS(N,CODE)	{ N, PMC_EV_##CODE },

/* Per-CPU-model IAP/UCP event name tables, built from alias macros. */
static const struct pmc_event_descr atom_event_table[] =
{
	__PMC_EV_ALIAS_ATOM()
};

static const struct pmc_event_descr core_event_table[] =
{
	__PMC_EV_ALIAS_CORE()
};


static const struct pmc_event_descr core2_event_table[] =
{
	__PMC_EV_ALIAS_CORE2()
};

static const struct pmc_event_descr corei7_event_table[] =
{
	__PMC_EV_ALIAS_COREI7()
};

static const struct pmc_event_descr ivybridge_event_table[] =
{
	__PMC_EV_ALIAS_IVYBRIDGE()
};

static const struct pmc_event_descr sandybridge_event_table[] =
{
	__PMC_EV_ALIAS_SANDYBRIDGE()
};

static const struct pmc_event_descr sandybridge_xeon_event_table[] =
{
	__PMC_EV_ALIAS_SANDYBRIDGE_XEON()
};

static const struct pmc_event_descr westmere_event_table[] =
{
	__PMC_EV_ALIAS_WESTMERE()
};

static const struct pmc_event_descr corei7uc_event_table[] =
{
	__PMC_EV_ALIAS_COREI7UC()
};

static const struct pmc_event_descr sandybridgeuc_event_table[] =
{
	__PMC_EV_ALIAS_SANDYBRIDGEUC()
};

static const struct pmc_event_descr westmereuc_event_table[] =
{
	__PMC_EV_ALIAS_WESTMEREUC()
};

/*
 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
 *
 * Map a CPU to the PMC classes it supports.
 */
#define	PMC_MDEP_TABLE(N,C,...)				\
	static const enum pmc_class N##_pmc_classes[] = {	\
		PMC_CLASS_##C, __VA_ARGS__			\
	}

PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(core, IAP, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(corei7, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(ivybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(sandybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(sandybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(westmere, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(k7, K7, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p4, P4, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p5, P5, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p6, P6, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE);
PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450);
PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT);

static const struct pmc_event_descr tsc_event_table[] =
{
	__PMC_EV_TSC()
};

/*
 * PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)
 *
 * Define a class descriptor tying together the class's name prefix,
 * its event table, and its allocation parser.
 */
#undef	PMC_CLASS_TABLE_DESC
#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
	static const struct pmc_class_descr NAME##_class_table_descr =	\
	{							\
		.pm_evc_name  = #CLASS "-",			\
		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
		.pm_evc_class = PMC_CLASS_##CLASS ,		\
		.pm_evc_event_table = EVENTS##_event_table ,	\
		.pm_evc_event_table_size = 			\
			PMC_EVENT_TABLE_SIZE(EVENTS),		\
		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
	}

#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap);
PMC_CLASS_TABLE_DESC(ivybridge, IAP, ivybridge, iap);
PMC_CLASS_TABLE_DESC(sandybridge, IAP, sandybridge, iap);
PMC_CLASS_TABLE_DESC(sandybridge_xeon, IAP, sandybridge_xeon, iap);
PMC_CLASS_TABLE_DESC(westmere, IAP, westmere, iap);
PMC_CLASS_TABLE_DESC(ucf, UCF, ucf, ucf);
PMC_CLASS_TABLE_DESC(corei7uc, UCP, corei7uc, ucp);
PMC_CLASS_TABLE_DESC(sandybridgeuc, UCP, sandybridgeuc, ucp);
PMC_CLASS_TABLE_DESC(westmereuc, UCP, westmereuc, ucp);
#endif
#if	defined(__i386__)
PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
#endif
#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
#endif
#if	defined(__i386__)
PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
#endif
#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
#endif
#if	defined(__XSCALE__)
PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
#endif
#if defined(__mips__)
PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips);
#endif /* __mips__ */
#if defined(__powerpc__)
PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, ppc7450);
#endif

/*
 * The SOFT class descriptor is built by hand: its event table is
 * dynamic (soft_event_table) and is attached at runtime.
 */
static struct pmc_class_descr soft_class_table_descr =
{
	.pm_evc_name  = "SOFT-",
	.pm_evc_name_size = sizeof("SOFT-") - 1,
	.pm_evc_class = PMC_CLASS_SOFT,
	.pm_evc_event_table = NULL,
	.pm_evc_event_table_size = 0,
	.pm_evc_allocate_pmc = soft_allocate_pmc
};

#undef	PMC_CLASS_TABLE_DESC

/* Per-CPU list of class descriptors; sized by the kernel's class count. */
static const struct pmc_class_descr **pmc_class_table;
#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass

static const enum pmc_class *pmc_mdep_class_list;
static size_t pmc_mdep_class_list_size;

/*
 * Mapping tables, mapping enumeration values to human readable
 * strings.
 */

static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

static const char * pmc_class_names[] = {
#undef	__PMC_CLASS
#define __PMC_CLASS(C)	#C ,
	__PMC_CLASSES()
};

struct pmc_cputype_map {
	enum pmc_cputype pm_cputype;
	const char	*pm_name;
};

static const struct pmc_cputype_map pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
	__PMC_CPUS()
};

static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

static const char * pmc_mode_names[] = {
#undef  __PMC_MODE
#define __PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

static const char * pmc_state_names[] = {
#undef  __PMC_STATE
#define __PMC_STATE(S) #S ,
	__PMC_STATES()
};

/*
 * Filled in by pmc_init().
374 */ 375 static int pmc_syscall = -1; 376 static struct pmc_cpuinfo cpu_info; 377 static struct pmc_op_getdyneventinfo soft_event_info; 378 379 /* Event masks for events */ 380 struct pmc_masks { 381 const char *pm_name; 382 const uint64_t pm_value; 383 }; 384 #define PMCMASK(N,V) { .pm_name = #N, .pm_value = (V) } 385 #define NULLMASK { .pm_name = NULL } 386 387 #if defined(__amd64__) || defined(__i386__) 388 static int 389 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask) 390 { 391 const struct pmc_masks *pm; 392 char *q, *r; 393 int c; 394 395 if (pmask == NULL) /* no mask keywords */ 396 return (-1); 397 q = strchr(p, '='); /* skip '=' */ 398 if (*++q == '\0') /* no more data */ 399 return (-1); 400 c = 0; /* count of mask keywords seen */ 401 while ((r = strsep(&q, "+")) != NULL) { 402 for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name); 403 pm++) 404 ; 405 if (pm->pm_name == NULL) /* not found */ 406 return (-1); 407 *evmask |= pm->pm_value; 408 c++; 409 } 410 return (c); 411 } 412 #endif 413 414 #define KWMATCH(p,kw) (strcasecmp((p), (kw)) == 0) 415 #define KWPREFIXMATCH(p,kw) (strncasecmp((p), (kw), sizeof((kw)) - 1) == 0) 416 #define EV_ALIAS(N,S) { .pm_alias = N, .pm_spec = S } 417 418 #if defined(__i386__) 419 420 /* 421 * AMD K7 (Athlon) CPUs. 
 */

static struct pmc_event_alias k7_aliases[] = {
	EV_ALIAS("branches",		"k7-retired-branches"),
	EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k7-dc-misses"),
	EV_ALIAS("ic-misses",		"k7-ic-misses"),
	EV_ALIAS("instructions",	"k7-retired-instructions"),
	EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
	EV_ALIAS(NULL, NULL)
};

/* Keywords accepted in a K7 counter specification. */
#define	K7_KW_COUNT	"count"
#define	K7_KW_EDGE	"edge"
#define	K7_KW_INV	"inv"
#define	K7_KW_OS	"os"
#define	K7_KW_UNITMASK	"unitmask"
#define	K7_KW_USR	"usr"

/*
 * Parse the comma separated qualifier list 'ctrspec' for event 'pe'
 * and fill in the AMD-specific fields of *pmc_config.  Returns 0 on
 * success, -1 on a malformed specification.
 */
static int
k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	int c, has_unitmask;
	uint32_t count, unitmask;

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	/*
	 * Only the DC refill/writeback events accept a 'unitmask'
	 * qualifier; they default to all MOESI states.
	 */
	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
	    pe == PMC_EV_K7_DC_WRITEBACKS) {
		has_unitmask = 1;
		unitmask = AMD_PMC_UNITMASK_MOESI;
	} else
		unitmask = has_unitmask = 0;

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
			/* "count=N": event threshold. */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K7_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K7_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, K7_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
			if (has_unitmask == 0)
				return (-1);
			unitmask = 0;
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			/*
			 * "unitmask=<states>": letters select MOESI
			 * cache line states; '+' is an optional
			 * separator.
			 */
			while ((c = tolower(*q++)) != 0)
				if (c == 'm')
					unitmask |= AMD_PMC_UNITMASK_M;
				else if (c == 'o')
					unitmask |= AMD_PMC_UNITMASK_O;
				else if (c == 'e')
					unitmask |= AMD_PMC_UNITMASK_E;
				else if (c == 's')
					unitmask |= AMD_PMC_UNITMASK_S;
				else if (c == 'i')
					unitmask |= AMD_PMC_UNITMASK_I;
				else if (c == '+')
					continue;
				else
					return (-1);

			if (unitmask == 0)	/* at least one state required */
				return (-1);

		} else if (KWMATCH(p, K7_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	if (has_unitmask) {
		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		pmc_config->pm_md.pm_amd.pm_amd_config |=
		    AMD_PMC_TO_UNITMASK(unitmask);
	}

	return (0);

}

#endif

#if defined(__amd64__) || defined(__i386__)

/*
 * Intel Core (Family 6, Model E) PMCs.
 */

static struct pmc_event_alias core_aliases[] = {
	EV_ALIAS("branches",		"iap-br-instr-ret"),
	EV_ALIAS("branch-mispredicts",	"iap-br-mispred-ret"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-icache-misses"),
	EV_ALIAS("instructions",	"iap-instr-ret"),
	EV_ALIAS("interrupts",		"iap-core-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"iap-unhalted-core-cycles"),
	EV_ALIAS(NULL, NULL)
};

/*
 * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
 * and Atom (Family 6, model 1CH) PMCs.
 *
 * We map aliases to events on the fixed-function counters if these
 * are present.  Note that not all CPUs in this family contain fixed-function
 * counters.
 */

/* Aliases for CPUs that have fixed-function (IAF) counters. */
static struct pmc_event_alias core2_aliases[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iaf-instr-retired.any"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iaf-cpu-clk-unhalted.core"),
	EV_ALIAS(NULL, NULL)
};

/*
 * Same aliases for CPUs lacking fixed-function counters: the
 * "instructions" and "unhalted-cycles" aliases fall back to the
 * programmable (IAP) equivalents.
 */
static struct pmc_event_alias core2_aliases_without_iaf[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iap-inst-retired.any_p"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iap-cpu-clk-unhalted.core_p"),
	EV_ALIAS(NULL, NULL)
};

/* Later Intel CPU models reuse the Core2 alias tables unchanged. */
#define	atom_aliases			core2_aliases
#define	atom_aliases_without_iaf	core2_aliases_without_iaf
#define	corei7_aliases			core2_aliases
#define	corei7_aliases_without_iaf	core2_aliases_without_iaf
#define	ivybridge_aliases		core2_aliases
#define	ivybridge_aliases_without_iaf	core2_aliases_without_iaf
#define	sandybridge_aliases		core2_aliases
#define	sandybridge_aliases_without_iaf	core2_aliases_without_iaf
#define	sandybridge_xeon_aliases	core2_aliases
#define	sandybridge_xeon_aliases_without_iaf	core2_aliases_without_iaf
#define	westmere_aliases		core2_aliases
#define	westmere_aliases_without_iaf	core2_aliases_without_iaf

/* Keywords accepted by the IAF (fixed-function) parser. */
#define	IAF_KW_OS		"os"
#define	IAF_KW_USR		"usr"
#define	IAF_KW_ANYTHREAD	"anythread"

/*
 * Parse an event specifier for Intel fixed function counters.
 */
static int
iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;

	/* Accept only 'os', 'usr' and 'anythread' qualifiers. */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, IAF_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, IAF_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, IAF_KW_ANYTHREAD))
			pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
		else
			return (-1);
	}

	return (0);
}

/*
 * Core/Core2 support.
 */

/* Keywords accepted by the IAP (programmable counter) parser. */
#define	IAP_KW_AGENT		"agent"
#define	IAP_KW_ANYTHREAD	"anythread"
#define	IAP_KW_CACHESTATE	"cachestate"
#define	IAP_KW_CMASK		"cmask"
#define	IAP_KW_CORE		"core"
#define	IAP_KW_EDGE		"edge"
#define	IAP_KW_INV		"inv"
#define	IAP_KW_OS		"os"
#define	IAP_KW_PREFETCH		"prefetch"
#define	IAP_KW_SNOOPRESPONSE	"snoopresponse"
#define	IAP_KW_SNOOPTYPE	"snooptype"
#define	IAP_KW_TRANSITION	"trans"
#define	IAP_KW_USR		"usr"
#define	IAP_KW_RSP		"rsp"

/* Keyword tables for the 'kw=value' qualifiers, fed to pmc_parse_mask(). */
static struct pmc_masks iap_core_mask[] = {
	PMCMASK(all,	(0x3 << 14)),
	PMCMASK(this,	(0x1 << 14)),
	NULLMASK
};

static struct pmc_masks iap_agent_mask[] = {
	PMCMASK(this,	0),
	PMCMASK(any,	(0x1 << 13)),
	NULLMASK
};

static struct pmc_masks iap_prefetch_mask[] = {
	PMCMASK(both,		(0x3 << 12)),
	PMCMASK(only,		(0x1 << 12)),
	PMCMASK(exclude,	0),
	NULLMASK
};

static struct pmc_masks iap_cachestate_mask[] = {
	PMCMASK(i,	(1 <<  8)),
	PMCMASK(s,	(1 <<  9)),
	PMCMASK(e,	(1 << 10)),
	PMCMASK(m,	(1 << 11)),
	NULLMASK
};

static struct pmc_masks iap_snoopresponse_mask[] = {
	PMCMASK(clean,	(1 << 8)),
	PMCMASK(hit,	(1 << 9)),
	PMCMASK(hitm,	(1 << 11)),
	NULLMASK
};

static struct pmc_masks iap_snooptype_mask[] = {
	PMCMASK(cmp2s,	(1 << 8)),
	PMCMASK(cmp2i,	(1 << 9)),
	NULLMASK
};

static struct pmc_masks iap_transition_mask[] = {
	PMCMASK(any,		0x00),
	PMCMASK(frequency,	0x10),
	NULLMASK
};

/* Offcore response qualifiers for Core i7 / Westmere. */
static struct pmc_masks iap_rsp_mask_i7_wm[] = {
	PMCMASK(DMND_DATA_RD,		(1 <<  0)),
	PMCMASK(DMND_RFO,		(1 <<  1)),
	PMCMASK(DMND_IFETCH,		(1 <<  2)),
	PMCMASK(WB,			(1 <<  3)),
	PMCMASK(PF_DATA_RD,		(1 <<  4)),
	PMCMASK(PF_RFO,			(1 <<  5)),
	PMCMASK(PF_IFETCH,		(1 <<  6)),
	PMCMASK(OTHER,			(1 <<  7)),
	PMCMASK(UNCORE_HIT,		(1 <<  8)),
	PMCMASK(OTHER_CORE_HIT_SNP,	(1 <<  9)),
	PMCMASK(OTHER_CORE_HITM,	(1 << 10)),
	PMCMASK(REMOTE_CACHE_FWD,	(1 << 12)),
	PMCMASK(REMOTE_DRAM,		(1 << 13)),
	PMCMASK(LOCAL_DRAM,		(1 << 14)),
	PMCMASK(NON_DRAM,		(1 << 15)),
	NULLMASK
};

/* Offcore response qualifiers for Sandy Bridge / SNB Xeon / Ivy Bridge. */
static struct pmc_masks iap_rsp_mask_sb_sbx_ib[] = {
	PMCMASK(REQ_DMND_DATA_RD,	(1ULL <<  0)),
	PMCMASK(REQ_DMND_RFO,		(1ULL <<  1)),
	PMCMASK(REQ_DMND_IFETCH,	(1ULL <<  2)),
	PMCMASK(REQ_WB,			(1ULL <<  3)),
	PMCMASK(REQ_PF_DATA_RD,		(1ULL <<  4)),
	PMCMASK(REQ_PF_RFO,		(1ULL <<  5)),
	PMCMASK(REQ_PF_IFETCH,		(1ULL <<  6)),
	PMCMASK(REQ_PF_LLC_DATA_RD,	(1ULL <<  7)),
	PMCMASK(REQ_PF_LLC_RFO,		(1ULL <<  8)),
	PMCMASK(REQ_PF_LLC_IFETCH,	(1ULL <<  9)),
	PMCMASK(REQ_BUS_LOCKS,		(1ULL << 10)),
	PMCMASK(REQ_STRM_ST,		(1ULL << 11)),
	PMCMASK(REQ_OTHER,		(1ULL << 15)),
	PMCMASK(RES_ANY,		(1ULL << 16)),
	PMCMASK(RES_SUPPLIER_SUPP,	(1ULL << 17)),
	PMCMASK(RES_SUPPLIER_LLC_HITM,	(1ULL << 18)),
	PMCMASK(RES_SUPPLIER_LLC_HITE,	(1ULL << 19)),
	PMCMASK(RES_SUPPLIER_LLC_HITS,	(1ULL << 20)),
	PMCMASK(RES_SUPPLIER_LLC_HITF,	(1ULL << 21)),
	PMCMASK(RES_SUPPLIER_LOCAL,	(1ULL << 22)),
	PMCMASK(RES_SNOOP_SNPI_NONE,	(1ULL << 31)),
	PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
	PMCMASK(RES_SNOOP_SNP_MISS,	(1ULL << 33)),
	PMCMASK(RES_SNOOP_HIT_NO_FWD,	(1ULL << 34)),
	PMCMASK(RES_SNOOP_HIT_FWD,	(1ULL << 35)),
	PMCMASK(RES_SNOOP_HITM,		(1ULL << 36)),
	PMCMASK(RES_NON_DRAM,		(1ULL << 37)),
	NULLMASK
};

/*
 * Parse the qualifier list 'ctrspec' for programmable (IAP) event
 * 'pe' and fill in *pmc_config.  Which qualifiers are accepted
 * depends on cpu_info.pm_cputype.  Returns 0 on success, -1 on a
 * malformed or inapplicable specification.
 */
static int
iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint64_t cachestate, evmask, rsp;
	int count, n;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
	    PMC_CAP_QUALIFIER);
	pmc_config->pm_md.pm_iap.pm_iap_config = 0;

	cachestate = evmask = rsp = 0;

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {

		n = 0;
		if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
			/* "cmask=N": event threshold. */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_iap.pm_iap_config |=
			    IAP_CMASK(count);
		} else if (KWMATCH(p, IAP_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, IAP_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, IAP_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, IAP_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
			pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
		} else if (KWPREFIXMATCH(p, IAP_KW_CORE "=")) {
			n = pmc_parse_mask(iap_core_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_AGENT "=")) {
			n = pmc_parse_mask(iap_agent_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_PREFETCH "=")) {
			n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_CACHESTATE "=")) {
			/*
			 * 'cachestate' may combine several states; the
			 * shared "n < 0" check below catches parse errors.
			 */
			n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
		    KWPREFIXMATCH(p, IAP_KW_TRANSITION "=")) {
			n = pmc_parse_mask(iap_transition_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME) {
			if (KWPREFIXMATCH(p, IAP_KW_SNOOPRESPONSE "=")) {
				n = pmc_parse_mask(iap_snoopresponse_mask, p,
				    &evmask);
			} else if (KWPREFIXMATCH(p, IAP_KW_SNOOPTYPE "=")) {
				n = pmc_parse_mask(iap_snooptype_mask, p,
				    &evmask);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_COREI7 ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_i7_wm, p, &rsp);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE_XEON ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_sb_sbx_ib, p, &rsp);
			} else
				return (-1);
		} else
			return (-1);

		if (n < 0)	/* Parsing failed. */
			return (-1);
	}

	pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;

	/*
	 * If the event requires a 'cachestate' qualifier but was not
	 * specified by the user, use a sensible default.
	 */
	switch (pe) {
	case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
	case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_32H: /* Core */
	case PMC_EV_IAP_EVENT_40H: /* Core */
	case PMC_EV_IAP_EVENT_41H: /* Core */
	case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
		if (cachestate == 0)
			cachestate = (0xF << 8); /* all of MESI */
		break;
	case PMC_EV_IAP_EVENT_77H: /* Atom */
		/*
		 * IAP_EVENT_77H only accepts a cachestate qualifier on the
		 * Atom processor
		 */
		if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM && cachestate == 0)
			cachestate = (0xF << 8);
		break;
	default:
		break;
	}

	pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
	pmc_config->pm_md.pm_iap.pm_iap_rsp = rsp;

	return (0);
}

/*
 * Intel Uncore.
 */

/*
 * Uncore fixed-function counters take no qualifiers; just request
 * read/write capability.  Always succeeds.
 */
static int
ucf_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	(void) pe;
	(void) ctrspec;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_ucf.pm_ucf_flags = 0;

	return (0);
}

/* Keywords accepted by the UCP (uncore programmable) parser. */
#define	UCP_KW_CMASK	"cmask"
#define	UCP_KW_EDGE	"edge"
#define	UCP_KW_INV	"inv"

/*
 * Parse the qualifier list 'ctrspec' for an uncore programmable
 * counter and fill in *pmc_config.  Returns 0 on success, -1 on a
 * malformed specification.
 */
static int
ucp_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	int count, n;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
	    PMC_CAP_QUALIFIER);
	pmc_config->pm_md.pm_ucp.pm_ucp_config = 0;

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {

		n = 0;
		if (KWPREFIXMATCH(p, UCP_KW_CMASK "=")) {
			/* "cmask=N": event threshold. */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ucp.pm_ucp_config |=
			    UCP_CMASK(count);
		} else if (KWMATCH(p, UCP_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, UCP_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else
			return (-1);

		/*
		 * NOTE(review): 'n' is never set negative above (no
		 * pmc_parse_mask() calls here), so this check mirrors
		 * the IAP parser but is currently vestigial.
		 */
		if (n < 0)	/* Parsing failed. */
			return (-1);
	}

	return (0);
}

/*
 * AMD K8 PMCs.
 *
 * These are very similar to AMD K7 PMCs, but support more kinds of
 * events.
 */

static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k8-dc-miss"),
	EV_ALIAS("ic-misses",		"k8-ic-miss"),
	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};

/* Shorthand: K8 unit-mask entry with value (1 << V). */
#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops,	0),
	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
	__K8MASK(store-pipe-excluding-junk-ops,	2),
	__K8MASK(add-pipe-junk-ops,		3),
	__K8MASK(multiply-pipe-junk-ops,	4),
	__K8MASK(store-pipe-junk-ops,		5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es,	0),
	__K8MASK(cs,	1),
	__K8MASK(ss,	2),
	__K8MASK(ds,	3),
	__K8MASK(fs,	4),
	__K8MASK(gs,	5),
	__K8MASK(hs,	6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions,	0),
	__K8MASK(cycles-in-request,	1),
	__K8MASK(cycles-to-complete,	2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid,	0),
	__K8MASK(shared,	1),
	__K8MASK(exclusive,	2),
	__K8MASK(owner,		3),
	__K8MASK(modified,	4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber,	0),
	__K8MASK(piggyback,	1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load,	0),
	__K8MASK(store,	1),
	__K8MASK(nta,	2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses,	0),
	__K8MASK(misses,	1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	__K8MASK(tag-snoop,	3),
	__K8MASK(cancelled,	4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim,	0),
	__K8MASK(victim-from-l2,	1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87,			0),
	__K8MASK(mmx-3dnow,		1),
	__K8MASK(packed-sse-sse2,	2),
	__K8MASK(scalar-sse-sse2,	3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0,		0),
	__K8MASK(low-op-pos-1,		1),
	__K8MASK(low-op-pos-2,		2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults,	0),
	__K8MASK(sse-retype-microfaults,	1),
	__K8MASK(sse-reclass-microfaults,	2),
	__K8MASK(sse-and-x87-microtraps,	3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit,	0),
	__K8MASK(page-miss,	1),
	__K8MASK(page-conflict,	2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
__K8MASK(dimm-turnaround, 0), 1072 __K8MASK(read-to-write-turnaround, 1), 1073 __K8MASK(write-to-read-turnaround, 2), 1074 NULLMASK 1075 }; 1076 1077 /* nb memory controller bypass saturation */ 1078 static const struct pmc_masks k8_mask_nmcbs[] = { 1079 __K8MASK(memory-controller-hi-pri-bypass, 0), 1080 __K8MASK(memory-controller-lo-pri-bypass, 1), 1081 __K8MASK(dram-controller-interface-bypass, 2), 1082 __K8MASK(dram-controller-queue-bypass, 3), 1083 NULLMASK 1084 }; 1085 1086 /* nb sized commands */ 1087 static const struct pmc_masks k8_mask_nsc[] = { 1088 __K8MASK(nonpostwrszbyte, 0), 1089 __K8MASK(nonpostwrszdword, 1), 1090 __K8MASK(postwrszbyte, 2), 1091 __K8MASK(postwrszdword, 3), 1092 __K8MASK(rdszbyte, 4), 1093 __K8MASK(rdszdword, 5), 1094 __K8MASK(rdmodwr, 6), 1095 NULLMASK 1096 }; 1097 1098 /* nb probe result */ 1099 static const struct pmc_masks k8_mask_npr[] = { 1100 __K8MASK(probe-miss, 0), 1101 __K8MASK(probe-hit, 1), 1102 __K8MASK(probe-hit-dirty-no-memory-cancel, 2), 1103 __K8MASK(probe-hit-dirty-with-memory-cancel, 3), 1104 NULLMASK 1105 }; 1106 1107 /* nb hypertransport bus bandwidth */ 1108 static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */ 1109 __K8MASK(command, 0), 1110 __K8MASK(data, 1), 1111 __K8MASK(buffer-release, 2), 1112 __K8MASK(nop, 3), 1113 NULLMASK 1114 }; 1115 1116 #undef __K8MASK 1117 1118 #define K8_KW_COUNT "count" 1119 #define K8_KW_EDGE "edge" 1120 #define K8_KW_INV "inv" 1121 #define K8_KW_MASK "mask" 1122 #define K8_KW_OS "os" 1123 #define K8_KW_USR "usr" 1124 1125 static int 1126 k8_allocate_pmc(enum pmc_event pe, char *ctrspec, 1127 struct pmc_op_pmcallocate *pmc_config) 1128 { 1129 char *e, *p, *q; 1130 int n; 1131 uint32_t count; 1132 uint64_t evmask; 1133 const struct pmc_masks *pm, *pmask; 1134 1135 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 1136 pmc_config->pm_md.pm_amd.pm_amd_config = 0; 1137 1138 pmask = NULL; 1139 evmask = 0; 1140 1141 #define __K8SETMASK(M) pmask = k8_mask_##M 1142 
	/* setup parsing tables */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
		__K8SETMASK(fdfo);
		break;
	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
		__K8SETMASK(lsrl);
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		__K8SETMASK(llo);
		break;
	case PMC_EV_K8_DC_REFILL_FROM_L2:
	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
	case PMC_EV_K8_DC_COPYBACK:
		__K8SETMASK(dc);
		break;
	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
		__K8SETMASK(dobee);
		break;
	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
		__K8SETMASK(ddpi);
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		__K8SETMASK(dabl);
		break;
	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
		__K8SETMASK(bilr);
		break;
	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
		__K8SETMASK(bfrlm);
		break;
	case PMC_EV_K8_BU_FILL_INTO_L2:
		__K8SETMASK(bfil);
		break;
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
		__K8SETMASK(frfi);
		break;
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
		__K8SETMASK(frfdoi);
		break;
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		__K8SETMASK(ffe);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
		__K8SETMASK(nmcpae);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
		__K8SETMASK(nmct);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
		__K8SETMASK(nmcbs);
		break;
	case PMC_EV_K8_NB_SIZED_COMMANDS:
		__K8SETMASK(nsc);
		break;
	case PMC_EV_K8_NB_PROBE_RESULT:
		__K8SETMASK(npr);
		break;
	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
		__K8SETMASK(nhbb);
		break;

	default:
		break;		/* no options defined */
	}

	/* Consume the comma-separated keyword list. */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K8_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K8_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, K8_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, K8_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* other post processing */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		/* XXX only available in rev B and later */
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		/* XXX only available in rev C and later */
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		/* XXX CPU Rev A,B evmask is to be zero */
		if (evmask & (evmask - 1)) /* > 1 bit set */
			return (-1);
		if (evmask == 0) {
			evmask = 0x01; /* Rev C and later: #instrs */
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
		break;
	default:
		/* Default unit mask: OR of every bit the event defines. */
		if (evmask == 0 && pmask != NULL) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
	}

	/*
	 * NOTE(review): plain '=' here overwrites pm_amd_config,
	 * including the AMD_PMC_TO_COUNTER() bits set by "count=" above
	 * — confirm whether '|=' was intended.
	 */
	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_amd.pm_amd_config =
		    AMD_PMC_TO_UNITMASK(evmask);

	return (0);
}

#endif

#if defined(__amd64__) || defined(__i386__)

/*
 * Intel P4 PMCs
 */

/* Alias table mapping generic event names to P4 event specifiers. */
static struct pmc_event_alias p4_aliases[] = {
	EV_ALIAS("branches",		"p4-branch-retired,mask=mmtp+mmtm"),
	EV_ALIAS("branch-mispredicts",	"p4-mispred-branch-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("instructions",
	    "p4-instr-retired,mask=nbogusntag+nbogustag"),
	EV_ALIAS("unhalted-cycles",	"p4-global-power-events"),
	EV_ALIAS(NULL, NULL)
};

/* Keywords accepted in a P4 counter specification string. */
#define	P4_KW_ACTIVE	"active"
#define	P4_KW_ACTIVE_ANY "any"
#define	P4_KW_ACTIVE_BOTH "both"
#define	P4_KW_ACTIVE_NONE "none"
#define	P4_KW_ACTIVE_SINGLE "single"
#define	P4_KW_BUSREQTYPE "busreqtype"
#define	P4_KW_CASCADE	"cascade"
#define	P4_KW_EDGE	"edge"
#define	P4_KW_INV	"complement"
#define	P4_KW_OS	"os"
#define	P4_KW_MASK	"mask"
#define	P4_KW_PRECISE	"precise"
#define	P4_KW_TAG	"tag"
#define	P4_KW_THRESHOLD	"threshold"
#define	P4_KW_USR	"usr"

/* Expand to a pmc_masks entry named N whose value is bit 'V'. */
#define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))

static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
	__P4MASK(dd, 0),
	__P4MASK(db, 1),
	__P4MASK(di, 2),
	__P4MASK(bd, 3),
	__P4MASK(bb, 4),
	__P4MASK(bi, 5),
	__P4MASK(id, 6),
	__P4MASK(ib, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
	__P4MASK(tcmiss, 0),
	NULLMASK,
};

static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
	__P4MASK(hit, 0),
	__P4MASK(miss, 1),
	__P4MASK(hit-uc, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
	__P4MASK(st-rb-full, 2),
	__P4MASK(64k-conf, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
	__P4MASK(lsc, 0),
	__P4MASK(ssc, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
	__P4MASK(split-ld, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
	__P4MASK(split-st, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
	__P4MASK(no-sta, 1),
	__P4MASK(no-std, 3),
	__P4MASK(partial-data, 4),
	__P4MASK(unalgn-addr, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
	__P4MASK(dtmiss, 0),
	__P4MASK(itmiss, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
	__P4MASK(rd-2ndl-hits, 0),
	__P4MASK(rd-2ndl-hite, 1),
	__P4MASK(rd-2ndl-hitm, 2),
	__P4MASK(rd-3rdl-hits, 3),
	__P4MASK(rd-3rdl-hite, 4),
	__P4MASK(rd-3rdl-hitm, 5),
	__P4MASK(rd-2ndl-miss, 8),
	__P4MASK(rd-3rdl-miss, 9),
	__P4MASK(wr-2ndl-miss, 10),
	NULLMASK
};

static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
	__P4MASK(drdy-drv, 0),
	__P4MASK(drdy-own, 1),
	__P4MASK(drdy-other, 2),
	__P4MASK(dbsy-drv, 3),
	__P4MASK(dbsy-own, 4),
	__P4MASK(dbsy-other, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
	__P4MASK(req-type0, 0),
	__P4MASK(req-type1, 1),
	__P4MASK(req-len0, 2),
	__P4MASK(req-len1, 3),
	__P4MASK(req-io-type, 5),
	__P4MASK(req-lock-type, 6),
	__P4MASK(req-cache-type, 7),
	__P4MASK(req-split-type, 8),
	__P4MASK(req-dem-type, 9),
	__P4MASK(req-ord-type, 10),
	__P4MASK(mem-type0, 11),
	__P4MASK(mem-type1, 12),
	__P4MASK(mem-type2, 13),
	NULLMASK
};

static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
	__P4MASK(allp0, 3),
	__P4MASK(allp2, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
	__P4MASK(running, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
	__P4MASK(cisc, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
	__P4MASK(from-tc-build, 0),
	__P4MASK(from-tc-deliver, 1),
	__P4MASK(from-rom, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_rmbt[] = {
	/* retired mispred branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	__P4MASK(return, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	__P4MASK(retired, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
	__P4MASK(sbfull, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
	__P4MASK(wcb-evicts, 0),
	__P4MASK(wcb-full-evict, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_fee[] = { /* front end event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ee[] = { /* execution event */
	__P4MASK(nbogus0, 0),
	__P4MASK(nbogus1, 1),
	__P4MASK(nbogus2, 2),
	__P4MASK(nbogus3, 3),
	__P4MASK(bogus0, 4),
	__P4MASK(bogus1, 5),
	__P4MASK(bogus2, 6),
	__P4MASK(bogus3, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_re[] = { /* replay event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
	__P4MASK(nbogusntag, 0),
	__P4MASK(nbogustag, 1),
	__P4MASK(bogusntag, 2),
	__P4MASK(bogustag, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ut[] = { /* uop type */
	__P4MASK(tagloads, 1),
	__P4MASK(tagstores, 2),
	NULLMASK
};
1576 1577 static const struct pmc_masks p4_mask_br[] = { /* branch retired */ 1578 __P4MASK(mmnp, 0), 1579 __P4MASK(mmnm, 1), 1580 __P4MASK(mmtp, 2), 1581 __P4MASK(mmtm, 3), 1582 NULLMASK 1583 }; 1584 1585 static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */ 1586 __P4MASK(nbogus, 0), 1587 NULLMASK 1588 }; 1589 1590 static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */ 1591 __P4MASK(fpsu, 0), 1592 __P4MASK(fpso, 1), 1593 __P4MASK(poao, 2), 1594 __P4MASK(poau, 3), 1595 __P4MASK(prea, 4), 1596 NULLMASK 1597 }; 1598 1599 static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */ 1600 __P4MASK(clear, 0), 1601 __P4MASK(moclear, 2), 1602 __P4MASK(smclear, 3), 1603 NULLMASK 1604 }; 1605 1606 /* P4 event parser */ 1607 static int 1608 p4_allocate_pmc(enum pmc_event pe, char *ctrspec, 1609 struct pmc_op_pmcallocate *pmc_config) 1610 { 1611 1612 char *e, *p, *q; 1613 int count, has_tag, has_busreqtype, n; 1614 uint32_t cccractivemask; 1615 uint64_t evmask; 1616 const struct pmc_masks *pm, *pmask; 1617 1618 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 1619 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig = 1620 pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0; 1621 1622 pmask = NULL; 1623 evmask = 0; 1624 cccractivemask = 0x3; 1625 has_tag = has_busreqtype = 0; 1626 1627 #define __P4SETMASK(M) do { \ 1628 pmask = p4_mask_##M; \ 1629 } while (0) 1630 1631 switch (pe) { 1632 case PMC_EV_P4_TC_DELIVER_MODE: 1633 __P4SETMASK(tcdm); 1634 break; 1635 case PMC_EV_P4_BPU_FETCH_REQUEST: 1636 __P4SETMASK(bfr); 1637 break; 1638 case PMC_EV_P4_ITLB_REFERENCE: 1639 __P4SETMASK(ir); 1640 break; 1641 case PMC_EV_P4_MEMORY_CANCEL: 1642 __P4SETMASK(memcan); 1643 break; 1644 case PMC_EV_P4_MEMORY_COMPLETE: 1645 __P4SETMASK(memcomp); 1646 break; 1647 case PMC_EV_P4_LOAD_PORT_REPLAY: 1648 __P4SETMASK(lpr); 1649 break; 1650 case PMC_EV_P4_STORE_PORT_REPLAY: 1651 __P4SETMASK(spr); 1652 break; 1653 case PMC_EV_P4_MOB_LOAD_REPLAY: 1654 __P4SETMASK(mlr); 
1655 break; 1656 case PMC_EV_P4_PAGE_WALK_TYPE: 1657 __P4SETMASK(pwt); 1658 break; 1659 case PMC_EV_P4_BSQ_CACHE_REFERENCE: 1660 __P4SETMASK(bcr); 1661 break; 1662 case PMC_EV_P4_IOQ_ALLOCATION: 1663 __P4SETMASK(ia); 1664 has_busreqtype = 1; 1665 break; 1666 case PMC_EV_P4_IOQ_ACTIVE_ENTRIES: 1667 __P4SETMASK(iae); 1668 has_busreqtype = 1; 1669 break; 1670 case PMC_EV_P4_FSB_DATA_ACTIVITY: 1671 __P4SETMASK(fda); 1672 break; 1673 case PMC_EV_P4_BSQ_ALLOCATION: 1674 __P4SETMASK(ba); 1675 break; 1676 case PMC_EV_P4_SSE_INPUT_ASSIST: 1677 __P4SETMASK(sia); 1678 break; 1679 case PMC_EV_P4_PACKED_SP_UOP: 1680 __P4SETMASK(psu); 1681 break; 1682 case PMC_EV_P4_PACKED_DP_UOP: 1683 __P4SETMASK(pdu); 1684 break; 1685 case PMC_EV_P4_SCALAR_SP_UOP: 1686 __P4SETMASK(ssu); 1687 break; 1688 case PMC_EV_P4_SCALAR_DP_UOP: 1689 __P4SETMASK(sdu); 1690 break; 1691 case PMC_EV_P4_64BIT_MMX_UOP: 1692 __P4SETMASK(64bmu); 1693 break; 1694 case PMC_EV_P4_128BIT_MMX_UOP: 1695 __P4SETMASK(128bmu); 1696 break; 1697 case PMC_EV_P4_X87_FP_UOP: 1698 __P4SETMASK(xfu); 1699 break; 1700 case PMC_EV_P4_X87_SIMD_MOVES_UOP: 1701 __P4SETMASK(xsmu); 1702 break; 1703 case PMC_EV_P4_GLOBAL_POWER_EVENTS: 1704 __P4SETMASK(gpe); 1705 break; 1706 case PMC_EV_P4_TC_MS_XFER: 1707 __P4SETMASK(tmx); 1708 break; 1709 case PMC_EV_P4_UOP_QUEUE_WRITES: 1710 __P4SETMASK(uqw); 1711 break; 1712 case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE: 1713 __P4SETMASK(rmbt); 1714 break; 1715 case PMC_EV_P4_RETIRED_BRANCH_TYPE: 1716 __P4SETMASK(rbt); 1717 break; 1718 case PMC_EV_P4_RESOURCE_STALL: 1719 __P4SETMASK(rs); 1720 break; 1721 case PMC_EV_P4_WC_BUFFER: 1722 __P4SETMASK(wb); 1723 break; 1724 case PMC_EV_P4_BSQ_ACTIVE_ENTRIES: 1725 case PMC_EV_P4_B2B_CYCLES: 1726 case PMC_EV_P4_BNR: 1727 case PMC_EV_P4_SNOOP: 1728 case PMC_EV_P4_RESPONSE: 1729 break; 1730 case PMC_EV_P4_FRONT_END_EVENT: 1731 __P4SETMASK(fee); 1732 break; 1733 case PMC_EV_P4_EXECUTION_EVENT: 1734 __P4SETMASK(ee); 1735 break; 1736 case PMC_EV_P4_REPLAY_EVENT: 1737 
__P4SETMASK(re); 1738 break; 1739 case PMC_EV_P4_INSTR_RETIRED: 1740 __P4SETMASK(insret); 1741 break; 1742 case PMC_EV_P4_UOPS_RETIRED: 1743 __P4SETMASK(ur); 1744 break; 1745 case PMC_EV_P4_UOP_TYPE: 1746 __P4SETMASK(ut); 1747 break; 1748 case PMC_EV_P4_BRANCH_RETIRED: 1749 __P4SETMASK(br); 1750 break; 1751 case PMC_EV_P4_MISPRED_BRANCH_RETIRED: 1752 __P4SETMASK(mbr); 1753 break; 1754 case PMC_EV_P4_X87_ASSIST: 1755 __P4SETMASK(xa); 1756 break; 1757 case PMC_EV_P4_MACHINE_CLEAR: 1758 __P4SETMASK(machclr); 1759 break; 1760 default: 1761 return (-1); 1762 } 1763 1764 /* process additional flags */ 1765 while ((p = strsep(&ctrspec, ",")) != NULL) { 1766 if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) { 1767 q = strchr(p, '='); 1768 if (*++q == '\0') /* skip '=' */ 1769 return (-1); 1770 1771 if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0) 1772 cccractivemask = 0x0; 1773 else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0) 1774 cccractivemask = 0x1; 1775 else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0) 1776 cccractivemask = 0x2; 1777 else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0) 1778 cccractivemask = 0x3; 1779 else 1780 return (-1); 1781 1782 } else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) { 1783 if (has_busreqtype == 0) 1784 return (-1); 1785 1786 q = strchr(p, '='); 1787 if (*++q == '\0') /* skip '=' */ 1788 return (-1); 1789 1790 count = strtol(q, &e, 0); 1791 if (e == q || *e != '\0') 1792 return (-1); 1793 evmask = (evmask & ~0x1F) | (count & 0x1F); 1794 } else if (KWMATCH(p, P4_KW_CASCADE)) 1795 pmc_config->pm_caps |= PMC_CAP_CASCADE; 1796 else if (KWMATCH(p, P4_KW_EDGE)) 1797 pmc_config->pm_caps |= PMC_CAP_EDGE; 1798 else if (KWMATCH(p, P4_KW_INV)) 1799 pmc_config->pm_caps |= PMC_CAP_INVERT; 1800 else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) { 1801 if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0) 1802 return (-1); 1803 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 1804 } else if (KWMATCH(p, P4_KW_OS)) 1805 pmc_config->pm_caps |= PMC_CAP_SYSTEM; 1806 else if (KWMATCH(p, P4_KW_PRECISE)) 1807 
pmc_config->pm_caps |= PMC_CAP_PRECISE; 1808 else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) { 1809 if (has_tag == 0) 1810 return (-1); 1811 1812 q = strchr(p, '='); 1813 if (*++q == '\0') /* skip '=' */ 1814 return (-1); 1815 1816 count = strtol(q, &e, 0); 1817 if (e == q || *e != '\0') 1818 return (-1); 1819 1820 pmc_config->pm_caps |= PMC_CAP_TAGGING; 1821 pmc_config->pm_md.pm_p4.pm_p4_escrconfig |= 1822 P4_ESCR_TO_TAG_VALUE(count); 1823 } else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) { 1824 q = strchr(p, '='); 1825 if (*++q == '\0') /* skip '=' */ 1826 return (-1); 1827 1828 count = strtol(q, &e, 0); 1829 if (e == q || *e != '\0') 1830 return (-1); 1831 1832 pmc_config->pm_caps |= PMC_CAP_THRESHOLD; 1833 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &= 1834 ~P4_CCCR_THRESHOLD_MASK; 1835 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |= 1836 P4_CCCR_TO_THRESHOLD(count); 1837 } else if (KWMATCH(p, P4_KW_USR)) 1838 pmc_config->pm_caps |= PMC_CAP_USER; 1839 else 1840 return (-1); 1841 } 1842 1843 /* other post processing */ 1844 if (pe == PMC_EV_P4_IOQ_ALLOCATION || 1845 pe == PMC_EV_P4_FSB_DATA_ACTIVITY || 1846 pe == PMC_EV_P4_BSQ_ALLOCATION) 1847 pmc_config->pm_caps |= PMC_CAP_EDGE; 1848 1849 /* fill in thread activity mask */ 1850 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |= 1851 P4_CCCR_TO_ACTIVE_THREAD(cccractivemask); 1852 1853 if (evmask) 1854 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 1855 1856 switch (pe) { 1857 case PMC_EV_P4_FSB_DATA_ACTIVITY: 1858 if ((evmask & 0x06) == 0x06 || 1859 (evmask & 0x18) == 0x18) 1860 return (-1); /* can't have own+other bits together */ 1861 if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */ 1862 evmask = 0x1D; 1863 break; 1864 case PMC_EV_P4_MACHINE_CLEAR: 1865 /* only one bit is allowed to be set */ 1866 if ((evmask & (evmask - 1)) != 0) 1867 return (-1); 1868 if (evmask == 0) { 1869 evmask = 0x1; /* 'CLEAR' */ 1870 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 1871 } 1872 break; 1873 default: 1874 if (evmask == 0 && pmask) { 1875 for 
(pm = pmask; pm->pm_name; pm++) 1876 evmask |= pm->pm_value; 1877 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 1878 } 1879 } 1880 1881 pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 1882 P4_ESCR_TO_EVENT_MASK(evmask); 1883 1884 return (0); 1885 } 1886 1887 #endif 1888 1889 #if defined(__i386__) 1890 1891 /* 1892 * Pentium style PMCs 1893 */ 1894 1895 static struct pmc_event_alias p5_aliases[] = { 1896 EV_ALIAS("branches", "p5-taken-branches"), 1897 EV_ALIAS("cycles", "tsc"), 1898 EV_ALIAS("dc-misses", "p5-data-read-miss-or-write-miss"), 1899 EV_ALIAS("ic-misses", "p5-code-cache-miss"), 1900 EV_ALIAS("instructions", "p5-instructions-executed"), 1901 EV_ALIAS("interrupts", "p5-hardware-interrupts"), 1902 EV_ALIAS("unhalted-cycles", 1903 "p5-number-of-cycles-not-in-halt-state"), 1904 EV_ALIAS(NULL, NULL) 1905 }; 1906 1907 static int 1908 p5_allocate_pmc(enum pmc_event pe, char *ctrspec, 1909 struct pmc_op_pmcallocate *pmc_config) 1910 { 1911 return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */ 1912 } 1913 1914 /* 1915 * Pentium Pro style PMCs. These PMCs are found in Pentium II, Pentium III, 1916 * and Pentium M CPUs. 
 */

/* Alias table mapping generic event names to P6 event specifiers. */
static struct pmc_event_alias p6_aliases[] = {
	EV_ALIAS("branches",		"p6-br-inst-retired"),
	EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p6-dcu-lines-in"),
	EV_ALIAS("ic-misses",		"p6-ifu-fetch-miss"),
	EV_ALIAS("instructions",	"p6-inst-retired"),
	EV_ALIAS("interrupts",		"p6-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"p6-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};

/* Keywords accepted in a P6 counter specification string. */
#define	P6_KW_CMASK	"cmask"
#define	P6_KW_EDGE	"edge"
#define	P6_KW_INV	"inv"
#define	P6_KW_OS	"os"
#define	P6_KW_UMASK	"umask"
#define	P6_KW_USR	"usr"

static struct pmc_masks p6_mask_mesi[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	NULLMASK
};

static struct pmc_masks p6_mask_mesihw[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};

static struct pmc_masks p6_mask_hw[] = {
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};

static struct pmc_masks p6_mask_any[] = {
	PMCMASK(self,	0x00),
	PMCMASK(any,	0x20),
	NULLMASK
};

static struct pmc_masks p6_mask_ekp[] = {
	PMCMASK(nta,	0x00),
	PMCMASK(t1,	0x01),
	PMCMASK(t2,	0x02),
	PMCMASK(wos,	0x03),
	NULLMASK
};

static struct pmc_masks p6_mask_pps[] = {
	PMCMASK(packed-and-scalar, 0x00),
	PMCMASK(scalar,	0x01),
	NULLMASK
};

static struct pmc_masks p6_mask_mite[] = {
	PMCMASK(packed-multiply,	0x01),
	PMCMASK(packed-shift,		0x02),
	PMCMASK(pack,			0x04),
	PMCMASK(unpack,			0x08),
	PMCMASK(packed-logical,		0x10),
	PMCMASK(packed-arithmetic,	0x20),
	NULLMASK
};

static struct pmc_masks p6_mask_fmt[] = {
	PMCMASK(mmxtofp, 0x00),
	PMCMASK(fptommx, 0x01),
	NULLMASK
};

static struct pmc_masks p6_mask_sr[] = {
	PMCMASK(es,	0x01),
	PMCMASK(ds,	0x02),
	PMCMASK(fs,	0x04),
	PMCMASK(gs,	0x08),
	NULLMASK
};

static struct pmc_masks p6_mask_eet[] = {
	PMCMASK(all,	0x00),
	PMCMASK(freq,	0x02),
	NULLMASK
};

static struct pmc_masks p6_mask_efur[] = {
	PMCMASK(all,	0x00),
	PMCMASK(loadop,	0x01),
	PMCMASK(stdsta,	0x02),
	NULLMASK
};

static struct pmc_masks p6_mask_essir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-packed-single-scalar-single, 0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};

static struct pmc_masks p6_mask_esscir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-scalar-single,	0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};

/* P6 event parser */
/*
 * Parse a comma-separated keyword list in 'ctrspec' for event 'pe',
 * filling in pm_caps and pm_md.pm_ppro.pm_ppro_config.  Returns 0 on
 * success, -1 on a malformed specification.
 */
static int
p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint64_t evmask;
	int count, n;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;

	evmask = 0;

/* Select the per-event unit-mask parsing table. */
#define	P6MASKSET(M)	pmask = p6_mask_ ## M

	switch(pe) {
	case PMC_EV_P6_L2_IFETCH:	P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:
		P6MASKSET(any);	break;
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:
		P6MASKSET(ekp); break;
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
		P6MASKSET(pps);	break;
	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
		P6MASKSET(mite); break;
	case PMC_EV_P6_FP_MMX_TRANS:
		P6MASKSET(fmt);	break;
	case PMC_EV_P6_SEG_RENAME_STALLS:
	case PMC_EV_P6_SEG_REG_RENAMES:
		P6MASKSET(sr);	break;
	case PMC_EV_P6_EMON_EST_TRANS:
		P6MASKSET(eet);	break;
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
		P6MASKSET(efur); break;
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
		P6MASKSET(essir); break;
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
		P6MASKSET(esscir); break;
	default:
		pmask = NULL;
		break;
	}

	/* Pentium M PMCs have a few events with different semantics */
	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
		if (pe == PMC_EV_P6_L2_LD ||
		    pe == PMC_EV_P6_L2_LINES_IN ||
		    pe == PMC_EV_P6_L2_LINES_OUT)
			P6MASKSET(mesihw);
		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
			P6MASKSET(hw);
	}

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
			    P6_EVSEL_TO_CMASK(count);
		} else if (KWMATCH(p, P6_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, P6_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, P6_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
			evmask = 0;
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			/*
			 * These events accept at most one unit-mask
			 * keyword; reject multi-keyword umask specs.
			 */
			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
			     pe == PMC_EV_P6_BUS_TRANS_IO ||
			     pe == PMC_EV_P6_BUS_TRANS_P ||
			     pe == PMC_EV_P6_BUS_TRANS_WB ||
			     pe == PMC_EV_P6_EMON_EST_TRANS ||
			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
			     pe == PMC_EV_P6_FP_MMX_TRANS)
			    && (n > 1))	/* Only one mask keyword is allowed. */
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P6_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* post processing */
	switch (pe) {

		/*
		 * The following events default to an evmask of 0
		 */

		/* default => 'self' */
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:

		/* default => 'nta' */
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:

		/* default => 'packed and scalar' */
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:

		/* default => 'mmx to fp transitions' */
	case PMC_EV_P6_FP_MMX_TRANS:

		/* default => 'SSE Packed Single' */
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:

		/* default => 'all fused micro-ops' */
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:

		/* default => 'all transitions' */
	case PMC_EV_P6_EMON_EST_TRANS:
		break;

	case PMC_EV_P6_MMX_UOPS_EXEC:
		evmask = 0x0F;		/* only value allowed */
		break;

	default:
		/*
		 * For all other events, set the default event mask
		 * to a logical OR of all the allowed event mask bits.
		 */
		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return (0);
}

#endif

#if defined(__i386__) || defined(__amd64__)
/*
 * TSC "event" parser: only the bare PMC_EV_TSC_TSC event, with no
 * qualifiers, is accepted; the TSC is read-only.
 */
static int
tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	if (pe != PMC_EV_TSC_TSC)
		return (-1);

	/* TSC events must be unqualified. */
	if (ctrspec && *ctrspec != '\0')
		return (-1);

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= PMC_CAP_READ;

	return (0);
}
#endif

static struct pmc_event_alias generic_aliases[] = {
	EV_ALIAS("instructions",	"SOFT-CLOCK.HARD"),
	EV_ALIAS(NULL, NULL)
};

/*
 * Software event parser: accepts any event in the software event
 * range; no qualifiers are supported.
 */
static int
soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	(void)ctrspec;
	(void)pmc_config;

	if (pe < PMC_EV_SOFT_FIRST || pe > PMC_EV_SOFT_LAST)
		return (-1);

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	return (0);
}

#if defined(__XSCALE__)

static struct pmc_event_alias xscale_aliases[] = {
	EV_ALIAS("branches",		"BRANCH_RETIRED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS("dc-misses",		"DC_MISS"),
	EV_ALIAS("ic-misses",		"IC_MISS"),
	EV_ALIAS("instructions",	"INSTR_RETIRED"),
	EV_ALIAS(NULL, NULL)
};
/*
 * XScale event parser: all events are accepted unchanged; no
 * per-event qualifiers are defined.
 */
static int
xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

#if defined(__mips__)

static struct pmc_event_alias
mips24k_aliases[] = { 2300 EV_ALIAS("instructions", "INSTR_EXECUTED"), 2301 EV_ALIAS("branches", "BRANCH_COMPLETED"), 2302 EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"), 2303 EV_ALIAS(NULL, NULL) 2304 }; 2305 2306 static struct pmc_event_alias octeon_aliases[] = { 2307 EV_ALIAS("instructions", "RET"), 2308 EV_ALIAS("branches", "BR"), 2309 EV_ALIAS("branch-mispredicts", "BRMIS"), 2310 EV_ALIAS(NULL, NULL) 2311 }; 2312 2313 #define MIPS_KW_OS "os" 2314 #define MIPS_KW_USR "usr" 2315 #define MIPS_KW_ANYTHREAD "anythread" 2316 2317 static int 2318 mips_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, 2319 struct pmc_op_pmcallocate *pmc_config __unused) 2320 { 2321 char *p; 2322 2323 (void) pe; 2324 2325 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 2326 2327 while ((p = strsep(&ctrspec, ",")) != NULL) { 2328 if (KWMATCH(p, MIPS_KW_OS)) 2329 pmc_config->pm_caps |= PMC_CAP_SYSTEM; 2330 else if (KWMATCH(p, MIPS_KW_USR)) 2331 pmc_config->pm_caps |= PMC_CAP_USER; 2332 else if (KWMATCH(p, MIPS_KW_ANYTHREAD)) 2333 pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM); 2334 else 2335 return (-1); 2336 } 2337 2338 return (0); 2339 } 2340 2341 #endif /* __mips__ */ 2342 2343 #if defined(__powerpc__) 2344 2345 static struct pmc_event_alias ppc7450_aliases[] = { 2346 EV_ALIAS("instructions", "INSTR_COMPLETED"), 2347 EV_ALIAS("branches", "BRANCHES_COMPLETED"), 2348 EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCHES"), 2349 EV_ALIAS(NULL, NULL) 2350 }; 2351 2352 #define PPC7450_KW_OS "os" 2353 #define PPC7450_KW_USR "usr" 2354 #define PPC7450_KW_ANYTHREAD "anythread" 2355 2356 static int 2357 ppc7450_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, 2358 struct pmc_op_pmcallocate *pmc_config __unused) 2359 { 2360 char *p; 2361 2362 (void) pe; 2363 2364 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 2365 2366 while ((p = strsep(&ctrspec, ",")) != NULL) { 2367 if (KWMATCH(p, PPC7450_KW_OS)) 2368 pmc_config->pm_caps |= PMC_CAP_SYSTEM; 2369 else if 
(KWMATCH(p, PPC7450_KW_USR)) 2370 pmc_config->pm_caps |= PMC_CAP_USER; 2371 else if (KWMATCH(p, PPC7450_KW_ANYTHREAD)) 2372 pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM); 2373 else 2374 return (-1); 2375 } 2376 2377 return (0); 2378 } 2379 #endif /* __powerpc__ */ 2380 2381 2382 /* 2383 * Match an event name `name' with its canonical form. 2384 * 2385 * Matches are case insensitive and spaces, periods, underscores and 2386 * hyphen characters are considered to match each other. 2387 * 2388 * Returns 1 for a match, 0 otherwise. 2389 */ 2390 2391 static int 2392 pmc_match_event_name(const char *name, const char *canonicalname) 2393 { 2394 int cc, nc; 2395 const unsigned char *c, *n; 2396 2397 c = (const unsigned char *) canonicalname; 2398 n = (const unsigned char *) name; 2399 2400 for (; (nc = *n) && (cc = *c); n++, c++) { 2401 2402 if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') && 2403 (cc == ' ' || cc == '_' || cc == '-' || cc == '.')) 2404 continue; 2405 2406 if (toupper(nc) == toupper(cc)) 2407 continue; 2408 2409 2410 return (0); 2411 } 2412 2413 if (*n == '\0' && *c == '\0') 2414 return (1); 2415 2416 return (0); 2417 } 2418 2419 /* 2420 * Match an event name against all the event named supported by a 2421 * PMC class. 2422 * 2423 * Returns an event descriptor pointer on match or NULL otherwise. 
 */
static const struct pmc_event_descr *
pmc_match_event_class(const char *name,
    const struct pmc_class_descr *pcd)
{
	size_t n;
	const struct pmc_event_descr *ev;

	/* Linear scan of the class's event table. */
	ev = pcd->pm_evc_event_table;
	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
		if (pmc_match_event_name(name, ev->pm_ev_name))
			return (ev);

	return (NULL);
}

/*
 * Check whether PMC class 'pc' is one of the classes supported by the
 * machine-dependent backend selected at pmc_init() time.  Returns 1
 * if compatible, 0 otherwise.
 */
static int
pmc_mdep_is_compatible_class(enum pmc_class pc)
{
	size_t n;

	for (n = 0; n < pmc_mdep_class_list_size; n++)
		if (pmc_mdep_class_list[n] == pc)
			return (1);
	return (0);
}

/*
 * API entry points
 */

/*
 * Allocate a PMC for counter specification 'ctrspec' in mode 'mode'
 * on CPU 'cpu'.  On success returns 0 and stores the new PMC's id in
 * '*pmcid'; on failure returns -1 with errno set.
 */
int
pmc_allocate(const char *ctrspec, enum pmc_mode mode,
    uint32_t flags, int cpu, pmc_id_t *pmcid)
{
	size_t n;
	int retval;
	char *r, *spec_copy;
	const char *ctrname;
	const struct pmc_event_descr *ev;
	const struct pmc_event_alias *alias;
	struct pmc_op_pmcallocate pmc_config;
	const struct pmc_class_descr *pcd;

	spec_copy = NULL;
	retval = -1;

	/* Only the four counting/sampling modes are valid here. */
	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
		errno = EINVAL;
		goto out;
	}

	/* replace an event alias with the canonical event specifier */
	if (pmc_mdep_event_aliases)
		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
			if (!strcasecmp(ctrspec, alias->pm_alias)) {
				spec_copy = strdup(alias->pm_spec);
				break;
			}

	/*
	 * NOTE(review): strdup() can return NULL on allocation failure;
	 * a NULL spec_copy here would be passed to strsep() below --
	 * presumably acceptable for this library, but verify.
	 */
	if (spec_copy == NULL)
		spec_copy = strdup(ctrspec);

	/* Split off the event name; 'r' keeps the qualifier tail. */
	r = spec_copy;
	ctrname = strsep(&r, ",");

	/*
	 * If an explicit class prefix was given by the user, restrict the
	 * search for the event to the specified PMC class.
	 */
	ev = NULL;
	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
		    strncasecmp(ctrname, pcd->pm_evc_name,
		    pcd->pm_evc_name_size) == 0) {
			if ((ev = pmc_match_event_class(ctrname +
			    pcd->pm_evc_name_size, pcd)) == NULL) {
				errno = EINVAL;
				goto out;
			}
			break;
		}
	}

	/*
	 * Otherwise, search for this event in all compatible PMC
	 * classes.
	 */
	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
			ev = pmc_match_event_class(ctrname, pcd);
	}

	if (ev == NULL) {
		errno = EINVAL;
		goto out;
	}

	bzero(&pmc_config, sizeof(pmc_config));
	pmc_config.pm_ev    = ev->pm_ev_code;
	pmc_config.pm_class = pcd->pm_evc_class;
	pmc_config.pm_cpu   = cpu;
	pmc_config.pm_mode  = mode;
	pmc_config.pm_flags = flags;

	/* Sampling PMCs need the interrupt capability. */
	if (PMC_IS_SAMPLING_MODE(mode))
		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;

	/* Let the class-specific parser consume the qualifier tail. */
	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
		errno = EINVAL;
		goto out;
	}

	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
		goto out;

	*pmcid = pmc_config.pm_pmcid;

	retval = 0;

 out:
	if (spec_copy)
		free(spec_copy);

	return (retval);
}

/*
 * Attach PMC 'pmc' to process 'pid'.  Thin wrapper over the
 * PMCATTACH system call.
 */
int
pmc_attach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_attach_args;

	pmc_attach_args.pm_pmc = pmc;
	pmc_attach_args.pm_pid = pid;

	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
}

/*
 * Retrieve the capability bits of the class that PMC 'pmcid' belongs
 * to, using the cpu_info snapshot taken at pmc_init() time.
 */
int
pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*caps = cpu_info.pm_classes[i].pm_caps;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

/*
 * Direct logging output to file descriptor 'fd'.  Returns 0 on
 * success, -1 on failure.
 */
int
pmc_configure_logfile(int fd)
{
	struct pmc_op_configurelog cla;

	cla.pm_logfd = fd;
	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
		return (-1);
	return (0);
}

/*
 * Return a pointer to the library's cached CPU information.  Fails
 * with ENXIO if pmc_init() has not run successfully.
 */
int
pmc_cpuinfo(const struct pmc_cpuinfo **pci)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	*pci = &cpu_info;
	return (0);
}

/* Detach PMC 'pmc' from process 'pid'. */
int
pmc_detach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_detach_args;

	pmc_detach_args.pm_pmc = pmc;
	pmc_detach_args.pm_pid = pid;
	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
}

/* Administratively disable row 'pmc' on CPU 'cpu'. */
int
pmc_disable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_DISABLED;
	return (PMC_CALL(PMCADMIN, &ssa));
}

/* Re-enable row 'pmc' on CPU 'cpu' by marking it free. */
int
pmc_enable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_FREE;
	return (PMC_CALL(PMCADMIN, &ssa));
}

/*
 * Return a list of events known to a given PMC class. 'cl' is the
 * PMC class identifier, 'eventnames' is the returned list of 'const
 * char *' pointers pointing to the names of the events. 'nevents' is
 * the number of event name pointers returned.
 *
 * The space for 'eventnames' is allocated using malloc(3). The caller
 * is responsible for freeing this space when done.
 */
int
pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
    int *nevents)
{
	int count;
	const char **names;
	const struct pmc_event_descr *ev;

	switch (cl)
	{
	case PMC_CLASS_IAF:
		ev = iaf_event_table;
		count = PMC_EVENT_TABLE_SIZE(iaf);
		break;
	case PMC_CLASS_IAP:
		/*
		 * Return the most appropriate set of event name
		 * spellings for the current CPU.
		 */
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_INTEL_ATOM:
			ev = atom_event_table;
			count = PMC_EVENT_TABLE_SIZE(atom);
			break;
		case PMC_CPU_INTEL_CORE:
			ev = core_event_table;
			count = PMC_EVENT_TABLE_SIZE(core);
			break;
		case PMC_CPU_INTEL_CORE2:
		case PMC_CPU_INTEL_CORE2EXTREME:
			ev = core2_event_table;
			count = PMC_EVENT_TABLE_SIZE(core2);
			break;
		case PMC_CPU_INTEL_COREI7:
			ev = corei7_event_table;
			count = PMC_EVENT_TABLE_SIZE(corei7);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE:
			ev = ivybridge_event_table;
			count = PMC_EVENT_TABLE_SIZE(ivybridge);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridge_event_table;
			count = PMC_EVENT_TABLE_SIZE(sandybridge);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
			ev = sandybridge_xeon_event_table;
			count = PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmere_event_table;
			count = PMC_EVENT_TABLE_SIZE(westmere);
			break;
		}
		break;
	case PMC_CLASS_UCF:
		ev = ucf_event_table;
		count = PMC_EVENT_TABLE_SIZE(ucf);
		break;
	case PMC_CLASS_UCP:
		/*
		 * Return the most appropriate set of event name
		 * spellings for the current CPU.
		 */
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_INTEL_COREI7:
			ev = corei7uc_event_table;
			count = PMC_EVENT_TABLE_SIZE(corei7uc);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridgeuc_event_table;
			count = PMC_EVENT_TABLE_SIZE(sandybridgeuc);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmereuc_event_table;
			count = PMC_EVENT_TABLE_SIZE(westmereuc);
			break;
		}
		break;
	case PMC_CLASS_TSC:
		ev = tsc_event_table;
		count = PMC_EVENT_TABLE_SIZE(tsc);
		break;
	case PMC_CLASS_K7:
		ev = k7_event_table;
		count = PMC_EVENT_TABLE_SIZE(k7);
		break;
	case PMC_CLASS_K8:
		ev = k8_event_table;
		count = PMC_EVENT_TABLE_SIZE(k8);
		break;
	case PMC_CLASS_P4:
		ev = p4_event_table;
		count = PMC_EVENT_TABLE_SIZE(p4);
		break;
	case PMC_CLASS_P5:
		ev = p5_event_table;
		count = PMC_EVENT_TABLE_SIZE(p5);
		break;
	case PMC_CLASS_P6:
		ev = p6_event_table;
		count = PMC_EVENT_TABLE_SIZE(p6);
		break;
	case PMC_CLASS_XSCALE:
		ev = xscale_event_table;
		count = PMC_EVENT_TABLE_SIZE(xscale);
		break;
	case PMC_CLASS_MIPS24K:
		ev = mips24k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips24k);
		break;
	case PMC_CLASS_OCTEON:
		ev = octeon_event_table;
		count = PMC_EVENT_TABLE_SIZE(octeon);
		break;
	case PMC_CLASS_PPC7450:
		ev = ppc7450_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc7450);
		break;
	case PMC_CLASS_SOFT:
		/* Soft event count comes from the kernel at init time. */
		ev = soft_event_table;
		count = soft_event_info.pm_nevent;
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	if ((names = malloc(count * sizeof(const char *))) == NULL)
		return (-1);

	*eventnames = names;
	*nevents = count;

	for (;count--; ev++, names++)
		*names = ev->pm_ev_name;

	return (0);
}

/* Force buffered log data out to the configured log file. */
int
pmc_flush_logfile(void)
{
	return
(PMC_CALL(FLUSHLOG,0)); 2791 } 2792 2793 int 2794 pmc_close_logfile(void) 2795 { 2796 return (PMC_CALL(CLOSELOG,0)); 2797 } 2798 2799 int 2800 pmc_get_driver_stats(struct pmc_driverstats *ds) 2801 { 2802 struct pmc_op_getdriverstats gms; 2803 2804 if (PMC_CALL(GETDRIVERSTATS, &gms) < 0) 2805 return (-1); 2806 2807 /* copy out fields in the current userland<->library interface */ 2808 ds->pm_intr_ignored = gms.pm_intr_ignored; 2809 ds->pm_intr_processed = gms.pm_intr_processed; 2810 ds->pm_intr_bufferfull = gms.pm_intr_bufferfull; 2811 ds->pm_syscalls = gms.pm_syscalls; 2812 ds->pm_syscall_errors = gms.pm_syscall_errors; 2813 ds->pm_buffer_requests = gms.pm_buffer_requests; 2814 ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed; 2815 ds->pm_log_sweeps = gms.pm_log_sweeps; 2816 return (0); 2817 } 2818 2819 int 2820 pmc_get_msr(pmc_id_t pmc, uint32_t *msr) 2821 { 2822 struct pmc_op_getmsr gm; 2823 2824 gm.pm_pmcid = pmc; 2825 if (PMC_CALL(PMCGETMSR, &gm) < 0) 2826 return (-1); 2827 *msr = gm.pm_msr; 2828 return (0); 2829 } 2830 2831 int 2832 pmc_init(void) 2833 { 2834 int error, pmc_mod_id; 2835 unsigned int n; 2836 uint32_t abi_version; 2837 struct module_stat pmc_modstat; 2838 struct pmc_op_getcpuinfo op_cpu_info; 2839 #if defined(__amd64__) || defined(__i386__) 2840 int cpu_has_iaf_counters; 2841 unsigned int t; 2842 #endif 2843 2844 if (pmc_syscall != -1) /* already inited */ 2845 return (0); 2846 2847 /* retrieve the system call number from the KLD */ 2848 if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0) 2849 return (-1); 2850 2851 pmc_modstat.version = sizeof(struct module_stat); 2852 if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0) 2853 return (-1); 2854 2855 pmc_syscall = pmc_modstat.data.intval; 2856 2857 /* check the kernel module's ABI against our compiled-in version */ 2858 abi_version = PMC_VERSION; 2859 if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0) 2860 return (pmc_syscall = -1); 2861 2862 /* ignore patch & minor numbers for the 
comparision */ 2863 if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) { 2864 errno = EPROGMISMATCH; 2865 return (pmc_syscall = -1); 2866 } 2867 2868 if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0) 2869 return (pmc_syscall = -1); 2870 2871 cpu_info.pm_cputype = op_cpu_info.pm_cputype; 2872 cpu_info.pm_ncpu = op_cpu_info.pm_ncpu; 2873 cpu_info.pm_npmc = op_cpu_info.pm_npmc; 2874 cpu_info.pm_nclass = op_cpu_info.pm_nclass; 2875 for (n = 0; n < cpu_info.pm_nclass; n++) 2876 cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n]; 2877 2878 pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE * 2879 sizeof(struct pmc_class_descr *)); 2880 2881 if (pmc_class_table == NULL) 2882 return (-1); 2883 2884 for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) 2885 pmc_class_table[n] = NULL; 2886 2887 /* 2888 * Get soft events list. 2889 */ 2890 soft_event_info.pm_class = PMC_CLASS_SOFT; 2891 if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0) 2892 return (pmc_syscall = -1); 2893 2894 /* Map soft events to static list. */ 2895 for (n = 0; n < soft_event_info.pm_nevent; n++) { 2896 soft_event_table[n].pm_ev_name = 2897 soft_event_info.pm_events[n].pm_ev_name; 2898 soft_event_table[n].pm_ev_code = 2899 soft_event_info.pm_events[n].pm_ev_code; 2900 } 2901 soft_class_table_descr.pm_evc_event_table_size = \ 2902 soft_event_info.pm_nevent; 2903 soft_class_table_descr.pm_evc_event_table = \ 2904 soft_event_table; 2905 2906 /* 2907 * Fill in the class table. 2908 */ 2909 n = 0; 2910 2911 /* Fill soft events information. */ 2912 pmc_class_table[n++] = &soft_class_table_descr; 2913 #if defined(__amd64__) || defined(__i386__) 2914 if (cpu_info.pm_cputype != PMC_CPU_GENERIC) 2915 pmc_class_table[n++] = &tsc_class_table_descr; 2916 2917 /* 2918 * Check if this CPU has fixed function counters. 
2919 */ 2920 cpu_has_iaf_counters = 0; 2921 for (t = 0; t < cpu_info.pm_nclass; t++) 2922 if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF && 2923 cpu_info.pm_classes[t].pm_num > 0) 2924 cpu_has_iaf_counters = 1; 2925 #endif 2926 2927 #define PMC_MDEP_INIT(C) do { \ 2928 pmc_mdep_event_aliases = C##_aliases; \ 2929 pmc_mdep_class_list = C##_pmc_classes; \ 2930 pmc_mdep_class_list_size = \ 2931 PMC_TABLE_SIZE(C##_pmc_classes); \ 2932 } while (0) 2933 2934 #define PMC_MDEP_INIT_INTEL_V2(C) do { \ 2935 PMC_MDEP_INIT(C); \ 2936 pmc_class_table[n++] = &iaf_class_table_descr; \ 2937 if (!cpu_has_iaf_counters) \ 2938 pmc_mdep_event_aliases = \ 2939 C##_aliases_without_iaf; \ 2940 pmc_class_table[n] = &C##_class_table_descr; \ 2941 } while (0) 2942 2943 /* Configure the event name parser. */ 2944 switch (cpu_info.pm_cputype) { 2945 #if defined(__i386__) 2946 case PMC_CPU_AMD_K7: 2947 PMC_MDEP_INIT(k7); 2948 pmc_class_table[n] = &k7_class_table_descr; 2949 break; 2950 case PMC_CPU_INTEL_P5: 2951 PMC_MDEP_INIT(p5); 2952 pmc_class_table[n] = &p5_class_table_descr; 2953 break; 2954 case PMC_CPU_INTEL_P6: /* P6 ... Pentium M CPUs have */ 2955 case PMC_CPU_INTEL_PII: /* similar PMCs. 
*/ 2956 case PMC_CPU_INTEL_PIII: 2957 case PMC_CPU_INTEL_PM: 2958 PMC_MDEP_INIT(p6); 2959 pmc_class_table[n] = &p6_class_table_descr; 2960 break; 2961 #endif 2962 #if defined(__amd64__) || defined(__i386__) 2963 case PMC_CPU_AMD_K8: 2964 PMC_MDEP_INIT(k8); 2965 pmc_class_table[n] = &k8_class_table_descr; 2966 break; 2967 case PMC_CPU_INTEL_ATOM: 2968 PMC_MDEP_INIT_INTEL_V2(atom); 2969 break; 2970 case PMC_CPU_INTEL_CORE: 2971 PMC_MDEP_INIT(core); 2972 pmc_class_table[n] = &core_class_table_descr; 2973 break; 2974 case PMC_CPU_INTEL_CORE2: 2975 case PMC_CPU_INTEL_CORE2EXTREME: 2976 PMC_MDEP_INIT_INTEL_V2(core2); 2977 break; 2978 case PMC_CPU_INTEL_COREI7: 2979 pmc_class_table[n++] = &ucf_class_table_descr; 2980 pmc_class_table[n++] = &corei7uc_class_table_descr; 2981 PMC_MDEP_INIT_INTEL_V2(corei7); 2982 break; 2983 case PMC_CPU_INTEL_IVYBRIDGE: 2984 PMC_MDEP_INIT_INTEL_V2(ivybridge); 2985 break; 2986 case PMC_CPU_INTEL_SANDYBRIDGE: 2987 pmc_class_table[n++] = &ucf_class_table_descr; 2988 pmc_class_table[n++] = &sandybridgeuc_class_table_descr; 2989 PMC_MDEP_INIT_INTEL_V2(sandybridge); 2990 break; 2991 case PMC_CPU_INTEL_SANDYBRIDGE_XEON: 2992 PMC_MDEP_INIT_INTEL_V2(sandybridge_xeon); 2993 break; 2994 case PMC_CPU_INTEL_WESTMERE: 2995 pmc_class_table[n++] = &ucf_class_table_descr; 2996 pmc_class_table[n++] = &westmereuc_class_table_descr; 2997 PMC_MDEP_INIT_INTEL_V2(westmere); 2998 break; 2999 case PMC_CPU_INTEL_PIV: 3000 PMC_MDEP_INIT(p4); 3001 pmc_class_table[n] = &p4_class_table_descr; 3002 break; 3003 #endif 3004 case PMC_CPU_GENERIC: 3005 PMC_MDEP_INIT(generic); 3006 break; 3007 #if defined(__XSCALE__) 3008 case PMC_CPU_INTEL_XSCALE: 3009 PMC_MDEP_INIT(xscale); 3010 pmc_class_table[n] = &xscale_class_table_descr; 3011 break; 3012 #endif 3013 #if defined(__mips__) 3014 case PMC_CPU_MIPS_24K: 3015 PMC_MDEP_INIT(mips24k); 3016 pmc_class_table[n] = &mips24k_class_table_descr; 3017 break; 3018 case PMC_CPU_MIPS_OCTEON: 3019 PMC_MDEP_INIT(octeon); 3020 
pmc_class_table[n] = &octeon_class_table_descr; 3021 break; 3022 #endif /* __mips__ */ 3023 #if defined(__powerpc__) 3024 case PMC_CPU_PPC_7450: 3025 PMC_MDEP_INIT(ppc7450); 3026 pmc_class_table[n] = &ppc7450_class_table_descr; 3027 break; 3028 #endif 3029 default: 3030 /* 3031 * Some kind of CPU this version of the library knows nothing 3032 * about. This shouldn't happen since the abi version check 3033 * should have caught this. 3034 */ 3035 errno = ENXIO; 3036 return (pmc_syscall = -1); 3037 } 3038 3039 return (0); 3040 } 3041 3042 const char * 3043 pmc_name_of_capability(enum pmc_caps cap) 3044 { 3045 int i; 3046 3047 /* 3048 * 'cap' should have a single bit set and should be in 3049 * range. 3050 */ 3051 if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST || 3052 cap > PMC_CAP_LAST) { 3053 errno = EINVAL; 3054 return (NULL); 3055 } 3056 3057 i = ffs(cap); 3058 return (pmc_capability_names[i - 1]); 3059 } 3060 3061 const char * 3062 pmc_name_of_class(enum pmc_class pc) 3063 { 3064 if ((int) pc >= PMC_CLASS_FIRST && 3065 pc <= PMC_CLASS_LAST) 3066 return (pmc_class_names[pc]); 3067 3068 errno = EINVAL; 3069 return (NULL); 3070 } 3071 3072 const char * 3073 pmc_name_of_cputype(enum pmc_cputype cp) 3074 { 3075 size_t n; 3076 3077 for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++) 3078 if (cp == pmc_cputype_names[n].pm_cputype) 3079 return (pmc_cputype_names[n].pm_name); 3080 3081 errno = EINVAL; 3082 return (NULL); 3083 } 3084 3085 const char * 3086 pmc_name_of_disposition(enum pmc_disp pd) 3087 { 3088 if ((int) pd >= PMC_DISP_FIRST && 3089 pd <= PMC_DISP_LAST) 3090 return (pmc_disposition_names[pd]); 3091 3092 errno = EINVAL; 3093 return (NULL); 3094 } 3095 3096 const char * 3097 _pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu) 3098 { 3099 const struct pmc_event_descr *ev, *evfence; 3100 3101 ev = evfence = NULL; 3102 if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) { 3103 ev = iaf_event_table; 3104 evfence = iaf_event_table + 
		    PMC_EVENT_TABLE_SIZE(iaf);
	} else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
		/* Programmable Intel events: table depends on the CPU. */
		switch (cpu) {
		case PMC_CPU_INTEL_ATOM:
			ev = atom_event_table;
			evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
			break;
		case PMC_CPU_INTEL_CORE:
			ev = core_event_table;
			evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
			break;
		case PMC_CPU_INTEL_CORE2:
		case PMC_CPU_INTEL_CORE2EXTREME:
			ev = core2_event_table;
			evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
			break;
		case PMC_CPU_INTEL_COREI7:
			ev = corei7_event_table;
			evfence = corei7_event_table + PMC_EVENT_TABLE_SIZE(corei7);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE:
			ev = ivybridge_event_table;
			evfence = ivybridge_event_table + PMC_EVENT_TABLE_SIZE(ivybridge);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridge_event_table;
			evfence = sandybridge_event_table + PMC_EVENT_TABLE_SIZE(sandybridge);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
			ev = sandybridge_xeon_event_table;
			evfence = sandybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmere_event_table;
			evfence = westmere_event_table + PMC_EVENT_TABLE_SIZE(westmere);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_UCF_FIRST && pe <= PMC_EV_UCF_LAST) {
		ev = ucf_event_table;
		evfence = ucf_event_table + PMC_EVENT_TABLE_SIZE(ucf);
	} else if (pe >= PMC_EV_UCP_FIRST && pe <= PMC_EV_UCP_LAST) {
		/* Uncore programmable events: table depends on the CPU. */
		switch (cpu) {
		case PMC_CPU_INTEL_COREI7:
			ev = corei7uc_event_table;
			evfence = corei7uc_event_table + PMC_EVENT_TABLE_SIZE(corei7uc);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridgeuc_event_table;
			evfence = sandybridgeuc_event_table + PMC_EVENT_TABLE_SIZE(sandybridgeuc);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmereuc_event_table;
			evfence = westmereuc_event_table + PMC_EVENT_TABLE_SIZE(westmereuc);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
		ev = k7_event_table;
		evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
	} else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
		ev = k8_event_table;
		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
	} else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
		ev = p4_event_table;
		evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
	} else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
		ev = p5_event_table;
		evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
	} else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
		ev = p6_event_table;
		evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
	} else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
		ev = xscale_event_table;
		evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
	} else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
		ev = mips24k_event_table;
		evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
	} else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) {
		ev = octeon_event_table;
		evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon);
	} else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
		ev = ppc7450_event_table;
		evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
	} else if (pe == PMC_EV_TSC_TSC) {
		ev = tsc_event_table;
		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
	} else if (pe >= PMC_EV_SOFT_FIRST && pe <= PMC_EV_SOFT_LAST) {
		ev = soft_event_table;
		evfence = soft_event_table + soft_event_info.pm_nevent;
	}

	/* Scan the selected table (no-op if ev == evfence == NULL). */
	for (; ev != evfence; ev++)
		if (pe == ev->pm_ev_code)
			return (ev->pm_ev_name);

	return (NULL);
}

/*
 * Public wrapper around _pmc_name_of_event() using the CPU type
 * detected at pmc_init() time; sets errno = EINVAL on failure.
 */
const char *
pmc_name_of_event(enum pmc_event pe)
{
	const char *n;

	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
		return (n);

	errno = EINVAL;
	return (NULL);
}

/* Return the printable name of PMC mode 'pm', or NULL + EINVAL. */
const char *
pmc_name_of_mode(enum pmc_mode pm)
{
	if ((int) pm >= PMC_MODE_FIRST &&
	    pm <= PMC_MODE_LAST)
		return (pmc_mode_names[pm]);

	errno = EINVAL;
	return (NULL);
}

/* Return the printable name of PMC state 'ps', or NULL + EINVAL. */
const char *
pmc_name_of_state(enum pmc_state ps)
{
	if ((int) ps >= PMC_STATE_FIRST &&
	    ps <= PMC_STATE_LAST)
		return (pmc_state_names[ps]);

	errno = EINVAL;
	return (NULL);
}

/* Number of CPUs known to the driver; requires pmc_init(). */
int
pmc_ncpu(void)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	return (cpu_info.pm_ncpu);
}

/* Number of PMC rows on CPU 'cpu'; requires pmc_init(). */
int
pmc_npmc(int cpu)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
		errno = EINVAL;
		return (-1);
	}

	return (cpu_info.pm_npmc);
}

/*
 * Retrieve per-PMC information for CPU 'cpu'.  On success '*ppmci'
 * points to malloc'ed storage the caller must free().
 */
int
pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
{
	int nbytes, npmc;
	struct pmc_op_getpmcinfo *pmci;

	if ((npmc = pmc_npmc(cpu)) < 0)
		return (-1);

	/* Header plus one pmc_info record per PMC row. */
	nbytes = sizeof(struct pmc_op_getpmcinfo) +
	    npmc * sizeof(struct pmc_info);

	if ((pmci = calloc(1, nbytes)) == NULL)
		return (-1);

	pmci->pm_cpu = cpu;

	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
		free(pmci);
		return (-1);
	}

	/* kernel<->library, library<->userland interfaces are identical */
	*ppmci = (struct pmc_pmcinfo *) pmci;
	return (0);
}

/* Read the current value of PMC 'pmc' into '*value'. */
int
pmc_read(pmc_id_t pmc, pmc_value_t *value)
{
	struct pmc_op_pmcrw pmc_read_op;

	pmc_read_op.pm_pmcid = pmc;
	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
	pmc_read_op.pm_value = -1;

	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
		return (-1);

	*value = pmc_read_op.pm_value;
	return (0);
}

/* Release PMC 'pmc' back to the system. */
int
pmc_release(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_release_args;

	pmc_release_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
}

/*
 * Atomically write 'newvalue' to PMC 'pmc' and return the previous
 * value in '*oldvaluep'.
 */
int
pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
{
	struct pmc_op_pmcrw pmc_rw_op;

	pmc_rw_op.pm_pmcid = pmc;
	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
	pmc_rw_op.pm_value = newvalue;

	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
		return (-1);

	*oldvaluep = pmc_rw_op.pm_value;
	return (0);
}

/* Set the initial/reload count of PMC 'pmc' to 'value'. */
int
pmc_set(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcsetcount sc;

	sc.pm_pmcid = pmc;
	sc.pm_count = value;

	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
		return (-1);
	return (0);
}

/* Start PMC 'pmc'. */
int
pmc_start(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_start_args;

	pmc_start_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTART, &pmc_start_args));
}

/* Stop PMC 'pmc'. */
int
pmc_stop(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_stop_args;

	pmc_stop_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
}

/*
 * Return the width in bits of the counters in the class that PMC
 * 'pmcid' belongs to.
 */
int
pmc_width(pmc_id_t pmcid, uint32_t *width)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*width = cpu_info.pm_classes[i].pm_width;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

/* Write 'value' to PMC 'pmc'. */
int
pmc_write(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcrw pmc_write_op;

	pmc_write_op.pm_pmcid = pmc;
	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
	pmc_write_op.pm_value = value;
	return (PMC_CALL(PMCRW, &pmc_write_op));
}

/* Append a user log record carrying 'userdata' to the log file. */
int
pmc_writelog(uint32_t userdata)
{
	struct pmc_op_writelog wl;

	wl.pm_userdata = userdata;
	return (PMC_CALL(WRITELOG, &wl));
}