1 /*- 2 * Copyright (c) 2003-2008 Joseph Koshy 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/types.h> 31 #include <sys/module.h> 32 #include <sys/pmc.h> 33 #include <sys/syscall.h> 34 35 #include <ctype.h> 36 #include <errno.h> 37 #include <fcntl.h> 38 #include <pmc.h> 39 #include <stdio.h> 40 #include <stdlib.h> 41 #include <string.h> 42 #include <strings.h> 43 #include <unistd.h> 44 45 #include "libpmcinternal.h" 46 47 /* Function prototypes */ 48 #if defined(__i386__) 49 static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 50 struct pmc_op_pmcallocate *_pmc_config); 51 #endif 52 #if defined(__amd64__) || defined(__i386__) 53 static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 54 struct pmc_op_pmcallocate *_pmc_config); 55 static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 56 struct pmc_op_pmcallocate *_pmc_config); 57 static int ucf_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 58 struct pmc_op_pmcallocate *_pmc_config); 59 static int ucp_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 60 struct pmc_op_pmcallocate *_pmc_config); 61 static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 62 struct pmc_op_pmcallocate *_pmc_config); 63 static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 64 struct pmc_op_pmcallocate *_pmc_config); 65 #endif 66 #if defined(__i386__) 67 static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 68 struct pmc_op_pmcallocate *_pmc_config); 69 static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 70 struct pmc_op_pmcallocate *_pmc_config); 71 #endif 72 #if defined(__amd64__) || defined(__i386__) 73 static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 74 struct pmc_op_pmcallocate *_pmc_config); 75 #endif 76 #if defined(__XSCALE__) 77 static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 78 struct pmc_op_pmcallocate *_pmc_config); 79 #endif 80 #if defined(__mips__) 81 static int mips_allocate_pmc(enum pmc_event _pe, char* ctrspec, 82 struct pmc_op_pmcallocate 
*_pmc_config);
#endif /* __mips__ */
static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);

#if defined(__powerpc__)
static int ppc7450_allocate_pmc(enum pmc_event _pe, char* ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif /* __powerpc__ */

/*
 * Invoke the hwpmc(4) system call for operation PMC_OP_'cmd' with
 * argument block 'params'.  'pmc_syscall' is the syscall number looked
 * up at initialization time (see the 'pmc_syscall' definition below).
 */
#define PMC_CALL(cmd, params)				\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))

/*
 * Event aliases provide a way for the user to ask for generic events
 * like "cache-misses", or "instructions-retired".  These aliases are
 * mapped to the appropriate canonical event descriptions using a
 * lookup table.
 */
struct pmc_event_alias {
	const char	*pm_alias;	/* generic, user-visible name */
	const char	*pm_spec;	/* canonical event specification */
};

/* Points at the alias table for the CPU detected at init time. */
static const struct pmc_event_alias *pmc_mdep_event_aliases;

/*
 * The pmc_event_descr structure maps symbolic names known to the user
 * to integer codes used by the PMC KLD.
 */
struct pmc_event_descr {
	const char	*pm_ev_name;	/* symbolic event name */
	enum pmc_event	pm_ev_code;	/* kernel event code */
};

/*
 * The pmc_class_descr structure maps class name prefixes for
 * event names to event tables and other PMC class data.
 */
struct pmc_class_descr {
	const char	*pm_evc_name;		/* class prefix, e.g. "IAP-" */
	size_t		pm_evc_name_size;	/* prefix length, sans NUL */
	enum pmc_class	pm_evc_class;		/* PMC class code */
	const struct pmc_event_descr *pm_evc_event_table; /* events in class */
	size_t		pm_evc_event_table_size; /* number of table entries */
	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
						/* class-specific parser */
};

/* Element count of a statically-sized array (arrays only, not pointers). */
#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)

/* Expand one event into a { name, code } initializer for an event table. */
#undef	__PMC_EV
#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },

/*
 * PMC_CLASSDEP_TABLE(NAME, CLASS)
 *
 * Define a table mapping event names and aliases to HWPMC event IDs.
 */
#define	PMC_CLASSDEP_TABLE(N, C)				\
	static const struct pmc_event_descr N##_event_table[] =	\
	{							\
		__PMC_EV_##C()					\
	}

/* Per-class event tables, generated from the kernel's event lists. */
PMC_CLASSDEP_TABLE(iaf, IAF);
PMC_CLASSDEP_TABLE(k7, K7);
PMC_CLASSDEP_TABLE(k8, K8);
PMC_CLASSDEP_TABLE(p4, P4);
PMC_CLASSDEP_TABLE(p5, P5);
PMC_CLASSDEP_TABLE(p6, P6);
PMC_CLASSDEP_TABLE(xscale, XSCALE);
PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
PMC_CLASSDEP_TABLE(octeon, OCTEON);
PMC_CLASSDEP_TABLE(ucf, UCF);
PMC_CLASSDEP_TABLE(ppc7450, PPC7450);

/* Software events are dynamic; this table is filled in at init time. */
static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];

/* Expand an aliased event into a { name, code } initializer. */
#undef	__PMC_EV_ALIAS
#define	__PMC_EV_ALIAS(N,CODE) { N, PMC_EV_##CODE },

/* Per-CPU-model event tables for Intel programmable and uncore PMCs. */
static const struct pmc_event_descr atom_event_table[] =
{
	__PMC_EV_ALIAS_ATOM()
};

static const struct pmc_event_descr core_event_table[] =
{
	__PMC_EV_ALIAS_CORE()
};


static const struct pmc_event_descr core2_event_table[] =
{
	__PMC_EV_ALIAS_CORE2()
};

static const struct pmc_event_descr corei7_event_table[] =
{
	__PMC_EV_ALIAS_COREI7()
};

static const struct pmc_event_descr ivybridge_event_table[] =
{
	__PMC_EV_ALIAS_IVYBRIDGE()
};

static const struct pmc_event_descr sandybridge_event_table[] =
{
	__PMC_EV_ALIAS_SANDYBRIDGE()
};

static const struct pmc_event_descr westmere_event_table[] =
{
	__PMC_EV_ALIAS_WESTMERE()
};

static const struct pmc_event_descr corei7uc_event_table[] =
{
	__PMC_EV_ALIAS_COREI7UC()
};

static const struct pmc_event_descr sandybridgeuc_event_table[] =
{
	__PMC_EV_ALIAS_SANDYBRIDGEUC()
};

static const struct pmc_event_descr westmereuc_event_table[] =
{
	__PMC_EV_ALIAS_WESTMEREUC()
};

/*
 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
 *
 * Map a CPU to the PMC classes it supports.
 */
#define	PMC_MDEP_TABLE(N,C,...)				\
	static const enum pmc_class N##_pmc_classes[] = {	\
		PMC_CLASS_##C, __VA_ARGS__			\
	}

PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(core, IAP, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(corei7, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(ivybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(sandybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(westmere, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(k7, K7, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p4, P4, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p5, P5, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p6, P6, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE);
PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450);
PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT);

static const struct pmc_event_descr tsc_event_table[] =
{
	__PMC_EV_TSC()
};

/*
 * PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)
 *
 * Define a class descriptor tying the "CLASS-" name prefix to its
 * event table and class-specific allocation parser.
 */
#undef	PMC_CLASS_TABLE_DESC
#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
static const struct pmc_class_descr NAME##_class_table_descr =	\
	{							\
		.pm_evc_name  = #CLASS "-",			\
		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
		.pm_evc_class = PMC_CLASS_##CLASS ,		\
		.pm_evc_event_table = EVENTS##_event_table ,	\
		.pm_evc_event_table_size = 			\
			PMC_EVENT_TABLE_SIZE(EVENTS),		\
		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
	}

#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap);
PMC_CLASS_TABLE_DESC(ivybridge, IAP, ivybridge, iap);
PMC_CLASS_TABLE_DESC(sandybridge, IAP, sandybridge, iap);
PMC_CLASS_TABLE_DESC(westmere, IAP, westmere, iap);
PMC_CLASS_TABLE_DESC(ucf, UCF, ucf, ucf);
PMC_CLASS_TABLE_DESC(corei7uc, UCP, corei7uc, ucp);
PMC_CLASS_TABLE_DESC(sandybridgeuc, UCP, sandybridgeuc, ucp);
PMC_CLASS_TABLE_DESC(westmereuc, UCP, westmereuc, ucp);
#endif
#if	defined(__i386__)
PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
#endif
#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
#endif
#if	defined(__i386__)
PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
#endif
#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
#endif
#if	defined(__XSCALE__)
PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
#endif
#if defined(__mips__)
PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips);
#endif /* __mips__ */
#if defined(__powerpc__)
PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, ppc7450);
#endif

/*
 * The SOFT class descriptor is built by hand: its event table is
 * dynamic (filled in at init time), so it cannot use the macro above.
 */
static struct pmc_class_descr soft_class_table_descr =
{
	.pm_evc_name = "SOFT-",
	.pm_evc_name_size = sizeof("SOFT-") - 1,
	.pm_evc_class = PMC_CLASS_SOFT,
	.pm_evc_event_table = NULL,
	.pm_evc_event_table_size = 0,
	.pm_evc_allocate_pmc = soft_allocate_pmc
};

#undef	PMC_CLASS_TABLE_DESC

/* Class descriptors for the running CPU; sized by cpu_info.pm_nclass. */
static const struct pmc_class_descr **pmc_class_table;
#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass

static const enum pmc_class *pmc_mdep_class_list;
static size_t pmc_mdep_class_list_size;

/*
 * Mapping tables, mapping enumeration values to human readable
 * strings.
 */

static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

static const char * pmc_class_names[] = {
#undef	__PMC_CLASS
#define	__PMC_CLASS(C)	#C ,
	__PMC_CLASSES()
};

struct pmc_cputype_map {
	enum pmc_cputype pm_cputype;
	const char	*pm_name;
};

static const struct pmc_cputype_map pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
	__PMC_CPUS()
};

static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

static const char * pmc_mode_names[] = {
#undef	__PMC_MODE
#define	__PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

static const char * pmc_state_names[] = {
#undef	__PMC_STATE
#define	__PMC_STATE(S)	#S ,
	__PMC_STATES()
};

/*
 * Filled in by pmc_init().
367 */ 368 static int pmc_syscall = -1; 369 static struct pmc_cpuinfo cpu_info; 370 static struct pmc_op_getdyneventinfo soft_event_info; 371 372 /* Event masks for events */ 373 struct pmc_masks { 374 const char *pm_name; 375 const uint64_t pm_value; 376 }; 377 #define PMCMASK(N,V) { .pm_name = #N, .pm_value = (V) } 378 #define NULLMASK { .pm_name = NULL } 379 380 #if defined(__amd64__) || defined(__i386__) 381 static int 382 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask) 383 { 384 const struct pmc_masks *pm; 385 char *q, *r; 386 int c; 387 388 if (pmask == NULL) /* no mask keywords */ 389 return (-1); 390 q = strchr(p, '='); /* skip '=' */ 391 if (*++q == '\0') /* no more data */ 392 return (-1); 393 c = 0; /* count of mask keywords seen */ 394 while ((r = strsep(&q, "+")) != NULL) { 395 for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name); 396 pm++) 397 ; 398 if (pm->pm_name == NULL) /* not found */ 399 return (-1); 400 *evmask |= pm->pm_value; 401 c++; 402 } 403 return (c); 404 } 405 #endif 406 407 #define KWMATCH(p,kw) (strcasecmp((p), (kw)) == 0) 408 #define KWPREFIXMATCH(p,kw) (strncasecmp((p), (kw), sizeof((kw)) - 1) == 0) 409 #define EV_ALIAS(N,S) { .pm_alias = N, .pm_spec = S } 410 411 #if defined(__i386__) 412 413 /* 414 * AMD K7 (Athlon) CPUs. 
 */

static struct pmc_event_alias k7_aliases[] = {
	EV_ALIAS("branches",		"k7-retired-branches"),
	EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k7-dc-misses"),
	EV_ALIAS("ic-misses",		"k7-ic-misses"),
	EV_ALIAS("instructions",	"k7-retired-instructions"),
	EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
	EV_ALIAS(NULL, NULL)		/* terminator */
};

/* Event-specifier keywords accepted by the K7 parser below. */
#define	K7_KW_COUNT	"count"
#define	K7_KW_EDGE	"edge"
#define	K7_KW_INV	"inv"
#define	K7_KW_OS	"os"
#define	K7_KW_UNITMASK	"unitmask"
#define	K7_KW_USR	"usr"

/*
 * Parse the comma-separated qualifier list 'ctrspec' for K7 event 'pe'
 * and fill in the AMD-specific fields of '*pmc_config'.  'ctrspec' is
 * consumed destructively (strsep).  Returns 0 on success, -1 on a
 * malformed or inapplicable qualifier.
 */
static int
k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char		*e, *p, *q;
	int		c, has_unitmask;
	uint32_t	count, unitmask;

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	/*
	 * Only the DC refill/writeback events take a MOESI unit mask;
	 * for those, default to counting all five cache states.
	 */
	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
	    pe == PMC_EV_K7_DC_WRITEBACKS) {
		has_unitmask = 1;
		unitmask = AMD_PMC_UNITMASK_MOESI;
	} else
		unitmask = has_unitmask = 0;

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K7_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K7_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, K7_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
			if (has_unitmask == 0)	/* event takes no unit mask */
				return (-1);
			unitmask = 0;
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			/* Accept any '+'-separated mix of m,o,e,s,i. */
			while ((c = tolower(*q++)) != 0)
				if (c == 'm')
					unitmask |= AMD_PMC_UNITMASK_M;
				else if (c == 'o')
					unitmask |= AMD_PMC_UNITMASK_O;
				else if (c == 'e')
					unitmask |= AMD_PMC_UNITMASK_E;
				else if (c == 's')
					unitmask |= AMD_PMC_UNITMASK_S;
				else if (c == 'i')
					unitmask |= AMD_PMC_UNITMASK_I;
				else if (c == '+')
					continue;
				else
					return (-1);

			if (unitmask == 0)	/* empty unit mask */
				return (-1);

		} else if (KWMATCH(p, K7_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	if (has_unitmask) {
		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		pmc_config->pm_md.pm_amd.pm_amd_config |=
		    AMD_PMC_TO_UNITMASK(unitmask);
	}

	return (0);

}

#endif

#if defined(__amd64__) || defined(__i386__)

/*
 * Intel Core (Family 6, Model E) PMCs.
 */

static struct pmc_event_alias core_aliases[] = {
	EV_ALIAS("branches",		"iap-br-instr-ret"),
	EV_ALIAS("branch-mispredicts",	"iap-br-mispred-ret"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-icache-misses"),
	EV_ALIAS("instructions",	"iap-instr-ret"),
	EV_ALIAS("interrupts",		"iap-core-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"iap-unhalted-core-cycles"),
	EV_ALIAS(NULL, NULL)		/* terminator */
};

/*
 * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
 * and Atom (Family 6, model 1CH) PMCs.
 *
 * We map aliases to events on the fixed-function counters if these
 * are present.  Note that not all CPUs in this family contain fixed-function
 * counters.
 */

static struct pmc_event_alias core2_aliases[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iaf-instr-retired.any"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iaf-cpu-clk-unhalted.core"),
	EV_ALIAS(NULL, NULL)		/* terminator */
};

/* As above, but for parts lacking fixed-function (IAF) counters. */
static struct pmc_event_alias core2_aliases_without_iaf[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iap-inst-retired.any_p"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iap-cpu-clk-unhalted.core_p"),
	EV_ALIAS(NULL, NULL)		/* terminator */
};

/* These CPU models reuse the Core2 alias tables verbatim. */
#define	atom_aliases			core2_aliases
#define	atom_aliases_without_iaf	core2_aliases_without_iaf
#define	corei7_aliases			core2_aliases
#define	corei7_aliases_without_iaf	core2_aliases_without_iaf
#define	ivybridge_aliases		core2_aliases
#define	ivybridge_aliases_without_iaf	core2_aliases_without_iaf
#define	sandybridge_aliases		core2_aliases
#define	sandybridge_aliases_without_iaf	core2_aliases_without_iaf
#define	westmere_aliases		core2_aliases
#define	westmere_aliases_without_iaf	core2_aliases_without_iaf

#define	IAF_KW_OS		"os"
#define	IAF_KW_USR		"usr"
#define	IAF_KW_ANYTHREAD	"anythread"

/*
 * Parse an event specifier for Intel fixed function counters.
 *
 * Accepts a comma-separated list of "os", "usr" and "anythread"
 * qualifiers in 'ctrspec' (consumed destructively) and fills in the
 * IAF fields of '*pmc_config'.  Returns 0 on success, -1 on an
 * unrecognized qualifier.  'pe' is unused.
 */
static int
iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, IAF_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, IAF_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, IAF_KW_ANYTHREAD))
			pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
		else
			return (-1);
	}

	return (0);
}

/*
 * Core/Core2 support.
 */

/* Event-specifier keywords accepted by the IAP parser below. */
#define	IAP_KW_AGENT		"agent"
#define	IAP_KW_ANYTHREAD	"anythread"
#define	IAP_KW_CACHESTATE	"cachestate"
#define	IAP_KW_CMASK		"cmask"
#define	IAP_KW_CORE		"core"
#define	IAP_KW_EDGE		"edge"
#define	IAP_KW_INV		"inv"
#define	IAP_KW_OS		"os"
#define	IAP_KW_PREFETCH		"prefetch"
#define	IAP_KW_SNOOPRESPONSE	"snoopresponse"
#define	IAP_KW_SNOOPTYPE	"snooptype"
#define	IAP_KW_TRANSITION	"trans"
#define	IAP_KW_USR		"usr"
#define	IAP_KW_RSP		"rsp"

/* Mask tables for the IAP qualifiers; all are NULLMASK-terminated. */
static struct pmc_masks iap_core_mask[] = {
	PMCMASK(all, (0x3 << 14)),
	PMCMASK(this, (0x1 << 14)),
	NULLMASK
};

static struct pmc_masks iap_agent_mask[] = {
	PMCMASK(this, 0),
	PMCMASK(any, (0x1 << 13)),
	NULLMASK
};

static struct pmc_masks iap_prefetch_mask[] = {
	PMCMASK(both, (0x3 << 12)),
	PMCMASK(only, (0x1 << 12)),
	PMCMASK(exclude, 0),
	NULLMASK
};

static struct pmc_masks iap_cachestate_mask[] = {
	PMCMASK(i, (1 << 8)),
	PMCMASK(s, (1 << 9)),
	PMCMASK(e, (1 << 10)),
	PMCMASK(m, (1 << 11)),
	NULLMASK
};

static struct pmc_masks iap_snoopresponse_mask[] = {
	PMCMASK(clean, (1 << 8)),
	PMCMASK(hit, (1 << 9)),
	PMCMASK(hitm, (1 << 11)),
	NULLMASK
};

static struct pmc_masks
iap_snooptype_mask[] = {
	PMCMASK(cmp2s, (1 << 8)),
	PMCMASK(cmp2i, (1 << 9)),
	NULLMASK
};

static struct pmc_masks iap_transition_mask[] = {
	PMCMASK(any, 0x00),
	PMCMASK(frequency, 0x10),
	NULLMASK
};

/* "rsp=" qualifier bits for Core i7 / Westmere. */
static struct pmc_masks iap_rsp_mask_i7_wm[] = {
	PMCMASK(DMND_DATA_RD, (1 << 0)),
	PMCMASK(DMND_RFO, (1 << 1)),
	PMCMASK(DMND_IFETCH, (1 << 2)),
	PMCMASK(WB, (1 << 3)),
	PMCMASK(PF_DATA_RD, (1 << 4)),
	PMCMASK(PF_RFO, (1 << 5)),
	PMCMASK(PF_IFETCH, (1 << 6)),
	PMCMASK(OTHER, (1 << 7)),
	PMCMASK(UNCORE_HIT, (1 << 8)),
	PMCMASK(OTHER_CORE_HIT_SNP, (1 << 9)),
	PMCMASK(OTHER_CORE_HITM, (1 << 10)),
	PMCMASK(REMOTE_CACHE_FWD, (1 << 12)),
	PMCMASK(REMOTE_DRAM, (1 << 13)),
	PMCMASK(LOCAL_DRAM, (1 << 14)),
	PMCMASK(NON_DRAM, (1 << 15)),
	NULLMASK
};

/* "rsp=" qualifier bits for Sandy Bridge / Ivy Bridge (64-bit field). */
static struct pmc_masks iap_rsp_mask_sb_ib[] = {
	PMCMASK(REQ_DMND_DATA_RD, (1ULL << 0)),
	PMCMASK(REQ_DMND_RFO, (1ULL << 1)),
	PMCMASK(REQ_DMND_IFETCH, (1ULL << 2)),
	PMCMASK(REQ_WB, (1ULL << 3)),
	PMCMASK(REQ_PF_DATA_RD, (1ULL << 4)),
	PMCMASK(REQ_PF_RFO, (1ULL << 5)),
	PMCMASK(REQ_PF_IFETCH, (1ULL << 6)),
	PMCMASK(REQ_PF_LLC_DATA_RD, (1ULL << 7)),
	PMCMASK(REQ_PF_LLC_RFO, (1ULL << 8)),
	PMCMASK(REQ_PF_LLC_IFETCH, (1ULL << 9)),
	PMCMASK(REQ_BUS_LOCKS, (1ULL << 10)),
	PMCMASK(REQ_STRM_ST, (1ULL << 11)),
	PMCMASK(REQ_OTHER, (1ULL << 15)),
	PMCMASK(RES_ANY, (1ULL << 16)),
	PMCMASK(RES_SUPPLIER_SUPP, (1ULL << 17)),
	PMCMASK(RES_SUPPLIER_LLC_HITM, (1ULL << 18)),
	PMCMASK(RES_SUPPLIER_LLC_HITE, (1ULL << 19)),
	PMCMASK(RES_SUPPLIER_LLC_HITS, (1ULL << 20)),
	PMCMASK(RES_SUPPLIER_LLC_HITF, (1ULL << 21)),
	PMCMASK(RES_SUPPLIER_LOCAL, (1ULL << 22)),
	PMCMASK(RES_SNOOP_SNPI_NONE, (1ULL << 31)),
	PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
	PMCMASK(RES_SNOOP_SNP_MISS, (1ULL << 33)),
	PMCMASK(RES_SNOOP_HIT_NO_FWD, (1ULL << 34)),
	PMCMASK(RES_SNOOP_HIT_FWD, (1ULL << 35)),
	PMCMASK(RES_SNOOP_HITM, (1ULL << 36)),
	PMCMASK(RES_NON_DRAM, (1ULL << 37)),
	NULLMASK
};

/*
 * Parse the qualifier list 'ctrspec' for Intel programmable (IAP)
 * event 'pe' and fill in the IAP fields of '*pmc_config'.  Which
 * qualifiers are legal depends on the CPU model in 'cpu_info'.
 * 'ctrspec' is consumed destructively (strsep).  Returns 0 on
 * success, -1 on a malformed or inapplicable qualifier.
 */
static int
iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint64_t cachestate, evmask, rsp;
	int count, n;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
	    PMC_CAP_QUALIFIER);
	pmc_config->pm_md.pm_iap.pm_iap_config = 0;

	cachestate = evmask = rsp = 0;

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {

		n = 0;
		if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_iap.pm_iap_config |=
			    IAP_CMASK(count);
		} else if (KWMATCH(p, IAP_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, IAP_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, IAP_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, IAP_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
			pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
		} else if (KWPREFIXMATCH(p, IAP_KW_CORE "=")) {
			n = pmc_parse_mask(iap_core_mask, p, &evmask);
			if (n != 1)	/* exactly one core scope allowed */
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_AGENT "=")) {
			n = pmc_parse_mask(iap_agent_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_PREFETCH "=")) {
			n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_CACHESTATE "=")) {
			/* multiple cache states may be OR'ed together */
			n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
		    KWPREFIXMATCH(p, IAP_KW_TRANSITION "=")) {
			n = pmc_parse_mask(iap_transition_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME) {
			if (KWPREFIXMATCH(p, IAP_KW_SNOOPRESPONSE "=")) {
				n = pmc_parse_mask(iap_snoopresponse_mask, p,
				    &evmask);
			} else if (KWPREFIXMATCH(p, IAP_KW_SNOOPTYPE "=")) {
				n = pmc_parse_mask(iap_snooptype_mask, p,
				    &evmask);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_COREI7 ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_i7_wm, p, &rsp);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_sb_ib, p, &rsp);
			} else
				return (-1);
		} else
			return (-1);

		if (n < 0)	/* Parsing failed. */
			return (-1);
	}

	pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;

	/*
	 * If the event requires a 'cachestate' qualifier but was not
	 * specified by the user, use a sensible default.
	 */
	switch (pe) {
	case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
	case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_32H: /* Core */
	case PMC_EV_IAP_EVENT_40H: /* Core */
	case PMC_EV_IAP_EVENT_41H: /* Core */
	case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
		if (cachestate == 0)
			cachestate = (0xF << 8);	/* all of MESI */
		break;
	case PMC_EV_IAP_EVENT_77H: /* Atom */
		/* IAP_EVENT_77H only accepts a cachestate qualifier on the
		 * Atom processor
		 */
		if(cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM && cachestate == 0)
			cachestate = (0xF << 8);
		break;
	default:
		break;
	}

	pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
	pmc_config->pm_md.pm_iap.pm_iap_rsp = rsp;

	return (0);
}

/*
 * Intel Uncore.
 */

/*
 * Parse an event specifier for Intel uncore fixed counters.  These
 * take no qualifiers; both 'pe' and 'ctrspec' are ignored.  Always
 * returns 0.
 */
static int
ucf_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	(void) pe;
	(void) ctrspec;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_ucf.pm_ucf_flags = 0;

	return (0);
}

/* Event-specifier keywords accepted by the UCP parser below. */
#define	UCP_KW_CMASK	"cmask"
#define	UCP_KW_EDGE	"edge"
#define	UCP_KW_INV	"inv"

/*
 * Parse the qualifier list 'ctrspec' for Intel uncore programmable
 * (UCP) counters and fill in the UCP fields of '*pmc_config'.
 * 'ctrspec' is consumed destructively (strsep).  Returns 0 on
 * success, -1 on a malformed qualifier.  'pe' is unused.
 */
static int
ucp_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	int count, n;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
	    PMC_CAP_QUALIFIER);
	pmc_config->pm_md.pm_ucp.pm_ucp_config = 0;

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {

		n = 0;
		if (KWPREFIXMATCH(p, UCP_KW_CMASK "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ucp.pm_ucp_config |=
			    UCP_CMASK(count);
		} else if (KWMATCH(p, UCP_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, UCP_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else
			return (-1);

		if (n < 0)	/* Parsing failed. */
			return (-1);
	}

	return (0);
}

/*
 * AMD K8 PMCs.
 *
 * These are very similar to AMD K7 PMCs, but support more kinds of
 * events.
 */

static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k8-dc-miss"),
	EV_ALIAS("ic-misses",		"k8-ic-miss"),
	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)		/* terminator */
};

/* K8 unit-mask bit 'V' with keyword 'N'. */
#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops,	0),
	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
	__K8MASK(store-pipe-excluding-junk-ops,	2),
	__K8MASK(add-pipe-junk-ops,		3),
	__K8MASK(multiply-pipe-junk-ops,	4),
	__K8MASK(store-pipe-junk-ops,		5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es,	0),
	__K8MASK(cs,	1),
	__K8MASK(ss,	2),
	__K8MASK(ds,	3),
	__K8MASK(fs,	4),
	__K8MASK(gs,	5),
	__K8MASK(hs,	6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions,	0),
	__K8MASK(cycles-in-request,	1),
	__K8MASK(cycles-to-complete,	2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid,	0),
	__K8MASK(shared,	1),
	__K8MASK(exclusive,	2),
	__K8MASK(owner,		3),
	__K8MASK(modified,	4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber,	0),
	__K8MASK(piggyback,	1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load,	0),
	__K8MASK(store,	1),
	__K8MASK(nta,	2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses,	0),
	__K8MASK(misses,	1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	__K8MASK(tag-snoop,	3),
	__K8MASK(cancelled,	4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim,	0),
	__K8MASK(victim-from-l2,	1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87,			0),
	__K8MASK(mmx-3dnow,		1),
	__K8MASK(packed-sse-sse2,	2),
	__K8MASK(scalar-sse-sse2,	3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0,		0),
	__K8MASK(low-op-pos-1,		1),
	__K8MASK(low-op-pos-2,		2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults,	0),
	__K8MASK(sse-retype-microfaults,	1),
	__K8MASK(sse-reclass-microfaults,	2),
	__K8MASK(sse-and-x87-microtraps,	3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit,	0),
	__K8MASK(page-miss,	1),
	__K8MASK(page-conflict,	2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround,
0), 1062 __K8MASK(read-to-write-turnaround, 1), 1063 __K8MASK(write-to-read-turnaround, 2), 1064 NULLMASK 1065 }; 1066 1067 /* nb memory controller bypass saturation */ 1068 static const struct pmc_masks k8_mask_nmcbs[] = { 1069 __K8MASK(memory-controller-hi-pri-bypass, 0), 1070 __K8MASK(memory-controller-lo-pri-bypass, 1), 1071 __K8MASK(dram-controller-interface-bypass, 2), 1072 __K8MASK(dram-controller-queue-bypass, 3), 1073 NULLMASK 1074 }; 1075 1076 /* nb sized commands */ 1077 static const struct pmc_masks k8_mask_nsc[] = { 1078 __K8MASK(nonpostwrszbyte, 0), 1079 __K8MASK(nonpostwrszdword, 1), 1080 __K8MASK(postwrszbyte, 2), 1081 __K8MASK(postwrszdword, 3), 1082 __K8MASK(rdszbyte, 4), 1083 __K8MASK(rdszdword, 5), 1084 __K8MASK(rdmodwr, 6), 1085 NULLMASK 1086 }; 1087 1088 /* nb probe result */ 1089 static const struct pmc_masks k8_mask_npr[] = { 1090 __K8MASK(probe-miss, 0), 1091 __K8MASK(probe-hit, 1), 1092 __K8MASK(probe-hit-dirty-no-memory-cancel, 2), 1093 __K8MASK(probe-hit-dirty-with-memory-cancel, 3), 1094 NULLMASK 1095 }; 1096 1097 /* nb hypertransport bus bandwidth */ 1098 static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */ 1099 __K8MASK(command, 0), 1100 __K8MASK(data, 1), 1101 __K8MASK(buffer-release, 2), 1102 __K8MASK(nop, 3), 1103 NULLMASK 1104 }; 1105 1106 #undef __K8MASK 1107 1108 #define K8_KW_COUNT "count" 1109 #define K8_KW_EDGE "edge" 1110 #define K8_KW_INV "inv" 1111 #define K8_KW_MASK "mask" 1112 #define K8_KW_OS "os" 1113 #define K8_KW_USR "usr" 1114 1115 static int 1116 k8_allocate_pmc(enum pmc_event pe, char *ctrspec, 1117 struct pmc_op_pmcallocate *pmc_config) 1118 { 1119 char *e, *p, *q; 1120 int n; 1121 uint32_t count; 1122 uint64_t evmask; 1123 const struct pmc_masks *pm, *pmask; 1124 1125 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 1126 pmc_config->pm_md.pm_amd.pm_amd_config = 0; 1127 1128 pmask = NULL; 1129 evmask = 0; 1130 1131 #define __K8SETMASK(M) pmask = k8_mask_##M 1132 1133 /* setup parsing 
tables */ 1134 switch (pe) { 1135 case PMC_EV_K8_FP_DISPATCHED_FPU_OPS: 1136 __K8SETMASK(fdfo); 1137 break; 1138 case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD: 1139 __K8SETMASK(lsrl); 1140 break; 1141 case PMC_EV_K8_LS_LOCKED_OPERATION: 1142 __K8SETMASK(llo); 1143 break; 1144 case PMC_EV_K8_DC_REFILL_FROM_L2: 1145 case PMC_EV_K8_DC_REFILL_FROM_SYSTEM: 1146 case PMC_EV_K8_DC_COPYBACK: 1147 __K8SETMASK(dc); 1148 break; 1149 case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR: 1150 __K8SETMASK(dobee); 1151 break; 1152 case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS: 1153 __K8SETMASK(ddpi); 1154 break; 1155 case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS: 1156 __K8SETMASK(dabl); 1157 break; 1158 case PMC_EV_K8_BU_INTERNAL_L2_REQUEST: 1159 __K8SETMASK(bilr); 1160 break; 1161 case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS: 1162 __K8SETMASK(bfrlm); 1163 break; 1164 case PMC_EV_K8_BU_FILL_INTO_L2: 1165 __K8SETMASK(bfil); 1166 break; 1167 case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS: 1168 __K8SETMASK(frfi); 1169 break; 1170 case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS: 1171 __K8SETMASK(frfdoi); 1172 break; 1173 case PMC_EV_K8_FR_FPU_EXCEPTIONS: 1174 __K8SETMASK(ffe); 1175 break; 1176 case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT: 1177 __K8SETMASK(nmcpae); 1178 break; 1179 case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND: 1180 __K8SETMASK(nmct); 1181 break; 1182 case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION: 1183 __K8SETMASK(nmcbs); 1184 break; 1185 case PMC_EV_K8_NB_SIZED_COMMANDS: 1186 __K8SETMASK(nsc); 1187 break; 1188 case PMC_EV_K8_NB_PROBE_RESULT: 1189 __K8SETMASK(npr); 1190 break; 1191 case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH: 1192 case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH: 1193 case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH: 1194 __K8SETMASK(nhbb); 1195 break; 1196 1197 default: 1198 break; /* no options defined */ 1199 } 1200 1201 while ((p = strsep(&ctrspec, ",")) != NULL) { 1202 if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) { 1203 q = strchr(p, '='); 1204 if (*++q == '\0') /* skip '=' */ 1205 return 
(-1);

			/*
			 * "count=<n>": numeric threshold for the counter.
			 * Reject empty or trailing-garbage input; any base
			 * accepted by strtol(,,0) (decimal/octal/hex) works.
			 */
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			/*
			 * NOTE(review): 'count' is uint32_t assigned from
			 * strtol(); negative input wraps silently — confirm
			 * AMD_PMC_TO_COUNTER masks it appropriately.
			 */
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K8_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K8_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
			/* "mask=..." — accumulate unit-mask bits into evmask. */
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, K8_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, K8_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);	/* unrecognized keyword */
	}

	/* other post processing */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		/* XXX only available in rev B and later */
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		/* XXX only available in rev C and later */
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		/* XXX CPU Rev A,B evmask is to be zero */
		if (evmask & (evmask - 1)) /* > 1 bit set */
			return (-1);
		if (evmask == 0) {
			evmask = 0x01; /* Rev C and later: #instrs */
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
		break;
	default:
		/*
		 * For events with a mask table but no "mask=" keyword
		 * given, default to counting everything: OR together all
		 * defined unit-mask bits.
		 */
		if (evmask == 0 && pmask != NULL) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
	}

	/* Fold the accumulated unit mask into the hardware config word. */
	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_amd.pm_amd_config =
		    AMD_PMC_TO_UNITMASK(evmask);

	return (0);
}

#endif

#if defined(__amd64__)
|| defined(__i386__) 1271 1272 /* 1273 * Intel P4 PMCs 1274 */ 1275 1276 static struct pmc_event_alias p4_aliases[] = { 1277 EV_ALIAS("branches", "p4-branch-retired,mask=mmtp+mmtm"), 1278 EV_ALIAS("branch-mispredicts", "p4-mispred-branch-retired"), 1279 EV_ALIAS("cycles", "tsc"), 1280 EV_ALIAS("instructions", 1281 "p4-instr-retired,mask=nbogusntag+nbogustag"), 1282 EV_ALIAS("unhalted-cycles", "p4-global-power-events"), 1283 EV_ALIAS(NULL, NULL) 1284 }; 1285 1286 #define P4_KW_ACTIVE "active" 1287 #define P4_KW_ACTIVE_ANY "any" 1288 #define P4_KW_ACTIVE_BOTH "both" 1289 #define P4_KW_ACTIVE_NONE "none" 1290 #define P4_KW_ACTIVE_SINGLE "single" 1291 #define P4_KW_BUSREQTYPE "busreqtype" 1292 #define P4_KW_CASCADE "cascade" 1293 #define P4_KW_EDGE "edge" 1294 #define P4_KW_INV "complement" 1295 #define P4_KW_OS "os" 1296 #define P4_KW_MASK "mask" 1297 #define P4_KW_PRECISE "precise" 1298 #define P4_KW_TAG "tag" 1299 #define P4_KW_THRESHOLD "threshold" 1300 #define P4_KW_USR "usr" 1301 1302 #define __P4MASK(N,V) PMCMASK(N, (1 << (V))) 1303 1304 static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */ 1305 __P4MASK(dd, 0), 1306 __P4MASK(db, 1), 1307 __P4MASK(di, 2), 1308 __P4MASK(bd, 3), 1309 __P4MASK(bb, 4), 1310 __P4MASK(bi, 5), 1311 __P4MASK(id, 6), 1312 __P4MASK(ib, 7), 1313 NULLMASK 1314 }; 1315 1316 static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */ 1317 __P4MASK(tcmiss, 0), 1318 NULLMASK, 1319 }; 1320 1321 static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */ 1322 __P4MASK(hit, 0), 1323 __P4MASK(miss, 1), 1324 __P4MASK(hit-uc, 2), 1325 NULLMASK 1326 }; 1327 1328 static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */ 1329 __P4MASK(st-rb-full, 2), 1330 __P4MASK(64k-conf, 3), 1331 NULLMASK 1332 }; 1333 1334 static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */ 1335 __P4MASK(lsc, 0), 1336 __P4MASK(ssc, 1), 1337 NULLMASK 1338 }; 1339 1340 static const struct pmc_masks p4_mask_lpr[] = 
{ /* load port replay */ 1341 __P4MASK(split-ld, 1), 1342 NULLMASK 1343 }; 1344 1345 static const struct pmc_masks p4_mask_spr[] = { /* store port replay */ 1346 __P4MASK(split-st, 1), 1347 NULLMASK 1348 }; 1349 1350 static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */ 1351 __P4MASK(no-sta, 1), 1352 __P4MASK(no-std, 3), 1353 __P4MASK(partial-data, 4), 1354 __P4MASK(unalgn-addr, 5), 1355 NULLMASK 1356 }; 1357 1358 static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */ 1359 __P4MASK(dtmiss, 0), 1360 __P4MASK(itmiss, 1), 1361 NULLMASK 1362 }; 1363 1364 static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */ 1365 __P4MASK(rd-2ndl-hits, 0), 1366 __P4MASK(rd-2ndl-hite, 1), 1367 __P4MASK(rd-2ndl-hitm, 2), 1368 __P4MASK(rd-3rdl-hits, 3), 1369 __P4MASK(rd-3rdl-hite, 4), 1370 __P4MASK(rd-3rdl-hitm, 5), 1371 __P4MASK(rd-2ndl-miss, 8), 1372 __P4MASK(rd-3rdl-miss, 9), 1373 __P4MASK(wr-2ndl-miss, 10), 1374 NULLMASK 1375 }; 1376 1377 static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */ 1378 __P4MASK(all-read, 5), 1379 __P4MASK(all-write, 6), 1380 __P4MASK(mem-uc, 7), 1381 __P4MASK(mem-wc, 8), 1382 __P4MASK(mem-wt, 9), 1383 __P4MASK(mem-wp, 10), 1384 __P4MASK(mem-wb, 11), 1385 __P4MASK(own, 13), 1386 __P4MASK(other, 14), 1387 __P4MASK(prefetch, 15), 1388 NULLMASK 1389 }; 1390 1391 static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */ 1392 __P4MASK(all-read, 5), 1393 __P4MASK(all-write, 6), 1394 __P4MASK(mem-uc, 7), 1395 __P4MASK(mem-wc, 8), 1396 __P4MASK(mem-wt, 9), 1397 __P4MASK(mem-wp, 10), 1398 __P4MASK(mem-wb, 11), 1399 __P4MASK(own, 13), 1400 __P4MASK(other, 14), 1401 __P4MASK(prefetch, 15), 1402 NULLMASK 1403 }; 1404 1405 static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */ 1406 __P4MASK(drdy-drv, 0), 1407 __P4MASK(drdy-own, 1), 1408 __P4MASK(drdy-other, 2), 1409 __P4MASK(dbsy-drv, 3), 1410 __P4MASK(dbsy-own, 4), 1411 __P4MASK(dbsy-other, 5), 1412 NULLMASK 1413 }; 1414 1415 
static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */ 1416 __P4MASK(req-type0, 0), 1417 __P4MASK(req-type1, 1), 1418 __P4MASK(req-len0, 2), 1419 __P4MASK(req-len1, 3), 1420 __P4MASK(req-io-type, 5), 1421 __P4MASK(req-lock-type, 6), 1422 __P4MASK(req-cache-type, 7), 1423 __P4MASK(req-split-type, 8), 1424 __P4MASK(req-dem-type, 9), 1425 __P4MASK(req-ord-type, 10), 1426 __P4MASK(mem-type0, 11), 1427 __P4MASK(mem-type1, 12), 1428 __P4MASK(mem-type2, 13), 1429 NULLMASK 1430 }; 1431 1432 static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */ 1433 __P4MASK(all, 15), 1434 NULLMASK 1435 }; 1436 1437 static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */ 1438 __P4MASK(all, 15), 1439 NULLMASK 1440 }; 1441 1442 static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */ 1443 __P4MASK(all, 15), 1444 NULLMASK 1445 }; 1446 1447 static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */ 1448 __P4MASK(all, 15), 1449 NULLMASK 1450 }; 1451 1452 static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */ 1453 __P4MASK(all, 15), 1454 NULLMASK 1455 }; 1456 1457 static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */ 1458 __P4MASK(all, 15), 1459 NULLMASK 1460 }; 1461 1462 static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */ 1463 __P4MASK(all, 15), 1464 NULLMASK 1465 }; 1466 1467 static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */ 1468 __P4MASK(all, 15), 1469 NULLMASK 1470 }; 1471 1472 static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */ 1473 __P4MASK(allp0, 3), 1474 __P4MASK(allp2, 4), 1475 NULLMASK 1476 }; 1477 1478 static const struct pmc_masks p4_mask_gpe[] = { /* global power events */ 1479 __P4MASK(running, 0), 1480 NULLMASK 1481 }; 1482 1483 static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */ 1484 __P4MASK(cisc, 0), 1485 NULLMASK 1486 }; 1487 1488 static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */ 1489 
__P4MASK(from-tc-build, 0), 1490 __P4MASK(from-tc-deliver, 1), 1491 __P4MASK(from-rom, 2), 1492 NULLMASK 1493 }; 1494 1495 static const struct pmc_masks p4_mask_rmbt[] = { 1496 /* retired mispred branch type */ 1497 __P4MASK(conditional, 1), 1498 __P4MASK(call, 2), 1499 __P4MASK(return, 3), 1500 __P4MASK(indirect, 4), 1501 NULLMASK 1502 }; 1503 1504 static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */ 1505 __P4MASK(conditional, 1), 1506 __P4MASK(call, 2), 1507 __P4MASK(retired, 3), 1508 __P4MASK(indirect, 4), 1509 NULLMASK 1510 }; 1511 1512 static const struct pmc_masks p4_mask_rs[] = { /* resource stall */ 1513 __P4MASK(sbfull, 5), 1514 NULLMASK 1515 }; 1516 1517 static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */ 1518 __P4MASK(wcb-evicts, 0), 1519 __P4MASK(wcb-full-evict, 1), 1520 NULLMASK 1521 }; 1522 1523 static const struct pmc_masks p4_mask_fee[] = { /* front end event */ 1524 __P4MASK(nbogus, 0), 1525 __P4MASK(bogus, 1), 1526 NULLMASK 1527 }; 1528 1529 static const struct pmc_masks p4_mask_ee[] = { /* execution event */ 1530 __P4MASK(nbogus0, 0), 1531 __P4MASK(nbogus1, 1), 1532 __P4MASK(nbogus2, 2), 1533 __P4MASK(nbogus3, 3), 1534 __P4MASK(bogus0, 4), 1535 __P4MASK(bogus1, 5), 1536 __P4MASK(bogus2, 6), 1537 __P4MASK(bogus3, 7), 1538 NULLMASK 1539 }; 1540 1541 static const struct pmc_masks p4_mask_re[] = { /* replay event */ 1542 __P4MASK(nbogus, 0), 1543 __P4MASK(bogus, 1), 1544 NULLMASK 1545 }; 1546 1547 static const struct pmc_masks p4_mask_insret[] = { /* instr retired */ 1548 __P4MASK(nbogusntag, 0), 1549 __P4MASK(nbogustag, 1), 1550 __P4MASK(bogusntag, 2), 1551 __P4MASK(bogustag, 3), 1552 NULLMASK 1553 }; 1554 1555 static const struct pmc_masks p4_mask_ur[] = { /* uops retired */ 1556 __P4MASK(nbogus, 0), 1557 __P4MASK(bogus, 1), 1558 NULLMASK 1559 }; 1560 1561 static const struct pmc_masks p4_mask_ut[] = { /* uop type */ 1562 __P4MASK(tagloads, 1), 1563 __P4MASK(tagstores, 2), 1564 NULLMASK 1565 }; 1566 1567 static const 
struct pmc_masks p4_mask_br[] = { /* branch retired */ 1568 __P4MASK(mmnp, 0), 1569 __P4MASK(mmnm, 1), 1570 __P4MASK(mmtp, 2), 1571 __P4MASK(mmtm, 3), 1572 NULLMASK 1573 }; 1574 1575 static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */ 1576 __P4MASK(nbogus, 0), 1577 NULLMASK 1578 }; 1579 1580 static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */ 1581 __P4MASK(fpsu, 0), 1582 __P4MASK(fpso, 1), 1583 __P4MASK(poao, 2), 1584 __P4MASK(poau, 3), 1585 __P4MASK(prea, 4), 1586 NULLMASK 1587 }; 1588 1589 static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */ 1590 __P4MASK(clear, 0), 1591 __P4MASK(moclear, 2), 1592 __P4MASK(smclear, 3), 1593 NULLMASK 1594 }; 1595 1596 /* P4 event parser */ 1597 static int 1598 p4_allocate_pmc(enum pmc_event pe, char *ctrspec, 1599 struct pmc_op_pmcallocate *pmc_config) 1600 { 1601 1602 char *e, *p, *q; 1603 int count, has_tag, has_busreqtype, n; 1604 uint32_t cccractivemask; 1605 uint64_t evmask; 1606 const struct pmc_masks *pm, *pmask; 1607 1608 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 1609 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig = 1610 pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0; 1611 1612 pmask = NULL; 1613 evmask = 0; 1614 cccractivemask = 0x3; 1615 has_tag = has_busreqtype = 0; 1616 1617 #define __P4SETMASK(M) do { \ 1618 pmask = p4_mask_##M; \ 1619 } while (0) 1620 1621 switch (pe) { 1622 case PMC_EV_P4_TC_DELIVER_MODE: 1623 __P4SETMASK(tcdm); 1624 break; 1625 case PMC_EV_P4_BPU_FETCH_REQUEST: 1626 __P4SETMASK(bfr); 1627 break; 1628 case PMC_EV_P4_ITLB_REFERENCE: 1629 __P4SETMASK(ir); 1630 break; 1631 case PMC_EV_P4_MEMORY_CANCEL: 1632 __P4SETMASK(memcan); 1633 break; 1634 case PMC_EV_P4_MEMORY_COMPLETE: 1635 __P4SETMASK(memcomp); 1636 break; 1637 case PMC_EV_P4_LOAD_PORT_REPLAY: 1638 __P4SETMASK(lpr); 1639 break; 1640 case PMC_EV_P4_STORE_PORT_REPLAY: 1641 __P4SETMASK(spr); 1642 break; 1643 case PMC_EV_P4_MOB_LOAD_REPLAY: 1644 __P4SETMASK(mlr); 1645 break; 1646 case 
PMC_EV_P4_PAGE_WALK_TYPE: 1647 __P4SETMASK(pwt); 1648 break; 1649 case PMC_EV_P4_BSQ_CACHE_REFERENCE: 1650 __P4SETMASK(bcr); 1651 break; 1652 case PMC_EV_P4_IOQ_ALLOCATION: 1653 __P4SETMASK(ia); 1654 has_busreqtype = 1; 1655 break; 1656 case PMC_EV_P4_IOQ_ACTIVE_ENTRIES: 1657 __P4SETMASK(iae); 1658 has_busreqtype = 1; 1659 break; 1660 case PMC_EV_P4_FSB_DATA_ACTIVITY: 1661 __P4SETMASK(fda); 1662 break; 1663 case PMC_EV_P4_BSQ_ALLOCATION: 1664 __P4SETMASK(ba); 1665 break; 1666 case PMC_EV_P4_SSE_INPUT_ASSIST: 1667 __P4SETMASK(sia); 1668 break; 1669 case PMC_EV_P4_PACKED_SP_UOP: 1670 __P4SETMASK(psu); 1671 break; 1672 case PMC_EV_P4_PACKED_DP_UOP: 1673 __P4SETMASK(pdu); 1674 break; 1675 case PMC_EV_P4_SCALAR_SP_UOP: 1676 __P4SETMASK(ssu); 1677 break; 1678 case PMC_EV_P4_SCALAR_DP_UOP: 1679 __P4SETMASK(sdu); 1680 break; 1681 case PMC_EV_P4_64BIT_MMX_UOP: 1682 __P4SETMASK(64bmu); 1683 break; 1684 case PMC_EV_P4_128BIT_MMX_UOP: 1685 __P4SETMASK(128bmu); 1686 break; 1687 case PMC_EV_P4_X87_FP_UOP: 1688 __P4SETMASK(xfu); 1689 break; 1690 case PMC_EV_P4_X87_SIMD_MOVES_UOP: 1691 __P4SETMASK(xsmu); 1692 break; 1693 case PMC_EV_P4_GLOBAL_POWER_EVENTS: 1694 __P4SETMASK(gpe); 1695 break; 1696 case PMC_EV_P4_TC_MS_XFER: 1697 __P4SETMASK(tmx); 1698 break; 1699 case PMC_EV_P4_UOP_QUEUE_WRITES: 1700 __P4SETMASK(uqw); 1701 break; 1702 case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE: 1703 __P4SETMASK(rmbt); 1704 break; 1705 case PMC_EV_P4_RETIRED_BRANCH_TYPE: 1706 __P4SETMASK(rbt); 1707 break; 1708 case PMC_EV_P4_RESOURCE_STALL: 1709 __P4SETMASK(rs); 1710 break; 1711 case PMC_EV_P4_WC_BUFFER: 1712 __P4SETMASK(wb); 1713 break; 1714 case PMC_EV_P4_BSQ_ACTIVE_ENTRIES: 1715 case PMC_EV_P4_B2B_CYCLES: 1716 case PMC_EV_P4_BNR: 1717 case PMC_EV_P4_SNOOP: 1718 case PMC_EV_P4_RESPONSE: 1719 break; 1720 case PMC_EV_P4_FRONT_END_EVENT: 1721 __P4SETMASK(fee); 1722 break; 1723 case PMC_EV_P4_EXECUTION_EVENT: 1724 __P4SETMASK(ee); 1725 break; 1726 case PMC_EV_P4_REPLAY_EVENT: 1727 __P4SETMASK(re); 1728 
break; 1729 case PMC_EV_P4_INSTR_RETIRED: 1730 __P4SETMASK(insret); 1731 break; 1732 case PMC_EV_P4_UOPS_RETIRED: 1733 __P4SETMASK(ur); 1734 break; 1735 case PMC_EV_P4_UOP_TYPE: 1736 __P4SETMASK(ut); 1737 break; 1738 case PMC_EV_P4_BRANCH_RETIRED: 1739 __P4SETMASK(br); 1740 break; 1741 case PMC_EV_P4_MISPRED_BRANCH_RETIRED: 1742 __P4SETMASK(mbr); 1743 break; 1744 case PMC_EV_P4_X87_ASSIST: 1745 __P4SETMASK(xa); 1746 break; 1747 case PMC_EV_P4_MACHINE_CLEAR: 1748 __P4SETMASK(machclr); 1749 break; 1750 default: 1751 return (-1); 1752 } 1753 1754 /* process additional flags */ 1755 while ((p = strsep(&ctrspec, ",")) != NULL) { 1756 if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) { 1757 q = strchr(p, '='); 1758 if (*++q == '\0') /* skip '=' */ 1759 return (-1); 1760 1761 if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0) 1762 cccractivemask = 0x0; 1763 else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0) 1764 cccractivemask = 0x1; 1765 else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0) 1766 cccractivemask = 0x2; 1767 else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0) 1768 cccractivemask = 0x3; 1769 else 1770 return (-1); 1771 1772 } else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) { 1773 if (has_busreqtype == 0) 1774 return (-1); 1775 1776 q = strchr(p, '='); 1777 if (*++q == '\0') /* skip '=' */ 1778 return (-1); 1779 1780 count = strtol(q, &e, 0); 1781 if (e == q || *e != '\0') 1782 return (-1); 1783 evmask = (evmask & ~0x1F) | (count & 0x1F); 1784 } else if (KWMATCH(p, P4_KW_CASCADE)) 1785 pmc_config->pm_caps |= PMC_CAP_CASCADE; 1786 else if (KWMATCH(p, P4_KW_EDGE)) 1787 pmc_config->pm_caps |= PMC_CAP_EDGE; 1788 else if (KWMATCH(p, P4_KW_INV)) 1789 pmc_config->pm_caps |= PMC_CAP_INVERT; 1790 else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) { 1791 if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0) 1792 return (-1); 1793 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 1794 } else if (KWMATCH(p, P4_KW_OS)) 1795 pmc_config->pm_caps |= PMC_CAP_SYSTEM; 1796 else if (KWMATCH(p, P4_KW_PRECISE)) 1797 pmc_config->pm_caps 
|= PMC_CAP_PRECISE; 1798 else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) { 1799 if (has_tag == 0) 1800 return (-1); 1801 1802 q = strchr(p, '='); 1803 if (*++q == '\0') /* skip '=' */ 1804 return (-1); 1805 1806 count = strtol(q, &e, 0); 1807 if (e == q || *e != '\0') 1808 return (-1); 1809 1810 pmc_config->pm_caps |= PMC_CAP_TAGGING; 1811 pmc_config->pm_md.pm_p4.pm_p4_escrconfig |= 1812 P4_ESCR_TO_TAG_VALUE(count); 1813 } else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) { 1814 q = strchr(p, '='); 1815 if (*++q == '\0') /* skip '=' */ 1816 return (-1); 1817 1818 count = strtol(q, &e, 0); 1819 if (e == q || *e != '\0') 1820 return (-1); 1821 1822 pmc_config->pm_caps |= PMC_CAP_THRESHOLD; 1823 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &= 1824 ~P4_CCCR_THRESHOLD_MASK; 1825 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |= 1826 P4_CCCR_TO_THRESHOLD(count); 1827 } else if (KWMATCH(p, P4_KW_USR)) 1828 pmc_config->pm_caps |= PMC_CAP_USER; 1829 else 1830 return (-1); 1831 } 1832 1833 /* other post processing */ 1834 if (pe == PMC_EV_P4_IOQ_ALLOCATION || 1835 pe == PMC_EV_P4_FSB_DATA_ACTIVITY || 1836 pe == PMC_EV_P4_BSQ_ALLOCATION) 1837 pmc_config->pm_caps |= PMC_CAP_EDGE; 1838 1839 /* fill in thread activity mask */ 1840 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |= 1841 P4_CCCR_TO_ACTIVE_THREAD(cccractivemask); 1842 1843 if (evmask) 1844 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 1845 1846 switch (pe) { 1847 case PMC_EV_P4_FSB_DATA_ACTIVITY: 1848 if ((evmask & 0x06) == 0x06 || 1849 (evmask & 0x18) == 0x18) 1850 return (-1); /* can't have own+other bits together */ 1851 if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */ 1852 evmask = 0x1D; 1853 break; 1854 case PMC_EV_P4_MACHINE_CLEAR: 1855 /* only one bit is allowed to be set */ 1856 if ((evmask & (evmask - 1)) != 0) 1857 return (-1); 1858 if (evmask == 0) { 1859 evmask = 0x1; /* 'CLEAR' */ 1860 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 1861 } 1862 break; 1863 default: 1864 if (evmask == 0 && pmask) { 1865 for (pm = pmask; 
pm->pm_name; pm++) 1866 evmask |= pm->pm_value; 1867 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 1868 } 1869 } 1870 1871 pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 1872 P4_ESCR_TO_EVENT_MASK(evmask); 1873 1874 return (0); 1875 } 1876 1877 #endif 1878 1879 #if defined(__i386__) 1880 1881 /* 1882 * Pentium style PMCs 1883 */ 1884 1885 static struct pmc_event_alias p5_aliases[] = { 1886 EV_ALIAS("branches", "p5-taken-branches"), 1887 EV_ALIAS("cycles", "tsc"), 1888 EV_ALIAS("dc-misses", "p5-data-read-miss-or-write-miss"), 1889 EV_ALIAS("ic-misses", "p5-code-cache-miss"), 1890 EV_ALIAS("instructions", "p5-instructions-executed"), 1891 EV_ALIAS("interrupts", "p5-hardware-interrupts"), 1892 EV_ALIAS("unhalted-cycles", 1893 "p5-number-of-cycles-not-in-halt-state"), 1894 EV_ALIAS(NULL, NULL) 1895 }; 1896 1897 static int 1898 p5_allocate_pmc(enum pmc_event pe, char *ctrspec, 1899 struct pmc_op_pmcallocate *pmc_config) 1900 { 1901 return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */ 1902 } 1903 1904 /* 1905 * Pentium Pro style PMCs. These PMCs are found in Pentium II, Pentium III, 1906 * and Pentium M CPUs. 
1907 */ 1908 1909 static struct pmc_event_alias p6_aliases[] = { 1910 EV_ALIAS("branches", "p6-br-inst-retired"), 1911 EV_ALIAS("branch-mispredicts", "p6-br-miss-pred-retired"), 1912 EV_ALIAS("cycles", "tsc"), 1913 EV_ALIAS("dc-misses", "p6-dcu-lines-in"), 1914 EV_ALIAS("ic-misses", "p6-ifu-fetch-miss"), 1915 EV_ALIAS("instructions", "p6-inst-retired"), 1916 EV_ALIAS("interrupts", "p6-hw-int-rx"), 1917 EV_ALIAS("unhalted-cycles", "p6-cpu-clk-unhalted"), 1918 EV_ALIAS(NULL, NULL) 1919 }; 1920 1921 #define P6_KW_CMASK "cmask" 1922 #define P6_KW_EDGE "edge" 1923 #define P6_KW_INV "inv" 1924 #define P6_KW_OS "os" 1925 #define P6_KW_UMASK "umask" 1926 #define P6_KW_USR "usr" 1927 1928 static struct pmc_masks p6_mask_mesi[] = { 1929 PMCMASK(m, 0x01), 1930 PMCMASK(e, 0x02), 1931 PMCMASK(s, 0x04), 1932 PMCMASK(i, 0x08), 1933 NULLMASK 1934 }; 1935 1936 static struct pmc_masks p6_mask_mesihw[] = { 1937 PMCMASK(m, 0x01), 1938 PMCMASK(e, 0x02), 1939 PMCMASK(s, 0x04), 1940 PMCMASK(i, 0x08), 1941 PMCMASK(nonhw, 0x00), 1942 PMCMASK(hw, 0x10), 1943 PMCMASK(both, 0x30), 1944 NULLMASK 1945 }; 1946 1947 static struct pmc_masks p6_mask_hw[] = { 1948 PMCMASK(nonhw, 0x00), 1949 PMCMASK(hw, 0x10), 1950 PMCMASK(both, 0x30), 1951 NULLMASK 1952 }; 1953 1954 static struct pmc_masks p6_mask_any[] = { 1955 PMCMASK(self, 0x00), 1956 PMCMASK(any, 0x20), 1957 NULLMASK 1958 }; 1959 1960 static struct pmc_masks p6_mask_ekp[] = { 1961 PMCMASK(nta, 0x00), 1962 PMCMASK(t1, 0x01), 1963 PMCMASK(t2, 0x02), 1964 PMCMASK(wos, 0x03), 1965 NULLMASK 1966 }; 1967 1968 static struct pmc_masks p6_mask_pps[] = { 1969 PMCMASK(packed-and-scalar, 0x00), 1970 PMCMASK(scalar, 0x01), 1971 NULLMASK 1972 }; 1973 1974 static struct pmc_masks p6_mask_mite[] = { 1975 PMCMASK(packed-multiply, 0x01), 1976 PMCMASK(packed-shift, 0x02), 1977 PMCMASK(pack, 0x04), 1978 PMCMASK(unpack, 0x08), 1979 PMCMASK(packed-logical, 0x10), 1980 PMCMASK(packed-arithmetic, 0x20), 1981 NULLMASK 1982 }; 1983 1984 static struct pmc_masks 
p6_mask_fmt[] = { 1985 PMCMASK(mmxtofp, 0x00), 1986 PMCMASK(fptommx, 0x01), 1987 NULLMASK 1988 }; 1989 1990 static struct pmc_masks p6_mask_sr[] = { 1991 PMCMASK(es, 0x01), 1992 PMCMASK(ds, 0x02), 1993 PMCMASK(fs, 0x04), 1994 PMCMASK(gs, 0x08), 1995 NULLMASK 1996 }; 1997 1998 static struct pmc_masks p6_mask_eet[] = { 1999 PMCMASK(all, 0x00), 2000 PMCMASK(freq, 0x02), 2001 NULLMASK 2002 }; 2003 2004 static struct pmc_masks p6_mask_efur[] = { 2005 PMCMASK(all, 0x00), 2006 PMCMASK(loadop, 0x01), 2007 PMCMASK(stdsta, 0x02), 2008 NULLMASK 2009 }; 2010 2011 static struct pmc_masks p6_mask_essir[] = { 2012 PMCMASK(sse-packed-single, 0x00), 2013 PMCMASK(sse-packed-single-scalar-single, 0x01), 2014 PMCMASK(sse2-packed-double, 0x02), 2015 PMCMASK(sse2-scalar-double, 0x03), 2016 NULLMASK 2017 }; 2018 2019 static struct pmc_masks p6_mask_esscir[] = { 2020 PMCMASK(sse-packed-single, 0x00), 2021 PMCMASK(sse-scalar-single, 0x01), 2022 PMCMASK(sse2-packed-double, 0x02), 2023 PMCMASK(sse2-scalar-double, 0x03), 2024 NULLMASK 2025 }; 2026 2027 /* P6 event parser */ 2028 static int 2029 p6_allocate_pmc(enum pmc_event pe, char *ctrspec, 2030 struct pmc_op_pmcallocate *pmc_config) 2031 { 2032 char *e, *p, *q; 2033 uint64_t evmask; 2034 int count, n; 2035 const struct pmc_masks *pm, *pmask; 2036 2037 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 2038 pmc_config->pm_md.pm_ppro.pm_ppro_config = 0; 2039 2040 evmask = 0; 2041 2042 #define P6MASKSET(M) pmask = p6_mask_ ## M 2043 2044 switch(pe) { 2045 case PMC_EV_P6_L2_IFETCH: P6MASKSET(mesi); break; 2046 case PMC_EV_P6_L2_LD: P6MASKSET(mesi); break; 2047 case PMC_EV_P6_L2_ST: P6MASKSET(mesi); break; 2048 case PMC_EV_P6_L2_RQSTS: P6MASKSET(mesi); break; 2049 case PMC_EV_P6_BUS_DRDY_CLOCKS: 2050 case PMC_EV_P6_BUS_LOCK_CLOCKS: 2051 case PMC_EV_P6_BUS_TRAN_BRD: 2052 case PMC_EV_P6_BUS_TRAN_RFO: 2053 case PMC_EV_P6_BUS_TRANS_WB: 2054 case PMC_EV_P6_BUS_TRAN_IFETCH: 2055 case PMC_EV_P6_BUS_TRAN_INVAL: 2056 case PMC_EV_P6_BUS_TRAN_PWR: 
2057 case PMC_EV_P6_BUS_TRANS_P: 2058 case PMC_EV_P6_BUS_TRANS_IO: 2059 case PMC_EV_P6_BUS_TRAN_DEF: 2060 case PMC_EV_P6_BUS_TRAN_BURST: 2061 case PMC_EV_P6_BUS_TRAN_ANY: 2062 case PMC_EV_P6_BUS_TRAN_MEM: 2063 P6MASKSET(any); break; 2064 case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED: 2065 case PMC_EV_P6_EMON_KNI_PREF_MISS: 2066 P6MASKSET(ekp); break; 2067 case PMC_EV_P6_EMON_KNI_INST_RETIRED: 2068 case PMC_EV_P6_EMON_KNI_COMP_INST_RET: 2069 P6MASKSET(pps); break; 2070 case PMC_EV_P6_MMX_INSTR_TYPE_EXEC: 2071 P6MASKSET(mite); break; 2072 case PMC_EV_P6_FP_MMX_TRANS: 2073 P6MASKSET(fmt); break; 2074 case PMC_EV_P6_SEG_RENAME_STALLS: 2075 case PMC_EV_P6_SEG_REG_RENAMES: 2076 P6MASKSET(sr); break; 2077 case PMC_EV_P6_EMON_EST_TRANS: 2078 P6MASKSET(eet); break; 2079 case PMC_EV_P6_EMON_FUSED_UOPS_RET: 2080 P6MASKSET(efur); break; 2081 case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED: 2082 P6MASKSET(essir); break; 2083 case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED: 2084 P6MASKSET(esscir); break; 2085 default: 2086 pmask = NULL; 2087 break; 2088 } 2089 2090 /* Pentium M PMCs have a few events with different semantics */ 2091 if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) { 2092 if (pe == PMC_EV_P6_L2_LD || 2093 pe == PMC_EV_P6_L2_LINES_IN || 2094 pe == PMC_EV_P6_L2_LINES_OUT) 2095 P6MASKSET(mesihw); 2096 else if (pe == PMC_EV_P6_L2_M_LINES_OUTM) 2097 P6MASKSET(hw); 2098 } 2099 2100 /* Parse additional modifiers if present */ 2101 while ((p = strsep(&ctrspec, ",")) != NULL) { 2102 if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) { 2103 q = strchr(p, '='); 2104 if (*++q == '\0') /* skip '=' */ 2105 return (-1); 2106 count = strtol(q, &e, 0); 2107 if (e == q || *e != '\0') 2108 return (-1); 2109 pmc_config->pm_caps |= PMC_CAP_THRESHOLD; 2110 pmc_config->pm_md.pm_ppro.pm_ppro_config |= 2111 P6_EVSEL_TO_CMASK(count); 2112 } else if (KWMATCH(p, P6_KW_EDGE)) { 2113 pmc_config->pm_caps |= PMC_CAP_EDGE; 2114 } else if (KWMATCH(p, P6_KW_INV)) { 2115 pmc_config->pm_caps |= PMC_CAP_INVERT; 2116 } else if 
(KWMATCH(p, P6_KW_OS)) { 2117 pmc_config->pm_caps |= PMC_CAP_SYSTEM; 2118 } else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) { 2119 evmask = 0; 2120 if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0) 2121 return (-1); 2122 if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS || 2123 pe == PMC_EV_P6_BUS_LOCK_CLOCKS || 2124 pe == PMC_EV_P6_BUS_TRAN_BRD || 2125 pe == PMC_EV_P6_BUS_TRAN_RFO || 2126 pe == PMC_EV_P6_BUS_TRAN_IFETCH || 2127 pe == PMC_EV_P6_BUS_TRAN_INVAL || 2128 pe == PMC_EV_P6_BUS_TRAN_PWR || 2129 pe == PMC_EV_P6_BUS_TRAN_DEF || 2130 pe == PMC_EV_P6_BUS_TRAN_BURST || 2131 pe == PMC_EV_P6_BUS_TRAN_ANY || 2132 pe == PMC_EV_P6_BUS_TRAN_MEM || 2133 pe == PMC_EV_P6_BUS_TRANS_IO || 2134 pe == PMC_EV_P6_BUS_TRANS_P || 2135 pe == PMC_EV_P6_BUS_TRANS_WB || 2136 pe == PMC_EV_P6_EMON_EST_TRANS || 2137 pe == PMC_EV_P6_EMON_FUSED_UOPS_RET || 2138 pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET || 2139 pe == PMC_EV_P6_EMON_KNI_INST_RETIRED || 2140 pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED || 2141 pe == PMC_EV_P6_EMON_KNI_PREF_MISS || 2142 pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED || 2143 pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED || 2144 pe == PMC_EV_P6_FP_MMX_TRANS) 2145 && (n > 1)) /* Only one mask keyword is allowed. 
*/ 2146 return (-1); 2147 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 2148 } else if (KWMATCH(p, P6_KW_USR)) { 2149 pmc_config->pm_caps |= PMC_CAP_USER; 2150 } else 2151 return (-1); 2152 } 2153 2154 /* post processing */ 2155 switch (pe) { 2156 2157 /* 2158 * The following events default to an evmask of 0 2159 */ 2160 2161 /* default => 'self' */ 2162 case PMC_EV_P6_BUS_DRDY_CLOCKS: 2163 case PMC_EV_P6_BUS_LOCK_CLOCKS: 2164 case PMC_EV_P6_BUS_TRAN_BRD: 2165 case PMC_EV_P6_BUS_TRAN_RFO: 2166 case PMC_EV_P6_BUS_TRANS_WB: 2167 case PMC_EV_P6_BUS_TRAN_IFETCH: 2168 case PMC_EV_P6_BUS_TRAN_INVAL: 2169 case PMC_EV_P6_BUS_TRAN_PWR: 2170 case PMC_EV_P6_BUS_TRANS_P: 2171 case PMC_EV_P6_BUS_TRANS_IO: 2172 case PMC_EV_P6_BUS_TRAN_DEF: 2173 case PMC_EV_P6_BUS_TRAN_BURST: 2174 case PMC_EV_P6_BUS_TRAN_ANY: 2175 case PMC_EV_P6_BUS_TRAN_MEM: 2176 2177 /* default => 'nta' */ 2178 case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED: 2179 case PMC_EV_P6_EMON_KNI_PREF_MISS: 2180 2181 /* default => 'packed and scalar' */ 2182 case PMC_EV_P6_EMON_KNI_INST_RETIRED: 2183 case PMC_EV_P6_EMON_KNI_COMP_INST_RET: 2184 2185 /* default => 'mmx to fp transitions' */ 2186 case PMC_EV_P6_FP_MMX_TRANS: 2187 2188 /* default => 'SSE Packed Single' */ 2189 case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED: 2190 case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED: 2191 2192 /* default => 'all fused micro-ops' */ 2193 case PMC_EV_P6_EMON_FUSED_UOPS_RET: 2194 2195 /* default => 'all transitions' */ 2196 case PMC_EV_P6_EMON_EST_TRANS: 2197 break; 2198 2199 case PMC_EV_P6_MMX_UOPS_EXEC: 2200 evmask = 0x0F; /* only value allowed */ 2201 break; 2202 2203 default: 2204 /* 2205 * For all other events, set the default event mask 2206 * to a logical OR of all the allowed event mask bits. 
		 */
		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	/* Fold the accumulated unit mask into the P6 event select value. */
	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return (0);
}

#endif

#if defined(__i386__) || defined(__amd64__)
/*
 * Allocation support for the timestamp counter (TSC).
 *
 * Only PMC_EV_TSC_TSC is recognized and no event qualifiers are
 * accepted; the TSC may only be read.  Returns 0 on success, -1 on
 * error.
 */
static int
tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	if (pe != PMC_EV_TSC_TSC)
		return (-1);

	/* TSC events must be unqualified. */
	if (ctrspec && *ctrspec != '\0')
		return (-1);

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= PMC_CAP_READ;	/* read-only counter */

	return (0);
}
#endif

/* Aliases recognized for software (SOFT class) events. */
static struct pmc_event_alias generic_aliases[] = {
	EV_ALIAS("instructions", "SOFT-CLOCK.HARD"),
	EV_ALIAS(NULL, NULL)
};

/*
 * Allocation support for software events.
 *
 * Accepts any event code in the PMC_EV_SOFT_* range; no event
 * qualifiers are parsed.  Returns 0 on success, -1 on error.
 */
static int
soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	(void)ctrspec;
	(void)pmc_config;

	if (pe < PMC_EV_SOFT_FIRST || pe > PMC_EV_SOFT_LAST)
		return (-1);

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	return (0);
}

#if defined(__XSCALE__)

/* Aliases recognized for Intel XScale events. */
static struct pmc_event_alias xscale_aliases[] = {
	EV_ALIAS("branches", "BRANCH_RETIRED"),
	EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"),
	EV_ALIAS("dc-misses", "DC_MISS"),
	EV_ALIAS("ic-misses", "IC_MISS"),
	EV_ALIAS("instructions", "INSTR_RETIRED"),
	EV_ALIAS(NULL, NULL)
};

/*
 * Allocation support for XScale events: every event is accepted
 * as-is and no qualifiers are supported.
 */
static int
xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

#if defined(__mips__)

static struct pmc_event_alias
mips24k_aliases[] = { 2290 EV_ALIAS("instructions", "INSTR_EXECUTED"), 2291 EV_ALIAS("branches", "BRANCH_COMPLETED"), 2292 EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"), 2293 EV_ALIAS(NULL, NULL) 2294 }; 2295 2296 static struct pmc_event_alias octeon_aliases[] = { 2297 EV_ALIAS("instructions", "RET"), 2298 EV_ALIAS("branches", "BR"), 2299 EV_ALIAS("branch-mispredicts", "BRMIS"), 2300 EV_ALIAS(NULL, NULL) 2301 }; 2302 2303 #define MIPS_KW_OS "os" 2304 #define MIPS_KW_USR "usr" 2305 #define MIPS_KW_ANYTHREAD "anythread" 2306 2307 static int 2308 mips_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, 2309 struct pmc_op_pmcallocate *pmc_config __unused) 2310 { 2311 char *p; 2312 2313 (void) pe; 2314 2315 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 2316 2317 while ((p = strsep(&ctrspec, ",")) != NULL) { 2318 if (KWMATCH(p, MIPS_KW_OS)) 2319 pmc_config->pm_caps |= PMC_CAP_SYSTEM; 2320 else if (KWMATCH(p, MIPS_KW_USR)) 2321 pmc_config->pm_caps |= PMC_CAP_USER; 2322 else if (KWMATCH(p, MIPS_KW_ANYTHREAD)) 2323 pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM); 2324 else 2325 return (-1); 2326 } 2327 2328 return (0); 2329 } 2330 2331 #endif /* __mips__ */ 2332 2333 #if defined(__powerpc__) 2334 2335 static struct pmc_event_alias ppc7450_aliases[] = { 2336 EV_ALIAS("instructions", "INSTR_COMPLETED"), 2337 EV_ALIAS("branches", "BRANCHES_COMPLETED"), 2338 EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCHES"), 2339 EV_ALIAS(NULL, NULL) 2340 }; 2341 2342 #define PPC7450_KW_OS "os" 2343 #define PPC7450_KW_USR "usr" 2344 #define PPC7450_KW_ANYTHREAD "anythread" 2345 2346 static int 2347 ppc7450_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, 2348 struct pmc_op_pmcallocate *pmc_config __unused) 2349 { 2350 char *p; 2351 2352 (void) pe; 2353 2354 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 2355 2356 while ((p = strsep(&ctrspec, ",")) != NULL) { 2357 if (KWMATCH(p, PPC7450_KW_OS)) 2358 pmc_config->pm_caps |= PMC_CAP_SYSTEM; 2359 else if 
(KWMATCH(p, PPC7450_KW_USR)) 2360 pmc_config->pm_caps |= PMC_CAP_USER; 2361 else if (KWMATCH(p, PPC7450_KW_ANYTHREAD)) 2362 pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM); 2363 else 2364 return (-1); 2365 } 2366 2367 return (0); 2368 } 2369 #endif /* __powerpc__ */ 2370 2371 2372 /* 2373 * Match an event name `name' with its canonical form. 2374 * 2375 * Matches are case insensitive and spaces, periods, underscores and 2376 * hyphen characters are considered to match each other. 2377 * 2378 * Returns 1 for a match, 0 otherwise. 2379 */ 2380 2381 static int 2382 pmc_match_event_name(const char *name, const char *canonicalname) 2383 { 2384 int cc, nc; 2385 const unsigned char *c, *n; 2386 2387 c = (const unsigned char *) canonicalname; 2388 n = (const unsigned char *) name; 2389 2390 for (; (nc = *n) && (cc = *c); n++, c++) { 2391 2392 if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') && 2393 (cc == ' ' || cc == '_' || cc == '-' || cc == '.')) 2394 continue; 2395 2396 if (toupper(nc) == toupper(cc)) 2397 continue; 2398 2399 2400 return (0); 2401 } 2402 2403 if (*n == '\0' && *c == '\0') 2404 return (1); 2405 2406 return (0); 2407 } 2408 2409 /* 2410 * Match an event name against all the event named supported by a 2411 * PMC class. 2412 * 2413 * Returns an event descriptor pointer on match or NULL otherwise. 
2414 */ 2415 static const struct pmc_event_descr * 2416 pmc_match_event_class(const char *name, 2417 const struct pmc_class_descr *pcd) 2418 { 2419 size_t n; 2420 const struct pmc_event_descr *ev; 2421 2422 ev = pcd->pm_evc_event_table; 2423 for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++) 2424 if (pmc_match_event_name(name, ev->pm_ev_name)) 2425 return (ev); 2426 2427 return (NULL); 2428 } 2429 2430 static int 2431 pmc_mdep_is_compatible_class(enum pmc_class pc) 2432 { 2433 size_t n; 2434 2435 for (n = 0; n < pmc_mdep_class_list_size; n++) 2436 if (pmc_mdep_class_list[n] == pc) 2437 return (1); 2438 return (0); 2439 } 2440 2441 /* 2442 * API entry points 2443 */ 2444 2445 int 2446 pmc_allocate(const char *ctrspec, enum pmc_mode mode, 2447 uint32_t flags, int cpu, pmc_id_t *pmcid) 2448 { 2449 size_t n; 2450 int retval; 2451 char *r, *spec_copy; 2452 const char *ctrname; 2453 const struct pmc_event_descr *ev; 2454 const struct pmc_event_alias *alias; 2455 struct pmc_op_pmcallocate pmc_config; 2456 const struct pmc_class_descr *pcd; 2457 2458 spec_copy = NULL; 2459 retval = -1; 2460 2461 if (mode != PMC_MODE_SS && mode != PMC_MODE_TS && 2462 mode != PMC_MODE_SC && mode != PMC_MODE_TC) { 2463 errno = EINVAL; 2464 goto out; 2465 } 2466 2467 /* replace an event alias with the canonical event specifier */ 2468 if (pmc_mdep_event_aliases) 2469 for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++) 2470 if (!strcasecmp(ctrspec, alias->pm_alias)) { 2471 spec_copy = strdup(alias->pm_spec); 2472 break; 2473 } 2474 2475 if (spec_copy == NULL) 2476 spec_copy = strdup(ctrspec); 2477 2478 r = spec_copy; 2479 ctrname = strsep(&r, ","); 2480 2481 /* 2482 * If a explicit class prefix was given by the user, restrict the 2483 * search for the event to the specified PMC class. 
2484 */ 2485 ev = NULL; 2486 for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) { 2487 pcd = pmc_class_table[n]; 2488 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) && 2489 strncasecmp(ctrname, pcd->pm_evc_name, 2490 pcd->pm_evc_name_size) == 0) { 2491 if ((ev = pmc_match_event_class(ctrname + 2492 pcd->pm_evc_name_size, pcd)) == NULL) { 2493 errno = EINVAL; 2494 goto out; 2495 } 2496 break; 2497 } 2498 } 2499 2500 /* 2501 * Otherwise, search for this event in all compatible PMC 2502 * classes. 2503 */ 2504 for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) { 2505 pcd = pmc_class_table[n]; 2506 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class)) 2507 ev = pmc_match_event_class(ctrname, pcd); 2508 } 2509 2510 if (ev == NULL) { 2511 errno = EINVAL; 2512 goto out; 2513 } 2514 2515 bzero(&pmc_config, sizeof(pmc_config)); 2516 pmc_config.pm_ev = ev->pm_ev_code; 2517 pmc_config.pm_class = pcd->pm_evc_class; 2518 pmc_config.pm_cpu = cpu; 2519 pmc_config.pm_mode = mode; 2520 pmc_config.pm_flags = flags; 2521 2522 if (PMC_IS_SAMPLING_MODE(mode)) 2523 pmc_config.pm_caps |= PMC_CAP_INTERRUPT; 2524 2525 if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) { 2526 errno = EINVAL; 2527 goto out; 2528 } 2529 2530 if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0) 2531 goto out; 2532 2533 *pmcid = pmc_config.pm_pmcid; 2534 2535 retval = 0; 2536 2537 out: 2538 if (spec_copy) 2539 free(spec_copy); 2540 2541 return (retval); 2542 } 2543 2544 int 2545 pmc_attach(pmc_id_t pmc, pid_t pid) 2546 { 2547 struct pmc_op_pmcattach pmc_attach_args; 2548 2549 pmc_attach_args.pm_pmc = pmc; 2550 pmc_attach_args.pm_pid = pid; 2551 2552 return (PMC_CALL(PMCATTACH, &pmc_attach_args)); 2553 } 2554 2555 int 2556 pmc_capabilities(pmc_id_t pmcid, uint32_t *caps) 2557 { 2558 unsigned int i; 2559 enum pmc_class cl; 2560 2561 cl = PMC_ID_TO_CLASS(pmcid); 2562 for (i = 0; i < cpu_info.pm_nclass; i++) 2563 if (cpu_info.pm_classes[i].pm_class == cl) { 2564 *caps = cpu_info.pm_classes[i].pm_caps; 2565 
return (0); 2566 } 2567 errno = EINVAL; 2568 return (-1); 2569 } 2570 2571 int 2572 pmc_configure_logfile(int fd) 2573 { 2574 struct pmc_op_configurelog cla; 2575 2576 cla.pm_logfd = fd; 2577 if (PMC_CALL(CONFIGURELOG, &cla) < 0) 2578 return (-1); 2579 return (0); 2580 } 2581 2582 int 2583 pmc_cpuinfo(const struct pmc_cpuinfo **pci) 2584 { 2585 if (pmc_syscall == -1) { 2586 errno = ENXIO; 2587 return (-1); 2588 } 2589 2590 *pci = &cpu_info; 2591 return (0); 2592 } 2593 2594 int 2595 pmc_detach(pmc_id_t pmc, pid_t pid) 2596 { 2597 struct pmc_op_pmcattach pmc_detach_args; 2598 2599 pmc_detach_args.pm_pmc = pmc; 2600 pmc_detach_args.pm_pid = pid; 2601 return (PMC_CALL(PMCDETACH, &pmc_detach_args)); 2602 } 2603 2604 int 2605 pmc_disable(int cpu, int pmc) 2606 { 2607 struct pmc_op_pmcadmin ssa; 2608 2609 ssa.pm_cpu = cpu; 2610 ssa.pm_pmc = pmc; 2611 ssa.pm_state = PMC_STATE_DISABLED; 2612 return (PMC_CALL(PMCADMIN, &ssa)); 2613 } 2614 2615 int 2616 pmc_enable(int cpu, int pmc) 2617 { 2618 struct pmc_op_pmcadmin ssa; 2619 2620 ssa.pm_cpu = cpu; 2621 ssa.pm_pmc = pmc; 2622 ssa.pm_state = PMC_STATE_FREE; 2623 return (PMC_CALL(PMCADMIN, &ssa)); 2624 } 2625 2626 /* 2627 * Return a list of events known to a given PMC class. 'cl' is the 2628 * PMC class identifier, 'eventnames' is the returned list of 'const 2629 * char *' pointers pointing to the names of the events. 'nevents' is 2630 * the number of event name pointers returned. 2631 * 2632 * The space for 'eventnames' is allocated using malloc(3). The caller 2633 * is responsible for freeing this space when done. 
 */
int
pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
    int *nevents)
{
	int count;
	const char **names;
	const struct pmc_event_descr *ev;

	/* Select the event table and its size for the requested class. */
	switch (cl)
	{
	case PMC_CLASS_IAF:
		ev = iaf_event_table;
		count = PMC_EVENT_TABLE_SIZE(iaf);
		break;
	case PMC_CLASS_IAP:
		/*
		 * Return the most appropriate set of event name
		 * spellings for the current CPU.
		 */
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_INTEL_ATOM:
			ev = atom_event_table;
			count = PMC_EVENT_TABLE_SIZE(atom);
			break;
		case PMC_CPU_INTEL_CORE:
			ev = core_event_table;
			count = PMC_EVENT_TABLE_SIZE(core);
			break;
		case PMC_CPU_INTEL_CORE2:
		case PMC_CPU_INTEL_CORE2EXTREME:
			ev = core2_event_table;
			count = PMC_EVENT_TABLE_SIZE(core2);
			break;
		case PMC_CPU_INTEL_COREI7:
			ev = corei7_event_table;
			count = PMC_EVENT_TABLE_SIZE(corei7);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE:
			ev = ivybridge_event_table;
			count = PMC_EVENT_TABLE_SIZE(ivybridge);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridge_event_table;
			count = PMC_EVENT_TABLE_SIZE(sandybridge);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmere_event_table;
			count = PMC_EVENT_TABLE_SIZE(westmere);
			break;
		}
		break;
	case PMC_CLASS_UCF:
		ev = ucf_event_table;
		count = PMC_EVENT_TABLE_SIZE(ucf);
		break;
	case PMC_CLASS_UCP:
		/*
		 * Return the most appropriate set of event name
		 * spellings for the current CPU.
		 */
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_INTEL_COREI7:
			ev = corei7uc_event_table;
			count = PMC_EVENT_TABLE_SIZE(corei7uc);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridgeuc_event_table;
			count = PMC_EVENT_TABLE_SIZE(sandybridgeuc);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmereuc_event_table;
			count = PMC_EVENT_TABLE_SIZE(westmereuc);
			break;
		}
		break;
	case PMC_CLASS_TSC:
		ev = tsc_event_table;
		count = PMC_EVENT_TABLE_SIZE(tsc);
		break;
	case PMC_CLASS_K7:
		ev = k7_event_table;
		count = PMC_EVENT_TABLE_SIZE(k7);
		break;
	case PMC_CLASS_K8:
		ev = k8_event_table;
		count = PMC_EVENT_TABLE_SIZE(k8);
		break;
	case PMC_CLASS_P4:
		ev = p4_event_table;
		count = PMC_EVENT_TABLE_SIZE(p4);
		break;
	case PMC_CLASS_P5:
		ev = p5_event_table;
		count = PMC_EVENT_TABLE_SIZE(p5);
		break;
	case PMC_CLASS_P6:
		ev = p6_event_table;
		count = PMC_EVENT_TABLE_SIZE(p6);
		break;
	case PMC_CLASS_XSCALE:
		ev = xscale_event_table;
		count = PMC_EVENT_TABLE_SIZE(xscale);
		break;
	case PMC_CLASS_MIPS24K:
		ev = mips24k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips24k);
		break;
	case PMC_CLASS_OCTEON:
		ev = octeon_event_table;
		count = PMC_EVENT_TABLE_SIZE(octeon);
		break;
	case PMC_CLASS_PPC7450:
		ev = ppc7450_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc7450);
		break;
	case PMC_CLASS_SOFT:
		/* Size is discovered at runtime from the kernel. */
		ev = soft_event_table;
		count = soft_event_info.pm_nevent;
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	/* Caller frees this array; see the comment above. */
	if ((names = malloc(count * sizeof(const char *))) == NULL)
		return (-1);

	*eventnames = names;
	*nevents = count;

	for (;count--; ev++, names++)
		*names = ev->pm_ev_name;

	return (0);
}

/*
 * Ask the driver to flush buffered log data to the configured log
 * file.
 */
int
pmc_flush_logfile(void)
{
	return
 (PMC_CALL(FLUSHLOG,0));
}

/*
 * Close the currently configured log file.
 */
int
pmc_close_logfile(void)
{
	return (PMC_CALL(CLOSELOG,0));
}

/*
 * Copy the driver's statistics into '*ds'.  Returns 0 on success,
 * -1 on failure.
 */
int
pmc_get_driver_stats(struct pmc_driverstats *ds)
{
	struct pmc_op_getdriverstats gms;

	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
		return (-1);

	/* copy out fields in the current userland<->library interface */
	ds->pm_intr_ignored = gms.pm_intr_ignored;
	ds->pm_intr_processed = gms.pm_intr_processed;
	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
	ds->pm_syscalls = gms.pm_syscalls;
	ds->pm_syscall_errors = gms.pm_syscall_errors;
	ds->pm_buffer_requests = gms.pm_buffer_requests;
	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
	ds->pm_log_sweeps = gms.pm_log_sweeps;
	return (0);
}

/*
 * Retrieve the MSR number usable for directly reading PMC 'pmc',
 * returned in '*msr'.  Returns 0 on success, -1 on failure.
 */
int
pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
{
	struct pmc_op_getmsr gm;

	gm.pm_pmcid = pmc;
	if (PMC_CALL(PMCGETMSR, &gm) < 0)
		return (-1);
	*msr = gm.pm_msr;
	return (0);
}

/*
 * Initialize the library: locate the hwpmc(4) module's system call,
 * verify the kernel ABI, cache CPU information and configure the
 * event-name parser tables for the running CPU.  Returns 0 on
 * success; on failure returns -1 and leaves pmc_syscall == -1.
 */
int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;
#if defined(__amd64__) || defined(__i386__)
	int cpu_has_iaf_counters;
	unsigned int t;
#endif

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	/* Cache CPU information for the other API entry points. */
	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass = op_cpu_info.pm_nclass;
	for (n = 0; n < cpu_info.pm_nclass; n++)
		cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];

	pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
	    sizeof(struct pmc_class_descr *));

	if (pmc_class_table == NULL)
		return (-1);

	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
		pmc_class_table[n] = NULL;

	/*
	 * Get soft events list.
	 */
	soft_event_info.pm_class = PMC_CLASS_SOFT;
	if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0)
		return (pmc_syscall = -1);

	/* Map soft events to static list. */
	for (n = 0; n < soft_event_info.pm_nevent; n++) {
		soft_event_table[n].pm_ev_name =
		    soft_event_info.pm_events[n].pm_ev_name;
		soft_event_table[n].pm_ev_code =
		    soft_event_info.pm_events[n].pm_ev_code;
	}
	soft_class_table_descr.pm_evc_event_table_size = \
	    soft_event_info.pm_nevent;
	soft_class_table_descr.pm_evc_event_table = \
	    soft_event_table;

	/*
	 * Fill in the class table.
	 */
	n = 0;

	/* Fill soft events information. */
	pmc_class_table[n++] = &soft_class_table_descr;
#if defined(__amd64__) || defined(__i386__)
	if (cpu_info.pm_cputype != PMC_CPU_GENERIC)
		pmc_class_table[n++] = &tsc_class_table_descr;

	/*
	 * Check if this CPU has fixed function counters.
	 */
	cpu_has_iaf_counters = 0;
	for (t = 0; t < cpu_info.pm_nclass; t++)
		if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF &&
		    cpu_info.pm_classes[t].pm_num > 0)
			cpu_has_iaf_counters = 1;
#endif

/* Install the alias table and class list for CPU family 'C'. */
#define	PMC_MDEP_INIT(C) do {					\
		pmc_mdep_event_aliases	= C##_aliases;		\
		pmc_mdep_class_list  = C##_pmc_classes;		\
		pmc_mdep_class_list_size =			\
		    PMC_TABLE_SIZE(C##_pmc_classes);		\
	} while (0)

/*
 * As above, for Intel CPUs with architectural (v2) PMCs; also
 * registers the fixed-function counter class when present.
 */
#define	PMC_MDEP_INIT_INTEL_V2(C) do {					\
		PMC_MDEP_INIT(C);					\
		pmc_class_table[n++] = &iaf_class_table_descr;		\
		if (!cpu_has_iaf_counters)				\
			pmc_mdep_event_aliases =			\
				C##_aliases_without_iaf;		\
		pmc_class_table[n] = &C##_class_table_descr;		\
	} while (0)

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__i386__)
	case PMC_CPU_AMD_K7:
		PMC_MDEP_INIT(k7);
		pmc_class_table[n] = &k7_class_table_descr;
		break;
	case PMC_CPU_INTEL_P5:
		PMC_MDEP_INIT(p5);
		pmc_class_table[n] = &p5_class_table_descr;
		break;
	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
	case PMC_CPU_INTEL_PIII:
	case PMC_CPU_INTEL_PM:
		PMC_MDEP_INIT(p6);
		pmc_class_table[n] = &p6_class_table_descr;
		break;
#endif
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		pmc_class_table[n] = &k8_class_table_descr;
		break;
	case PMC_CPU_INTEL_ATOM:
		PMC_MDEP_INIT_INTEL_V2(atom);
		break;
	case PMC_CPU_INTEL_CORE:
		PMC_MDEP_INIT(core);
		pmc_class_table[n] = &core_class_table_descr;
		break;
	case PMC_CPU_INTEL_CORE2:
	case PMC_CPU_INTEL_CORE2EXTREME:
		PMC_MDEP_INIT_INTEL_V2(core2);
		break;
	case PMC_CPU_INTEL_COREI7:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &corei7uc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(corei7);
		break;
	case PMC_CPU_INTEL_IVYBRIDGE:
		PMC_MDEP_INIT_INTEL_V2(ivybridge);
		break;
	case PMC_CPU_INTEL_SANDYBRIDGE:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &sandybridgeuc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(sandybridge);
		break;
	case PMC_CPU_INTEL_WESTMERE:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &westmereuc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(westmere);
		break;
	case PMC_CPU_INTEL_PIV:
		PMC_MDEP_INIT(p4);
		pmc_class_table[n] = &p4_class_table_descr;
		break;
#endif
	case PMC_CPU_GENERIC:
		PMC_MDEP_INIT(generic);
		break;
#if defined(__XSCALE__)
	case PMC_CPU_INTEL_XSCALE:
		PMC_MDEP_INIT(xscale);
		pmc_class_table[n] = &xscale_class_table_descr;
		break;
#endif
#if defined(__mips__)
	case PMC_CPU_MIPS_24K:
		PMC_MDEP_INIT(mips24k);
		pmc_class_table[n] = &mips24k_class_table_descr;
		break;
	case PMC_CPU_MIPS_OCTEON:
		PMC_MDEP_INIT(octeon);
		pmc_class_table[n] = &octeon_class_table_descr;
		break;
#endif /* __mips__ */
#if defined(__powerpc__)
3007 case PMC_CPU_PPC_7450: 3008 PMC_MDEP_INIT(ppc7450); 3009 pmc_class_table[n] = &ppc7450_class_table_descr; 3010 break; 3011 #endif 3012 default: 3013 /* 3014 * Some kind of CPU this version of the library knows nothing 3015 * about. This shouldn't happen since the abi version check 3016 * should have caught this. 3017 */ 3018 errno = ENXIO; 3019 return (pmc_syscall = -1); 3020 } 3021 3022 return (0); 3023 } 3024 3025 const char * 3026 pmc_name_of_capability(enum pmc_caps cap) 3027 { 3028 int i; 3029 3030 /* 3031 * 'cap' should have a single bit set and should be in 3032 * range. 3033 */ 3034 if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST || 3035 cap > PMC_CAP_LAST) { 3036 errno = EINVAL; 3037 return (NULL); 3038 } 3039 3040 i = ffs(cap); 3041 return (pmc_capability_names[i - 1]); 3042 } 3043 3044 const char * 3045 pmc_name_of_class(enum pmc_class pc) 3046 { 3047 if ((int) pc >= PMC_CLASS_FIRST && 3048 pc <= PMC_CLASS_LAST) 3049 return (pmc_class_names[pc]); 3050 3051 errno = EINVAL; 3052 return (NULL); 3053 } 3054 3055 const char * 3056 pmc_name_of_cputype(enum pmc_cputype cp) 3057 { 3058 size_t n; 3059 3060 for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++) 3061 if (cp == pmc_cputype_names[n].pm_cputype) 3062 return (pmc_cputype_names[n].pm_name); 3063 3064 errno = EINVAL; 3065 return (NULL); 3066 } 3067 3068 const char * 3069 pmc_name_of_disposition(enum pmc_disp pd) 3070 { 3071 if ((int) pd >= PMC_DISP_FIRST && 3072 pd <= PMC_DISP_LAST) 3073 return (pmc_disposition_names[pd]); 3074 3075 errno = EINVAL; 3076 return (NULL); 3077 } 3078 3079 const char * 3080 _pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu) 3081 { 3082 const struct pmc_event_descr *ev, *evfence; 3083 3084 ev = evfence = NULL; 3085 if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) { 3086 ev = iaf_event_table; 3087 evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf); 3088 } else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) { 3089 switch (cpu) { 3090 case 
		PMC_CPU_INTEL_ATOM:
			ev = atom_event_table;
			evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
			break;
		case PMC_CPU_INTEL_CORE:
			ev = core_event_table;
			evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
			break;
		case PMC_CPU_INTEL_CORE2:
		case PMC_CPU_INTEL_CORE2EXTREME:
			ev = core2_event_table;
			evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
			break;
		case PMC_CPU_INTEL_COREI7:
			ev = corei7_event_table;
			evfence = corei7_event_table + PMC_EVENT_TABLE_SIZE(corei7);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE:
			ev = ivybridge_event_table;
			evfence = ivybridge_event_table + PMC_EVENT_TABLE_SIZE(ivybridge);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridge_event_table;
			evfence = sandybridge_event_table + PMC_EVENT_TABLE_SIZE(sandybridge);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmere_event_table;
			evfence = westmere_event_table + PMC_EVENT_TABLE_SIZE(westmere);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_UCF_FIRST && pe <= PMC_EV_UCF_LAST) {
		ev = ucf_event_table;
		evfence = ucf_event_table + PMC_EVENT_TABLE_SIZE(ucf);
	} else if (pe >= PMC_EV_UCP_FIRST && pe <= PMC_EV_UCP_LAST) {
		/* Uncore programmable events are also per-CPU-model. */
		switch (cpu) {
		case PMC_CPU_INTEL_COREI7:
			ev = corei7uc_event_table;
			evfence = corei7uc_event_table + PMC_EVENT_TABLE_SIZE(corei7uc);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridgeuc_event_table;
			evfence = sandybridgeuc_event_table + PMC_EVENT_TABLE_SIZE(sandybridgeuc);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmereuc_event_table;
			evfence = westmereuc_event_table + PMC_EVENT_TABLE_SIZE(westmereuc);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
		ev = k7_event_table;
		evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
	} else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
		ev = k8_event_table;
		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
	} else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
		ev = p4_event_table;
		evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
	} else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
		ev = p5_event_table;
		evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
	} else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
		ev = p6_event_table;
		evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
	} else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
		ev = xscale_event_table;
		evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
	} else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
		ev = mips24k_event_table;
		evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
	} else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) {
		ev = octeon_event_table;
		evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon);
	} else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
		ev = ppc7450_event_table;
		evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
	} else if (pe == PMC_EV_TSC_TSC) {
		ev = tsc_event_table;
		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
	} else if (pe >= PMC_EV_SOFT_FIRST && pe <= PMC_EV_SOFT_LAST) {
		ev = soft_event_table;
		evfence = soft_event_table + soft_event_info.pm_nevent;
	}

	/* Scan the selected table; ev == evfence when no table matched. */
	for (; ev != evfence; ev++)
		if (pe == ev->pm_ev_code)
			return (ev->pm_ev_name);

	return (NULL);
}

/*
 * Return the name of event 'pe' for the current CPU, or NULL with
 * errno = EINVAL if the event is unknown.
 */
const char *
pmc_name_of_event(enum pmc_event pe)
{
	const char *n;

	if ((n =
_pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
		return (n);

	errno = EINVAL;
	return (NULL);
}

/*
 * Return the name of PMC mode 'pm', or NULL with errno = EINVAL.
 */
const char *
pmc_name_of_mode(enum pmc_mode pm)
{
	if ((int) pm >= PMC_MODE_FIRST &&
	    pm <= PMC_MODE_LAST)
		return (pmc_mode_names[pm]);

	errno = EINVAL;
	return (NULL);
}

/*
 * Return the name of PMC state 'ps', or NULL with errno = EINVAL.
 */
const char *
pmc_name_of_state(enum pmc_state ps)
{
	if ((int) ps >= PMC_STATE_FIRST &&
	    ps <= PMC_STATE_LAST)
		return (pmc_state_names[ps]);

	errno = EINVAL;
	return (NULL);
}

/*
 * Return the number of CPUs in the system; requires pmc_init() to
 * have run (fails with ENXIO otherwise).
 */
int
pmc_ncpu(void)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	return (cpu_info.pm_ncpu);
}

/*
 * Return the number of PMCs on CPU 'cpu'; requires pmc_init() to
 * have run.  Fails with EINVAL on an out of range CPU number.
 */
int
pmc_npmc(int cpu)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
		errno = EINVAL;
		return (-1);
	}

	return (cpu_info.pm_npmc);
}

/*
 * Return the state of the PMC hardware on CPU 'cpu' via '*ppmci'.
 * The returned structure is malloc'ed; the caller must free it.
 */
int
pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
{
	int nbytes, npmc;
	struct pmc_op_getpmcinfo *pmci;

	if ((npmc = pmc_npmc(cpu)) < 0)
		return (-1);

	/* Header plus one pmc_info record per PMC on the CPU. */
	nbytes = sizeof(struct pmc_op_getpmcinfo) +
	    npmc * sizeof(struct pmc_info);

	if ((pmci = calloc(1, nbytes)) == NULL)
		return (-1);

	pmci->pm_cpu  = cpu;

	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
		free(pmci);
		return (-1);
	}

	/* kernel<->library, library<->userland interfaces are identical */
	*ppmci = (struct pmc_pmcinfo *) pmci;
	return (0);
}

/*
 * Read the current value of PMC 'pmc' into '*value'.  Returns 0 on
 * success, -1 on failure.
 */
int
pmc_read(pmc_id_t pmc, pmc_value_t *value)
{
	struct pmc_op_pmcrw pmc_read_op;

	pmc_read_op.pm_pmcid = pmc;
	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
	pmc_read_op.pm_value = -1;

	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
		return (-1);

	*value = pmc_read_op.pm_value;
	return (0);
}

int
pmc_release(pmc_id_t pmc) 3290 { 3291 struct pmc_op_simple pmc_release_args; 3292 3293 pmc_release_args.pm_pmcid = pmc; 3294 return (PMC_CALL(PMCRELEASE, &pmc_release_args)); 3295 } 3296 3297 int 3298 pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep) 3299 { 3300 struct pmc_op_pmcrw pmc_rw_op; 3301 3302 pmc_rw_op.pm_pmcid = pmc; 3303 pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE; 3304 pmc_rw_op.pm_value = newvalue; 3305 3306 if (PMC_CALL(PMCRW, &pmc_rw_op) < 0) 3307 return (-1); 3308 3309 *oldvaluep = pmc_rw_op.pm_value; 3310 return (0); 3311 } 3312 3313 int 3314 pmc_set(pmc_id_t pmc, pmc_value_t value) 3315 { 3316 struct pmc_op_pmcsetcount sc; 3317 3318 sc.pm_pmcid = pmc; 3319 sc.pm_count = value; 3320 3321 if (PMC_CALL(PMCSETCOUNT, &sc) < 0) 3322 return (-1); 3323 return (0); 3324 } 3325 3326 int 3327 pmc_start(pmc_id_t pmc) 3328 { 3329 struct pmc_op_simple pmc_start_args; 3330 3331 pmc_start_args.pm_pmcid = pmc; 3332 return (PMC_CALL(PMCSTART, &pmc_start_args)); 3333 } 3334 3335 int 3336 pmc_stop(pmc_id_t pmc) 3337 { 3338 struct pmc_op_simple pmc_stop_args; 3339 3340 pmc_stop_args.pm_pmcid = pmc; 3341 return (PMC_CALL(PMCSTOP, &pmc_stop_args)); 3342 } 3343 3344 int 3345 pmc_width(pmc_id_t pmcid, uint32_t *width) 3346 { 3347 unsigned int i; 3348 enum pmc_class cl; 3349 3350 cl = PMC_ID_TO_CLASS(pmcid); 3351 for (i = 0; i < cpu_info.pm_nclass; i++) 3352 if (cpu_info.pm_classes[i].pm_class == cl) { 3353 *width = cpu_info.pm_classes[i].pm_width; 3354 return (0); 3355 } 3356 errno = EINVAL; 3357 return (-1); 3358 } 3359 3360 int 3361 pmc_write(pmc_id_t pmc, pmc_value_t value) 3362 { 3363 struct pmc_op_pmcrw pmc_write_op; 3364 3365 pmc_write_op.pm_pmcid = pmc; 3366 pmc_write_op.pm_flags = PMC_F_NEWVALUE; 3367 pmc_write_op.pm_value = value; 3368 return (PMC_CALL(PMCRW, &pmc_write_op)); 3369 } 3370 3371 int 3372 pmc_writelog(uint32_t userdata) 3373 { 3374 struct pmc_op_writelog wl; 3375 3376 wl.pm_userdata = userdata; 3377 return 
(PMC_CALL(WRITELOG, &wl)); 3378 } 3379