/*-
 * Copyright (c) 2003-2008 Joseph Koshy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/pmc.h>
#include <sys/syscall.h>

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <pmc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>

/* Function prototypes */
#if defined(__i386__)
static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__i386__)
static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif

/* Invoke the hwpmc(4) system call; 'pmc_syscall' is filled in by pmc_init(). */
#define	PMC_CALL(cmd, params)				\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))

/*
 * Event aliases provide a way for the user to ask for generic events
 * like "cache-misses", or "instructions-retired".  These aliases are
 * mapped to the appropriate canonical event descriptions using a
 * lookup table.
 */
struct pmc_event_alias {
	const char	*pm_alias;	/* generic user-visible name */
	const char	*pm_spec;	/* canonical event specification */
};

/* Alias table for the running CPU; selected at pmc_init() time. */
static const struct pmc_event_alias *pmc_mdep_event_aliases;

/*
 * The pmc_event_descr structure maps symbolic names known to the user
 * to integer codes used by the PMC KLD.
 */
struct pmc_event_descr {
	const char	*pm_ev_name;	/* symbolic event name */
	enum pmc_event	pm_ev_code;	/* kernel event code */
};

/*
 * The pmc_class_descr structure maps class name prefixes for
 * event names to event tables and other PMC class data.
 */
struct pmc_class_descr {
	const char	*pm_evc_name;		/* class prefix, e.g. "k8-" */
	size_t		pm_evc_name_size;	/* prefix length, sans NUL */
	enum pmc_class	pm_evc_class;		/* kernel PMC class code */
	const struct pmc_event_descr *pm_evc_event_table; /* events in class */
	size_t		pm_evc_event_table_size; /* #entries in event table */
	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
						/* class-specific spec parser */
};

#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)

/* X-macro: expand each (class, name) pair into a pmc_event_descr entry. */
#undef	__PMC_EV
#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },

/*
 * PMC_MDEP_TABLE(NAME, CLASS, ADDITIONAL_CLASSES...)
 *
 * Build an event descriptor table and a list of valid PMC classes.
 */
#define	PMC_MDEP_TABLE(N,C,...)					\
	static const struct pmc_event_descr N##_event_table[] =	\
	{							\
		__PMC_EV_##C()					\
	};							\
	static const enum pmc_class N##_pmc_classes[] = {	\
		PMC_CLASS_##C, __VA_ARGS__			\
	}

PMC_MDEP_TABLE(k7, K7, PMC_CLASS_TSC);
PMC_MDEP_TABLE(k8, K8, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p4, P4, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p5, P5, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p6, P6, PMC_CLASS_TSC);

static const struct pmc_event_descr tsc_event_table[] =
{
	__PMC_EV_TSC()
};

#undef	PMC_CLASS_TABLE_DESC
#define	PMC_CLASS_TABLE_DESC(N, C) {			\
	.pm_evc_name = #N "-",				\
	.pm_evc_name_size = sizeof(#N "-") - 1,		\
	.pm_evc_class = PMC_CLASS_##C ,			\
	.pm_evc_event_table = N##_event_table ,		\
	.pm_evc_event_table_size = 			\
	    PMC_EVENT_TABLE_SIZE(N),			\
	.pm_evc_allocate_pmc = N##_allocate_pmc		\
}

/* All PMC classes compiled in for this architecture. */
static const struct pmc_class_descr pmc_class_table[] =
{
#if defined(__i386__)
	PMC_CLASS_TABLE_DESC(k7, K7),
#endif
#if defined(__i386__) || defined(__amd64__)
	PMC_CLASS_TABLE_DESC(k8, K8),
	PMC_CLASS_TABLE_DESC(p4, P4),
#endif
#if defined(__i386__)
	PMC_CLASS_TABLE_DESC(p5, P5),
	PMC_CLASS_TABLE_DESC(p6, P6),
#endif
#if defined(__i386__) || defined(__amd64__)
	PMC_CLASS_TABLE_DESC(tsc, TSC)
#endif
};

static size_t pmc_event_class_table_size =
    PMC_TABLE_SIZE(pmc_class_table);

#undef	PMC_CLASS_TABLE_DESC

/* Classes valid on the running CPU; filled in by pmc_init(). */
static const enum pmc_class *pmc_mdep_class_list;
static size_t pmc_mdep_class_list_size;

/*
 * Mapping tables, mapping enumeration values to human readable
 * strings.
 */

static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

static const char * pmc_class_names[] = {
#undef	__PMC_CLASS
#define	__PMC_CLASS(C)	#C ,
	__PMC_CLASSES()
};

struct pmc_cputype_map {
	/*
	 * NOTE(review): this field holds PMC_CPU_* values; the declared
	 * type 'enum pmc_class' looks like the wrong enum (expected
	 * 'enum pmc_cputype') -- confirm against sys/pmc.h.
	 */
	enum pmc_class	pm_cputype;
	const char	*pm_name;	/* human readable CPU name */
};

static const struct pmc_cputype_map pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
	__PMC_CPUS()
};

static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

static const char * pmc_mode_names[] = {
#undef	__PMC_MODE
#define	__PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

static const char * pmc_state_names[] = {
#undef	__PMC_STATE
#define	__PMC_STATE(S)	#S ,
	__PMC_STATES()
};

static int pmc_syscall = -1;		/* filled in by pmc_init() */

static struct pmc_cpuinfo cpu_info;	/* filled in by pmc_init() */

/* Event masks for events */
struct pmc_masks {
	const char	*pm_name;	/* mask keyword */
	const uint32_t	pm_value;	/* bit(s) the keyword selects */
};
#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
#define	NULLMASK	PMCMASK(NULL,0)

#if defined(__amd64__) || defined(__i386__)
static int 234 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask) 235 { 236 const struct pmc_masks *pm; 237 char *q, *r; 238 int c; 239 240 if (pmask == NULL) /* no mask keywords */ 241 return (-1); 242 q = strchr(p, '='); /* skip '=' */ 243 if (*++q == '\0') /* no more data */ 244 return (-1); 245 c = 0; /* count of mask keywords seen */ 246 while ((r = strsep(&q, "+")) != NULL) { 247 for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name); 248 pm++) 249 ; 250 if (pm->pm_name == NULL) /* not found */ 251 return (-1); 252 *evmask |= pm->pm_value; 253 c++; 254 } 255 return (c); 256 } 257 #endif 258 259 #define KWMATCH(p,kw) (strcasecmp((p), (kw)) == 0) 260 #define KWPREFIXMATCH(p,kw) (strncasecmp((p), (kw), sizeof((kw)) - 1) == 0) 261 #define EV_ALIAS(N,S) { .pm_alias = N, .pm_spec = S } 262 263 #if defined(__i386__) 264 265 /* 266 * AMD K7 (Athlon) CPUs. 267 */ 268 269 static struct pmc_event_alias k7_aliases[] = { 270 EV_ALIAS("branches", "k7-retired-branches"), 271 EV_ALIAS("branch-mispredicts", "k7-retired-branches-mispredicted"), 272 EV_ALIAS("cycles", "tsc"), 273 EV_ALIAS("dc-misses", "k7-dc-misses"), 274 EV_ALIAS("ic-misses", "k7-ic-misses"), 275 EV_ALIAS("instructions", "k7-retired-instructions"), 276 EV_ALIAS("interrupts", "k7-hardware-interrupts"), 277 EV_ALIAS(NULL, NULL) 278 }; 279 280 #define K7_KW_COUNT "count" 281 #define K7_KW_EDGE "edge" 282 #define K7_KW_INV "inv" 283 #define K7_KW_OS "os" 284 #define K7_KW_UNITMASK "unitmask" 285 #define K7_KW_USR "usr" 286 287 static int 288 k7_allocate_pmc(enum pmc_event pe, char *ctrspec, 289 struct pmc_op_pmcallocate *pmc_config) 290 { 291 char *e, *p, *q; 292 int c, has_unitmask; 293 uint32_t count, unitmask; 294 295 pmc_config->pm_md.pm_amd.pm_amd_config = 0; 296 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 297 298 if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 || 299 pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM || 300 pe == PMC_EV_K7_DC_WRITEBACKS) { 301 has_unitmask = 1; 302 unitmask = 
AMD_PMC_UNITMASK_MOESI; 303 } else 304 unitmask = has_unitmask = 0; 305 306 while ((p = strsep(&ctrspec, ",")) != NULL) { 307 if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) { 308 q = strchr(p, '='); 309 if (*++q == '\0') /* skip '=' */ 310 return (-1); 311 312 count = strtol(q, &e, 0); 313 if (e == q || *e != '\0') 314 return (-1); 315 316 pmc_config->pm_caps |= PMC_CAP_THRESHOLD; 317 pmc_config->pm_md.pm_amd.pm_amd_config |= 318 AMD_PMC_TO_COUNTER(count); 319 320 } else if (KWMATCH(p, K7_KW_EDGE)) { 321 pmc_config->pm_caps |= PMC_CAP_EDGE; 322 } else if (KWMATCH(p, K7_KW_INV)) { 323 pmc_config->pm_caps |= PMC_CAP_INVERT; 324 } else if (KWMATCH(p, K7_KW_OS)) { 325 pmc_config->pm_caps |= PMC_CAP_SYSTEM; 326 } else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) { 327 if (has_unitmask == 0) 328 return (-1); 329 unitmask = 0; 330 q = strchr(p, '='); 331 if (*++q == '\0') /* skip '=' */ 332 return (-1); 333 334 while ((c = tolower(*q++)) != 0) 335 if (c == 'm') 336 unitmask |= AMD_PMC_UNITMASK_M; 337 else if (c == 'o') 338 unitmask |= AMD_PMC_UNITMASK_O; 339 else if (c == 'e') 340 unitmask |= AMD_PMC_UNITMASK_E; 341 else if (c == 's') 342 unitmask |= AMD_PMC_UNITMASK_S; 343 else if (c == 'i') 344 unitmask |= AMD_PMC_UNITMASK_I; 345 else if (c == '+') 346 continue; 347 else 348 return (-1); 349 350 if (unitmask == 0) 351 return (-1); 352 353 } else if (KWMATCH(p, K7_KW_USR)) { 354 pmc_config->pm_caps |= PMC_CAP_USER; 355 } else 356 return (-1); 357 } 358 359 if (has_unitmask) { 360 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 361 pmc_config->pm_md.pm_amd.pm_amd_config |= 362 AMD_PMC_TO_UNITMASK(unitmask); 363 } 364 365 return (0); 366 367 } 368 369 #endif 370 371 #if defined(__amd64__) || defined(__i386__) 372 373 /* 374 * AMD K8 PMCs. 375 * 376 * These are very similar to AMD K7 PMCs, but support more kinds of 377 * events. 
378 */ 379 380 static struct pmc_event_alias k8_aliases[] = { 381 EV_ALIAS("branches", "k8-fr-retired-taken-branches"), 382 EV_ALIAS("branch-mispredicts", 383 "k8-fr-retired-taken-branches-mispredicted"), 384 EV_ALIAS("cycles", "tsc"), 385 EV_ALIAS("dc-misses", "k8-dc-miss"), 386 EV_ALIAS("ic-misses", "k8-ic-miss"), 387 EV_ALIAS("instructions", "k8-fr-retired-x86-instructions"), 388 EV_ALIAS("interrupts", "k8-fr-taken-hardware-interrupts"), 389 EV_ALIAS("unhalted-cycles", "k8-bu-cpu-clk-unhalted"), 390 EV_ALIAS(NULL, NULL) 391 }; 392 393 #define __K8MASK(N,V) PMCMASK(N,(1 << (V))) 394 395 /* 396 * Parsing tables 397 */ 398 399 /* fp dispatched fpu ops */ 400 static const struct pmc_masks k8_mask_fdfo[] = { 401 __K8MASK(add-pipe-excluding-junk-ops, 0), 402 __K8MASK(multiply-pipe-excluding-junk-ops, 1), 403 __K8MASK(store-pipe-excluding-junk-ops, 2), 404 __K8MASK(add-pipe-junk-ops, 3), 405 __K8MASK(multiply-pipe-junk-ops, 4), 406 __K8MASK(store-pipe-junk-ops, 5), 407 NULLMASK 408 }; 409 410 /* ls segment register loads */ 411 static const struct pmc_masks k8_mask_lsrl[] = { 412 __K8MASK(es, 0), 413 __K8MASK(cs, 1), 414 __K8MASK(ss, 2), 415 __K8MASK(ds, 3), 416 __K8MASK(fs, 4), 417 __K8MASK(gs, 5), 418 __K8MASK(hs, 6), 419 NULLMASK 420 }; 421 422 /* ls locked operation */ 423 static const struct pmc_masks k8_mask_llo[] = { 424 __K8MASK(locked-instructions, 0), 425 __K8MASK(cycles-in-request, 1), 426 __K8MASK(cycles-to-complete, 2), 427 NULLMASK 428 }; 429 430 /* dc refill from {l2,system} and dc copyback */ 431 static const struct pmc_masks k8_mask_dc[] = { 432 __K8MASK(invalid, 0), 433 __K8MASK(shared, 1), 434 __K8MASK(exclusive, 2), 435 __K8MASK(owner, 3), 436 __K8MASK(modified, 4), 437 NULLMASK 438 }; 439 440 /* dc one bit ecc error */ 441 static const struct pmc_masks k8_mask_dobee[] = { 442 __K8MASK(scrubber, 0), 443 __K8MASK(piggyback, 1), 444 NULLMASK 445 }; 446 447 /* dc dispatched prefetch instructions */ 448 static const struct pmc_masks k8_mask_ddpi[] = { 
449 __K8MASK(load, 0), 450 __K8MASK(store, 1), 451 __K8MASK(nta, 2), 452 NULLMASK 453 }; 454 455 /* dc dcache accesses by locks */ 456 static const struct pmc_masks k8_mask_dabl[] = { 457 __K8MASK(accesses, 0), 458 __K8MASK(misses, 1), 459 NULLMASK 460 }; 461 462 /* bu internal l2 request */ 463 static const struct pmc_masks k8_mask_bilr[] = { 464 __K8MASK(ic-fill, 0), 465 __K8MASK(dc-fill, 1), 466 __K8MASK(tlb-reload, 2), 467 __K8MASK(tag-snoop, 3), 468 __K8MASK(cancelled, 4), 469 NULLMASK 470 }; 471 472 /* bu fill request l2 miss */ 473 static const struct pmc_masks k8_mask_bfrlm[] = { 474 __K8MASK(ic-fill, 0), 475 __K8MASK(dc-fill, 1), 476 __K8MASK(tlb-reload, 2), 477 NULLMASK 478 }; 479 480 /* bu fill into l2 */ 481 static const struct pmc_masks k8_mask_bfil[] = { 482 __K8MASK(dirty-l2-victim, 0), 483 __K8MASK(victim-from-l2, 1), 484 NULLMASK 485 }; 486 487 /* fr retired fpu instructions */ 488 static const struct pmc_masks k8_mask_frfi[] = { 489 __K8MASK(x87, 0), 490 __K8MASK(mmx-3dnow, 1), 491 __K8MASK(packed-sse-sse2, 2), 492 __K8MASK(scalar-sse-sse2, 3), 493 NULLMASK 494 }; 495 496 /* fr retired fastpath double op instructions */ 497 static const struct pmc_masks k8_mask_frfdoi[] = { 498 __K8MASK(low-op-pos-0, 0), 499 __K8MASK(low-op-pos-1, 1), 500 __K8MASK(low-op-pos-2, 2), 501 NULLMASK 502 }; 503 504 /* fr fpu exceptions */ 505 static const struct pmc_masks k8_mask_ffe[] = { 506 __K8MASK(x87-reclass-microfaults, 0), 507 __K8MASK(sse-retype-microfaults, 1), 508 __K8MASK(sse-reclass-microfaults, 2), 509 __K8MASK(sse-and-x87-microtraps, 3), 510 NULLMASK 511 }; 512 513 /* nb memory controller page access event */ 514 static const struct pmc_masks k8_mask_nmcpae[] = { 515 __K8MASK(page-hit, 0), 516 __K8MASK(page-miss, 1), 517 __K8MASK(page-conflict, 2), 518 NULLMASK 519 }; 520 521 /* nb memory controller turnaround */ 522 static const struct pmc_masks k8_mask_nmct[] = { 523 __K8MASK(dimm-turnaround, 0), 524 __K8MASK(read-to-write-turnaround, 1), 525 
__K8MASK(write-to-read-turnaround, 2), 526 NULLMASK 527 }; 528 529 /* nb memory controller bypass saturation */ 530 static const struct pmc_masks k8_mask_nmcbs[] = { 531 __K8MASK(memory-controller-hi-pri-bypass, 0), 532 __K8MASK(memory-controller-lo-pri-bypass, 1), 533 __K8MASK(dram-controller-interface-bypass, 2), 534 __K8MASK(dram-controller-queue-bypass, 3), 535 NULLMASK 536 }; 537 538 /* nb sized commands */ 539 static const struct pmc_masks k8_mask_nsc[] = { 540 __K8MASK(nonpostwrszbyte, 0), 541 __K8MASK(nonpostwrszdword, 1), 542 __K8MASK(postwrszbyte, 2), 543 __K8MASK(postwrszdword, 3), 544 __K8MASK(rdszbyte, 4), 545 __K8MASK(rdszdword, 5), 546 __K8MASK(rdmodwr, 6), 547 NULLMASK 548 }; 549 550 /* nb probe result */ 551 static const struct pmc_masks k8_mask_npr[] = { 552 __K8MASK(probe-miss, 0), 553 __K8MASK(probe-hit, 1), 554 __K8MASK(probe-hit-dirty-no-memory-cancel, 2), 555 __K8MASK(probe-hit-dirty-with-memory-cancel, 3), 556 NULLMASK 557 }; 558 559 /* nb hypertransport bus bandwidth */ 560 static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */ 561 __K8MASK(command, 0), 562 __K8MASK(data, 1), 563 __K8MASK(buffer-release, 2), 564 __K8MASK(nop, 3), 565 NULLMASK 566 }; 567 568 #undef __K8MASK 569 570 #define K8_KW_COUNT "count" 571 #define K8_KW_EDGE "edge" 572 #define K8_KW_INV "inv" 573 #define K8_KW_MASK "mask" 574 #define K8_KW_OS "os" 575 #define K8_KW_USR "usr" 576 577 static int 578 k8_allocate_pmc(enum pmc_event pe, char *ctrspec, 579 struct pmc_op_pmcallocate *pmc_config) 580 { 581 char *e, *p, *q; 582 int n; 583 uint32_t count, evmask; 584 const struct pmc_masks *pm, *pmask; 585 586 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 587 pmc_config->pm_md.pm_amd.pm_amd_config = 0; 588 589 pmask = NULL; 590 evmask = 0; 591 592 #define __K8SETMASK(M) pmask = k8_mask_##M 593 594 /* setup parsing tables */ 595 switch (pe) { 596 case PMC_EV_K8_FP_DISPATCHED_FPU_OPS: 597 __K8SETMASK(fdfo); 598 break; 599 case 
PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD: 600 __K8SETMASK(lsrl); 601 break; 602 case PMC_EV_K8_LS_LOCKED_OPERATION: 603 __K8SETMASK(llo); 604 break; 605 case PMC_EV_K8_DC_REFILL_FROM_L2: 606 case PMC_EV_K8_DC_REFILL_FROM_SYSTEM: 607 case PMC_EV_K8_DC_COPYBACK: 608 __K8SETMASK(dc); 609 break; 610 case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR: 611 __K8SETMASK(dobee); 612 break; 613 case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS: 614 __K8SETMASK(ddpi); 615 break; 616 case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS: 617 __K8SETMASK(dabl); 618 break; 619 case PMC_EV_K8_BU_INTERNAL_L2_REQUEST: 620 __K8SETMASK(bilr); 621 break; 622 case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS: 623 __K8SETMASK(bfrlm); 624 break; 625 case PMC_EV_K8_BU_FILL_INTO_L2: 626 __K8SETMASK(bfil); 627 break; 628 case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS: 629 __K8SETMASK(frfi); 630 break; 631 case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS: 632 __K8SETMASK(frfdoi); 633 break; 634 case PMC_EV_K8_FR_FPU_EXCEPTIONS: 635 __K8SETMASK(ffe); 636 break; 637 case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT: 638 __K8SETMASK(nmcpae); 639 break; 640 case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND: 641 __K8SETMASK(nmct); 642 break; 643 case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION: 644 __K8SETMASK(nmcbs); 645 break; 646 case PMC_EV_K8_NB_SIZED_COMMANDS: 647 __K8SETMASK(nsc); 648 break; 649 case PMC_EV_K8_NB_PROBE_RESULT: 650 __K8SETMASK(npr); 651 break; 652 case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH: 653 case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH: 654 case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH: 655 __K8SETMASK(nhbb); 656 break; 657 658 default: 659 break; /* no options defined */ 660 } 661 662 while ((p = strsep(&ctrspec, ",")) != NULL) { 663 if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) { 664 q = strchr(p, '='); 665 if (*++q == '\0') /* skip '=' */ 666 return (-1); 667 668 count = strtol(q, &e, 0); 669 if (e == q || *e != '\0') 670 return (-1); 671 672 pmc_config->pm_caps |= PMC_CAP_THRESHOLD; 673 pmc_config->pm_md.pm_amd.pm_amd_config |= 
674 AMD_PMC_TO_COUNTER(count); 675 676 } else if (KWMATCH(p, K8_KW_EDGE)) { 677 pmc_config->pm_caps |= PMC_CAP_EDGE; 678 } else if (KWMATCH(p, K8_KW_INV)) { 679 pmc_config->pm_caps |= PMC_CAP_INVERT; 680 } else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) { 681 if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0) 682 return (-1); 683 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 684 } else if (KWMATCH(p, K8_KW_OS)) { 685 pmc_config->pm_caps |= PMC_CAP_SYSTEM; 686 } else if (KWMATCH(p, K8_KW_USR)) { 687 pmc_config->pm_caps |= PMC_CAP_USER; 688 } else 689 return (-1); 690 } 691 692 /* other post processing */ 693 switch (pe) { 694 case PMC_EV_K8_FP_DISPATCHED_FPU_OPS: 695 case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED: 696 case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS: 697 case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS: 698 case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS: 699 case PMC_EV_K8_FR_FPU_EXCEPTIONS: 700 /* XXX only available in rev B and later */ 701 break; 702 case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS: 703 /* XXX only available in rev C and later */ 704 break; 705 case PMC_EV_K8_LS_LOCKED_OPERATION: 706 /* XXX CPU Rev A,B evmask is to be zero */ 707 if (evmask & (evmask - 1)) /* > 1 bit set */ 708 return (-1); 709 if (evmask == 0) { 710 evmask = 0x01; /* Rev C and later: #instrs */ 711 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 712 } 713 break; 714 default: 715 if (evmask == 0 && pmask != NULL) { 716 for (pm = pmask; pm->pm_name; pm++) 717 evmask |= pm->pm_value; 718 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 719 } 720 } 721 722 if (pmc_config->pm_caps & PMC_CAP_QUALIFIER) 723 pmc_config->pm_md.pm_amd.pm_amd_config = 724 AMD_PMC_TO_UNITMASK(evmask); 725 726 return (0); 727 } 728 729 #endif 730 731 #if defined(__amd64__) || defined(__i386__) 732 733 /* 734 * Intel P4 PMCs 735 */ 736 737 static struct pmc_event_alias p4_aliases[] = { 738 EV_ALIAS("branches", "p4-branch-retired,mask=mmtp+mmtm"), 739 EV_ALIAS("branch-mispredicts", "p4-mispred-branch-retired"), 740 
EV_ALIAS("cycles", "tsc"), 741 EV_ALIAS("instructions", 742 "p4-instr-retired,mask=nbogusntag+nbogustag"), 743 EV_ALIAS("unhalted-cycles", "p4-global-power-events"), 744 EV_ALIAS(NULL, NULL) 745 }; 746 747 #define P4_KW_ACTIVE "active" 748 #define P4_KW_ACTIVE_ANY "any" 749 #define P4_KW_ACTIVE_BOTH "both" 750 #define P4_KW_ACTIVE_NONE "none" 751 #define P4_KW_ACTIVE_SINGLE "single" 752 #define P4_KW_BUSREQTYPE "busreqtype" 753 #define P4_KW_CASCADE "cascade" 754 #define P4_KW_EDGE "edge" 755 #define P4_KW_INV "complement" 756 #define P4_KW_OS "os" 757 #define P4_KW_MASK "mask" 758 #define P4_KW_PRECISE "precise" 759 #define P4_KW_TAG "tag" 760 #define P4_KW_THRESHOLD "threshold" 761 #define P4_KW_USR "usr" 762 763 #define __P4MASK(N,V) PMCMASK(N, (1 << (V))) 764 765 static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */ 766 __P4MASK(dd, 0), 767 __P4MASK(db, 1), 768 __P4MASK(di, 2), 769 __P4MASK(bd, 3), 770 __P4MASK(bb, 4), 771 __P4MASK(bi, 5), 772 __P4MASK(id, 6), 773 __P4MASK(ib, 7), 774 NULLMASK 775 }; 776 777 static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */ 778 __P4MASK(tcmiss, 0), 779 NULLMASK, 780 }; 781 782 static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */ 783 __P4MASK(hit, 0), 784 __P4MASK(miss, 1), 785 __P4MASK(hit-uc, 2), 786 NULLMASK 787 }; 788 789 static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */ 790 __P4MASK(st-rb-full, 2), 791 __P4MASK(64k-conf, 3), 792 NULLMASK 793 }; 794 795 static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */ 796 __P4MASK(lsc, 0), 797 __P4MASK(ssc, 1), 798 NULLMASK 799 }; 800 801 static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */ 802 __P4MASK(split-ld, 1), 803 NULLMASK 804 }; 805 806 static const struct pmc_masks p4_mask_spr[] = { /* store port replay */ 807 __P4MASK(split-st, 1), 808 NULLMASK 809 }; 810 811 static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */ 812 __P4MASK(no-sta, 1), 813 
__P4MASK(no-std, 3), 814 __P4MASK(partial-data, 4), 815 __P4MASK(unalgn-addr, 5), 816 NULLMASK 817 }; 818 819 static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */ 820 __P4MASK(dtmiss, 0), 821 __P4MASK(itmiss, 1), 822 NULLMASK 823 }; 824 825 static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */ 826 __P4MASK(rd-2ndl-hits, 0), 827 __P4MASK(rd-2ndl-hite, 1), 828 __P4MASK(rd-2ndl-hitm, 2), 829 __P4MASK(rd-3rdl-hits, 3), 830 __P4MASK(rd-3rdl-hite, 4), 831 __P4MASK(rd-3rdl-hitm, 5), 832 __P4MASK(rd-2ndl-miss, 8), 833 __P4MASK(rd-3rdl-miss, 9), 834 __P4MASK(wr-2ndl-miss, 10), 835 NULLMASK 836 }; 837 838 static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */ 839 __P4MASK(all-read, 5), 840 __P4MASK(all-write, 6), 841 __P4MASK(mem-uc, 7), 842 __P4MASK(mem-wc, 8), 843 __P4MASK(mem-wt, 9), 844 __P4MASK(mem-wp, 10), 845 __P4MASK(mem-wb, 11), 846 __P4MASK(own, 13), 847 __P4MASK(other, 14), 848 __P4MASK(prefetch, 15), 849 NULLMASK 850 }; 851 852 static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */ 853 __P4MASK(all-read, 5), 854 __P4MASK(all-write, 6), 855 __P4MASK(mem-uc, 7), 856 __P4MASK(mem-wc, 8), 857 __P4MASK(mem-wt, 9), 858 __P4MASK(mem-wp, 10), 859 __P4MASK(mem-wb, 11), 860 __P4MASK(own, 13), 861 __P4MASK(other, 14), 862 __P4MASK(prefetch, 15), 863 NULLMASK 864 }; 865 866 static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */ 867 __P4MASK(drdy-drv, 0), 868 __P4MASK(drdy-own, 1), 869 __P4MASK(drdy-other, 2), 870 __P4MASK(dbsy-drv, 3), 871 __P4MASK(dbsy-own, 4), 872 __P4MASK(dbsy-other, 5), 873 NULLMASK 874 }; 875 876 static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */ 877 __P4MASK(req-type0, 0), 878 __P4MASK(req-type1, 1), 879 __P4MASK(req-len0, 2), 880 __P4MASK(req-len1, 3), 881 __P4MASK(req-io-type, 5), 882 __P4MASK(req-lock-type, 6), 883 __P4MASK(req-cache-type, 7), 884 __P4MASK(req-split-type, 8), 885 __P4MASK(req-dem-type, 9), 886 __P4MASK(req-ord-type, 10), 887 
__P4MASK(mem-type0, 11), 888 __P4MASK(mem-type1, 12), 889 __P4MASK(mem-type2, 13), 890 NULLMASK 891 }; 892 893 static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */ 894 __P4MASK(all, 15), 895 NULLMASK 896 }; 897 898 static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */ 899 __P4MASK(all, 15), 900 NULLMASK 901 }; 902 903 static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */ 904 __P4MASK(all, 15), 905 NULLMASK 906 }; 907 908 static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */ 909 __P4MASK(all, 15), 910 NULLMASK 911 }; 912 913 static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */ 914 __P4MASK(all, 15), 915 NULLMASK 916 }; 917 918 static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */ 919 __P4MASK(all, 15), 920 NULLMASK 921 }; 922 923 static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */ 924 __P4MASK(all, 15), 925 NULLMASK 926 }; 927 928 static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */ 929 __P4MASK(all, 15), 930 NULLMASK 931 }; 932 933 static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */ 934 __P4MASK(allp0, 3), 935 __P4MASK(allp2, 4), 936 NULLMASK 937 }; 938 939 static const struct pmc_masks p4_mask_gpe[] = { /* global power events */ 940 __P4MASK(running, 0), 941 NULLMASK 942 }; 943 944 static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */ 945 __P4MASK(cisc, 0), 946 NULLMASK 947 }; 948 949 static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */ 950 __P4MASK(from-tc-build, 0), 951 __P4MASK(from-tc-deliver, 1), 952 __P4MASK(from-rom, 2), 953 NULLMASK 954 }; 955 956 static const struct pmc_masks p4_mask_rmbt[] = { 957 /* retired mispred branch type */ 958 __P4MASK(conditional, 1), 959 __P4MASK(call, 2), 960 __P4MASK(return, 3), 961 __P4MASK(indirect, 4), 962 NULLMASK 963 }; 964 965 static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */ 966 __P4MASK(conditional, 1), 967 __P4MASK(call, 
2), 968 __P4MASK(retired, 3), 969 __P4MASK(indirect, 4), 970 NULLMASK 971 }; 972 973 static const struct pmc_masks p4_mask_rs[] = { /* resource stall */ 974 __P4MASK(sbfull, 5), 975 NULLMASK 976 }; 977 978 static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */ 979 __P4MASK(wcb-evicts, 0), 980 __P4MASK(wcb-full-evict, 1), 981 NULLMASK 982 }; 983 984 static const struct pmc_masks p4_mask_fee[] = { /* front end event */ 985 __P4MASK(nbogus, 0), 986 __P4MASK(bogus, 1), 987 NULLMASK 988 }; 989 990 static const struct pmc_masks p4_mask_ee[] = { /* execution event */ 991 __P4MASK(nbogus0, 0), 992 __P4MASK(nbogus1, 1), 993 __P4MASK(nbogus2, 2), 994 __P4MASK(nbogus3, 3), 995 __P4MASK(bogus0, 4), 996 __P4MASK(bogus1, 5), 997 __P4MASK(bogus2, 6), 998 __P4MASK(bogus3, 7), 999 NULLMASK 1000 }; 1001 1002 static const struct pmc_masks p4_mask_re[] = { /* replay event */ 1003 __P4MASK(nbogus, 0), 1004 __P4MASK(bogus, 1), 1005 NULLMASK 1006 }; 1007 1008 static const struct pmc_masks p4_mask_insret[] = { /* instr retired */ 1009 __P4MASK(nbogusntag, 0), 1010 __P4MASK(nbogustag, 1), 1011 __P4MASK(bogusntag, 2), 1012 __P4MASK(bogustag, 3), 1013 NULLMASK 1014 }; 1015 1016 static const struct pmc_masks p4_mask_ur[] = { /* uops retired */ 1017 __P4MASK(nbogus, 0), 1018 __P4MASK(bogus, 1), 1019 NULLMASK 1020 }; 1021 1022 static const struct pmc_masks p4_mask_ut[] = { /* uop type */ 1023 __P4MASK(tagloads, 1), 1024 __P4MASK(tagstores, 2), 1025 NULLMASK 1026 }; 1027 1028 static const struct pmc_masks p4_mask_br[] = { /* branch retired */ 1029 __P4MASK(mmnp, 0), 1030 __P4MASK(mmnm, 1), 1031 __P4MASK(mmtp, 2), 1032 __P4MASK(mmtm, 3), 1033 NULLMASK 1034 }; 1035 1036 static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */ 1037 __P4MASK(nbogus, 0), 1038 NULLMASK 1039 }; 1040 1041 static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */ 1042 __P4MASK(fpsu, 0), 1043 __P4MASK(fpso, 1), 1044 __P4MASK(poao, 2), 1045 __P4MASK(poau, 3), 1046 __P4MASK(prea, 4), 1047 
	NULLMASK
};

/* Bits for the P4 MACHINE_CLEAR event qualifier. */
static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
	__P4MASK(clear, 0),
	__P4MASK(moclear, 2),
	__P4MASK(smclear, 3),
	NULLMASK
};

/*
 * P4 event parser.
 *
 * Translate event 'pe' plus its comma-separated qualifier string
 * 'ctrspec' into the P4 CCCR/ESCR register images stored in
 * 'pmc_config->pm_md.pm_p4'.  Returns 0 on success, -1 on any parse
 * or validation error.  'ctrspec' is consumed destructively by
 * strsep(3).
 */
static int
p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{

	char	*e, *p, *q;
	int	count, has_tag, has_busreqtype, n;
	uint32_t evmask, cccractivemask;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
	    pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;

	pmask = NULL;		/* mask table for this event, if any */
	evmask = 0;		/* accumulated event-mask bits */
	cccractivemask = 0x3;	/* default: count when any thread active */
	has_tag = has_busreqtype = 0;

/* Select the mask table appropriate for the event being allocated. */
#define	__P4SETMASK(M) do {				\
	pmask = p4_mask_##M;				\
} while (0)

	switch (pe) {
	case PMC_EV_P4_TC_DELIVER_MODE:
		__P4SETMASK(tcdm);
		break;
	case PMC_EV_P4_BPU_FETCH_REQUEST:
		__P4SETMASK(bfr);
		break;
	case PMC_EV_P4_ITLB_REFERENCE:
		__P4SETMASK(ir);
		break;
	case PMC_EV_P4_MEMORY_CANCEL:
		__P4SETMASK(memcan);
		break;
	case PMC_EV_P4_MEMORY_COMPLETE:
		__P4SETMASK(memcomp);
		break;
	case PMC_EV_P4_LOAD_PORT_REPLAY:
		__P4SETMASK(lpr);
		break;
	case PMC_EV_P4_STORE_PORT_REPLAY:
		__P4SETMASK(spr);
		break;
	case PMC_EV_P4_MOB_LOAD_REPLAY:
		__P4SETMASK(mlr);
		break;
	case PMC_EV_P4_PAGE_WALK_TYPE:
		__P4SETMASK(pwt);
		break;
	case PMC_EV_P4_BSQ_CACHE_REFERENCE:
		__P4SETMASK(bcr);
		break;
	case PMC_EV_P4_IOQ_ALLOCATION:
		__P4SETMASK(ia);
		has_busreqtype = 1;	/* accepts a "busreqtype=" qualifier */
		break;
	case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
		__P4SETMASK(iae);
		has_busreqtype = 1;	/* accepts a "busreqtype=" qualifier */
		break;
	case PMC_EV_P4_FSB_DATA_ACTIVITY:
		__P4SETMASK(fda);
		break;
	case PMC_EV_P4_BSQ_ALLOCATION:
		__P4SETMASK(ba);
		break;
	case PMC_EV_P4_SSE_INPUT_ASSIST:
		__P4SETMASK(sia);
		break;
	case PMC_EV_P4_PACKED_SP_UOP:
		__P4SETMASK(psu);
		break;
	case PMC_EV_P4_PACKED_DP_UOP:
		__P4SETMASK(pdu);
		break;
	case PMC_EV_P4_SCALAR_SP_UOP:
		__P4SETMASK(ssu);
		break;
	case PMC_EV_P4_SCALAR_DP_UOP:
		__P4SETMASK(sdu);
		break;
	case PMC_EV_P4_64BIT_MMX_UOP:
		__P4SETMASK(64bmu);
		break;
	case PMC_EV_P4_128BIT_MMX_UOP:
		__P4SETMASK(128bmu);
		break;
	case PMC_EV_P4_X87_FP_UOP:
		__P4SETMASK(xfu);
		break;
	case PMC_EV_P4_X87_SIMD_MOVES_UOP:
		__P4SETMASK(xsmu);
		break;
	case PMC_EV_P4_GLOBAL_POWER_EVENTS:
		__P4SETMASK(gpe);
		break;
	case PMC_EV_P4_TC_MS_XFER:
		__P4SETMASK(tmx);
		break;
	case PMC_EV_P4_UOP_QUEUE_WRITES:
		__P4SETMASK(uqw);
		break;
	case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
		__P4SETMASK(rmbt);
		break;
	case PMC_EV_P4_RETIRED_BRANCH_TYPE:
		__P4SETMASK(rbt);
		break;
	case PMC_EV_P4_RESOURCE_STALL:
		__P4SETMASK(rs);
		break;
	case PMC_EV_P4_WC_BUFFER:
		__P4SETMASK(wb);
		break;
	/* The following events take no event-mask qualifier at all. */
	case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
	case PMC_EV_P4_B2B_CYCLES:
	case PMC_EV_P4_BNR:
	case PMC_EV_P4_SNOOP:
	case PMC_EV_P4_RESPONSE:
		break;
	case PMC_EV_P4_FRONT_END_EVENT:
		__P4SETMASK(fee);
		break;
	case PMC_EV_P4_EXECUTION_EVENT:
		__P4SETMASK(ee);
		break;
	case PMC_EV_P4_REPLAY_EVENT:
		__P4SETMASK(re);
		break;
	case PMC_EV_P4_INSTR_RETIRED:
		__P4SETMASK(insret);
		break;
	case PMC_EV_P4_UOPS_RETIRED:
		__P4SETMASK(ur);
		break;
	case PMC_EV_P4_UOP_TYPE:
		__P4SETMASK(ut);
		break;
	case PMC_EV_P4_BRANCH_RETIRED:
		__P4SETMASK(br);
		break;
	case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
		__P4SETMASK(mbr);
		break;
	case PMC_EV_P4_X87_ASSIST:
		__P4SETMASK(xa);
		break;
	case PMC_EV_P4_MACHINE_CLEAR:
		__P4SETMASK(machclr);
		break;
	default:
		return (-1);	/* unknown P4 event */
	}

	/* process additional flags */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
			/*
			 * NOTE(review): strchr() returns NULL when no '='
			 * follows the keyword, in which case '*++q' below
			 * dereferences a bogus pointer.  Verify that
			 * KWPREFIXMATCH guarantees the '=' is present.
			 */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0)
				cccractivemask = 0x0;
			else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0)
				cccractivemask = 0x1;
			else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0)
				cccractivemask = 0x2;
			else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0)
				cccractivemask = 0x3;
			else
				return (-1);

		} else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
			/* only valid for events that declared has_busreqtype */
			if (has_busreqtype == 0)
				return (-1);

			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			/* the bus request type occupies the low 5 mask bits */
			evmask = (evmask & ~0x1F) | (count & 0x1F);
		} else if (KWMATCH(p, P4_KW_CASCADE))
			pmc_config->pm_caps |= PMC_CAP_CASCADE;
		else if (KWMATCH(p, P4_KW_EDGE))
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		else if (KWMATCH(p, P4_KW_INV))
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P4_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, P4_KW_PRECISE))
			pmc_config->pm_caps |= PMC_CAP_PRECISE;
		else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
			/* only valid for events that declared has_tag */
			if (has_tag == 0)
				return (-1);

			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_TAGGING;
			pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
			    P4_ESCR_TO_TAG_VALUE(count);
		} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
			    ~P4_CCCR_THRESHOLD_MASK;
			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
			    P4_CCCR_TO_THRESHOLD(count);
		} else if (KWMATCH(p, P4_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else
			return (-1);
	}

	/* other post processing */
	if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
	    pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
	    pe == PMC_EV_P4_BSQ_ALLOCATION)
		pmc_config->pm_caps |= PMC_CAP_EDGE;

	/* fill in thread activity mask */
	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
	    P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);

	if (evmask)
		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;

	/* Per-event validation and defaulting of the event mask. */
	switch (pe) {
	case PMC_EV_P4_FSB_DATA_ACTIVITY:
		if ((evmask & 0x06) == 0x06 ||
		    (evmask & 0x18) == 0x18)
			return (-1); /* can't have own+other bits together */
		if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
			evmask = 0x1D;
		break;
	case PMC_EV_P4_MACHINE_CLEAR:
		/* only one bit is allowed to be set */
		if ((evmask & (evmask - 1)) != 0)
			return (-1);
		if (evmask == 0) {
			evmask = 0x1;	/* 'CLEAR' */
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
		break;
	default:
		/* default: count on all bits in the event's mask table */
		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
	}

	pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
	    P4_ESCR_TO_EVENT_MASK(evmask);

	return (0);
}

#endif

#if defined(__i386__)

/*
 * Pentium style PMCs
 */

static struct pmc_event_alias p5_aliases[] = {
	EV_ALIAS("branches", "p5-taken-branches"),
	EV_ALIAS("cycles", "tsc"),
	EV_ALIAS("dc-misses",
"p5-data-read-miss-or-write-miss"), 1349 EV_ALIAS("ic-misses", "p5-code-cache-miss"), 1350 EV_ALIAS("instructions", "p5-instructions-executed"), 1351 EV_ALIAS("interrupts", "p5-hardware-interrupts"), 1352 EV_ALIAS("unhalted-cycles", 1353 "p5-number-of-cycles-not-in-halt-state"), 1354 EV_ALIAS(NULL, NULL) 1355 }; 1356 1357 static int 1358 p5_allocate_pmc(enum pmc_event pe, char *ctrspec, 1359 struct pmc_op_pmcallocate *pmc_config) 1360 { 1361 return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */ 1362 } 1363 1364 /* 1365 * Pentium Pro style PMCs. These PMCs are found in Pentium II, Pentium III, 1366 * and Pentium M CPUs. 1367 */ 1368 1369 static struct pmc_event_alias p6_aliases[] = { 1370 EV_ALIAS("branches", "p6-br-inst-retired"), 1371 EV_ALIAS("branch-mispredicts", "p6-br-miss-pred-retired"), 1372 EV_ALIAS("cycles", "tsc"), 1373 EV_ALIAS("dc-misses", "p6-dcu-lines-in"), 1374 EV_ALIAS("ic-misses", "p6-ifu-fetch-miss"), 1375 EV_ALIAS("instructions", "p6-inst-retired"), 1376 EV_ALIAS("interrupts", "p6-hw-int-rx"), 1377 EV_ALIAS("unhalted-cycles", "p6-cpu-clk-unhalted"), 1378 EV_ALIAS(NULL, NULL) 1379 }; 1380 1381 #define P6_KW_CMASK "cmask" 1382 #define P6_KW_EDGE "edge" 1383 #define P6_KW_INV "inv" 1384 #define P6_KW_OS "os" 1385 #define P6_KW_UMASK "umask" 1386 #define P6_KW_USR "usr" 1387 1388 static struct pmc_masks p6_mask_mesi[] = { 1389 PMCMASK(m, 0x01), 1390 PMCMASK(e, 0x02), 1391 PMCMASK(s, 0x04), 1392 PMCMASK(i, 0x08), 1393 NULLMASK 1394 }; 1395 1396 static struct pmc_masks p6_mask_mesihw[] = { 1397 PMCMASK(m, 0x01), 1398 PMCMASK(e, 0x02), 1399 PMCMASK(s, 0x04), 1400 PMCMASK(i, 0x08), 1401 PMCMASK(nonhw, 0x00), 1402 PMCMASK(hw, 0x10), 1403 PMCMASK(both, 0x30), 1404 NULLMASK 1405 }; 1406 1407 static struct pmc_masks p6_mask_hw[] = { 1408 PMCMASK(nonhw, 0x00), 1409 PMCMASK(hw, 0x10), 1410 PMCMASK(both, 0x30), 1411 NULLMASK 1412 }; 1413 1414 static struct pmc_masks p6_mask_any[] = { 1415 PMCMASK(self, 0x00), 1416 PMCMASK(any, 0x20), 1417 NULLMASK 1418 
}; 1419 1420 static struct pmc_masks p6_mask_ekp[] = { 1421 PMCMASK(nta, 0x00), 1422 PMCMASK(t1, 0x01), 1423 PMCMASK(t2, 0x02), 1424 PMCMASK(wos, 0x03), 1425 NULLMASK 1426 }; 1427 1428 static struct pmc_masks p6_mask_pps[] = { 1429 PMCMASK(packed-and-scalar, 0x00), 1430 PMCMASK(scalar, 0x01), 1431 NULLMASK 1432 }; 1433 1434 static struct pmc_masks p6_mask_mite[] = { 1435 PMCMASK(packed-multiply, 0x01), 1436 PMCMASK(packed-shift, 0x02), 1437 PMCMASK(pack, 0x04), 1438 PMCMASK(unpack, 0x08), 1439 PMCMASK(packed-logical, 0x10), 1440 PMCMASK(packed-arithmetic, 0x20), 1441 NULLMASK 1442 }; 1443 1444 static struct pmc_masks p6_mask_fmt[] = { 1445 PMCMASK(mmxtofp, 0x00), 1446 PMCMASK(fptommx, 0x01), 1447 NULLMASK 1448 }; 1449 1450 static struct pmc_masks p6_mask_sr[] = { 1451 PMCMASK(es, 0x01), 1452 PMCMASK(ds, 0x02), 1453 PMCMASK(fs, 0x04), 1454 PMCMASK(gs, 0x08), 1455 NULLMASK 1456 }; 1457 1458 static struct pmc_masks p6_mask_eet[] = { 1459 PMCMASK(all, 0x00), 1460 PMCMASK(freq, 0x02), 1461 NULLMASK 1462 }; 1463 1464 static struct pmc_masks p6_mask_efur[] = { 1465 PMCMASK(all, 0x00), 1466 PMCMASK(loadop, 0x01), 1467 PMCMASK(stdsta, 0x02), 1468 NULLMASK 1469 }; 1470 1471 static struct pmc_masks p6_mask_essir[] = { 1472 PMCMASK(sse-packed-single, 0x00), 1473 PMCMASK(sse-packed-single-scalar-single, 0x01), 1474 PMCMASK(sse2-packed-double, 0x02), 1475 PMCMASK(sse2-scalar-double, 0x03), 1476 NULLMASK 1477 }; 1478 1479 static struct pmc_masks p6_mask_esscir[] = { 1480 PMCMASK(sse-packed-single, 0x00), 1481 PMCMASK(sse-scalar-single, 0x01), 1482 PMCMASK(sse2-packed-double, 0x02), 1483 PMCMASK(sse2-scalar-double, 0x03), 1484 NULLMASK 1485 }; 1486 1487 /* P6 event parser */ 1488 static int 1489 p6_allocate_pmc(enum pmc_event pe, char *ctrspec, 1490 struct pmc_op_pmcallocate *pmc_config) 1491 { 1492 char *e, *p, *q; 1493 uint32_t evmask; 1494 int count, n; 1495 const struct pmc_masks *pm, *pmask; 1496 1497 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 1498 
	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;

	evmask = 0;	/* accumulated unit-mask bits */

/* Select the unit-mask table appropriate for event 'pe'. */
#define	P6MASKSET(M)	pmask = p6_mask_ ## M

	switch(pe) {
	case PMC_EV_P6_L2_IFETCH:	P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:
		P6MASKSET(any); break;
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:
		P6MASKSET(ekp); break;
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
		P6MASKSET(pps); break;
	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
		P6MASKSET(mite); break;
	case PMC_EV_P6_FP_MMX_TRANS:
		P6MASKSET(fmt); break;
	case PMC_EV_P6_SEG_RENAME_STALLS:
	case PMC_EV_P6_SEG_REG_RENAMES:
		P6MASKSET(sr); break;
	case PMC_EV_P6_EMON_EST_TRANS:
		P6MASKSET(eet); break;
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
		P6MASKSET(efur); break;
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
		P6MASKSET(essir); break;
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
		P6MASKSET(esscir); break;
	default:
		pmask = NULL;	/* event takes no unit mask */
		break;
	}

	/* Pentium M PMCs have a few events with different semantics */
	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
		if (pe == PMC_EV_P6_L2_LD ||
		    pe == PMC_EV_P6_L2_LINES_IN ||
		    pe == PMC_EV_P6_L2_LINES_OUT)
			P6MASKSET(mesihw);
		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
			P6MASKSET(hw);
	}

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
			    P6_EVSEL_TO_CMASK(count);
		} else if (KWMATCH(p, P6_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, P6_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, P6_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
			evmask = 0;
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
			     pe == PMC_EV_P6_BUS_TRANS_IO ||
			     pe == PMC_EV_P6_BUS_TRANS_P ||
			     pe == PMC_EV_P6_BUS_TRANS_WB ||
			     pe == PMC_EV_P6_EMON_EST_TRANS ||
			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
			     pe == PMC_EV_P6_FP_MMX_TRANS)
			    && (n > 1))	/* Only one mask keyword is allowed. */
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P6_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* post processing */
	switch (pe) {

	/*
	 * The following events default to an evmask of 0
	 */

	/* default => 'self' */
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:

	/* default => 'nta' */
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:

	/* default => 'packed and scalar' */
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:

	/* default => 'mmx to fp transitions' */
	case PMC_EV_P6_FP_MMX_TRANS:

	/* default => 'SSE Packed Single' */
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:

	/* default => 'all fused micro-ops' */
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:

	/* default => 'all transitions' */
	case PMC_EV_P6_EMON_EST_TRANS:
		break;

	case PMC_EV_P6_MMX_UOPS_EXEC:
		evmask = 0x0F;		/* only value allowed */
		break;

	default:
		/*
		 * For all other events, set the default event mask
		 * to a logical OR of all the allowed event mask bits.
		 */
		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return (0);
}

#endif

#if defined(__i386__) || defined(__amd64__)
/*
 * TSC "event" parser: only the PMC_EV_TSC_TSC pseudo-event is valid,
 * and it is read-only and takes no qualifiers.
 */
static int
tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	if (pe != PMC_EV_TSC_TSC)
		return (-1);

	/* TSC events must be unqualified. */
	if (ctrspec && *ctrspec != '\0')
		return (-1);

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= PMC_CAP_READ;

	return (0);
}
#endif

/*
 * Match an event name `name' with its canonical form.
 *
 * Matches are case insensitive and spaces, underscores and hyphen
 * characters are considered to match each other.
 *
 * Returns 1 for a match, 0 otherwise.
 */

static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	int cc, nc;
	const unsigned char *c, *n;

	/* use unsigned char so toupper() gets well-defined input */
	c = (const unsigned char *) canonicalname;
	n = (const unsigned char *) name;

	for (; (nc = *n) && (cc = *c); n++, c++) {

		if (toupper(nc) == cc)
			continue;

		/* separators ' ', '_' and '-' are interchangeable */
		if ((nc == ' ' || nc == '_' || nc == '-') &&
		    (cc == ' ' || cc == '_' || cc == '-'))
			continue;

		return (0);
	}

	/* both strings must have been fully consumed */
	if (*n == '\0' && *c == '\0')
		return (1);

	return (0);
}

/*
 * Match an event name against all the events supported by a
 * PMC class.
 *
 * Returns an event descriptor pointer on match or NULL otherwise.
1746 */ 1747 static const struct pmc_event_descr * 1748 pmc_match_event_class(const char *name, 1749 const struct pmc_class_descr *pcd) 1750 { 1751 size_t n; 1752 const struct pmc_event_descr *ev; 1753 1754 ev = pcd->pm_evc_event_table; 1755 for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++) 1756 if (pmc_match_event_name(name, ev->pm_ev_name)) 1757 return (ev); 1758 1759 return (NULL); 1760 } 1761 1762 static int 1763 pmc_mdep_is_compatible_class(enum pmc_class pc) 1764 { 1765 size_t n; 1766 1767 for (n = 0; n < pmc_mdep_class_list_size; n++) 1768 if (pmc_mdep_class_list[n] == pc) 1769 return (1); 1770 return (0); 1771 } 1772 1773 /* 1774 * API entry points 1775 */ 1776 1777 int 1778 pmc_allocate(const char *ctrspec, enum pmc_mode mode, 1779 uint32_t flags, int cpu, pmc_id_t *pmcid) 1780 { 1781 size_t n; 1782 int retval; 1783 char *r, *spec_copy; 1784 const char *ctrname; 1785 const struct pmc_event_descr *ev; 1786 const struct pmc_event_alias *alias; 1787 struct pmc_op_pmcallocate pmc_config; 1788 const struct pmc_class_descr *pcd; 1789 1790 spec_copy = NULL; 1791 retval = -1; 1792 1793 if (mode != PMC_MODE_SS && mode != PMC_MODE_TS && 1794 mode != PMC_MODE_SC && mode != PMC_MODE_TC) { 1795 errno = EINVAL; 1796 goto out; 1797 } 1798 1799 /* replace an event alias with the canonical event specifier */ 1800 if (pmc_mdep_event_aliases) 1801 for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++) 1802 if (!strcasecmp(ctrspec, alias->pm_alias)) { 1803 spec_copy = strdup(alias->pm_spec); 1804 break; 1805 } 1806 1807 if (spec_copy == NULL) 1808 spec_copy = strdup(ctrspec); 1809 1810 r = spec_copy; 1811 ctrname = strsep(&r, ","); 1812 1813 /* 1814 * If a explicit class prefix was given by the user, restrict the 1815 * search for the event to the specified PMC class. 
1816 */ 1817 ev = NULL; 1818 for (n = 0; n < pmc_event_class_table_size; n++) { 1819 pcd = &pmc_class_table[n]; 1820 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) && 1821 strncasecmp(ctrname, pcd->pm_evc_name, 1822 pcd->pm_evc_name_size) == 0) { 1823 if ((ev = pmc_match_event_class(ctrname + 1824 pcd->pm_evc_name_size, pcd)) == NULL) { 1825 errno = EINVAL; 1826 goto out; 1827 } 1828 break; 1829 } 1830 } 1831 1832 /* 1833 * Otherwise, search for this event in all compatible PMC 1834 * classes. 1835 */ 1836 for (n = 0; ev == NULL && n < pmc_event_class_table_size; n++) { 1837 pcd = &pmc_class_table[n]; 1838 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class)) 1839 ev = pmc_match_event_class(ctrname, pcd); 1840 } 1841 1842 if (ev == NULL) { 1843 errno = EINVAL; 1844 goto out; 1845 } 1846 1847 bzero(&pmc_config, sizeof(pmc_config)); 1848 pmc_config.pm_ev = ev->pm_ev_code; 1849 pmc_config.pm_class = pcd->pm_evc_class; 1850 pmc_config.pm_cpu = cpu; 1851 pmc_config.pm_mode = mode; 1852 pmc_config.pm_flags = flags; 1853 1854 if (PMC_IS_SAMPLING_MODE(mode)) 1855 pmc_config.pm_caps |= PMC_CAP_INTERRUPT; 1856 1857 if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) { 1858 errno = EINVAL; 1859 goto out; 1860 } 1861 1862 if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0) 1863 goto out; 1864 1865 *pmcid = pmc_config.pm_pmcid; 1866 1867 retval = 0; 1868 1869 out: 1870 if (spec_copy) 1871 free(spec_copy); 1872 1873 return (retval); 1874 } 1875 1876 int 1877 pmc_attach(pmc_id_t pmc, pid_t pid) 1878 { 1879 struct pmc_op_pmcattach pmc_attach_args; 1880 1881 pmc_attach_args.pm_pmc = pmc; 1882 pmc_attach_args.pm_pid = pid; 1883 1884 return (PMC_CALL(PMCATTACH, &pmc_attach_args)); 1885 } 1886 1887 int 1888 pmc_capabilities(pmc_id_t pmcid, uint32_t *caps) 1889 { 1890 unsigned int i; 1891 enum pmc_class cl; 1892 1893 cl = PMC_ID_TO_CLASS(pmcid); 1894 for (i = 0; i < cpu_info.pm_nclass; i++) 1895 if (cpu_info.pm_classes[i].pm_class == cl) { 1896 *caps = 
cpu_info.pm_classes[i].pm_caps; 1897 return (0); 1898 } 1899 errno = EINVAL; 1900 return (-1); 1901 } 1902 1903 int 1904 pmc_configure_logfile(int fd) 1905 { 1906 struct pmc_op_configurelog cla; 1907 1908 cla.pm_logfd = fd; 1909 if (PMC_CALL(CONFIGURELOG, &cla) < 0) 1910 return (-1); 1911 return (0); 1912 } 1913 1914 int 1915 pmc_cpuinfo(const struct pmc_cpuinfo **pci) 1916 { 1917 if (pmc_syscall == -1) { 1918 errno = ENXIO; 1919 return (-1); 1920 } 1921 1922 *pci = &cpu_info; 1923 return (0); 1924 } 1925 1926 int 1927 pmc_detach(pmc_id_t pmc, pid_t pid) 1928 { 1929 struct pmc_op_pmcattach pmc_detach_args; 1930 1931 pmc_detach_args.pm_pmc = pmc; 1932 pmc_detach_args.pm_pid = pid; 1933 return (PMC_CALL(PMCDETACH, &pmc_detach_args)); 1934 } 1935 1936 int 1937 pmc_disable(int cpu, int pmc) 1938 { 1939 struct pmc_op_pmcadmin ssa; 1940 1941 ssa.pm_cpu = cpu; 1942 ssa.pm_pmc = pmc; 1943 ssa.pm_state = PMC_STATE_DISABLED; 1944 return (PMC_CALL(PMCADMIN, &ssa)); 1945 } 1946 1947 int 1948 pmc_enable(int cpu, int pmc) 1949 { 1950 struct pmc_op_pmcadmin ssa; 1951 1952 ssa.pm_cpu = cpu; 1953 ssa.pm_pmc = pmc; 1954 ssa.pm_state = PMC_STATE_FREE; 1955 return (PMC_CALL(PMCADMIN, &ssa)); 1956 } 1957 1958 /* 1959 * Return a list of events known to a given PMC class. 'cl' is the 1960 * PMC class identifier, 'eventnames' is the returned list of 'const 1961 * char *' pointers pointing to the names of the events. 'nevents' is 1962 * the number of event name pointers returned. 1963 * 1964 * The space for 'eventnames' is allocated using malloc(3). The caller 1965 * is responsible for freeing this space when done. 
 */
int
pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
    int *nevents)
{
	int count;
	const char **names;
	const struct pmc_event_descr *ev;

	/* select the event table and its size for class 'cl' */
	switch (cl)
	{
	case PMC_CLASS_TSC:
		ev = tsc_event_table;
		count = PMC_EVENT_TABLE_SIZE(tsc);
		break;
	case PMC_CLASS_K7:
		ev = k7_event_table;
		count = PMC_EVENT_TABLE_SIZE(k7);
		break;
	case PMC_CLASS_K8:
		ev = k8_event_table;
		count = PMC_EVENT_TABLE_SIZE(k8);
		break;
	case PMC_CLASS_P4:
		ev = p4_event_table;
		count = PMC_EVENT_TABLE_SIZE(p4);
		break;
	case PMC_CLASS_P5:
		ev = p5_event_table;
		count = PMC_EVENT_TABLE_SIZE(p5);
		break;
	case PMC_CLASS_P6:
		ev = p6_event_table;
		count = PMC_EVENT_TABLE_SIZE(p6);
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	if ((names = malloc(count * sizeof(const char *))) == NULL)
		return (-1);

	*eventnames = names;
	*nevents = count;

	/* the returned pointers reference static table storage */
	for (;count--; ev++, names++)
		*names = ev->pm_ev_name;
	return (0);
}

/*
 * Ask the driver to flush any buffered log data to the configured
 * log file.
 */
int
pmc_flush_logfile(void)
{
	return (PMC_CALL(FLUSHLOG,0));
}

/*
 * Retrieve the driver's internal statistics into '*ds'.
 */
int
pmc_get_driver_stats(struct pmc_driverstats *ds)
{
	struct pmc_op_getdriverstats gms;

	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
		return (-1);

	/* copy out fields in the current userland<->library interface */
	ds->pm_intr_ignored = gms.pm_intr_ignored;
	ds->pm_intr_processed = gms.pm_intr_processed;
	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
	ds->pm_syscalls = gms.pm_syscalls;
	ds->pm_syscall_errors = gms.pm_syscall_errors;
	ds->pm_buffer_requests = gms.pm_buffer_requests;
	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
	ds->pm_log_sweeps = gms.pm_log_sweeps;
	return (0);
}

/*
 * Retrieve the MSR number usable for directly reading PMC 'pmc'
 * from userland.
 */
int
pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
{
	struct pmc_op_getmsr gm;

	gm.pm_pmcid = pmc;
	if (PMC_CALL(PMCGETMSR, &gm) < 0)
		return (-1);
	*msr = gm.pm_msr;
	return (0);
}

/*
 * Initialize the library: locate the hwpmc KLD's syscall number,
 * verify the kernel module's ABI version, cache CPU information and
 * select the machine dependent event parser tables.  Returns 0 on
 * success; on failure returns -1 with 'pmc_syscall' reset to -1 so
 * later API calls also fail.
 */
int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	/* cache the CPU description for the query functions below */
	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass = op_cpu_info.pm_nclass;
	for (n = 0; n < cpu_info.pm_nclass; n++)
		cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];

/* Install the alias and class tables for CPU family 'C'. */
#define	PMC_MDEP_INIT(C) do {					\
		pmc_mdep_event_aliases = C##_aliases;		\
		pmc_mdep_class_list = C##_pmc_classes;		\
		pmc_mdep_class_list_size =			\
		    PMC_TABLE_SIZE(C##_pmc_classes);		\
	} while (0)

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__i386__)
	case PMC_CPU_AMD_K7:
		PMC_MDEP_INIT(k7);
		break;
	case PMC_CPU_INTEL_P5:
		PMC_MDEP_INIT(p5);
		break;
	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
	case PMC_CPU_INTEL_PIII:
	case PMC_CPU_INTEL_PM:
		PMC_MDEP_INIT(p6);
		break;
#endif
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		break;
	case PMC_CPU_INTEL_PIV:
		PMC_MDEP_INIT(p4);
		break;
#endif

	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about. This shouldn't happen since the abi version check
		 * should have caught this.
		 */
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}

/*
 * Return the symbolic name of capability bit 'cap', or NULL with
 * errno set to EINVAL for an invalid capability.
 */
const char *
pmc_name_of_capability(enum pmc_caps cap)
{
	int i;

	/*
	 * 'cap' should have a single bit set and should be in
	 * range.
	 */
	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
	    cap > PMC_CAP_LAST) {
		errno = EINVAL;
		return (NULL);
	}

	/* ffs(3) gives the 1-based index of the single set bit */
	i = ffs(cap);
	return (pmc_capability_names[i - 1]);
}

/*
 * Return the symbolic name of PMC class 'pc', or NULL with errno
 * set to EINVAL if out of range.
 */
const char *
pmc_name_of_class(enum pmc_class pc)
{
	if ((int) pc >= PMC_CLASS_FIRST &&
	    pc <= PMC_CLASS_LAST)
		return (pmc_class_names[pc]);

	errno = EINVAL;
	return (NULL);
}

/*
 * Return the symbolic name of CPU type 'cp', or NULL with errno
 * set to EINVAL if unknown.
 */
const char *
pmc_name_of_cputype(enum pmc_cputype cp)
{
	size_t n;

	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
		if (cp == pmc_cputype_names[n].pm_cputype)
			return (pmc_cputype_names[n].pm_name);

	errno = EINVAL;
	return (NULL);
}

/*
 * Return the symbolic name of disposition 'pd', or NULL with errno
 * set to EINVAL if out of range.
 */
const char *
pmc_name_of_disposition(enum pmc_disp pd)
{
	if ((int) pd >= PMC_DISP_FIRST &&
	    pd <= PMC_DISP_LAST)
		return (pmc_disposition_names[pd]);

	errno = EINVAL;
	return (NULL);
}

/*
 * Return the canonical name of event 'pe', or NULL with errno set
 * to EINVAL if the event code is unknown.
 */
const char *
pmc_name_of_event(enum pmc_event pe)
{
	const struct pmc_event_descr *ev, *evfence;

	/* select the event table containing event code 'pe' */
	ev = evfence = NULL;
	if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
		ev = k7_event_table;
		evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
	} else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
		ev = k8_event_table;
		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
	} else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
		ev = p4_event_table;
		evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
	} else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
		ev = p5_event_table;
		evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
	} else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
		ev = p6_event_table;
		evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
	} else if (pe == PMC_EV_TSC_TSC) {
		ev = tsc_event_table;
		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
	}

	for (; ev != evfence; ev++)
		if (pe == ev->pm_ev_code)
			return (ev->pm_ev_name);

	errno = EINVAL;
	return (NULL);
}

/*
 * Return the symbolic name of mode 'pm', or NULL with errno set to
 * EINVAL if out of range.
 */
const char *
pmc_name_of_mode(enum pmc_mode pm)
{
	if ((int) pm >= PMC_MODE_FIRST &&
	    pm <= PMC_MODE_LAST)
		return (pmc_mode_names[pm]);

	errno = EINVAL;
	return (NULL);
}

/*
 * Return the symbolic name of state 'ps', or NULL with errno set to
 * EINVAL if out of range.
 */
const char *
pmc_name_of_state(enum pmc_state ps)
{
	if ((int) ps >= PMC_STATE_FIRST &&
	    ps <= PMC_STATE_LAST)
		return (pmc_state_names[ps]);

	errno = EINVAL;
	return (NULL);
}

/*
 * Return the number of CPUs in the system; requires a prior
 * successful pmc_init().
 */
int
pmc_ncpu(void)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	return (cpu_info.pm_ncpu);
}

/*
 * Return the number of PMCs available on CPU 'cpu'; requires a prior
 * successful pmc_init().
 */
int
pmc_npmc(int cpu)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
		errno = EINVAL;
		return (-1);
	}

	return (cpu_info.pm_npmc);
}

/*
 * Retrieve the state of all PMCs on CPU 'cpu'.  On success '*ppmci'
 * points to storage allocated with calloc(3); the caller must free it.
 */
int
pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
{
	int nbytes, npmc;
	struct pmc_op_getpmcinfo *pmci;

	if ((npmc = pmc_npmc(cpu)) < 0)
		return (-1);

	/* header plus one pmc_info record per PMC */
	nbytes = sizeof(struct pmc_op_getpmcinfo) +
	    npmc * sizeof(struct pmc_info);

	if ((pmci = calloc(1, nbytes)) == NULL)
		return (-1);

	pmci->pm_cpu = cpu;

	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
		free(pmci);
		return (-1);
	}

	/* kernel<->library, library<->userland interfaces are identical */
	*ppmci = (struct pmc_pmcinfo *) pmci;
	return (0);
}

/*
 * Read the current value of PMC 'pmc' into '*value'.
 */
int
pmc_read(pmc_id_t pmc, pmc_value_t *value)
{
	struct pmc_op_pmcrw pmc_read_op;

	pmc_read_op.pm_pmcid = pmc;
	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
	pmc_read_op.pm_value = -1;

	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
		return (-1);

	*value = pmc_read_op.pm_value;
	return (0);
}
2322 2323 int 2324 pmc_release(pmc_id_t pmc) 2325 { 2326 struct pmc_op_simple pmc_release_args; 2327 2328 pmc_release_args.pm_pmcid = pmc; 2329 return (PMC_CALL(PMCRELEASE, &pmc_release_args)); 2330 } 2331 2332 int 2333 pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep) 2334 { 2335 struct pmc_op_pmcrw pmc_rw_op; 2336 2337 pmc_rw_op.pm_pmcid = pmc; 2338 pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE; 2339 pmc_rw_op.pm_value = newvalue; 2340 2341 if (PMC_CALL(PMCRW, &pmc_rw_op) < 0) 2342 return (-1); 2343 2344 *oldvaluep = pmc_rw_op.pm_value; 2345 return (0); 2346 } 2347 2348 int 2349 pmc_set(pmc_id_t pmc, pmc_value_t value) 2350 { 2351 struct pmc_op_pmcsetcount sc; 2352 2353 sc.pm_pmcid = pmc; 2354 sc.pm_count = value; 2355 2356 if (PMC_CALL(PMCSETCOUNT, &sc) < 0) 2357 return (-1); 2358 return (0); 2359 } 2360 2361 int 2362 pmc_start(pmc_id_t pmc) 2363 { 2364 struct pmc_op_simple pmc_start_args; 2365 2366 pmc_start_args.pm_pmcid = pmc; 2367 return (PMC_CALL(PMCSTART, &pmc_start_args)); 2368 } 2369 2370 int 2371 pmc_stop(pmc_id_t pmc) 2372 { 2373 struct pmc_op_simple pmc_stop_args; 2374 2375 pmc_stop_args.pm_pmcid = pmc; 2376 return (PMC_CALL(PMCSTOP, &pmc_stop_args)); 2377 } 2378 2379 int 2380 pmc_width(pmc_id_t pmcid, uint32_t *width) 2381 { 2382 unsigned int i; 2383 enum pmc_class cl; 2384 2385 cl = PMC_ID_TO_CLASS(pmcid); 2386 for (i = 0; i < cpu_info.pm_nclass; i++) 2387 if (cpu_info.pm_classes[i].pm_class == cl) { 2388 *width = cpu_info.pm_classes[i].pm_width; 2389 return (0); 2390 } 2391 errno = EINVAL; 2392 return (-1); 2393 } 2394 2395 int 2396 pmc_write(pmc_id_t pmc, pmc_value_t value) 2397 { 2398 struct pmc_op_pmcrw pmc_write_op; 2399 2400 pmc_write_op.pm_pmcid = pmc; 2401 pmc_write_op.pm_flags = PMC_F_NEWVALUE; 2402 pmc_write_op.pm_value = value; 2403 return (PMC_CALL(PMCRW, &pmc_write_op)); 2404 } 2405 2406 int 2407 pmc_writelog(uint32_t userdata) 2408 { 2409 struct pmc_op_writelog wl; 2410 2411 wl.pm_userdata = userdata; 2412 
return (PMC_CALL(WRITELOG, &wl)); 2413 } 2414