1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2003-2008 Joseph Koshy 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/types.h> 33 #include <sys/param.h> 34 #include <sys/module.h> 35 #include <sys/pmc.h> 36 #include <sys/syscall.h> 37 38 #include <ctype.h> 39 #include <errno.h> 40 #include <err.h> 41 #include <fcntl.h> 42 #include <pmc.h> 43 #include <stdio.h> 44 #include <stdlib.h> 45 #include <string.h> 46 #include <strings.h> 47 #include <sysexits.h> 48 #include <unistd.h> 49 50 #include "libpmcinternal.h" 51 52 /* Function prototypes */ 53 #if defined(__amd64__) || defined(__i386__) 54 static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 55 struct pmc_op_pmcallocate *_pmc_config); 56 #endif 57 #if defined(__amd64__) || defined(__i386__) 58 static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 59 struct pmc_op_pmcallocate *_pmc_config); 60 #endif 61 #if defined(__arm__) 62 static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 63 struct pmc_op_pmcallocate *_pmc_config); 64 #endif 65 #if defined(__aarch64__) 66 static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 67 struct pmc_op_pmcallocate *_pmc_config); 68 #endif 69 static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec, 70 struct pmc_op_pmcallocate *_pmc_config); 71 72 #if defined(__powerpc__) 73 static int powerpc_allocate_pmc(enum pmc_event _pe, char* ctrspec, 74 struct pmc_op_pmcallocate *_pmc_config); 75 #endif /* __powerpc__ */ 76 77 #define PMC_CALL(cmd, params) \ 78 syscall(pmc_syscall, PMC_OP_##cmd, (params)) 79 80 /* 81 * Event aliases provide a way for the user to ask for generic events 82 * like "cache-misses", or "instructions-retired". These aliases are 83 * mapped to the appropriate canonical event descriptions using a 84 * lookup table. 85 */ 86 struct pmc_event_alias { 87 const char *pm_alias; 88 const char *pm_spec; 89 }; 90 91 static const struct pmc_event_alias *pmc_mdep_event_aliases; 92 93 /* 94 * The pmc_event_descr structure maps symbolic names known to the user 95 * to integer codes used by the PMC KLD. 96 */ 97 struct pmc_event_descr { 98 const char *pm_ev_name; 99 enum pmc_event pm_ev_code; 100 }; 101 102 /* 103 * The pmc_class_descr structure maps class name prefixes for 104 * event names to event tables and other PMC class data. 105 */ 106 struct pmc_class_descr { 107 const char *pm_evc_name; 108 size_t pm_evc_name_size; 109 enum pmc_class pm_evc_class; 110 const struct pmc_event_descr *pm_evc_event_table; 111 size_t pm_evc_event_table_size; 112 int (*pm_evc_allocate_pmc)(enum pmc_event _pe, 113 char *_ctrspec, struct pmc_op_pmcallocate *_pa); 114 }; 115 116 #define PMC_TABLE_SIZE(N) (sizeof(N)/sizeof(N[0])) 117 #define PMC_EVENT_TABLE_SIZE(N) PMC_TABLE_SIZE(N##_event_table) 118 119 #undef __PMC_EV 120 #define __PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N }, 121 122 /* 123 * PMC_CLASSDEP_TABLE(NAME, CLASS) 124 * 125 * Define a table mapping event names and aliases to HWPMC event IDs. 126 */ 127 #define PMC_CLASSDEP_TABLE(N, C) \ 128 static const struct pmc_event_descr N##_event_table[] = \ 129 { \ 130 __PMC_EV_##C() \ 131 } 132 133 PMC_CLASSDEP_TABLE(iaf, IAF); 134 PMC_CLASSDEP_TABLE(k8, K8); 135 PMC_CLASSDEP_TABLE(armv7, ARMV7); 136 PMC_CLASSDEP_TABLE(armv8, ARMV8); 137 PMC_CLASSDEP_TABLE(ppc7450, PPC7450); 138 PMC_CLASSDEP_TABLE(ppc970, PPC970); 139 PMC_CLASSDEP_TABLE(e500, E500); 140 141 static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT]; 142 143 #undef __PMC_EV_ALIAS 144 #define __PMC_EV_ALIAS(N,CODE) { N, PMC_EV_##CODE }, 145 146 static const struct pmc_event_descr cortex_a8_event_table[] = 147 { 148 __PMC_EV_ALIAS_ARMV7_CORTEX_A8() 149 }; 150 151 static const struct pmc_event_descr cortex_a9_event_table[] = 152 { 153 __PMC_EV_ALIAS_ARMV7_CORTEX_A9() 154 }; 155 156 static const struct pmc_event_descr cortex_a53_event_table[] = 157 { 158 __PMC_EV_ALIAS_ARMV8_CORTEX_A53() 159 }; 160 161 static const struct pmc_event_descr cortex_a57_event_table[] = 162 { 163 __PMC_EV_ALIAS_ARMV8_CORTEX_A57() 164 }; 165 166 static const struct pmc_event_descr cortex_a76_event_table[] = 167 { 168 __PMC_EV_ALIAS_ARMV8_CORTEX_A76() 169 }; 170 171 static const struct pmc_event_descr tsc_event_table[] = 172 { 173 __PMC_EV_ALIAS_TSC() 174 }; 175 176 #undef PMC_CLASS_TABLE_DESC 177 #define PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR) \ 178 static const struct pmc_class_descr NAME##_class_table_descr = \ 179 { \ 180 .pm_evc_name = #CLASS "-", \ 181 .pm_evc_name_size = sizeof(#CLASS "-") - 1, \ 182 .pm_evc_class = PMC_CLASS_##CLASS , \ 183 .pm_evc_event_table = EVENTS##_event_table , \ 184 .pm_evc_event_table_size = \ 185 PMC_EVENT_TABLE_SIZE(EVENTS), \ 186 .pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc \ 187 } 188 189 #if defined(__i386__) || defined(__amd64__) 190 PMC_CLASS_TABLE_DESC(k8, K8, k8, k8); 191 #endif 192 #if defined(__i386__) || defined(__amd64__) 193 PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc); 194 #endif 195 #if defined(__arm__) 196 PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7); 197 PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7); 198 #endif 199 #if defined(__aarch64__) 200 PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64); 201 PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64); 202 PMC_CLASS_TABLE_DESC(cortex_a76, ARMV8, cortex_a76, arm64); 203 #endif 204 #if defined(__powerpc__) 205 PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc); 206 PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc); 207 PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc); 208 #endif 209 210 static struct pmc_class_descr soft_class_table_descr = 211 { 212 .pm_evc_name = "SOFT-", 213 .pm_evc_name_size = sizeof("SOFT-") - 1, 214 .pm_evc_class = PMC_CLASS_SOFT, 215 .pm_evc_event_table = NULL, 216 .pm_evc_event_table_size = 0, 217 .pm_evc_allocate_pmc = soft_allocate_pmc 218 }; 219 220 #undef PMC_CLASS_TABLE_DESC 221 222 static const struct pmc_class_descr **pmc_class_table; 223 #define PMC_CLASS_TABLE_SIZE cpu_info.pm_nclass 224 225 /* 226 * Mapping tables, mapping enumeration values to human readable 227 * strings. 228 */ 229 230 static const char * pmc_capability_names[] = { 231 #undef __PMC_CAP 232 #define __PMC_CAP(N,V,D) #N , 233 __PMC_CAPS() 234 }; 235 236 struct pmc_class_map { 237 enum pmc_class pm_class; 238 const char *pm_name; 239 }; 240 241 static const struct pmc_class_map pmc_class_names[] = { 242 #undef __PMC_CLASS 243 #define __PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } , 244 __PMC_CLASSES() 245 }; 246 247 struct pmc_cputype_map { 248 enum pmc_cputype pm_cputype; 249 const char *pm_name; 250 }; 251 252 static const struct pmc_cputype_map pmc_cputype_names[] = { 253 #undef __PMC_CPU 254 #define __PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } , 255 __PMC_CPUS() 256 }; 257 258 static const char * pmc_disposition_names[] = { 259 #undef __PMC_DISP 260 #define __PMC_DISP(D) #D , 261 __PMC_DISPOSITIONS() 262 }; 263 264 static const char * pmc_mode_names[] = { 265 #undef __PMC_MODE 266 #define __PMC_MODE(M,N) #M , 267 __PMC_MODES() 268 }; 269 270 static const char * pmc_state_names[] = { 271 #undef __PMC_STATE 272 #define __PMC_STATE(S) #S , 273 __PMC_STATES() 274 }; 275 276 /* 277 * Filled in by pmc_init(). 278 */ 279 static int pmc_syscall = -1; 280 static struct pmc_cpuinfo cpu_info; 281 static struct pmc_op_getdyneventinfo soft_event_info; 282 283 /* Event masks for events */ 284 struct pmc_masks { 285 const char *pm_name; 286 const uint64_t pm_value; 287 }; 288 #define PMCMASK(N,V) { .pm_name = #N, .pm_value = (V) } 289 #define NULLMASK { .pm_name = NULL } 290 291 #if defined(__amd64__) || defined(__i386__) 292 static int 293 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask) 294 { 295 const struct pmc_masks *pm; 296 char *q, *r; 297 int c; 298 299 if (pmask == NULL) /* no mask keywords */ 300 return (-1); 301 q = strchr(p, '='); /* skip '=' */ 302 if (*++q == '\0') /* no more data */ 303 return (-1); 304 c = 0; /* count of mask keywords seen */ 305 while ((r = strsep(&q, "+")) != NULL) { 306 for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name); 307 pm++) 308 ; 309 if (pm->pm_name == NULL) /* not found */ 310 return (-1); 311 *evmask |= pm->pm_value; 312 c++; 313 } 314 return (c); 315 } 316 #endif 317 318 #define KWMATCH(p,kw) (strcasecmp((p), (kw)) == 0) 319 #define KWPREFIXMATCH(p,kw) (strncasecmp((p), (kw), sizeof((kw)) - 1) == 0) 320 #define EV_ALIAS(N,S) { .pm_alias = N, .pm_spec = S } 321 322 #if defined(__amd64__) || defined(__i386__) 323 /* 324 * AMD K8 PMCs. 325 * 326 */ 327 328 static struct pmc_event_alias k8_aliases[] = { 329 EV_ALIAS("branches", "k8-fr-retired-taken-branches"), 330 EV_ALIAS("branch-mispredicts", 331 "k8-fr-retired-taken-branches-mispredicted"), 332 EV_ALIAS("cycles", "tsc"), 333 EV_ALIAS("dc-misses", "k8-dc-miss"), 334 EV_ALIAS("ic-misses", "k8-ic-miss"), 335 EV_ALIAS("instructions", "k8-fr-retired-x86-instructions"), 336 EV_ALIAS("interrupts", "k8-fr-taken-hardware-interrupts"), 337 EV_ALIAS("unhalted-cycles", "k8-bu-cpu-clk-unhalted"), 338 EV_ALIAS(NULL, NULL) 339 }; 340 341 #define __K8MASK(N,V) PMCMASK(N,(1 << (V))) 342 343 /* 344 * Parsing tables 345 */ 346 347 /* fp dispatched fpu ops */ 348 static const struct pmc_masks k8_mask_fdfo[] = { 349 __K8MASK(add-pipe-excluding-junk-ops, 0), 350 __K8MASK(multiply-pipe-excluding-junk-ops, 1), 351 __K8MASK(store-pipe-excluding-junk-ops, 2), 352 __K8MASK(add-pipe-junk-ops, 3), 353 __K8MASK(multiply-pipe-junk-ops, 4), 354 __K8MASK(store-pipe-junk-ops, 5), 355 NULLMASK 356 }; 357 358 /* ls segment register loads */ 359 static const struct pmc_masks k8_mask_lsrl[] = { 360 __K8MASK(es, 0), 361 __K8MASK(cs, 1), 362 __K8MASK(ss, 2), 363 __K8MASK(ds, 3), 364 __K8MASK(fs, 4), 365 __K8MASK(gs, 5), 366 __K8MASK(hs, 6), 367 NULLMASK 368 }; 369 370 /* ls locked operation */ 371 static const struct pmc_masks k8_mask_llo[] = { 372 __K8MASK(locked-instructions, 0), 373 __K8MASK(cycles-in-request, 1), 374 __K8MASK(cycles-to-complete, 2), 375 NULLMASK 376 }; 377 378 /* dc refill from {l2,system} and dc copyback */ 379 static const struct pmc_masks k8_mask_dc[] = { 380 __K8MASK(invalid, 0), 381 __K8MASK(shared, 1), 382 __K8MASK(exclusive, 2), 383 __K8MASK(owner, 3), 384 __K8MASK(modified, 4), 385 NULLMASK 386 }; 387 388 /* dc one bit ecc error */ 389 static const struct pmc_masks k8_mask_dobee[] = { 390 __K8MASK(scrubber, 0), 391 __K8MASK(piggyback, 1), 392 NULLMASK 393 }; 394 395 /* dc dispatched prefetch instructions */ 396 static const struct pmc_masks k8_mask_ddpi[] = { 397 __K8MASK(load, 0), 398 __K8MASK(store, 1), 399 __K8MASK(nta, 2), 400 NULLMASK 401 }; 402 403 /* dc dcache accesses by locks */ 404 static const struct pmc_masks k8_mask_dabl[] = { 405 __K8MASK(accesses, 0), 406 __K8MASK(misses, 1), 407 NULLMASK 408 }; 409 410 /* bu internal l2 request */ 411 static const struct pmc_masks k8_mask_bilr[] = { 412 __K8MASK(ic-fill, 0), 413 __K8MASK(dc-fill, 1), 414 __K8MASK(tlb-reload, 2), 415 __K8MASK(tag-snoop, 3), 416 __K8MASK(cancelled, 4), 417 NULLMASK 418 }; 419 420 /* bu fill request l2 miss */ 421 static const struct pmc_masks k8_mask_bfrlm[] = { 422 __K8MASK(ic-fill, 0), 423 __K8MASK(dc-fill, 1), 424 __K8MASK(tlb-reload, 2), 425 NULLMASK 426 }; 427 428 /* bu fill into l2 */ 429 static const struct pmc_masks k8_mask_bfil[] = { 430 __K8MASK(dirty-l2-victim, 0), 431 __K8MASK(victim-from-l2, 1), 432 NULLMASK 433 }; 434 435 /* fr retired fpu instructions */ 436 static const struct pmc_masks k8_mask_frfi[] = { 437 __K8MASK(x87, 0), 438 __K8MASK(mmx-3dnow, 1), 439 __K8MASK(packed-sse-sse2, 2), 440 __K8MASK(scalar-sse-sse2, 3), 441 NULLMASK 442 }; 443 444 /* fr retired fastpath double op instructions */ 445 static const struct pmc_masks k8_mask_frfdoi[] = { 446 __K8MASK(low-op-pos-0, 0), 447 __K8MASK(low-op-pos-1, 1), 448 __K8MASK(low-op-pos-2, 2), 449 NULLMASK 450 }; 451 452 /* fr fpu exceptions */ 453 static const struct pmc_masks k8_mask_ffe[] = { 454 __K8MASK(x87-reclass-microfaults, 0), 455 __K8MASK(sse-retype-microfaults, 1), 456 __K8MASK(sse-reclass-microfaults, 2), 457 __K8MASK(sse-and-x87-microtraps, 3), 458 NULLMASK 459 }; 460 461 /* nb memory controller page access event */ 462 static const struct pmc_masks k8_mask_nmcpae[] = { 463 __K8MASK(page-hit, 0), 464 __K8MASK(page-miss, 1), 465 __K8MASK(page-conflict, 2), 466 NULLMASK 467 }; 468 469 /* nb memory controller turnaround */ 470 static const struct pmc_masks k8_mask_nmct[] = { 471 __K8MASK(dimm-turnaround, 0), 472 __K8MASK(read-to-write-turnaround, 1), 473 __K8MASK(write-to-read-turnaround, 2), 474 NULLMASK 475 }; 476 477 /* nb memory controller bypass saturation */ 478 static const struct pmc_masks k8_mask_nmcbs[] = { 479 __K8MASK(memory-controller-hi-pri-bypass, 0), 480 __K8MASK(memory-controller-lo-pri-bypass, 1), 481 __K8MASK(dram-controller-interface-bypass, 2), 482 __K8MASK(dram-controller-queue-bypass, 3), 483 NULLMASK 484 }; 485 486 /* nb sized commands */ 487 static const struct pmc_masks k8_mask_nsc[] = { 488 __K8MASK(nonpostwrszbyte, 0), 489 __K8MASK(nonpostwrszdword, 1), 490 __K8MASK(postwrszbyte, 2), 491 __K8MASK(postwrszdword, 3), 492 __K8MASK(rdszbyte, 4), 493 __K8MASK(rdszdword, 5), 494 __K8MASK(rdmodwr, 6), 495 NULLMASK 496 }; 497 498 /* nb probe result */ 499 static const struct pmc_masks k8_mask_npr[] = { 500 __K8MASK(probe-miss, 0), 501 __K8MASK(probe-hit, 1), 502 __K8MASK(probe-hit-dirty-no-memory-cancel, 2), 503 __K8MASK(probe-hit-dirty-with-memory-cancel, 3), 504 NULLMASK 505 }; 506 507 /* nb hypertransport bus bandwidth */ 508 static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */ 509 __K8MASK(command, 0), 510 __K8MASK(data, 1), 511 __K8MASK(buffer-release, 2), 512 __K8MASK(nop, 3), 513 NULLMASK 514 }; 515 516 #undef __K8MASK 517 518 #define K8_KW_COUNT "count" 519 #define K8_KW_EDGE "edge" 520 #define K8_KW_INV "inv" 521 #define K8_KW_MASK "mask" 522 #define K8_KW_OS "os" 523 #define K8_KW_USR "usr" 524 525 static int 526 k8_allocate_pmc(enum pmc_event pe, char *ctrspec, 527 struct pmc_op_pmcallocate *pmc_config) 528 { 529 char *e, *p, *q; 530 int n; 531 uint32_t count; 532 uint64_t evmask; 533 const struct pmc_masks *pm, *pmask; 534 535 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 536 pmc_config->pm_md.pm_amd.pm_amd_config = 0; 537 538 pmask = NULL; 539 evmask = 0; 540 541 #define __K8SETMASK(M) pmask = k8_mask_##M 542 543 /* setup parsing tables */ 544 switch (pe) { 545 case PMC_EV_K8_FP_DISPATCHED_FPU_OPS: 546 __K8SETMASK(fdfo); 547 break; 548 case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD: 549 __K8SETMASK(lsrl); 550 break; 551 case PMC_EV_K8_LS_LOCKED_OPERATION: 552 __K8SETMASK(llo); 553 break; 554 case PMC_EV_K8_DC_REFILL_FROM_L2: 555 case PMC_EV_K8_DC_REFILL_FROM_SYSTEM: 556 case PMC_EV_K8_DC_COPYBACK: 557 __K8SETMASK(dc); 558 break; 559 case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR: 560 __K8SETMASK(dobee); 561 break; 562 case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS: 563 __K8SETMASK(ddpi); 564 break; 565 case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS: 566 __K8SETMASK(dabl); 567 break; 568 case PMC_EV_K8_BU_INTERNAL_L2_REQUEST: 569 __K8SETMASK(bilr); 570 break; 571 case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS: 572 __K8SETMASK(bfrlm); 573 break; 574 case PMC_EV_K8_BU_FILL_INTO_L2: 575 __K8SETMASK(bfil); 576 break; 577 case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS: 578 __K8SETMASK(frfi); 579 break; 580 case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS: 581 __K8SETMASK(frfdoi); 582 break; 583 case PMC_EV_K8_FR_FPU_EXCEPTIONS: 584 __K8SETMASK(ffe); 585 break; 586 case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT: 587 __K8SETMASK(nmcpae); 588 break; 589 case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND: 590 __K8SETMASK(nmct); 591 break; 592 case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION: 593 __K8SETMASK(nmcbs); 594 break; 595 case PMC_EV_K8_NB_SIZED_COMMANDS: 596 __K8SETMASK(nsc); 597 break; 598 case PMC_EV_K8_NB_PROBE_RESULT: 599 __K8SETMASK(npr); 600 break; 601 case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH: 602 case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH: 603 case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH: 604 __K8SETMASK(nhbb); 605 break; 606 607 default: 608 break; /* no options defined */ 609 } 610 611 while ((p = strsep(&ctrspec, ",")) != NULL) { 612 if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) { 613 q = strchr(p, '='); 614 if (*++q == '\0') /* skip '=' */ 615 return (-1); 616 617 count = strtol(q, &e, 0); 618 if (e == q || *e != '\0') 619 return (-1); 620 621 pmc_config->pm_caps |= PMC_CAP_THRESHOLD; 622 pmc_config->pm_md.pm_amd.pm_amd_config |= 623 AMD_PMC_TO_COUNTER(count); 624 625 } else if (KWMATCH(p, K8_KW_EDGE)) { 626 pmc_config->pm_caps |= PMC_CAP_EDGE; 627 } else if (KWMATCH(p, K8_KW_INV)) { 628 pmc_config->pm_caps |= PMC_CAP_INVERT; 629 } else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) { 630 if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0) 631 return (-1); 632 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 633 } else if (KWMATCH(p, K8_KW_OS)) { 634 pmc_config->pm_caps |= PMC_CAP_SYSTEM; 635 } else if (KWMATCH(p, K8_KW_USR)) { 636 pmc_config->pm_caps |= PMC_CAP_USER; 637 } else 638 return (-1); 639 } 640 641 /* other post processing */ 642 switch (pe) { 643 case PMC_EV_K8_FP_DISPATCHED_FPU_OPS: 644 case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED: 645 case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS: 646 case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS: 647 case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS: 648 case PMC_EV_K8_FR_FPU_EXCEPTIONS: 649 /* XXX only available in rev B and later */ 650 break; 651 case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS: 652 /* XXX only available in rev C and later */ 653 break; 654 case PMC_EV_K8_LS_LOCKED_OPERATION: 655 /* XXX CPU Rev A,B evmask is to be zero */ 656 if (evmask & (evmask - 1)) /* > 1 bit set */ 657 return (-1); 658 if (evmask == 0) { 659 evmask = 0x01; /* Rev C and later: #instrs */ 660 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 661 } 662 break; 663 default: 664 if (evmask == 0 && pmask != NULL) { 665 for (pm = pmask; pm->pm_name; pm++) 666 evmask |= pm->pm_value; 667 pmc_config->pm_caps |= PMC_CAP_QUALIFIER; 668 } 669 } 670 671 if (pmc_config->pm_caps & PMC_CAP_QUALIFIER) 672 pmc_config->pm_md.pm_amd.pm_amd_config = 673 AMD_PMC_TO_UNITMASK(evmask); 674 675 return (0); 676 } 677 678 #endif 679 680 #if defined(__i386__) || defined(__amd64__) 681 static int 682 tsc_allocate_pmc(enum pmc_event pe, char *ctrspec, 683 struct pmc_op_pmcallocate *pmc_config) 684 { 685 if (pe != PMC_EV_TSC_TSC) 686 return (-1); 687 688 /* TSC events must be unqualified. */ 689 if (ctrspec && *ctrspec != '\0') 690 return (-1); 691 692 pmc_config->pm_md.pm_amd.pm_amd_config = 0; 693 pmc_config->pm_caps |= PMC_CAP_READ; 694 695 return (0); 696 } 697 #endif 698 699 static struct pmc_event_alias generic_aliases[] = { 700 EV_ALIAS("instructions", "SOFT-CLOCK.HARD"), 701 EV_ALIAS(NULL, NULL) 702 }; 703 704 static int 705 soft_allocate_pmc(enum pmc_event pe, char *ctrspec, 706 struct pmc_op_pmcallocate *pmc_config) 707 { 708 (void)ctrspec; 709 (void)pmc_config; 710 711 if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST) 712 return (-1); 713 714 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 715 return (0); 716 } 717 718 #if defined(__arm__) 719 static struct pmc_event_alias cortex_a8_aliases[] = { 720 EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"), 721 EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"), 722 EV_ALIAS("instructions", "INSTR_EXECUTED"), 723 EV_ALIAS(NULL, NULL) 724 }; 725 726 static struct pmc_event_alias cortex_a9_aliases[] = { 727 EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"), 728 EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"), 729 EV_ALIAS("instructions", "INSTR_EXECUTED"), 730 EV_ALIAS(NULL, NULL) 731 }; 732 733 static int 734 armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, 735 struct pmc_op_pmcallocate *pmc_config __unused) 736 { 737 switch (pe) { 738 default: 739 break; 740 } 741 742 return (0); 743 } 744 #endif 745 746 #if defined(__aarch64__) 747 static struct pmc_event_alias cortex_a53_aliases[] = { 748 EV_ALIAS(NULL, NULL) 749 }; 750 static struct pmc_event_alias cortex_a57_aliases[] = { 751 EV_ALIAS(NULL, NULL) 752 }; 753 static struct pmc_event_alias cortex_a76_aliases[] = { 754 EV_ALIAS(NULL, NULL) 755 }; 756 static int 757 arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, 758 struct pmc_op_pmcallocate *pmc_config __unused) 759 { 760 switch (pe) { 761 default: 762 break; 763 } 764 765 return (0); 766 } 767 #endif 768 769 #if defined(__powerpc__) 770 771 static struct pmc_event_alias ppc7450_aliases[] = { 772 EV_ALIAS("instructions", "INSTR_COMPLETED"), 773 EV_ALIAS("branches", "BRANCHES_COMPLETED"), 774 EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCHES"), 775 EV_ALIAS(NULL, NULL) 776 }; 777 778 static struct pmc_event_alias ppc970_aliases[] = { 779 EV_ALIAS("instructions", "INSTR_COMPLETED"), 780 EV_ALIAS("cycles", "CYCLES"), 781 EV_ALIAS(NULL, NULL) 782 }; 783 784 static struct pmc_event_alias e500_aliases[] = { 785 EV_ALIAS("instructions", "INSTR_COMPLETED"), 786 EV_ALIAS("cycles", "CYCLES"), 787 EV_ALIAS(NULL, NULL) 788 }; 789 790 #define POWERPC_KW_OS "os" 791 #define POWERPC_KW_USR "usr" 792 #define POWERPC_KW_ANYTHREAD "anythread" 793 794 static int 795 powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, 796 struct pmc_op_pmcallocate *pmc_config __unused) 797 { 798 char *p; 799 800 (void) pe; 801 802 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE); 803 804 while ((p = strsep(&ctrspec, ",")) != NULL) { 805 if (KWMATCH(p, POWERPC_KW_OS)) 806 pmc_config->pm_caps |= PMC_CAP_SYSTEM; 807 else if (KWMATCH(p, POWERPC_KW_USR)) 808 pmc_config->pm_caps |= PMC_CAP_USER; 809 else if (KWMATCH(p, POWERPC_KW_ANYTHREAD)) 810 pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM); 811 else 812 return (-1); 813 } 814 815 return (0); 816 } 817 818 #endif /* __powerpc__ */ 819 820 821 /* 822 * Match an event name `name' with its canonical form. 823 * 824 * Matches are case insensitive and spaces, periods, underscores and 825 * hyphen characters are considered to match each other. 826 * 827 * Returns 1 for a match, 0 otherwise. 828 */ 829 830 static int 831 pmc_match_event_name(const char *name, const char *canonicalname) 832 { 833 int cc, nc; 834 const unsigned char *c, *n; 835 836 c = (const unsigned char *) canonicalname; 837 n = (const unsigned char *) name; 838 839 for (; (nc = *n) && (cc = *c); n++, c++) { 840 841 if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') && 842 (cc == ' ' || cc == '_' || cc == '-' || cc == '.')) 843 continue; 844 845 if (toupper(nc) == toupper(cc)) 846 continue; 847 848 849 return (0); 850 } 851 852 if (*n == '\0' && *c == '\0') 853 return (1); 854 855 return (0); 856 } 857 858 /* 859 * Match an event name against all the event named supported by a 860 * PMC class. 861 * 862 * Returns an event descriptor pointer on match or NULL otherwise. 863 */ 864 static const struct pmc_event_descr * 865 pmc_match_event_class(const char *name, 866 const struct pmc_class_descr *pcd) 867 { 868 size_t n; 869 const struct pmc_event_descr *ev; 870 871 ev = pcd->pm_evc_event_table; 872 for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++) 873 if (pmc_match_event_name(name, ev->pm_ev_name)) 874 return (ev); 875 876 return (NULL); 877 } 878 879 /* 880 * API entry points 881 */ 882 883 int 884 pmc_allocate(const char *ctrspec, enum pmc_mode mode, 885 uint32_t flags, int cpu, pmc_id_t *pmcid, 886 uint64_t count) 887 { 888 size_t n; 889 int retval; 890 char *r, *spec_copy; 891 const char *ctrname; 892 const struct pmc_event_descr *ev; 893 const struct pmc_event_alias *alias; 894 struct pmc_op_pmcallocate pmc_config; 895 const struct pmc_class_descr *pcd; 896 897 spec_copy = NULL; 898 retval = -1; 899 900 if (mode != PMC_MODE_SS && mode != PMC_MODE_TS && 901 mode != PMC_MODE_SC && mode != PMC_MODE_TC) { 902 errno = EINVAL; 903 goto out; 904 } 905 bzero(&pmc_config, sizeof(pmc_config)); 906 pmc_config.pm_cpu = cpu; 907 pmc_config.pm_mode = mode; 908 pmc_config.pm_flags = flags; 909 pmc_config.pm_count = count; 910 if (PMC_IS_SAMPLING_MODE(mode)) 911 pmc_config.pm_caps |= PMC_CAP_INTERRUPT; 912 913 /* 914 * Try to pull the raw event ID directly from the pmu-events table. If 915 * this is unsupported on the platform, or the event is not found, 916 * continue with searching the regular event tables. 917 */ 918 r = spec_copy = strdup(ctrspec); 919 ctrname = strsep(&r, ","); 920 if (pmc_pmu_enabled()) { 921 if (pmc_pmu_pmcallocate(ctrname, &pmc_config) == 0) 922 goto found; 923 924 /* Otherwise, reset any changes */ 925 pmc_config.pm_ev = 0; 926 pmc_config.pm_caps = 0; 927 pmc_config.pm_class = 0; 928 } 929 free(spec_copy); 930 spec_copy = NULL; 931 932 /* replace an event alias with the canonical event specifier */ 933 if (pmc_mdep_event_aliases) 934 for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++) 935 if (!strcasecmp(ctrspec, alias->pm_alias)) { 936 spec_copy = strdup(alias->pm_spec); 937 break; 938 } 939 940 if (spec_copy == NULL) 941 spec_copy = strdup(ctrspec); 942 943 r = spec_copy; 944 ctrname = strsep(&r, ","); 945 946 /* 947 * If a explicit class prefix was given by the user, restrict the 948 * search for the event to the specified PMC class. 949 */ 950 ev = NULL; 951 for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) { 952 pcd = pmc_class_table[n]; 953 if (pcd != NULL && strncasecmp(ctrname, pcd->pm_evc_name, 954 pcd->pm_evc_name_size) == 0) { 955 if ((ev = pmc_match_event_class(ctrname + 956 pcd->pm_evc_name_size, pcd)) == NULL) { 957 errno = EINVAL; 958 goto out; 959 } 960 break; 961 } 962 } 963 964 /* 965 * Otherwise, search for this event in all compatible PMC 966 * classes. 967 */ 968 for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) { 969 pcd = pmc_class_table[n]; 970 if (pcd != NULL) 971 ev = pmc_match_event_class(ctrname, pcd); 972 } 973 974 if (ev == NULL) { 975 errno = EINVAL; 976 goto out; 977 } 978 979 pmc_config.pm_ev = ev->pm_ev_code; 980 pmc_config.pm_class = pcd->pm_evc_class; 981 982 if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) { 983 errno = EINVAL; 984 goto out; 985 } 986 987 found: 988 if (PMC_CALL(PMCALLOCATE, &pmc_config) == 0) { 989 *pmcid = pmc_config.pm_pmcid; 990 retval = 0; 991 } 992 out: 993 if (spec_copy) 994 free(spec_copy); 995 996 return (retval); 997 } 998 999 int 1000 pmc_attach(pmc_id_t pmc, pid_t pid) 1001 { 1002 struct pmc_op_pmcattach pmc_attach_args; 1003 1004 pmc_attach_args.pm_pmc = pmc; 1005 pmc_attach_args.pm_pid = pid; 1006 1007 return (PMC_CALL(PMCATTACH, &pmc_attach_args)); 1008 } 1009 1010 int 1011 pmc_capabilities(pmc_id_t pmcid, uint32_t *caps) 1012 { 1013 unsigned int i; 1014 enum pmc_class cl; 1015 1016 cl = PMC_ID_TO_CLASS(pmcid); 1017 for (i = 0; i < cpu_info.pm_nclass; i++) 1018 if (cpu_info.pm_classes[i].pm_class == cl) { 1019 *caps = cpu_info.pm_classes[i].pm_caps; 1020 return (0); 1021 } 1022 errno = EINVAL; 1023 return (-1); 1024 } 1025 1026 int 1027 pmc_configure_logfile(int fd) 1028 { 1029 struct pmc_op_configurelog cla; 1030 1031 cla.pm_logfd = fd; 1032 if (PMC_CALL(CONFIGURELOG, &cla) < 0) 1033 return (-1); 1034 return (0); 1035 } 1036 1037 int 1038 pmc_cpuinfo(const struct pmc_cpuinfo **pci) 1039 { 1040 if (pmc_syscall == -1) { 1041 errno = ENXIO; 1042 return (-1); 1043 } 1044 1045 *pci = &cpu_info; 1046 return (0); 1047 } 1048 1049 int 1050 pmc_detach(pmc_id_t pmc, pid_t pid) 1051 { 1052 struct pmc_op_pmcattach pmc_detach_args; 1053 1054 pmc_detach_args.pm_pmc = pmc; 1055 pmc_detach_args.pm_pid = pid; 1056 return (PMC_CALL(PMCDETACH, &pmc_detach_args)); 1057 } 1058 1059 int 1060 pmc_disable(int cpu, int pmc) 1061 { 1062 struct pmc_op_pmcadmin ssa; 1063 1064 ssa.pm_cpu = cpu; 1065 ssa.pm_pmc = pmc; 1066 ssa.pm_state = PMC_STATE_DISABLED; 1067 return (PMC_CALL(PMCADMIN, &ssa)); 1068 } 1069 1070 int 1071 pmc_enable(int cpu, int pmc) 1072 { 1073 struct pmc_op_pmcadmin ssa; 1074 1075 ssa.pm_cpu = cpu; 1076 ssa.pm_pmc = pmc; 1077 ssa.pm_state = PMC_STATE_FREE; 1078 return (PMC_CALL(PMCADMIN, &ssa)); 1079 } 1080 1081 /* 1082 * Return a list of events known to a given PMC class. 'cl' is the 1083 * PMC class identifier, 'eventnames' is the returned list of 'const 1084 * char *' pointers pointing to the names of the events. 'nevents' is 1085 * the number of event name pointers returned. 1086 * 1087 * The space for 'eventnames' is allocated using malloc(3). The caller 1088 * is responsible for freeing this space when done. 1089 */ 1090 int 1091 pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames, 1092 int *nevents) 1093 { 1094 int count; 1095 const char **names; 1096 const struct pmc_event_descr *ev; 1097 1098 switch (cl) 1099 { 1100 case PMC_CLASS_IAF: 1101 ev = iaf_event_table; 1102 count = PMC_EVENT_TABLE_SIZE(iaf); 1103 break; 1104 case PMC_CLASS_TSC: 1105 ev = tsc_event_table; 1106 count = PMC_EVENT_TABLE_SIZE(tsc); 1107 break; 1108 case PMC_CLASS_K8: 1109 ev = k8_event_table; 1110 count = PMC_EVENT_TABLE_SIZE(k8); 1111 break; 1112 case PMC_CLASS_ARMV7: 1113 switch (cpu_info.pm_cputype) { 1114 default: 1115 case PMC_CPU_ARMV7_CORTEX_A8: 1116 ev = cortex_a8_event_table; 1117 count = PMC_EVENT_TABLE_SIZE(cortex_a8); 1118 break; 1119 case PMC_CPU_ARMV7_CORTEX_A9: 1120 ev = cortex_a9_event_table; 1121 count = PMC_EVENT_TABLE_SIZE(cortex_a9); 1122 break; 1123 } 1124 break; 1125 case PMC_CLASS_ARMV8: 1126 switch (cpu_info.pm_cputype) { 1127 default: 1128 case PMC_CPU_ARMV8_CORTEX_A53: 1129 ev = cortex_a53_event_table; 1130 count = PMC_EVENT_TABLE_SIZE(cortex_a53); 1131 break; 1132 case PMC_CPU_ARMV8_CORTEX_A57: 1133 ev = cortex_a57_event_table; 1134 count = PMC_EVENT_TABLE_SIZE(cortex_a57); 1135 break; 1136 case PMC_CPU_ARMV8_CORTEX_A76: 1137 ev = cortex_a76_event_table; 1138 count = PMC_EVENT_TABLE_SIZE(cortex_a76); 1139 break; 1140 } 1141 break; 1142 case PMC_CLASS_PPC7450: 1143 ev = ppc7450_event_table; 1144 count = PMC_EVENT_TABLE_SIZE(ppc7450); 1145 break; 1146 case PMC_CLASS_PPC970: 1147 ev = ppc970_event_table; 1148 count = PMC_EVENT_TABLE_SIZE(ppc970); 1149 break; 1150 case PMC_CLASS_E500: 1151 ev = e500_event_table; 1152 count = PMC_EVENT_TABLE_SIZE(e500); 1153 break; 1154 case PMC_CLASS_SOFT: 1155 ev = soft_event_table; 1156 count = soft_event_info.pm_nevent; 1157 break; 1158 default: 1159 errno = EINVAL; 1160 return (-1); 1161 } 1162 1163 if ((names = malloc(count * sizeof(const char *))) == NULL) 1164 return (-1); 1165 1166 *eventnames = names; 1167 *nevents = count; 1168 1169 for (;count--; ev++, names++) 1170 *names = ev->pm_ev_name; 1171 1172 return (0); 1173 } 1174 1175 int 1176 pmc_flush_logfile(void) 1177 { 1178 return (PMC_CALL(FLUSHLOG,0)); 1179 } 1180 1181 int 1182 pmc_close_logfile(void) 1183 { 1184 return (PMC_CALL(CLOSELOG,0)); 1185 } 1186 1187 int 1188 pmc_get_driver_stats(struct pmc_driverstats *ds) 1189 { 1190 struct pmc_op_getdriverstats gms; 1191 1192 if (PMC_CALL(GETDRIVERSTATS, &gms) < 0) 1193 return (-1); 1194 1195 /* copy out fields in the current userland<->library interface */ 1196 ds->pm_intr_ignored = gms.pm_intr_ignored; 1197 ds->pm_intr_processed = gms.pm_intr_processed; 1198 ds->pm_intr_bufferfull = gms.pm_intr_bufferfull; 1199 ds->pm_syscalls = gms.pm_syscalls; 1200 ds->pm_syscall_errors = gms.pm_syscall_errors; 1201 ds->pm_buffer_requests = gms.pm_buffer_requests; 1202 ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed; 1203 ds->pm_log_sweeps = gms.pm_log_sweeps; 1204 return (0); 1205 } 1206 1207 int 1208 pmc_get_msr(pmc_id_t pmc, uint32_t *msr) 1209 { 1210 struct pmc_op_getmsr gm; 1211 1212 gm.pm_pmcid = pmc; 1213 if (PMC_CALL(PMCGETMSR, &gm) < 0) 1214 return (-1); 1215 *msr = gm.pm_msr; 1216 return (0); 1217 } 1218 1219 int 1220 pmc_init(void) 1221 { 1222 int error, pmc_mod_id; 1223 unsigned int n; 1224 uint32_t abi_version; 1225 struct module_stat pmc_modstat; 1226 struct pmc_op_getcpuinfo op_cpu_info; 1227 1228 if (pmc_syscall != -1) /* already inited */ 1229 return (0); 1230 1231 /* retrieve the system call number from the KLD */ 1232 if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0) 1233 return (-1); 1234 1235 pmc_modstat.version = sizeof(struct module_stat); 1236 if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0) 1237 return (-1); 1238 1239 pmc_syscall = pmc_modstat.data.intval; 1240 1241 /* check the kernel module's ABI against our compiled-in version */ 1242 abi_version = PMC_VERSION; 1243 if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0) 1244 return (pmc_syscall = -1); 1245 1246 /* ignore patch & minor numbers for the comparison */ 1247 if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) { 1248 errno = EPROGMISMATCH; 1249 return (pmc_syscall = -1); 1250 } 1251 1252 bzero(&op_cpu_info, sizeof(op_cpu_info)); 1253 if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0) 1254 return (pmc_syscall = -1); 1255 1256 cpu_info.pm_cputype = op_cpu_info.pm_cputype; 1257 cpu_info.pm_ncpu = op_cpu_info.pm_ncpu; 1258 cpu_info.pm_npmc = op_cpu_info.pm_npmc; 1259 cpu_info.pm_nclass = op_cpu_info.pm_nclass; 1260 for (n = 0; n < op_cpu_info.pm_nclass; n++) 1261 memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n], 1262 sizeof(cpu_info.pm_classes[n])); 1263 1264 pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE * 1265 sizeof(struct pmc_class_descr *)); 1266 1267 if (pmc_class_table == NULL) 1268 return (-1); 1269 1270 for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) 1271 pmc_class_table[n] = NULL; 1272 1273 /* 1274 * Get soft events list. 1275 */ 1276 soft_event_info.pm_class = PMC_CLASS_SOFT; 1277 if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0) 1278 return (pmc_syscall = -1); 1279 1280 /* Map soft events to static list. */ 1281 for (n = 0; n < soft_event_info.pm_nevent; n++) { 1282 soft_event_table[n].pm_ev_name = 1283 soft_event_info.pm_events[n].pm_ev_name; 1284 soft_event_table[n].pm_ev_code = 1285 soft_event_info.pm_events[n].pm_ev_code; 1286 } 1287 soft_class_table_descr.pm_evc_event_table_size = \ 1288 soft_event_info.pm_nevent; 1289 soft_class_table_descr.pm_evc_event_table = \ 1290 soft_event_table; 1291 1292 /* 1293 * Fill in the class table. 1294 */ 1295 n = 0; 1296 1297 /* Fill soft events information. */ 1298 pmc_class_table[n++] = &soft_class_table_descr; 1299 #if defined(__amd64__) || defined(__i386__) 1300 if (cpu_info.pm_cputype != PMC_CPU_GENERIC) 1301 pmc_class_table[n++] = &tsc_class_table_descr; 1302 #endif 1303 1304 #define PMC_MDEP_INIT(C) pmc_mdep_event_aliases = C##_aliases 1305 1306 /* Configure the event name parser. */ 1307 switch (cpu_info.pm_cputype) { 1308 #if defined(__amd64__) || defined(__i386__) 1309 case PMC_CPU_AMD_K8: 1310 PMC_MDEP_INIT(k8); 1311 pmc_class_table[n] = &k8_class_table_descr; 1312 break; 1313 #endif 1314 case PMC_CPU_GENERIC: 1315 PMC_MDEP_INIT(generic); 1316 break; 1317 #if defined(__arm__) 1318 case PMC_CPU_ARMV7_CORTEX_A8: 1319 PMC_MDEP_INIT(cortex_a8); 1320 pmc_class_table[n] = &cortex_a8_class_table_descr; 1321 break; 1322 case PMC_CPU_ARMV7_CORTEX_A9: 1323 PMC_MDEP_INIT(cortex_a9); 1324 pmc_class_table[n] = &cortex_a9_class_table_descr; 1325 break; 1326 #endif 1327 #if defined(__aarch64__) 1328 case PMC_CPU_ARMV8_CORTEX_A53: 1329 PMC_MDEP_INIT(cortex_a53); 1330 pmc_class_table[n] = &cortex_a53_class_table_descr; 1331 break; 1332 case PMC_CPU_ARMV8_CORTEX_A57: 1333 PMC_MDEP_INIT(cortex_a57); 1334 pmc_class_table[n] = &cortex_a57_class_table_descr; 1335 break; 1336 case PMC_CPU_ARMV8_CORTEX_A76: 1337 PMC_MDEP_INIT(cortex_a76); 1338 pmc_class_table[n] = &cortex_a76_class_table_descr; 1339 break; 1340 #endif 1341 #if defined(__powerpc__) 1342 case PMC_CPU_PPC_7450: 1343 PMC_MDEP_INIT(ppc7450); 1344 pmc_class_table[n] = &ppc7450_class_table_descr; 1345 break; 1346 case PMC_CPU_PPC_970: 1347 PMC_MDEP_INIT(ppc970); 1348 pmc_class_table[n] = &ppc970_class_table_descr; 1349 break; 1350 case PMC_CPU_PPC_E500: 1351 PMC_MDEP_INIT(e500); 1352 pmc_class_table[n] = &e500_class_table_descr; 1353 break; 1354 #endif 1355 default: 1356 /* 1357 * Some kind of CPU this version of the library knows nothing 1358 * about. This shouldn't happen since the abi version check 1359 * should have caught this. 1360 */ 1361 #if defined(__amd64__) || defined(__i386__) || defined(__powerpc64__) 1362 break; 1363 #endif 1364 errno = ENXIO; 1365 return (pmc_syscall = -1); 1366 } 1367 1368 return (0); 1369 } 1370 1371 const char * 1372 pmc_name_of_capability(enum pmc_caps cap) 1373 { 1374 int i; 1375 1376 /* 1377 * 'cap' should have a single bit set and should be in 1378 * range. 1379 */ 1380 if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST || 1381 cap > PMC_CAP_LAST) { 1382 errno = EINVAL; 1383 return (NULL); 1384 } 1385 1386 i = ffs(cap); 1387 return (pmc_capability_names[i - 1]); 1388 } 1389 1390 const char * 1391 pmc_name_of_class(enum pmc_class pc) 1392 { 1393 size_t n; 1394 1395 for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++) 1396 if (pc == pmc_class_names[n].pm_class) 1397 return (pmc_class_names[n].pm_name); 1398 1399 errno = EINVAL; 1400 return (NULL); 1401 } 1402 1403 const char * 1404 pmc_name_of_cputype(enum pmc_cputype cp) 1405 { 1406 size_t n; 1407 1408 for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++) 1409 if (cp == pmc_cputype_names[n].pm_cputype) 1410 return (pmc_cputype_names[n].pm_name); 1411 1412 errno = EINVAL; 1413 return (NULL); 1414 } 1415 1416 const char * 1417 pmc_name_of_disposition(enum pmc_disp pd) 1418 { 1419 if ((int) pd >= PMC_DISP_FIRST && 1420 pd <= PMC_DISP_LAST) 1421 return (pmc_disposition_names[pd]); 1422 1423 errno = EINVAL; 1424 return (NULL); 1425 } 1426 1427 const char * 1428 _pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu) 1429 { 1430 const struct pmc_event_descr *ev, *evfence; 1431 1432 ev = evfence = NULL; 1433 if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) { 1434 ev = k8_event_table; 1435 evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8); 1436 1437 } else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) { 1438 switch (cpu) { 1439 case PMC_CPU_ARMV7_CORTEX_A8: 1440 ev = cortex_a8_event_table; 1441 evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8); 1442 break; 1443 case PMC_CPU_ARMV7_CORTEX_A9: 1444 ev = cortex_a9_event_table; 1445 evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9); 1446 break; 1447 default: /* Unknown CPU type. */ 1448 break; 1449 } 1450 } else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) { 1451 switch (cpu) { 1452 case PMC_CPU_ARMV8_CORTEX_A53: 1453 ev = cortex_a53_event_table; 1454 evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53); 1455 break; 1456 case PMC_CPU_ARMV8_CORTEX_A57: 1457 ev = cortex_a57_event_table; 1458 evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57); 1459 break; 1460 case PMC_CPU_ARMV8_CORTEX_A76: 1461 ev = cortex_a76_event_table; 1462 evfence = cortex_a76_event_table + PMC_EVENT_TABLE_SIZE(cortex_a76); 1463 break; 1464 default: /* Unknown CPU type. */ 1465 break; 1466 } 1467 } else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) { 1468 ev = ppc7450_event_table; 1469 evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450); 1470 } else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) { 1471 ev = ppc970_event_table; 1472 evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970); 1473 } else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) { 1474 ev = e500_event_table; 1475 evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500); 1476 } else if (pe == PMC_EV_TSC_TSC) { 1477 ev = tsc_event_table; 1478 evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc); 1479 } else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) { 1480 ev = soft_event_table; 1481 evfence = soft_event_table + soft_event_info.pm_nevent; 1482 } 1483 1484 for (; ev != evfence; ev++) 1485 if (pe == ev->pm_ev_code) 1486 return (ev->pm_ev_name); 1487 1488 return (NULL); 1489 } 1490 1491 const char * 1492 pmc_name_of_event(enum pmc_event pe) 1493 { 1494 const char *n; 1495 1496 if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL) 1497 return (n); 1498 1499 errno = EINVAL; 1500 return (NULL); 1501 } 1502 1503 const char * 1504 pmc_name_of_mode(enum pmc_mode pm) 1505 { 1506 if ((int) pm >= PMC_MODE_FIRST && 1507 pm <= PMC_MODE_LAST) 1508 return (pmc_mode_names[pm]); 1509 1510 errno = EINVAL; 1511 return (NULL); 1512 } 1513 1514 const char * 1515 pmc_name_of_state(enum pmc_state ps) 1516 { 1517 if ((int) ps >= PMC_STATE_FIRST && 1518 ps <= PMC_STATE_LAST) 1519 return (pmc_state_names[ps]); 1520 1521 errno = EINVAL; 1522 return (NULL); 1523 } 1524 1525 int 1526 pmc_ncpu(void) 1527 { 1528 if (pmc_syscall == -1) { 1529 errno = ENXIO; 1530 return (-1); 1531 } 1532 1533 return (cpu_info.pm_ncpu); 1534 } 1535 1536 int 1537 pmc_npmc(int cpu) 1538 { 1539 if (pmc_syscall == -1) { 1540 errno = ENXIO; 1541 return (-1); 1542 } 1543 1544 if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) { 1545 errno = EINVAL; 1546 return (-1); 1547 } 1548 1549 return (cpu_info.pm_npmc); 1550 } 1551 1552 int 1553 pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci) 1554 { 1555 int nbytes, npmc; 1556 struct pmc_op_getpmcinfo *pmci; 1557 1558 if ((npmc = pmc_npmc(cpu)) < 0) 1559 return (-1); 1560 1561 nbytes = sizeof(struct pmc_op_getpmcinfo) + 1562 npmc * sizeof(struct pmc_info); 1563 1564 if ((pmci = calloc(1, nbytes)) == NULL) 1565 return (-1); 1566 1567 pmci->pm_cpu = cpu; 1568 1569 if (PMC_CALL(GETPMCINFO, pmci) < 0) { 1570 free(pmci); 1571 return (-1); 1572 } 1573 1574 /* kernel<->library, library<->userland interfaces are identical */ 1575 *ppmci = (struct pmc_pmcinfo *) pmci; 1576 return (0); 1577 } 1578 1579 int 1580 pmc_read(pmc_id_t pmc, pmc_value_t *value) 1581 { 1582 struct pmc_op_pmcrw pmc_read_op; 1583 1584 pmc_read_op.pm_pmcid = pmc; 1585 pmc_read_op.pm_flags = PMC_F_OLDVALUE; 1586 pmc_read_op.pm_value = -1; 1587 1588 if (PMC_CALL(PMCRW, &pmc_read_op) < 0) 1589 return (-1); 1590 1591 *value = pmc_read_op.pm_value; 1592 return (0); 1593 } 1594 1595 int 1596 pmc_release(pmc_id_t pmc) 1597 { 1598 struct pmc_op_simple pmc_release_args; 1599 1600 pmc_release_args.pm_pmcid = pmc; 1601 return (PMC_CALL(PMCRELEASE, &pmc_release_args)); 1602 } 1603 1604 int 1605 pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep) 1606 { 1607 struct pmc_op_pmcrw pmc_rw_op; 1608 1609 pmc_rw_op.pm_pmcid = pmc; 1610 pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE; 1611 pmc_rw_op.pm_value = newvalue; 1612 1613 if (PMC_CALL(PMCRW, &pmc_rw_op) < 0) 1614 return (-1); 1615 1616 *oldvaluep = pmc_rw_op.pm_value; 1617 return (0); 1618 } 1619 1620 int 1621 pmc_set(pmc_id_t pmc, pmc_value_t value) 1622 { 1623 struct pmc_op_pmcsetcount sc; 1624 1625 sc.pm_pmcid = pmc; 1626 sc.pm_count = value; 1627 1628 if (PMC_CALL(PMCSETCOUNT, &sc) < 0) 1629 return (-1); 1630 return (0); 1631 } 1632 1633 int 1634 pmc_start(pmc_id_t pmc) 1635 { 1636 struct pmc_op_simple pmc_start_args; 1637 1638 pmc_start_args.pm_pmcid = pmc; 1639 return (PMC_CALL(PMCSTART, &pmc_start_args)); 1640 } 1641 1642 int 1643 pmc_stop(pmc_id_t pmc) 1644 { 1645 struct pmc_op_simple pmc_stop_args; 1646 1647 pmc_stop_args.pm_pmcid = pmc; 1648 return (PMC_CALL(PMCSTOP, &pmc_stop_args)); 1649 } 1650 1651 int 1652 pmc_width(pmc_id_t pmcid, uint32_t *width) 1653 { 1654 unsigned int i; 1655 enum pmc_class cl; 1656 1657 cl = PMC_ID_TO_CLASS(pmcid); 1658 for (i = 0; i < cpu_info.pm_nclass; i++) 1659 if (cpu_info.pm_classes[i].pm_class == cl) { 1660 *width = cpu_info.pm_classes[i].pm_width; 1661 return (0); 1662 } 1663 errno = EINVAL; 1664 return (-1); 1665 } 1666 1667 int 1668 pmc_write(pmc_id_t pmc, pmc_value_t value) 1669 { 1670 struct pmc_op_pmcrw pmc_write_op; 1671 1672 pmc_write_op.pm_pmcid = pmc; 1673 pmc_write_op.pm_flags = PMC_F_NEWVALUE; 1674 pmc_write_op.pm_value = value; 1675 return (PMC_CALL(PMCRW, &pmc_write_op)); 1676 } 1677 1678 int 1679 pmc_writelog(uint32_t userdata) 1680 { 1681 struct pmc_op_writelog wl; 1682 1683 wl.pm_userdata = userdata; 1684 return (PMC_CALL(WRITELOG, &wl)); 1685 } 1686