/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/module.h>
#include <sys/pmc.h>
#include <sys/syscall.h>

#include <ctype.h>
#include <errno.h>
#include <err.h>
#include <fcntl.h>
#include <pmc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <sysexits.h>
#include <unistd.h>

#include "libpmcinternal.h"

/* Function prototypes */
#if defined(__amd64__) || defined(__i386__)
static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__arm__)
static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__aarch64__)
static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__mips__)
static int mips_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif /* __mips__ */
static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);

#if defined(__powerpc__)
static int powerpc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif /* __powerpc__ */

#define	PMC_CALL(cmd, params)	\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))
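
/*
 * Illustrative note (not from the upstream file): PMC_CALL() funnels every
 * libpmc operation through the hwpmc(4) system call whose number is
 * discovered by pmc_init().  For example, given a previously allocated
 * 'pmcid', a call such as
 *
 *	struct pmc_op_simple args = { .pm_pmcid = pmcid };
 *	PMC_CALL(PMCSTART, &args);
 *
 * expands to
 *
 *	syscall(pmc_syscall, PMC_OP_PMCSTART, &args);
 */
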
/*
 * Event aliases provide a way for the user to ask for generic events
 * like "cache-misses", or "instructions-retired".  These aliases are
 * mapped to the appropriate canonical event descriptions using a
 * lookup table.
 */
struct pmc_event_alias {
	const char	*pm_alias;
	const char	*pm_spec;
};

static const struct pmc_event_alias *pmc_mdep_event_aliases;

/*
 * The pmc_event_descr structure maps symbolic names known to the user
 * to integer codes used by the PMC KLD.
 */
struct pmc_event_descr {
	const char	*pm_ev_name;
	enum pmc_event	pm_ev_code;
};

/*
 * The pmc_class_descr structure maps class name prefixes for
 * event names to event tables and other PMC class data.
 */
struct pmc_class_descr {
	const char	*pm_evc_name;
	size_t		pm_evc_name_size;
	enum pmc_class	pm_evc_class;
	const struct pmc_event_descr *pm_evc_event_table;
	size_t		pm_evc_event_table_size;
	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
};

#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)

#undef	__PMC_EV
#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },

/*
 * PMC_CLASSDEP_TABLE(NAME, CLASS)
 *
 * Define a table mapping event names and aliases to HWPMC event IDs.
 */
#define	PMC_CLASSDEP_TABLE(N, C)	\
	static const struct pmc_event_descr N##_event_table[] =	\
	{	\
		__PMC_EV_##C()	\
	}

PMC_CLASSDEP_TABLE(iaf, IAF);
PMC_CLASSDEP_TABLE(k8, K8);
PMC_CLASSDEP_TABLE(armv7, ARMV7);
PMC_CLASSDEP_TABLE(armv8, ARMV8);
PMC_CLASSDEP_TABLE(beri, BERI);
PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
PMC_CLASSDEP_TABLE(mips74k, MIPS74K);
PMC_CLASSDEP_TABLE(octeon, OCTEON);
PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
PMC_CLASSDEP_TABLE(ppc970, PPC970);
PMC_CLASSDEP_TABLE(power8, POWER8);
PMC_CLASSDEP_TABLE(e500, E500);

static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];

#undef	__PMC_EV_ALIAS
#define	__PMC_EV_ALIAS(N,CODE)	{ N, PMC_EV_##CODE },

static const struct pmc_event_descr cortex_a8_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A8()
};

static const struct pmc_event_descr cortex_a9_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A9()
};

static const struct pmc_event_descr cortex_a53_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A53()
};

static const struct pmc_event_descr cortex_a57_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A57()
};

static const struct pmc_event_descr cortex_a76_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A76()
};
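
/*
 * Example (illustrative, not part of the upstream file): with the
 * __PMC_EV() definition above, PMC_CLASSDEP_TABLE(k8, K8) produces a
 * table of the form
 *
 *	static const struct pmc_event_descr k8_event_table[] = {
 *		{ "FP_DISPATCHED_FPU_OPS", PMC_EV_K8_FP_DISPATCHED_FPU_OPS },
 *		{ "DC_REFILL_FROM_L2", PMC_EV_K8_DC_REFILL_FROM_L2 },
 *		...
 *	};
 *
 * where the exact entries come from the __PMC_EV_K8() list supplied by
 * the hwpmc event headers; the two entries shown are assumptions based
 * on the events handled later in this file.
 */
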
/*
 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
 *
 * Map a CPU to the PMC classes it supports.
 */
#define	PMC_MDEP_TABLE(N,C,...)	\
	static const enum pmc_class N##_pmc_classes[] = {	\
		PMC_CLASS_##C, __VA_ARGS__	\
	}

PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(beri, BERI, PMC_CLASS_SOFT, PMC_CLASS_BERI);
PMC_MDEP_TABLE(cortex_a8, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
PMC_MDEP_TABLE(cortex_a9, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(cortex_a57, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(cortex_a76, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
PMC_MDEP_TABLE(mips74k, MIPS74K, PMC_CLASS_SOFT, PMC_CLASS_MIPS74K);
PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450, PMC_CLASS_TSC);
PMC_MDEP_TABLE(ppc970, PPC970, PMC_CLASS_SOFT, PMC_CLASS_PPC970, PMC_CLASS_TSC);
PMC_MDEP_TABLE(power8, POWER8, PMC_CLASS_SOFT, PMC_CLASS_POWER8, PMC_CLASS_TSC);
PMC_MDEP_TABLE(e500, E500, PMC_CLASS_SOFT, PMC_CLASS_E500, PMC_CLASS_TSC);
PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT);

static const struct pmc_event_descr tsc_event_table[] =
{
	__PMC_EV_TSC()
};

#undef	PMC_CLASS_TABLE_DESC
#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
static const struct pmc_class_descr NAME##_class_table_descr =	\
	{	\
		.pm_evc_name = #CLASS "-",	\
		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
		.pm_evc_class = PMC_CLASS_##CLASS ,	\
		.pm_evc_event_table = EVENTS##_event_table ,	\
		.pm_evc_event_table_size =	\
			PMC_EVENT_TABLE_SIZE(EVENTS),	\
		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
	}

#if defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
#endif
#if defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
#endif
#if defined(__arm__)
PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7);
PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7);
#endif
#if defined(__aarch64__)
PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
PMC_CLASS_TABLE_DESC(cortex_a76, ARMV8, cortex_a76, arm64);
#endif
#if defined(__mips__)
PMC_CLASS_TABLE_DESC(beri, BERI, beri, mips);
PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips);
PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips);
#endif /* __mips__ */
#if defined(__powerpc__)
PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
PMC_CLASS_TABLE_DESC(power8, POWER8, power8, powerpc);
PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc);
#endif

static struct pmc_class_descr soft_class_table_descr =
{
	.pm_evc_name = "SOFT-",
	.pm_evc_name_size = sizeof("SOFT-") - 1,
	.pm_evc_class = PMC_CLASS_SOFT,
	.pm_evc_event_table = NULL,
	.pm_evc_event_table_size = 0,
	.pm_evc_allocate_pmc = soft_allocate_pmc
};

#undef	PMC_CLASS_TABLE_DESC

static const struct pmc_class_descr **pmc_class_table;
#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass

static const enum pmc_class *pmc_mdep_class_list;
static size_t pmc_mdep_class_list_size;
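
/*
 * Example (illustrative): PMC_CLASS_TABLE_DESC(k8, K8, k8, k8) above
 * expands to roughly
 *
 *	static const struct pmc_class_descr k8_class_table_descr = {
 *		.pm_evc_name = "K8-",
 *		.pm_evc_name_size = sizeof("K8-") - 1,
 *		.pm_evc_class = PMC_CLASS_K8,
 *		.pm_evc_event_table = k8_event_table,
 *		.pm_evc_event_table_size = PMC_EVENT_TABLE_SIZE(k8),
 *		.pm_evc_allocate_pmc = k8_allocate_pmc
 *	};
 *
 * so an event spec beginning with the (case insensitive) prefix "k8-"
 * is looked up in k8_event_table and finalized by k8_allocate_pmc().
 */
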
/*
 * Mapping tables, mapping enumeration values to human readable
 * strings.
 */

static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

struct pmc_class_map {
	enum pmc_class	pm_class;
	const char	*pm_name;
};

static const struct pmc_class_map pmc_class_names[] = {
#undef	__PMC_CLASS
#define	__PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } ,
	__PMC_CLASSES()
};

struct pmc_cputype_map {
	enum pmc_cputype pm_cputype;
	const char	*pm_name;
};

static const struct pmc_cputype_map pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
	__PMC_CPUS()
};

static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

static const char * pmc_mode_names[] = {
#undef	__PMC_MODE
#define	__PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

static const char * pmc_state_names[] = {
#undef	__PMC_STATE
#define	__PMC_STATE(S)	#S ,
	__PMC_STATES()
};

/*
 * Filled in by pmc_init().
 */
static int pmc_syscall = -1;
static struct pmc_cpuinfo cpu_info;
static struct pmc_op_getdyneventinfo soft_event_info;

/* Event masks for events */
struct pmc_masks {
	const char	*pm_name;
	const uint64_t	pm_value;
};
#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
#define	NULLMASK	{ .pm_name = NULL }

#if defined(__amd64__) || defined(__i386__)
static int
pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
{
	const struct pmc_masks *pm;
	char *q, *r;
	int c;

	if (pmask == NULL)	/* no mask keywords */
		return (-1);
	q = strchr(p, '=');	/* skip '=' */
	if (*++q == '\0')	/* no more data */
		return (-1);
	c = 0;			/* count of mask keywords seen */
	while ((r = strsep(&q, "+")) != NULL) {
		for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
		    pm++)
			;
		if (pm->pm_name == NULL) /* not found */
			return (-1);
		*evmask |= pm->pm_value;
		c++;
	}
	return (c);
}
#endif

#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
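
/*
 * A minimal illustration (not part of the original file): given the
 * spec token "mask=es+ds" and the k8_mask_lsrl table defined below,
 * pmc_parse_mask() ORs (1 << 0) | (1 << 3) into *evmask and returns 2,
 * the number of mask keywords it recognized.
 */
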
#if defined(__amd64__) || defined(__i386__)
/*
 * AMD K8 PMCs.
 */

static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches", "k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles", "tsc"),
	EV_ALIAS("dc-misses", "k8-dc-miss"),
	EV_ALIAS("ic-misses", "k8-ic-miss"),
	EV_ALIAS("instructions", "k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts", "k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles", "k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};

#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops, 0),
	__K8MASK(multiply-pipe-excluding-junk-ops, 1),
	__K8MASK(store-pipe-excluding-junk-ops, 2),
	__K8MASK(add-pipe-junk-ops, 3),
	__K8MASK(multiply-pipe-junk-ops, 4),
	__K8MASK(store-pipe-junk-ops, 5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es, 0),
	__K8MASK(cs, 1),
	__K8MASK(ss, 2),
	__K8MASK(ds, 3),
	__K8MASK(fs, 4),
	__K8MASK(gs, 5),
	__K8MASK(hs, 6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions, 0),
	__K8MASK(cycles-in-request, 1),
	__K8MASK(cycles-to-complete, 2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid, 0),
	__K8MASK(shared, 1),
	__K8MASK(exclusive, 2),
	__K8MASK(owner, 3),
	__K8MASK(modified, 4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber, 0),
	__K8MASK(piggyback, 1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load, 0),
	__K8MASK(store, 1),
	__K8MASK(nta, 2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses, 0),
	__K8MASK(misses, 1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill, 0),
	__K8MASK(dc-fill, 1),
	__K8MASK(tlb-reload, 2),
	__K8MASK(tag-snoop, 3),
	__K8MASK(cancelled, 4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill, 0),
	__K8MASK(dc-fill, 1),
	__K8MASK(tlb-reload, 2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim, 0),
	__K8MASK(victim-from-l2, 1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87, 0),
	__K8MASK(mmx-3dnow, 1),
	__K8MASK(packed-sse-sse2, 2),
	__K8MASK(scalar-sse-sse2, 3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0, 0),
	__K8MASK(low-op-pos-1, 1),
	__K8MASK(low-op-pos-2, 2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults, 0),
	__K8MASK(sse-retype-microfaults, 1),
	__K8MASK(sse-reclass-microfaults, 2),
	__K8MASK(sse-and-x87-microtraps, 3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit, 0),
	__K8MASK(page-miss, 1),
	__K8MASK(page-conflict, 2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround, 0),
	__K8MASK(read-to-write-turnaround, 1),
	__K8MASK(write-to-read-turnaround, 2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass, 0),
	__K8MASK(memory-controller-lo-pri-bypass, 1),
	__K8MASK(dram-controller-interface-bypass, 2),
	__K8MASK(dram-controller-queue-bypass, 3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte, 0),
	__K8MASK(nonpostwrszdword, 1),
	__K8MASK(postwrszbyte, 2),
	__K8MASK(postwrszdword, 3),
	__K8MASK(rdszbyte, 4),
	__K8MASK(rdszdword, 5),
	__K8MASK(rdmodwr, 6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss, 0),
	__K8MASK(probe-hit, 1),
	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command, 0),
	__K8MASK(data, 1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop, 3),
	NULLMASK
};

#undef	__K8MASK

#define	K8_KW_COUNT	"count"
#define	K8_KW_EDGE	"edge"
#define	K8_KW_INV	"inv"
#define	K8_KW_MASK	"mask"
#define	K8_KW_OS	"os"
#define	K8_KW_USR	"usr"

static int
k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	int n;
	uint32_t count;
	uint64_t evmask;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_amd.pm_amd_config = 0;

	pmask = NULL;
	evmask = 0;

#define	__K8SETMASK(M) pmask = k8_mask_##M

	/* setup parsing tables */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
		__K8SETMASK(fdfo);
		break;
	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
		__K8SETMASK(lsrl);
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		__K8SETMASK(llo);
		break;
	case PMC_EV_K8_DC_REFILL_FROM_L2:
	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
	case PMC_EV_K8_DC_COPYBACK:
		__K8SETMASK(dc);
		break;
	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
		__K8SETMASK(dobee);
		break;
	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
		__K8SETMASK(ddpi);
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		__K8SETMASK(dabl);
		break;
	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
		__K8SETMASK(bilr);
		break;
	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
		__K8SETMASK(bfrlm);
		break;
	case PMC_EV_K8_BU_FILL_INTO_L2:
		__K8SETMASK(bfil);
		break;
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
		__K8SETMASK(frfi);
		break;
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
		__K8SETMASK(frfdoi);
		break;
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		__K8SETMASK(ffe);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
		__K8SETMASK(nmcpae);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
		__K8SETMASK(nmct);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
		__K8SETMASK(nmcbs);
		break;
	case PMC_EV_K8_NB_SIZED_COMMANDS:
		__K8SETMASK(nsc);
		break;
	case PMC_EV_K8_NB_PROBE_RESULT:
		__K8SETMASK(npr);
		break;
	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
		__K8SETMASK(nhbb);
		break;

	default:
		break;		/* no options defined */
	}

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K8_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K8_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, K8_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, K8_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* other post processing */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		/* XXX only available in rev B and later */
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		/* XXX only available in rev C and later */
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		/* XXX on CPU revisions A and B, evmask must be zero */
		if (evmask & (evmask - 1)) /* > 1 bit set */
			return (-1);
		if (evmask == 0) {
			evmask = 0x01; /* Rev C and later: #instrs */
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
		break;
	default:
		if (evmask == 0 && pmask != NULL) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_amd.pm_amd_config =
		    AMD_PMC_TO_UNITMASK(evmask);

	return (0);
}

#endif
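
/*
 * Worked example (illustrative, assuming the K8 event names supplied by
 * the hwpmc event list): the counter spec
 *
 *	"k8-dc-refill-from-l2,mask=shared+exclusive,os"
 *
 * selects PMC_EV_K8_DC_REFILL_FROM_L2; k8_allocate_pmc() then parses
 * the remaining keywords, ORing in PMC_CAP_SYSTEM for "os" and, for the
 * "mask=" qualifier, combining the "shared" (1 << 1) and "exclusive"
 * (1 << 2) bits from k8_mask_dc into the AMD unit mask via
 * AMD_PMC_TO_UNITMASK(0x6).
 */
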
#if defined(__i386__) || defined(__amd64__)
static int
tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	if (pe != PMC_EV_TSC_TSC)
		return (-1);

	/* TSC events must be unqualified. */
	if (ctrspec && *ctrspec != '\0')
		return (-1);

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= PMC_CAP_READ;

	return (0);
}
#endif

static struct pmc_event_alias generic_aliases[] = {
	EV_ALIAS("instructions", "SOFT-CLOCK.HARD"),
	EV_ALIAS(NULL, NULL)
};

static int
soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	(void)ctrspec;
	(void)pmc_config;

	if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
		return (-1);

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	return (0);
}

#if defined(__arm__)
static struct pmc_event_alias cortex_a8_aliases[] = {
	EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"),
	EV_ALIAS("instructions", "INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias cortex_a9_aliases[] = {
	EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"),
	EV_ALIAS("instructions", "INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};

static int
armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

#if defined(__aarch64__)
static struct pmc_event_alias cortex_a53_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
static struct pmc_event_alias cortex_a57_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
static struct pmc_event_alias cortex_a76_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
static int
arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

#if defined(__mips__)

static struct pmc_event_alias beri_aliases[] = {
	EV_ALIAS("instructions", "INST"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias mips24k_aliases[] = {
	EV_ALIAS("instructions", "INSTR_EXECUTED"),
	EV_ALIAS("branches", "BRANCH_COMPLETED"),
	EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias mips74k_aliases[] = {
	EV_ALIAS("instructions", "INSTR_EXECUTED"),
	EV_ALIAS("branches", "BRANCH_INSNS"),
	EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCH_INSNS"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias octeon_aliases[] = {
	EV_ALIAS("instructions", "RET"),
	EV_ALIAS("branches", "BR"),
	EV_ALIAS("branch-mispredicts", "BRMIS"),
	EV_ALIAS(NULL, NULL)
};

#define	MIPS_KW_OS		"os"
#define	MIPS_KW_USR		"usr"
#define	MIPS_KW_ANYTHREAD	"anythread"

static int
mips_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, MIPS_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, MIPS_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, MIPS_KW_ANYTHREAD))
			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
		else
			return (-1);
	}

	return (0);
}

#endif /* __mips__ */
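
/*
 * Example (illustrative, event names assumed from the MIPS24K table):
 * on a MIPS24K system the spec "mips24k-instr-executed,os" selects
 * INSTR_EXECUTED and mips_allocate_pmc() adds PMC_CAP_SYSTEM for the
 * "os" keyword, while a bare "instructions" spec is first rewritten to
 * "INSTR_EXECUTED" through mips24k_aliases.
 */
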
#if defined(__powerpc__)

static struct pmc_event_alias ppc7450_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("branches", "BRANCHES_COMPLETED"),
	EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCHES"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias ppc970_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles", "CYCLES"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias power8_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles", "CYCLES"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias e500_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles", "CYCLES"),
	EV_ALIAS(NULL, NULL)
};

#define	POWERPC_KW_OS		"os"
#define	POWERPC_KW_USR		"usr"
#define	POWERPC_KW_ANYTHREAD	"anythread"

static int
powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, POWERPC_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, POWERPC_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
		else
			return (-1);
	}

	return (0);
}

#endif /* __powerpc__ */

/*
 * Match an event name `name' with its canonical form.
 *
 * Matches are case insensitive and spaces, periods, underscores and
 * hyphen characters are considered to match each other.
 *
 * Returns 1 for a match, 0 otherwise.
 */
static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	int cc, nc;
	const unsigned char *c, *n;

	c = (const unsigned char *) canonicalname;
	n = (const unsigned char *) name;

	for (; (nc = *n) && (cc = *c); n++, c++) {
		if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
		    (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
			continue;

		if (toupper(nc) == toupper(cc))
			continue;

		return (0);
	}

	if (*n == '\0' && *c == '\0')
		return (1);

	return (0);
}
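
/*
 * For example (illustrative), pmc_match_event_name() treats
 * "l1_dcache_refill", "L1-DCACHE-REFILL" and "l1.dcache.refill" as the
 * same event name, since letter case is ignored and the separator
 * characters ' ', '.', '_' and '-' are interchangeable.
 */
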
/*
 * Match an event name against all the event names supported by a
 * PMC class.
 *
 * Returns an event descriptor pointer on match or NULL otherwise.
 */
static const struct pmc_event_descr *
pmc_match_event_class(const char *name,
    const struct pmc_class_descr *pcd)
{
	size_t n;
	const struct pmc_event_descr *ev;

	ev = pcd->pm_evc_event_table;
	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
		if (pmc_match_event_name(name, ev->pm_ev_name))
			return (ev);

	return (NULL);
}

static int
pmc_mdep_is_compatible_class(enum pmc_class pc)
{
	size_t n;

	for (n = 0; n < pmc_mdep_class_list_size; n++)
		if (pmc_mdep_class_list[n] == pc)
			return (1);
	return (0);
}

/*
 * API entry points
 */

int
pmc_allocate(const char *ctrspec, enum pmc_mode mode,
    uint32_t flags, int cpu, pmc_id_t *pmcid,
    uint64_t count)
{
	size_t n;
	int retval;
	char *r, *spec_copy;
	const char *ctrname;
	const struct pmc_event_descr *ev;
	const struct pmc_event_alias *alias;
	struct pmc_op_pmcallocate pmc_config;
	const struct pmc_class_descr *pcd;

	spec_copy = NULL;
	retval = -1;

	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
		errno = EINVAL;
		goto out;
	}
	bzero(&pmc_config, sizeof(pmc_config));
	pmc_config.pm_cpu = cpu;
	pmc_config.pm_mode = mode;
	pmc_config.pm_flags = flags;
	pmc_config.pm_count = count;
	if (PMC_IS_SAMPLING_MODE(mode))
		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;

	/*
	 * Can we pull this straight from the pmu table?
	 */
	r = spec_copy = strdup(ctrspec);
	ctrname = strsep(&r, ",");
	if (pmc_pmu_enabled()) {
		if (pmc_pmu_pmcallocate(ctrname, &pmc_config) == 0) {
			if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0) {
				goto out;
			}
			retval = 0;
			*pmcid = pmc_config.pm_pmcid;
			goto out;
		}
		errx(EX_USAGE, "ERROR: pmc_pmu_allocate failed, check for ctrname %s\n", ctrname);
	} else {
		free(spec_copy);
		spec_copy = NULL;
	}

	/* replace an event alias with the canonical event specifier */
	if (pmc_mdep_event_aliases)
		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
			if (!strcasecmp(ctrspec, alias->pm_alias)) {
				spec_copy = strdup(alias->pm_spec);
				break;
			}

	if (spec_copy == NULL)
		spec_copy = strdup(ctrspec);

	r = spec_copy;
	ctrname = strsep(&r, ",");

	/*
	 * If an explicit class prefix was given by the user, restrict the
	 * search for the event to the specified PMC class.
	 */
	ev = NULL;
	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pcd && pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
		    strncasecmp(ctrname, pcd->pm_evc_name,
		    pcd->pm_evc_name_size) == 0) {
			if ((ev = pmc_match_event_class(ctrname +
			    pcd->pm_evc_name_size, pcd)) == NULL) {
				errno = EINVAL;
				goto out;
			}
			break;
		}
	}

	/*
	 * Otherwise, search for this event in all compatible PMC
	 * classes.
	 */
	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pcd && pmc_mdep_is_compatible_class(pcd->pm_evc_class))
			ev = pmc_match_event_class(ctrname, pcd);
	}

	if (ev == NULL) {
		errno = EINVAL;
		goto out;
	}

	pmc_config.pm_ev = ev->pm_ev_code;
	pmc_config.pm_class = pcd->pm_evc_class;

	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
		errno = EINVAL;
		goto out;
	}

	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
		goto out;

	*pmcid = pmc_config.pm_pmcid;

	retval = 0;

out:
	if (spec_copy)
		free(spec_copy);

	return (retval);
}

int
pmc_attach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_attach_args;

	pmc_attach_args.pm_pmc = pmc;
	pmc_attach_args.pm_pid = pid;

	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
}

int
pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*caps = cpu_info.pm_classes[i].pm_caps;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

int
pmc_configure_logfile(int fd)
{
	struct pmc_op_configurelog cla;

	cla.pm_logfd = fd;
	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
		return (-1);
	return (0);
}

int
pmc_cpuinfo(const struct pmc_cpuinfo **pci)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	*pci = &cpu_info;
	return (0);
}

int
pmc_detach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_detach_args;

	pmc_detach_args.pm_pmc = pmc;
	pmc_detach_args.pm_pid = pid;
	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
}

int
pmc_disable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_DISABLED;
	return (PMC_CALL(PMCADMIN, &ssa));
}

int
pmc_enable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_FREE;
	return (PMC_CALL(PMCADMIN, &ssa));
}
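
/*
 * A minimal usage sketch (illustrative only, error handling mostly
 * omitted): allocate a system-scope counting PMC on CPU 0, sample it,
 * and release it.  The "instructions" spec is an assumption; any event
 * string understood by pmc_allocate() will do.
 *
 *	pmc_id_t pmcid;
 *	pmc_value_t v;
 *
 *	if (pmc_init() < 0)
 *		err(EX_OSERR, "pmc_init");
 *	if (pmc_allocate("instructions", PMC_MODE_SC, 0, 0, &pmcid, 0) < 0)
 *		err(EX_OSERR, "pmc_allocate");
 *	pmc_start(pmcid);
 *	... run the workload to be measured ...
 *	pmc_read(pmcid, &v);
 *	pmc_stop(pmcid);
 *	pmc_release(pmcid);
 */
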
/*
 * Return a list of events known to a given PMC class.  'cl' is the
 * PMC class identifier, 'eventnames' is the returned list of 'const
 * char *' pointers pointing to the names of the events. 'nevents' is
 * the number of event name pointers returned.
 *
 * The space for 'eventnames' is allocated using malloc(3).  The caller
 * is responsible for freeing this space when done.
 */
int
pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
    int *nevents)
{
	int count;
	const char **names;
	const struct pmc_event_descr *ev;

	switch (cl)
	{
	case PMC_CLASS_IAF:
		ev = iaf_event_table;
		count = PMC_EVENT_TABLE_SIZE(iaf);
		break;
	case PMC_CLASS_TSC:
		ev = tsc_event_table;
		count = PMC_EVENT_TABLE_SIZE(tsc);
		break;
	case PMC_CLASS_K8:
		ev = k8_event_table;
		count = PMC_EVENT_TABLE_SIZE(k8);
		break;
	case PMC_CLASS_ARMV7:
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		}
		break;
	case PMC_CLASS_ARMV8:
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		case PMC_CPU_ARMV8_CORTEX_A76:
			ev = cortex_a76_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a76);
			break;
		}
		break;
	case PMC_CLASS_BERI:
		ev = beri_event_table;
		count = PMC_EVENT_TABLE_SIZE(beri);
		break;
	case PMC_CLASS_MIPS24K:
		ev = mips24k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips24k);
		break;
	case PMC_CLASS_MIPS74K:
		ev = mips74k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips74k);
		break;
	case PMC_CLASS_OCTEON:
		ev = octeon_event_table;
		count = PMC_EVENT_TABLE_SIZE(octeon);
		break;
	case PMC_CLASS_PPC7450:
		ev = ppc7450_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc7450);
		break;
	case PMC_CLASS_PPC970:
		ev = ppc970_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc970);
		break;
	case PMC_CLASS_POWER8:
		ev = power8_event_table;
		count = PMC_EVENT_TABLE_SIZE(power8);
		break;
	case PMC_CLASS_E500:
		ev = e500_event_table;
		count = PMC_EVENT_TABLE_SIZE(e500);
		break;
	case PMC_CLASS_SOFT:
		ev = soft_event_table;
		count = soft_event_info.pm_nevent;
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	if ((names = malloc(count * sizeof(const char *))) == NULL)
		return (-1);

	*eventnames = names;
	*nevents = count;

	for (;count--; ev++, names++)
		*names = ev->pm_ev_name;

	return (0);
}

int
pmc_flush_logfile(void)
{
	return (PMC_CALL(FLUSHLOG,0));
}

int
pmc_close_logfile(void)
{
	return (PMC_CALL(CLOSELOG,0));
}

int
pmc_get_driver_stats(struct pmc_driverstats *ds)
{
	struct pmc_op_getdriverstats gms;

	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
		return (-1);

	/* copy out fields in the current userland<->library interface */
	ds->pm_intr_ignored = gms.pm_intr_ignored;
	ds->pm_intr_processed = gms.pm_intr_processed;
	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
	ds->pm_syscalls = gms.pm_syscalls;
	ds->pm_syscall_errors = gms.pm_syscall_errors;
	ds->pm_buffer_requests = gms.pm_buffer_requests;
	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
	ds->pm_log_sweeps = gms.pm_log_sweeps;
	return (0);
}
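
/*
 * Usage sketch for pmc_event_names_of_class() (illustrative only): list
 * the software events advertised by the kernel, then release the name
 * array, which this library allocated with malloc(3).
 *
 *	const char **names;
 *	int i, nevents;
 *
 *	if (pmc_event_names_of_class(PMC_CLASS_SOFT, &names, &nevents) == 0) {
 *		for (i = 0; i < nevents; i++)
 *			printf("%s\n", names[i]);
 *		free(names);
 *	}
 */
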
int
pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
{
	struct pmc_op_getmsr gm;

	gm.pm_pmcid = pmc;
	if (PMC_CALL(PMCGETMSR, &gm) < 0)
		return (-1);
	*msr = gm.pm_msr;
	return (0);
}

int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;
#if defined(__amd64__) || defined(__i386__)
	int cpu_has_iaf_counters;
	unsigned int t;
#endif

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	bzero(&op_cpu_info, sizeof(op_cpu_info));
	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass = op_cpu_info.pm_nclass;
	for (n = 0; n < op_cpu_info.pm_nclass; n++)
		memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n],
		    sizeof(cpu_info.pm_classes[n]));

	pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
	    sizeof(struct pmc_class_descr *));

	if (pmc_class_table == NULL)
		return (-1);

	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
		pmc_class_table[n] = NULL;

	/*
	 * Get soft events list.
	 */
	soft_event_info.pm_class = PMC_CLASS_SOFT;
	if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0)
		return (pmc_syscall = -1);

	/* Map soft events to static list. */
	for (n = 0; n < soft_event_info.pm_nevent; n++) {
		soft_event_table[n].pm_ev_name =
		    soft_event_info.pm_events[n].pm_ev_name;
		soft_event_table[n].pm_ev_code =
		    soft_event_info.pm_events[n].pm_ev_code;
	}
	soft_class_table_descr.pm_evc_event_table_size =
	    soft_event_info.pm_nevent;
	soft_class_table_descr.pm_evc_event_table =
	    soft_event_table;

	/*
	 * Fill in the class table.
	 */
	n = 0;

	/* Fill soft events information. */
	pmc_class_table[n++] = &soft_class_table_descr;
#if defined(__amd64__) || defined(__i386__)
	if (cpu_info.pm_cputype != PMC_CPU_GENERIC)
		pmc_class_table[n++] = &tsc_class_table_descr;

	/*
	 * Check if this CPU has fixed function counters.
	 */
	cpu_has_iaf_counters = 0;
	for (t = 0; t < cpu_info.pm_nclass; t++)
		if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF &&
		    cpu_info.pm_classes[t].pm_num > 0)
			cpu_has_iaf_counters = 1;
#endif

#define	PMC_MDEP_INIT(C) do {	\
		pmc_mdep_event_aliases = C##_aliases;	\
		pmc_mdep_class_list = C##_pmc_classes;	\
		pmc_mdep_class_list_size =	\
		    PMC_TABLE_SIZE(C##_pmc_classes);	\
	} while (0)

#define	PMC_MDEP_INIT_INTEL_V2(C) do {	\
		PMC_MDEP_INIT(C);	\
		pmc_class_table[n++] = &iaf_class_table_descr;	\
		if (!cpu_has_iaf_counters)	\
			pmc_mdep_event_aliases =	\
			    C##_aliases_without_iaf;	\
		pmc_class_table[n] = &C##_class_table_descr;	\
	} while (0)

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		pmc_class_table[n] = &k8_class_table_descr;
		break;
#endif
	case PMC_CPU_GENERIC:
		PMC_MDEP_INIT(generic);
		break;
#if defined(__arm__)
	case PMC_CPU_ARMV7_CORTEX_A8:
		PMC_MDEP_INIT(cortex_a8);
		pmc_class_table[n] = &cortex_a8_class_table_descr;
		break;
	case PMC_CPU_ARMV7_CORTEX_A9:
		PMC_MDEP_INIT(cortex_a9);
		pmc_class_table[n] = &cortex_a9_class_table_descr;
		break;
#endif
#if defined(__aarch64__)
	case PMC_CPU_ARMV8_CORTEX_A53:
		PMC_MDEP_INIT(cortex_a53);
		pmc_class_table[n] = &cortex_a53_class_table_descr;
		break;
	case PMC_CPU_ARMV8_CORTEX_A57:
		PMC_MDEP_INIT(cortex_a57);
		pmc_class_table[n] = &cortex_a57_class_table_descr;
		break;
	case PMC_CPU_ARMV8_CORTEX_A76:
		PMC_MDEP_INIT(cortex_a76);
		pmc_class_table[n] = &cortex_a76_class_table_descr;
		break;
#endif
#if defined(__mips__)
	case PMC_CPU_MIPS_BERI:
		PMC_MDEP_INIT(beri);
		pmc_class_table[n] = &beri_class_table_descr;
		break;
	case PMC_CPU_MIPS_24K:
		PMC_MDEP_INIT(mips24k);
		pmc_class_table[n] = &mips24k_class_table_descr;
		break;
	case PMC_CPU_MIPS_74K:
		PMC_MDEP_INIT(mips74k);
		pmc_class_table[n] = &mips74k_class_table_descr;
		break;
	case PMC_CPU_MIPS_OCTEON:
		PMC_MDEP_INIT(octeon);
		pmc_class_table[n] = &octeon_class_table_descr;
		break;
#endif /* __mips__ */
#if defined(__powerpc__)
	case PMC_CPU_PPC_7450:
		PMC_MDEP_INIT(ppc7450);
		pmc_class_table[n] = &ppc7450_class_table_descr;
		break;
	case PMC_CPU_PPC_970:
		PMC_MDEP_INIT(ppc970);
		pmc_class_table[n] = &ppc970_class_table_descr;
		break;
	case PMC_CPU_PPC_POWER8:
		PMC_MDEP_INIT(power8);
		pmc_class_table[n] = &power8_class_table_descr;
		break;
	case PMC_CPU_PPC_E500:
		PMC_MDEP_INIT(e500);
		pmc_class_table[n] = &e500_class_table_descr;
		break;
#endif
	default:
		/*
		 * A CPU type this version of the library knows nothing
		 * about.  This should not happen, since the ABI version
		 * check should have caught the mismatch.
		 */
#if defined(__amd64__) || defined(__i386__)
		break;
#endif
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}
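
/*
 * Example (illustrative): the pmc_name_of_*() helpers below translate
 * the kernel's enumeration values back into the strings generated from
 * the __PMC_* lists above; e.g. pmc_name_of_class(PMC_CLASS_SOFT) is
 * expected to return "SOFT", and pmc_name_of_capability(PMC_CAP_INTERRUPT)
 * the stringified capability name ("INTERRUPT", assuming that is how the
 * capability is spelled in the __PMC_CAPS() list).
 */
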
const char *
pmc_name_of_capability(enum pmc_caps cap)
{
	int i;

	/*
	 * 'cap' should have a single bit set and should be in
	 * range.
	 */
	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
	    cap > PMC_CAP_LAST) {
		errno = EINVAL;
		return (NULL);
	}

	i = ffs(cap);
	return (pmc_capability_names[i - 1]);
}

const char *
pmc_name_of_class(enum pmc_class pc)
{
	size_t n;

	for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++)
		if (pc == pmc_class_names[n].pm_class)
			return (pmc_class_names[n].pm_name);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_cputype(enum pmc_cputype cp)
{
	size_t n;

	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
		if (cp == pmc_cputype_names[n].pm_cputype)
			return (pmc_cputype_names[n].pm_name);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_disposition(enum pmc_disp pd)
{
	if ((int) pd >= PMC_DISP_FIRST &&
	    pd <= PMC_DISP_LAST)
		return (pmc_disposition_names[pd]);

	errno = EINVAL;
	return (NULL);
}

const char *
_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
{
	const struct pmc_event_descr *ev, *evfence;

	ev = evfence = NULL;
	if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
		ev = k8_event_table;
		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);

	} else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		case PMC_CPU_ARMV8_CORTEX_A76:
			ev = cortex_a76_event_table;
			evfence = cortex_a76_event_table + PMC_EVENT_TABLE_SIZE(cortex_a76);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_BERI_FIRST && pe <= PMC_EV_BERI_LAST) {
		ev = beri_event_table;
		evfence = beri_event_table + PMC_EVENT_TABLE_SIZE(beri);
	} else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
		ev = mips24k_event_table;
		evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
	} else if (pe >= PMC_EV_MIPS74K_FIRST && pe <= PMC_EV_MIPS74K_LAST) {
		ev = mips74k_event_table;
		evfence = mips74k_event_table + PMC_EVENT_TABLE_SIZE(mips74k);
	} else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) {
		ev = octeon_event_table;
		evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon);
	} else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
		ev = ppc7450_event_table;
		evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
	} else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
		ev = ppc970_event_table;
		evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
	} else if (pe >= PMC_EV_POWER8_FIRST && pe <= PMC_EV_POWER8_LAST) {
		ev = power8_event_table;
		evfence = power8_event_table + PMC_EVENT_TABLE_SIZE(power8);
	} else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) {
		ev = e500_event_table;
		evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500);
	} else if (pe == PMC_EV_TSC_TSC) {
		ev = tsc_event_table;
		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
	} else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
		ev = soft_event_table;
		evfence = soft_event_table + soft_event_info.pm_nevent;
	}

	for (; ev != evfence; ev++)
		if (pe == ev->pm_ev_code)
			return (ev->pm_ev_name);

	return (NULL);
}

const char *
pmc_name_of_event(enum pmc_event pe)
{
	const char *n;

	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
		return (n);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_mode(enum pmc_mode pm)
{
	if ((int) pm >= PMC_MODE_FIRST &&
	    pm <= PMC_MODE_LAST)
		return (pmc_mode_names[pm]);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_state(enum pmc_state ps)
{
	if ((int) ps >= PMC_STATE_FIRST &&
	    ps <= PMC_STATE_LAST)
		return (pmc_state_names[ps]);

	errno = EINVAL;
	return (NULL);
}

int
pmc_ncpu(void)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	return (cpu_info.pm_ncpu);
}

int
pmc_npmc(int cpu)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
		errno = EINVAL;
		return (-1);
	}

	return (cpu_info.pm_npmc);
}

int
pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
{
	int nbytes, npmc;
	struct pmc_op_getpmcinfo *pmci;

	if ((npmc = pmc_npmc(cpu)) < 0)
		return (-1);

	nbytes = sizeof(struct pmc_op_getpmcinfo) +
	    npmc * sizeof(struct pmc_info);

	if ((pmci = calloc(1, nbytes)) == NULL)
		return (-1);

	pmci->pm_cpu = cpu;

	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
		free(pmci);
		return (-1);
	}

	/* kernel<->library, library<->userland interfaces are identical */
	*ppmci = (struct pmc_pmcinfo *) pmci;
	return (0);
}
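
/*
 * Usage sketch (illustrative only): query the topology discovered by
 * pmc_init() and print how many PMCs each CPU offers.
 *
 *	const struct pmc_cpuinfo *ci;
 *	int cpu;
 *
 *	if (pmc_cpuinfo(&ci) == 0) {
 *		printf("cpu type: %s\n", pmc_name_of_cputype(ci->pm_cputype));
 *		for (cpu = 0; cpu < pmc_ncpu(); cpu++)
 *			printf("cpu%d: %d PMCs\n", cpu, pmc_npmc(cpu));
 *	}
 */
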
int
pmc_read(pmc_id_t pmc, pmc_value_t *value)
{
	struct pmc_op_pmcrw pmc_read_op;

	pmc_read_op.pm_pmcid = pmc;
	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
	pmc_read_op.pm_value = -1;

	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
		return (-1);

	*value = pmc_read_op.pm_value;
	return (0);
}

int
pmc_release(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_release_args;

	pmc_release_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
}

int
pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
{
	struct pmc_op_pmcrw pmc_rw_op;

	pmc_rw_op.pm_pmcid = pmc;
	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
	pmc_rw_op.pm_value = newvalue;

	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
		return (-1);

	*oldvaluep = pmc_rw_op.pm_value;
	return (0);
}

int
pmc_set(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcsetcount sc;

	sc.pm_pmcid = pmc;
	sc.pm_count = value;

	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
		return (-1);
	return (0);
}

int
pmc_start(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_start_args;

	pmc_start_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTART, &pmc_start_args));
}

int
pmc_stop(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_stop_args;

	pmc_stop_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
}

int
pmc_width(pmc_id_t pmcid, uint32_t *width)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*width = cpu_info.pm_classes[i].pm_width;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

int
pmc_write(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcrw pmc_write_op;

	pmc_write_op.pm_pmcid = pmc;
	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
	pmc_write_op.pm_value = value;
	return (PMC_CALL(PMCRW, &pmc_write_op));
}

int
pmc_writelog(uint32_t userdata)
{
	struct pmc_op_writelog wl;

	wl.pm_userdata = userdata;
	return (PMC_CALL(WRITELOG, &wl));
}