/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/module.h>
#include <sys/pmc.h>
#include <sys/syscall.h>

#include <ctype.h>
#include <errno.h>
#include <err.h>
#include <fcntl.h>
#include <pmc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <sysexits.h>
#include <unistd.h>

#include "libpmcinternal.h"

/* Function prototypes */
#if defined(__amd64__) || defined(__i386__)
static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__arm__)
#if defined(__XSCALE__)
static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__aarch64__)
static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__mips__)
static int mips_allocate_pmc(enum pmc_event _pe, char* ctrspec,
			     struct pmc_op_pmcallocate *_pmc_config);
#endif /* __mips__ */
static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);

#if defined(__powerpc__)
static int powerpc_allocate_pmc(enum pmc_event _pe, char* ctrspec,
			     struct pmc_op_pmcallocate *_pmc_config);
#endif /* __powerpc__ */

#define	PMC_CALL(cmd, params)				\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))

/*
 * Event aliases provide a way for the user to ask for generic events
 * like "cache-misses", or "instructions-retired".  These aliases are
 * mapped to the appropriate canonical event descriptions using a
 * lookup table.
 */
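/*
 * For example (illustrative only; the per-CPU alias tables appear
 * further below), the K8 alias table maps the generic name
 * "branch-mispredicts" to the canonical specifier
 * "k8-fr-retired-taken-branches-mispredicted", which pmc_allocate()
 * then resolves against the K8 event table.
 */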
struct pmc_event_alias {
	const char	*pm_alias;
	const char	*pm_spec;
};

static const struct pmc_event_alias *pmc_mdep_event_aliases;

/*
 * The pmc_event_descr structure maps symbolic names known to the user
 * to integer codes used by the PMC KLD.
 */
struct pmc_event_descr {
	const char	*pm_ev_name;
	enum pmc_event	pm_ev_code;
};

/*
 * The pmc_class_descr structure maps class name prefixes for
 * event names to event tables and other PMC class data.
 */
struct pmc_class_descr {
	const char	*pm_evc_name;
	size_t		pm_evc_name_size;
	enum pmc_class	pm_evc_class;
	const struct pmc_event_descr *pm_evc_event_table;
	size_t		pm_evc_event_table_size;
	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
};

#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)

#undef	__PMC_EV
#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },

/*
 * PMC_CLASSDEP_TABLE(NAME, CLASS)
 *
 * Define a table mapping event names and aliases to HWPMC event IDs.
 */
#define	PMC_CLASSDEP_TABLE(N, C)					\
	static const struct pmc_event_descr N##_event_table[] =		\
	{								\
		__PMC_EV_##C()						\
	}

PMC_CLASSDEP_TABLE(iaf, IAF);
PMC_CLASSDEP_TABLE(k8, K8);
PMC_CLASSDEP_TABLE(xscale, XSCALE);
PMC_CLASSDEP_TABLE(armv7, ARMV7);
PMC_CLASSDEP_TABLE(armv8, ARMV8);
PMC_CLASSDEP_TABLE(beri, BERI);
PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
PMC_CLASSDEP_TABLE(mips74k, MIPS74K);
PMC_CLASSDEP_TABLE(octeon, OCTEON);
PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
PMC_CLASSDEP_TABLE(ppc970, PPC970);
PMC_CLASSDEP_TABLE(e500, E500);

static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];

#undef	__PMC_EV_ALIAS
#define	__PMC_EV_ALIAS(N,CODE)	{ N, PMC_EV_##CODE },

static const struct pmc_event_descr cortex_a8_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A8()
};

static const struct pmc_event_descr cortex_a9_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A9()
};

static const struct pmc_event_descr cortex_a53_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A53()
};

static const struct pmc_event_descr cortex_a57_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A57()
};

/*
 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
 *
 * Map a CPU to the PMC classes it supports.
 */
#define	PMC_MDEP_TABLE(N,C,...)					\
	static const enum pmc_class N##_pmc_classes[] = {	\
		PMC_CLASS_##C, __VA_ARGS__			\
	}

PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE);
PMC_MDEP_TABLE(beri, BERI, PMC_CLASS_SOFT, PMC_CLASS_BERI);
PMC_MDEP_TABLE(cortex_a8, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
PMC_MDEP_TABLE(cortex_a9, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(cortex_a57, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
PMC_MDEP_TABLE(mips74k, MIPS74K, PMC_CLASS_SOFT, PMC_CLASS_MIPS74K);
PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450, PMC_CLASS_TSC);
PMC_MDEP_TABLE(ppc970, PPC970, PMC_CLASS_SOFT, PMC_CLASS_PPC970, PMC_CLASS_TSC);
PMC_MDEP_TABLE(e500, E500, PMC_CLASS_SOFT, PMC_CLASS_E500, PMC_CLASS_TSC);
PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT);

static const struct pmc_event_descr tsc_event_table[] =
{
	__PMC_EV_TSC()
};

#undef	PMC_CLASS_TABLE_DESC
#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
static const struct pmc_class_descr NAME##_class_table_descr =	\
	{							\
		.pm_evc_name  = #CLASS "-",			\
		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
		.pm_evc_class = PMC_CLASS_##CLASS ,		\
		.pm_evc_event_table = EVENTS##_event_table ,	\
		.pm_evc_event_table_size = 			\
			PMC_EVENT_TABLE_SIZE(EVENTS),		\
		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
	}

#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
#endif
#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
#endif
#if	defined(__arm__)
#if	defined(__XSCALE__)
PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
#endif
PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7);
PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7);
#endif
#if	defined(__aarch64__)
PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
#endif
#if defined(__mips__)
PMC_CLASS_TABLE_DESC(beri, BERI, beri, mips);
PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips);
PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips);
#endif /* __mips__ */
#if defined(__powerpc__)
PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc);
#endif

static struct pmc_class_descr soft_class_table_descr =
{
	.pm_evc_name  = "SOFT-",
	.pm_evc_name_size = sizeof("SOFT-") - 1,
	.pm_evc_class = PMC_CLASS_SOFT,
	.pm_evc_event_table = NULL,
	.pm_evc_event_table_size = 0,
	.pm_evc_allocate_pmc = soft_allocate_pmc
};

#undef	PMC_CLASS_TABLE_DESC

static const struct pmc_class_descr **pmc_class_table;
#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass

static const enum pmc_class *pmc_mdep_class_list;
static size_t pmc_mdep_class_list_size;

/*
 * Mapping tables, mapping enumeration values to human readable
 * strings.
 */

static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

struct pmc_class_map {
	enum pmc_class	pm_class;
	const char	*pm_name;
};

static const struct pmc_class_map pmc_class_names[] = {
#undef	__PMC_CLASS
#define	__PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } ,
	__PMC_CLASSES()
};

struct pmc_cputype_map {
	enum pmc_cputype pm_cputype;
	const char	*pm_name;
};

static const struct pmc_cputype_map pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
	__PMC_CPUS()
};

static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

static const char * pmc_mode_names[] = {
#undef	__PMC_MODE
#define	__PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

static const char * pmc_state_names[] = {
#undef	__PMC_STATE
#define	__PMC_STATE(S) #S ,
	__PMC_STATES()
};

/*
 * Filled in by pmc_init().
 */
static int pmc_syscall = -1;
static struct pmc_cpuinfo cpu_info;
static struct pmc_op_getdyneventinfo soft_event_info;

/* Event masks for events */
struct pmc_masks {
	const char	*pm_name;
	const uint64_t	pm_value;
};
#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
#define	NULLMASK	{ .pm_name = NULL }

#if defined(__amd64__) || defined(__i386__)
static int
pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
{
	const struct pmc_masks *pm;
	char *q, *r;
	int c;

	if (pmask == NULL)	/* no mask keywords */
		return (-1);
	q = strchr(p, '=');	/* skip '=' */
	if (*++q == '\0')	/* no more data */
		return (-1);
	c = 0;			/* count of mask keywords seen */
	while ((r = strsep(&q, "+")) != NULL) {
		for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
		    pm++)
			;
		if (pm->pm_name == NULL) /* not found */
			return (-1);
		*evmask |= pm->pm_value;
		c++;
	}
	return (c);
}
#endif

#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
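
/*
 * Illustrative sketch of how pmc_parse_mask() above is used: given a
 * counter-spec keyword such as "mask=es+ds" and the k8_mask_lsrl table
 * defined below (es = bit 0, ds = bit 3), it would OR
 * (1 << 0) | (1 << 3) into *evmask and return 2, the number of mask
 * keywords seen.  A keyword not present in the table makes it return -1.
 */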

#if defined(__amd64__) || defined(__i386__)
/*
 * AMD K8 PMCs.
 *
 */

static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k8-dc-miss"),
	EV_ALIAS("ic-misses",		"k8-ic-miss"),
	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};

#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops,		0),
	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
	__K8MASK(store-pipe-excluding-junk-ops,		2),
	__K8MASK(add-pipe-junk-ops,			3),
	__K8MASK(multiply-pipe-junk-ops,		4),
	__K8MASK(store-pipe-junk-ops,			5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es,	0),
	__K8MASK(cs,	1),
	__K8MASK(ss,	2),
	__K8MASK(ds,	3),
	__K8MASK(fs,	4),
	__K8MASK(gs,	5),
	__K8MASK(hs,	6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions,	0),
	__K8MASK(cycles-in-request,	1),
	__K8MASK(cycles-to-complete,	2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid,	0),
	__K8MASK(shared,	1),
	__K8MASK(exclusive,	2),
	__K8MASK(owner,		3),
	__K8MASK(modified,	4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber,	0),
	__K8MASK(piggyback,	1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load,	0),
	__K8MASK(store,	1),
	__K8MASK(nta,	2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses,	0),
	__K8MASK(misses,	1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	__K8MASK(tag-snoop,	3),
	__K8MASK(cancelled,	4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim,	0),
	__K8MASK(victim-from-l2,	1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87,			0),
	__K8MASK(mmx-3dnow,		1),
	__K8MASK(packed-sse-sse2,	2),
	__K8MASK(scalar-sse-sse2,	3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0,		0),
	__K8MASK(low-op-pos-1,		1),
	__K8MASK(low-op-pos-2,		2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults,	0),
	__K8MASK(sse-retype-microfaults,	1),
	__K8MASK(sse-reclass-microfaults,	2),
	__K8MASK(sse-and-x87-microtraps,	3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit,	0),
	__K8MASK(page-miss,	1),
	__K8MASK(page-conflict,	2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround,		0),
	__K8MASK(read-to-write-turnaround,	1),
	__K8MASK(write-to-read-turnaround,	2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass,	0),
	__K8MASK(memory-controller-lo-pri-bypass,	1),
	__K8MASK(dram-controller-interface-bypass,	2),
	__K8MASK(dram-controller-queue-bypass,		3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte,	0),
	__K8MASK(nonpostwrszdword,	1),
	__K8MASK(postwrszbyte,		2),
	__K8MASK(postwrszdword,		3),
	__K8MASK(rdszbyte,		4),
	__K8MASK(rdszdword,		5),
	__K8MASK(rdmodwr,		6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss,				0),
	__K8MASK(probe-hit,				1),
	__K8MASK(probe-hit-dirty-no-memory-cancel,	2),
	__K8MASK(probe-hit-dirty-with-memory-cancel,	3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command,	0),
	__K8MASK(data,		1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop,		3),
	NULLMASK
};

#undef	__K8MASK

#define	K8_KW_COUNT	"count"
#define	K8_KW_EDGE	"edge"
#define	K8_KW_INV	"inv"
#define	K8_KW_MASK	"mask"
#define	K8_KW_OS	"os"
#define	K8_KW_USR	"usr"

static int
k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char		*e, *p, *q;
	int		n;
	uint32_t	count;
	uint64_t	evmask;
	const struct pmc_masks	*pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_amd.pm_amd_config = 0;

	pmask = NULL;
	evmask = 0;

#define	__K8SETMASK(M) pmask = k8_mask_##M

	/* setup parsing tables */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
		__K8SETMASK(fdfo);
		break;
	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
		__K8SETMASK(lsrl);
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		__K8SETMASK(llo);
		break;
	case PMC_EV_K8_DC_REFILL_FROM_L2:
	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
	case PMC_EV_K8_DC_COPYBACK:
		__K8SETMASK(dc);
		break;
	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
		__K8SETMASK(dobee);
		break;
	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
		__K8SETMASK(ddpi);
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		__K8SETMASK(dabl);
		break;
	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
		__K8SETMASK(bilr);
		break;
	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
		__K8SETMASK(bfrlm);
		break;
	case PMC_EV_K8_BU_FILL_INTO_L2:
		__K8SETMASK(bfil);
		break;
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
		__K8SETMASK(frfi);
		break;
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
		__K8SETMASK(frfdoi);
		break;
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		__K8SETMASK(ffe);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
		__K8SETMASK(nmcpae);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
		__K8SETMASK(nmct);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
		__K8SETMASK(nmcbs);
		break;
	case PMC_EV_K8_NB_SIZED_COMMANDS:
		__K8SETMASK(nsc);
		break;
	case PMC_EV_K8_NB_PROBE_RESULT:
		__K8SETMASK(npr);
		break;
	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
		__K8SETMASK(nhbb);
		break;

	default:
		break;		/* no options defined */
	}

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K8_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K8_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, K8_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, K8_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* other post processing */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		/* XXX only available in rev B and later */
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		/* XXX only available in rev C and later */
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		/* XXX CPU Rev A,B evmask is to be zero */
		if (evmask & (evmask - 1)) /* > 1 bit set */
			return (-1);
		if (evmask == 0) {
			evmask = 0x01; /* Rev C and later: #instrs */
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
		break;
	default:
		if (evmask == 0 && pmask != NULL) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_amd.pm_amd_config =
		    AMD_PMC_TO_UNITMASK(evmask);

	return (0);
}

#endif
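
/*
 * Illustrative example of the keyword parsing in k8_allocate_pmc() above
 * (not part of the library): a counter specification such as
 *
 *	"k8-dc-refill-from-l2,mask=exclusive+modified,usr"
 *
 * selects the k8_mask_dc table, ORs (1 << 2) | (1 << 4) into the event
 * mask via pmc_parse_mask(), sets PMC_CAP_QUALIFIER and PMC_CAP_USER,
 * and finally encodes the mask with AMD_PMC_TO_UNITMASK().
 */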

#if defined(__i386__) || defined(__amd64__)
static int
tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	if (pe != PMC_EV_TSC_TSC)
		return (-1);

	/* TSC events must be unqualified. */
	if (ctrspec && *ctrspec != '\0')
		return (-1);

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= PMC_CAP_READ;

	return (0);
}
#endif

static struct pmc_event_alias generic_aliases[] = {
	EV_ALIAS("instructions", "SOFT-CLOCK.HARD"),
	EV_ALIAS(NULL, NULL)
};

static int
soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	(void)ctrspec;
	(void)pmc_config;

	if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
		return (-1);

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	return (0);
}

#if	defined(__arm__)
#if	defined(__XSCALE__)

static struct pmc_event_alias xscale_aliases[] = {
	EV_ALIAS("branches",		"BRANCH_RETIRED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS("dc-misses",		"DC_MISS"),
	EV_ALIAS("ic-misses",		"IC_MISS"),
	EV_ALIAS("instructions",	"INSTR_RETIRED"),
	EV_ALIAS(NULL, NULL)
};
static int
xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

static struct pmc_event_alias cortex_a8_aliases[] = {
	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias cortex_a9_aliases[] = {
	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};

static int
armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

#if	defined(__aarch64__)
static struct pmc_event_alias cortex_a53_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
static struct pmc_event_alias cortex_a57_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
static int
arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

#if defined(__mips__)

static struct pmc_event_alias beri_aliases[] = {
	EV_ALIAS("instructions",	"INST"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias mips24k_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS("branches",		"BRANCH_COMPLETED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias mips74k_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS("branches",		"BRANCH_INSNS"),
	EV_ALIAS("branch-mispredicts",	"MISPREDICTED_BRANCH_INSNS"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias octeon_aliases[] = {
	EV_ALIAS("instructions",	"RET"),
	EV_ALIAS("branches",		"BR"),
	EV_ALIAS("branch-mispredicts",	"BRMIS"),
	EV_ALIAS(NULL, NULL)
};

#define	MIPS_KW_OS	"os"
#define	MIPS_KW_USR	"usr"
#define	MIPS_KW_ANYTHREAD	"anythread"

static int
mips_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
		  struct pmc_op_pmcallocate *pmc_config __unused)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, MIPS_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, MIPS_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, MIPS_KW_ANYTHREAD))
			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
		else
			return (-1);
	}

	return (0);
}

#endif /* __mips__ */

#if defined(__powerpc__)

static struct pmc_event_alias ppc7450_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_COMPLETED"),
	EV_ALIAS("branches",		"BRANCHES_COMPLETED"),
	EV_ALIAS("branch-mispredicts",	"MISPREDICTED_BRANCHES"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias ppc970_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_COMPLETED"),
	EV_ALIAS("cycles",		"CYCLES"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias e500_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_COMPLETED"),
	EV_ALIAS("cycles",		"CYCLES"),
	EV_ALIAS(NULL, NULL)
};

#define	POWERPC_KW_OS	"os"
#define	POWERPC_KW_USR	"usr"
#define	POWERPC_KW_ANYTHREAD	"anythread"

static int
powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
		     struct pmc_op_pmcallocate *pmc_config __unused)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, POWERPC_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, POWERPC_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
		else
			return (-1);
	}

	return (0);
}

#endif /* __powerpc__ */


/*
 * Match an event name `name' with its canonical form.
 *
 * Matches are case insensitive and spaces, periods, underscores and
 * hyphen characters are considered to match each other.
 *
 * Returns 1 for a match, 0 otherwise.
 */

static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	int cc, nc;
	const unsigned char *c, *n;

	c = (const unsigned char *) canonicalname;
	n = (const unsigned char *) name;

	for (; (nc = *n) && (cc = *c); n++, c++) {

		if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
		    (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
			continue;

		if (toupper(nc) == toupper(cc))
			continue;

		return (0);
	}

	if (*n == '\0' && *c == '\0')
		return (1);

	return (0);
}

/*
 * Match an event name against all the event names supported by a
 * PMC class.
 *
 * Returns an event descriptor pointer on match or NULL otherwise.
 */
static const struct pmc_event_descr *
pmc_match_event_class(const char *name,
    const struct pmc_class_descr *pcd)
{
	size_t n;
	const struct pmc_event_descr *ev;

	ev = pcd->pm_evc_event_table;
	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
		if (pmc_match_event_name(name, ev->pm_ev_name))
			return (ev);

	return (NULL);
}

static int
pmc_mdep_is_compatible_class(enum pmc_class pc)
{
	size_t n;

	for (n = 0; n < pmc_mdep_class_list_size; n++)
		if (pmc_mdep_class_list[n] == pc)
			return (1);
	return (0);
}

/*
 * API entry points
 */

int
pmc_allocate(const char *ctrspec, enum pmc_mode mode,
    uint32_t flags, int cpu, pmc_id_t *pmcid,
    uint64_t count)
{
	size_t n;
	int retval;
	char *r, *spec_copy;
	const char *ctrname;
	const struct pmc_event_descr *ev;
	const struct pmc_event_alias *alias;
	struct pmc_op_pmcallocate pmc_config;
	const struct pmc_class_descr *pcd;

	spec_copy = NULL;
	retval = -1;

	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
		errno = EINVAL;
		goto out;
	}
	bzero(&pmc_config, sizeof(pmc_config));
	pmc_config.pm_cpu   = cpu;
	pmc_config.pm_mode  = mode;
	pmc_config.pm_flags = flags;
	pmc_config.pm_count = count;
	if (PMC_IS_SAMPLING_MODE(mode))
		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
	/*
	 * Can we pull this straight from the pmu table?
	 */
	r = spec_copy = strdup(ctrspec);
	ctrname = strsep(&r, ",");
	if (pmc_pmu_enabled()) {
		if (pmc_pmu_pmcallocate(ctrname, &pmc_config) == 0) {
			if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0) {
				goto out;
			}
			retval = 0;
			*pmcid = pmc_config.pm_pmcid;
			goto out;
		}
		errx(EX_USAGE, "ERROR: pmc_pmu_allocate failed, check for ctrname %s\n", ctrname);
	} else {
		free(spec_copy);
		spec_copy = NULL;
	}

	/* replace an event alias with the canonical event specifier */
	if (pmc_mdep_event_aliases)
		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
			if (!strcasecmp(ctrspec, alias->pm_alias)) {
				spec_copy = strdup(alias->pm_spec);
				break;
			}

	if (spec_copy == NULL)
		spec_copy = strdup(ctrspec);

	r = spec_copy;
	ctrname = strsep(&r, ",");

	/*
	 * If an explicit class prefix was given by the user, restrict the
	 * search for the event to the specified PMC class.
	 */
	ev = NULL;
	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pcd && pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
		    strncasecmp(ctrname, pcd->pm_evc_name,
				pcd->pm_evc_name_size) == 0) {
			if ((ev = pmc_match_event_class(ctrname +
			    pcd->pm_evc_name_size, pcd)) == NULL) {
				errno = EINVAL;
				goto out;
			}
			break;
		}
	}

	/*
	 * Otherwise, search for this event in all compatible PMC
	 * classes.
	 */
	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pcd && pmc_mdep_is_compatible_class(pcd->pm_evc_class))
			ev = pmc_match_event_class(ctrname, pcd);
	}

	if (ev == NULL) {
		errno = EINVAL;
		goto out;
	}

	pmc_config.pm_ev    = ev->pm_ev_code;
	pmc_config.pm_class = pcd->pm_evc_class;

	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
		errno = EINVAL;
		goto out;
	}

	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
		goto out;

	*pmcid = pmc_config.pm_pmcid;

	retval = 0;

 out:
	if (spec_copy)
		free(spec_copy);

	return (retval);
}

int
pmc_attach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_attach_args;

	pmc_attach_args.pm_pmc = pmc;
	pmc_attach_args.pm_pid = pid;

	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
}

int
pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*caps = cpu_info.pm_classes[i].pm_caps;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

int
pmc_configure_logfile(int fd)
{
	struct pmc_op_configurelog cla;

	cla.pm_logfd = fd;
	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
		return (-1);
	return (0);
}

int
pmc_cpuinfo(const struct pmc_cpuinfo **pci)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	*pci = &cpu_info;
	return (0);
}

int
pmc_detach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_detach_args;

	pmc_detach_args.pm_pmc = pmc;
	pmc_detach_args.pm_pid = pid;
	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
}

int
pmc_disable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_DISABLED;
	return (PMC_CALL(PMCADMIN, &ssa));
}

int
pmc_enable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_FREE;
	return (PMC_CALL(PMCADMIN, &ssa));
}
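
/*
 * Illustrative usage of the API entry points above (a sketch only, not
 * part of the library; PMC_CPU_ANY and the "instructions" alias are
 * assumptions that depend on <sys/pmc.h> and on the CPU's alias table):
 *
 *	pmc_id_t pmcid;
 *	pmc_value_t v;
 *
 *	if (pmc_init() < 0)
 *		err(EX_UNAVAILABLE, "pmc_init");
 *	if (pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY,
 *	    &pmcid, 0) < 0)
 *		err(EX_OSERR, "pmc_allocate");
 *	if (pmc_attach(pmcid, 0) < 0 || pmc_start(pmcid) < 0)
 *		err(EX_OSERR, "pmc_attach/pmc_start");
 *	... run the workload being measured ...
 *	if (pmc_stop(pmcid) < 0 || pmc_read(pmcid, &v) < 0)
 *		err(EX_OSERR, "pmc_stop/pmc_read");
 *	(void) pmc_release(pmcid);
 *
 * See pmc(3) and hwpmc(4) for the authoritative interface description.
 */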

/*
 * Return a list of events known to a given PMC class.  'cl' is the
 * PMC class identifier, 'eventnames' is the returned list of 'const
 * char *' pointers pointing to the names of the events. 'nevents' is
 * the number of event name pointers returned.
 *
 * The space for 'eventnames' is allocated using malloc(3).  The caller
 * is responsible for freeing this space when done.
 */
int
pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
    int *nevents)
{
	int count;
	const char **names;
	const struct pmc_event_descr *ev;

	switch (cl)
	{
	case PMC_CLASS_IAF:
		ev = iaf_event_table;
		count = PMC_EVENT_TABLE_SIZE(iaf);
		break;
	case PMC_CLASS_TSC:
		ev = tsc_event_table;
		count = PMC_EVENT_TABLE_SIZE(tsc);
		break;
	case PMC_CLASS_K8:
		ev = k8_event_table;
		count = PMC_EVENT_TABLE_SIZE(k8);
		break;
	case PMC_CLASS_XSCALE:
		ev = xscale_event_table;
		count = PMC_EVENT_TABLE_SIZE(xscale);
		break;
	case PMC_CLASS_ARMV7:
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		}
		break;
	case PMC_CLASS_ARMV8:
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		}
		break;
	case PMC_CLASS_BERI:
		ev = beri_event_table;
		count = PMC_EVENT_TABLE_SIZE(beri);
		break;
	case PMC_CLASS_MIPS24K:
		ev = mips24k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips24k);
		break;
	case PMC_CLASS_MIPS74K:
		ev = mips74k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips74k);
		break;
	case PMC_CLASS_OCTEON:
		ev = octeon_event_table;
		count = PMC_EVENT_TABLE_SIZE(octeon);
		break;
	case PMC_CLASS_PPC7450:
		ev = ppc7450_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc7450);
		break;
	case PMC_CLASS_PPC970:
		ev = ppc970_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc970);
		break;
	case PMC_CLASS_E500:
		ev = e500_event_table;
		count = PMC_EVENT_TABLE_SIZE(e500);
		break;
	case PMC_CLASS_SOFT:
		ev = soft_event_table;
		count = soft_event_info.pm_nevent;
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	if ((names = malloc(count * sizeof(const char *))) == NULL)
		return (-1);

	*eventnames = names;
	*nevents = count;

	for (;count--; ev++, names++)
		*names = ev->pm_ev_name;

	return (0);
}

int
pmc_flush_logfile(void)
{
	return (PMC_CALL(FLUSHLOG,0));
}

int
pmc_close_logfile(void)
{
	return (PMC_CALL(CLOSELOG,0));
}

int
pmc_get_driver_stats(struct pmc_driverstats *ds)
{
	struct pmc_op_getdriverstats gms;

	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
		return (-1);

	/* copy out fields in the current userland<->library interface */
	ds->pm_intr_ignored    = gms.pm_intr_ignored;
	ds->pm_intr_processed  = gms.pm_intr_processed;
	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
	ds->pm_syscalls        = gms.pm_syscalls;
	ds->pm_syscall_errors  = gms.pm_syscall_errors;
	ds->pm_buffer_requests = gms.pm_buffer_requests;
	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
	ds->pm_log_sweeps      = gms.pm_log_sweeps;
	return (0);
}

int
pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
{
	struct pmc_op_getmsr gm;

	gm.pm_pmcid = pmc;
	if (PMC_CALL(PMCGETMSR, &gm) < 0)
		return (-1);
	*msr = gm.pm_msr;
	return (0);
}

int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;
#if defined(__amd64__) || defined(__i386__)
	int cpu_has_iaf_counters;
	unsigned int t;
#endif

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno  = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	bzero(&op_cpu_info, sizeof(op_cpu_info));
	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
	for (n = 0; n < op_cpu_info.pm_nclass; n++)
		memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n],
		    sizeof(cpu_info.pm_classes[n]));

	pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
	    sizeof(struct pmc_class_descr *));

	if (pmc_class_table == NULL)
		return (-1);

	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
		pmc_class_table[n] = NULL;

	/*
	 * Get soft events list.
	 */
	soft_event_info.pm_class = PMC_CLASS_SOFT;
	if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0)
		return (pmc_syscall = -1);

	/* Map soft events to static list. */
	for (n = 0; n < soft_event_info.pm_nevent; n++) {
		soft_event_table[n].pm_ev_name =
		    soft_event_info.pm_events[n].pm_ev_name;
		soft_event_table[n].pm_ev_code =
		    soft_event_info.pm_events[n].pm_ev_code;
	}
	soft_class_table_descr.pm_evc_event_table_size =
	    soft_event_info.pm_nevent;
	soft_class_table_descr.pm_evc_event_table =
	    soft_event_table;

	/*
	 * Fill in the class table.
	 */
	n = 0;

	/* Fill soft events information. */
	pmc_class_table[n++] = &soft_class_table_descr;
#if defined(__amd64__) || defined(__i386__)
	if (cpu_info.pm_cputype != PMC_CPU_GENERIC)
		pmc_class_table[n++] = &tsc_class_table_descr;

	/*
	 * Check if this CPU has fixed function counters.
	 */
	cpu_has_iaf_counters = 0;
	for (t = 0; t < cpu_info.pm_nclass; t++)
		if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF &&
		    cpu_info.pm_classes[t].pm_num > 0)
			cpu_has_iaf_counters = 1;
#endif

#define	PMC_MDEP_INIT(C) do {					\
		pmc_mdep_event_aliases    = C##_aliases;	\
		pmc_mdep_class_list  = C##_pmc_classes;		\
		pmc_mdep_class_list_size =			\
		    PMC_TABLE_SIZE(C##_pmc_classes);		\
	} while (0)

#define	PMC_MDEP_INIT_INTEL_V2(C) do {					\
		PMC_MDEP_INIT(C);					\
		pmc_class_table[n++] = &iaf_class_table_descr;		\
		if (!cpu_has_iaf_counters)				\
			pmc_mdep_event_aliases =			\
				C##_aliases_without_iaf;		\
		pmc_class_table[n] = &C##_class_table_descr;		\
	} while (0)

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		pmc_class_table[n] = &k8_class_table_descr;
		break;
#endif
	case PMC_CPU_GENERIC:
		PMC_MDEP_INIT(generic);
		break;
#if defined(__arm__)
#if defined(__XSCALE__)
	case PMC_CPU_INTEL_XSCALE:
		PMC_MDEP_INIT(xscale);
		pmc_class_table[n] = &xscale_class_table_descr;
		break;
#endif
	case PMC_CPU_ARMV7_CORTEX_A8:
		PMC_MDEP_INIT(cortex_a8);
		pmc_class_table[n] = &cortex_a8_class_table_descr;
		break;
	case PMC_CPU_ARMV7_CORTEX_A9:
		PMC_MDEP_INIT(cortex_a9);
		pmc_class_table[n] = &cortex_a9_class_table_descr;
		break;
#endif
#if defined(__aarch64__)
	case PMC_CPU_ARMV8_CORTEX_A53:
		PMC_MDEP_INIT(cortex_a53);
		pmc_class_table[n] = &cortex_a53_class_table_descr;
		break;
	case PMC_CPU_ARMV8_CORTEX_A57:
		PMC_MDEP_INIT(cortex_a57);
		pmc_class_table[n] = &cortex_a57_class_table_descr;
		break;
#endif
#if defined(__mips__)
	case PMC_CPU_MIPS_BERI:
		PMC_MDEP_INIT(beri);
		pmc_class_table[n] = &beri_class_table_descr;
		break;
	case PMC_CPU_MIPS_24K:
		PMC_MDEP_INIT(mips24k);
		pmc_class_table[n] = &mips24k_class_table_descr;
		break;
	case PMC_CPU_MIPS_74K:
		PMC_MDEP_INIT(mips74k);
		pmc_class_table[n] = &mips74k_class_table_descr;
		break;
	case PMC_CPU_MIPS_OCTEON:
		PMC_MDEP_INIT(octeon);
		pmc_class_table[n] = &octeon_class_table_descr;
		break;
#endif /* __mips__ */
#if defined(__powerpc__)
	case PMC_CPU_PPC_7450:
		PMC_MDEP_INIT(ppc7450);
		pmc_class_table[n] = &ppc7450_class_table_descr;
		break;
	case PMC_CPU_PPC_970:
		PMC_MDEP_INIT(ppc970);
		pmc_class_table[n] = &ppc970_class_table_descr;
		break;
	case PMC_CPU_PPC_E500:
		PMC_MDEP_INIT(e500);
		pmc_class_table[n] = &e500_class_table_descr;
		break;
#endif
	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about.  This shouldn't happen since the abi version check
		 * should have caught this.
		 */
#if defined(__amd64__) || defined(__i386__)
		break;
#endif
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}
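
/*
 * Note (added commentary): pmc_init() is expected to be called, and to
 * succeed, before any other function in this file is used.  It resolves
 * the hwpmc(4) system call number via modfind(PMC_MODULE_NAME), checks
 * the module's ABI major version against PMC_VERSION, and fills in
 * cpu_info, the soft event table and pmc_class_table used by
 * pmc_allocate() and the name-lookup helpers below.
 */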

const char *
pmc_name_of_capability(enum pmc_caps cap)
{
	int	i;

	/*
	 * 'cap' should have a single bit set and should be in
	 * range.
	 */
	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
	    cap > PMC_CAP_LAST) {
		errno = EINVAL;
		return (NULL);
	}

	i = ffs(cap);
	return (pmc_capability_names[i - 1]);
}

const char *
pmc_name_of_class(enum pmc_class pc)
{
	size_t n;

	for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++)
		if (pc == pmc_class_names[n].pm_class)
			return (pmc_class_names[n].pm_name);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_cputype(enum pmc_cputype cp)
{
	size_t n;

	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
		if (cp == pmc_cputype_names[n].pm_cputype)
			return (pmc_cputype_names[n].pm_name);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_disposition(enum pmc_disp pd)
{
	if ((int) pd >= PMC_DISP_FIRST &&
	    pd <= PMC_DISP_LAST)
		return (pmc_disposition_names[pd]);

	errno = EINVAL;
	return (NULL);
}

const char *
_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
{
	const struct pmc_event_descr *ev, *evfence;

	ev = evfence = NULL;
	if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
		ev = k8_event_table;
		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
	} else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
		ev = xscale_event_table;
		evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
	} else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_BERI_FIRST && pe <= PMC_EV_BERI_LAST) {
		ev = beri_event_table;
		evfence = beri_event_table + PMC_EVENT_TABLE_SIZE(beri);
	} else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
		ev = mips24k_event_table;
		evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
	} else if (pe >= PMC_EV_MIPS74K_FIRST && pe <= PMC_EV_MIPS74K_LAST) {
		ev = mips74k_event_table;
		evfence = mips74k_event_table + PMC_EVENT_TABLE_SIZE(mips74k);
	} else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) {
		ev = octeon_event_table;
		evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon);
	} else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
		ev = ppc7450_event_table;
		evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
	} else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
		ev = ppc970_event_table;
		evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
	} else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) {
		ev = e500_event_table;
		evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500);
	} else if (pe == PMC_EV_TSC_TSC) {
		ev = tsc_event_table;
		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
	} else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
		ev = soft_event_table;
		evfence = soft_event_table + soft_event_info.pm_nevent;
	}

	for (; ev != evfence; ev++)
		if (pe == ev->pm_ev_code)
			return (ev->pm_ev_name);

	return (NULL);
}

const char *
pmc_name_of_event(enum pmc_event pe)
{
	const char *n;

	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
		return (n);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_mode(enum pmc_mode pm)
{
	if ((int) pm >= PMC_MODE_FIRST &&
	    pm <= PMC_MODE_LAST)
		return (pmc_mode_names[pm]);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_state(enum pmc_state ps)
{
	if ((int) ps >= PMC_STATE_FIRST &&
	    ps <= PMC_STATE_LAST)
		return (pmc_state_names[ps]);

	errno = EINVAL;
	return (NULL);
}

int
pmc_ncpu(void)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	return (cpu_info.pm_ncpu);
}

int
pmc_npmc(int cpu)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
		errno = EINVAL;
		return (-1);
	}

	return (cpu_info.pm_npmc);
}

int
pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
{
	int nbytes, npmc;
	struct pmc_op_getpmcinfo *pmci;

	if ((npmc = pmc_npmc(cpu)) < 0)
		return (-1);

	nbytes = sizeof(struct pmc_op_getpmcinfo) +
	    npmc * sizeof(struct pmc_info);

	if ((pmci = calloc(1, nbytes)) == NULL)
		return (-1);

	pmci->pm_cpu  = cpu;

	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
		free(pmci);
		return (-1);
	}

	/* kernel<->library, library<->userland interfaces are identical */
	*ppmci = (struct pmc_pmcinfo *) pmci;
	return (0);
}

int
pmc_read(pmc_id_t pmc, pmc_value_t *value)
{
	struct pmc_op_pmcrw pmc_read_op;

	pmc_read_op.pm_pmcid = pmc;
	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
	pmc_read_op.pm_value = -1;

	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
		return (-1);

	*value = pmc_read_op.pm_value;
	return (0);
}

int
pmc_release(pmc_id_t pmc)
{
	struct pmc_op_simple	pmc_release_args;

	pmc_release_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
}

int
pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
{
	struct pmc_op_pmcrw pmc_rw_op;

	pmc_rw_op.pm_pmcid = pmc;
	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
	pmc_rw_op.pm_value = newvalue;

	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
		return (-1);

	*oldvaluep = pmc_rw_op.pm_value;
	return (0);
}

int
pmc_set(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcsetcount sc;

	sc.pm_pmcid = pmc;
	sc.pm_count = value;

	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
		return (-1);
	return (0);
}

int
pmc_start(pmc_id_t pmc)
{
	struct pmc_op_simple	pmc_start_args;

	pmc_start_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTART, &pmc_start_args));
}

int
pmc_stop(pmc_id_t pmc)
{
	struct pmc_op_simple	pmc_stop_args;

	pmc_stop_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
}

int
pmc_width(pmc_id_t pmcid, uint32_t *width)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*width = cpu_info.pm_classes[i].pm_width;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

int
pmc_write(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcrw pmc_write_op;

	pmc_write_op.pm_pmcid = pmc;
	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
	pmc_write_op.pm_value = value;
	return (PMC_CALL(PMCRW, &pmc_write_op));
}

int
pmc_writelog(uint32_t userdata)
{
	struct pmc_op_writelog wl;

	wl.pm_userdata = userdata;
	return (PMC_CALL(WRITELOG, &wl));
}