/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/module.h>
#include <sys/pmc.h>
#include <sys/syscall.h>

#include <ctype.h>
#include <errno.h>
#include <err.h>
#include <fcntl.h>
#include <pmc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <sysexits.h>
#include <unistd.h>

#include "libpmcinternal.h"

/* Function prototypes */
#if defined(__amd64__) || defined(__i386__)
static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__arm__)
#if defined(__XSCALE__)
static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__aarch64__)
static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__mips__)
static int mips_allocate_pmc(enum pmc_event _pe, char *ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif /* __mips__ */
static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);

#if defined(__powerpc__)
static int powerpc_allocate_pmc(enum pmc_event _pe, char *ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif /* __powerpc__ */

#define	PMC_CALL(cmd, params)				\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))

/*
 * Event aliases provide a way for the user to ask for generic events
 * like "cache-misses", or "instructions-retired".  These aliases are
 * mapped to the appropriate canonical event descriptions using a
 * lookup table.
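 * For example, on an AMD K8 the alias "branches" expands to the canonical
 * event "k8-fr-retired-taken-branches" (see k8_aliases below).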
 */
struct pmc_event_alias {
	const char	*pm_alias;
	const char	*pm_spec;
};

static const struct pmc_event_alias *pmc_mdep_event_aliases;

/*
 * The pmc_event_descr structure maps symbolic names known to the user
 * to integer codes used by the PMC KLD.
 */
struct pmc_event_descr {
	const char	*pm_ev_name;
	enum pmc_event	pm_ev_code;
};

/*
 * The pmc_class_descr structure maps class name prefixes for
 * event names to event tables and other PMC class data.
 */
struct pmc_class_descr {
	const char	*pm_evc_name;
	size_t		pm_evc_name_size;
	enum pmc_class	pm_evc_class;
	const struct pmc_event_descr *pm_evc_event_table;
	size_t		pm_evc_event_table_size;
	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
};

#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)

#undef	__PMC_EV
#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },

/*
 * PMC_CLASSDEP_TABLE(NAME, CLASS)
 *
 * Define a table mapping event names and aliases to HWPMC event IDs.
 */
#define	PMC_CLASSDEP_TABLE(N, C)				\
	static const struct pmc_event_descr N##_event_table[] =	\
	{							\
		__PMC_EV_##C()					\
	}

PMC_CLASSDEP_TABLE(iaf, IAF);
PMC_CLASSDEP_TABLE(k8, K8);
PMC_CLASSDEP_TABLE(xscale, XSCALE);
PMC_CLASSDEP_TABLE(armv7, ARMV7);
PMC_CLASSDEP_TABLE(armv8, ARMV8);
PMC_CLASSDEP_TABLE(beri, BERI);
PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
PMC_CLASSDEP_TABLE(mips74k, MIPS74K);
PMC_CLASSDEP_TABLE(octeon, OCTEON);
PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
PMC_CLASSDEP_TABLE(ppc970, PPC970);
PMC_CLASSDEP_TABLE(e500, E500);

static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];

#undef	__PMC_EV_ALIAS
#define	__PMC_EV_ALIAS(N,CODE)	{ N, PMC_EV_##CODE },

static const struct pmc_event_descr cortex_a8_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A8()
};

static const struct pmc_event_descr cortex_a9_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A9()
};

static const struct pmc_event_descr cortex_a53_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A53()
};

static const struct pmc_event_descr cortex_a57_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A57()
};

static const struct pmc_event_descr cortex_a76_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A76()
};

/*
 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
 *
 * Map a CPU to the PMC classes it supports.
 */
#define	PMC_MDEP_TABLE(N,C,...)					\
	static const enum pmc_class N##_pmc_classes[] = {	\
		PMC_CLASS_##C, __VA_ARGS__			\
	}

PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE);
PMC_MDEP_TABLE(beri, BERI, PMC_CLASS_SOFT, PMC_CLASS_BERI);
PMC_MDEP_TABLE(cortex_a8, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
PMC_MDEP_TABLE(cortex_a9, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(cortex_a57, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(cortex_a76, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
PMC_MDEP_TABLE(mips74k, MIPS74K, PMC_CLASS_SOFT, PMC_CLASS_MIPS74K);
PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450, PMC_CLASS_TSC);
PMC_MDEP_TABLE(ppc970, PPC970, PMC_CLASS_SOFT, PMC_CLASS_PPC970, PMC_CLASS_TSC);
PMC_MDEP_TABLE(e500, E500, PMC_CLASS_SOFT, PMC_CLASS_E500, PMC_CLASS_TSC);
PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT);

static const struct pmc_event_descr tsc_event_table[] =
{
	__PMC_EV_TSC()
};

#undef	PMC_CLASS_TABLE_DESC
#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
static const struct pmc_class_descr NAME##_class_table_descr =	\
	{							\
		.pm_evc_name = #CLASS "-",			\
		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
		.pm_evc_class = PMC_CLASS_##CLASS ,		\
		.pm_evc_event_table = EVENTS##_event_table ,	\
		.pm_evc_event_table_size =			\
			PMC_EVENT_TABLE_SIZE(EVENTS),		\
		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
	}

#if defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
#endif
#if defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
#endif
#if defined(__arm__)
#if defined(__XSCALE__)
PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
#endif
PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7);
PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7);
#endif
#if defined(__aarch64__)
PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
PMC_CLASS_TABLE_DESC(cortex_a76, ARMV8, cortex_a76, arm64);
#endif
#if defined(__mips__)
PMC_CLASS_TABLE_DESC(beri, BERI, beri, mips);
PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips);
PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips);
#endif /* __mips__ */
#if defined(__powerpc__)
PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc);
#endif

static struct pmc_class_descr soft_class_table_descr =
{
	.pm_evc_name = "SOFT-",
	.pm_evc_name_size = sizeof("SOFT-") - 1,
	.pm_evc_class = PMC_CLASS_SOFT,
	.pm_evc_event_table = NULL,
	.pm_evc_event_table_size = 0,
	.pm_evc_allocate_pmc = soft_allocate_pmc
};

#undef	PMC_CLASS_TABLE_DESC

static const struct pmc_class_descr **pmc_class_table;
#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass

static const enum pmc_class *pmc_mdep_class_list;
static size_t pmc_mdep_class_list_size;

/*
 * Mapping tables, mapping enumeration values to human readable
 * strings.
 */

static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

struct pmc_class_map {
	enum pmc_class	pm_class;
	const char	*pm_name;
};

static const struct pmc_class_map pmc_class_names[] = {
#undef	__PMC_CLASS
#define	__PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } ,
	__PMC_CLASSES()
};

struct pmc_cputype_map {
	enum pmc_cputype pm_cputype;
	const char	*pm_name;
};

static const struct pmc_cputype_map pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
	__PMC_CPUS()
};

static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

static const char * pmc_mode_names[] = {
#undef	__PMC_MODE
#define	__PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

static const char * pmc_state_names[] = {
#undef	__PMC_STATE
#define	__PMC_STATE(S)	#S ,
	__PMC_STATES()
};

/*
 * Filled in by pmc_init().
 */
static int pmc_syscall = -1;
static struct pmc_cpuinfo cpu_info;
static struct pmc_op_getdyneventinfo soft_event_info;

/* Event masks for events */
struct pmc_masks {
	const char	*pm_name;
	const uint64_t	pm_value;
};
#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
#define	NULLMASK	{ .pm_name = NULL }

#if defined(__amd64__) || defined(__i386__)
static int
pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
{
	const struct pmc_masks *pm;
	char *q, *r;
	int c;

	if (pmask == NULL)	/* no mask keywords */
		return (-1);
	q = strchr(p, '=');	/* skip '=' */
	if (*++q == '\0')	/* no more data */
		return (-1);
	c = 0;			/* count of mask keywords seen */
	while ((r = strsep(&q, "+")) != NULL) {
		for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
		    pm++)
			;
		if (pm->pm_name == NULL) /* not found */
			return (-1);
		*evmask |= pm->pm_value;
		c++;
	}
	return (c);
}
#endif

#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }

#if defined(__amd64__) || defined(__i386__)
/*
 * AMD K8 PMCs.
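 *
 * Event specifiers accept a comma-separated list of qualifiers such as
 * "count=N", "edge", "inv", "mask=M1+M2", "os" and "usr"; these are parsed
 * by k8_allocate_pmc() below.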
 *
 */

static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches", "k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles", "tsc"),
	EV_ALIAS("dc-misses", "k8-dc-miss"),
	EV_ALIAS("ic-misses", "k8-ic-miss"),
	EV_ALIAS("instructions", "k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts", "k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles", "k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};

#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops, 0),
	__K8MASK(multiply-pipe-excluding-junk-ops, 1),
	__K8MASK(store-pipe-excluding-junk-ops, 2),
	__K8MASK(add-pipe-junk-ops, 3),
	__K8MASK(multiply-pipe-junk-ops, 4),
	__K8MASK(store-pipe-junk-ops, 5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es, 0),
	__K8MASK(cs, 1),
	__K8MASK(ss, 2),
	__K8MASK(ds, 3),
	__K8MASK(fs, 4),
	__K8MASK(gs, 5),
	__K8MASK(hs, 6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions, 0),
	__K8MASK(cycles-in-request, 1),
	__K8MASK(cycles-to-complete, 2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid, 0),
	__K8MASK(shared, 1),
	__K8MASK(exclusive, 2),
	__K8MASK(owner, 3),
	__K8MASK(modified, 4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber, 0),
	__K8MASK(piggyback, 1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load, 0),
	__K8MASK(store, 1),
	__K8MASK(nta, 2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses, 0),
	__K8MASK(misses, 1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill, 0),
	__K8MASK(dc-fill, 1),
	__K8MASK(tlb-reload, 2),
	__K8MASK(tag-snoop, 3),
	__K8MASK(cancelled, 4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill, 0),
	__K8MASK(dc-fill, 1),
	__K8MASK(tlb-reload, 2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim, 0),
	__K8MASK(victim-from-l2, 1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87, 0),
	__K8MASK(mmx-3dnow, 1),
	__K8MASK(packed-sse-sse2, 2),
	__K8MASK(scalar-sse-sse2, 3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0, 0),
	__K8MASK(low-op-pos-1, 1),
	__K8MASK(low-op-pos-2, 2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults, 0),
	__K8MASK(sse-retype-microfaults, 1),
	__K8MASK(sse-reclass-microfaults, 2),
	__K8MASK(sse-and-x87-microtraps, 3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit, 0),
	__K8MASK(page-miss, 1),
	__K8MASK(page-conflict, 2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround, 0),
	__K8MASK(read-to-write-turnaround, 1),
	__K8MASK(write-to-read-turnaround, 2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass, 0),
	__K8MASK(memory-controller-lo-pri-bypass, 1),
	__K8MASK(dram-controller-interface-bypass, 2),
	__K8MASK(dram-controller-queue-bypass, 3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte, 0),
	__K8MASK(nonpostwrszdword, 1),
	__K8MASK(postwrszbyte, 2),
	__K8MASK(postwrszdword, 3),
	__K8MASK(rdszbyte, 4),
	__K8MASK(rdszdword, 5),
	__K8MASK(rdmodwr, 6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss, 0),
	__K8MASK(probe-hit, 1),
	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command, 0),
	__K8MASK(data, 1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop, 3),
	NULLMASK
};

#undef	__K8MASK

#define	K8_KW_COUNT	"count"
#define	K8_KW_EDGE	"edge"
#define	K8_KW_INV	"inv"
#define	K8_KW_MASK	"mask"
#define	K8_KW_OS	"os"
#define	K8_KW_USR	"usr"

static int
k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	int n;
	uint32_t count;
	uint64_t evmask;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_amd.pm_amd_config = 0;

	pmask = NULL;
	evmask = 0;

#define	__K8SETMASK(M) pmask = k8_mask_##M

	/* setup parsing tables */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
		__K8SETMASK(fdfo);
		break;
	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
		__K8SETMASK(lsrl);
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		__K8SETMASK(llo);
		break;
	case PMC_EV_K8_DC_REFILL_FROM_L2:
	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
	case PMC_EV_K8_DC_COPYBACK:
		__K8SETMASK(dc);
		break;
	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
		__K8SETMASK(dobee);
		break;
	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
		__K8SETMASK(ddpi);
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		__K8SETMASK(dabl);
		break;
	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
		__K8SETMASK(bilr);
		break;
	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
		__K8SETMASK(bfrlm);
		break;
	case PMC_EV_K8_BU_FILL_INTO_L2:
		__K8SETMASK(bfil);
		break;
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
		__K8SETMASK(frfi);
		break;
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
		__K8SETMASK(frfdoi);
		break;
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		__K8SETMASK(ffe);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
		__K8SETMASK(nmcpae);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
		__K8SETMASK(nmct);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
		__K8SETMASK(nmcbs);
		break;
	case PMC_EV_K8_NB_SIZED_COMMANDS:
		__K8SETMASK(nsc);
		break;
	case PMC_EV_K8_NB_PROBE_RESULT:
		__K8SETMASK(npr);
		break;
	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
		__K8SETMASK(nhbb);
		break;

	default:
		break;		/* no options defined */
	}

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K8_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K8_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, K8_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, K8_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* other post processing */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		/* XXX only available in rev B and later */
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		/* XXX only available in rev C and later */
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		/* XXX CPU Rev A,B evmask is to be zero */
		if (evmask & (evmask - 1)) /* > 1 bit set */
			return (-1);
		if (evmask == 0) {
			evmask = 0x01; /* Rev C and later: #instrs */
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
		break;
	default:
		if (evmask == 0 && pmask != NULL) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_amd.pm_amd_config =
		    AMD_PMC_TO_UNITMASK(evmask);

	return (0);
}

#endif

#if defined(__i386__) || defined(__amd64__)
static int
tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	if (pe != PMC_EV_TSC_TSC)
		return (-1);

	/* TSC events must be unqualified. */
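	/* An event specification such as "tsc,os" is therefore rejected. */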
	if (ctrspec && *ctrspec != '\0')
		return (-1);

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= PMC_CAP_READ;

	return (0);
}
#endif

static struct pmc_event_alias generic_aliases[] = {
	EV_ALIAS("instructions", "SOFT-CLOCK.HARD"),
	EV_ALIAS(NULL, NULL)
};

static int
soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	(void)ctrspec;
	(void)pmc_config;

	if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
		return (-1);

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	return (0);
}

#if defined(__arm__)
#if defined(__XSCALE__)

static struct pmc_event_alias xscale_aliases[] = {
	EV_ALIAS("branches", "BRANCH_RETIRED"),
	EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"),
	EV_ALIAS("dc-misses", "DC_MISS"),
	EV_ALIAS("ic-misses", "IC_MISS"),
	EV_ALIAS("instructions", "INSTR_RETIRED"),
	EV_ALIAS(NULL, NULL)
};

static int
xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

static struct pmc_event_alias cortex_a8_aliases[] = {
	EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"),
	EV_ALIAS("instructions", "INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias cortex_a9_aliases[] = {
	EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"),
	EV_ALIAS("instructions", "INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};

static int
armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

#if defined(__aarch64__)
static struct pmc_event_alias cortex_a53_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
static struct pmc_event_alias cortex_a57_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
static struct pmc_event_alias cortex_a76_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
static int
arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

#if defined(__mips__)

static struct pmc_event_alias beri_aliases[] = {
	EV_ALIAS("instructions", "INST"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias mips24k_aliases[] = {
	EV_ALIAS("instructions", "INSTR_EXECUTED"),
	EV_ALIAS("branches", "BRANCH_COMPLETED"),
	EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias mips74k_aliases[] = {
	EV_ALIAS("instructions", "INSTR_EXECUTED"),
	EV_ALIAS("branches", "BRANCH_INSNS"),
	EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCH_INSNS"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias octeon_aliases[] = {
	EV_ALIAS("instructions", "RET"),
	EV_ALIAS("branches", "BR"),
	EV_ALIAS("branch-mispredicts", "BRMIS"),
	EV_ALIAS(NULL, NULL)
};

#define	MIPS_KW_OS	"os"
#define	MIPS_KW_USR	"usr"
#define	MIPS_KW_ANYTHREAD	"anythread"

static int
mips_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, MIPS_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, MIPS_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, MIPS_KW_ANYTHREAD))
			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
		else
			return (-1);
	}

	return (0);
}

#endif /* __mips__ */

#if defined(__powerpc__)

static struct pmc_event_alias ppc7450_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("branches", "BRANCHES_COMPLETED"),
	EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCHES"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias ppc970_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles", "CYCLES"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias e500_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles", "CYCLES"),
	EV_ALIAS(NULL, NULL)
};

#define	POWERPC_KW_OS	"os"
#define	POWERPC_KW_USR	"usr"
#define	POWERPC_KW_ANYTHREAD	"anythread"

static int
powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, POWERPC_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, POWERPC_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
		else
			return (-1);
	}

	return (0);
}

#endif /* __powerpc__ */


/*
 * Match an event name `name' with its canonical form.
 *
 * Matches are case insensitive and spaces, periods, underscores and
 * hyphen characters are considered to match each other.
 *
 * Returns 1 for a match, 0 otherwise.
 */

static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	int cc, nc;
	const unsigned char *c, *n;

	c = (const unsigned char *) canonicalname;
	n = (const unsigned char *) name;

	for (; (nc = *n) && (cc = *c); n++, c++) {

		if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
		    (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
			continue;

		if (toupper(nc) == toupper(cc))
			continue;

		return (0);
	}

	if (*n == '\0' && *c == '\0')
		return (1);

	return (0);
}

/*
 * Match an event name against all the event names supported by a
 * PMC class.
 *
 * Returns an event descriptor pointer on match or NULL otherwise.
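 *
 * Matching uses pmc_match_event_name() above, so for example
 * "INSTR_EXECUTED", "instr-executed" and "instr.executed" all refer to
 * the same event.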
 */
static const struct pmc_event_descr *
pmc_match_event_class(const char *name,
    const struct pmc_class_descr *pcd)
{
	size_t n;
	const struct pmc_event_descr *ev;

	ev = pcd->pm_evc_event_table;
	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
		if (pmc_match_event_name(name, ev->pm_ev_name))
			return (ev);

	return (NULL);
}

static int
pmc_mdep_is_compatible_class(enum pmc_class pc)
{
	size_t n;

	for (n = 0; n < pmc_mdep_class_list_size; n++)
		if (pmc_mdep_class_list[n] == pc)
			return (1);
	return (0);
}

/*
 * API entry points
 */

int
pmc_allocate(const char *ctrspec, enum pmc_mode mode,
    uint32_t flags, int cpu, pmc_id_t *pmcid,
    uint64_t count)
{
	size_t n;
	int retval;
	char *r, *spec_copy;
	const char *ctrname;
	const struct pmc_event_descr *ev;
	const struct pmc_event_alias *alias;
	struct pmc_op_pmcallocate pmc_config;
	const struct pmc_class_descr *pcd;

	spec_copy = NULL;
	retval = -1;

	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
		errno = EINVAL;
		goto out;
	}
	bzero(&pmc_config, sizeof(pmc_config));
	pmc_config.pm_cpu = cpu;
	pmc_config.pm_mode = mode;
	pmc_config.pm_flags = flags;
	pmc_config.pm_count = count;
	if (PMC_IS_SAMPLING_MODE(mode))
		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;

	/*
	 * Can we pull this straight from the pmu table?
	 */
	r = spec_copy = strdup(ctrspec);
	ctrname = strsep(&r, ",");
	if (pmc_pmu_enabled()) {
		if (pmc_pmu_pmcallocate(ctrname, &pmc_config) == 0) {
			if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0) {
				goto out;
			}
			retval = 0;
			*pmcid = pmc_config.pm_pmcid;
			goto out;
		}
		errx(EX_USAGE, "ERROR: pmc_pmu_allocate failed, check for ctrname %s\n", ctrname);
	} else {
		free(spec_copy);
		spec_copy = NULL;
	}

	/* replace an event alias with the canonical event specifier */
	if (pmc_mdep_event_aliases)
		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
			if (!strcasecmp(ctrspec, alias->pm_alias)) {
				spec_copy = strdup(alias->pm_spec);
				break;
			}

	if (spec_copy == NULL)
		spec_copy = strdup(ctrspec);

	r = spec_copy;
	ctrname = strsep(&r, ",");

	/*
	 * If an explicit class prefix was given by the user, restrict the
	 * search for the event to the specified PMC class.
	 */
	ev = NULL;
	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pcd && pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
		    strncasecmp(ctrname, pcd->pm_evc_name,
		    pcd->pm_evc_name_size) == 0) {
			if ((ev = pmc_match_event_class(ctrname +
			    pcd->pm_evc_name_size, pcd)) == NULL) {
				errno = EINVAL;
				goto out;
			}
			break;
		}
	}

	/*
	 * Otherwise, search for this event in all compatible PMC
	 * classes.
	 */
	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pcd && pmc_mdep_is_compatible_class(pcd->pm_evc_class))
			ev = pmc_match_event_class(ctrname, pcd);
	}

	if (ev == NULL) {
		errno = EINVAL;
		goto out;
	}

	pmc_config.pm_ev = ev->pm_ev_code;
	pmc_config.pm_class = pcd->pm_evc_class;

	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
		errno = EINVAL;
		goto out;
	}

	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
		goto out;

	*pmcid = pmc_config.pm_pmcid;

	retval = 0;

out:
	if (spec_copy)
		free(spec_copy);

	return (retval);
}

int
pmc_attach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_attach_args;

	pmc_attach_args.pm_pmc = pmc;
	pmc_attach_args.pm_pid = pid;

	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
}

int
pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*caps = cpu_info.pm_classes[i].pm_caps;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

int
pmc_configure_logfile(int fd)
{
	struct pmc_op_configurelog cla;

	cla.pm_logfd = fd;
	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
		return (-1);
	return (0);
}

int
pmc_cpuinfo(const struct pmc_cpuinfo **pci)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	*pci = &cpu_info;
	return (0);
}

int
pmc_detach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_detach_args;

	pmc_detach_args.pm_pmc = pmc;
	pmc_detach_args.pm_pid = pid;
	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
}

int
pmc_disable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_DISABLED;
	return (PMC_CALL(PMCADMIN, &ssa));
}

int
pmc_enable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_FREE;
	return (PMC_CALL(PMCADMIN, &ssa));
}

/*
 * Return a list of events known to a given PMC class.  'cl' is the
 * PMC class identifier, 'eventnames' is the returned list of 'const
 * char *' pointers pointing to the names of the events. 'nevents' is
 * the number of event name pointers returned.
 *
 * The space for 'eventnames' is allocated using malloc(3).  The caller
 * is responsible for freeing this space when done.
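 *
 * An illustrative call (a sketch; error handling omitted):
 *
 *	const char **names;
 *	int nevents, i;
 *
 *	if (pmc_event_names_of_class(PMC_CLASS_SOFT, &names, &nevents) == 0) {
 *		for (i = 0; i < nevents; i++)
 *			printf("%s\n", names[i]);
 *		free(names);
 *	}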
 */
int
pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
    int *nevents)
{
	int count;
	const char **names;
	const struct pmc_event_descr *ev;

	switch (cl)
	{
	case PMC_CLASS_IAF:
		ev = iaf_event_table;
		count = PMC_EVENT_TABLE_SIZE(iaf);
		break;
	case PMC_CLASS_TSC:
		ev = tsc_event_table;
		count = PMC_EVENT_TABLE_SIZE(tsc);
		break;
	case PMC_CLASS_K8:
		ev = k8_event_table;
		count = PMC_EVENT_TABLE_SIZE(k8);
		break;
	case PMC_CLASS_XSCALE:
		ev = xscale_event_table;
		count = PMC_EVENT_TABLE_SIZE(xscale);
		break;
	case PMC_CLASS_ARMV7:
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		}
		break;
	case PMC_CLASS_ARMV8:
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		case PMC_CPU_ARMV8_CORTEX_A76:
			ev = cortex_a76_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a76);
			break;
		}
		break;
	case PMC_CLASS_BERI:
		ev = beri_event_table;
		count = PMC_EVENT_TABLE_SIZE(beri);
		break;
	case PMC_CLASS_MIPS24K:
		ev = mips24k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips24k);
		break;
	case PMC_CLASS_MIPS74K:
		ev = mips74k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips74k);
		break;
	case PMC_CLASS_OCTEON:
		ev = octeon_event_table;
		count = PMC_EVENT_TABLE_SIZE(octeon);
		break;
	case PMC_CLASS_PPC7450:
		ev = ppc7450_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc7450);
		break;
	case PMC_CLASS_PPC970:
		ev = ppc970_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc970);
		break;
	case PMC_CLASS_E500:
		ev = e500_event_table;
		count = PMC_EVENT_TABLE_SIZE(e500);
		break;
	case PMC_CLASS_SOFT:
		ev = soft_event_table;
		count = soft_event_info.pm_nevent;
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	if ((names = malloc(count * sizeof(const char *))) == NULL)
		return (-1);

	*eventnames = names;
	*nevents = count;

	for (;count--; ev++, names++)
		*names = ev->pm_ev_name;

	return (0);
}

int
pmc_flush_logfile(void)
{
	return (PMC_CALL(FLUSHLOG,0));
}

int
pmc_close_logfile(void)
{
	return (PMC_CALL(CLOSELOG,0));
}

int
pmc_get_driver_stats(struct pmc_driverstats *ds)
{
	struct pmc_op_getdriverstats gms;

	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
		return (-1);

	/* copy out fields in the current userland<->library interface */
	ds->pm_intr_ignored = gms.pm_intr_ignored;
	ds->pm_intr_processed = gms.pm_intr_processed;
	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
	ds->pm_syscalls = gms.pm_syscalls;
	ds->pm_syscall_errors = gms.pm_syscall_errors;
	ds->pm_buffer_requests = gms.pm_buffer_requests;
	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
	ds->pm_log_sweeps = gms.pm_log_sweeps;
	return (0);
}

int
pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
{
	struct pmc_op_getmsr gm;

	gm.pm_pmcid = pmc;
	if (PMC_CALL(PMCGETMSR, &gm) < 0)
		return (-1);
	*msr = gm.pm_msr;
	return (0);
}

int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;
#if defined(__amd64__) || defined(__i386__)
	int cpu_has_iaf_counters;
	unsigned int t;
#endif

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	bzero(&op_cpu_info, sizeof(op_cpu_info));
	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass = op_cpu_info.pm_nclass;
	for (n = 0; n < op_cpu_info.pm_nclass; n++)
		memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n],
		    sizeof(cpu_info.pm_classes[n]));

	pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
	    sizeof(struct pmc_class_descr *));

	if (pmc_class_table == NULL)
		return (-1);

	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
		pmc_class_table[n] = NULL;

	/*
	 * Get soft events list.
	 */
	soft_event_info.pm_class = PMC_CLASS_SOFT;
	if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0)
		return (pmc_syscall = -1);

	/* Map soft events to static list. */
	for (n = 0; n < soft_event_info.pm_nevent; n++) {
		soft_event_table[n].pm_ev_name =
		    soft_event_info.pm_events[n].pm_ev_name;
		soft_event_table[n].pm_ev_code =
		    soft_event_info.pm_events[n].pm_ev_code;
	}
	soft_class_table_descr.pm_evc_event_table_size =
	    soft_event_info.pm_nevent;
	soft_class_table_descr.pm_evc_event_table =
	    soft_event_table;

	/*
	 * Fill in the class table.
	 */
	n = 0;

	/* Fill soft events information. */
	pmc_class_table[n++] = &soft_class_table_descr;
#if defined(__amd64__) || defined(__i386__)
	if (cpu_info.pm_cputype != PMC_CPU_GENERIC)
		pmc_class_table[n++] = &tsc_class_table_descr;

	/*
	 * Check if this CPU has fixed function counters.
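	 * (i.e. whether the kernel reports a non-empty PMC_CLASS_IAF class).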
	 */
	cpu_has_iaf_counters = 0;
	for (t = 0; t < cpu_info.pm_nclass; t++)
		if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF &&
		    cpu_info.pm_classes[t].pm_num > 0)
			cpu_has_iaf_counters = 1;
#endif

#define	PMC_MDEP_INIT(C) do {					\
		pmc_mdep_event_aliases = C##_aliases;		\
		pmc_mdep_class_list = C##_pmc_classes;		\
		pmc_mdep_class_list_size =			\
		    PMC_TABLE_SIZE(C##_pmc_classes);		\
	} while (0)

#define	PMC_MDEP_INIT_INTEL_V2(C) do {				\
		PMC_MDEP_INIT(C);				\
		pmc_class_table[n++] = &iaf_class_table_descr;	\
		if (!cpu_has_iaf_counters)			\
			pmc_mdep_event_aliases =		\
			    C##_aliases_without_iaf;		\
		pmc_class_table[n] = &C##_class_table_descr;	\
	} while (0)

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		pmc_class_table[n] = &k8_class_table_descr;
		break;
#endif
	case PMC_CPU_GENERIC:
		PMC_MDEP_INIT(generic);
		break;
#if defined(__arm__)
#if defined(__XSCALE__)
	case PMC_CPU_INTEL_XSCALE:
		PMC_MDEP_INIT(xscale);
		pmc_class_table[n] = &xscale_class_table_descr;
		break;
#endif
	case PMC_CPU_ARMV7_CORTEX_A8:
		PMC_MDEP_INIT(cortex_a8);
		pmc_class_table[n] = &cortex_a8_class_table_descr;
		break;
	case PMC_CPU_ARMV7_CORTEX_A9:
		PMC_MDEP_INIT(cortex_a9);
		pmc_class_table[n] = &cortex_a9_class_table_descr;
		break;
#endif
#if defined(__aarch64__)
	case PMC_CPU_ARMV8_CORTEX_A53:
		PMC_MDEP_INIT(cortex_a53);
		pmc_class_table[n] = &cortex_a53_class_table_descr;
		break;
	case PMC_CPU_ARMV8_CORTEX_A57:
		PMC_MDEP_INIT(cortex_a57);
		pmc_class_table[n] = &cortex_a57_class_table_descr;
		break;
	case PMC_CPU_ARMV8_CORTEX_A76:
		PMC_MDEP_INIT(cortex_a76);
		pmc_class_table[n] = &cortex_a76_class_table_descr;
		break;
#endif
#if defined(__mips__)
	case PMC_CPU_MIPS_BERI:
		PMC_MDEP_INIT(beri);
		pmc_class_table[n] = &beri_class_table_descr;
		break;
	case PMC_CPU_MIPS_24K:
		PMC_MDEP_INIT(mips24k);
		pmc_class_table[n] = &mips24k_class_table_descr;
		break;
	case PMC_CPU_MIPS_74K:
		PMC_MDEP_INIT(mips74k);
		pmc_class_table[n] = &mips74k_class_table_descr;
		break;
	case PMC_CPU_MIPS_OCTEON:
		PMC_MDEP_INIT(octeon);
		pmc_class_table[n] = &octeon_class_table_descr;
		break;
#endif /* __mips__ */
#if defined(__powerpc__)
	case PMC_CPU_PPC_7450:
		PMC_MDEP_INIT(ppc7450);
		pmc_class_table[n] = &ppc7450_class_table_descr;
		break;
	case PMC_CPU_PPC_970:
		PMC_MDEP_INIT(ppc970);
		pmc_class_table[n] = &ppc970_class_table_descr;
		break;
	case PMC_CPU_PPC_E500:
		PMC_MDEP_INIT(e500);
		pmc_class_table[n] = &e500_class_table_descr;
		break;
#endif
	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about.  This shouldn't happen since the abi version check
		 * should have caught this.
		 */
#if defined(__amd64__) || defined(__i386__)
		break;
#endif
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}

const char *
pmc_name_of_capability(enum pmc_caps cap)
{
	int i;

	/*
	 * 'cap' should have a single bit set and should be in
	 * range.
	 */
	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
	    cap > PMC_CAP_LAST) {
		errno = EINVAL;
		return (NULL);
	}

	i = ffs(cap);
	return (pmc_capability_names[i - 1]);
}

const char *
pmc_name_of_class(enum pmc_class pc)
{
	size_t n;

	for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++)
		if (pc == pmc_class_names[n].pm_class)
			return (pmc_class_names[n].pm_name);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_cputype(enum pmc_cputype cp)
{
	size_t n;

	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
		if (cp == pmc_cputype_names[n].pm_cputype)
			return (pmc_cputype_names[n].pm_name);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_disposition(enum pmc_disp pd)
{
	if ((int) pd >= PMC_DISP_FIRST &&
	    pd <= PMC_DISP_LAST)
		return (pmc_disposition_names[pd]);

	errno = EINVAL;
	return (NULL);
}

const char *
_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
{
	const struct pmc_event_descr *ev, *evfence;

	ev = evfence = NULL;
	if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
		ev = k8_event_table;
		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
	} else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
		ev = xscale_event_table;
		evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
	} else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		case PMC_CPU_ARMV8_CORTEX_A76:
			ev = cortex_a76_event_table;
			evfence = cortex_a76_event_table + PMC_EVENT_TABLE_SIZE(cortex_a76);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_BERI_FIRST && pe <= PMC_EV_BERI_LAST) {
		ev = beri_event_table;
		evfence = beri_event_table + PMC_EVENT_TABLE_SIZE(beri);
	} else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
		ev = mips24k_event_table;
		evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
	} else if (pe >= PMC_EV_MIPS74K_FIRST && pe <= PMC_EV_MIPS74K_LAST) {
		ev = mips74k_event_table;
		evfence = mips74k_event_table + PMC_EVENT_TABLE_SIZE(mips74k);
	} else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) {
		ev = octeon_event_table;
		evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon);
	} else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
		ev = ppc7450_event_table;
		evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
	} else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
		ev = ppc970_event_table;
		evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
	} else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) {
		ev = e500_event_table;
		evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500);
	} else if (pe == PMC_EV_TSC_TSC) {
		ev = tsc_event_table;
		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
	} else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
		ev = soft_event_table;
		evfence = soft_event_table + soft_event_info.pm_nevent;
	}

	for (; ev != evfence; ev++)
		if (pe == ev->pm_ev_code)
			return (ev->pm_ev_name);

	return (NULL);
}

const char *
pmc_name_of_event(enum pmc_event pe)
{
	const char *n;

	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
		return (n);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_mode(enum pmc_mode pm)
{
	if ((int) pm >= PMC_MODE_FIRST &&
	    pm <= PMC_MODE_LAST)
		return (pmc_mode_names[pm]);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_state(enum pmc_state ps)
{
	if ((int) ps >= PMC_STATE_FIRST &&
	    ps <= PMC_STATE_LAST)
		return (pmc_state_names[ps]);

	errno = EINVAL;
	return (NULL);
}

int
pmc_ncpu(void)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	return (cpu_info.pm_ncpu);
}

int
pmc_npmc(int cpu)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
		errno = EINVAL;
		return (-1);
	}

	return (cpu_info.pm_npmc);
}

int
pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
{
	int nbytes, npmc;
	struct pmc_op_getpmcinfo *pmci;

	if ((npmc = pmc_npmc(cpu)) < 0)
		return (-1);

	nbytes = sizeof(struct pmc_op_getpmcinfo) +
	    npmc * sizeof(struct pmc_info);

	if ((pmci = calloc(1, nbytes)) == NULL)
		return (-1);

	pmci->pm_cpu = cpu;

	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
		free(pmci);
		return (-1);
	}

	/* kernel<->library, library<->userland interfaces are identical */
	*ppmci = (struct pmc_pmcinfo *) pmci;
	return (0);
}

int
pmc_read(pmc_id_t pmc, pmc_value_t *value)
{
	struct pmc_op_pmcrw pmc_read_op;

	pmc_read_op.pm_pmcid = pmc;
	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
	pmc_read_op.pm_value = -1;

	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
		return (-1);

	*value = pmc_read_op.pm_value;
	return (0);
}

int
pmc_release(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_release_args;

	pmc_release_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
}

int
pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
{
	struct pmc_op_pmcrw pmc_rw_op;

	pmc_rw_op.pm_pmcid = pmc;
	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
	pmc_rw_op.pm_value = newvalue;

	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
		return (-1);

	*oldvaluep = pmc_rw_op.pm_value;
	return (0);
}

int
pmc_set(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcsetcount sc;

	sc.pm_pmcid = pmc;
	sc.pm_count = value;

	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
		return (-1);
	return (0);
}

int
pmc_start(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_start_args;

	pmc_start_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTART, &pmc_start_args));
}

int
pmc_stop(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_stop_args;

	pmc_stop_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
}

int
pmc_width(pmc_id_t pmcid, uint32_t *width)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*width = cpu_info.pm_classes[i].pm_width;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

int
pmc_write(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcrw pmc_write_op;

	pmc_write_op.pm_pmcid = pmc;
	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
	pmc_write_op.pm_value = value;
	return (PMC_CALL(PMCRW, &pmc_write_op));
}

int
pmc_writelog(uint32_t userdata)
{
	struct pmc_op_writelog wl;

	wl.pm_userdata = userdata;
	return (PMC_CALL(WRITELOG, &wl));
}
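
/*
 * Illustrative use of this API (a sketch, not part of the library):
 * count an event in process-virtual counting mode for the calling
 * process.  "instructions" is an alias resolved through the tables
 * above on most supported CPUs; error handling is abbreviated.
 *
 *	pmc_id_t pmcid;
 *	pmc_value_t v;
 *
 *	if (pmc_init() < 0)
 *		err(EX_OSERR, "pmc_init");
 *	if (pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY,
 *	    &pmcid, 0) < 0)
 *		err(EX_OSERR, "pmc_allocate");
 *	if (pmc_attach(pmcid, 0) < 0)	 (a pid of 0 denotes this process)
 *		err(EX_OSERR, "pmc_attach");
 *	pmc_start(pmcid);
 *	 ... run the workload ...
 *	pmc_stop(pmcid);
 *	pmc_read(pmcid, &v);
 *	pmc_release(pmcid);
 */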