/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/module.h>
#include <sys/pmc.h>
#include <sys/syscall.h>

#include <ctype.h>
#include <errno.h>
#include <err.h>
#include <fcntl.h>
#include <pmc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <sysexits.h>
#include <unistd.h>

#include "libpmcinternal.h"

/* Function prototypes */
#if defined(__amd64__) || defined(__i386__)
static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__arm__)
#if defined(__XSCALE__)
static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__aarch64__)
static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__mips__)
static int mips_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif /* __mips__ */
static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);

#if defined(__powerpc__)
static int powerpc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif /* __powerpc__ */

#define	PMC_CALL(cmd, params)				\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))
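/*
 * For illustration: with the definition above, a call such as
 * PMC_CALL(PMCSTART, &args) expands to
 * syscall(pmc_syscall, PMC_OP_PMCSTART, (&args)); every libpmc
 * operation is funnelled through the single hwpmc(4) system call whose
 * number is looked up from the kernel module in pmc_init() below.
 */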
/*
 * Event aliases provide a way for the user to ask for generic events
 * like "cache-misses", or "instructions-retired".  These aliases are
 * mapped to the appropriate canonical event descriptions using a
 * lookup table.
 */
struct pmc_event_alias {
	const char	*pm_alias;
	const char	*pm_spec;
};

static const struct pmc_event_alias *pmc_mdep_event_aliases;

/*
 * The pmc_event_descr structure maps symbolic names known to the user
 * to integer codes used by the PMC KLD.
 */
struct pmc_event_descr {
	const char	*pm_ev_name;
	enum pmc_event	pm_ev_code;
};

/*
 * The pmc_class_descr structure maps class name prefixes for
 * event names to event tables and other PMC class data.
 */
struct pmc_class_descr {
	const char	*pm_evc_name;
	size_t		pm_evc_name_size;
	enum pmc_class	pm_evc_class;
	const struct pmc_event_descr *pm_evc_event_table;
	size_t		pm_evc_event_table_size;
	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
};

#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)

#undef	__PMC_EV
#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },

/*
 * PMC_CLASSDEP_TABLE(NAME, CLASS)
 *
 * Define a table mapping event names and aliases to HWPMC event IDs.
 */
#define	PMC_CLASSDEP_TABLE(N, C)				\
	static const struct pmc_event_descr N##_event_table[] =	\
	{							\
		__PMC_EV_##C()					\
	}

PMC_CLASSDEP_TABLE(iaf, IAF);
PMC_CLASSDEP_TABLE(k8, K8);
PMC_CLASSDEP_TABLE(xscale, XSCALE);
PMC_CLASSDEP_TABLE(armv7, ARMV7);
PMC_CLASSDEP_TABLE(armv8, ARMV8);
PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
PMC_CLASSDEP_TABLE(mips74k, MIPS74K);
PMC_CLASSDEP_TABLE(octeon, OCTEON);
PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
PMC_CLASSDEP_TABLE(ppc970, PPC970);
PMC_CLASSDEP_TABLE(e500, E500);

static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];

#undef	__PMC_EV_ALIAS
#define	__PMC_EV_ALIAS(N,CODE)	{ N, PMC_EV_##CODE },

static const struct pmc_event_descr cortex_a8_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A8()
};

static const struct pmc_event_descr cortex_a9_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A9()
};

static const struct pmc_event_descr cortex_a53_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A53()
};

static const struct pmc_event_descr cortex_a57_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A57()
};
/*
 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
 *
 * Map a CPU to the PMC classes it supports.
 */
#define	PMC_MDEP_TABLE(N,C,...)					\
	static const enum pmc_class N##_pmc_classes[] = {	\
		PMC_CLASS_##C, __VA_ARGS__			\
	}

PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE);
PMC_MDEP_TABLE(cortex_a8, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
PMC_MDEP_TABLE(cortex_a9, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(cortex_a57, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
PMC_MDEP_TABLE(mips74k, MIPS74K, PMC_CLASS_SOFT, PMC_CLASS_MIPS74K);
PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450, PMC_CLASS_TSC);
PMC_MDEP_TABLE(ppc970, PPC970, PMC_CLASS_SOFT, PMC_CLASS_PPC970, PMC_CLASS_TSC);
PMC_MDEP_TABLE(e500, E500, PMC_CLASS_SOFT, PMC_CLASS_E500, PMC_CLASS_TSC);
PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT);

static const struct pmc_event_descr tsc_event_table[] =
{
	__PMC_EV_TSC()
};

#undef	PMC_CLASS_TABLE_DESC
#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
static const struct pmc_class_descr NAME##_class_table_descr =	\
	{							\
		.pm_evc_name  = #CLASS "-",			\
		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
		.pm_evc_class = PMC_CLASS_##CLASS ,		\
		.pm_evc_event_table = EVENTS##_event_table ,	\
		.pm_evc_event_table_size = 			\
			PMC_EVENT_TABLE_SIZE(EVENTS),		\
		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
	}

#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
#endif
#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
#endif
#if	defined(__arm__)
#if	defined(__XSCALE__)
PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
#endif
PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7);
PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7);
#endif
#if	defined(__aarch64__)
PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
#endif
#if defined(__mips__)
PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips);
PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips);
#endif /* __mips__ */
#if defined(__powerpc__)
PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc);
#endif

static struct pmc_class_descr soft_class_table_descr =
{
	.pm_evc_name  = "SOFT-",
	.pm_evc_name_size = sizeof("SOFT-") - 1,
	.pm_evc_class = PMC_CLASS_SOFT,
	.pm_evc_event_table = NULL,
	.pm_evc_event_table_size = 0,
	.pm_evc_allocate_pmc = soft_allocate_pmc
};

#undef	PMC_CLASS_TABLE_DESC

static const struct pmc_class_descr **pmc_class_table;
#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass

static const enum pmc_class *pmc_mdep_class_list;
static size_t pmc_mdep_class_list_size;

/*
 * Mapping tables, mapping enumeration values to human readable
 * strings.
 */
static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

struct pmc_class_map {
	enum pmc_class	pm_class;
	const char	*pm_name;
};

static const struct pmc_class_map pmc_class_names[] = {
#undef	__PMC_CLASS
#define	__PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } ,
	__PMC_CLASSES()
};

struct pmc_cputype_map {
	enum pmc_cputype pm_cputype;
	const char	*pm_name;
};

static const struct pmc_cputype_map pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
	__PMC_CPUS()
};

static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

static const char * pmc_mode_names[] = {
#undef	__PMC_MODE
#define	__PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

static const char * pmc_state_names[] = {
#undef	__PMC_STATE
#define	__PMC_STATE(S) #S ,
	__PMC_STATES()
};

/*
 * Filled in by pmc_init().
 */
static int pmc_syscall = -1;
static struct pmc_cpuinfo cpu_info;
static struct pmc_op_getdyneventinfo soft_event_info;

/* Event masks for events */
struct pmc_masks {
	const char	*pm_name;
	const uint64_t	pm_value;
};
#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
#define	NULLMASK	{ .pm_name = NULL }

#if defined(__amd64__) || defined(__i386__)
static int
pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
{
	const struct pmc_masks *pm;
	char *q, *r;
	int c;

	if (pmask == NULL)	/* no mask keywords */
		return (-1);
	q = strchr(p, '=');	/* skip '=' */
	if (*++q == '\0')	/* no more data */
		return (-1);
	c = 0;			/* count of mask keywords seen */
	while ((r = strsep(&q, "+")) != NULL) {
		for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
		    pm++)
			;
		if (pm->pm_name == NULL) /* not found */
			return (-1);
		*evmask |= pm->pm_value;
		c++;
	}
	return (c);
}
#endif

#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }

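/*
 * For illustration only (the keywords and mask names referenced here are
 * defined by the per-class tables below): a user supplied counter
 * specification is a comma separated string, e.g.
 *
 *	"k8-dc-refill-from-l2,mask=exclusive+shared,usr"
 *
 * A class allocation routine strsep()s such a string on ',' and passes
 * "mask=..." keywords to pmc_parse_mask(), which ORs the value of each
 * '+' separated mask keyword into the event mask.
 */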

#if defined(__amd64__) || defined(__i386__)
/*
 * AMD K8 PMCs.
 */

static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k8-dc-miss"),
	EV_ALIAS("ic-misses",		"k8-ic-miss"),
	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};

#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops,		0),
	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
	__K8MASK(store-pipe-excluding-junk-ops,		2),
	__K8MASK(add-pipe-junk-ops,			3),
	__K8MASK(multiply-pipe-junk-ops,		4),
	__K8MASK(store-pipe-junk-ops,			5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es,	0),
	__K8MASK(cs,	1),
	__K8MASK(ss,	2),
	__K8MASK(ds,	3),
	__K8MASK(fs,	4),
	__K8MASK(gs,	5),
	__K8MASK(hs,	6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions,	0),
	__K8MASK(cycles-in-request,	1),
	__K8MASK(cycles-to-complete,	2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid,	0),
	__K8MASK(shared,	1),
	__K8MASK(exclusive,	2),
	__K8MASK(owner,		3),
	__K8MASK(modified,	4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber,	0),
	__K8MASK(piggyback,	1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load,	0),
	__K8MASK(store,	1),
	__K8MASK(nta,	2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses,	0),
	__K8MASK(misses,	1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	__K8MASK(tag-snoop,	3),
	__K8MASK(cancelled,	4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim,	0),
	__K8MASK(victim-from-l2,	1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87,			0),
	__K8MASK(mmx-3dnow,		1),
	__K8MASK(packed-sse-sse2,	2),
	__K8MASK(scalar-sse-sse2,	3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0,	0),
	__K8MASK(low-op-pos-1,	1),
	__K8MASK(low-op-pos-2,	2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults,	0),
	__K8MASK(sse-retype-microfaults,	1),
	__K8MASK(sse-reclass-microfaults,	2),
	__K8MASK(sse-and-x87-microtraps,	3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit,	0),
	__K8MASK(page-miss,	1),
	__K8MASK(page-conflict,	2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround,		0),
	__K8MASK(read-to-write-turnaround,	1),
	__K8MASK(write-to-read-turnaround,	2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass,	0),
	__K8MASK(memory-controller-lo-pri-bypass,	1),
	__K8MASK(dram-controller-interface-bypass,	2),
	__K8MASK(dram-controller-queue-bypass,		3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte,	0),
	__K8MASK(nonpostwrszdword,	1),
	__K8MASK(postwrszbyte,		2),
	__K8MASK(postwrszdword,		3),
	__K8MASK(rdszbyte,		4),
	__K8MASK(rdszdword,		5),
	__K8MASK(rdmodwr,		6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss,				0),
	__K8MASK(probe-hit,				1),
	__K8MASK(probe-hit-dirty-no-memory-cancel,	2),
	__K8MASK(probe-hit-dirty-with-memory-cancel,	3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command,	0),
	__K8MASK(data,		1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop,		3),
	NULLMASK
};

#undef	__K8MASK

#define	K8_KW_COUNT	"count"
#define	K8_KW_EDGE	"edge"
#define	K8_KW_INV	"inv"
#define	K8_KW_MASK	"mask"
#define	K8_KW_OS	"os"
#define	K8_KW_USR	"usr"

static int
k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char		*e, *p, *q;
	int		n;
	uint32_t	count;
	uint64_t	evmask;
	const struct pmc_masks	*pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_amd.pm_amd_config = 0;

	pmask = NULL;
	evmask = 0;

#define	__K8SETMASK(M) pmask = k8_mask_##M

	/* setup parsing tables */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
		__K8SETMASK(fdfo);
		break;
	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
		__K8SETMASK(lsrl);
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		__K8SETMASK(llo);
		break;
	case PMC_EV_K8_DC_REFILL_FROM_L2:
	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
	case PMC_EV_K8_DC_COPYBACK:
		__K8SETMASK(dc);
		break;
	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
		__K8SETMASK(dobee);
		break;
	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
		__K8SETMASK(ddpi);
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		__K8SETMASK(dabl);
		break;
	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
		__K8SETMASK(bilr);
		break;
	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
		__K8SETMASK(bfrlm);
		break;
	case PMC_EV_K8_BU_FILL_INTO_L2:
		__K8SETMASK(bfil);
		break;
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
		__K8SETMASK(frfi);
		break;
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
		__K8SETMASK(frfdoi);
		break;
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		__K8SETMASK(ffe);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
		__K8SETMASK(nmcpae);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
		__K8SETMASK(nmct);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
		__K8SETMASK(nmcbs);
		break;
	case PMC_EV_K8_NB_SIZED_COMMANDS:
		__K8SETMASK(nsc);
		break;
	case PMC_EV_K8_NB_PROBE_RESULT:
		__K8SETMASK(npr);
		break;
	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
		__K8SETMASK(nhbb);
		break;

	default:
		break;		/* no options defined */
	}

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K8_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K8_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, K8_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, K8_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* other post processing */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		/* XXX only available in rev B and later */
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		/* XXX only available in rev C and later */
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		/* XXX CPU Rev A,B evmask is to be zero */
		if (evmask & (evmask - 1)) /* > 1 bit set */
			return (-1);
		if (evmask == 0) {
			evmask = 0x01; /* Rev C and later: #instrs */
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
		break;
	default:
		if (evmask == 0 && pmask != NULL) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_amd.pm_amd_config =
		    AMD_PMC_TO_UNITMASK(evmask);

	return (0);
}

#endif

#if defined(__i386__) || defined(__amd64__)
static int
tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	if (pe != PMC_EV_TSC_TSC)
		return (-1);

	/* TSC events must be unqualified. */
	if (ctrspec && *ctrspec != '\0')
		return (-1);

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= PMC_CAP_READ;

	return (0);
}
#endif

static struct pmc_event_alias generic_aliases[] = {
	EV_ALIAS("instructions", "SOFT-CLOCK.HARD"),
	EV_ALIAS(NULL, NULL)
};

static int
soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	(void)ctrspec;
	(void)pmc_config;

	if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
		return (-1);

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	return (0);
}

#if	defined(__arm__)
#if	defined(__XSCALE__)

static struct pmc_event_alias xscale_aliases[] = {
	EV_ALIAS("branches",		"BRANCH_RETIRED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS("dc-misses",		"DC_MISS"),
	EV_ALIAS("ic-misses",		"IC_MISS"),
	EV_ALIAS("instructions",	"INSTR_RETIRED"),
	EV_ALIAS(NULL, NULL)
};

static int
xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

static struct pmc_event_alias cortex_a8_aliases[] = {
	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias cortex_a9_aliases[] = {
	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};

static int
armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

#if	defined(__aarch64__)
static struct pmc_event_alias cortex_a53_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
static struct pmc_event_alias cortex_a57_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
static int
arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

#if defined(__mips__)

static struct pmc_event_alias mips24k_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS("branches",		"BRANCH_COMPLETED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias mips74k_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS("branches",		"BRANCH_INSNS"),
	EV_ALIAS("branch-mispredicts",	"MISPREDICTED_BRANCH_INSNS"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias octeon_aliases[] = {
	EV_ALIAS("instructions",	"RET"),
	EV_ALIAS("branches",		"BR"),
	EV_ALIAS("branch-mispredicts",	"BRMIS"),
	EV_ALIAS(NULL, NULL)
};

#define	MIPS_KW_OS	"os"
#define	MIPS_KW_USR	"usr"
#define	MIPS_KW_ANYTHREAD	"anythread"

static int
mips_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, MIPS_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, MIPS_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, MIPS_KW_ANYTHREAD))
			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
		else
			return (-1);
	}

	return (0);
}

#endif /* __mips__ */

#if defined(__powerpc__)

static struct pmc_event_alias ppc7450_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_COMPLETED"),
	EV_ALIAS("branches",		"BRANCHES_COMPLETED"),
	EV_ALIAS("branch-mispredicts",	"MISPREDICTED_BRANCHES"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias ppc970_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_COMPLETED"),
	EV_ALIAS("cycles",		"CYCLES"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias e500_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_COMPLETED"),
	EV_ALIAS("cycles",		"CYCLES"),
	EV_ALIAS(NULL, NULL)
};

#define	POWERPC_KW_OS	"os"
#define	POWERPC_KW_USR	"usr"
#define	POWERPC_KW_ANYTHREAD	"anythread"

static int
powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, POWERPC_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, POWERPC_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
		else
			return (-1);
	}

	return (0);
}

#endif /* __powerpc__ */


/*
 * Match an event name `name' with its canonical form.
 *
 * Matches are case insensitive and spaces, periods, underscores and
 * hyphen characters are considered to match each other.
 *
 * Returns 1 for a match, 0 otherwise.
 */

static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	int cc, nc;
	const unsigned char *c, *n;

	c = (const unsigned char *) canonicalname;
	n = (const unsigned char *) name;

	for (; (nc = *n) && (cc = *c); n++, c++) {

		if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
		    (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
			continue;

		if (toupper(nc) == toupper(cc))
			continue;

		return (0);
	}

	if (*n == '\0' && *c == '\0')
		return (1);

	return (0);
}
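/*
 * For illustration: under the rules above, "branch_mispred",
 * "BRANCH-MISPRED" and "branch.mispred" all match the canonical name
 * "BRANCH_MISPRED", while "branchmispred" does not, since a separator
 * character must be present in both strings for it to be skipped.
 */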

/*
 * Match an event name against all the event names supported by a
 * PMC class.
 *
 * Returns an event descriptor pointer on match or NULL otherwise.
 */
static const struct pmc_event_descr *
pmc_match_event_class(const char *name,
    const struct pmc_class_descr *pcd)
{
	size_t n;
	const struct pmc_event_descr *ev;

	ev = pcd->pm_evc_event_table;
	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
		if (pmc_match_event_name(name, ev->pm_ev_name))
			return (ev);

	return (NULL);
}

static int
pmc_mdep_is_compatible_class(enum pmc_class pc)
{
	size_t n;

	for (n = 0; n < pmc_mdep_class_list_size; n++)
		if (pmc_mdep_class_list[n] == pc)
			return (1);
	return (0);
}

/*
 * API entry points
 */

int
pmc_allocate(const char *ctrspec, enum pmc_mode mode,
    uint32_t flags, int cpu, pmc_id_t *pmcid,
    uint64_t count)
{
	size_t n;
	int retval;
	char *r, *spec_copy;
	const char *ctrname;
	const struct pmc_event_descr *ev;
	const struct pmc_event_alias *alias;
	struct pmc_op_pmcallocate pmc_config;
	const struct pmc_class_descr *pcd;

	spec_copy = NULL;
	retval    = -1;

	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
		errno = EINVAL;
		goto out;
	}
	bzero(&pmc_config, sizeof(pmc_config));
	pmc_config.pm_cpu   = cpu;
	pmc_config.pm_mode  = mode;
	pmc_config.pm_flags = flags;
	pmc_config.pm_count = count;
	if (PMC_IS_SAMPLING_MODE(mode))
		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
	/*
	 * Can we pull this straight from the pmu table?
	 */
	r = spec_copy = strdup(ctrspec);
	ctrname = strsep(&r, ",");
	if (pmc_pmu_enabled()) {
		if (pmc_pmu_pmcallocate(ctrname, &pmc_config) == 0) {
			if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0) {
				goto out;
			}
			retval = 0;
			*pmcid = pmc_config.pm_pmcid;
			goto out;
		}
		errx(EX_USAGE, "ERROR: pmc_pmu_allocate failed, check for ctrname %s\n", ctrname);
	} else {
		free(spec_copy);
		spec_copy = NULL;
	}

	/* replace an event alias with the canonical event specifier */
	if (pmc_mdep_event_aliases)
		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
			if (!strcasecmp(ctrspec, alias->pm_alias)) {
				spec_copy = strdup(alias->pm_spec);
				break;
			}

	if (spec_copy == NULL)
		spec_copy = strdup(ctrspec);

	r = spec_copy;
	ctrname = strsep(&r, ",");

	/*
	 * If an explicit class prefix was given by the user, restrict the
	 * search for the event to the specified PMC class.
	 */
	ev = NULL;
	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pcd && pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
		    strncasecmp(ctrname, pcd->pm_evc_name,
				pcd->pm_evc_name_size) == 0) {
			if ((ev = pmc_match_event_class(ctrname +
			    pcd->pm_evc_name_size, pcd)) == NULL) {
				errno = EINVAL;
				goto out;
			}
			break;
		}
	}

	/*
	 * Otherwise, search for this event in all compatible PMC
	 * classes.
	 */
	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pcd && pmc_mdep_is_compatible_class(pcd->pm_evc_class))
			ev = pmc_match_event_class(ctrname, pcd);
	}

	if (ev == NULL) {
		errno = EINVAL;
		goto out;
	}

	pmc_config.pm_ev    = ev->pm_ev_code;
	pmc_config.pm_class = pcd->pm_evc_class;

	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
		errno = EINVAL;
		goto out;
	}

	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
		goto out;

	*pmcid = pmc_config.pm_pmcid;

	retval = 0;

 out:
	if (spec_copy)
		free(spec_copy);

	return (retval);
}
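/*
 * For illustration only, a minimal sketch of the usual calling sequence
 * for these entry points (error checks abbreviated; the event name, the
 * process-virtual counting mode and the attach-to-self pid of 0 are just
 * example choices by the caller):
 *
 *	pmc_id_t pmcid;
 *	pmc_value_t v;
 *
 *	if (pmc_init() < 0)
 *		err(EX_OSERR, "hwpmc(4) not available");
 *	if (pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY,
 *	    &pmcid, 0) < 0)
 *		err(EX_OSERR, "cannot allocate PMC");
 *	pmc_attach(pmcid, 0);
 *	pmc_start(pmcid);
 *	... run the code being measured ...
 *	pmc_stop(pmcid);
 *	pmc_read(pmcid, &v);
 *	pmc_release(pmcid);
 */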

int
pmc_attach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_attach_args;

	pmc_attach_args.pm_pmc = pmc;
	pmc_attach_args.pm_pid = pid;

	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
}

int
pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*caps = cpu_info.pm_classes[i].pm_caps;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

int
pmc_configure_logfile(int fd)
{
	struct pmc_op_configurelog cla;

	cla.pm_logfd = fd;
	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
		return (-1);
	return (0);
}

int
pmc_cpuinfo(const struct pmc_cpuinfo **pci)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	*pci = &cpu_info;
	return (0);
}

int
pmc_detach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_detach_args;

	pmc_detach_args.pm_pmc = pmc;
	pmc_detach_args.pm_pid = pid;
	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
}

int
pmc_disable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_DISABLED;
	return (PMC_CALL(PMCADMIN, &ssa));
}

int
pmc_enable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_FREE;
	return (PMC_CALL(PMCADMIN, &ssa));
}

/*
 * Return a list of events known to a given PMC class.  'cl' is the
 * PMC class identifier, 'eventnames' is the returned list of 'const
 * char *' pointers pointing to the names of the events. 'nevents' is
 * the number of event name pointers returned.
 *
 * The space for 'eventnames' is allocated using malloc(3).  The caller
 * is responsible for freeing this space when done.
 */
int
pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
    int *nevents)
{
	int count;
	const char **names;
	const struct pmc_event_descr *ev;

	switch (cl)
	{
	case PMC_CLASS_IAF:
		ev = iaf_event_table;
		count = PMC_EVENT_TABLE_SIZE(iaf);
		break;
	case PMC_CLASS_TSC:
		ev = tsc_event_table;
		count = PMC_EVENT_TABLE_SIZE(tsc);
		break;
	case PMC_CLASS_K8:
		ev = k8_event_table;
		count = PMC_EVENT_TABLE_SIZE(k8);
		break;
	case PMC_CLASS_XSCALE:
		ev = xscale_event_table;
		count = PMC_EVENT_TABLE_SIZE(xscale);
		break;
	case PMC_CLASS_ARMV7:
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		}
		break;
	case PMC_CLASS_ARMV8:
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		}
		break;
	case PMC_CLASS_MIPS24K:
		ev = mips24k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips24k);
		break;
	case PMC_CLASS_MIPS74K:
		ev = mips74k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips74k);
		break;
	case PMC_CLASS_OCTEON:
		ev = octeon_event_table;
		count = PMC_EVENT_TABLE_SIZE(octeon);
		break;
	case PMC_CLASS_PPC7450:
		ev = ppc7450_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc7450);
		break;
	case PMC_CLASS_PPC970:
		ev = ppc970_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc970);
		break;
	case PMC_CLASS_E500:
		ev = e500_event_table;
		count = PMC_EVENT_TABLE_SIZE(e500);
		break;
	case PMC_CLASS_SOFT:
		ev = soft_event_table;
		count = soft_event_info.pm_nevent;
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	if ((names = malloc(count * sizeof(const char *))) == NULL)
		return (-1);

	*eventnames = names;
	*nevents = count;

	for (;count--; ev++, names++)
		*names = ev->pm_ev_name;

	return (0);
}
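/*
 * For illustration only, a sketch of how a caller might use
 * pmc_event_names_of_class(); the class argument is just an example, and
 * only the returned array itself is malloc(3)ed (the strings point into
 * static tables), so the array is all that needs to be freed:
 *
 *	const char **names;
 *	int i, nevents;
 *
 *	if (pmc_event_names_of_class(PMC_CLASS_SOFT, &names, &nevents) < 0)
 *		err(EX_OSERR, "cannot retrieve event names");
 *	for (i = 0; i < nevents; i++)
 *		printf("%s\n", names[i]);
 *	free(names);
 */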

int
pmc_flush_logfile(void)
{
	return (PMC_CALL(FLUSHLOG,0));
}

int
pmc_close_logfile(void)
{
	return (PMC_CALL(CLOSELOG,0));
}

int
pmc_get_driver_stats(struct pmc_driverstats *ds)
{
	struct pmc_op_getdriverstats gms;

	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
		return (-1);

	/* copy out fields in the current userland<->library interface */
	ds->pm_intr_ignored    = gms.pm_intr_ignored;
	ds->pm_intr_processed  = gms.pm_intr_processed;
	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
	ds->pm_syscalls        = gms.pm_syscalls;
	ds->pm_syscall_errors  = gms.pm_syscall_errors;
	ds->pm_buffer_requests = gms.pm_buffer_requests;
	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
	ds->pm_log_sweeps      = gms.pm_log_sweeps;
	return (0);
}

int
pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
{
	struct pmc_op_getmsr gm;

	gm.pm_pmcid = pmc;
	if (PMC_CALL(PMCGETMSR, &gm) < 0)
		return (-1);
	*msr = gm.pm_msr;
	return (0);
}

int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;
#if defined(__amd64__) || defined(__i386__)
	int cpu_has_iaf_counters;
	unsigned int t;
#endif

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno  = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	bzero(&op_cpu_info, sizeof(op_cpu_info));
	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
	for (n = 0; n < op_cpu_info.pm_nclass; n++)
		memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n],
		    sizeof(cpu_info.pm_classes[n]));

	pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
	    sizeof(struct pmc_class_descr *));

	if (pmc_class_table == NULL)
		return (-1);

	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
		pmc_class_table[n] = NULL;

	/*
	 * Get soft events list.
	 */
	soft_event_info.pm_class = PMC_CLASS_SOFT;
	if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0)
		return (pmc_syscall = -1);

	/* Map soft events to static list. */
	for (n = 0; n < soft_event_info.pm_nevent; n++) {
		soft_event_table[n].pm_ev_name =
		    soft_event_info.pm_events[n].pm_ev_name;
		soft_event_table[n].pm_ev_code =
		    soft_event_info.pm_events[n].pm_ev_code;
	}
	soft_class_table_descr.pm_evc_event_table_size =
	    soft_event_info.pm_nevent;
	soft_class_table_descr.pm_evc_event_table =
	    soft_event_table;

	/*
	 * Fill in the class table.
	 */
	n = 0;

	/* Fill soft events information. */
	pmc_class_table[n++] = &soft_class_table_descr;
#if defined(__amd64__) || defined(__i386__)
	if (cpu_info.pm_cputype != PMC_CPU_GENERIC)
		pmc_class_table[n++] = &tsc_class_table_descr;

	/*
	 * Check if this CPU has fixed function counters.
	 */
	cpu_has_iaf_counters = 0;
	for (t = 0; t < cpu_info.pm_nclass; t++)
		if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF &&
		    cpu_info.pm_classes[t].pm_num > 0)
			cpu_has_iaf_counters = 1;
#endif

#define	PMC_MDEP_INIT(C) do {					\
		pmc_mdep_event_aliases    = C##_aliases;	\
		pmc_mdep_class_list  = C##_pmc_classes;		\
		pmc_mdep_class_list_size =			\
		    PMC_TABLE_SIZE(C##_pmc_classes);		\
	} while (0)

#define	PMC_MDEP_INIT_INTEL_V2(C) do {					\
		PMC_MDEP_INIT(C);					\
		pmc_class_table[n++] = &iaf_class_table_descr;		\
		if (!cpu_has_iaf_counters)				\
			pmc_mdep_event_aliases =			\
				C##_aliases_without_iaf;		\
		pmc_class_table[n] = &C##_class_table_descr;		\
	} while (0)

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		pmc_class_table[n] = &k8_class_table_descr;
		break;
#endif
	case PMC_CPU_GENERIC:
		PMC_MDEP_INIT(generic);
		break;
#if defined(__arm__)
#if defined(__XSCALE__)
	case PMC_CPU_INTEL_XSCALE:
		PMC_MDEP_INIT(xscale);
		pmc_class_table[n] = &xscale_class_table_descr;
		break;
#endif
	case PMC_CPU_ARMV7_CORTEX_A8:
		PMC_MDEP_INIT(cortex_a8);
		pmc_class_table[n] = &cortex_a8_class_table_descr;
		break;
	case PMC_CPU_ARMV7_CORTEX_A9:
		PMC_MDEP_INIT(cortex_a9);
		pmc_class_table[n] = &cortex_a9_class_table_descr;
		break;
#endif
#if defined(__aarch64__)
	case PMC_CPU_ARMV8_CORTEX_A53:
		PMC_MDEP_INIT(cortex_a53);
		pmc_class_table[n] = &cortex_a53_class_table_descr;
		break;
	case PMC_CPU_ARMV8_CORTEX_A57:
		PMC_MDEP_INIT(cortex_a57);
		pmc_class_table[n] = &cortex_a57_class_table_descr;
		break;
#endif
#if defined(__mips__)
	case PMC_CPU_MIPS_24K:
		PMC_MDEP_INIT(mips24k);
		pmc_class_table[n] = &mips24k_class_table_descr;
		break;
	case PMC_CPU_MIPS_74K:
		PMC_MDEP_INIT(mips74k);
		pmc_class_table[n] = &mips74k_class_table_descr;
		break;
	case PMC_CPU_MIPS_OCTEON:
		PMC_MDEP_INIT(octeon);
		pmc_class_table[n] = &octeon_class_table_descr;
		break;
#endif /* __mips__ */
#if defined(__powerpc__)
	case PMC_CPU_PPC_7450:
		PMC_MDEP_INIT(ppc7450);
		pmc_class_table[n] = &ppc7450_class_table_descr;
		break;
	case PMC_CPU_PPC_970:
		PMC_MDEP_INIT(ppc970);
		pmc_class_table[n] = &ppc970_class_table_descr;
		break;
	case PMC_CPU_PPC_E500:
		PMC_MDEP_INIT(e500);
		pmc_class_table[n] = &e500_class_table_descr;
		break;
#endif
	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about.  This shouldn't happen since the abi version check
		 * should have caught this.
		 */
#if defined(__amd64__) || defined(__i386__)
		break;
#endif
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}

const char *
pmc_name_of_capability(enum pmc_caps cap)
{
	int i;

	/*
	 * 'cap' should have a single bit set and should be in
	 * range.
	 */
	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
	    cap > PMC_CAP_LAST) {
		errno = EINVAL;
		return (NULL);
	}

	i = ffs(cap);
	return (pmc_capability_names[i - 1]);
}

const char *
pmc_name_of_class(enum pmc_class pc)
{
	size_t n;

	for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++)
		if (pc == pmc_class_names[n].pm_class)
			return (pmc_class_names[n].pm_name);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_cputype(enum pmc_cputype cp)
{
	size_t n;

	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
		if (cp == pmc_cputype_names[n].pm_cputype)
			return (pmc_cputype_names[n].pm_name);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_disposition(enum pmc_disp pd)
{
	if ((int) pd >= PMC_DISP_FIRST &&
	    pd <= PMC_DISP_LAST)
		return (pmc_disposition_names[pd]);

	errno = EINVAL;
	return (NULL);
}

const char *
_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
{
	const struct pmc_event_descr *ev, *evfence;

	ev = evfence = NULL;
	if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
		ev = k8_event_table;
		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
	} else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
		ev = xscale_event_table;
		evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
	} else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
		ev = mips24k_event_table;
		evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
	} else if (pe >= PMC_EV_MIPS74K_FIRST && pe <= PMC_EV_MIPS74K_LAST) {
		ev = mips74k_event_table;
		evfence = mips74k_event_table + PMC_EVENT_TABLE_SIZE(mips74k);
	} else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) {
		ev = octeon_event_table;
		evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon);
	} else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
		ev = ppc7450_event_table;
		evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
	} else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
		ev = ppc970_event_table;
		evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
	} else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) {
		ev = e500_event_table;
		evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500);
	} else if (pe == PMC_EV_TSC_TSC) {
		ev = tsc_event_table;
		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
	} else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
		ev = soft_event_table;
		evfence = soft_event_table + soft_event_info.pm_nevent;
	}

	for (; ev != evfence; ev++)
		if (pe == ev->pm_ev_code)
			return (ev->pm_ev_name);

	return (NULL);
}

const char *
pmc_name_of_event(enum pmc_event pe)
{
	const char *n;

	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
		return (n);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_mode(enum pmc_mode pm)
{
	if ((int) pm >= PMC_MODE_FIRST &&
	    pm <= PMC_MODE_LAST)
		return (pmc_mode_names[pm]);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_state(enum pmc_state ps)
{
	if ((int) ps >= PMC_STATE_FIRST &&
	    ps <= PMC_STATE_LAST)
		return (pmc_state_names[ps]);

	errno = EINVAL;
	return (NULL);
}

int
pmc_ncpu(void)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	return (cpu_info.pm_ncpu);
}

int
pmc_npmc(int cpu)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
		errno = EINVAL;
		return (-1);
	}

	return (cpu_info.pm_npmc);
}

int
pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
{
	int nbytes, npmc;
	struct pmc_op_getpmcinfo *pmci;

	if ((npmc = pmc_npmc(cpu)) < 0)
		return (-1);

	nbytes = sizeof(struct pmc_op_getpmcinfo) +
	    npmc * sizeof(struct pmc_info);

	if ((pmci = calloc(1, nbytes)) == NULL)
		return (-1);

	pmci->pm_cpu  = cpu;

	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
		free(pmci);
		return (-1);
	}

	/* kernel<->library, library<->userland interfaces are identical */
	*ppmci = (struct pmc_pmcinfo *) pmci;
	return (0);
}

int
pmc_read(pmc_id_t pmc, pmc_value_t *value)
{
	struct pmc_op_pmcrw pmc_read_op;

	pmc_read_op.pm_pmcid = pmc;
	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
	pmc_read_op.pm_value = -1;

	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
		return (-1);

	*value = pmc_read_op.pm_value;
	return (0);
}

int
pmc_release(pmc_id_t pmc)
{
	struct pmc_op_simple	pmc_release_args;

	pmc_release_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
}

int
pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
{
	struct pmc_op_pmcrw pmc_rw_op;

	pmc_rw_op.pm_pmcid = pmc;
	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
	pmc_rw_op.pm_value = newvalue;

	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
		return (-1);

	*oldvaluep = pmc_rw_op.pm_value;
	return (0);
}

int
pmc_set(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcsetcount sc;

	sc.pm_pmcid = pmc;
	sc.pm_count = value;

	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
		return (-1);
	return (0);
}

int
pmc_start(pmc_id_t pmc)
{
	struct pmc_op_simple	pmc_start_args;

	pmc_start_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTART, &pmc_start_args));
}

int
pmc_stop(pmc_id_t pmc)
{
	struct pmc_op_simple	pmc_stop_args;

	pmc_stop_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
}

int
pmc_width(pmc_id_t pmcid, uint32_t *width)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*width = cpu_info.pm_classes[i].pm_width;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

int
pmc_write(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcrw pmc_write_op;

	pmc_write_op.pm_pmcid = pmc;
	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
	pmc_write_op.pm_value = value;
	return (PMC_CALL(PMCRW, &pmc_write_op));
}

int
pmc_writelog(uint32_t userdata)
{
	struct pmc_op_writelog wl;

	wl.pm_userdata = userdata;
	return (PMC_CALL(WRITELOG, &wl));
}