/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/module.h>
#include <sys/pmc.h>
#include <sys/syscall.h>

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <pmc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>

#include "libpmcinternal.h"

/* Function prototypes */
#if defined(__amd64__) || defined(__i386__)
static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__arm__)
#if defined(__XSCALE__)
static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__aarch64__)
static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__mips__)
static int mips_allocate_pmc(enum pmc_event _pe, char* ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif /* __mips__ */
static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);

#if defined(__powerpc__)
static int powerpc_allocate_pmc(enum pmc_event _pe, char* ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif /* __powerpc__ */

#define	PMC_CALL(cmd, params)				\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))

/*
 * Event aliases provide a way for the user to ask for generic events
 * like "cache-misses", or "instructions-retired".  These aliases are
 * mapped to the appropriate canonical event descriptions using a
 * lookup table.
 */
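/*
 * Illustrative sketch (not part of the library proper): with the alias
 * tables defined below, a caller can pass a generic name such as
 * "branches" to pmc_allocate() and have it rewritten to the canonical,
 * CPU-specific event specifier, e.g. "k8-fr-retired-taken-branches" on
 * AMD K8.  The variable names here are hypothetical:
 *
 *	pmc_id_t pmcid;
 *
 *	if (pmc_allocate("branches", PMC_MODE_TC, 0, PMC_CPU_ANY,
 *	    &pmcid) < 0)
 *		warn("cannot allocate PMC");
 */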
struct pmc_event_alias {
	const char	*pm_alias;
	const char	*pm_spec;
};

static const struct pmc_event_alias *pmc_mdep_event_aliases;

/*
 * The pmc_event_descr structure maps symbolic names known to the user
 * to integer codes used by the PMC KLD.
 */
struct pmc_event_descr {
	const char	*pm_ev_name;
	enum pmc_event	pm_ev_code;
};

/*
 * The pmc_class_descr structure maps class name prefixes for
 * event names to event tables and other PMC class data.
 */
struct pmc_class_descr {
	const char	*pm_evc_name;
	size_t		pm_evc_name_size;
	enum pmc_class	pm_evc_class;
	const struct pmc_event_descr *pm_evc_event_table;
	size_t		pm_evc_event_table_size;
	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
};

#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)

#undef	__PMC_EV
#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },

/*
 * PMC_CLASSDEP_TABLE(NAME, CLASS)
 *
 * Define a table mapping event names and aliases to HWPMC event IDs.
 */
#define	PMC_CLASSDEP_TABLE(N, C)				\
	static const struct pmc_event_descr N##_event_table[] =\
	{							\
		__PMC_EV_##C()					\
	}

PMC_CLASSDEP_TABLE(iaf, IAF);
PMC_CLASSDEP_TABLE(k8, K8);
PMC_CLASSDEP_TABLE(xscale, XSCALE);
PMC_CLASSDEP_TABLE(armv7, ARMV7);
PMC_CLASSDEP_TABLE(armv8, ARMV8);
PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
PMC_CLASSDEP_TABLE(mips74k, MIPS74K);
PMC_CLASSDEP_TABLE(octeon, OCTEON);
PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
PMC_CLASSDEP_TABLE(ppc970, PPC970);
PMC_CLASSDEP_TABLE(e500, E500);

static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];

#undef	__PMC_EV_ALIAS
#define	__PMC_EV_ALIAS(N,CODE)	{ N, PMC_EV_##CODE },

static const struct pmc_event_descr cortex_a8_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A8()
};

static const struct pmc_event_descr cortex_a9_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A9()
};

static const struct pmc_event_descr cortex_a53_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A53()
};

static const struct pmc_event_descr cortex_a57_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A57()
};
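/*
 * Hedged sketch of the generated tables: for a hypothetical class "FOO"
 * with events BAR and BAZ, PMC_CLASSDEP_TABLE(foo, FOO) above expands,
 * via the per-class __PMC_EV_FOO() iterator and the __PMC_EV() macro,
 * to approximately:
 *
 *	static const struct pmc_event_descr foo_event_table[] = {
 *		{ "BAR", PMC_EV_FOO_BAR },
 *		{ "BAZ", PMC_EV_FOO_BAZ },
 *	};
 */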
/*
 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
 *
 * Map a CPU to the PMC classes it supports.
 */
#define	PMC_MDEP_TABLE(N,C,...)					\
	static const enum pmc_class N##_pmc_classes[] = {	\
		PMC_CLASS_##C, __VA_ARGS__			\
	}

PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE);
PMC_MDEP_TABLE(cortex_a8, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
PMC_MDEP_TABLE(cortex_a9, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(cortex_a57, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
PMC_MDEP_TABLE(mips74k, MIPS74K, PMC_CLASS_SOFT, PMC_CLASS_MIPS74K);
PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450, PMC_CLASS_TSC);
PMC_MDEP_TABLE(ppc970, PPC970, PMC_CLASS_SOFT, PMC_CLASS_PPC970, PMC_CLASS_TSC);
PMC_MDEP_TABLE(e500, E500, PMC_CLASS_SOFT, PMC_CLASS_E500, PMC_CLASS_TSC);
PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT);

static const struct pmc_event_descr tsc_event_table[] =
{
	__PMC_EV_TSC()
};

#undef	PMC_CLASS_TABLE_DESC
#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
static const struct pmc_class_descr NAME##_class_table_descr =	\
	{							\
		.pm_evc_name  = #CLASS "-",			\
		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
		.pm_evc_class = PMC_CLASS_##CLASS ,		\
		.pm_evc_event_table = EVENTS##_event_table ,	\
		.pm_evc_event_table_size = 			\
			PMC_EVENT_TABLE_SIZE(EVENTS),		\
		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
	}

#if defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
#endif
#if defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
#endif
#if defined(__arm__)
#if defined(__XSCALE__)
PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
#endif
PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7);
PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7);
#endif
#if defined(__aarch64__)
PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
#endif
#if defined(__mips__)
PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips);
PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips);
#endif /* __mips__ */
#if defined(__powerpc__)
PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc);
#endif

static struct pmc_class_descr soft_class_table_descr =
{
	.pm_evc_name  = "SOFT-",
	.pm_evc_name_size = sizeof("SOFT-") - 1,
	.pm_evc_class = PMC_CLASS_SOFT,
	.pm_evc_event_table = NULL,
	.pm_evc_event_table_size = 0,
	.pm_evc_allocate_pmc = soft_allocate_pmc
};

#undef	PMC_CLASS_TABLE_DESC

static const struct pmc_class_descr **pmc_class_table;
#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass

static const enum pmc_class *pmc_mdep_class_list;
static size_t pmc_mdep_class_list_size;
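/*
 * Illustrative expansion (a sketch, not additional code):
 * PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC) above becomes
 * roughly
 *
 *	static const enum pmc_class k8_pmc_classes[] = {
 *		PMC_CLASS_K8, PMC_CLASS_SOFT, PMC_CLASS_TSC
 *	};
 *
 * pmc_init() later points pmc_mdep_class_list at the table matching the
 * host CPU, and pmc_allocate() consults it (via
 * pmc_mdep_is_compatible_class()) to decide which event tables may be
 * searched for a given counter specification.
 */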
/*
 * Mapping tables, mapping enumeration values to human readable
 * strings.
 */

static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

struct pmc_class_map {
	enum pmc_class	pm_class;
	const char	*pm_name;
};

static const struct pmc_class_map pmc_class_names[] = {
#undef	__PMC_CLASS
#define	__PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } ,
	__PMC_CLASSES()
};

struct pmc_cputype_map {
	enum pmc_cputype pm_cputype;
	const char	*pm_name;
};

static const struct pmc_cputype_map pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
	__PMC_CPUS()
};

static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

static const char * pmc_mode_names[] = {
#undef	__PMC_MODE
#define	__PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

static const char * pmc_state_names[] = {
#undef	__PMC_STATE
#define	__PMC_STATE(S) #S ,
	__PMC_STATES()
};

/*
 * Filled in by pmc_init().
 */
static int pmc_syscall = -1;
static struct pmc_cpuinfo cpu_info;
static struct pmc_op_getdyneventinfo soft_event_info;

/* Event masks for events */
struct pmc_masks {
	const char	*pm_name;
	const uint64_t	pm_value;
};
#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
#define	NULLMASK	{ .pm_name = NULL }

#if defined(__amd64__) || defined(__i386__)
static int
pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
{
	const struct pmc_masks *pm;
	char *q, *r;
	int c;

	if (pmask == NULL)	/* no mask keywords */
		return (-1);
	q = strchr(p, '=');	/* skip '=' */
	if (*++q == '\0')	/* no more data */
		return (-1);
	c = 0;			/* count of mask keywords seen */
	while ((r = strsep(&q, "+")) != NULL) {
		for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
		    pm++)
			;
		if (pm->pm_name == NULL) /* not found */
			return (-1);
		*evmask |= pm->pm_value;
		c++;
	}
	return (c);
}
#endif

#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
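/*
 * Hedged sketch of the "mask=" qualifier grammar handled by
 * pmc_parse_mask() above: given the k8_mask_lsrl table defined later in
 * this file and the keyword "mask=es+ds", the function skips past '=',
 * splits the remainder on '+', looks each keyword up case-insensitively,
 * ORs the corresponding bits into *evmask and returns the number of
 * keywords matched (2 here), or -1 for an unrecognized keyword.
 */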
#if defined(__amd64__) || defined(__i386__)
/*
 * AMD K8 PMCs.
 *
 */

static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches", "k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles", "tsc"),
	EV_ALIAS("dc-misses", "k8-dc-miss"),
	EV_ALIAS("ic-misses", "k8-ic-miss"),
	EV_ALIAS("instructions", "k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts", "k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles", "k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};

#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops, 0),
	__K8MASK(multiply-pipe-excluding-junk-ops, 1),
	__K8MASK(store-pipe-excluding-junk-ops, 2),
	__K8MASK(add-pipe-junk-ops, 3),
	__K8MASK(multiply-pipe-junk-ops, 4),
	__K8MASK(store-pipe-junk-ops, 5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es, 0),
	__K8MASK(cs, 1),
	__K8MASK(ss, 2),
	__K8MASK(ds, 3),
	__K8MASK(fs, 4),
	__K8MASK(gs, 5),
	__K8MASK(hs, 6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions, 0),
	__K8MASK(cycles-in-request, 1),
	__K8MASK(cycles-to-complete, 2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid, 0),
	__K8MASK(shared, 1),
	__K8MASK(exclusive, 2),
	__K8MASK(owner, 3),
	__K8MASK(modified, 4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber, 0),
	__K8MASK(piggyback, 1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load, 0),
	__K8MASK(store, 1),
	__K8MASK(nta, 2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses, 0),
	__K8MASK(misses, 1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill, 0),
	__K8MASK(dc-fill, 1),
	__K8MASK(tlb-reload, 2),
	__K8MASK(tag-snoop, 3),
	__K8MASK(cancelled, 4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill, 0),
	__K8MASK(dc-fill, 1),
	__K8MASK(tlb-reload, 2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim, 0),
	__K8MASK(victim-from-l2, 1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87, 0),
	__K8MASK(mmx-3dnow, 1),
	__K8MASK(packed-sse-sse2, 2),
	__K8MASK(scalar-sse-sse2, 3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0, 0),
	__K8MASK(low-op-pos-1, 1),
	__K8MASK(low-op-pos-2, 2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults, 0),
	__K8MASK(sse-retype-microfaults, 1),
	__K8MASK(sse-reclass-microfaults, 2),
	__K8MASK(sse-and-x87-microtraps, 3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit, 0),
	__K8MASK(page-miss, 1),
	__K8MASK(page-conflict, 2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround, 0),
	__K8MASK(read-to-write-turnaround, 1),
	__K8MASK(write-to-read-turnaround, 2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass, 0),
	__K8MASK(memory-controller-lo-pri-bypass, 1),
	__K8MASK(dram-controller-interface-bypass, 2),
	__K8MASK(dram-controller-queue-bypass, 3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte, 0),
	__K8MASK(nonpostwrszdword, 1),
	__K8MASK(postwrszbyte, 2),
	__K8MASK(postwrszdword, 3),
	__K8MASK(rdszbyte, 4),
	__K8MASK(rdszdword, 5),
	__K8MASK(rdmodwr, 6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss, 0),
	__K8MASK(probe-hit, 1),
	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command, 0),
	__K8MASK(data, 1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop, 3),
	NULLMASK
};

#undef	__K8MASK

#define	K8_KW_COUNT	"count"
#define	K8_KW_EDGE	"edge"
#define	K8_KW_INV	"inv"
#define	K8_KW_MASK	"mask"
#define	K8_KW_OS	"os"
#define	K8_KW_USR	"usr"

static int
k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	int n;
	uint32_t count;
	uint64_t evmask;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_amd.pm_amd_config = 0;

	pmask = NULL;
	evmask = 0;

#define	__K8SETMASK(M) pmask = k8_mask_##M

	/* setup parsing tables */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
		__K8SETMASK(fdfo);
		break;
	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
		__K8SETMASK(lsrl);
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		__K8SETMASK(llo);
		break;
	case PMC_EV_K8_DC_REFILL_FROM_L2:
	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
	case PMC_EV_K8_DC_COPYBACK:
		__K8SETMASK(dc);
		break;
	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
		__K8SETMASK(dobee);
		break;
	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
		__K8SETMASK(ddpi);
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		__K8SETMASK(dabl);
		break;
	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
		__K8SETMASK(bilr);
		break;
	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
		__K8SETMASK(bfrlm);
		break;
	case PMC_EV_K8_BU_FILL_INTO_L2:
		__K8SETMASK(bfil);
		break;
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
		__K8SETMASK(frfi);
		break;
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
		__K8SETMASK(frfdoi);
		break;
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		__K8SETMASK(ffe);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
		__K8SETMASK(nmcpae);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
		__K8SETMASK(nmct);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
		__K8SETMASK(nmcbs);
		break;
	case PMC_EV_K8_NB_SIZED_COMMANDS:
		__K8SETMASK(nsc);
		break;
	case PMC_EV_K8_NB_PROBE_RESULT:
		__K8SETMASK(npr);
		break;
	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
		__K8SETMASK(nhbb);
		break;

	default:
		break;		/* no options defined */
	}

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K8_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K8_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, K8_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, K8_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* other post processing */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		/* XXX only available in rev B and later */
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		/* XXX only available in rev C and later */
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		/* XXX CPU Rev A,B: evmask must be zero */
		if (evmask & (evmask - 1)) /* > 1 bit set */
			return (-1);
		if (evmask == 0) {
			evmask = 0x01; /* Rev C and later: #instrs */
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
		break;
	default:
		if (evmask == 0 && pmask != NULL) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_amd.pm_amd_config =
		    AMD_PMC_TO_UNITMASK(evmask);

	return (0);
}

#endif
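/*
 * Illustrative outcome of the K8 allocator above (a sketch; exact bit
 * values depend on the mask tables): for the counter specification
 *
 *	"k8-dc-refill-from-l2,mask=exclusive+shared,usr"
 *
 * pmc_allocate() strips the class prefix and event name, and
 * k8_allocate_pmc() then leaves PMC_CAP_READ, PMC_CAP_WRITE,
 * PMC_CAP_QUALIFIER and PMC_CAP_USER set in pm_caps and encodes the
 * combined "exclusive"/"shared" mask bits into
 * pm_md.pm_amd.pm_amd_config via AMD_PMC_TO_UNITMASK().
 */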
#if defined(__i386__) || defined(__amd64__)
static int
tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	if (pe != PMC_EV_TSC_TSC)
		return (-1);

	/* TSC events must be unqualified. */
	if (ctrspec && *ctrspec != '\0')
		return (-1);

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= PMC_CAP_READ;

	return (0);
}
#endif

static struct pmc_event_alias generic_aliases[] = {
	EV_ALIAS("instructions", "SOFT-CLOCK.HARD"),
	EV_ALIAS(NULL, NULL)
};

static int
soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	(void)ctrspec;
	(void)pmc_config;

	if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
		return (-1);

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	return (0);
}

#if defined(__arm__)
#if defined(__XSCALE__)

static struct pmc_event_alias xscale_aliases[] = {
	EV_ALIAS("branches", "BRANCH_RETIRED"),
	EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"),
	EV_ALIAS("dc-misses", "DC_MISS"),
	EV_ALIAS("ic-misses", "IC_MISS"),
	EV_ALIAS("instructions", "INSTR_RETIRED"),
	EV_ALIAS(NULL, NULL)
};
static int
xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

static struct pmc_event_alias cortex_a8_aliases[] = {
	EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"),
	EV_ALIAS("instructions", "INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias cortex_a9_aliases[] = {
	EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"),
	EV_ALIAS("instructions", "INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};

static int
armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

#if defined(__aarch64__)
static struct pmc_event_alias cortex_a53_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
static struct pmc_event_alias cortex_a57_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
static int
arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
#endif

#if defined(__mips__)

static struct pmc_event_alias mips24k_aliases[] = {
	EV_ALIAS("instructions", "INSTR_EXECUTED"),
	EV_ALIAS("branches", "BRANCH_COMPLETED"),
	EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias mips74k_aliases[] = {
	EV_ALIAS("instructions", "INSTR_EXECUTED"),
	EV_ALIAS("branches", "BRANCH_INSNS"),
	EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCH_INSNS"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias octeon_aliases[] = {
	EV_ALIAS("instructions", "RET"),
	EV_ALIAS("branches", "BR"),
	EV_ALIAS("branch-mispredicts", "BRMIS"),
	EV_ALIAS(NULL, NULL)
};

#define	MIPS_KW_OS		"os"
#define	MIPS_KW_USR		"usr"
#define	MIPS_KW_ANYTHREAD	"anythread"

static int
mips_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, MIPS_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, MIPS_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, MIPS_KW_ANYTHREAD))
			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
		else
			return (-1);
	}

	return (0);
}

#endif /* __mips__ */

#if defined(__powerpc__)

static struct pmc_event_alias ppc7450_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("branches", "BRANCHES_COMPLETED"),
	EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCHES"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias ppc970_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles", "CYCLES"),
	EV_ALIAS(NULL, NULL)
};

static struct pmc_event_alias e500_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles", "CYCLES"),
	EV_ALIAS(NULL, NULL)
};

#define	POWERPC_KW_OS		"os"
#define	POWERPC_KW_USR		"usr"
#define	POWERPC_KW_ANYTHREAD	"anythread"

static int
powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	char *p;

	(void) pe;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWMATCH(p, POWERPC_KW_OS))
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		else if (KWMATCH(p, POWERPC_KW_USR))
			pmc_config->pm_caps |= PMC_CAP_USER;
		else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
		else
			return (-1);
	}

	return (0);
}

#endif /* __powerpc__ */


/*
 * Match an event name `name' with its canonical form.
 *
 * Matches are case insensitive and spaces, periods, underscores and
 * hyphen characters are considered to match each other.
 *
 * Returns 1 for a match, 0 otherwise.
 */

static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	int cc, nc;
	const unsigned char *c, *n;

	c = (const unsigned char *) canonicalname;
	n = (const unsigned char *) name;

	for (; (nc = *n) && (cc = *c); n++, c++) {
		if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
		    (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
			continue;

		if (toupper(nc) == toupper(cc))
			continue;

		return (0);
	}

	if (*n == '\0' && *c == '\0')
		return (1);

	return (0);
}
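/*
 * Examples of the matching rules implemented above (illustrative only):
 * the user-supplied names "l1-dcache.refill", "l1 dcache refill" and
 * "L1_DCACHE_REFILL" all match the canonical name "L1_DCACHE_REFILL",
 * since the comparison is case-insensitive and ' ', '.', '_' and '-'
 * are treated as equivalent separators.
 */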
/*
 * Match an event name against all the events supported by a
 * PMC class.
 *
 * Returns an event descriptor pointer on match or NULL otherwise.
 */
static const struct pmc_event_descr *
pmc_match_event_class(const char *name,
    const struct pmc_class_descr *pcd)
{
	size_t n;
	const struct pmc_event_descr *ev;

	ev = pcd->pm_evc_event_table;
	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
		if (pmc_match_event_name(name, ev->pm_ev_name))
			return (ev);

	return (NULL);
}

static int
pmc_mdep_is_compatible_class(enum pmc_class pc)
{
	size_t n;

	for (n = 0; n < pmc_mdep_class_list_size; n++)
		if (pmc_mdep_class_list[n] == pc)
			return (1);
	return (0);
}

/*
 * API entry points
 */

int
pmc_allocate(const char *ctrspec, enum pmc_mode mode,
    uint32_t flags, int cpu, pmc_id_t *pmcid)
{
	size_t n;
	int retval;
	char *r, *spec_copy;
	const char *ctrname;
	const struct pmc_event_descr *ev;
	const struct pmc_event_alias *alias;
	struct pmc_op_pmcallocate pmc_config;
	const struct pmc_class_descr *pcd;

	spec_copy = NULL;
	retval = -1;

	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
		errno = EINVAL;
		goto out;
	}
	bzero(&pmc_config, sizeof(pmc_config));
	pmc_config.pm_cpu = cpu;
	pmc_config.pm_mode = mode;
	pmc_config.pm_flags = flags;
	if (PMC_IS_SAMPLING_MODE(mode))
		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
	/*
	 * Can we pull this straight from the pmu table?
	 */
	r = spec_copy = strdup(ctrspec);
	ctrname = strsep(&r, ",");
	if (pmc_pmu_pmcallocate(ctrname, &pmc_config) == 0) {
		if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0) {
			goto out;
		}
		retval = 0;
		*pmcid = pmc_config.pm_pmcid;
		goto out;
	} else {
		free(spec_copy);
		spec_copy = NULL;
	}

	/* replace an event alias with the canonical event specifier */
	if (pmc_mdep_event_aliases)
		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
			if (!strcasecmp(ctrspec, alias->pm_alias)) {
				spec_copy = strdup(alias->pm_spec);
				break;
			}

	if (spec_copy == NULL)
		spec_copy = strdup(ctrspec);

	r = spec_copy;
	ctrname = strsep(&r, ",");

	/*
	 * If an explicit class prefix was given by the user, restrict the
	 * search for the event to the specified PMC class.
	 */
	ev = NULL;
	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
		    strncasecmp(ctrname, pcd->pm_evc_name,
		    pcd->pm_evc_name_size) == 0) {
			if ((ev = pmc_match_event_class(ctrname +
			    pcd->pm_evc_name_size, pcd)) == NULL) {
				errno = EINVAL;
				goto out;
			}
			break;
		}
	}

	/*
	 * Otherwise, search for this event in all compatible PMC
	 * classes.
	 */
	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
			ev = pmc_match_event_class(ctrname, pcd);
	}

	if (ev == NULL) {
		errno = EINVAL;
		goto out;
	}

	pmc_config.pm_ev = ev->pm_ev_code;
	pmc_config.pm_class = pcd->pm_evc_class;

	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
		errno = EINVAL;
		goto out;
	}

	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
		goto out;

	*pmcid = pmc_config.pm_pmcid;

	retval = 0;

out:
	if (spec_copy)
		free(spec_copy);

	return (retval);
}

int
pmc_attach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_attach_args;

	pmc_attach_args.pm_pmc = pmc;
	pmc_attach_args.pm_pid = pid;

	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
}

int
pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*caps = cpu_info.pm_classes[i].pm_caps;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

int
pmc_configure_logfile(int fd)
{
	struct pmc_op_configurelog cla;

	cla.pm_logfd = fd;
	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
		return (-1);
	return (0);
}

int
pmc_cpuinfo(const struct pmc_cpuinfo **pci)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	*pci = &cpu_info;
	return (0);
}

int
pmc_detach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_detach_args;

	pmc_detach_args.pm_pmc = pmc;
	pmc_detach_args.pm_pid = pid;
	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
}

int
pmc_disable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_DISABLED;
	return (PMC_CALL(PMCADMIN, &ssa));
}

int
pmc_enable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_FREE;
	return (PMC_CALL(PMCADMIN, &ssa));
}
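/*
 * Hedged usage sketch for pmc_event_names_of_class(), defined below:
 * only the returned array itself needs to be freed, since the name
 * pointers reference tables owned by the library (an assumption based
 * on the tables above).
 *
 *	const char **names;
 *	int i, nevents;
 *
 *	if (pmc_event_names_of_class(PMC_CLASS_SOFT, &names,
 *	    &nevents) == 0) {
 *		for (i = 0; i < nevents; i++)
 *			printf("%s\n", names[i]);
 *		free(names);
 *	}
 */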
/*
 * Return a list of events known to a given PMC class.  'cl' is the
 * PMC class identifier, 'eventnames' is the returned list of 'const
 * char *' pointers pointing to the names of the events. 'nevents' is
 * the number of event name pointers returned.
 *
 * The space for 'eventnames' is allocated using malloc(3).  The caller
 * is responsible for freeing this space when done.
 */
int
pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
    int *nevents)
{
	int count;
	const char **names;
	const struct pmc_event_descr *ev;

	switch (cl)
	{
	case PMC_CLASS_IAF:
		ev = iaf_event_table;
		count = PMC_EVENT_TABLE_SIZE(iaf);
		break;
	case PMC_CLASS_TSC:
		ev = tsc_event_table;
		count = PMC_EVENT_TABLE_SIZE(tsc);
		break;
	case PMC_CLASS_K8:
		ev = k8_event_table;
		count = PMC_EVENT_TABLE_SIZE(k8);
		break;
	case PMC_CLASS_XSCALE:
		ev = xscale_event_table;
		count = PMC_EVENT_TABLE_SIZE(xscale);
		break;
	case PMC_CLASS_ARMV7:
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		}
		break;
	case PMC_CLASS_ARMV8:
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		}
		break;
	case PMC_CLASS_MIPS24K:
		ev = mips24k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips24k);
		break;
	case PMC_CLASS_MIPS74K:
		ev = mips74k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips74k);
		break;
	case PMC_CLASS_OCTEON:
		ev = octeon_event_table;
		count = PMC_EVENT_TABLE_SIZE(octeon);
		break;
	case PMC_CLASS_PPC7450:
		ev = ppc7450_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc7450);
		break;
	case PMC_CLASS_PPC970:
		ev = ppc970_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc970);
		break;
	case PMC_CLASS_E500:
		ev = e500_event_table;
		count = PMC_EVENT_TABLE_SIZE(e500);
		break;
	case PMC_CLASS_SOFT:
		ev = soft_event_table;
		count = soft_event_info.pm_nevent;
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	if ((names = malloc(count * sizeof(const char *))) == NULL)
		return (-1);

	*eventnames = names;
	*nevents = count;

	for (;count--; ev++, names++)
		*names = ev->pm_ev_name;

	return (0);
}

int
pmc_flush_logfile(void)
{
	return (PMC_CALL(FLUSHLOG,0));
}

int
pmc_close_logfile(void)
{
	return (PMC_CALL(CLOSELOG,0));
}

int
pmc_get_driver_stats(struct pmc_driverstats *ds)
{
	struct pmc_op_getdriverstats gms;

	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
		return (-1);

	/* copy out fields in the current userland<->library interface */
	ds->pm_intr_ignored = gms.pm_intr_ignored;
	ds->pm_intr_processed = gms.pm_intr_processed;
	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
	ds->pm_syscalls = gms.pm_syscalls;
	ds->pm_syscall_errors = gms.pm_syscall_errors;
	ds->pm_buffer_requests = gms.pm_buffer_requests;
	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
	ds->pm_log_sweeps = gms.pm_log_sweeps;
	return (0);
}

int
pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
{
	struct pmc_op_getmsr gm;

	gm.pm_pmcid = pmc;
	if (PMC_CALL(PMCGETMSR, &gm) < 0)
		return (-1);
	*msr = gm.pm_msr;
	return (0);
}
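/*
 * End-to-end usage sketch for the entry points in this file (hedged,
 * error handling trimmed; the assumption that a pid of 0 attaches the
 * PMC to the calling process follows pmc_attach(3), and PMC_CPU_ANY is
 * the usual cpu argument for a process-virtual PMC):
 *
 *	pmc_id_t pmcid;
 *	pmc_value_t v;
 *
 *	pmc_init();
 *	pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY, &pmcid);
 *	pmc_attach(pmcid, 0);
 *	pmc_start(pmcid);
 *	... workload being measured ...
 *	pmc_stop(pmcid);
 *	pmc_read(pmcid, &v);
 *	pmc_release(pmcid);
 */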
int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;
#if defined(__amd64__) || defined(__i386__)
	int cpu_has_iaf_counters;
	unsigned int t;
#endif

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	bzero(&op_cpu_info, sizeof(op_cpu_info));
	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass = op_cpu_info.pm_nclass;
	for (n = 0; n < op_cpu_info.pm_nclass; n++)
		memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n],
		    sizeof(cpu_info.pm_classes[n]));

	pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
	    sizeof(struct pmc_class_descr *));

	if (pmc_class_table == NULL)
		return (-1);

	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
		pmc_class_table[n] = NULL;

	/*
	 * Get soft events list.
	 */
	soft_event_info.pm_class = PMC_CLASS_SOFT;
	if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0)
		return (pmc_syscall = -1);

	/* Map soft events to static list. */
	for (n = 0; n < soft_event_info.pm_nevent; n++) {
		soft_event_table[n].pm_ev_name =
		    soft_event_info.pm_events[n].pm_ev_name;
		soft_event_table[n].pm_ev_code =
		    soft_event_info.pm_events[n].pm_ev_code;
	}
	soft_class_table_descr.pm_evc_event_table_size =
	    soft_event_info.pm_nevent;
	soft_class_table_descr.pm_evc_event_table =
	    soft_event_table;

	/*
	 * Fill in the class table.
	 */
	n = 0;

	/* Fill soft events information. */
	pmc_class_table[n++] = &soft_class_table_descr;
#if defined(__amd64__) || defined(__i386__)
	if (cpu_info.pm_cputype != PMC_CPU_GENERIC)
		pmc_class_table[n++] = &tsc_class_table_descr;

	/*
	 * Check if this CPU has fixed function counters.
	 */
	cpu_has_iaf_counters = 0;
	for (t = 0; t < cpu_info.pm_nclass; t++)
		if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF &&
		    cpu_info.pm_classes[t].pm_num > 0)
			cpu_has_iaf_counters = 1;
#endif

#define	PMC_MDEP_INIT(C) do {					\
		pmc_mdep_event_aliases = C##_aliases;		\
		pmc_mdep_class_list = C##_pmc_classes;		\
		pmc_mdep_class_list_size =			\
		    PMC_TABLE_SIZE(C##_pmc_classes);		\
	} while (0)

#define	PMC_MDEP_INIT_INTEL_V2(C) do {					\
		PMC_MDEP_INIT(C);					\
		pmc_class_table[n++] = &iaf_class_table_descr;		\
		if (!cpu_has_iaf_counters)				\
			pmc_mdep_event_aliases =			\
			    C##_aliases_without_iaf;			\
		pmc_class_table[n] = &C##_class_table_descr;		\
	} while (0)

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		pmc_class_table[n] = &k8_class_table_descr;
		break;
#endif
	case PMC_CPU_GENERIC:
		PMC_MDEP_INIT(generic);
		break;
#if defined(__arm__)
#if defined(__XSCALE__)
	case PMC_CPU_INTEL_XSCALE:
		PMC_MDEP_INIT(xscale);
		pmc_class_table[n] = &xscale_class_table_descr;
		break;
#endif
	case PMC_CPU_ARMV7_CORTEX_A8:
		PMC_MDEP_INIT(cortex_a8);
		pmc_class_table[n] = &cortex_a8_class_table_descr;
		break;
	case PMC_CPU_ARMV7_CORTEX_A9:
		PMC_MDEP_INIT(cortex_a9);
		pmc_class_table[n] = &cortex_a9_class_table_descr;
		break;
#endif
#if defined(__aarch64__)
	case PMC_CPU_ARMV8_CORTEX_A53:
		PMC_MDEP_INIT(cortex_a53);
		pmc_class_table[n] = &cortex_a53_class_table_descr;
		break;
	case PMC_CPU_ARMV8_CORTEX_A57:
		PMC_MDEP_INIT(cortex_a57);
		pmc_class_table[n] = &cortex_a57_class_table_descr;
		break;
#endif
#if defined(__mips__)
	case PMC_CPU_MIPS_24K:
		PMC_MDEP_INIT(mips24k);
		pmc_class_table[n] = &mips24k_class_table_descr;
		break;
	case PMC_CPU_MIPS_74K:
		PMC_MDEP_INIT(mips74k);
		pmc_class_table[n] = &mips74k_class_table_descr;
		break;
	case PMC_CPU_MIPS_OCTEON:
		PMC_MDEP_INIT(octeon);
		pmc_class_table[n] = &octeon_class_table_descr;
		break;
#endif /* __mips__ */
#if defined(__powerpc__)
	case PMC_CPU_PPC_7450:
		PMC_MDEP_INIT(ppc7450);
		pmc_class_table[n] = &ppc7450_class_table_descr;
		break;
	case PMC_CPU_PPC_970:
		PMC_MDEP_INIT(ppc970);
		pmc_class_table[n] = &ppc970_class_table_descr;
		break;
	case PMC_CPU_PPC_E500:
		PMC_MDEP_INIT(e500);
		pmc_class_table[n] = &e500_class_table_descr;
		break;
#endif
	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about.  This shouldn't happen since the abi version check
		 * should have caught this.
		 */
#if defined(__amd64__) || defined(__i386__)
		break;
#endif
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}
const char *
pmc_name_of_capability(enum pmc_caps cap)
{
	int i;

	/*
	 * 'cap' should have a single bit set and should be in
	 * range.
	 */
	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
	    cap > PMC_CAP_LAST) {
		errno = EINVAL;
		return (NULL);
	}

	i = ffs(cap);
	return (pmc_capability_names[i - 1]);
}

const char *
pmc_name_of_class(enum pmc_class pc)
{
	size_t n;

	for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++)
		if (pc == pmc_class_names[n].pm_class)
			return (pmc_class_names[n].pm_name);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_cputype(enum pmc_cputype cp)
{
	size_t n;

	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
		if (cp == pmc_cputype_names[n].pm_cputype)
			return (pmc_cputype_names[n].pm_name);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_disposition(enum pmc_disp pd)
{
	if ((int) pd >= PMC_DISP_FIRST &&
	    pd <= PMC_DISP_LAST)
		return (pmc_disposition_names[pd]);

	errno = EINVAL;
	return (NULL);
}

const char *
_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
{
	const struct pmc_event_descr *ev, *evfence;

	ev = evfence = NULL;
	if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
		ev = k8_event_table;
		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
	} else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
		ev = xscale_event_table;
		evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
	} else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
		ev = mips24k_event_table;
		evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
	} else if (pe >= PMC_EV_MIPS74K_FIRST && pe <= PMC_EV_MIPS74K_LAST) {
		ev = mips74k_event_table;
		evfence = mips74k_event_table + PMC_EVENT_TABLE_SIZE(mips74k);
	} else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) {
		ev = octeon_event_table;
		evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon);
	} else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
		ev = ppc7450_event_table;
		evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
	} else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
		ev = ppc970_event_table;
		evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
	} else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) {
		ev = e500_event_table;
		evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500);
	} else if (pe == PMC_EV_TSC_TSC) {
		ev = tsc_event_table;
		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
	} else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
		ev = soft_event_table;
		evfence = soft_event_table + soft_event_info.pm_nevent;
	}

	for (; ev != evfence; ev++)
		if (pe == ev->pm_ev_code)
			return (ev->pm_ev_name);

	return (NULL);
}

const char *
pmc_name_of_event(enum pmc_event pe)
{
	const char *n;

	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
		return (n);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_mode(enum pmc_mode pm)
{
	if ((int) pm >= PMC_MODE_FIRST &&
	    pm <= PMC_MODE_LAST)
		return (pmc_mode_names[pm]);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_state(enum pmc_state ps)
{
	if ((int) ps >= PMC_STATE_FIRST &&
	    ps <= PMC_STATE_LAST)
		return (pmc_state_names[ps]);

	errno = EINVAL;
	return (NULL);
}

int
pmc_ncpu(void)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	return (cpu_info.pm_ncpu);
}

int
pmc_npmc(int cpu)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
		errno = EINVAL;
		return (-1);
	}

	return (cpu_info.pm_npmc);
}

int
pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
{
	int nbytes, npmc;
	struct pmc_op_getpmcinfo *pmci;

	if ((npmc = pmc_npmc(cpu)) < 0)
		return (-1);

	nbytes = sizeof(struct pmc_op_getpmcinfo) +
	    npmc * sizeof(struct pmc_info);

	if ((pmci = calloc(1, nbytes)) == NULL)
		return (-1);

	pmci->pm_cpu = cpu;

	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
		free(pmci);
		return (-1);
	}

	/* kernel<->library, library<->userland interfaces are identical */
	*ppmci = (struct pmc_pmcinfo *) pmci;
	return (0);
}

int
pmc_read(pmc_id_t pmc, pmc_value_t *value)
{
	struct pmc_op_pmcrw pmc_read_op;

	pmc_read_op.pm_pmcid = pmc;
	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
	pmc_read_op.pm_value = -1;

	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
		return (-1);

	*value = pmc_read_op.pm_value;
	return (0);
}

int
pmc_release(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_release_args;

	pmc_release_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
}

int
pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
{
	struct pmc_op_pmcrw pmc_rw_op;

	pmc_rw_op.pm_pmcid = pmc;
	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
	pmc_rw_op.pm_value = newvalue;

	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
		return (-1);

	*oldvaluep = pmc_rw_op.pm_value;
	return (0);
}

int
pmc_set(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcsetcount sc;

	sc.pm_pmcid = pmc;
	sc.pm_count = value;

	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
		return (-1);
	return (0);
}

int
pmc_start(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_start_args;

	pmc_start_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTART, &pmc_start_args));
}

int
pmc_stop(pmc_id_t pmc)
{
	struct pmc_op_simple pmc_stop_args;

	pmc_stop_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
}

int
pmc_width(pmc_id_t pmcid, uint32_t *width)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*width = cpu_info.pm_classes[i].pm_width;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

int
pmc_write(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcrw pmc_write_op;

	pmc_write_op.pm_pmcid = pmc;
	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
	pmc_write_op.pm_value = value;
	return (PMC_CALL(PMCRW, &pmc_write_op));
}

int
pmc_writelog(uint32_t userdata)
{
	struct pmc_op_writelog wl;

	wl.pm_userdata = userdata;
	return (PMC_CALL(WRITELOG, &wl));
}