/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/sysctl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <string.h>
#include <pmc.h>
#include <pmclog.h>
#include <assert.h>
#include <libpmcstat.h>
#include "pmu-events/pmu-events.h"

#if defined(__amd64__) || defined(__i386__)
struct pmu_alias {
	const char *pa_alias;
	const char *pa_name;
};

typedef enum {
	PMU_INVALID,
	PMU_INTEL,
	PMU_AMD,
} pmu_mfr_t;

static struct pmu_alias pmu_intel_alias_table[] = {
	{"UNHALTED_CORE_CYCLES", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
	{"UNHALTED-CORE-CYCLES", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
	{"LLC_MISSES", "LONGEST_LAT_CACHE.MISS"},
	{"LLC-MISSES", "LONGEST_LAT_CACHE.MISS"},
	{"LLC_REFERENCE", "LONGEST_LAT_CACHE.REFERENCE"},
	{"LLC-REFERENCE", "LONGEST_LAT_CACHE.REFERENCE"},
	{"LLC_MISS_RHITM", "mem_load_l3_miss_retired.remote_hitm"},
	{"LLC-MISS-RHITM", "mem_load_l3_miss_retired.remote_hitm"},
	{"RESOURCE_STALL", "RESOURCE_STALLS.ANY"},
	{"RESOURCE_STALLS_ANY", "RESOURCE_STALLS.ANY"},
	{"BRANCH_INSTRUCTION_RETIRED", "BR_INST_RETIRED.ALL_BRANCHES"},
	{"BRANCH-INSTRUCTION-RETIRED", "BR_INST_RETIRED.ALL_BRANCHES"},
	{"BRANCH_MISSES_RETIRED", "BR_MISP_RETIRED.ALL_BRANCHES"},
	{"BRANCH-MISSES-RETIRED", "BR_MISP_RETIRED.ALL_BRANCHES"},
	{"cycles", "tsc-tsc"},
	{"instructions", "inst-retired.any_p"},
	{"branch-mispredicts", "br_misp_retired.all_branches"},
	{"branches", "br_inst_retired.all_branches"},
	{"interrupts", "hw_interrupts.received"},
	{"ic-misses", "frontend_retired.l1i_miss"},
	{NULL, NULL},
};

static struct pmu_alias pmu_amd_alias_table[] = {
	{"UNHALTED_CORE_CYCLES", "ls_not_halted_cyc"},
	{"UNHALTED-CORE-CYCLES", "ls_not_halted_cyc"},
	{NULL, NULL},
};

static pmu_mfr_t
pmu_events_mfr(void)
{
	char *buf;
	size_t s;
	pmu_mfr_t mfr;

	if (sysctlbyname("kern.hwpmc.cpuid", (void *)NULL, &s,
	    (void *)NULL, 0) == -1)
		return (PMU_INVALID);
	if ((buf = malloc(s + 1)) == NULL)
		return (PMU_INVALID);
	if (sysctlbyname("kern.hwpmc.cpuid", buf, &s,
	    (void *)NULL, 0) == -1) {
		free(buf);
		return (PMU_INVALID);
	}
	if (strcasestr(buf, "AuthenticAMD") != NULL)
		mfr = PMU_AMD;
	else if (strcasestr(buf, "GenuineIntel") != NULL)
		mfr = PMU_INTEL;
	else
		mfr = PMU_INVALID;
	free(buf);
	return (mfr);
}
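
/*
 * Illustrative note (not part of the original sources): the value of
 * kern.hwpmc.cpuid embeds the vendor string tested above, e.g. a string
 * along the lines of "GenuineIntel-6-55" on a recent Intel machine.
 * pmu_events_map_get() below compares the same sysctl value against the
 * cpuid entries of pmu_events_map[] to select the event table for this CPU.
 */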

/*
 * The Intel fixed mode counters are:
 *	"inst_retired.any",
 *	"cpu_clk_unhalted.thread",
 *	"cpu_clk_unhalted.thread_any",
 *	"cpu_clk_unhalted.ref_tsc",
 */

static const char *
pmu_alias_get(const char *name)
{
	pmu_mfr_t mfr;
	struct pmu_alias *pa;
	struct pmu_alias *pmu_alias_table;

	if ((mfr = pmu_events_mfr()) == PMU_INVALID)
		return (name);
	if (mfr == PMU_AMD)
		pmu_alias_table = pmu_amd_alias_table;
	else if (mfr == PMU_INTEL)
		pmu_alias_table = pmu_intel_alias_table;
	else
		return (name);

	for (pa = pmu_alias_table; pa->pa_alias != NULL; pa++)
		if (strcasecmp(name, pa->pa_alias) == 0)
			return (pa->pa_name);

	return (name);
}

struct pmu_event_desc {
	uint64_t ped_period;
	uint64_t ped_offcore_rsp;
	uint64_t ped_l3_thread;
	uint64_t ped_l3_slice;
	uint32_t ped_event;
	uint32_t ped_frontend;
	uint32_t ped_ldlat;
	uint32_t ped_config1;
	int16_t ped_umask;
	uint8_t ped_cmask;
	uint8_t ped_any;
	uint8_t ped_inv;
	uint8_t ped_edge;
	uint8_t ped_fc_mask;
	uint8_t ped_ch_mask;
};

static const struct pmu_events_map *
pmu_events_map_get(const char *cpuid)
{
	size_t s;
	char buf[64];
	const struct pmu_events_map *pme;

	if (cpuid != NULL) {
		strlcpy(buf, cpuid, sizeof(buf));
	} else {
		if (sysctlbyname("kern.hwpmc.cpuid", (void *)NULL, &s,
		    (void *)NULL, 0) == -1)
			return (NULL);
		if (sysctlbyname("kern.hwpmc.cpuid", buf, &s,
		    (void *)NULL, 0) == -1)
			return (NULL);
	}
	for (pme = pmu_events_map; pme->cpuid != NULL; pme++)
		if (strcmp(buf, pme->cpuid) == 0)
			return (pme);
	return (NULL);
}

static const struct pmu_event *
pmu_event_get(const char *cpuid, const char *event_name, int *idx)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;
	int i;

	if ((pme = pmu_events_map_get(cpuid)) == NULL)
		return (NULL);
	for (i = 0, pe = pme->table; pe->name || pe->desc || pe->event; pe++, i++) {
		if (pe->name == NULL)
			continue;
		if (strcasecmp(pe->name, event_name) == 0) {
			if (idx)
				*idx = i;
			return (pe);
		}
	}
	return (NULL);
}

int
pmc_pmu_idx_get_by_event(const char *cpuid, const char *event)
{
	int idx;
	const char *realname;

	realname = pmu_alias_get(event);
	if (pmu_event_get(cpuid, realname, &idx) == NULL)
		return (-1);
	return (idx);
}

const char *
pmc_pmu_event_get_by_idx(const char *cpuid, int idx)
{
	const struct pmu_events_map *pme;

	if ((pme = pmu_events_map_get(cpuid)) == NULL)
		return (NULL);
	assert(pme->table[idx].name);
	return (pme->table[idx].name);
}
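
/*
 * pmu_parse_event() below decodes the kernel-style event strings kept in
 * the pmu-events tables: comma-separated key=value pairs, with each numeric
 * field read in the radix expected by the corresponding strtol() call.
 * An illustrative (made-up) entry would be:
 *
 *	"event=0xd1,umask=0x40,period=100007"
 *
 * which selects event 0xd1 with unit mask 0x40 and a sampling period of
 * 100007.  Unrecognized keys are only reported when the PMUDEBUG
 * environment variable is set to "true".
 */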
static int
pmu_parse_event(struct pmu_event_desc *ped, const char *eventin)
{
	char *event;
	char *kvp, *key, *value, *r;
	char *debug;

	if ((event = strdup(eventin)) == NULL)
		return (ENOMEM);
	r = event;
	bzero(ped, sizeof(*ped));
	ped->ped_period = DEFAULT_SAMPLE_COUNT;
	ped->ped_umask = -1;
	while ((kvp = strsep(&event, ",")) != NULL) {
		key = strsep(&kvp, "=");
		if (key == NULL)
			abort();
		value = kvp;
		if (strcmp(key, "umask") == 0)
			ped->ped_umask = strtol(value, NULL, 16);
		else if (strcmp(key, "event") == 0)
			ped->ped_event = strtol(value, NULL, 16);
		else if (strcmp(key, "period") == 0)
			ped->ped_period = strtol(value, NULL, 10);
		else if (strcmp(key, "offcore_rsp") == 0)
			ped->ped_offcore_rsp = strtol(value, NULL, 16);
		else if (strcmp(key, "any") == 0)
			ped->ped_any = strtol(value, NULL, 10);
		else if (strcmp(key, "cmask") == 0)
			ped->ped_cmask = strtol(value, NULL, 10);
		else if (strcmp(key, "inv") == 0)
			ped->ped_inv = strtol(value, NULL, 10);
		else if (strcmp(key, "edge") == 0)
			ped->ped_edge = strtol(value, NULL, 10);
		else if (strcmp(key, "frontend") == 0)
			ped->ped_frontend = strtol(value, NULL, 16);
		else if (strcmp(key, "ldlat") == 0)
			ped->ped_ldlat = strtol(value, NULL, 16);
		else if (strcmp(key, "fc_mask") == 0)
			ped->ped_fc_mask = strtol(value, NULL, 16);
		else if (strcmp(key, "ch_mask") == 0)
			ped->ped_ch_mask = strtol(value, NULL, 16);
		else if (strcmp(key, "config1") == 0)
			ped->ped_config1 = strtol(value, NULL, 16);
		else if (strcmp(key, "l3_thread_mask") == 0)
			ped->ped_l3_thread = strtol(value, NULL, 16);
		else if (strcmp(key, "l3_slice_mask") == 0)
			ped->ped_l3_slice = strtol(value, NULL, 16);
		else {
			debug = getenv("PMUDEBUG");
			if (debug != NULL && strcmp(debug, "true") == 0 && value != NULL)
				printf("unrecognized kvpair: %s:%s\n", key, value);
		}
	}
	free(r);
	return (0);
}

uint64_t
pmc_pmu_sample_rate_get(const char *event_name)
{
	const struct pmu_event *pe;
	struct pmu_event_desc ped;

	event_name = pmu_alias_get(event_name);
	if ((pe = pmu_event_get(NULL, event_name, NULL)) == NULL)
		return (DEFAULT_SAMPLE_COUNT);
	if (pe->alias && (pe = pmu_event_get(NULL, pe->alias, NULL)) == NULL)
		return (DEFAULT_SAMPLE_COUNT);
	if (pe->event == NULL)
		return (DEFAULT_SAMPLE_COUNT);
	if (pmu_parse_event(&ped, pe->event))
		return (DEFAULT_SAMPLE_COUNT);
	return (ped.ped_period);
}

int
pmc_pmu_enabled(void)
{

	return (pmu_events_map_get(NULL) != NULL);
}

void
pmc_pmu_print_counters(const char *event_name)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;
	struct pmu_event_desc ped;
	char *debug;
	int do_debug;

	debug = getenv("PMUDEBUG");
	do_debug = 0;

	if (debug != NULL && strcmp(debug, "true") == 0)
		do_debug = 1;
	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (event_name != NULL && strcasestr(pe->name, event_name) == NULL)
			continue;
		printf("\t%s\n", pe->name);
		if (do_debug)
			pmu_parse_event(&ped, pe->event);
	}
}

void
pmc_pmu_print_counter_desc(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) != NULL &&
		    pe->desc != NULL)
			printf("%s:\t%s\n", pe->name, pe->desc);
	}
}

void
pmc_pmu_print_counter_desc_long(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) != NULL) {
			if (pe->long_desc != NULL)
				printf("%s:\n%s\n", pe->name, pe->long_desc);
			else if (pe->desc != NULL)
				printf("%s:\t%s\n", pe->name, pe->desc);
		}
	}
}

void
pmc_pmu_print_counter_full(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) == NULL)
			continue;
		printf("name: %s\n", pe->name);
		if (pe->long_desc != NULL)
			printf("desc: %s\n", pe->long_desc);
		else if (pe->desc != NULL)
			printf("desc: %s\n", pe->desc);
		if (pe->event != NULL)
			printf("event: %s\n", pe->event);
		if (pe->topic != NULL)
			printf("topic: %s\n", pe->topic);
		if (pe->pmu != NULL)
			printf("pmu: %s\n", pe->pmu);
		if (pe->unit != NULL)
			printf("unit: %s\n", pe->unit);
		if (pe->perpkg != NULL)
			printf("perpkg: %s\n", pe->perpkg);
		if (pe->metric_expr != NULL)
			printf("metric_expr: %s\n", pe->metric_expr);
		if (pe->metric_name != NULL)
			printf("metric_name: %s\n", pe->metric_name);
		if (pe->metric_group != NULL)
			printf("metric_group: %s\n", pe->metric_group);
	}
}
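
/*
 * The helpers below translate a parsed event descriptor into the
 * machine-dependent half of a pmc_op_pmcallocate request.  The AMD_PMC_*
 * and IAP_* encoding macros come from the machine-dependent hwpmc headers
 * reached through <pmc.h>.
 */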
static int
pmc_pmu_amd_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
    struct pmu_event_desc *ped)
{
	struct pmc_md_amd_op_pmcallocate *amd;
	const struct pmu_event *pe;
	int idx = -1;

	amd = &pm->pm_md.pm_amd;
	if (ped->ped_umask > 0) {
		pm->pm_caps |= PMC_CAP_QUALIFIER;
		amd->pm_amd_config |= AMD_PMC_TO_UNITMASK(ped->ped_umask);
	}
	pm->pm_class = PMC_CLASS_K8;
	pe = pmu_event_get(NULL, event_name, &idx);

	if (pe->topic != NULL && strcmp("l3cache", pe->topic) == 0) {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_L3_CACHE;
		amd->pm_amd_config |= AMD_PMC_TO_L3SLICE(ped->ped_l3_slice);
		amd->pm_amd_config |= AMD_PMC_TO_L3CORE(ped->ped_l3_thread);
	} else if (pe->topic != NULL && strcmp("data fabric", pe->topic) == 0) {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK_DF(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_DATA_FABRIC;
	} else {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_CORE;
		if ((pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0 ||
		    (pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) ==
		    (PMC_CAP_USER|PMC_CAP_SYSTEM))
			amd->pm_amd_config |= (AMD_PMC_USR | AMD_PMC_OS);
		else if (pm->pm_caps & PMC_CAP_USER)
			amd->pm_amd_config |= AMD_PMC_USR;
		else if (pm->pm_caps & PMC_CAP_SYSTEM)
			amd->pm_amd_config |= AMD_PMC_OS;
		if (ped->ped_edge)
			amd->pm_amd_config |= AMD_PMC_EDGE;
		if (ped->ped_inv)
			amd->pm_amd_config |= AMD_PMC_INVERT;
		if (pm->pm_caps & PMC_CAP_INTERRUPT)
			amd->pm_amd_config |= AMD_PMC_INT;
	}
	return (0);
}

static int
pmc_pmu_intel_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
    struct pmu_event_desc *ped)
{
	struct pmc_md_iap_op_pmcallocate *iap;
	int isfixed;

	isfixed = 0;
	iap = &pm->pm_md.pm_iap;
	if (strcasestr(event_name, "UNC_") == event_name ||
	    strcasestr(event_name, "uncore") != NULL) {
		pm->pm_class = PMC_CLASS_UCP;
		pm->pm_caps |= PMC_CAP_QUALIFIER;
	} else if ((ped->ped_umask == -1) ||
	    (ped->ped_event == 0x0 && ped->ped_umask == 0x3)) {
		pm->pm_class = PMC_CLASS_IAF;
	} else {
		pm->pm_class = PMC_CLASS_IAP;
		pm->pm_caps |= PMC_CAP_QUALIFIER;
	}
	iap->pm_iap_config |= IAP_EVSEL(ped->ped_event);
	if (ped->ped_umask > 0)
		iap->pm_iap_config |= IAP_UMASK(ped->ped_umask);
	iap->pm_iap_config |= IAP_CMASK(ped->ped_cmask);
	iap->pm_iap_rsp = ped->ped_offcore_rsp;

	if ((pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0 ||
	    (pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) ==
	    (PMC_CAP_USER|PMC_CAP_SYSTEM))
		iap->pm_iap_config |= (IAP_USR | IAP_OS);
	else if (pm->pm_caps & PMC_CAP_USER)
		iap->pm_iap_config |= IAP_USR;
	else if (pm->pm_caps & PMC_CAP_SYSTEM)
		iap->pm_iap_config |= IAP_OS;
	if (ped->ped_edge)
		iap->pm_iap_config |= IAP_EDGE;
	if (ped->ped_any)
		iap->pm_iap_config |= IAP_ANY;
	if (ped->ped_inv)
		iap->pm_iap_config |= IAP_INV;
	if (pm->pm_caps & PMC_CAP_INTERRUPT)
		iap->pm_iap_config |= IAP_INT;
	return (0);
}
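
/*
 * pmc_pmu_pmcallocate() below fills in the vendor-specific portion of a
 * pmc_op_pmcallocate request for a named event.  A minimal sketch of a
 * caller (illustrative only; the event name, caps, and error handling are
 * placeholders, not taken from the original sources):
 *
 *	struct pmc_op_pmcallocate pm;
 *
 *	memset(&pm, 0, sizeof(pm));
 *	pm.pm_caps |= PMC_CAP_SYSTEM;
 *	if (pmc_pmu_pmcallocate("LLC_MISSES", &pm) != 0)
 *		err(1, "cannot allocate LLC_MISSES");
 */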
int
pmc_pmu_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm)
{
	const struct pmu_event *pe;
	struct pmu_event_desc ped;
	pmu_mfr_t mfr;
	int idx = -1;

	if ((mfr = pmu_events_mfr()) == PMU_INVALID)
		return (ENOENT);

	bzero(&pm->pm_md, sizeof(pm->pm_md));
	pm->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	event_name = pmu_alias_get(event_name);
	if ((pe = pmu_event_get(NULL, event_name, &idx)) == NULL)
		return (ENOENT);
	if (pe->alias && (pe = pmu_event_get(NULL, pe->alias, &idx)) == NULL)
		return (ENOENT);
	assert(idx >= 0);
	pm->pm_ev = idx;

	if (pe->event == NULL)
		return (ENOENT);
	if (pmu_parse_event(&ped, pe->event))
		return (ENOENT);

	if (mfr == PMU_INTEL)
		return (pmc_pmu_intel_pmcallocate(event_name, pm, &ped));
	else
		return (pmc_pmu_amd_pmcallocate(event_name, pm, &ped));
}

/*
 * Counters used for "stat" mode; ultimately we rely on AMD naming their
 * equivalent events the same way.
 */
static const char *stat_mode_cntrs[] = {
	"cpu_clk_unhalted.thread",
	"inst_retired.any",
	"br_inst_retired.all_branches",
	"br_misp_retired.all_branches",
	"longest_lat_cache.reference",
	"longest_lat_cache.miss",
};

int
pmc_pmu_stat_mode(const char ***cntrs)
{
	if (pmc_pmu_enabled()) {
		*cntrs = stat_mode_cntrs;
		return (0);
	}
	return (EOPNOTSUPP);
}
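
/*
 * Platforms other than amd64/i386 do not ship pmu-events tables, so the
 * remainder of the file provides stubs that simply report the facility
 * as unavailable.
 */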

#else

uint64_t
pmc_pmu_sample_rate_get(const char *event_name __unused)
{
	return (DEFAULT_SAMPLE_COUNT);
}

void
pmc_pmu_print_counters(const char *event_name __unused)
{
}

void
pmc_pmu_print_counter_desc(const char *e __unused)
{
}

void
pmc_pmu_print_counter_desc_long(const char *e __unused)
{
}

void
pmc_pmu_print_counter_full(const char *e __unused)
{
}

int
pmc_pmu_enabled(void)
{
	return (0);
}

int
pmc_pmu_pmcallocate(const char *e __unused, struct pmc_op_pmcallocate *p __unused)
{
	return (EOPNOTSUPP);
}

const char *
pmc_pmu_event_get_by_idx(const char *c __unused, int idx __unused)
{
	return (NULL);
}

int
pmc_pmu_stat_mode(const char ***a __unused)
{
	return (EOPNOTSUPP);
}

int
pmc_pmu_idx_get_by_event(const char *c __unused, const char *e __unused)
{
	return (-1);
}

#endif