/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008 Joseph Koshy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel Core PMCs.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#define	CORE_CPUID_REQUEST		0xA
#define	CORE_CPUID_REQUEST_SIZE		0x4
#define	CORE_CPUID_EAX			0x0
#define	CORE_CPUID_EBX			0x1
#define	CORE_CPUID_ECX			0x2
#define	CORE_CPUID_EDX			0x3

#define	IAF_PMC_CAPS \
	(PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INTERRUPT | \
	 PMC_CAP_USER | PMC_CAP_SYSTEM)
#define	IAF_RI_TO_MSR(RI)	((RI) + (1 << 30))

#define	IAP_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \
    PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
    PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE)

#define	EV_IS_NOTARCH		0
#define	EV_IS_ARCH_SUPP		1
#define	EV_IS_ARCH_NOTSUPP	-1

/*
 * "Architectural" events defined by Intel.  The values of these
 * symbols correspond to positions in the bitmask returned by
 * the CPUID.0AH instruction.
 */
enum core_arch_events {
	CORE_AE_BRANCH_INSTRUCTION_RETIRED	= 5,
	CORE_AE_BRANCH_MISSES_RETIRED		= 6,
	CORE_AE_INSTRUCTION_RETIRED		= 1,
	CORE_AE_LLC_MISSES			= 4,
	CORE_AE_LLC_REFERENCE			= 3,
	CORE_AE_UNHALTED_REFERENCE_CYCLES	= 2,
	CORE_AE_UNHALTED_CORE_CYCLES		= 0
};

static enum pmc_cputype core_cputype;
static int core_version;

struct core_cpu {
	volatile uint32_t pc_iafctrl;	/* Fixed function control. */
	volatile uint64_t pc_globalctrl; /* Global control register. */
	struct pmc_hw pc_corepmcs[];
};

static struct core_cpu **core_pcpu;

static uint32_t core_architectural_events;
static uint64_t core_pmcmask;

static int core_iaf_ri;		/* relative index of fixed counters */
static int core_iaf_width;
static int core_iaf_npmc;

static int core_iap_width;
static int core_iap_npmc;
static int core_iap_wroffset;

static u_int pmc_alloc_refs;
static bool pmc_tsx_force_abort_set;

static int
core_pcpu_noop(struct pmc_mdep *md, int cpu)
{
	(void) md;
	(void) cpu;
	return (0);
}

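/*
 * Allocate the per-CPU 'struct core_cpu' holding one 'struct pmc_hw'
 * per programmable (and, for PMU version >= 2, fixed function)
 * counter, and register those descriptors with the generic per-CPU
 * state in pmc_pcpu[].
 */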
static int
core_pcpu_init(struct pmc_mdep *md, int cpu)
{
	struct pmc_cpu *pc;
	struct core_cpu *cc;
	struct pmc_hw *phw;
	int core_ri, n, npmc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[iaf,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"core-init cpu=%d", cpu);

	core_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_ri;
	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_num;

	if (core_version >= 2)
		npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF].pcd_num;

	cc = malloc(sizeof(struct core_cpu) + npmc * sizeof(struct pmc_hw),
	    M_PMC, M_WAITOK | M_ZERO);

	core_pcpu[cpu] = cc;
	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL && cc != NULL,
	    ("[core,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu));

	for (n = 0, phw = cc->pc_corepmcs; n < npmc; n++, phw++) {
		phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) |
		    PMC_PHW_INDEX_TO_STATE(n + core_ri);
		phw->phw_pmc = NULL;
		pc->pc_hwpmcs[n + core_ri] = phw;
	}

	if (core_version >= 2 && vm_guest == VM_GUEST_NO) {
		/* Enable Freezing PMCs on PMI. */
		wrmsr(MSR_DEBUGCTLMSR, rdmsr(MSR_DEBUGCTLMSR) | 0x1000);
	}

	return (0);
}

static int
core_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	int core_ri, n, npmc;
	struct pmc_cpu *pc;
	struct core_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] insane cpu number (%d)", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"core-pcpu-fini cpu=%d", cpu);

	if ((cc = core_pcpu[cpu]) == NULL)
		return (0);

	core_pcpu[cpu] = NULL;

	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL, ("[core,%d] NULL per-cpu %d state", __LINE__,
	    cpu));

	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_num;
	core_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_ri;

	for (n = 0; n < npmc; n++)
		wrmsr(IAP_EVSEL0 + n, 0);

	if (core_version >= 2) {
		wrmsr(IAF_CTRL, 0);
		npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF].pcd_num;
	}

	for (n = 0; n < npmc; n++)
		pc->pc_hwpmcs[n + core_ri] = NULL;

	free(cc, M_PMC);

	return (0);
}

/*
 * Fixed function counters.
 */

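/*
 * Convert between raw counter values and sampling reload counts.
 * Counters count upwards and interrupt on overflow, so a reload count
 * 'rlc' is programmed as (2^width - rlc); a counter value whose most
 * significant bit is clear indicates that the counter has wrapped.
 */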
static pmc_value_t
iaf_perfctr_value_to_reload_count(pmc_value_t v)
{

	/* If the PMC has overflowed, return a reload count of zero. */
	if ((v & (1ULL << (core_iaf_width - 1))) == 0)
		return (0);
	v &= (1ULL << core_iaf_width) - 1;
	return (1ULL << core_iaf_width) - v;
}

static pmc_value_t
iaf_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << core_iaf_width) - rlc;
}

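/*
 * Fixed function counters each count a single hard-wired event.  An
 * allocation therefore either names the counter directly (event code
 * 0x0 with a unit mask of ri + 1) or uses the equivalent programmable
 * event/umask encoding for that counter, which is checked below.
 */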
static int
iaf_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint8_t ev, umask;
	uint32_t caps;
	uint64_t config, flags;
	const struct pmc_md_iap_op_pmcallocate *iap;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU %d", __LINE__, cpu));

	PMCDBG2(MDP,ALL,1, "iaf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);

	if (ri < 0 || ri > core_iaf_npmc)
		return (EINVAL);

	if (a->pm_class != PMC_CLASS_IAF)
		return (EINVAL);

	iap = &a->pm_md.pm_iap;
	config = iap->pm_iap_config;
	ev = IAP_EVSEL_GET(config);
	umask = IAP_UMASK_GET(config);

	if (ev == 0x0) {
		if (umask != ri + 1)
			return (EINVAL);
	} else {
		switch (ri) {
		case 0:	/* INST_RETIRED.ANY */
			if (ev != 0xC0 || umask != 0x00)
				return (EINVAL);
			break;
		case 1:	/* CPU_CLK_UNHALTED.THREAD */
			if (ev != 0x3C || umask != 0x00)
				return (EINVAL);
			break;
		case 2:	/* CPU_CLK_UNHALTED.REF */
			if (ev != 0x3C || umask != 0x01)
				return (EINVAL);
			break;
		case 3:	/* TOPDOWN.SLOTS */
			if (ev != 0xA4 || umask != 0x01)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
	}

	pmc_alloc_refs++;
	if ((cpu_stdext_feature3 & CPUID_STDEXT3_TSXFA) != 0 &&
	    !pmc_tsx_force_abort_set) {
		pmc_tsx_force_abort_set = true;
		x86_msr_op(MSR_TSX_FORCE_ABORT, MSR_OP_RENDEZVOUS_ALL |
		    MSR_OP_WRITE, 1, NULL);
	}

	flags = 0;
	if (config & IAP_OS)
		flags |= IAF_OS;
	if (config & IAP_USR)
		flags |= IAF_USR;
	if (config & IAP_ANY)
		flags |= IAF_ANY;
	if (config & IAP_INT)
		flags |= IAF_PMI;

	caps = a->pm_caps;
	if (caps & PMC_CAP_INTERRUPT)
		flags |= IAF_PMI;
	if (caps & PMC_CAP_SYSTEM)
		flags |= IAF_OS;
	if (caps & PMC_CAP_USER)
		flags |= IAF_USR;
	if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
		flags |= (IAF_OS | IAF_USR);

	pm->pm_md.pm_iaf.pm_iaf_ctrl = (flags << (ri * 4));

	PMCDBG1(MDP,ALL,2, "iaf-allocate config=0x%jx",
	    (uintmax_t) pm->pm_md.pm_iaf.pm_iaf_ctrl);

	return (0);
}

static int
iaf_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < core_iaf_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "iaf-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(core_pcpu[cpu] != NULL, ("[core,%d] null per-cpu %d", __LINE__,
	    cpu));

	core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc = pm;

	return (0);
}

static int
iaf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	struct pmc_hw *phw;

	phw = &core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri];

	snprintf(pi->pm_name, sizeof(pi->pm_name), "IAF-%d", ri);
	pi->pm_class = PMC_CLASS_IAF;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc = NULL;
	}

	return (0);
}

static int
iaf_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc;

	return (0);
}

static int
iaf_get_msr(int ri, uint32_t *msr)
{
	KASSERT(ri >= 0 && ri < core_iaf_npmc,
	    ("[iaf,%d] ri %d out of range", __LINE__, ri));

	*msr = IAF_RI_TO_MSR(ri);

	return (0);
}

static int
iaf_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
{
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iaf_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	tmp = rdpmc(IAF_RI_TO_MSR(ri));

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = iaf_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp & ((1ULL << core_iaf_width) - 1);

	PMCDBG4(MDP,REA,1, "iaf-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
	    IAF_RI_TO_MSR(ri), *v);

	return (0);
}

static int
iaf_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	PMCDBG3(MDP,REL,1, "iaf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iaf_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc == NULL,
	    ("[core,%d] PHW pmc non-NULL", __LINE__));

	MPASS(pmc_alloc_refs > 0);
	if (pmc_alloc_refs-- == 1 && pmc_tsx_force_abort_set) {
		pmc_tsx_force_abort_set = false;
		x86_msr_op(MSR_TSX_FORCE_ABORT, MSR_OP_RENDEZVOUS_ALL |
		    MSR_OP_WRITE, 0, NULL);
	}

	return (0);
}

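/*
 * Start a fixed function counter: merge the allocation's control bits
 * into IAF_CTRL and set the counter's enable bit (at IAF_OFFSET and
 * above) in IA_GLOBAL_CTRL.
 */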
static int
iaf_start_pmc(int cpu, int ri, struct pmc *pm)
{
	struct core_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iaf_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG2(MDP,STA,1,"iaf-start cpu=%d ri=%d", cpu, ri);

	cc = core_pcpu[cpu];
	cc->pc_iafctrl |= pm->pm_md.pm_iaf.pm_iaf_ctrl;
	wrmsr(IAF_CTRL, cc->pc_iafctrl);

	cc->pc_globalctrl |= (1ULL << (ri + IAF_OFFSET));
	wrmsr(IA_GLOBAL_CTRL, cc->pc_globalctrl);

	PMCDBG4(MDP,STA,1,"iafctrl=%x(%x) globalctrl=%jx(%jx)",
	    cc->pc_iafctrl, (uint32_t) rdmsr(IAF_CTRL),
	    cc->pc_globalctrl, rdmsr(IA_GLOBAL_CTRL));

	return (0);
}

static int
iaf_stop_pmc(int cpu, int ri, struct pmc *pm)
{
	struct core_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iaf_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG2(MDP,STA,1,"iaf-stop cpu=%d ri=%d", cpu, ri);

	cc = core_pcpu[cpu];

	cc->pc_iafctrl &= ~(IAF_MASK << (ri * 4));
	wrmsr(IAF_CTRL, cc->pc_iafctrl);

	/* Don't need to write IA_GLOBAL_CTRL, one disable is enough. */

	PMCDBG4(MDP,STO,1,"iafctrl=%x(%x) globalctrl=%jx(%jx)",
	    cc->pc_iafctrl, (uint32_t) rdmsr(IAF_CTRL),
	    cc->pc_globalctrl, rdmsr(IA_GLOBAL_CTRL));

	return (0);
}

static int
iaf_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
{
	struct core_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iaf_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	cc = core_pcpu[cpu];

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = iaf_reload_count_to_perfctr_value(v);

	/* Turn off the fixed counter */
	wrmsr(IAF_CTRL, cc->pc_iafctrl & ~(IAF_MASK << (ri * 4)));

	wrmsr(IAF_CTR0 + ri, v & ((1ULL << core_iaf_width) - 1));

	/* Turn on fixed counters */
	wrmsr(IAF_CTRL, cc->pc_iafctrl);

	PMCDBG6(MDP,WRI,1, "iaf-write cpu=%d ri=%d msr=0x%x v=%jx iafctrl=%jx "
	    "pmc=%jx", cpu, ri, IAF_RI_TO_MSR(ri), v,
	    (uintmax_t) rdmsr(IAF_CTRL),
	    (uintmax_t) rdpmc(IAF_RI_TO_MSR(ri)));

	return (0);
}


static void
iaf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[iaf,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "iaf-initialize");

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF];

	pcd->pcd_caps = IAF_PMC_CAPS;
	pcd->pcd_class = PMC_CLASS_IAF;
	pcd->pcd_num = npmc;
	pcd->pcd_ri = md->pmd_npmc;
	pcd->pcd_width = pmcwidth;

	pcd->pcd_allocate_pmc = iaf_allocate_pmc;
	pcd->pcd_config_pmc = iaf_config_pmc;
	pcd->pcd_describe = iaf_describe;
	pcd->pcd_get_config = iaf_get_config;
	pcd->pcd_get_msr = iaf_get_msr;
	pcd->pcd_pcpu_fini = core_pcpu_noop;
	pcd->pcd_pcpu_init = core_pcpu_noop;
	pcd->pcd_read_pmc = iaf_read_pmc;
	pcd->pcd_release_pmc = iaf_release_pmc;
	pcd->pcd_start_pmc = iaf_start_pmc;
	pcd->pcd_stop_pmc = iaf_stop_pmc;
	pcd->pcd_write_pmc = iaf_write_pmc;

	md->pmd_npmc += npmc;
}

/*
 * Intel programmable PMCs.
 */

/* Sub fields of UMASK that this event supports. */
#define	IAP_M_CORE		(1 << 0) /* Core specificity */
#define	IAP_M_AGENT		(1 << 1) /* Agent specificity */
#define	IAP_M_PREFETCH		(1 << 2) /* Prefetch */
#define	IAP_M_MESI		(1 << 3) /* MESI */
#define	IAP_M_SNOOPRESPONSE	(1 << 4) /* Snoop response */
#define	IAP_M_SNOOPTYPE		(1 << 5) /* Snoop type */
#define	IAP_M_TRANSITION	(1 << 6) /* Transition */

#define	IAP_F_CORE		(0x3 << 14) /* Core specificity */
#define	IAP_F_AGENT		(0x1 << 13) /* Agent specificity */
#define	IAP_F_PREFETCH		(0x3 << 12) /* Prefetch */
#define	IAP_F_MESI		(0xF << 8)  /* MESI */
#define	IAP_F_SNOOPRESPONSE	(0xB << 8)  /* Snoop response */
#define	IAP_F_SNOOPTYPE		(0x3 << 8)  /* Snoop type */
#define	IAP_F_TRANSITION	(0x1 << 12) /* Transition */

#define	IAP_PREFETCH_RESERVED	(0x2 << 12)
#define	IAP_CORE_THIS		(0x1 << 14)
#define	IAP_CORE_ALL		(0x3 << 14)
#define	IAP_F_CMASK		0xFF000000

static pmc_value_t
iap_perfctr_value_to_reload_count(pmc_value_t v)
{

	/* If the PMC has overflowed, return a reload count of zero. */
	if ((v & (1ULL << (core_iap_width - 1))) == 0)
		return (0);
	v &= (1ULL << core_iap_width) - 1;
	return (1ULL << core_iap_width) - v;
}

static pmc_value_t
iap_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << core_iap_width) - rlc;
}

static int
iap_pmc_has_overflowed(int ri)
{
	uint64_t v;

	/*
	 * We treat a Core (i.e., Intel architecture v1) PMC as having
	 * overflowed if its MSB is zero.
	 */
	v = rdpmc(ri);
	return ((v & (1ULL << (core_iap_width - 1))) == 0);
}

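/*
 * Some events may only be programmed on particular counters.  The
 * helpers below encode these restrictions per CPU family, returning a
 * non-zero value when event 'evsel' may be scheduled on row index 'ri'.
 */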
static int
iap_event_corei7_ok_on_counter(uint8_t evsel, int ri)
{
	uint32_t mask;

	switch (evsel) {
	/* Events valid only on counter 0, 1. */
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x4C:
	case 0x4E:
	case 0x51:
	case 0x52:
	case 0x53:
	case 0x63:
		mask = 0x3;
		break;
	/* Any row index is ok. */
	default:
		mask = ~0;
	}

	return (mask & (1 << ri));
}

static int
iap_event_westmere_ok_on_counter(uint8_t evsel, int ri)
{
	uint32_t mask;

	switch (evsel) {
	/* Events valid only on counter 0. */
	case 0x60:
	case 0xB3:
		mask = 0x1;
		break;

	/* Events valid only on counter 0, 1. */
	case 0x4C:
	case 0x4E:
	case 0x51:
	case 0x52:
	case 0x63:
		mask = 0x3;
		break;
	/* Any row index is ok. */
	default:
		mask = ~0;
	}

	return (mask & (1 << ri));
}

static int
iap_event_sb_sbx_ib_ibx_ok_on_counter(uint8_t evsel, int ri)
{
	uint32_t mask;

	switch (evsel) {
	/* Events valid only on counter 0. */
	case 0xB7:
		mask = 0x1;
		break;
	/* Events valid only on counter 1. */
	case 0xC0:
		mask = 0x2;
		break;
	/* Events valid only on counter 2. */
	case 0x48:
	case 0xA2:
	case 0xA3:
		mask = 0x4;
		break;
	/* Events valid only on counter 3. */
	case 0xBB:
	case 0xCD:
		mask = 0x8;
		break;
	/* Any row index is ok. */
	default:
		mask = ~0;
	}

	return (mask & (1 << ri));
}

static int
iap_event_core_ok_on_counter(uint8_t evsel, int ri)
{
	uint32_t mask;

	switch (evsel) {
	/*
	 * Events valid only on counter 0.
	 */
	case 0x10:
	case 0x14:
	case 0x18:
	case 0xB3:
	case 0xC1:
	case 0xCB:
		mask = (1 << 0);
		break;

	/*
	 * Events valid only on counter 1.
	 */
	case 0x11:
	case 0x12:
	case 0x13:
		mask = (1 << 1);
		break;

	default:
		mask = ~0;	/* Any row index is ok. */
	}

	return (mask & (1 << ri));
}

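/*
 * Validate a programmable counter allocation against the counter
 * restrictions for the current CPU model and record the event
 * selector value to be programmed later by iap_start_pmc().
 */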
static int
iap_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint8_t ev;
	const struct pmc_md_iap_op_pmcallocate *iap;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[core,%d] illegal row-index value %d", __LINE__, ri));

	if (a->pm_class != PMC_CLASS_IAP)
		return (EINVAL);

	iap = &a->pm_md.pm_iap;
	ev = IAP_EVSEL_GET(iap->pm_iap_config);

	switch (core_cputype) {
	case PMC_CPU_INTEL_CORE:
	case PMC_CPU_INTEL_CORE2:
	case PMC_CPU_INTEL_CORE2EXTREME:
		if (iap_event_core_ok_on_counter(ev, ri) == 0)
			return (EINVAL);
	case PMC_CPU_INTEL_COREI7:
	case PMC_CPU_INTEL_NEHALEM_EX:
		if (iap_event_corei7_ok_on_counter(ev, ri) == 0)
			return (EINVAL);
		break;
	case PMC_CPU_INTEL_WESTMERE:
	case PMC_CPU_INTEL_WESTMERE_EX:
		if (iap_event_westmere_ok_on_counter(ev, ri) == 0)
			return (EINVAL);
		break;
	case PMC_CPU_INTEL_SANDYBRIDGE:
	case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
	case PMC_CPU_INTEL_IVYBRIDGE:
	case PMC_CPU_INTEL_IVYBRIDGE_XEON:
	case PMC_CPU_INTEL_HASWELL:
	case PMC_CPU_INTEL_HASWELL_XEON:
	case PMC_CPU_INTEL_BROADWELL:
	case PMC_CPU_INTEL_BROADWELL_XEON:
		if (iap_event_sb_sbx_ib_ibx_ok_on_counter(ev, ri) == 0)
			return (EINVAL);
		break;
	case PMC_CPU_INTEL_ATOM:
	case PMC_CPU_INTEL_ATOM_SILVERMONT:
	case PMC_CPU_INTEL_ATOM_GOLDMONT:
	case PMC_CPU_INTEL_ATOM_GOLDMONT_P:
	case PMC_CPU_INTEL_ATOM_TREMONT:
	case PMC_CPU_INTEL_SKYLAKE:
	case PMC_CPU_INTEL_SKYLAKE_XEON:
	case PMC_CPU_INTEL_ICELAKE:
	case PMC_CPU_INTEL_ICELAKE_XEON:
	case PMC_CPU_INTEL_ALDERLAKE:
	default:
		break;
	}

	pm->pm_md.pm_iap.pm_iap_evsel = iap->pm_iap_config;
	return (0);
}

static int
iap_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "iap-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(core_pcpu[cpu] != NULL, ("[core,%d] null per-cpu %d", __LINE__,
	    cpu));

	core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc = pm;

	return (0);
}

static int
iap_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	struct pmc_hw *phw;

	phw = &core_pcpu[cpu]->pc_corepmcs[ri];

	snprintf(pi->pm_name, sizeof(pi->pm_name), "IAP-%d", ri);
	pi->pm_class = PMC_CLASS_IAP;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc = NULL;
	}

	return (0);
}

static int
iap_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc;

	return (0);
}

static int
iap_get_msr(int ri, uint32_t *msr)
{
	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[iap,%d] ri %d out of range", __LINE__, ri));

	*msr = ri;

	return (0);
}

static int
iap_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
{
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	tmp = rdpmc(ri);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = iap_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp & ((1ULL << core_iap_width) - 1);

	PMCDBG4(MDP,REA,1, "iap-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
	    IAP_PMC0 + ri, *v);

	return (0);
}

static int
iap_release_pmc(int cpu, int ri, struct pmc *pm)
{
	(void) pm;

	PMCDBG3(MDP,REL,1, "iap-release cpu=%d ri=%d pm=%p", cpu, ri,
	    pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc
	    == NULL, ("[core,%d] PHW pmc non-NULL", __LINE__));

	return (0);
}

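/*
 * Start a programmable counter.  Off-core response events (0xB7/0xBB)
 * additionally program their auxiliary response MSR before the event
 * selector is written with IAP_EN set.
 */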
static int
iap_start_pmc(int cpu, int ri, struct pmc *pm)
{
	uint64_t evsel;
	struct core_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[core,%d] illegal row-index %d", __LINE__, ri));

	cc = core_pcpu[cpu];

	PMCDBG2(MDP,STA,1, "iap-start cpu=%d ri=%d", cpu, ri);

	evsel = pm->pm_md.pm_iap.pm_iap_evsel;

	PMCDBG4(MDP,STA,2, "iap-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%x",
	    cpu, ri, IAP_EVSEL0 + ri, evsel);

	/* Event specific configuration. */

	switch (IAP_EVSEL_GET(evsel)) {
	case 0xB7:
		wrmsr(IA_OFFCORE_RSP0, pm->pm_md.pm_iap.pm_iap_rsp);
		break;
	case 0xBB:
		wrmsr(IA_OFFCORE_RSP1, pm->pm_md.pm_iap.pm_iap_rsp);
		break;
	default:
		break;
	}

	wrmsr(IAP_EVSEL0 + ri, evsel | IAP_EN);

	if (core_version >= 2) {
		cc->pc_globalctrl |= (1ULL << ri);
		wrmsr(IA_GLOBAL_CTRL, cc->pc_globalctrl);
	}

	return (0);
}

static int
iap_stop_pmc(int cpu, int ri, struct pmc *pm __unused)
{

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[core,%d] illegal row index %d", __LINE__, ri));

	PMCDBG2(MDP,STO,1, "iap-stop cpu=%d ri=%d", cpu, ri);

	wrmsr(IAP_EVSEL0 + ri, 0);

	/* Don't need to write IA_GLOBAL_CTRL, one disable is enough. */

	return (0);
}

static int
iap_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
{

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[core,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < core_iap_npmc,
	    ("[core,%d] illegal row index %d", __LINE__, ri));

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = iap_reload_count_to_perfctr_value(v);

	v &= (1ULL << core_iap_width) - 1;

	PMCDBG4(MDP,WRI,1, "iap-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
	    IAP_PMC0 + ri, v);

	/*
	 * Write the new value to the counter (or its alias).  The
	 * counter will be in a stopped state when the pcd_write()
	 * entry point is called.
	 */
	wrmsr(core_iap_wroffset + IAP_PMC0 + ri, v);
	return (0);
}


static void
iap_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth,
    int flags)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[iap,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "iap-initialize");

	/* Remember the set of architectural events supported. */
	core_architectural_events = ~flags;

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP];

	pcd->pcd_caps = IAP_PMC_CAPS;
	pcd->pcd_class = PMC_CLASS_IAP;
	pcd->pcd_num = npmc;
	pcd->pcd_ri = md->pmd_npmc;
	pcd->pcd_width = pmcwidth;

	pcd->pcd_allocate_pmc = iap_allocate_pmc;
	pcd->pcd_config_pmc = iap_config_pmc;
	pcd->pcd_describe = iap_describe;
	pcd->pcd_get_config = iap_get_config;
	pcd->pcd_get_msr = iap_get_msr;
	pcd->pcd_pcpu_fini = core_pcpu_fini;
	pcd->pcd_pcpu_init = core_pcpu_init;
	pcd->pcd_read_pmc = iap_read_pmc;
	pcd->pcd_release_pmc = iap_release_pmc;
	pcd->pcd_start_pmc = iap_start_pmc;
	pcd->pcd_stop_pmc = iap_stop_pmc;
	pcd->pcd_write_pmc = iap_write_pmc;

	md->pmd_npmc += npmc;
}

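/*
 * Interrupt handler for version 1 PMUs.  Each sampling-mode
 * programmable counter is polled for an overflow (MSB clear); its
 * reload count is written back and the counter is re-enabled unless
 * the PMC has stalled.
 */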
static int
core_intr(struct trapframe *tf)
{
	pmc_value_t v;
	struct pmc *pm;
	struct core_cpu *cc;
	int error, found_interrupt, ri;

	PMCDBG3(MDP,INT, 1, "cpu=%d tf=%p um=%d", curcpu, (void *) tf,
	    TRAPF_USERMODE(tf));

	found_interrupt = 0;
	cc = core_pcpu[curcpu];

	for (ri = 0; ri < core_iap_npmc; ri++) {

		if ((pm = cc->pc_corepmcs[ri].phw_pmc) == NULL ||
		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			continue;

		if (!iap_pmc_has_overflowed(ri))
			continue;

		found_interrupt = 1;

		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		error = pmc_process_interrupt(PMC_HR, pm, tf);

		v = pm->pm_sc.pm_reloadcount;
		v = iap_reload_count_to_perfctr_value(v);

		/*
		 * Stop the counter, reload it but only restart it if
		 * the PMC is not stalled.
		 */
		wrmsr(IAP_EVSEL0 + ri, pm->pm_md.pm_iap.pm_iap_evsel);
		wrmsr(core_iap_wroffset + IAP_PMC0 + ri, v);

		if (__predict_false(error))
			continue;

		wrmsr(IAP_EVSEL0 + ri, pm->pm_md.pm_iap.pm_iap_evsel | IAP_EN);
	}

	if (found_interrupt)
		counter_u64_add(pmc_stats.pm_intr_processed, 1);
	else
		counter_u64_add(pmc_stats.pm_intr_ignored, 1);

	if (found_interrupt)
		lapic_reenable_pmc();

	return (found_interrupt);
}

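/*
 * Interrupt handler for version 2 and later PMUs.  A snapshot of
 * IA_GLOBAL_STATUS identifies the interrupting fixed and programmable
 * counters; each is reloaded with its sampling count, the status bits
 * are acknowledged through IA_GLOBAL_OVF_CTRL, and all non-stalled
 * counters are re-enabled via IA_GLOBAL_CTRL.
 */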
static int
core2_intr(struct trapframe *tf)
{
	int error, found_interrupt = 0, n, cpu;
	uint64_t flag, intrstatus, intrdisable = 0;
	struct pmc *pm;
	struct core_cpu *cc;
	pmc_value_t v;

	cpu = curcpu;
	PMCDBG3(MDP,INT, 1, "cpu=%d tf=0x%p um=%d", cpu, (void *) tf,
	    TRAPF_USERMODE(tf));

	/*
	 * The IA_GLOBAL_STATUS (MSR 0x38E) register indicates which
	 * PMCs have a pending PMI interrupt.  We take a 'snapshot' of
	 * the current set of interrupting PMCs and process these
	 * after stopping them.
	 */
	intrstatus = rdmsr(IA_GLOBAL_STATUS);
	PMCDBG2(MDP,INT, 1, "cpu=%d intrstatus=%jx", cpu,
	    (uintmax_t) intrstatus);

	/*
	 * Stop PMCs unless the hardware has already done it.
	 */
	if ((intrstatus & IA_GLOBAL_STATUS_FLAG_CTR_FRZ) == 0)
		wrmsr(IA_GLOBAL_CTRL, 0);

	cc = core_pcpu[cpu];
	KASSERT(cc != NULL, ("[core,%d] null pcpu", __LINE__));

	/*
	 * Look for interrupts from fixed function PMCs.
	 */
	for (n = 0, flag = (1ULL << IAF_OFFSET); n < core_iaf_npmc;
	    n++, flag <<= 1) {

		if ((intrstatus & flag) == 0)
			continue;

		found_interrupt = 1;

		pm = cc->pc_corepmcs[n + core_iaf_ri].phw_pmc;
		if (pm == NULL || pm->pm_state != PMC_STATE_RUNNING ||
		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			continue;

		error = pmc_process_interrupt(PMC_HR, pm, tf);
		if (__predict_false(error))
			intrdisable |= flag;

		v = iaf_reload_count_to_perfctr_value(pm->pm_sc.pm_reloadcount);

		/* Reload sampling count. */
		wrmsr(IAF_CTR0 + n, v);

		PMCDBG4(MDP,INT, 1, "iaf-intr cpu=%d error=%d v=%jx(%jx)", curcpu,
		    error, (uintmax_t) v, (uintmax_t) rdpmc(IAF_RI_TO_MSR(n)));
	}

	/*
	 * Process interrupts from the programmable counters.
	 */
	for (n = 0, flag = 1; n < core_iap_npmc; n++, flag <<= 1) {
		if ((intrstatus & flag) == 0)
			continue;

		found_interrupt = 1;

		pm = cc->pc_corepmcs[n].phw_pmc;
		if (pm == NULL || pm->pm_state != PMC_STATE_RUNNING ||
		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			continue;

		error = pmc_process_interrupt(PMC_HR, pm, tf);
		if (__predict_false(error))
			intrdisable |= flag;

		v = iap_reload_count_to_perfctr_value(pm->pm_sc.pm_reloadcount);

		PMCDBG3(MDP,INT, 1, "iap-intr cpu=%d error=%d v=%jx", cpu, error,
		    (uintmax_t) v);

		/* Reload sampling count. */
		wrmsr(core_iap_wroffset + IAP_PMC0 + n, v);
	}

	if (found_interrupt)
		counter_u64_add(pmc_stats.pm_intr_processed, 1);
	else
		counter_u64_add(pmc_stats.pm_intr_ignored, 1);

	if (found_interrupt)
		lapic_reenable_pmc();

	/*
	 * Reenable all non-stalled PMCs.
	 */
	if ((intrstatus & IA_GLOBAL_STATUS_FLAG_CTR_FRZ) == 0) {
		wrmsr(IA_GLOBAL_OVF_CTRL, intrstatus);
		cc->pc_globalctrl &= ~intrdisable;
		wrmsr(IA_GLOBAL_CTRL, cc->pc_globalctrl);
	} else {
		if (__predict_false(intrdisable)) {
			cc->pc_globalctrl &= ~intrdisable;
			wrmsr(IA_GLOBAL_CTRL, cc->pc_globalctrl);
		}
		wrmsr(IA_GLOBAL_OVF_CTRL, intrstatus);
	}

	PMCDBG4(MDP, INT, 1, "cpu=%d fixedctrl=%jx globalctrl=%jx status=%jx",
	    cpu, (uintmax_t) rdmsr(IAF_CTRL),
	    (uintmax_t) rdmsr(IA_GLOBAL_CTRL),
	    (uintmax_t) rdmsr(IA_GLOBAL_STATUS));

	return (found_interrupt);
}

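/*
 * Machine dependent initialization: discover the PMU version and the
 * number and width of the programmable and fixed function counters
 * from CPUID leaf 0xA, then select the interrupt handler matching the
 * PMU version.
 */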
int
pmc_core_initialize(struct pmc_mdep *md, int maxcpu, int version_override)
{
	int cpuid[CORE_CPUID_REQUEST_SIZE];
	int flags, nflags;

	do_cpuid(CORE_CPUID_REQUEST, cpuid);

	core_cputype = md->pmd_cputype;
	core_version = (version_override > 0) ? version_override :
	    cpuid[CORE_CPUID_EAX] & 0xFF;

	PMCDBG3(MDP,INI,1,"core-init cputype=%d ncpu=%d version=%d",
	    core_cputype, maxcpu, core_version);

	if (core_version < 1 || core_version > 5 ||
	    (core_cputype != PMC_CPU_INTEL_CORE && core_version == 1)) {
		/* Unknown PMC architecture. */
		printf("hwpmc_core: unknown PMC architecture: %d\n",
		    core_version);
		return (EPROGMISMATCH);
	}

	core_iap_wroffset = 0;
	if (cpu_feature2 & CPUID2_PDCM) {
		if (rdmsr(IA32_PERF_CAPABILITIES) & PERFCAP_FW_WRITE) {
			PMCDBG0(MDP, INI, 1,
			    "core-init full-width write supported");
			core_iap_wroffset = IAP_A_PMC0 - IAP_PMC0;
		} else
			PMCDBG0(MDP, INI, 1,
			    "core-init full-width write NOT supported");
	} else
		PMCDBG0(MDP, INI, 1, "core-init pdcm not supported");

	core_pmcmask = 0;

	/*
	 * Initialize programmable counters.
	 */
	core_iap_npmc = (cpuid[CORE_CPUID_EAX] >> 8) & 0xFF;
	core_iap_width = (cpuid[CORE_CPUID_EAX] >> 16) & 0xFF;

	core_pmcmask |= ((1ULL << core_iap_npmc) - 1);

	nflags = (cpuid[CORE_CPUID_EAX] >> 24) & 0xFF;
	flags = cpuid[CORE_CPUID_EBX] & ((1 << nflags) - 1);

	iap_initialize(md, maxcpu, core_iap_npmc, core_iap_width, flags);

	/*
	 * Initialize fixed function counters, if present.
	 */
	if (core_version >= 2) {
		core_iaf_ri = core_iap_npmc;
		core_iaf_npmc = cpuid[CORE_CPUID_EDX] & 0x1F;
		core_iaf_width = (cpuid[CORE_CPUID_EDX] >> 5) & 0xFF;

		iaf_initialize(md, maxcpu, core_iaf_npmc, core_iaf_width);
		core_pmcmask |= ((1ULL << core_iaf_npmc) - 1) << IAF_OFFSET;
	}

	PMCDBG2(MDP,INI,1,"core-init pmcmask=0x%jx iafri=%d", core_pmcmask,
	    core_iaf_ri);

	core_pcpu = malloc(sizeof(*core_pcpu) * maxcpu, M_PMC,
	    M_ZERO | M_WAITOK);

	/*
	 * Choose the appropriate interrupt handler.
	 */
	if (core_version >= 2)
		md->pmd_intr = core2_intr;
	else
		md->pmd_intr = core_intr;

	return (0);
}

void
pmc_core_finalize(struct pmc_mdep *md)
{
	PMCDBG0(MDP,INI,1, "core-finalize");

	free(core_pcpu, M_PMC);
	core_pcpu = NULL;
}