1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2010 Fabien Thomas 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 /* 30 * Intel Uncore PMCs. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <sys/param.h> 37 #include <sys/bus.h> 38 #include <sys/pmc.h> 39 #include <sys/pmckern.h> 40 #include <sys/systm.h> 41 42 #include <machine/intr_machdep.h> 43 #if (__FreeBSD_version >= 1100000) 44 #include <x86/apicvar.h> 45 #else 46 #include <machine/apicvar.h> 47 #endif 48 #include <machine/cpu.h> 49 #include <machine/cpufunc.h> 50 #include <machine/specialreg.h> 51 52 #define UCF_PMC_CAPS \ 53 (PMC_CAP_READ | PMC_CAP_WRITE) 54 55 #define UCP_PMC_CAPS \ 56 (PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \ 57 PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE) 58 59 #define SELECTSEL(x) \ 60 (((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \ 61 UCP_CB0_EVSEL0 : UCP_EVSEL0) 62 63 #define SELECTOFF(x) \ 64 (((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \ 65 UCF_OFFSET_SB : UCF_OFFSET) 66 67 static enum pmc_cputype uncore_cputype; 68 69 struct uncore_cpu { 70 volatile uint32_t pc_resync; 71 volatile uint32_t pc_ucfctrl; /* Fixed function control. */ 72 volatile uint64_t pc_globalctrl; /* Global control register. */ 73 struct pmc_hw pc_uncorepmcs[]; 74 }; 75 76 static struct uncore_cpu **uncore_pcpu; 77 78 static uint64_t uncore_pmcmask; 79 80 static int uncore_ucf_ri; /* relative index of fixed counters */ 81 static int uncore_ucf_width; 82 static int uncore_ucf_npmc; 83 84 static int uncore_ucp_width; 85 static int uncore_ucp_npmc; 86 87 static int 88 uncore_pcpu_noop(struct pmc_mdep *md, int cpu) 89 { 90 (void) md; 91 (void) cpu; 92 return (0); 93 } 94 95 static int 96 uncore_pcpu_init(struct pmc_mdep *md, int cpu) 97 { 98 struct pmc_cpu *pc; 99 struct uncore_cpu *cc; 100 struct pmc_hw *phw; 101 int uncore_ri, n, npmc; 102 103 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 104 ("[ucf,%d] insane cpu number %d", __LINE__, cpu)); 105 106 PMCDBG1(MDP,INI,1,"uncore-init cpu=%d", cpu); 107 108 uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri; 109 npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num; 110 npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num; 111 112 cc = malloc(sizeof(struct uncore_cpu) + npmc * sizeof(struct pmc_hw), 113 M_PMC, M_WAITOK | M_ZERO); 114 115 uncore_pcpu[cpu] = cc; 116 pc = pmc_pcpu[cpu]; 117 118 KASSERT(pc != NULL && cc != NULL, 119 ("[uncore,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu)); 120 121 for (n = 0, phw = cc->pc_uncorepmcs; n < npmc; n++, phw++) { 122 phw->phw_state = PMC_PHW_FLAG_IS_ENABLED | 123 PMC_PHW_CPU_TO_STATE(cpu) | 124 PMC_PHW_INDEX_TO_STATE(n + uncore_ri); 125 phw->phw_pmc = NULL; 126 pc->pc_hwpmcs[n + uncore_ri] = phw; 127 } 128 129 return (0); 130 } 131 132 static int 133 uncore_pcpu_fini(struct pmc_mdep *md, int cpu) 134 { 135 int uncore_ri, n, npmc; 136 struct pmc_cpu *pc; 137 struct uncore_cpu *cc; 138 139 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 140 ("[uncore,%d] insane cpu number (%d)", __LINE__, cpu)); 141 142 PMCDBG1(MDP,INI,1,"uncore-pcpu-fini cpu=%d", cpu); 143 144 if ((cc = uncore_pcpu[cpu]) == NULL) 145 return (0); 146 147 uncore_pcpu[cpu] = NULL; 148 149 pc = pmc_pcpu[cpu]; 150 151 KASSERT(pc != NULL, ("[uncore,%d] NULL per-cpu %d state", __LINE__, 152 cpu)); 153 154 npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num; 155 uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri; 156 157 for (n = 0; n < npmc; n++) 158 wrmsr(SELECTSEL(uncore_cputype) + n, 0); 159 160 wrmsr(UCF_CTRL, 0); 161 npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num; 162 163 for (n = 0; n < npmc; n++) 164 pc->pc_hwpmcs[n + uncore_ri] = NULL; 165 166 free(cc, M_PMC); 167 168 return (0); 169 } 170 171 /* 172 * Fixed function counters. 173 */ 174 175 static pmc_value_t 176 ucf_perfctr_value_to_reload_count(pmc_value_t v) 177 { 178 v &= (1ULL << uncore_ucf_width) - 1; 179 return (1ULL << uncore_ucf_width) - v; 180 } 181 182 static pmc_value_t 183 ucf_reload_count_to_perfctr_value(pmc_value_t rlc) 184 { 185 return (1ULL << uncore_ucf_width) - rlc; 186 } 187 188 static int 189 ucf_allocate_pmc(int cpu, int ri, struct pmc *pm, 190 const struct pmc_op_pmcallocate *a) 191 { 192 uint32_t caps, flags; 193 194 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 195 ("[uncore,%d] illegal CPU %d", __LINE__, cpu)); 196 197 PMCDBG2(MDP,ALL,1, "ucf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps); 198 199 if (ri < 0 || ri > uncore_ucf_npmc) 200 return (EINVAL); 201 202 caps = a->pm_caps; 203 204 if (a->pm_class != PMC_CLASS_UCF || 205 (caps & UCF_PMC_CAPS) != caps) 206 return (EINVAL); 207 208 flags = UCF_EN; 209 210 pm->pm_md.pm_ucf.pm_ucf_ctrl = (flags << (ri * 4)); 211 212 PMCDBG1(MDP,ALL,2, "ucf-allocate config=0x%jx", 213 (uintmax_t) pm->pm_md.pm_ucf.pm_ucf_ctrl); 214 215 return (0); 216 } 217 218 static int 219 ucf_config_pmc(int cpu, int ri, struct pmc *pm) 220 { 221 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 222 ("[uncore,%d] illegal CPU %d", __LINE__, cpu)); 223 224 KASSERT(ri >= 0 && ri < uncore_ucf_npmc, 225 ("[uncore,%d] illegal row-index %d", __LINE__, ri)); 226 227 PMCDBG3(MDP,CFG,1, "ucf-config cpu=%d ri=%d pm=%p", cpu, ri, pm); 228 229 KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d", __LINE__, 230 cpu)); 231 232 uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc = pm; 233 234 return (0); 235 } 236 237 static int 238 ucf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc) 239 { 240 int error; 241 struct pmc_hw *phw; 242 char ucf_name[PMC_NAME_MAX]; 243 244 phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri]; 245 246 (void) snprintf(ucf_name, sizeof(ucf_name), "UCF-%d", ri); 247 if ((error = copystr(ucf_name, pi->pm_name, PMC_NAME_MAX, 248 NULL)) != 0) 249 return (error); 250 251 pi->pm_class = PMC_CLASS_UCF; 252 253 if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) { 254 pi->pm_enabled = TRUE; 255 *ppmc = phw->phw_pmc; 256 } else { 257 pi->pm_enabled = FALSE; 258 *ppmc = NULL; 259 } 260 261 return (0); 262 } 263 264 static int 265 ucf_get_config(int cpu, int ri, struct pmc **ppm) 266 { 267 *ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc; 268 269 return (0); 270 } 271 272 static int 273 ucf_read_pmc(int cpu, int ri, pmc_value_t *v) 274 { 275 struct pmc *pm; 276 pmc_value_t tmp; 277 278 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 279 ("[uncore,%d] illegal cpu value %d", __LINE__, cpu)); 280 KASSERT(ri >= 0 && ri < uncore_ucf_npmc, 281 ("[uncore,%d] illegal row-index %d", __LINE__, ri)); 282 283 pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc; 284 285 KASSERT(pm, 286 ("[uncore,%d] cpu %d ri %d(%d) pmc not configured", __LINE__, cpu, 287 ri, ri + uncore_ucf_ri)); 288 289 tmp = rdmsr(UCF_CTR0 + ri); 290 291 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) 292 *v = ucf_perfctr_value_to_reload_count(tmp); 293 else 294 *v = tmp; 295 296 PMCDBG3(MDP,REA,1, "ucf-read cpu=%d ri=%d -> v=%jx", cpu, ri, *v); 297 298 return (0); 299 } 300 301 static int 302 ucf_release_pmc(int cpu, int ri, struct pmc *pmc) 303 { 304 PMCDBG3(MDP,REL,1, "ucf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc); 305 306 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 307 ("[uncore,%d] illegal CPU value %d", __LINE__, cpu)); 308 KASSERT(ri >= 0 && ri < uncore_ucf_npmc, 309 ("[uncore,%d] illegal row-index %d", __LINE__, ri)); 310 311 KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc == NULL, 312 ("[uncore,%d] PHW pmc non-NULL", __LINE__)); 313 314 return (0); 315 } 316 317 static int 318 ucf_start_pmc(int cpu, int ri) 319 { 320 struct pmc *pm; 321 struct uncore_cpu *ucfc; 322 323 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 324 ("[uncore,%d] illegal CPU value %d", __LINE__, cpu)); 325 KASSERT(ri >= 0 && ri < uncore_ucf_npmc, 326 ("[uncore,%d] illegal row-index %d", __LINE__, ri)); 327 328 PMCDBG2(MDP,STA,1,"ucf-start cpu=%d ri=%d", cpu, ri); 329 330 ucfc = uncore_pcpu[cpu]; 331 pm = ucfc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc; 332 333 ucfc->pc_ucfctrl |= pm->pm_md.pm_ucf.pm_ucf_ctrl; 334 335 wrmsr(UCF_CTRL, ucfc->pc_ucfctrl); 336 337 do { 338 ucfc->pc_resync = 0; 339 ucfc->pc_globalctrl |= (1ULL << (ri + SELECTOFF(uncore_cputype))); 340 wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl); 341 } while (ucfc->pc_resync != 0); 342 343 PMCDBG4(MDP,STA,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)", 344 ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL), 345 ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL)); 346 347 return (0); 348 } 349 350 static int 351 ucf_stop_pmc(int cpu, int ri) 352 { 353 uint32_t fc; 354 struct uncore_cpu *ucfc; 355 356 PMCDBG2(MDP,STO,1,"ucf-stop cpu=%d ri=%d", cpu, ri); 357 358 ucfc = uncore_pcpu[cpu]; 359 360 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 361 ("[uncore,%d] illegal CPU value %d", __LINE__, cpu)); 362 KASSERT(ri >= 0 && ri < uncore_ucf_npmc, 363 ("[uncore,%d] illegal row-index %d", __LINE__, ri)); 364 365 fc = (UCF_MASK << (ri * 4)); 366 367 ucfc->pc_ucfctrl &= ~fc; 368 369 PMCDBG1(MDP,STO,1,"ucf-stop ucfctrl=%x", ucfc->pc_ucfctrl); 370 wrmsr(UCF_CTRL, ucfc->pc_ucfctrl); 371 372 do { 373 ucfc->pc_resync = 0; 374 ucfc->pc_globalctrl &= ~(1ULL << (ri + SELECTOFF(uncore_cputype))); 375 wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl); 376 } while (ucfc->pc_resync != 0); 377 378 PMCDBG4(MDP,STO,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)", 379 ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL), 380 ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL)); 381 382 return (0); 383 } 384 385 static int 386 ucf_write_pmc(int cpu, int ri, pmc_value_t v) 387 { 388 struct uncore_cpu *cc; 389 struct pmc *pm; 390 391 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 392 ("[uncore,%d] illegal cpu value %d", __LINE__, cpu)); 393 KASSERT(ri >= 0 && ri < uncore_ucf_npmc, 394 ("[uncore,%d] illegal row-index %d", __LINE__, ri)); 395 396 cc = uncore_pcpu[cpu]; 397 pm = cc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc; 398 399 KASSERT(pm, 400 ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri)); 401 402 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) 403 v = ucf_reload_count_to_perfctr_value(v); 404 405 wrmsr(UCF_CTRL, 0); /* Turn off fixed counters */ 406 wrmsr(UCF_CTR0 + ri, v); 407 wrmsr(UCF_CTRL, cc->pc_ucfctrl); 408 409 PMCDBG4(MDP,WRI,1, "ucf-write cpu=%d ri=%d v=%jx ucfctrl=%jx ", 410 cpu, ri, v, (uintmax_t) rdmsr(UCF_CTRL)); 411 412 return (0); 413 } 414 415 416 static void 417 ucf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth) 418 { 419 struct pmc_classdep *pcd; 420 421 KASSERT(md != NULL, ("[ucf,%d] md is NULL", __LINE__)); 422 423 PMCDBG0(MDP,INI,1, "ucf-initialize"); 424 425 pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF]; 426 427 pcd->pcd_caps = UCF_PMC_CAPS; 428 pcd->pcd_class = PMC_CLASS_UCF; 429 pcd->pcd_num = npmc; 430 pcd->pcd_ri = md->pmd_npmc; 431 pcd->pcd_width = pmcwidth; 432 433 pcd->pcd_allocate_pmc = ucf_allocate_pmc; 434 pcd->pcd_config_pmc = ucf_config_pmc; 435 pcd->pcd_describe = ucf_describe; 436 pcd->pcd_get_config = ucf_get_config; 437 pcd->pcd_get_msr = NULL; 438 pcd->pcd_pcpu_fini = uncore_pcpu_noop; 439 pcd->pcd_pcpu_init = uncore_pcpu_noop; 440 pcd->pcd_read_pmc = ucf_read_pmc; 441 pcd->pcd_release_pmc = ucf_release_pmc; 442 pcd->pcd_start_pmc = ucf_start_pmc; 443 pcd->pcd_stop_pmc = ucf_stop_pmc; 444 pcd->pcd_write_pmc = ucf_write_pmc; 445 446 md->pmd_npmc += npmc; 447 } 448 449 /* 450 * Intel programmable PMCs. 451 */ 452 453 /* 454 * Event descriptor tables. 455 * 456 * For each event id, we track: 457 * 458 * 1. The CPUs that the event is valid for. 459 * 460 * 2. If the event uses a fixed UMASK, the value of the umask field. 461 * If the event doesn't use a fixed UMASK, a mask of legal bits 462 * to check against. 463 */ 464 465 struct ucp_event_descr { 466 enum pmc_event ucp_ev; 467 unsigned char ucp_evcode; 468 unsigned char ucp_umask; 469 unsigned char ucp_flags; 470 }; 471 472 #define UCP_F_I7 (1 << 0) /* CPU: Core i7 */ 473 #define UCP_F_WM (1 << 1) /* CPU: Westmere */ 474 #define UCP_F_SB (1 << 2) /* CPU: Sandy Bridge */ 475 #define UCP_F_HW (1 << 3) /* CPU: Haswell */ 476 #define UCP_F_FM (1 << 4) /* Fixed mask */ 477 478 #define UCP_F_ALLCPUS \ 479 (UCP_F_I7 | UCP_F_WM) 480 481 #define UCP_F_CMASK 0xFF000000 482 483 static pmc_value_t 484 ucp_perfctr_value_to_reload_count(pmc_value_t v) 485 { 486 v &= (1ULL << uncore_ucp_width) - 1; 487 return (1ULL << uncore_ucp_width) - v; 488 } 489 490 static pmc_value_t 491 ucp_reload_count_to_perfctr_value(pmc_value_t rlc) 492 { 493 return (1ULL << uncore_ucp_width) - rlc; 494 } 495 496 /* 497 * Counter specific event information for Sandybridge and Haswell 498 */ 499 static int 500 ucp_event_sb_hw_ok_on_counter(uint8_t ev, int ri) 501 { 502 uint32_t mask; 503 504 switch (ev) { 505 /* 506 * Events valid only on counter 0. 507 */ 508 case 0x80: 509 case 0x83: 510 mask = (1 << 0); 511 break; 512 513 default: 514 mask = ~0; /* Any row index is ok. */ 515 } 516 517 return (mask & (1 << ri)); 518 } 519 520 static int 521 ucp_allocate_pmc(int cpu, int ri, struct pmc *pm, 522 const struct pmc_op_pmcallocate *a) 523 { 524 uint8_t ev; 525 uint32_t caps; 526 const struct pmc_md_ucp_op_pmcallocate *ucp; 527 528 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 529 ("[uncore,%d] illegal CPU %d", __LINE__, cpu)); 530 KASSERT(ri >= 0 && ri < uncore_ucp_npmc, 531 ("[uncore,%d] illegal row-index value %d", __LINE__, ri)); 532 533 /* check requested capabilities */ 534 caps = a->pm_caps; 535 if ((UCP_PMC_CAPS & caps) != caps) 536 return (EPERM); 537 538 ucp = &a->pm_md.pm_ucp; 539 ev = UCP_EVSEL(ucp->pm_ucp_config); 540 switch (uncore_cputype) { 541 case PMC_CPU_INTEL_HASWELL: 542 case PMC_CPU_INTEL_SANDYBRIDGE: 543 if (ucp_event_sb_hw_ok_on_counter(ev, ri) == 0) 544 return (EINVAL); 545 break; 546 default: 547 break; 548 } 549 550 pm->pm_md.pm_ucp.pm_ucp_evsel = ucp->pm_ucp_config | UCP_EN; 551 552 return (0); 553 } 554 555 static int 556 ucp_config_pmc(int cpu, int ri, struct pmc *pm) 557 { 558 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 559 ("[uncore,%d] illegal CPU %d", __LINE__, cpu)); 560 561 KASSERT(ri >= 0 && ri < uncore_ucp_npmc, 562 ("[uncore,%d] illegal row-index %d", __LINE__, ri)); 563 564 PMCDBG3(MDP,CFG,1, "ucp-config cpu=%d ri=%d pm=%p", cpu, ri, pm); 565 566 KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d", __LINE__, 567 cpu)); 568 569 uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc = pm; 570 571 return (0); 572 } 573 574 static int 575 ucp_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc) 576 { 577 int error; 578 struct pmc_hw *phw; 579 char ucp_name[PMC_NAME_MAX]; 580 581 phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri]; 582 583 (void) snprintf(ucp_name, sizeof(ucp_name), "UCP-%d", ri); 584 if ((error = copystr(ucp_name, pi->pm_name, PMC_NAME_MAX, 585 NULL)) != 0) 586 return (error); 587 588 pi->pm_class = PMC_CLASS_UCP; 589 590 if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) { 591 pi->pm_enabled = TRUE; 592 *ppmc = phw->phw_pmc; 593 } else { 594 pi->pm_enabled = FALSE; 595 *ppmc = NULL; 596 } 597 598 return (0); 599 } 600 601 static int 602 ucp_get_config(int cpu, int ri, struct pmc **ppm) 603 { 604 *ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc; 605 606 return (0); 607 } 608 609 static int 610 ucp_read_pmc(int cpu, int ri, pmc_value_t *v) 611 { 612 struct pmc *pm; 613 pmc_value_t tmp; 614 615 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 616 ("[uncore,%d] illegal cpu value %d", __LINE__, cpu)); 617 KASSERT(ri >= 0 && ri < uncore_ucp_npmc, 618 ("[uncore,%d] illegal row-index %d", __LINE__, ri)); 619 620 pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc; 621 622 KASSERT(pm, 623 ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, 624 ri)); 625 626 tmp = rdmsr(UCP_PMC0 + ri); 627 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) 628 *v = ucp_perfctr_value_to_reload_count(tmp); 629 else 630 *v = tmp; 631 632 PMCDBG4(MDP,REA,1, "ucp-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri, 633 ri, *v); 634 635 return (0); 636 } 637 638 static int 639 ucp_release_pmc(int cpu, int ri, struct pmc *pm) 640 { 641 (void) pm; 642 643 PMCDBG3(MDP,REL,1, "ucp-release cpu=%d ri=%d pm=%p", cpu, ri, 644 pm); 645 646 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 647 ("[uncore,%d] illegal CPU value %d", __LINE__, cpu)); 648 KASSERT(ri >= 0 && ri < uncore_ucp_npmc, 649 ("[uncore,%d] illegal row-index %d", __LINE__, ri)); 650 651 KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc 652 == NULL, ("[uncore,%d] PHW pmc non-NULL", __LINE__)); 653 654 return (0); 655 } 656 657 static int 658 ucp_start_pmc(int cpu, int ri) 659 { 660 struct pmc *pm; 661 uint32_t evsel; 662 struct uncore_cpu *cc; 663 664 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 665 ("[uncore,%d] illegal CPU value %d", __LINE__, cpu)); 666 KASSERT(ri >= 0 && ri < uncore_ucp_npmc, 667 ("[uncore,%d] illegal row-index %d", __LINE__, ri)); 668 669 cc = uncore_pcpu[cpu]; 670 pm = cc->pc_uncorepmcs[ri].phw_pmc; 671 672 KASSERT(pm, 673 ("[uncore,%d] starting cpu%d,ri%d with no pmc configured", 674 __LINE__, cpu, ri)); 675 676 PMCDBG2(MDP,STA,1, "ucp-start cpu=%d ri=%d", cpu, ri); 677 678 evsel = pm->pm_md.pm_ucp.pm_ucp_evsel; 679 680 PMCDBG4(MDP,STA,2, 681 "ucp-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%x", 682 cpu, ri, SELECTSEL(uncore_cputype) + ri, evsel); 683 684 /* Event specific configuration. */ 685 switch (pm->pm_event) { 686 case PMC_EV_UCP_EVENT_0CH_04H_E: 687 case PMC_EV_UCP_EVENT_0CH_08H_E: 688 wrmsr(MSR_GQ_SNOOP_MESF,0x2); 689 break; 690 case PMC_EV_UCP_EVENT_0CH_04H_F: 691 case PMC_EV_UCP_EVENT_0CH_08H_F: 692 wrmsr(MSR_GQ_SNOOP_MESF,0x8); 693 break; 694 case PMC_EV_UCP_EVENT_0CH_04H_M: 695 case PMC_EV_UCP_EVENT_0CH_08H_M: 696 wrmsr(MSR_GQ_SNOOP_MESF,0x1); 697 break; 698 case PMC_EV_UCP_EVENT_0CH_04H_S: 699 case PMC_EV_UCP_EVENT_0CH_08H_S: 700 wrmsr(MSR_GQ_SNOOP_MESF,0x4); 701 break; 702 default: 703 break; 704 } 705 wrmsr(SELECTSEL(uncore_cputype) + ri, evsel); 706 707 do { 708 cc->pc_resync = 0; 709 cc->pc_globalctrl |= (1ULL << ri); 710 wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl); 711 } while (cc->pc_resync != 0); 712 713 return (0); 714 } 715 716 static int 717 ucp_stop_pmc(int cpu, int ri) 718 { 719 struct pmc *pm __diagused; 720 struct uncore_cpu *cc; 721 722 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 723 ("[uncore,%d] illegal cpu value %d", __LINE__, cpu)); 724 KASSERT(ri >= 0 && ri < uncore_ucp_npmc, 725 ("[uncore,%d] illegal row index %d", __LINE__, ri)); 726 727 cc = uncore_pcpu[cpu]; 728 pm = cc->pc_uncorepmcs[ri].phw_pmc; 729 730 KASSERT(pm, 731 ("[uncore,%d] cpu%d ri%d no configured PMC to stop", __LINE__, 732 cpu, ri)); 733 734 PMCDBG2(MDP,STO,1, "ucp-stop cpu=%d ri=%d", cpu, ri); 735 736 /* stop hw. */ 737 wrmsr(SELECTSEL(uncore_cputype) + ri, 0); 738 739 do { 740 cc->pc_resync = 0; 741 cc->pc_globalctrl &= ~(1ULL << ri); 742 wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl); 743 } while (cc->pc_resync != 0); 744 745 return (0); 746 } 747 748 static int 749 ucp_write_pmc(int cpu, int ri, pmc_value_t v) 750 { 751 struct pmc *pm; 752 struct uncore_cpu *cc; 753 754 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 755 ("[uncore,%d] illegal cpu value %d", __LINE__, cpu)); 756 KASSERT(ri >= 0 && ri < uncore_ucp_npmc, 757 ("[uncore,%d] illegal row index %d", __LINE__, ri)); 758 759 cc = uncore_pcpu[cpu]; 760 pm = cc->pc_uncorepmcs[ri].phw_pmc; 761 762 KASSERT(pm, 763 ("[uncore,%d] cpu%d ri%d no configured PMC to stop", __LINE__, 764 cpu, ri)); 765 766 PMCDBG4(MDP,WRI,1, "ucp-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri, 767 UCP_PMC0 + ri, v); 768 769 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) 770 v = ucp_reload_count_to_perfctr_value(v); 771 772 /* 773 * Write the new value to the counter. The counter will be in 774 * a stopped state when the pcd_write() entry point is called. 775 */ 776 777 wrmsr(UCP_PMC0 + ri, v); 778 779 return (0); 780 } 781 782 783 static void 784 ucp_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth) 785 { 786 struct pmc_classdep *pcd; 787 788 KASSERT(md != NULL, ("[ucp,%d] md is NULL", __LINE__)); 789 790 PMCDBG0(MDP,INI,1, "ucp-initialize"); 791 792 pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP]; 793 794 pcd->pcd_caps = UCP_PMC_CAPS; 795 pcd->pcd_class = PMC_CLASS_UCP; 796 pcd->pcd_num = npmc; 797 pcd->pcd_ri = md->pmd_npmc; 798 pcd->pcd_width = pmcwidth; 799 800 pcd->pcd_allocate_pmc = ucp_allocate_pmc; 801 pcd->pcd_config_pmc = ucp_config_pmc; 802 pcd->pcd_describe = ucp_describe; 803 pcd->pcd_get_config = ucp_get_config; 804 pcd->pcd_get_msr = NULL; 805 pcd->pcd_pcpu_fini = uncore_pcpu_fini; 806 pcd->pcd_pcpu_init = uncore_pcpu_init; 807 pcd->pcd_read_pmc = ucp_read_pmc; 808 pcd->pcd_release_pmc = ucp_release_pmc; 809 pcd->pcd_start_pmc = ucp_start_pmc; 810 pcd->pcd_stop_pmc = ucp_stop_pmc; 811 pcd->pcd_write_pmc = ucp_write_pmc; 812 813 md->pmd_npmc += npmc; 814 } 815 816 int 817 pmc_uncore_initialize(struct pmc_mdep *md, int maxcpu) 818 { 819 uncore_cputype = md->pmd_cputype; 820 uncore_pmcmask = 0; 821 822 /* 823 * Initialize programmable counters. 824 */ 825 826 uncore_ucp_npmc = 8; 827 uncore_ucp_width = 48; 828 829 uncore_pmcmask |= ((1ULL << uncore_ucp_npmc) - 1); 830 831 ucp_initialize(md, maxcpu, uncore_ucp_npmc, uncore_ucp_width); 832 833 /* 834 * Initialize fixed function counters, if present. 835 */ 836 uncore_ucf_ri = uncore_ucp_npmc; 837 uncore_ucf_npmc = 1; 838 uncore_ucf_width = 48; 839 840 ucf_initialize(md, maxcpu, uncore_ucf_npmc, uncore_ucf_width); 841 uncore_pmcmask |= ((1ULL << uncore_ucf_npmc) - 1) << SELECTOFF(uncore_cputype); 842 843 PMCDBG2(MDP,INI,1,"uncore-init pmcmask=0x%jx ucfri=%d", uncore_pmcmask, 844 uncore_ucf_ri); 845 846 uncore_pcpu = malloc(sizeof(*uncore_pcpu) * maxcpu, M_PMC, 847 M_ZERO | M_WAITOK); 848 849 return (0); 850 } 851 852 void 853 pmc_uncore_finalize(struct pmc_mdep *md) 854 { 855 PMCDBG0(MDP,INI,1, "uncore-finalize"); 856 857 free(uncore_pcpu, M_PMC); 858 uncore_pcpu = NULL; 859 } 860