/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Support for the AMD K8 and later processors */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#define OVERFLOW_WAIT_COUNT 50

DPCPU_DEFINE_STATIC(uint32_t, nmi_counter);

/* AMD K8 PMCs */
struct amd_descr {
        struct pmc_descr pm_descr;      /* "base class" */
        uint32_t pm_evsel;              /* address of EVSEL register */
        uint32_t pm_perfctr;            /* address of PERFCTR register */
        enum sub_class pm_subclass;     /* register subclass */
};

static int amd_npmcs;
static struct amd_descr amd_pmcdesc[AMD_NPMCS_MAX];

struct amd_event_code_map {
        enum pmc_event pe_ev;           /* enum value */
        uint16_t pe_code;               /* encoded event mask */
        uint8_t pe_mask;                /* bits allowed in unit mask */
};

const struct amd_event_code_map amd_event_codes[] = {
        { PMC_EV_K8_FP_DISPATCHED_FPU_OPS, 0x00, 0x3F },
        { PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED, 0x01, 0x00 },
        { PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS, 0x02, 0x00 },

        { PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD, 0x20, 0x7F },
        { PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
            0x21, 0x00 },
        { PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
        { PMC_EV_K8_LS_BUFFER2_FULL, 0x23, 0x00 },
        { PMC_EV_K8_LS_LOCKED_OPERATION, 0x24, 0x07 },
        { PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL, 0x25, 0x00 },
        { PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS, 0x26, 0x00 },
        { PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS, 0x27, 0x00 },

        { PMC_EV_K8_DC_ACCESS, 0x40, 0x00 },
        { PMC_EV_K8_DC_MISS, 0x41, 0x00 },
        { PMC_EV_K8_DC_REFILL_FROM_L2, 0x42, 0x1F },
        { PMC_EV_K8_DC_REFILL_FROM_SYSTEM, 0x43, 0x1F },
        { PMC_EV_K8_DC_COPYBACK, 0x44, 0x1F },
        { PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT, 0x45, 0x00 },
        { PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS, 0x46, 0x00 },
        { PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE, 0x47, 0x00 },
        { PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL, 0x48, 0x00 },
        { PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
        { PMC_EV_K8_DC_ONE_BIT_ECC_ERROR, 0x4A, 0x03 },
        { PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
        { PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS, 0x4C, 0x03 },

        { PMC_EV_K8_BU_CPU_CLK_UNHALTED, 0x76, 0x00 },
        { PMC_EV_K8_BU_INTERNAL_L2_REQUEST, 0x7D, 0x1F },
        { PMC_EV_K8_BU_FILL_REQUEST_L2_MISS, 0x7E, 0x07 },
        { PMC_EV_K8_BU_FILL_INTO_L2, 0x7F, 0x03 },

        { PMC_EV_K8_IC_FETCH, 0x80, 0x00 },
        { PMC_EV_K8_IC_MISS, 0x81, 0x00 },
        { PMC_EV_K8_IC_REFILL_FROM_L2, 0x82, 0x00 },
        { PMC_EV_K8_IC_REFILL_FROM_SYSTEM, 0x83, 0x00 },
        { PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT, 0x84, 0x00 },
        { PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS, 0x85, 0x00 },
        { PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
        { PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL, 0x87, 0x00 },
        { PMC_EV_K8_IC_RETURN_STACK_HIT, 0x88, 0x00 },
        { PMC_EV_K8_IC_RETURN_STACK_OVERFLOW, 0x89, 0x00 },

        { PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS, 0xC0, 0x00 },
        { PMC_EV_K8_FR_RETIRED_UOPS, 0xC1, 0x00 },
        { PMC_EV_K8_FR_RETIRED_BRANCHES, 0xC2, 0x00 },
        { PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0x00 },
        { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES, 0xC4, 0x00 },
        { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
        { PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0x00 },
        { PMC_EV_K8_FR_RETIRED_RESYNCS, 0xC7, 0x00 },
        { PMC_EV_K8_FR_RETIRED_NEAR_RETURNS, 0xC8, 0x00 },
        { PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
        { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
            0xCA, 0x00 },
        { PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS, 0xCB, 0x0F },
        { PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
            0xCC, 0x07 },
        { PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES, 0xCD, 0x00 },
        { PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
        { PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS, 0xCF, 0x00 },

        { PMC_EV_K8_FR_DECODER_EMPTY, 0xD0, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALLS, 0xD1, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
            0xD2, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD, 0xD4, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
            0xD5, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
            0xD6, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL, 0xD7, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL, 0xD8, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
            0xD9, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
            0xDA, 0x00 },
        { PMC_EV_K8_FR_FPU_EXCEPTIONS, 0xDB, 0x0F },
        { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0, 0xDC, 0x00 },
        { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1, 0xDD, 0x00 },
        { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2, 0xDE, 0x00 },
        { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3, 0xDF, 0x00 },

        { PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x07 },
        { PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
        { PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
            0xE2, 0x00 },
        { PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND, 0xE3, 0x07 },
        { PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
        { PMC_EV_K8_NB_SIZED_COMMANDS, 0xEB, 0x7F },
        { PMC_EV_K8_NB_PROBE_RESULT, 0xEC, 0x0F },
        { PMC_EV_K8_NB_HT_BUS0_BANDWIDTH, 0xF6, 0x0F },
        { PMC_EV_K8_NB_HT_BUS1_BANDWIDTH, 0xF7, 0x0F },
        { PMC_EV_K8_NB_HT_BUS2_BANDWIDTH, 0xF8, 0x0F }
};

const int amd_event_codes_size = nitems(amd_event_codes);

/*
 * Per-processor information.
 */
struct amd_cpu {
        struct pmc_hw pc_amdpmcs[AMD_NPMCS_MAX];
};
static struct amd_cpu **amd_pcpu;

/*
 * Read a PMC value from the MSR.
 */
static int
amd_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
{
        const struct amd_descr *pd;
        pmc_value_t tmp;
        enum pmc_mode mode;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < amd_npmcs,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));
        KASSERT(amd_pcpu[cpu],
            ("[amd,%d] null per-cpu, cpu %d", __LINE__, cpu));

        pd = &amd_pmcdesc[ri];
        mode = PMC_TO_MODE(pm);

        PMCDBG2(MDP, REA, 1, "amd-read id=%d class=%d", ri,
            pd->pm_descr.pd_class);

        tmp = rdmsr(pd->pm_perfctr);    /* RDMSR serializes */
        PMCDBG2(MDP, REA, 2, "amd-read (pre-munge) id=%d -> %jd", ri, tmp);
        if (PMC_IS_SAMPLING_MODE(mode)) {
                /*
                 * Clamp value to 0 if the counter just overflowed,
                 * otherwise the returned reload count would wrap to a
                 * huge value.
                 */
                if ((tmp & (1ULL << 47)) == 0)
                        tmp = 0;
                else {
                        /* Sign extend 48 bit value to 64 bits. */
                        tmp = (pmc_value_t)((int64_t)(tmp << 16) >> 16);
                        tmp = AMD_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
                }
        }
        *v = tmp;

        PMCDBG2(MDP, REA, 2, "amd-read (post-munge) id=%d -> %jd", ri, *v);

        return (0);
}

/*
 * Write a PMC MSR.
 */
static int
amd_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
{
        const struct amd_descr *pd;
        enum pmc_mode mode;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < amd_npmcs,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));

        pd = &amd_pmcdesc[ri];
        mode = PMC_TO_MODE(pm);

        /* use 2's complement of the count for sampling mode PMCs */
        if (PMC_IS_SAMPLING_MODE(mode))
                v = AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

        PMCDBG3(MDP, WRI, 1, "amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);

        /* write the PMC value */
        wrmsr(pd->pm_perfctr, v);
        return (0);
}

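/*
 * A sketch of the sampling-mode conversion used above, assuming
 * AMD_RELOAD_COUNT_TO_PERFCTR_VALUE() is the usual two's complement
 * encoding over the 48-bit counter (illustrative values only):
 *
 *      AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(0x1000)
 *          == (-0x1000) & 0xFFFFFFFFFFFF == 0xFFFFFFFFF000
 *
 * so the counter overflows after 0x1000 increments and, with AMD_PMC_INT
 * set, raises an NMI.  amd_read_pmc() inverts the mapping with
 * AMD_PERFCTR_VALUE_TO_RELOAD_COUNT() after sign-extending bit 47 of the
 * live counter value.
 */
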
257 */ 258 static int 259 amd_config_pmc(int cpu, int ri, struct pmc *pm) 260 { 261 struct pmc_hw *phw; 262 263 PMCDBG3(MDP, CFG, 1, "cpu=%d ri=%d pm=%p", cpu, ri, pm); 264 265 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 266 ("[amd,%d] illegal CPU value %d", __LINE__, cpu)); 267 KASSERT(ri >= 0 && ri < amd_npmcs, 268 ("[amd,%d] illegal row-index %d", __LINE__, ri)); 269 270 phw = &amd_pcpu[cpu]->pc_amdpmcs[ri]; 271 272 KASSERT(pm == NULL || phw->phw_pmc == NULL, 273 ("[amd,%d] pm=%p phw->pm=%p hwpmc not unconfigured", 274 __LINE__, pm, phw->phw_pmc)); 275 276 phw->phw_pmc = pm; 277 return (0); 278 } 279 280 /* 281 * Retrieve a configured PMC pointer from hardware state. 282 */ 283 static int 284 amd_get_config(int cpu, int ri, struct pmc **ppm) 285 { 286 *ppm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc; 287 return (0); 288 } 289 290 /* 291 * Machine-dependent actions taken during the context switch in of a 292 * thread. 293 */ 294 static int 295 amd_switch_in(struct pmc_cpu *pc __pmcdbg_used, struct pmc_process *pp) 296 { 297 PMCDBG3(MDP, SWI, 1, "pc=%p pp=%p enable-msr=%d", pc, pp, 298 (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0); 299 300 /* enable the RDPMC instruction if needed */ 301 if (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) 302 load_cr4(rcr4() | CR4_PCE); 303 304 return (0); 305 } 306 307 /* 308 * Machine-dependent actions taken during the context switch out of a 309 * thread. 310 */ 311 static int 312 amd_switch_out(struct pmc_cpu *pc __pmcdbg_used, 313 struct pmc_process *pp __pmcdbg_used) 314 { 315 PMCDBG3(MDP, SWO, 1, "pc=%p pp=%p enable-msr=%d", pc, pp, pp ? 316 (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) == 1 : 0); 317 318 /* always turn off the RDPMC instruction */ 319 load_cr4(rcr4() & ~CR4_PCE); 320 321 return (0); 322 } 323 324 /* 325 * Check if a given PMC allocation is feasible. 326 */ 327 static int 328 amd_allocate_pmc(int cpu __unused, int ri, struct pmc *pm, 329 const struct pmc_op_pmcallocate *a) 330 { 331 const struct pmc_descr *pd; 332 uint64_t allowed_unitmask, caps, config, unitmask; 333 enum pmc_event pe; 334 int i; 335 336 KASSERT(ri >= 0 && ri < amd_npmcs, 337 ("[amd,%d] illegal row index %d", __LINE__, ri)); 338 339 pd = &amd_pmcdesc[ri].pm_descr; 340 341 /* check class match */ 342 if (pd->pd_class != a->pm_class) 343 return (EINVAL); 344 345 if ((a->pm_flags & PMC_F_EV_PMU) == 0) 346 return (EINVAL); 347 348 caps = pm->pm_caps; 349 350 PMCDBG2(MDP, ALL, 1,"amd-allocate ri=%d caps=0x%x", ri, caps); 351 352 /* Validate sub-class. 
        /* Validate sub-class. */
        if (amd_pmcdesc[ri].pm_subclass != a->pm_md.pm_amd.pm_amd_sub_class)
                return (EINVAL);

        if (strlen(pmc_cpuid) != 0) {
                pm->pm_md.pm_amd.pm_amd_evsel = a->pm_md.pm_amd.pm_amd_config;
                PMCDBG2(MDP, ALL, 2, "amd-allocate ri=%d -> config=0x%x", ri,
                    a->pm_md.pm_amd.pm_amd_config);
                return (0);
        }

        pe = a->pm_ev;

        /* map ev to the correct event mask code */
        config = allowed_unitmask = 0;
        for (i = 0; i < amd_event_codes_size; i++) {
                if (amd_event_codes[i].pe_ev == pe) {
                        config =
                            AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
                        allowed_unitmask =
                            AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
                        break;
                }
        }
        if (i == amd_event_codes_size)
                return (EINVAL);

        unitmask = a->pm_md.pm_amd.pm_amd_config & AMD_PMC_UNITMASK;
        if ((unitmask & ~allowed_unitmask) != 0) /* disallow reserved bits */
                return (EINVAL);

        if (unitmask != 0 && (caps & PMC_CAP_QUALIFIER) != 0)
                config |= unitmask;

        if ((caps & PMC_CAP_THRESHOLD) != 0)
                config |= a->pm_md.pm_amd.pm_amd_config & AMD_PMC_COUNTERMASK;

        /* Set at least one of the 'usr' or 'os' caps. */
        if ((caps & PMC_CAP_USER) != 0)
                config |= AMD_PMC_USR;
        if ((caps & PMC_CAP_SYSTEM) != 0)
                config |= AMD_PMC_OS;
        if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
                config |= (AMD_PMC_USR | AMD_PMC_OS);

        if ((caps & PMC_CAP_EDGE) != 0)
                config |= AMD_PMC_EDGE;
        if ((caps & PMC_CAP_INVERT) != 0)
                config |= AMD_PMC_INVERT;
        if ((caps & PMC_CAP_INTERRUPT) != 0)
                config |= AMD_PMC_INT;

        pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */

        PMCDBG2(MDP, ALL, 2, "amd-allocate ri=%d -> config=0x%x", ri, config);

        return (0);
}

/*
 * Release machine dependent state associated with a PMC.  This is a
 * no-op on this architecture.
 */
static int
amd_release_pmc(int cpu, int ri, struct pmc *pmc __unused)
{
        struct pmc_hw *phw __diagused;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < amd_npmcs,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));

        phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];

        KASSERT(phw->phw_pmc == NULL,
            ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

        return (0);
}

/*
 * Start a PMC.
 */
static int
amd_start_pmc(int cpu __diagused, int ri, struct pmc *pm)
{
        const struct amd_descr *pd;
        uint64_t config;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < amd_npmcs,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));

        pd = &amd_pmcdesc[ri];

        PMCDBG2(MDP, STA, 1, "amd-start cpu=%d ri=%d", cpu, ri);

        /*
         * This assertion can be triggered by DF counters, because all
         * DF MSRs are shared across the package.  The code needs to be
         * changed to honor the per-package flag in the JSON event
         * definitions.
         */
        KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
            ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
            ri, cpu, pd->pm_descr.pd_name));

        /* turn on the PMC ENABLE bit */
        config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;

        PMCDBG1(MDP, STA, 2, "amd-start config=0x%x", config);

        wrmsr(pd->pm_evsel, config);
        return (0);
}

471 */ 472 static int 473 amd_stop_pmc(int cpu __diagused, int ri, struct pmc *pm) 474 { 475 const struct amd_descr *pd; 476 uint64_t config; 477 int i; 478 479 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 480 ("[amd,%d] illegal CPU value %d", __LINE__, cpu)); 481 KASSERT(ri >= 0 && ri < amd_npmcs, 482 ("[amd,%d] illegal row-index %d", __LINE__, ri)); 483 484 pd = &amd_pmcdesc[ri]; 485 486 KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel), 487 ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped", 488 __LINE__, ri, cpu, pd->pm_descr.pd_name)); 489 490 PMCDBG1(MDP, STO, 1, "amd-stop ri=%d", ri); 491 492 /* turn off the PMC ENABLE bit */ 493 config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE; 494 wrmsr(pd->pm_evsel, config); 495 496 /* 497 * Due to NMI latency on newer AMD processors 498 * NMI interrupts are ignored, which leads to 499 * panic or messages based on kernel configuration 500 */ 501 502 /* Wait for the count to be reset */ 503 for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) { 504 if (rdmsr(pd->pm_perfctr) & (1 << (pd->pm_descr.pd_width - 1))) 505 break; 506 507 DELAY(1); 508 } 509 510 return (0); 511 } 512 513 /* 514 * Interrupt handler. This function needs to return '1' if the 515 * interrupt was this CPU's PMCs or '0' otherwise. It is not allowed 516 * to sleep or do anything a 'fast' interrupt handler is not allowed 517 * to do. 518 */ 519 static int 520 amd_intr(struct trapframe *tf) 521 { 522 struct amd_cpu *pac; 523 struct pmc *pm; 524 pmc_value_t v; 525 uint64_t config, evsel, perfctr; 526 uint32_t active = 0, count = 0; 527 int i, error, retval, cpu; 528 529 cpu = curcpu; 530 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 531 ("[amd,%d] out of range CPU %d", __LINE__, cpu)); 532 533 PMCDBG3(MDP, INT, 1, "cpu=%d tf=%p um=%d", cpu, tf, TRAPF_USERMODE(tf)); 534 535 retval = 0; 536 537 pac = amd_pcpu[cpu]; 538 539 /* 540 * look for all PMCs that have interrupted: 541 * - look for a running, sampling PMC which has overflowed 542 * and which has a valid 'struct pmc' association 543 * 544 * If found, we call a helper to process the interrupt. 545 * 546 * PMCs interrupting at the same time are collapsed into 547 * a single interrupt. Check all the valid pmcs for 548 * overflow. 549 */ 550 for (i = 0; i < amd_npmcs; i++) { 551 if (amd_pmcdesc[i].pm_subclass != PMC_AMD_SUB_CLASS_CORE) 552 break; 553 554 if ((pm = pac->pc_amdpmcs[i].phw_pmc) == NULL || 555 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { 556 continue; 557 } 558 559 /* Consider pmc with valid handle as active */ 560 active++; 561 562 if (!AMD_PMC_HAS_OVERFLOWED(i)) 563 continue; 564 565 retval = 1; /* Found an interrupting PMC. */ 566 567 if (pm->pm_state != PMC_STATE_RUNNING) 568 continue; 569 570 /* Stop the PMC, reload count. */ 571 evsel = amd_pmcdesc[i].pm_evsel; 572 perfctr = amd_pmcdesc[i].pm_perfctr; 573 v = pm->pm_sc.pm_reloadcount; 574 config = rdmsr(evsel); 575 576 KASSERT((config & ~AMD_PMC_ENABLE) == 577 (pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE), 578 ("[amd,%d] config mismatch reg=0x%jx pm=0x%jx", __LINE__, 579 (uintmax_t)config, (uintmax_t)pm->pm_md.pm_amd.pm_amd_evsel)); 580 581 wrmsr(evsel, config & ~AMD_PMC_ENABLE); 582 wrmsr(perfctr, AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v)); 583 584 /* Restart the counter if logging succeeded. */ 585 error = pmc_process_interrupt(PMC_HR, pm, tf); 586 if (error == 0) 587 wrmsr(evsel, config); 588 } 589 590 /* 591 * Due to NMI latency, there can be a scenario in which 592 * multiple pmcs gets serviced in an earlier NMI and we 593 * do not find an overflow in the subsequent NMI. 
594 * 595 * For such cases we keep a per-cpu count of active NMIs 596 * and compare it with min(active pmcs, 2) to determine 597 * if this NMI was for a pmc overflow which was serviced 598 * in an earlier request or should be ignored. 599 */ 600 if (retval) { 601 DPCPU_SET(nmi_counter, min(2, active)); 602 } else { 603 if ((count = DPCPU_GET(nmi_counter))) { 604 retval = 1; 605 DPCPU_SET(nmi_counter, --count); 606 } 607 } 608 609 if (retval) 610 counter_u64_add(pmc_stats.pm_intr_processed, 1); 611 else 612 counter_u64_add(pmc_stats.pm_intr_ignored, 1); 613 614 PMCDBG1(MDP, INT, 2, "retval=%d", retval); 615 return (retval); 616 } 617 618 /* 619 * Describe a PMC. 620 */ 621 static int 622 amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc) 623 { 624 const struct amd_descr *pd; 625 struct pmc_hw *phw; 626 627 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 628 ("[amd,%d] illegal CPU %d", __LINE__, cpu)); 629 KASSERT(ri >= 0 && ri < amd_npmcs, 630 ("[amd,%d] row-index %d out of range", __LINE__, ri)); 631 632 phw = &amd_pcpu[cpu]->pc_amdpmcs[ri]; 633 pd = &amd_pmcdesc[ri]; 634 635 strlcpy(pi->pm_name, pd->pm_descr.pd_name, sizeof(pi->pm_name)); 636 pi->pm_class = pd->pm_descr.pd_class; 637 638 if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) != 0) { 639 pi->pm_enabled = true; 640 *ppmc = phw->phw_pmc; 641 } else { 642 pi->pm_enabled = false; 643 *ppmc = NULL; 644 } 645 646 return (0); 647 } 648 649 /* 650 * Return the MSR address of the given PMC. 651 */ 652 static int 653 amd_get_msr(int ri, uint32_t *msr) 654 { 655 KASSERT(ri >= 0 && ri < amd_npmcs, 656 ("[amd,%d] ri %d out of range", __LINE__, ri)); 657 658 *msr = amd_pmcdesc[ri].pm_perfctr - AMD_PMC_PERFCTR_0; 659 return (0); 660 } 661 662 /* 663 * Processor-dependent initialization. 664 */ 665 static int 666 amd_pcpu_init(struct pmc_mdep *md, int cpu) 667 { 668 struct amd_cpu *pac; 669 struct pmc_cpu *pc; 670 struct pmc_hw *phw; 671 int first_ri, n; 672 673 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 674 ("[amd,%d] insane cpu number %d", __LINE__, cpu)); 675 676 PMCDBG1(MDP, INI, 1, "amd-init cpu=%d", cpu); 677 678 amd_pcpu[cpu] = pac = malloc(sizeof(struct amd_cpu), M_PMC, 679 M_WAITOK | M_ZERO); 680 681 /* 682 * Set the content of the hardware descriptors to a known 683 * state and initialize pointers in the MI per-cpu descriptor. 684 */ 685 pc = pmc_pcpu[cpu]; 686 first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_K8].pcd_ri; 687 688 KASSERT(pc != NULL, ("[amd,%d] NULL per-cpu pointer", __LINE__)); 689 690 for (n = 0, phw = pac->pc_amdpmcs; n < amd_npmcs; n++, phw++) { 691 phw->phw_state = PMC_PHW_FLAG_IS_ENABLED | 692 PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n); 693 phw->phw_pmc = NULL; 694 pc->pc_hwpmcs[n + first_ri] = phw; 695 } 696 697 return (0); 698 } 699 700 /* 701 * Processor-dependent cleanup prior to the KLD being unloaded. 702 */ 703 static int 704 amd_pcpu_fini(struct pmc_mdep *md, int cpu) 705 { 706 struct amd_cpu *pac; 707 struct pmc_cpu *pc; 708 int first_ri, i; 709 710 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 711 ("[amd,%d] insane cpu number (%d)", __LINE__, cpu)); 712 713 PMCDBG1(MDP, INI, 1, "amd-cleanup cpu=%d", cpu); 714 715 /* 716 * Next, free up allocated space. 
717 */ 718 if ((pac = amd_pcpu[cpu]) == NULL) 719 return (0); 720 721 amd_pcpu[cpu] = NULL; 722 723 #ifdef HWPMC_DEBUG 724 for (i = 0; i < AMD_NPMCS_K8; i++) { 725 KASSERT(pac->pc_amdpmcs[i].phw_pmc == NULL, 726 ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i)); 727 KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + i), 728 ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i)); 729 } 730 #endif 731 732 pc = pmc_pcpu[cpu]; 733 KASSERT(pc != NULL, ("[amd,%d] NULL per-cpu state", __LINE__)); 734 735 first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_K8].pcd_ri; 736 737 /* 738 * Reset pointers in the MI 'per-cpu' state. 739 */ 740 for (i = 0; i < amd_npmcs; i++) 741 pc->pc_hwpmcs[i + first_ri] = NULL; 742 743 free(pac, M_PMC); 744 return (0); 745 } 746 747 /* 748 * Initialize ourselves. 749 */ 750 struct pmc_mdep * 751 pmc_amd_initialize(void) 752 { 753 struct pmc_classdep *pcd; 754 struct pmc_mdep *pmc_mdep; 755 enum pmc_cputype cputype; 756 int error, i, ncpus; 757 int family, model, stepping; 758 int amd_core_npmcs, amd_l3_npmcs, amd_df_npmcs; 759 struct amd_descr *d; 760 761 /* 762 * The presence of hardware performance counters on the AMD 763 * Athlon, Duron or later processors, is _not_ indicated by 764 * any of the processor feature flags set by the 'CPUID' 765 * instruction, so we only check the 'instruction family' 766 * field returned by CPUID for instruction family >= 6. 767 */ 768 769 family = CPUID_TO_FAMILY(cpu_id); 770 model = CPUID_TO_MODEL(cpu_id); 771 stepping = CPUID_TO_STEPPING(cpu_id); 772 773 if (family == 0x18) 774 snprintf(pmc_cpuid, sizeof(pmc_cpuid), "HygonGenuine-%d-%02X-%X", 775 family, model, stepping); 776 else 777 snprintf(pmc_cpuid, sizeof(pmc_cpuid), "AuthenticAMD-%d-%02X-%X", 778 family, model, stepping); 779 780 switch (cpu_id & 0xF00) { 781 case 0xF00: /* Athlon64/Opteron processor */ 782 cputype = PMC_CPU_AMD_K8; 783 break; 784 default: 785 printf("pmc: Unknown AMD CPU %x %d-%d.\n", cpu_id, family, 786 model); 787 return (NULL); 788 } 789 790 /* 791 * From PPR for AMD Family 1Ah, a new cpuid leaf specifies the maximum 792 * number of PMCs of each type. If we do not have that leaf, we use 793 * the prior default values that are only valid if we have the feature 794 * bit enabled in CPU. 
795 */ 796 if ((amd_feature2 & AMDID2_PCXC) != 0) { 797 amd_core_npmcs = AMD_PMC_CORE_DEFAULT; 798 } else { 799 amd_core_npmcs = AMD_NPMCS_K8; 800 } 801 amd_l3_npmcs = AMD_PMC_L3_DEFAULT; 802 amd_df_npmcs = AMD_PMC_DF_DEFAULT; 803 804 if (cpu_exthigh >= CPUID_EXTPERFMON) { 805 u_int regs[4]; 806 do_cpuid(CPUID_EXTPERFMON, regs); 807 if (regs[1] != 0) { 808 amd_core_npmcs = EXTPERFMON_CORE_PMCS(regs[1]); 809 amd_df_npmcs = EXTPERFMON_DF_PMCS(regs[1]); 810 } 811 } 812 813 /* Enable the newer core counters */ 814 for (i = 0; i < amd_core_npmcs; i++) { 815 d = &amd_pmcdesc[i]; 816 snprintf(d->pm_descr.pd_name, PMC_NAME_MAX, 817 "K8-%d", i); 818 d->pm_descr.pd_class = PMC_CLASS_K8; 819 d->pm_descr.pd_caps = AMD_PMC_CAPS; 820 d->pm_descr.pd_width = 48; 821 if ((amd_feature2 & AMDID2_PCXC) != 0) { 822 d->pm_evsel = AMD_PMC_CORE_BASE + 2 * i; 823 d->pm_perfctr = AMD_PMC_CORE_BASE + 2 * i + 1; 824 } else { 825 d->pm_evsel = AMD_PMC_EVSEL_0 + i; 826 d->pm_perfctr = AMD_PMC_PERFCTR_0 + i; 827 } 828 d->pm_subclass = PMC_AMD_SUB_CLASS_CORE; 829 } 830 amd_npmcs = amd_core_npmcs; 831 832 if ((amd_feature2 & AMDID2_PTSCEL2I) != 0) { 833 /* Enable the LLC/L3 counters */ 834 for (i = 0; i < amd_l3_npmcs; i++) { 835 d = &amd_pmcdesc[amd_npmcs + i]; 836 snprintf(d->pm_descr.pd_name, PMC_NAME_MAX, 837 "K8-L3-%d", i); 838 d->pm_descr.pd_class = PMC_CLASS_K8; 839 d->pm_descr.pd_caps = AMD_PMC_CAPS; 840 d->pm_descr.pd_width = 48; 841 d->pm_evsel = AMD_PMC_L3_BASE + 2 * i; 842 d->pm_perfctr = AMD_PMC_L3_BASE + 2 * i + 1; 843 d->pm_subclass = PMC_AMD_SUB_CLASS_L3_CACHE; 844 } 845 amd_npmcs += amd_l3_npmcs; 846 } 847 848 if ((amd_feature2 & AMDID2_PNXC) != 0) { 849 /* Enable the data fabric counters */ 850 for (i = 0; i < amd_df_npmcs; i++) { 851 d = &amd_pmcdesc[amd_npmcs + i]; 852 snprintf(d->pm_descr.pd_name, PMC_NAME_MAX, 853 "K8-DF-%d", i); 854 d->pm_descr.pd_class = PMC_CLASS_K8; 855 d->pm_descr.pd_caps = AMD_PMC_CAPS; 856 d->pm_descr.pd_width = 48; 857 d->pm_evsel = AMD_PMC_DF_BASE + 2 * i; 858 d->pm_perfctr = AMD_PMC_DF_BASE + 2 * i + 1; 859 d->pm_subclass = PMC_AMD_SUB_CLASS_DATA_FABRIC; 860 } 861 amd_npmcs += amd_df_npmcs; 862 } 863 864 /* 865 * Allocate space for pointers to PMC HW descriptors and for 866 * the MDEP structure used by MI code. 867 */ 868 amd_pcpu = malloc(sizeof(struct amd_cpu *) * pmc_cpu_max(), M_PMC, 869 M_WAITOK | M_ZERO); 870 871 /* 872 * These processors have two classes of PMCs: the TSC and 873 * programmable PMCs. 874 */ 875 pmc_mdep = pmc_mdep_alloc(2); 876 877 ncpus = pmc_cpu_max(); 878 879 /* Initialize the TSC. */ 880 error = pmc_tsc_initialize(pmc_mdep, ncpus); 881 if (error != 0) 882 goto error; 883 884 /* Initialize AMD K8 PMC handling. 
        if ((amd_feature2 & AMDID2_PTSCEL2I) != 0) {
                /* Enable the LLC/L3 counters. */
                for (i = 0; i < amd_l3_npmcs; i++) {
                        d = &amd_pmcdesc[amd_npmcs + i];
                        snprintf(d->pm_descr.pd_name, PMC_NAME_MAX,
                            "K8-L3-%d", i);
                        d->pm_descr.pd_class = PMC_CLASS_K8;
                        d->pm_descr.pd_caps = AMD_PMC_CAPS;
                        d->pm_descr.pd_width = 48;
                        d->pm_evsel = AMD_PMC_L3_BASE + 2 * i;
                        d->pm_perfctr = AMD_PMC_L3_BASE + 2 * i + 1;
                        d->pm_subclass = PMC_AMD_SUB_CLASS_L3_CACHE;
                }
                amd_npmcs += amd_l3_npmcs;
        }

        if ((amd_feature2 & AMDID2_PNXC) != 0) {
                /* Enable the data fabric counters. */
                for (i = 0; i < amd_df_npmcs; i++) {
                        d = &amd_pmcdesc[amd_npmcs + i];
                        snprintf(d->pm_descr.pd_name, PMC_NAME_MAX,
                            "K8-DF-%d", i);
                        d->pm_descr.pd_class = PMC_CLASS_K8;
                        d->pm_descr.pd_caps = AMD_PMC_CAPS;
                        d->pm_descr.pd_width = 48;
                        d->pm_evsel = AMD_PMC_DF_BASE + 2 * i;
                        d->pm_perfctr = AMD_PMC_DF_BASE + 2 * i + 1;
                        d->pm_subclass = PMC_AMD_SUB_CLASS_DATA_FABRIC;
                }
                amd_npmcs += amd_df_npmcs;
        }

        /*
         * Allocate space for pointers to PMC HW descriptors and for
         * the MDEP structure used by MI code.
         */
        amd_pcpu = malloc(sizeof(struct amd_cpu *) * pmc_cpu_max(), M_PMC,
            M_WAITOK | M_ZERO);

        /*
         * These processors have two classes of PMCs: the TSC and
         * programmable PMCs.
         */
        pmc_mdep = pmc_mdep_alloc(2);

        ncpus = pmc_cpu_max();

        /* Initialize the TSC. */
        error = pmc_tsc_initialize(pmc_mdep, ncpus);
        if (error != 0)
                goto error;

        /* Initialize AMD K8 PMC handling. */
        pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_K8];

        pcd->pcd_caps = AMD_PMC_CAPS;
        pcd->pcd_class = PMC_CLASS_K8;
        pcd->pcd_num = amd_npmcs;
        pcd->pcd_ri = pmc_mdep->pmd_npmc;
        pcd->pcd_width = 48;

        pcd->pcd_allocate_pmc = amd_allocate_pmc;
        pcd->pcd_config_pmc = amd_config_pmc;
        pcd->pcd_describe = amd_describe;
        pcd->pcd_get_config = amd_get_config;
        pcd->pcd_get_msr = amd_get_msr;
        pcd->pcd_pcpu_fini = amd_pcpu_fini;
        pcd->pcd_pcpu_init = amd_pcpu_init;
        pcd->pcd_read_pmc = amd_read_pmc;
        pcd->pcd_release_pmc = amd_release_pmc;
        pcd->pcd_start_pmc = amd_start_pmc;
        pcd->pcd_stop_pmc = amd_stop_pmc;
        pcd->pcd_write_pmc = amd_write_pmc;

        pmc_mdep->pmd_cputype = cputype;
        pmc_mdep->pmd_intr = amd_intr;
        pmc_mdep->pmd_switch_in = amd_switch_in;
        pmc_mdep->pmd_switch_out = amd_switch_out;

        pmc_mdep->pmd_npmc += amd_npmcs;

        PMCDBG0(MDP, INI, 0, "amd-initialize");

        return (pmc_mdep);

error:
        free(pmc_mdep, M_PMC);
        return (NULL);
}

/*
 * Finalization code for AMD CPUs.
 */
void
pmc_amd_finalize(struct pmc_mdep *md)
{
        PMCDBG0(MDP, INI, 1, "amd-finalize");

        pmc_tsc_finalize(md);

        for (int i = 0; i < pmc_cpu_max(); i++)
                KASSERT(amd_pcpu[i] == NULL,
                    ("[amd,%d] non-null pcpu cpu %d", __LINE__, i));

        free(amd_pcpu, M_PMC);
        amd_pcpu = NULL;
}