/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/* Support for the AMD K7 and later processors */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#ifdef HWPMC_DEBUG
enum pmc_class amd_pmc_class;
#endif

#define OVERFLOW_WAIT_COUNT     50

DPCPU_DEFINE_STATIC(uint32_t, nmi_counter);

/* AMD K7 & K8 PMCs */
struct amd_descr {
        struct pmc_descr pm_descr;      /* "base class" */
        uint32_t        pm_evsel;       /* address of EVSEL register */
        uint32_t        pm_perfctr;     /* address of PERFCTR register */
};

/*
 * Every counter in the descriptor table shares the same capabilities
 * and width; only the { EVSEL, PERFCTR } MSR pair differs, so describe
 * each entry with a macro.  The name and class fields are filled in by
 * pmc_amd_initialize().
 */
#define AMD_PMC_DESCR(E, C)                                             \
        {                                                               \
                .pm_descr = {                                           \
                        .pd_name  = "",                                 \
                        .pd_class = -1,                                 \
                        .pd_caps  = AMD_PMC_CAPS,                       \
                        .pd_width = 48                                  \
                },                                                      \
                .pm_evsel   = (E),                                      \
                .pm_perfctr = (C)                                       \
        }

static struct amd_descr amd_pmcdesc[AMD_NPMCS] = {
        AMD_PMC_DESCR(AMD_PMC_EVSEL_0, AMD_PMC_PERFCTR_0),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_1, AMD_PMC_PERFCTR_1),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_2, AMD_PMC_PERFCTR_2),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_3, AMD_PMC_PERFCTR_3),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_4, AMD_PMC_PERFCTR_4),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_5, AMD_PMC_PERFCTR_5),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_EP_L3_0, AMD_PMC_PERFCTR_EP_L3_0),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_EP_L3_1, AMD_PMC_PERFCTR_EP_L3_1),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_EP_L3_2, AMD_PMC_PERFCTR_EP_L3_2),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_EP_L3_3, AMD_PMC_PERFCTR_EP_L3_3),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_EP_L3_4, AMD_PMC_PERFCTR_EP_L3_4),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_EP_L3_5, AMD_PMC_PERFCTR_EP_L3_5),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_EP_DF_0, AMD_PMC_PERFCTR_EP_DF_0),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_EP_DF_1, AMD_PMC_PERFCTR_EP_DF_1),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_EP_DF_2, AMD_PMC_PERFCTR_EP_DF_2),
        AMD_PMC_DESCR(AMD_PMC_EVSEL_EP_DF_3, AMD_PMC_PERFCTR_EP_DF_3)
};

struct amd_event_code_map {
        enum pmc_event  pe_ev;          /* enum value */
        uint16_t        pe_code;        /* encoded event mask */
        uint8_t         pe_mask;        /* bits allowed in unit mask */
};

const struct amd_event_code_map amd_event_codes[] = {
#if defined(__i386__)   /* 32 bit Athlon (K7) only */
        { PMC_EV_K7_DC_ACCESSES, 0x40, 0 },
        { PMC_EV_K7_DC_MISSES, 0x41, 0 },
        { PMC_EV_K7_DC_REFILLS_FROM_L2, 0x42, AMD_PMC_UNITMASK_MOESI },
        { PMC_EV_K7_DC_REFILLS_FROM_SYSTEM, 0x43, AMD_PMC_UNITMASK_MOESI },
        { PMC_EV_K7_DC_WRITEBACKS, 0x44, AMD_PMC_UNITMASK_MOESI },
        { PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 },
        { PMC_EV_K7_L1_AND_L2_DTLB_MISSES, 0x46, 0 },
        { PMC_EV_K7_MISALIGNED_REFERENCES, 0x47, 0 },

        { PMC_EV_K7_IC_FETCHES, 0x80, 0 },
        { PMC_EV_K7_IC_MISSES, 0x81, 0 },

        { PMC_EV_K7_L1_ITLB_MISSES, 0x84, 0 },
        { PMC_EV_K7_L1_L2_ITLB_MISSES, 0x85, 0 },

        { PMC_EV_K7_RETIRED_INSTRUCTIONS, 0xC0, 0 },
        { PMC_EV_K7_RETIRED_OPS, 0xC1, 0 },
        { PMC_EV_K7_RETIRED_BRANCHES, 0xC2, 0 },
        { PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 },
        { PMC_EV_K7_RETIRED_TAKEN_BRANCHES, 0xC4, 0 },
        { PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 },
        { PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 },
        { PMC_EV_K7_RETIRED_RESYNC_BRANCHES, 0xC7, 0 },
        { PMC_EV_K7_INTERRUPTS_MASKED_CYCLES, 0xCD, 0 },
        { PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 },
        { PMC_EV_K7_HARDWARE_INTERRUPTS, 0xCF, 0 },
#endif

        { PMC_EV_K8_FP_DISPATCHED_FPU_OPS, 0x00, 0x3F },
        { PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED, 0x01, 0x00 },
        { PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS, 0x02, 0x00 },

        { PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD, 0x20, 0x7F },
        { PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
            0x21, 0x00 },
        { PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
        { PMC_EV_K8_LS_BUFFER2_FULL, 0x23, 0x00 },
        { PMC_EV_K8_LS_LOCKED_OPERATION, 0x24, 0x07 },
        { PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL, 0x25, 0x00 },
        { PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS, 0x26, 0x00 },
        { PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS, 0x27, 0x00 },

        { PMC_EV_K8_DC_ACCESS, 0x40, 0x00 },
        { PMC_EV_K8_DC_MISS, 0x41, 0x00 },
        { PMC_EV_K8_DC_REFILL_FROM_L2, 0x42, 0x1F },
        { PMC_EV_K8_DC_REFILL_FROM_SYSTEM, 0x43, 0x1F },
        { PMC_EV_K8_DC_COPYBACK, 0x44, 0x1F },
        { PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT, 0x45, 0x00 },
        { PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS, 0x46, 0x00 },
        { PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE, 0x47, 0x00 },
        { PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL, 0x48, 0x00 },
        { PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
        { PMC_EV_K8_DC_ONE_BIT_ECC_ERROR, 0x4A, 0x03 },
        { PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
        { PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS, 0x4C, 0x03 },

        { PMC_EV_K8_BU_CPU_CLK_UNHALTED, 0x76, 0x00 },
        { PMC_EV_K8_BU_INTERNAL_L2_REQUEST, 0x7D, 0x1F },
        { PMC_EV_K8_BU_FILL_REQUEST_L2_MISS, 0x7E, 0x07 },
        { PMC_EV_K8_BU_FILL_INTO_L2, 0x7F, 0x03 },

        { PMC_EV_K8_IC_FETCH, 0x80, 0x00 },
        { PMC_EV_K8_IC_MISS, 0x81, 0x00 },
        { PMC_EV_K8_IC_REFILL_FROM_L2, 0x82, 0x00 },
        { PMC_EV_K8_IC_REFILL_FROM_SYSTEM, 0x83, 0x00 },
        { PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT, 0x84, 0x00 },
        { PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS, 0x85, 0x00 },
        { PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
        { PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL, 0x87, 0x00 },
        { PMC_EV_K8_IC_RETURN_STACK_HIT, 0x88, 0x00 },
        { PMC_EV_K8_IC_RETURN_STACK_OVERFLOW, 0x89, 0x00 },

        { PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS, 0xC0, 0x00 },
        { PMC_EV_K8_FR_RETIRED_UOPS, 0xC1, 0x00 },
        { PMC_EV_K8_FR_RETIRED_BRANCHES, 0xC2, 0x00 },
        { PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0x00 },
        { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES, 0xC4, 0x00 },
        { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
        { PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0x00 },
        { PMC_EV_K8_FR_RETIRED_RESYNCS, 0xC7, 0x00 },
        { PMC_EV_K8_FR_RETIRED_NEAR_RETURNS, 0xC8, 0x00 },
        { PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
        { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
            0xCA, 0x00 },
        { PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS, 0xCB, 0x0F },
        { PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
            0xCC, 0x07 },
        { PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES, 0xCD, 0x00 },
        { PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
        { PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS, 0xCF, 0x00 },

        { PMC_EV_K8_FR_DECODER_EMPTY, 0xD0, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALLS, 0xD1, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
            0xD2, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD, 0xD4, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
            0xD5, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
            0xD6, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL, 0xD7, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL, 0xD8, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
            0xD9, 0x00 },
        { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
            0xDA, 0x00 },
        { PMC_EV_K8_FR_FPU_EXCEPTIONS, 0xDB, 0x0F },
        { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0, 0xDC, 0x00 },
        { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1, 0xDD, 0x00 },
        { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2, 0xDE, 0x00 },
        { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3, 0xDF, 0x00 },

        { PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x07 },
        { PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
        { PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
            0xE2, 0x00 },
        { PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND, 0xE3, 0x07 },
        { PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
        { PMC_EV_K8_NB_SIZED_COMMANDS, 0xEB, 0x7F },
        { PMC_EV_K8_NB_PROBE_RESULT, 0xEC, 0x0F },
        { PMC_EV_K8_NB_HT_BUS0_BANDWIDTH, 0xF6, 0x0F },
        { PMC_EV_K8_NB_HT_BUS1_BANDWIDTH, 0xF7, 0x0F },
        { PMC_EV_K8_NB_HT_BUS2_BANDWIDTH, 0xF8, 0x0F }
};

const int amd_event_codes_size = nitems(amd_event_codes);

/*
 * Per-processor information
 */
struct amd_cpu {
        struct pmc_hw pc_amdpmcs[AMD_NPMCS];
};

static struct amd_cpu **amd_pcpu;

/*
 * Read a PMC register.
 */
static int
amd_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
{
        enum pmc_mode mode;
        const struct amd_descr *pd;
        pmc_value_t tmp;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));
        KASSERT(amd_pcpu[cpu],
            ("[amd,%d] null per-cpu, cpu %d", __LINE__, cpu));

        pd = &amd_pmcdesc[ri];
        mode = PMC_TO_MODE(pm);

        PMCDBG2(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);

#ifdef HWPMC_DEBUG
        KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
            ("[amd,%d] unknown PMC class (%d)", __LINE__,
            pd->pm_descr.pd_class));
#endif

        tmp = rdmsr(pd->pm_perfctr);    /* RDMSR serializes */
        PMCDBG2(MDP,REA,2,"amd-read (pre-munge) id=%d -> %jd", ri, tmp);
        if (PMC_IS_SAMPLING_MODE(mode)) {
                /*
                 * Clamp value to 0 if the counter just overflowed,
                 * otherwise the returned reload count would wrap to a
                 * huge value.
                 */
                if ((tmp & (1ULL << 47)) == 0) {
                        tmp = 0;
                } else {
                        /* Sign extend 48 bit value to 64 bits. */
                        tmp = (pmc_value_t) ((int64_t)(tmp << 16) >> 16);
                        tmp = AMD_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
                }
        }
        *v = tmp;

        PMCDBG2(MDP,REA,2,"amd-read (post-munge) id=%d -> %jd", ri, *v);

        return (0);
}
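
/*
 * Worked example of the sampling-mode munging above (illustrative
 * values only, assuming the two conversion macros are 48-bit two's
 * complement negation, as their names suggest): a PMC armed with a
 * reload count of 0x1000 starts counting at 2^48 - 0x1000 =
 * 0xFFFFFFFFF000 and raises its interrupt when it wraps at 2^48.
 * Reading 0xFFFFFFFFFC00 while it runs sign-extends to -0x400, which
 * AMD_PERFCTR_VALUE_TO_RELOAD_COUNT() turns back into a reload count
 * of 0x400, i.e. the events still outstanding before the next
 * interrupt.  A value with bit 47 clear means the counter has already
 * wrapped past zero, which is why it is clamped to 0 above.
 */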

/*
 * Write a PMC MSR.
 */
static int
amd_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
{
        const struct amd_descr *pd;
        enum pmc_mode mode;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));

        pd = &amd_pmcdesc[ri];
        mode = PMC_TO_MODE(pm);

#ifdef HWPMC_DEBUG
        KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
            ("[amd,%d] unknown PMC class (%d)", __LINE__,
            pd->pm_descr.pd_class));
#endif

        /* use 2's complement of the count for sampling mode PMCs */
        if (PMC_IS_SAMPLING_MODE(mode))
                v = AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

        PMCDBG3(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);

        /* write the PMC value */
        wrmsr(pd->pm_perfctr, v);
        return (0);
}

/*
 * Configure hardware PMC according to the configuration recorded in
 * PMC 'pm'.
 */
static int
amd_config_pmc(int cpu, int ri, struct pmc *pm)
{
        struct pmc_hw *phw;

        PMCDBG3(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));

        phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];

        KASSERT(pm == NULL || phw->phw_pmc == NULL,
            ("[amd,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
            __LINE__, pm, phw->phw_pmc));

        phw->phw_pmc = pm;
        return (0);
}

/*
 * Retrieve a configured PMC pointer from hardware state.
 */
static int
amd_get_config(int cpu, int ri, struct pmc **ppm)
{
        *ppm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;

        return (0);
}

/*
 * Machine dependent actions taken during the context switch in of a
 * thread.
 */
static int
amd_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
{
        (void) pc;

        PMCDBG3(MDP,SWI,1, "pc=%p pp=%p enable-msr=%d", pc, pp,
            (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0);

        /* enable the RDPMC instruction if needed */
        if (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS)
                load_cr4(rcr4() | CR4_PCE);

        return (0);
}

/*
 * Machine dependent actions taken during the context switch out of a
 * thread.
 */
static int
amd_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
{
        (void) pc;
        (void) pp;              /* can be NULL */

        PMCDBG3(MDP,SWO,1, "pc=%p pp=%p enable-msr=%d", pc, pp, pp ?
            (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0 : 0);

        /* always turn off the RDPMC instruction */
        load_cr4(rcr4() & ~CR4_PCE);

        return (0);
}
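
/*
 * What CR4_PCE buys a thread, as a minimal user-level sketch (a
 * hypothetical snippet, not part of this driver): with the bit set,
 * a process that requested PMC_PP_ENABLE_MSR_ACCESS can sample its
 * counter without a system call, e.g.
 *
 *      uint32_t lo, hi;
 *      __asm __volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (index));
 *      value = ((uint64_t)hi << 32) | lo;
 *
 * where 'index' selects the counter (see the note after amd_get_msr()
 * below).  With CR4_PCE clear, the same RDPMC from user mode raises a
 * general protection fault, which is why it is cleared unconditionally
 * on switch-out.
 */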

/*
 * Check if a given allocation is feasible.
 */
static int
amd_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
        int i;
        uint64_t allowed_unitmask, caps, config, unitmask;
        enum pmc_event pe;
        const struct pmc_descr *pd;

        (void) cpu;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] illegal row index %d", __LINE__, ri));

        pd = &amd_pmcdesc[ri].pm_descr;

        /* check class match */
        if (pd->pd_class != a->pm_class)
                return (EINVAL);

        if ((a->pm_flags & PMC_F_EV_PMU) == 0)
                return (EINVAL);

        caps = pm->pm_caps;

        PMCDBG2(MDP,ALL,1,"amd-allocate ri=%d caps=0x%jx", ri,
            (uintmax_t)caps);

        /* Rows 0..5 are core PMCs, 6..11 L3 PMCs and 12..15 DF PMCs. */
        if (ri >= 0 && ri < 6 &&
            a->pm_md.pm_amd.pm_amd_sub_class != PMC_AMD_SUB_CLASS_CORE)
                return (EINVAL);
        if (ri >= 6 && ri < 12 &&
            a->pm_md.pm_amd.pm_amd_sub_class != PMC_AMD_SUB_CLASS_L3_CACHE)
                return (EINVAL);
        if (ri >= 12 && ri < 16 &&
            a->pm_md.pm_amd.pm_amd_sub_class != PMC_AMD_SUB_CLASS_DATA_FABRIC)
                return (EINVAL);

        if (strlen(pmc_cpuid) != 0) {
                /*
                 * A CPUID string is available, so the event selector
                 * was encoded by the userland PMU event tables
                 * (PMC_F_EV_PMU above); use the supplied value as-is.
                 */
                pm->pm_md.pm_amd.pm_amd_evsel =
                    a->pm_md.pm_amd.pm_amd_config;
                PMCDBG2(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%jx", ri,
                    (uintmax_t)a->pm_md.pm_amd.pm_amd_config);
                return (0);
        }

        pe = a->pm_ev;

        /* map ev to the correct event mask code */
        config = allowed_unitmask = 0;
        for (i = 0; i < amd_event_codes_size; i++)
                if (amd_event_codes[i].pe_ev == pe) {
                        config =
                            AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
                        allowed_unitmask =
                            AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
                        break;
                }
        if (i == amd_event_codes_size)
                return (EINVAL);

        unitmask = a->pm_md.pm_amd.pm_amd_config & AMD_PMC_UNITMASK;
        if (unitmask & ~allowed_unitmask)       /* disallow reserved bits */
                return (EINVAL);

        if (unitmask && (caps & PMC_CAP_QUALIFIER))
                config |= unitmask;

        if (caps & PMC_CAP_THRESHOLD)
                config |= a->pm_md.pm_amd.pm_amd_config & AMD_PMC_COUNTERMASK;

        /* set at least one of the 'usr' or 'os' caps */
        if (caps & PMC_CAP_USER)
                config |= AMD_PMC_USR;
        if (caps & PMC_CAP_SYSTEM)
                config |= AMD_PMC_OS;
        if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
                config |= (AMD_PMC_USR|AMD_PMC_OS);

        if (caps & PMC_CAP_EDGE)
                config |= AMD_PMC_EDGE;
        if (caps & PMC_CAP_INVERT)
                config |= AMD_PMC_INVERT;
        if (caps & PMC_CAP_INTERRUPT)
                config |= AMD_PMC_INT;

        pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */

        PMCDBG2(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%jx", ri,
            (uintmax_t)config);

        return (0);
}
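
/*
 * A worked example of the encoding above (illustrative values only):
 * allocating PMC_EV_K8_DC_ACCESS (event code 0x40 in the table at the
 * top of this file, no unit mask bits allowed) with neither
 * PMC_CAP_USER nor PMC_CAP_SYSTEM requested produces
 *
 *      config = AMD_PMC_TO_EVENTMASK(0x40) | AMD_PMC_USR | AMD_PMC_OS;
 *
 * i.e. an EVSEL image counting data cache accesses in both user and
 * kernel mode.  AMD_PMC_ENABLE is deliberately left clear at this
 * point; it is OR-ed in by amd_start_pmc().
 */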

/*
 * Release machine dependent state associated with a PMC.  This is a
 * no-op on this architecture.
 */
/* ARGSUSED0 */
static int
amd_release_pmc(int cpu, int ri, struct pmc *pmc)
{
#ifdef HWPMC_DEBUG
        const struct amd_descr *pd;
#endif
        struct pmc_hw *phw __diagused;

        (void) pmc;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));

        phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];

        KASSERT(phw->phw_pmc == NULL,
            ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

#ifdef HWPMC_DEBUG
        pd = &amd_pmcdesc[ri];
        if (pd->pm_descr.pd_class == amd_pmc_class)
                KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
                    ("[amd,%d] PMC %d released while active", __LINE__, ri));
#endif

        return (0);
}

/*
 * Start a PMC.
 */
static int
amd_start_pmc(int cpu, int ri, struct pmc *pm)
{
        uint64_t config;
        const struct amd_descr *pd;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));

        pd = &amd_pmcdesc[ri];

        PMCDBG2(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);

        KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
            ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
            ri, cpu, pd->pm_descr.pd_name));

        /* turn on the PMC ENABLE bit */
        config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;

        PMCDBG1(MDP,STA,2,"amd-start config=0x%jx", (uintmax_t)config);

        wrmsr(pd->pm_evsel, config);
        return (0);
}

/*
 * Stop a PMC.
 */
static int
amd_stop_pmc(int cpu, int ri, struct pmc *pm)
{
        const struct amd_descr *pd;
        uint64_t config;
        int i;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] illegal row-index %d", __LINE__, ri));

        pd = &amd_pmcdesc[ri];

        KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
            ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
            __LINE__, ri, cpu, pd->pm_descr.pd_name));

        PMCDBG1(MDP,STO,1,"amd-stop ri=%d", ri);

        /* turn off the PMC ENABLE bit */
        config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;
        wrmsr(pd->pm_evsel, config);

        /*
         * Due to NMI latency on newer AMD processors, an overflow NMI
         * can still be delivered after the PMC has been disabled.  If
         * such an NMI ends up being ignored, the result is a panic or
         * console messages, depending on the kernel configuration.
         *
         * Wait for the counter to be reset by the interrupt handler,
         * i.e. for its sign bit to become set again.
         */
        for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
                if (rdmsr(pd->pm_perfctr) &
                    (1ULL << (pd->pm_descr.pd_width - 1)))
                        break;

                DELAY(1);
        }

        return (0);
}

/*
 * Interrupt handler.  This function needs to return '1' if the
 * interrupt was this CPU's PMCs or '0' otherwise.  It is not allowed
 * to sleep or do anything a 'fast' interrupt handler is not allowed
 * to do.
 */
static int
amd_intr(struct trapframe *tf)
{
        int i, error, retval, cpu;
        uint64_t config, evsel, perfctr;
        struct pmc *pm;
        struct amd_cpu *pac;
        pmc_value_t v;
        uint32_t active = 0, count = 0;

        cpu = curcpu;
        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] out of range CPU %d", __LINE__, cpu));

        PMCDBG3(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
            TRAPF_USERMODE(tf));

        retval = 0;

        pac = amd_pcpu[cpu];

        /*
         * Look for all PMCs that have interrupted:
         * - look for a running, sampling PMC which has overflowed
         *   and which has a valid 'struct pmc' association.
         *
         * If found, we call a helper to process the interrupt.
         *
         * PMCs interrupting at the same time are collapsed into a
         * single interrupt, so check all the valid PMCs for overflow.
         */
        for (i = 0; i < AMD_CORE_NPMCS; i++) {

                if ((pm = pac->pc_amdpmcs[i].phw_pmc) == NULL ||
                    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
                        continue;
                }

                /* Consider a PMC with a valid handle as active. */
                active++;

                if (!AMD_PMC_HAS_OVERFLOWED(i))
                        continue;

                retval = 1;     /* Found an interrupting PMC. */

                if (pm->pm_state != PMC_STATE_RUNNING)
                        continue;

                /* Stop the PMC, reload count. */
                evsel = amd_pmcdesc[i].pm_evsel;
                perfctr = amd_pmcdesc[i].pm_perfctr;
                v = pm->pm_sc.pm_reloadcount;
                config = rdmsr(evsel);

                KASSERT((config & ~AMD_PMC_ENABLE) ==
                    (pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE),
                    ("[amd,%d] config mismatch reg=0x%jx pm=0x%jx", __LINE__,
                    (uintmax_t)config,
                    (uintmax_t)pm->pm_md.pm_amd.pm_amd_evsel));

                wrmsr(evsel, config & ~AMD_PMC_ENABLE);
                wrmsr(perfctr, AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v));

                /* Restart the counter if logging succeeded. */
                error = pmc_process_interrupt(PMC_HR, pm, tf);
                if (error == 0)
                        wrmsr(evsel, config);
        }

        /*
         * Due to NMI latency, there can be a scenario in which
         * multiple PMCs get serviced in an earlier NMI and we do not
         * find an overflow in the subsequent NMI.
         *
         * For such cases we keep a per-CPU count of active NMIs and
         * compare it with min(active pmcs, 2) to determine if this
         * NMI was for a PMC overflow which was serviced in an earlier
         * request, or should be ignored.
         */
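        /*
         * A concrete scenario (hypothetical timing): two sampling
         * PMCs overflow almost simultaneously and the loop above
         * services both in the first NMI, so nmi_counter is set to 2.
         * The hardware can still deliver one more, now redundant NMI,
         * which finds no overflowed PMC; the nonzero nmi_counter lets
         * us claim it and decrement the count instead of reporting a
         * spurious NMI.  The cap of 2 matches the x86 behaviour of
         * latching at most one additional NMI while one is being
         * serviced.
         */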
        if (retval) {
                DPCPU_SET(nmi_counter, min(2, active));
        } else {
                if ((count = DPCPU_GET(nmi_counter))) {
                        retval = 1;
                        DPCPU_SET(nmi_counter, --count);
                }
        }

        if (retval)
                counter_u64_add(pmc_stats.pm_intr_processed, 1);
        else
                counter_u64_add(pmc_stats.pm_intr_ignored, 1);

        PMCDBG1(MDP,INT,2, "retval=%d", retval);
        return (retval);
}

/*
 * Describe a PMC.
 */
static int
amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
        const struct amd_descr *pd;
        struct pmc_hw *phw;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] illegal CPU %d", __LINE__, cpu));
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] row-index %d out of range", __LINE__, ri));

        phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
        pd = &amd_pmcdesc[ri];

        strlcpy(pi->pm_name, pd->pm_descr.pd_name, sizeof(pi->pm_name));
        pi->pm_class = pd->pm_descr.pd_class;

        if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
                pi->pm_enabled = TRUE;
                *ppmc = phw->phw_pmc;
        } else {
                pi->pm_enabled = FALSE;
                *ppmc = NULL;
        }

        return (0);
}

/*
 * i386 specific entry points
 */

/*
 * Return the MSR address of the given PMC.
 */
static int
amd_get_msr(int ri, uint32_t *msr)
{
        KASSERT(ri >= 0 && ri < AMD_NPMCS,
            ("[amd,%d] ri %d out of range", __LINE__, ri));

        *msr = amd_pmcdesc[ri].pm_perfctr - AMD_PMC_PERFCTR_0;

        return (0);
}
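
/*
 * Note: despite the function's name, the value stored in '*msr' above
 * is not an MSR address but the PERFCTR MSR's offset from
 * AMD_PMC_PERFCTR_0, which presumably is what the MI code exports to
 * userland as the RDPMC counter selector for the core PMCs.  The
 * uncore (L3 and DF) rows would yield much larger offsets, so direct
 * user-mode access is only expected to make sense for the core
 * counters.
 */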

/*
 * Processor dependent initialization.
 */
static int
amd_pcpu_init(struct pmc_mdep *md, int cpu)
{
        int classindex, first_ri, n;
        struct pmc_cpu *pc;
        struct amd_cpu *pac;
        struct pmc_hw *phw;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] insane cpu number %d", __LINE__, cpu));

        PMCDBG1(MDP,INI,1,"amd-init cpu=%d", cpu);

        amd_pcpu[cpu] = pac = malloc(sizeof(struct amd_cpu), M_PMC,
            M_WAITOK|M_ZERO);

        /*
         * Set the content of the hardware descriptors to a known
         * state and initialize pointers in the MI per-cpu descriptor.
         */
        pc = pmc_pcpu[cpu];
#if defined(__amd64__)
        classindex = PMC_MDEP_CLASS_INDEX_K8;
#elif defined(__i386__)
        classindex = md->pmd_cputype == PMC_CPU_AMD_K8 ?
            PMC_MDEP_CLASS_INDEX_K8 : PMC_MDEP_CLASS_INDEX_K7;
#endif
        first_ri = md->pmd_classdep[classindex].pcd_ri;

        KASSERT(pc != NULL, ("[amd,%d] NULL per-cpu pointer", __LINE__));

        for (n = 0, phw = pac->pc_amdpmcs; n < AMD_NPMCS; n++, phw++) {
                phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
                    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
                phw->phw_pmc = NULL;
                pc->pc_hwpmcs[n + first_ri] = phw;
        }

        return (0);
}

/*
 * Processor dependent cleanup prior to the KLD being unloaded.
 */
static int
amd_pcpu_fini(struct pmc_mdep *md, int cpu)
{
        int classindex, first_ri, i;
        uint32_t evsel;
        struct pmc_cpu *pc;
        struct amd_cpu *pac;

        KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
            ("[amd,%d] insane cpu number (%d)", __LINE__, cpu));

        PMCDBG1(MDP,INI,1,"amd-cleanup cpu=%d", cpu);

        /*
         * First, turn off all PMCs on this CPU.
         */
        for (i = 0; i < 4; i++) { /* XXX this loop is now not needed */
                evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
                evsel &= ~AMD_PMC_ENABLE;
                wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
        }

        /*
         * Next, free up allocated space.
         */
        if ((pac = amd_pcpu[cpu]) == NULL)
                return (0);

        amd_pcpu[cpu] = NULL;

#ifdef HWPMC_DEBUG
        for (i = 0; i < AMD_NPMCS; i++) {
                KASSERT(pac->pc_amdpmcs[i].phw_pmc == NULL,
                    ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
                KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + i),
                    ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
        }
#endif

        pc = pmc_pcpu[cpu];
        KASSERT(pc != NULL, ("[amd,%d] NULL per-cpu state", __LINE__));

#if defined(__amd64__)
        classindex = PMC_MDEP_CLASS_INDEX_K8;
#elif defined(__i386__)
        classindex = md->pmd_cputype == PMC_CPU_AMD_K8 ?
            PMC_MDEP_CLASS_INDEX_K8 : PMC_MDEP_CLASS_INDEX_K7;
#endif
        first_ri = md->pmd_classdep[classindex].pcd_ri;

        /*
         * Reset pointers in the MI 'per-cpu' state.
         */
        for (i = 0; i < AMD_NPMCS; i++)
                pc->pc_hwpmcs[i + first_ri] = NULL;

        free(pac, M_PMC);

        return (0);
}

/*
 * Initialize ourselves.
 */
struct pmc_mdep *
pmc_amd_initialize(void)
{
        int classindex, error, i, ncpus;
        struct pmc_classdep *pcd;
        enum pmc_cputype cputype;
        struct pmc_mdep *pmc_mdep;
        enum pmc_class class;
        int family, model, stepping;
        char *name;

        /*
         * The presence of hardware performance counters on the AMD
         * Athlon, Duron or later processors is _not_ indicated by any
         * of the processor feature flags set by the 'CPUID'
         * instruction, so we can only check that the 'instruction
         * family' field returned by CPUID is >= 6.
         */
        name = NULL;
        family = CPUID_TO_FAMILY(cpu_id);
        model = CPUID_TO_MODEL(cpu_id);
        stepping = CPUID_TO_STEPPING(cpu_id);

        if (family == 0x18)
                snprintf(pmc_cpuid, sizeof(pmc_cpuid),
                    "HygonGenuine-%d-%02X-%X", family, model, stepping);
        else
                snprintf(pmc_cpuid, sizeof(pmc_cpuid),
                    "AuthenticAMD-%d-%02X-%X", family, model, stepping);

        switch (cpu_id & 0xF00) {
#if defined(__i386__)
        case 0x600:             /* Athlon(tm) processor */
                classindex = PMC_MDEP_CLASS_INDEX_K7;
                cputype = PMC_CPU_AMD_K7;
                class = PMC_CLASS_K7;
                name = "K7";
                break;
#endif
        case 0xF00:             /* Athlon64/Opteron processor */
                classindex = PMC_MDEP_CLASS_INDEX_K8;
                cputype = PMC_CPU_AMD_K8;
                class = PMC_CLASS_K8;
                name = "K8";
                break;

        default:
                (void) printf("pmc: Unknown AMD CPU %x %d-%d.\n", cpu_id,
                    (cpu_id & 0xF00) >> 8, model);
                return (NULL);
        }
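
        /*
         * An aside on the switch above (illustrative, hypothetical
         * cpu_id value): every AMD processor from the K8 onwards
         * reports a base family of 0xF and encodes the remainder in
         * the extended family field, so e.g. a Zen part with cpu_id
         * 0x00A20F10 still matches 'case 0xF00' here even though
         * CPUID_TO_FAMILY() yields 0x19.
         */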

#ifdef HWPMC_DEBUG
        amd_pmc_class = class;
#endif

        /*
         * Allocate space for pointers to PMC HW descriptors and for
         * the MDEP structure used by MI code.
         */
        amd_pcpu = malloc(sizeof(struct amd_cpu *) * pmc_cpu_max(), M_PMC,
            M_WAITOK|M_ZERO);

        /*
         * These processors have two classes of PMCs: the TSC and
         * programmable PMCs.
         */
        pmc_mdep = pmc_mdep_alloc(2);

        pmc_mdep->pmd_cputype = cputype;

        ncpus = pmc_cpu_max();

        /* Initialize the TSC. */
        error = pmc_tsc_initialize(pmc_mdep, ncpus);
        if (error)
                goto error;

        /* Initialize AMD K7 and K8 PMC handling. */
        pcd = &pmc_mdep->pmd_classdep[classindex];

        pcd->pcd_caps = AMD_PMC_CAPS;
        pcd->pcd_class = class;
        pcd->pcd_num = AMD_NPMCS;
        pcd->pcd_ri = pmc_mdep->pmd_npmc;
        pcd->pcd_width = 48;

        /* fill in the correct pmc name and class */
        for (i = 0; i < AMD_NPMCS; i++) {
                (void) snprintf(amd_pmcdesc[i].pm_descr.pd_name,
                    sizeof(amd_pmcdesc[i].pm_descr.pd_name), "%s-%d",
                    name, i);
                amd_pmcdesc[i].pm_descr.pd_class = class;
        }

        pcd->pcd_allocate_pmc = amd_allocate_pmc;
        pcd->pcd_config_pmc = amd_config_pmc;
        pcd->pcd_describe = amd_describe;
        pcd->pcd_get_config = amd_get_config;
        pcd->pcd_get_msr = amd_get_msr;
        pcd->pcd_pcpu_fini = amd_pcpu_fini;
        pcd->pcd_pcpu_init = amd_pcpu_init;
        pcd->pcd_read_pmc = amd_read_pmc;
        pcd->pcd_release_pmc = amd_release_pmc;
        pcd->pcd_start_pmc = amd_start_pmc;
        pcd->pcd_stop_pmc = amd_stop_pmc;
        pcd->pcd_write_pmc = amd_write_pmc;

        pmc_mdep->pmd_intr = amd_intr;
        pmc_mdep->pmd_switch_in = amd_switch_in;
        pmc_mdep->pmd_switch_out = amd_switch_out;

        pmc_mdep->pmd_npmc += AMD_NPMCS;

        PMCDBG0(MDP,INI,0,"amd-initialize");

        return (pmc_mdep);

  error:
        if (error) {
                free(pmc_mdep, M_PMC);
                pmc_mdep = NULL;
        }

        return (NULL);
}

/*
 * Finalization code for AMD CPUs.
 */
void
pmc_amd_finalize(struct pmc_mdep *md)
{
#if defined(INVARIANTS)
        int classindex, i, ncpus, pmcclass;
#endif

        pmc_tsc_finalize(md);

        KASSERT(amd_pcpu != NULL, ("[amd,%d] NULL per-cpu array pointer",
            __LINE__));

#if defined(INVARIANTS)
        switch (md->pmd_cputype) {
#if defined(__i386__)
        case PMC_CPU_AMD_K7:
                classindex = PMC_MDEP_CLASS_INDEX_K7;
                pmcclass = PMC_CLASS_K7;
                break;
#endif
        default:
                classindex = PMC_MDEP_CLASS_INDEX_K8;
                pmcclass = PMC_CLASS_K8;
        }

        KASSERT(md->pmd_classdep[classindex].pcd_class == pmcclass,
            ("[amd,%d] pmc class mismatch", __LINE__));

        ncpus = pmc_cpu_max();

        for (i = 0; i < ncpus; i++)
                KASSERT(amd_pcpu[i] == NULL, ("[amd,%d] non-null pcpu",
                    __LINE__));
#endif

        free(amd_pcpu, M_PMC);
        amd_pcpu = NULL;
}