/*-
 * Copyright (c) 2003-2007 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Support for the AMD K7 and later processors */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pmc.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#ifdef	DEBUG
enum pmc_class	amd_pmc_class;
#endif

/* AMD K7 & K8 PMCs */
struct amd_descr {
	struct pmc_descr pm_descr;  /* "base class" */
	uint32_t	pm_evsel;   /* address of EVSEL register */
	uint32_t	pm_perfctr; /* address of PERFCTR register */
};

static struct amd_descr amd_pmcdesc[AMD_NPMCS] =
{
    {
	.pm_descr =
	{
		.pd_name  = "TSC",
		.pd_class = PMC_CLASS_TSC,
		.pd_caps  = PMC_CAP_READ,
		.pd_width = 64
	},
	.pm_evsel   = MSR_TSC,
	.pm_perfctr = 0	/* unused */
    },

    {
	.pm_descr =
	{
		.pd_name  = "",
		.pd_class = -1,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_0,
	.pm_perfctr = AMD_PMC_PERFCTR_0
    },
    {
	.pm_descr =
	{
		.pd_name  = "",
		.pd_class = -1,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_1,
	.pm_perfctr = AMD_PMC_PERFCTR_1
    },
    {
	.pm_descr =
	{
		.pd_name  = "",
		.pd_class = -1,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_2,
	.pm_perfctr = AMD_PMC_PERFCTR_2
    },
    {
	.pm_descr =
	{
		.pd_name  = "",
		.pd_class = -1,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_3,
	.pm_perfctr = AMD_PMC_PERFCTR_3
    }
};

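/*
 * Each entry in the table below maps a PMC event identifier to the
 * hardware event-select code programmed into the EVSEL register and to
 * the set of unit mask bits that may legally accompany that event.
 */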
struct amd_event_code_map {
	enum pmc_event	pe_ev;	 /* enum value */
	uint8_t		pe_code; /* encoded event mask */
	uint8_t		pe_mask; /* bits allowed in unit mask */
};

const struct amd_event_code_map amd_event_codes[] = {
#if	defined(__i386__)	/* 32 bit Athlon (K7) only */
	{ PMC_EV_K7_DC_ACCESSES,		0x40, 0 },
	{ PMC_EV_K7_DC_MISSES,			0x41, 0 },
	{ PMC_EV_K7_DC_REFILLS_FROM_L2,		0x42, AMD_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_DC_REFILLS_FROM_SYSTEM,	0x43, AMD_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_DC_WRITEBACKS,		0x44, AMD_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 },
	{ PMC_EV_K7_L1_AND_L2_DTLB_MISSES,	0x46, 0 },
	{ PMC_EV_K7_MISALIGNED_REFERENCES,	0x47, 0 },

	{ PMC_EV_K7_IC_FETCHES,			0x80, 0 },
	{ PMC_EV_K7_IC_MISSES,			0x81, 0 },

	{ PMC_EV_K7_L1_ITLB_MISSES,		0x84, 0 },
	{ PMC_EV_K7_L1_L2_ITLB_MISSES,		0x85, 0 },

	{ PMC_EV_K7_RETIRED_INSTRUCTIONS,	0xC0, 0 },
	{ PMC_EV_K7_RETIRED_OPS,		0xC1, 0 },
	{ PMC_EV_K7_RETIRED_BRANCHES,		0xC2, 0 },
	{ PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 },
	{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES,	0xC4, 0 },
	{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 },
	{ PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 },
	{ PMC_EV_K7_RETIRED_RESYNC_BRANCHES,	0xC7, 0 },
	{ PMC_EV_K7_INTERRUPTS_MASKED_CYCLES,	0xCD, 0 },
	{ PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 },
	{ PMC_EV_K7_HARDWARE_INTERRUPTS,	0xCF, 0 },
#endif

	{ PMC_EV_K8_FP_DISPATCHED_FPU_OPS,		0x00, 0x3F },
	{ PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED,	0x01, 0x00 },
	{ PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS,	0x02, 0x00 },

	{ PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD,		0x20, 0x7F },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
	  0x21, 0x00 },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
	{ PMC_EV_K8_LS_BUFFER2_FULL,			0x23, 0x00 },
	{ PMC_EV_K8_LS_LOCKED_OPERATION,		0x24, 0x07 },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL,	0x25, 0x00 },
	{ PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS,	0x26, 0x00 },
	{ PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS,	0x27, 0x00 },

	{ PMC_EV_K8_DC_ACCESS,				0x40, 0x00 },
	{ PMC_EV_K8_DC_MISS,				0x41, 0x00 },
	{ PMC_EV_K8_DC_REFILL_FROM_L2,			0x42, 0x1F },
	{ PMC_EV_K8_DC_REFILL_FROM_SYSTEM,		0x43, 0x1F },
	{ PMC_EV_K8_DC_COPYBACK,			0x44, 0x1F },
	{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT,	0x45, 0x00 },
	{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS,	0x46, 0x00 },
	{ PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE,	0x47, 0x00 },
	{ PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL,	0x48, 0x00 },
	{ PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
	{ PMC_EV_K8_DC_ONE_BIT_ECC_ERROR,		0x4A, 0x03 },
	{ PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
	{ PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS,	0x4C, 0x03 },

	{ PMC_EV_K8_BU_CPU_CLK_UNHALTED,		0x76, 0x00 },
	{ PMC_EV_K8_BU_INTERNAL_L2_REQUEST,		0x7D, 0x1F },
	{ PMC_EV_K8_BU_FILL_REQUEST_L2_MISS,		0x7E, 0x07 },
	{ PMC_EV_K8_BU_FILL_INTO_L2,			0x7F, 0x03 },

	{ PMC_EV_K8_IC_FETCH,				0x80, 0x00 },
	{ PMC_EV_K8_IC_MISS,				0x81, 0x00 },
	{ PMC_EV_K8_IC_REFILL_FROM_L2,			0x82, 0x00 },
	{ PMC_EV_K8_IC_REFILL_FROM_SYSTEM,		0x83, 0x00 },
	{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT,	0x84, 0x00 },
	{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS,	0x85, 0x00 },
	{ PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
	{ PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL,		0x87, 0x00 },
	{ PMC_EV_K8_IC_RETURN_STACK_HIT,		0x88, 0x00 },
	{ PMC_EV_K8_IC_RETURN_STACK_OVERFLOW,		0x89, 0x00 },

	{ PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS,	0xC0, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_UOPS,			0xC1, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_BRANCHES,		0xC2, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED,	0xC3, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES,		0xC4, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS,	0xC6, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_RESYNCS,			0xC7, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS,		0xC8, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
	  0xCA, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS,	0xCB, 0x0F },
	{ PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
	  0xCC, 0x07 },
	{ PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES,	0xCD, 0x00 },
	{ PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
	{ PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS,	0xCF, 0x00 },

	{ PMC_EV_K8_FR_DECODER_EMPTY,			0xD0, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALLS,			0xD1, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
	  0xD2, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD,	0xD4, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
	  0xD5, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
	  0xD6, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL,	0xD7, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL,	0xD8, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
	  0xD9, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
	  0xDA, 0x00 },
	{ PMC_EV_K8_FR_FPU_EXCEPTIONS,			0xDB, 0x0F },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0,	0xDC, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1,	0xDD, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2,	0xDE, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3,	0xDF, 0x00 },

	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x7 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
	  0xE2, 0x00 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND,	0xE3, 0x07 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
	{ PMC_EV_K8_NB_SIZED_COMMANDS,			0xEB, 0x7F },
	{ PMC_EV_K8_NB_PROBE_RESULT,			0xEC, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS0_BANDWIDTH,		0xF6, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS1_BANDWIDTH,		0xF7, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS2_BANDWIDTH,		0xF8, 0x0F }

};

const int amd_event_codes_size =
	sizeof(amd_event_codes) / sizeof(amd_event_codes[0]);

/*
 * read a pmc register
 */

static int
amd_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	enum pmc_mode mode;
	const struct amd_descr *pd;
	struct pmc *pm;
	const struct pmc_hw *phw;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pd  = &amd_pmcdesc[ri];
	pm  = phw->phw_pmc;

	KASSERT(pm != NULL,
	    ("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
		cpu, ri));

	mode = PMC_TO_MODE(pm);

	PMCDBG(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);

	/* Reading the TSC is a special case */
	if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
		KASSERT(PMC_IS_COUNTING_MODE(mode),
		    ("[amd,%d] TSC counter in non-counting mode", __LINE__));
		*v = rdtsc();
		PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
		return 0;
	}

#ifdef	DEBUG
	KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));
#endif

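	/*
	 * Sampling-mode PMCs are loaded with the 2's complement of the
	 * reload count (see amd_write_pmc() below), so convert the raw
	 * counter value back into a reload count before returning it.
	 */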
	tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */
	if (PMC_IS_SAMPLING_MODE(mode))
		*v = AMD_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
	else
		*v = tmp;

	PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);

	return 0;
}

/*
 * Write a PMC MSR.
 */

static int
amd_write_pmc(int cpu, int ri, pmc_value_t v)
{
	const struct amd_descr *pd;
	struct pmc *pm;
	const struct pmc_hw *phw;
	enum pmc_mode mode;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pd  = &amd_pmcdesc[ri];
	pm  = phw->phw_pmc;

	KASSERT(pm != NULL,
	    ("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
		cpu, ri));

	mode = PMC_TO_MODE(pm);

	if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
		return 0;

#ifdef	DEBUG
	KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));
#endif

	/* use 2's complement of the count for sampling mode PMCs */
	if (PMC_IS_SAMPLING_MODE(mode))
		v = AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

	PMCDBG(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	/* write the PMC value */
	wrmsr(pd->pm_perfctr, v);
	return 0;
}

/*
 * configure hardware pmc according to the configuration recorded in
 * pmc 'pm'.
 */

static int
amd_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *phw;

	PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];

	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[amd,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
		__LINE__, pm, phw->phw_pmc));

	phw->phw_pmc = pm;
	return 0;
}

/*
 * Retrieve a configured PMC pointer from hardware state.
 */

static int
amd_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = pmc_pcpu[cpu]->pc_hwpmcs[ri]->phw_pmc;

	return 0;
}

/*
 * Machine dependent actions taken during the context switch in of a
 * thread.
 */

static int
amd_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
{
	(void) pc;

	PMCDBG(MDP,SWI,1, "pc=%p pp=%p enable-msr=%d", pc, pp,
	    (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0);

	/* enable the RDPMC instruction if needed */
	if (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS)
		load_cr4(rcr4() | CR4_PCE);

	return 0;
}

/*
 * Machine dependent actions taken during the context switch out of a
 * thread.
 */

static int
amd_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
{
	(void) pc;
	(void) pp;		/* can be NULL */

	PMCDBG(MDP,SWO,1, "pc=%p pp=%p enable-msr=%d", pc, pp, pp ?
	    (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) == 1 : 0);

	/* always turn off the RDPMC instruction */
	load_cr4(rcr4() & ~CR4_PCE);

	return 0;
}

/*
 * Check if a given allocation is feasible.
 */

static int
amd_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	int i;
	uint32_t allowed_unitmask, caps, config, unitmask;
	enum pmc_event pe;
	const struct pmc_descr *pd;

	(void) cpu;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row index %d", __LINE__, ri));

	pd = &amd_pmcdesc[ri].pm_descr;

	/* check class match */
	if (pd->pd_class != a->pm_class)
		return EINVAL;

	caps = pm->pm_caps;

	PMCDBG(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps);

	if ((pd->pd_caps & caps) != caps)
		return EPERM;
	if (pd->pd_class == PMC_CLASS_TSC) {
		/* TSCs are always allocated in system-wide counting mode */
		if (a->pm_ev != PMC_EV_TSC_TSC ||
		    a->pm_mode != PMC_MODE_SC)
			return EINVAL;
		return 0;
	}

#ifdef	DEBUG
	KASSERT(pd->pd_class == amd_pmc_class,
	    ("[amd,%d] Unknown PMC class (%d)", __LINE__, pd->pd_class));
#endif

	pe = a->pm_ev;

	/* map ev to the correct event mask code */
	config = allowed_unitmask = 0;
	for (i = 0; i < amd_event_codes_size; i++)
		if (amd_event_codes[i].pe_ev == pe) {
			config =
			    AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
			allowed_unitmask =
			    AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
			break;
		}
	if (i == amd_event_codes_size)
		return EINVAL;

	unitmask = a->pm_md.pm_amd.pm_amd_config & AMD_PMC_UNITMASK;
	if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
		return EINVAL;

	if (unitmask && (caps & PMC_CAP_QUALIFIER))
		config |= unitmask;

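	/* copy the counter-mask (threshold) field from the caller's config */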
	if (caps & PMC_CAP_THRESHOLD)
		config |= a->pm_md.pm_amd.pm_amd_config & AMD_PMC_COUNTERMASK;

	/* set at least one of the 'usr' or 'os' caps */
	if (caps & PMC_CAP_USER)
		config |= AMD_PMC_USR;
	if (caps & PMC_CAP_SYSTEM)
		config |= AMD_PMC_OS;
	if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
		config |= (AMD_PMC_USR|AMD_PMC_OS);

	if (caps & PMC_CAP_EDGE)
		config |= AMD_PMC_EDGE;
	if (caps & PMC_CAP_INVERT)
		config |= AMD_PMC_INVERT;
	if (caps & PMC_CAP_INTERRUPT)
		config |= AMD_PMC_INT;

	pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */

	PMCDBG(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, config);

	return 0;
}

/*
 * Release machine dependent state associated with a PMC.  This is a
 * no-op on this architecture.
 *
 */

/* ARGSUSED0 */
static int
amd_release_pmc(int cpu, int ri, struct pmc *pmc)
{
#ifdef	DEBUG
	const struct amd_descr *pd;
#endif
	struct pmc_hw *phw;

	(void) pmc;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];

	KASSERT(phw->phw_pmc == NULL,
	    ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

#ifdef	DEBUG
	pd = &amd_pmcdesc[ri];
	if (pd->pm_descr.pd_class == amd_pmc_class)
		KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
		    ("[amd,%d] PMC %d released while active", __LINE__, ri));
#endif

	return 0;
}

/*
 * start a PMC.
 */

static int
amd_start_pmc(int cpu, int ri)
{
	uint32_t config;
	struct pmc *pm;
	struct pmc_hw *phw;
	const struct amd_descr *pd;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pm  = phw->phw_pmc;
	pd  = &amd_pmcdesc[ri];

	KASSERT(pm != NULL,
	    ("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
		cpu, ri));

	PMCDBG(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);

	if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
		return 0;	/* TSCs are always running */

#ifdef	DEBUG
	KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));
#endif

	KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
	    ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
	    ri, cpu, pd->pm_descr.pd_name));

	/* turn on the PMC ENABLE bit */
	config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;

	PMCDBG(MDP,STA,2,"amd-start config=0x%x", config);

	wrmsr(pd->pm_evsel, config);
	return 0;
}

/*
 * Stop a PMC.
 */

static int
amd_stop_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct pmc_hw *phw;
	const struct amd_descr *pd;
	uint64_t config;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pm  = phw->phw_pmc;
	pd  = &amd_pmcdesc[ri];

	KASSERT(pm != NULL,
	    ("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
		cpu, ri));

	/* can't stop a TSC */
	if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
		return 0;

#ifdef	DEBUG
	KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));
#endif

	KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
	    ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
		__LINE__, ri, cpu, pd->pm_descr.pd_name));

	PMCDBG(MDP,STO,1,"amd-stop ri=%d", ri);

	/* turn off the PMC ENABLE bit */
	config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;
	wrmsr(pd->pm_evsel, config);
	return 0;
}

/*
 * Interrupt handler.  This function needs to return '1' if the
 * interrupt was caused by this CPU's PMCs or '0' otherwise.  It is
 * not allowed to sleep or do anything a 'fast' interrupt handler is
 * not allowed to do.
 */

static int
amd_intr(int cpu, struct trapframe *tf)
{
	int i, error, retval, ri;
	uint32_t config, evsel, perfctr;
	struct pmc *pm;
	struct pmc_cpu *pc;
	struct pmc_hw *phw;
	pmc_value_t v;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] out of range CPU %d", __LINE__, cpu));

	PMCDBG(MDP,INT,1, "cpu=%d tf=0x%p um=%d", cpu, (void *) tf,
	    TRAPF_USERMODE(tf));

	retval = 0;

	pc = pmc_pcpu[cpu];

	/*
	 * look for all PMCs that have interrupted:
	 * - skip over the TSC [PMC#0]
	 * - look for a running, sampling PMC which has overflowed
	 *   and which has a valid 'struct pmc' association
	 *
	 * If found, we call a helper to process the interrupt.
	 *
	 * If multiple PMCs interrupt at the same time, the AMD64
	 * processor appears to deliver as many NMIs as there are
	 * outstanding PMC interrupts.  So we process only one NMI
	 * interrupt at a time.
	 */

	for (i = 0; retval == 0 && i < AMD_NPMCS-1; i++) {

		ri = i + 1;	/* row index; TSC is at ri == 0 */

		if (!AMD_PMC_HAS_OVERFLOWED(i))
			continue;

		phw = pc->pc_hwpmcs[ri];

		KASSERT(phw != NULL, ("[amd,%d] null PHW pointer", __LINE__));

		if ((pm = phw->phw_pmc) == NULL ||
		    pm->pm_state != PMC_STATE_RUNNING ||
		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
			continue;
		}

		retval = 1;	/* Found an interrupting PMC. */

		/* Stop the PMC, reload count. */
		evsel   = AMD_PMC_EVSEL_0 + i;
		perfctr = AMD_PMC_PERFCTR_0 + i;
		v       = pm->pm_sc.pm_reloadcount;
		config  = rdmsr(evsel);

		KASSERT((config & ~AMD_PMC_ENABLE) ==
		    (pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE),
		    ("[amd,%d] config mismatch reg=0x%x pm=0x%x", __LINE__,
			config, pm->pm_md.pm_amd.pm_amd_evsel));

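		/*
		 * Disable the counter, then reload it with the 2's
		 * complement of the sampling count so that it overflows
		 * again after another 'pm_reloadcount' events.
		 */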
		wrmsr(evsel, config & ~AMD_PMC_ENABLE);
		wrmsr(perfctr, AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v));

		/* Restart the counter if logging succeeded. */
		error = pmc_process_interrupt(cpu, pm, tf, TRAPF_USERMODE(tf));
		if (error == 0)
			wrmsr(evsel, config | AMD_PMC_ENABLE);
	}

	atomic_add_int(retval ? &pmc_stats.pm_intr_processed :
	    &pmc_stats.pm_intr_ignored, 1);

	return (retval);
}

/*
 * describe a PMC
 */
static int
amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	size_t copied;
	const struct amd_descr *pd;
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] row-index %d out of range", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pd  = &amd_pmcdesc[ri];

	if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
		 PMC_NAME_MAX, &copied)) != 0)
		return error;

	pi->pm_class = pd->pm_descr.pd_class;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return 0;
}

/*
 * i386 specific entry points
 */

/*
 * return the MSR address of the given PMC.
 */

static int
amd_get_msr(int ri, uint32_t *msr)
{
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] ri %d out of range", __LINE__, ri));

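	/*
	 * Report the counter's offset from AMD_PMC_PERFCTR_0 rather than
	 * the raw MSR number; a process that has been granted direct
	 * counter access (see the CR4_PCE handling in amd_switch_in()
	 * above) addresses counters by this offset.
	 */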
	*msr = amd_pmcdesc[ri].pm_perfctr - AMD_PMC_PERFCTR_0;
	return 0;
}

/*
 * processor dependent initialization.
 */

/*
 * Per-processor data structure
 *
 * [common stuff]
 * [5 struct pmc_hw pointers]
 * [5 struct pmc_hw structures]
 */

struct amd_cpu {
	struct pmc_cpu	pc_common;
	struct pmc_hw	*pc_hwpmcs[AMD_NPMCS];
	struct pmc_hw	pc_amdpmcs[AMD_NPMCS];
};


static int
amd_init(int cpu)
{
	int n;
	struct amd_cpu *pcs;
	struct pmc_hw  *phw;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG(MDP,INI,1,"amd-init cpu=%d", cpu);

	MALLOC(pcs, struct amd_cpu *, sizeof(struct amd_cpu), M_PMC,
	    M_WAITOK|M_ZERO);

	phw = &pcs->pc_amdpmcs[0];

	/*
	 * Initialize the per-cpu mutex and set the content of the
	 * hardware descriptors to a known state.
	 */

	for (n = 0; n < AMD_NPMCS; n++, phw++) {
		phw->phw_state    = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
		phw->phw_pmc      = NULL;
		pcs->pc_hwpmcs[n] = phw;
	}

	/* Mark the TSC as shareable */
	pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;

	pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;

	return 0;
}


/*
 * processor dependent cleanup prior to the KLD
 * being unloaded
 */

static int
amd_cleanup(int cpu)
{
	int i;
	uint32_t evsel;
	struct pmc_cpu *pcs;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] insane cpu number (%d)", __LINE__, cpu));

	PMCDBG(MDP,INI,1,"amd-cleanup cpu=%d", cpu);

	/*
	 * First, turn off all PMCs on this CPU.
	 */

	for (i = 0; i < 4; i++) { /* XXX this loop is now not needed */
		evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
		evsel &= ~AMD_PMC_ENABLE;
		wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
	}

	/*
	 * Next, free up allocated space.
	 */

	if ((pcs = pmc_pcpu[cpu]) == NULL)
		return 0;

#ifdef	DEBUG
	/* check the TSC */
	KASSERT(pcs->pc_hwpmcs[0]->phw_pmc == NULL,
	    ("[amd,%d] CPU%d,PMC0 still in use", __LINE__, cpu));
	for (i = 1; i < AMD_NPMCS; i++) {
		KASSERT(pcs->pc_hwpmcs[i]->phw_pmc == NULL,
		    ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
		KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + (i-1)),
		    ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
	}
#endif

	pmc_pcpu[cpu] = NULL;
	FREE(pcs, M_PMC);
	return 0;
}

/*
 * Initialize ourselves.
 */

struct pmc_mdep *
pmc_amd_initialize(void)
{
	enum pmc_cputype cputype;
	enum pmc_class class;
	struct pmc_mdep *pmc_mdep;
	char *name;
	int i;

	/*
	 * The presence of hardware performance counters on the AMD
	 * Athlon, Duron or later processors is _not_ indicated by any
	 * of the processor feature flags set by the 'CPUID'
	 * instruction, so we only check the 'instruction family'
	 * field returned by CPUID for instruction family >= 6.
	 */

	class = cputype = -1;
	name = NULL;
	switch (cpu_id & 0xF00) {
	case 0x600:		/* Athlon(tm) processor */
		cputype = PMC_CPU_AMD_K7;
		class = PMC_CLASS_K7;
		name = "K7";
		break;
	case 0xF00:		/* Athlon64/Opteron processor */
		cputype = PMC_CPU_AMD_K8;
		class = PMC_CLASS_K8;
		name = "K8";
		break;
	}

	if ((int) cputype == -1) {
		(void) printf("pmc: Unknown AMD CPU.\n");
		return NULL;
	}

#ifdef	DEBUG
	amd_pmc_class = class;
#endif

	MALLOC(pmc_mdep, struct pmc_mdep *, sizeof(struct pmc_mdep),
	    M_PMC, M_WAITOK|M_ZERO);

	pmc_mdep->pmd_cputype = cputype;
	pmc_mdep->pmd_npmc    = AMD_NPMCS;

	/* this processor has two classes of usable PMCs */
	pmc_mdep->pmd_nclass  = 2;

	/* TSC */
	pmc_mdep->pmd_classes[0].pm_class = PMC_CLASS_TSC;
	pmc_mdep->pmd_classes[0].pm_caps  = PMC_CAP_READ;
	pmc_mdep->pmd_classes[0].pm_width = 64;

	/* AMD K7/K8 PMCs */
	pmc_mdep->pmd_classes[1].pm_class = class;
	pmc_mdep->pmd_classes[1].pm_caps  = AMD_PMC_CAPS;
	pmc_mdep->pmd_classes[1].pm_width = 48;

	pmc_mdep->pmd_nclasspmcs[0] = 1;
	pmc_mdep->pmd_nclasspmcs[1] = (AMD_NPMCS-1);

	/* fill in the correct pmc name and class */
	for (i = 1; i < AMD_NPMCS; i++) {
		(void) snprintf(amd_pmcdesc[i].pm_descr.pd_name,
		    sizeof(amd_pmcdesc[i].pm_descr.pd_name), "%s-%d",
		    name, i-1);
		amd_pmcdesc[i].pm_descr.pd_class = class;
	}

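	/* fill in the machine dependent methods */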
	pmc_mdep->pmd_init         = amd_init;
	pmc_mdep->pmd_cleanup      = amd_cleanup;
	pmc_mdep->pmd_switch_in    = amd_switch_in;
	pmc_mdep->pmd_switch_out   = amd_switch_out;
	pmc_mdep->pmd_read_pmc     = amd_read_pmc;
	pmc_mdep->pmd_write_pmc    = amd_write_pmc;
	pmc_mdep->pmd_config_pmc   = amd_config_pmc;
	pmc_mdep->pmd_get_config   = amd_get_config;
	pmc_mdep->pmd_allocate_pmc = amd_allocate_pmc;
	pmc_mdep->pmd_release_pmc  = amd_release_pmc;
	pmc_mdep->pmd_start_pmc    = amd_start_pmc;
	pmc_mdep->pmd_stop_pmc     = amd_stop_pmc;
	pmc_mdep->pmd_intr         = amd_intr;
	pmc_mdep->pmd_describe     = amd_describe;
	pmc_mdep->pmd_get_msr      = amd_get_msr; /* i386 */

	PMCDBG(MDP,INI,0,"%s","amd-initialize");

	return pmc_mdep;
}