1ebccf1e3SJoseph Koshy /*- 2ebccf1e3SJoseph Koshy * Copyright (c) 2003-2005 Joseph Koshy 3ebccf1e3SJoseph Koshy * All rights reserved. 4ebccf1e3SJoseph Koshy * 5ebccf1e3SJoseph Koshy * Redistribution and use in source and binary forms, with or without 6ebccf1e3SJoseph Koshy * modification, are permitted provided that the following conditions 7ebccf1e3SJoseph Koshy * are met: 8ebccf1e3SJoseph Koshy * 1. Redistributions of source code must retain the above copyright 9ebccf1e3SJoseph Koshy * notice, this list of conditions and the following disclaimer. 10ebccf1e3SJoseph Koshy * 2. Redistributions in binary form must reproduce the above copyright 11ebccf1e3SJoseph Koshy * notice, this list of conditions and the following disclaimer in the 12ebccf1e3SJoseph Koshy * documentation and/or other materials provided with the distribution. 13ebccf1e3SJoseph Koshy * 14ebccf1e3SJoseph Koshy * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15ebccf1e3SJoseph Koshy * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16ebccf1e3SJoseph Koshy * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17ebccf1e3SJoseph Koshy * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18ebccf1e3SJoseph Koshy * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19ebccf1e3SJoseph Koshy * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20ebccf1e3SJoseph Koshy * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21ebccf1e3SJoseph Koshy * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22ebccf1e3SJoseph Koshy * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23ebccf1e3SJoseph Koshy * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24ebccf1e3SJoseph Koshy * SUCH DAMAGE. 
25ebccf1e3SJoseph Koshy * 26ebccf1e3SJoseph Koshy */ 27ebccf1e3SJoseph Koshy 28ebccf1e3SJoseph Koshy #include <sys/cdefs.h> 29ebccf1e3SJoseph Koshy __FBSDID("$FreeBSD$"); 30ebccf1e3SJoseph Koshy 31ebccf1e3SJoseph Koshy /* Support for the AMD K7 and later processors */ 32ebccf1e3SJoseph Koshy 33ebccf1e3SJoseph Koshy #include <sys/param.h> 34ebccf1e3SJoseph Koshy #include <sys/lock.h> 35ebccf1e3SJoseph Koshy #include <sys/malloc.h> 36ebccf1e3SJoseph Koshy #include <sys/mutex.h> 377ad17ef9SMarcel Moolenaar #include <sys/pmc.h> 38ebccf1e3SJoseph Koshy #include <sys/smp.h> 39ebccf1e3SJoseph Koshy #include <sys/systm.h> 40ebccf1e3SJoseph Koshy 41ebccf1e3SJoseph Koshy #include <machine/md_var.h> 42ebccf1e3SJoseph Koshy 43ebccf1e3SJoseph Koshy /* AMD K7 and K8 PMCs */ 44ebccf1e3SJoseph Koshy 45ebccf1e3SJoseph Koshy #define AMD_PMC_EVSEL_0 0xC0010000 46ebccf1e3SJoseph Koshy #define AMD_PMC_EVSEL_1 0xC0010001 47ebccf1e3SJoseph Koshy #define AMD_PMC_EVSEL_2 0xC0010002 48ebccf1e3SJoseph Koshy #define AMD_PMC_EVSEL_3 0xC0010003 49ebccf1e3SJoseph Koshy 50ebccf1e3SJoseph Koshy #define AMD_PMC_PERFCTR_0 0xC0010004 51ebccf1e3SJoseph Koshy #define AMD_PMC_PERFCTR_1 0xC0010005 52ebccf1e3SJoseph Koshy #define AMD_PMC_PERFCTR_2 0xC0010006 53ebccf1e3SJoseph Koshy #define AMD_PMC_PERFCTR_3 0xC0010007 54ebccf1e3SJoseph Koshy 55ebccf1e3SJoseph Koshy #define K7_VALID_EVENT_CODE(c) (((c) >= 0x40 && (c) <= 0x47) || \ 56ebccf1e3SJoseph Koshy ((c) >= 0x80 && (c) <= 0x85) || ((c) >= 0xC0 && (c) <= 0xC7) || \ 57ebccf1e3SJoseph Koshy ((c) >= 0xCD && (c) <= 0xCF)) 58ebccf1e3SJoseph Koshy 59ebccf1e3SJoseph Koshy #define AMD_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \ 60ebccf1e3SJoseph Koshy PMC_CAP_SYSTEM | PMC_CAP_EDGE | PMC_CAP_THRESHOLD | \ 61ebccf1e3SJoseph Koshy PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INVERT | PMC_CAP_QUALIFIER) 62ebccf1e3SJoseph Koshy 63ebccf1e3SJoseph Koshy /* reserved bits include bit 21 and the top two bits of the unit mask */ 64ebccf1e3SJoseph Koshy #define 
K7_PMC_RESERVED ((1 << 21) | (3 << 13)) 65ebccf1e3SJoseph Koshy 66ebccf1e3SJoseph Koshy #define K8_PMC_RESERVED (1 << 21) 67ebccf1e3SJoseph Koshy 68ebccf1e3SJoseph Koshy #define AMD_PMC_IS_STOPPED(evsel) ((rdmsr((evsel)) & AMD_PMC_ENABLE) == 0) 69ebccf1e3SJoseph Koshy #define AMD_PMC_HAS_OVERFLOWED(pmc) ((rdpmc(pmc) & (1ULL << 47)) == 0) 70ebccf1e3SJoseph Koshy 71ebccf1e3SJoseph Koshy #if __i386__ 72ebccf1e3SJoseph Koshy #define AMD_NPMCS K7_NPMCS 73ebccf1e3SJoseph Koshy #define AMD_PMC_CLASS PMC_CLASS_K7 74ebccf1e3SJoseph Koshy #define AMD_PMC_COUNTERMASK K7_PMC_COUNTERMASK 75ebccf1e3SJoseph Koshy #define AMD_PMC_TO_COUNTER(x) K7_PMC_TO_COUNTER(x) 76ebccf1e3SJoseph Koshy #define AMD_PMC_INVERT K7_PMC_INVERT 77ebccf1e3SJoseph Koshy #define AMD_PMC_ENABLE K7_PMC_ENABLE 78ebccf1e3SJoseph Koshy #define AMD_PMC_INT K7_PMC_INT 79ebccf1e3SJoseph Koshy #define AMD_PMC_PC K7_PMC_PC 80ebccf1e3SJoseph Koshy #define AMD_PMC_EDGE K7_PMC_EDGE 81ebccf1e3SJoseph Koshy #define AMD_PMC_OS K7_PMC_OS 82ebccf1e3SJoseph Koshy #define AMD_PMC_USR K7_PMC_USR 83ebccf1e3SJoseph Koshy 84ebccf1e3SJoseph Koshy #define AMD_PMC_UNITMASK_M K7_PMC_UNITMASK_M 85ebccf1e3SJoseph Koshy #define AMD_PMC_UNITMASK_O K7_PMC_UNITMASK_O 86ebccf1e3SJoseph Koshy #define AMD_PMC_UNITMASK_E K7_PMC_UNITMASK_E 87ebccf1e3SJoseph Koshy #define AMD_PMC_UNITMASK_S K7_PMC_UNITMASK_S 88ebccf1e3SJoseph Koshy #define AMD_PMC_UNITMASK_I K7_PMC_UNITMASK_I 89ebccf1e3SJoseph Koshy 90ebccf1e3SJoseph Koshy #define AMD_PMC_UNITMASK K7_PMC_UNITMASK 91ebccf1e3SJoseph Koshy #define AMD_PMC_EVENTMASK K7_PMC_EVENTMASK 92ebccf1e3SJoseph Koshy #define AMD_PMC_TO_UNITMASK(x) K7_PMC_TO_UNITMASK(x) 93ebccf1e3SJoseph Koshy #define AMD_PMC_TO_EVENTMASK(x) K7_PMC_TO_EVENTMASK(x) 94ebccf1e3SJoseph Koshy #define AMD_VALID_BITS K7_VALID_BITS 95ebccf1e3SJoseph Koshy 96ebccf1e3SJoseph Koshy #define AMD_PMC_CLASS_NAME "K7-" 97ebccf1e3SJoseph Koshy 98ebccf1e3SJoseph Koshy #elif __amd64__ 99ebccf1e3SJoseph Koshy 100ebccf1e3SJoseph Koshy #define 
AMD_NPMCS K8_NPMCS 101ebccf1e3SJoseph Koshy #define AMD_PMC_CLASS PMC_CLASS_K8 102ebccf1e3SJoseph Koshy #define AMD_PMC_COUNTERMASK K8_PMC_COUNTERMASK 103ebccf1e3SJoseph Koshy #define AMD_PMC_TO_COUNTER(x) K8_PMC_TO_COUNTER(x) 104ebccf1e3SJoseph Koshy #define AMD_PMC_INVERT K8_PMC_INVERT 105ebccf1e3SJoseph Koshy #define AMD_PMC_ENABLE K8_PMC_ENABLE 106ebccf1e3SJoseph Koshy #define AMD_PMC_INT K8_PMC_INT 107ebccf1e3SJoseph Koshy #define AMD_PMC_PC K8_PMC_PC 108ebccf1e3SJoseph Koshy #define AMD_PMC_EDGE K8_PMC_EDGE 109ebccf1e3SJoseph Koshy #define AMD_PMC_OS K8_PMC_OS 110ebccf1e3SJoseph Koshy #define AMD_PMC_USR K8_PMC_USR 111ebccf1e3SJoseph Koshy 112ebccf1e3SJoseph Koshy #define AMD_PMC_UNITMASK_M K8_PMC_UNITMASK_M 113ebccf1e3SJoseph Koshy #define AMD_PMC_UNITMASK_O K8_PMC_UNITMASK_O 114ebccf1e3SJoseph Koshy #define AMD_PMC_UNITMASK_E K8_PMC_UNITMASK_E 115ebccf1e3SJoseph Koshy #define AMD_PMC_UNITMASK_S K8_PMC_UNITMASK_S 116ebccf1e3SJoseph Koshy #define AMD_PMC_UNITMASK_I K8_PMC_UNITMASK_I 117ebccf1e3SJoseph Koshy 118ebccf1e3SJoseph Koshy #define AMD_PMC_UNITMASK K8_PMC_UNITMASK 119ebccf1e3SJoseph Koshy #define AMD_PMC_EVENTMASK K8_PMC_EVENTMASK 120ebccf1e3SJoseph Koshy #define AMD_PMC_TO_UNITMASK(x) K8_PMC_TO_UNITMASK(x) 121ebccf1e3SJoseph Koshy #define AMD_PMC_TO_EVENTMASK(x) K8_PMC_TO_EVENTMASK(x) 122ebccf1e3SJoseph Koshy #define AMD_VALID_BITS K8_VALID_BITS 123ebccf1e3SJoseph Koshy 124ebccf1e3SJoseph Koshy #define AMD_PMC_CLASS_NAME "K8-" 125ebccf1e3SJoseph Koshy 126ebccf1e3SJoseph Koshy #else 127ebccf1e3SJoseph Koshy #error Unsupported architecture. 
128ebccf1e3SJoseph Koshy #endif 129ebccf1e3SJoseph Koshy 130ebccf1e3SJoseph Koshy /* AMD K7 & K8 PMCs */ 131ebccf1e3SJoseph Koshy struct amd_descr { 132ebccf1e3SJoseph Koshy struct pmc_descr pm_descr; /* "base class" */ 133ebccf1e3SJoseph Koshy uint32_t pm_evsel; /* address of EVSEL register */ 134ebccf1e3SJoseph Koshy uint32_t pm_perfctr; /* address of PERFCTR register */ 135ebccf1e3SJoseph Koshy }; 136ebccf1e3SJoseph Koshy 137ebccf1e3SJoseph Koshy static const struct amd_descr amd_pmcdesc[AMD_NPMCS] = 138ebccf1e3SJoseph Koshy { 139ebccf1e3SJoseph Koshy { 140ebccf1e3SJoseph Koshy .pm_descr = 141ebccf1e3SJoseph Koshy { 142ebccf1e3SJoseph Koshy .pd_name = "TSC", 143ebccf1e3SJoseph Koshy .pd_class = PMC_CLASS_TSC, 144ebccf1e3SJoseph Koshy .pd_caps = PMC_CAP_READ, 145ebccf1e3SJoseph Koshy .pd_width = 64 146ebccf1e3SJoseph Koshy }, 147ebccf1e3SJoseph Koshy .pm_evsel = MSR_TSC, 148ebccf1e3SJoseph Koshy .pm_perfctr = 0 /* unused */ 149ebccf1e3SJoseph Koshy }, 150ebccf1e3SJoseph Koshy 151ebccf1e3SJoseph Koshy { 152ebccf1e3SJoseph Koshy .pm_descr = 153ebccf1e3SJoseph Koshy { 154ebccf1e3SJoseph Koshy .pd_name = AMD_PMC_CLASS_NAME "0", 155ebccf1e3SJoseph Koshy .pd_class = AMD_PMC_CLASS, 156ebccf1e3SJoseph Koshy .pd_caps = AMD_PMC_CAPS, 157ebccf1e3SJoseph Koshy .pd_width = 48 158ebccf1e3SJoseph Koshy }, 159ebccf1e3SJoseph Koshy .pm_evsel = AMD_PMC_EVSEL_0, 160ebccf1e3SJoseph Koshy .pm_perfctr = AMD_PMC_PERFCTR_0 161ebccf1e3SJoseph Koshy }, 162ebccf1e3SJoseph Koshy { 163ebccf1e3SJoseph Koshy .pm_descr = 164ebccf1e3SJoseph Koshy { 165ebccf1e3SJoseph Koshy .pd_name = AMD_PMC_CLASS_NAME "1", 166ebccf1e3SJoseph Koshy .pd_class = AMD_PMC_CLASS, 167ebccf1e3SJoseph Koshy .pd_caps = AMD_PMC_CAPS, 168ebccf1e3SJoseph Koshy .pd_width = 48 169ebccf1e3SJoseph Koshy }, 170ebccf1e3SJoseph Koshy .pm_evsel = AMD_PMC_EVSEL_1, 171ebccf1e3SJoseph Koshy .pm_perfctr = AMD_PMC_PERFCTR_1 172ebccf1e3SJoseph Koshy }, 173ebccf1e3SJoseph Koshy { 174ebccf1e3SJoseph Koshy .pm_descr = 175ebccf1e3SJoseph 
Koshy { 176ebccf1e3SJoseph Koshy .pd_name = AMD_PMC_CLASS_NAME "2", 177ebccf1e3SJoseph Koshy .pd_class = AMD_PMC_CLASS, 178ebccf1e3SJoseph Koshy .pd_caps = AMD_PMC_CAPS, 179ebccf1e3SJoseph Koshy .pd_width = 48 180ebccf1e3SJoseph Koshy }, 181ebccf1e3SJoseph Koshy .pm_evsel = AMD_PMC_EVSEL_2, 182ebccf1e3SJoseph Koshy .pm_perfctr = AMD_PMC_PERFCTR_2 183ebccf1e3SJoseph Koshy }, 184ebccf1e3SJoseph Koshy { 185ebccf1e3SJoseph Koshy .pm_descr = 186ebccf1e3SJoseph Koshy { 187ebccf1e3SJoseph Koshy .pd_name = AMD_PMC_CLASS_NAME "3", 188ebccf1e3SJoseph Koshy .pd_class = AMD_PMC_CLASS, 189ebccf1e3SJoseph Koshy .pd_caps = AMD_PMC_CAPS, 190ebccf1e3SJoseph Koshy .pd_width = 48 191ebccf1e3SJoseph Koshy }, 192ebccf1e3SJoseph Koshy .pm_evsel = AMD_PMC_EVSEL_3, 193ebccf1e3SJoseph Koshy .pm_perfctr = AMD_PMC_PERFCTR_3 194ebccf1e3SJoseph Koshy } 195ebccf1e3SJoseph Koshy }; 196ebccf1e3SJoseph Koshy 197ebccf1e3SJoseph Koshy struct amd_event_code_map { 198ebccf1e3SJoseph Koshy enum pmc_event pe_ev; /* enum value */ 199ebccf1e3SJoseph Koshy uint8_t pe_code; /* encoded event mask */ 200ebccf1e3SJoseph Koshy uint8_t pe_mask; /* bits allowed in unit mask */ 201ebccf1e3SJoseph Koshy }; 202ebccf1e3SJoseph Koshy 203ebccf1e3SJoseph Koshy const struct amd_event_code_map amd_event_codes[] = { 204ebccf1e3SJoseph Koshy #if __i386__ 205ebccf1e3SJoseph Koshy { PMC_EV_K7_DC_ACCESSES, 0x40, 0 }, 206ebccf1e3SJoseph Koshy { PMC_EV_K7_DC_MISSES, 0x41, 0 }, 207ebccf1e3SJoseph Koshy { PMC_EV_K7_DC_REFILLS_FROM_L2, 0x42, K7_PMC_UNITMASK_MOESI }, 208ebccf1e3SJoseph Koshy { PMC_EV_K7_DC_REFILLS_FROM_SYSTEM, 0x43, K7_PMC_UNITMASK_MOESI }, 209ebccf1e3SJoseph Koshy { PMC_EV_K7_DC_WRITEBACKS, 0x44, K7_PMC_UNITMASK_MOESI }, 210ebccf1e3SJoseph Koshy { PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 }, 211ebccf1e3SJoseph Koshy { PMC_EV_K7_L1_AND_L2_DTLB_MISSES, 0x46, 0 }, 212ebccf1e3SJoseph Koshy { PMC_EV_K7_MISALIGNED_REFERENCES, 0x47, 0 }, 213ebccf1e3SJoseph Koshy 214ebccf1e3SJoseph Koshy { PMC_EV_K7_IC_FETCHES, 
0x80, 0 }, 215ebccf1e3SJoseph Koshy { PMC_EV_K7_IC_MISSES, 0x81, 0 }, 216ebccf1e3SJoseph Koshy 217ebccf1e3SJoseph Koshy { PMC_EV_K7_L1_ITLB_MISSES, 0x84, 0 }, 218ebccf1e3SJoseph Koshy { PMC_EV_K7_L1_L2_ITLB_MISSES, 0x85, 0 }, 219ebccf1e3SJoseph Koshy 220ebccf1e3SJoseph Koshy { PMC_EV_K7_RETIRED_INSTRUCTIONS, 0xC0, 0 }, 221ebccf1e3SJoseph Koshy { PMC_EV_K7_RETIRED_OPS, 0xC1, 0 }, 222ebccf1e3SJoseph Koshy { PMC_EV_K7_RETIRED_BRANCHES, 0xC2, 0 }, 223ebccf1e3SJoseph Koshy { PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 }, 224ebccf1e3SJoseph Koshy { PMC_EV_K7_RETIRED_TAKEN_BRANCHES, 0xC4, 0 }, 225ebccf1e3SJoseph Koshy { PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 }, 226ebccf1e3SJoseph Koshy { PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 }, 227ebccf1e3SJoseph Koshy { PMC_EV_K7_RETIRED_RESYNC_BRANCHES, 0xC7, 0 }, 228ebccf1e3SJoseph Koshy { PMC_EV_K7_INTERRUPTS_MASKED_CYCLES, 0xCD, 0 }, 229ebccf1e3SJoseph Koshy { PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 }, 230ebccf1e3SJoseph Koshy { PMC_EV_K7_HARDWARE_INTERRUPTS, 0xCF, 0 } 231ebccf1e3SJoseph Koshy #endif 232ebccf1e3SJoseph Koshy 233ebccf1e3SJoseph Koshy #if __amd64__ 234ebccf1e3SJoseph Koshy { PMC_EV_K8_FP_DISPATCHED_FPU_OPS, 0x00, 0x3F }, 235ebccf1e3SJoseph Koshy { PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED, 0x01, 0x00 }, 236ebccf1e3SJoseph Koshy { PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS, 0x02, 0x00 }, 237ebccf1e3SJoseph Koshy 238ebccf1e3SJoseph Koshy { PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD, 0x20, 0x7F }, 239ebccf1e3SJoseph Koshy { PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE, 240ebccf1e3SJoseph Koshy 0x21, 0x00 }, 241ebccf1e3SJoseph Koshy { PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 }, 242ebccf1e3SJoseph Koshy { PMC_EV_K8_LS_BUFFER2_FULL, 0x23, 0x00 }, 243ebccf1e3SJoseph Koshy { PMC_EV_K8_LS_LOCKED_OPERATION, 0x24, 0x07 }, 244ebccf1e3SJoseph Koshy { PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL, 0x25, 0x00 }, 245ebccf1e3SJoseph Koshy { 
PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS, 0x26, 0x00 }, 246ebccf1e3SJoseph Koshy { PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS, 0x27, 0x00 }, 247ebccf1e3SJoseph Koshy 248ebccf1e3SJoseph Koshy { PMC_EV_K8_DC_ACCESS, 0x40, 0x00 }, 249ebccf1e3SJoseph Koshy { PMC_EV_K8_DC_MISS, 0x41, 0x00 }, 250ebccf1e3SJoseph Koshy { PMC_EV_K8_DC_REFILL_FROM_L2, 0x42, 0x1F }, 251ebccf1e3SJoseph Koshy { PMC_EV_K8_DC_REFILL_FROM_SYSTEM, 0x43, 0x1F }, 252ebccf1e3SJoseph Koshy { PMC_EV_K8_DC_COPYBACK, 0x44, 0x1F }, 253ebccf1e3SJoseph Koshy { PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT, 0x45, 0x00 }, 254ebccf1e3SJoseph Koshy { PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS, 0x46, 0x00 }, 255ebccf1e3SJoseph Koshy { PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE, 0x47, 0x00 }, 256ebccf1e3SJoseph Koshy { PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL, 0x48, 0x00 }, 257ebccf1e3SJoseph Koshy { PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 }, 258ebccf1e3SJoseph Koshy { PMC_EV_K8_DC_ONE_BIT_ECC_ERROR, 0x4A, 0x03 }, 259ebccf1e3SJoseph Koshy { PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 }, 260ebccf1e3SJoseph Koshy { PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS, 0x4C, 0x03 }, 261ebccf1e3SJoseph Koshy 262ebccf1e3SJoseph Koshy { PMC_EV_K8_BU_CPU_CLK_UNHALTED, 0x76, 0x00 }, 263ebccf1e3SJoseph Koshy { PMC_EV_K8_BU_INTERNAL_L2_REQUEST, 0x7D, 0x1F }, 264ebccf1e3SJoseph Koshy { PMC_EV_K8_BU_FILL_REQUEST_L2_MISS, 0x7E, 0x07 }, 265ebccf1e3SJoseph Koshy { PMC_EV_K8_BU_FILL_INTO_L2, 0x7F, 0x03 }, 266ebccf1e3SJoseph Koshy 267ebccf1e3SJoseph Koshy { PMC_EV_K8_IC_FETCH, 0x80, 0x00 }, 268ebccf1e3SJoseph Koshy { PMC_EV_K8_IC_MISS, 0x81, 0x00 }, 269ebccf1e3SJoseph Koshy { PMC_EV_K8_IC_REFILL_FROM_L2, 0x82, 0x00 }, 270ebccf1e3SJoseph Koshy { PMC_EV_K8_IC_REFILL_FROM_SYSTEM, 0x83, 0x00 }, 271ebccf1e3SJoseph Koshy { PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT, 0x84, 0x00 }, 272ebccf1e3SJoseph Koshy { PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS, 0x85, 0x00 }, 273ebccf1e3SJoseph Koshy { 
PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 }, 274ebccf1e3SJoseph Koshy { PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL, 0x87, 0x00 }, 275ebccf1e3SJoseph Koshy { PMC_EV_K8_IC_RETURN_STACK_HIT, 0x88, 0x00 }, 276ebccf1e3SJoseph Koshy { PMC_EV_K8_IC_RETURN_STACK_OVERFLOW, 0x89, 0x00 }, 277ebccf1e3SJoseph Koshy 278ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS, 0xC0, 0x00 }, 279ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_RETIRED_UOPS, 0xC1, 0x00 }, 280ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_RETIRED_BRANCHES, 0xC2, 0x00 }, 281ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0x00 }, 282ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES, 0xC4, 0x00 }, 283ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 }, 284ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0x00 }, 285ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_RETIRED_RESYNCS, 0xC7, 0x00 }, 286ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_RETIRED_NEAR_RETURNS, 0xC8, 0x00 }, 287ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 }, 288ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE, 289ebccf1e3SJoseph Koshy 0xCA, 0x00 }, 290ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS, 0xCB, 0x0F }, 291ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS, 292ebccf1e3SJoseph Koshy 0xCC, 0x07 }, 293ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES, 0xCD, 0x00 }, 294ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 }, 295ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS, 0xCF, 0x00 }, 296ebccf1e3SJoseph Koshy 297ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_DECODER_EMPTY, 0xD0, 0x00 }, 298ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_DISPATCH_STALLS, 0xD1, 0x00 }, 299ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE, 300ebccf1e3SJoseph Koshy 0xD2, 0x00 }, 
301ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 }, 302ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD, 0xD4, 0x00 }, 303ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL, 304ebccf1e3SJoseph Koshy 0xD5, 0x00 }, 305ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL, 306ebccf1e3SJoseph Koshy 0xD6, 0x00 }, 307ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL, 0xD7, 0x00 }, 308ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL, 0xD8, 0x00 }, 309ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET, 310ebccf1e3SJoseph Koshy 0xD9, 0x00 }, 311ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING, 312ebccf1e3SJoseph Koshy 0xDA, 0x00 }, 313ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_FPU_EXCEPTIONS, 0xDB, 0x0F }, 314ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0, 0xDC, 0x00 }, 315ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1, 0xDD, 0x00 }, 316ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2, 0xDE, 0x00 }, 317ebccf1e3SJoseph Koshy { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3, 0xDF, 0x00 }, 318ebccf1e3SJoseph Koshy 319ebccf1e3SJoseph Koshy { PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x7 }, 320ebccf1e3SJoseph Koshy { PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 }, 321ebccf1e3SJoseph Koshy { PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED, 322ebccf1e3SJoseph Koshy 0xE2, 0x00 }, 323ebccf1e3SJoseph Koshy { PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND, 0xE3, 0x07 }, 324ebccf1e3SJoseph Koshy { PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F }, 325ebccf1e3SJoseph Koshy { PMC_EV_K8_NB_SIZED_COMMANDS, 0xEB, 0x7F }, 326ebccf1e3SJoseph Koshy { PMC_EV_K8_NB_PROBE_RESULT, 0xEC, 0x0F }, 327ebccf1e3SJoseph Koshy { PMC_EV_K8_NB_HT_BUS0_BANDWIDTH, 0xF6, 0x0F }, 
328ebccf1e3SJoseph Koshy { PMC_EV_K8_NB_HT_BUS1_BANDWIDTH, 0xF7, 0x0F }, 329ebccf1e3SJoseph Koshy { PMC_EV_K8_NB_HT_BUS2_BANDWIDTH, 0xF8, 0x0F } 330ebccf1e3SJoseph Koshy #endif 331ebccf1e3SJoseph Koshy 332ebccf1e3SJoseph Koshy }; 333ebccf1e3SJoseph Koshy 334ebccf1e3SJoseph Koshy const int amd_event_codes_size = 335ebccf1e3SJoseph Koshy sizeof(amd_event_codes) / sizeof(amd_event_codes[0]); 336ebccf1e3SJoseph Koshy 337ebccf1e3SJoseph Koshy /* 338ebccf1e3SJoseph Koshy * read a pmc register 339ebccf1e3SJoseph Koshy */ 340ebccf1e3SJoseph Koshy 341ebccf1e3SJoseph Koshy static int 342ebccf1e3SJoseph Koshy amd_read_pmc(int cpu, int ri, pmc_value_t *v) 343ebccf1e3SJoseph Koshy { 344ebccf1e3SJoseph Koshy enum pmc_mode mode; 345ebccf1e3SJoseph Koshy const struct amd_descr *pd; 346ebccf1e3SJoseph Koshy struct pmc *pm; 347ebccf1e3SJoseph Koshy const struct pmc_hw *phw; 348ebccf1e3SJoseph Koshy pmc_value_t tmp; 349ebccf1e3SJoseph Koshy 350ebccf1e3SJoseph Koshy KASSERT(cpu >= 0 && cpu < mp_ncpus, 351ebccf1e3SJoseph Koshy ("[amd,%d] illegal CPU value %d", __LINE__, cpu)); 352ebccf1e3SJoseph Koshy KASSERT(ri >= 0 && ri < AMD_NPMCS, 353ebccf1e3SJoseph Koshy ("[amd,%d] illegal row-index %d", __LINE__, ri)); 354ebccf1e3SJoseph Koshy 355ebccf1e3SJoseph Koshy phw = pmc_pcpu[cpu]->pc_hwpmcs[ri]; 356ebccf1e3SJoseph Koshy pd = &amd_pmcdesc[ri]; 357ebccf1e3SJoseph Koshy pm = phw->phw_pmc; 358ebccf1e3SJoseph Koshy 359ebccf1e3SJoseph Koshy KASSERT(pm != NULL, 360ebccf1e3SJoseph Koshy ("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__, 361ebccf1e3SJoseph Koshy cpu, ri)); 362ebccf1e3SJoseph Koshy 363c5153e19SJoseph Koshy mode = PMC_TO_MODE(pm); 364ebccf1e3SJoseph Koshy 365ebccf1e3SJoseph Koshy PMCDBG(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class); 366ebccf1e3SJoseph Koshy 367ebccf1e3SJoseph Koshy /* Reading the TSC is a special case */ 368ebccf1e3SJoseph Koshy if (pd->pm_descr.pd_class == PMC_CLASS_TSC) { 369ebccf1e3SJoseph Koshy KASSERT(PMC_IS_COUNTING_MODE(mode), 
370ebccf1e3SJoseph Koshy ("[amd,%d] TSC counter in non-counting mode", __LINE__)); 371ebccf1e3SJoseph Koshy *v = rdtsc(); 372ebccf1e3SJoseph Koshy PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v); 373ebccf1e3SJoseph Koshy return 0; 374ebccf1e3SJoseph Koshy } 375ebccf1e3SJoseph Koshy 376ebccf1e3SJoseph Koshy KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS, 377ebccf1e3SJoseph Koshy ("[amd,%d] unknown PMC class (%d)", __LINE__, 378ebccf1e3SJoseph Koshy pd->pm_descr.pd_class)); 379ebccf1e3SJoseph Koshy 380ebccf1e3SJoseph Koshy tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */ 381ebccf1e3SJoseph Koshy if (PMC_IS_SAMPLING_MODE(mode)) 382ebccf1e3SJoseph Koshy *v = -tmp; 383ebccf1e3SJoseph Koshy else 384ebccf1e3SJoseph Koshy *v = tmp; 385ebccf1e3SJoseph Koshy 386ebccf1e3SJoseph Koshy PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v); 387ebccf1e3SJoseph Koshy 388ebccf1e3SJoseph Koshy return 0; 389ebccf1e3SJoseph Koshy } 390ebccf1e3SJoseph Koshy 391ebccf1e3SJoseph Koshy /* 392ebccf1e3SJoseph Koshy * Write a PMC MSR. 
393ebccf1e3SJoseph Koshy */ 394ebccf1e3SJoseph Koshy 395ebccf1e3SJoseph Koshy static int 396ebccf1e3SJoseph Koshy amd_write_pmc(int cpu, int ri, pmc_value_t v) 397ebccf1e3SJoseph Koshy { 398ebccf1e3SJoseph Koshy const struct amd_descr *pd; 399ebccf1e3SJoseph Koshy struct pmc *pm; 400ebccf1e3SJoseph Koshy const struct pmc_hw *phw; 401ebccf1e3SJoseph Koshy enum pmc_mode mode; 402ebccf1e3SJoseph Koshy 403ebccf1e3SJoseph Koshy KASSERT(cpu >= 0 && cpu < mp_ncpus, 404ebccf1e3SJoseph Koshy ("[amd,%d] illegal CPU value %d", __LINE__, cpu)); 405ebccf1e3SJoseph Koshy KASSERT(ri >= 0 && ri < AMD_NPMCS, 406ebccf1e3SJoseph Koshy ("[amd,%d] illegal row-index %d", __LINE__, ri)); 407ebccf1e3SJoseph Koshy 408ebccf1e3SJoseph Koshy phw = pmc_pcpu[cpu]->pc_hwpmcs[ri]; 409ebccf1e3SJoseph Koshy pd = &amd_pmcdesc[ri]; 410ebccf1e3SJoseph Koshy pm = phw->phw_pmc; 411ebccf1e3SJoseph Koshy 412ebccf1e3SJoseph Koshy KASSERT(pm != NULL, 413ebccf1e3SJoseph Koshy ("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__, 414ebccf1e3SJoseph Koshy cpu, ri)); 415ebccf1e3SJoseph Koshy 416c5153e19SJoseph Koshy mode = PMC_TO_MODE(pm); 417ebccf1e3SJoseph Koshy 418ebccf1e3SJoseph Koshy if (pd->pm_descr.pd_class == PMC_CLASS_TSC) 419ebccf1e3SJoseph Koshy return 0; 420ebccf1e3SJoseph Koshy 421ebccf1e3SJoseph Koshy KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS, 422ebccf1e3SJoseph Koshy ("[amd,%d] unknown PMC class (%d)", __LINE__, 423ebccf1e3SJoseph Koshy pd->pm_descr.pd_class)); 424ebccf1e3SJoseph Koshy 425ebccf1e3SJoseph Koshy /* use 2's complement of the count for sampling mode PMCs */ 426ebccf1e3SJoseph Koshy if (PMC_IS_SAMPLING_MODE(mode)) 427ebccf1e3SJoseph Koshy v = -v; 428ebccf1e3SJoseph Koshy 429ebccf1e3SJoseph Koshy PMCDBG(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v); 430ebccf1e3SJoseph Koshy 431ebccf1e3SJoseph Koshy /* write the PMC value */ 432ebccf1e3SJoseph Koshy wrmsr(pd->pm_perfctr, v); 433ebccf1e3SJoseph Koshy return 0; 434ebccf1e3SJoseph Koshy } 435ebccf1e3SJoseph Koshy 
436ebccf1e3SJoseph Koshy /* 437ebccf1e3SJoseph Koshy * configure hardware pmc according to the configuration recorded in 438ebccf1e3SJoseph Koshy * pmc 'pm'. 439ebccf1e3SJoseph Koshy */ 440ebccf1e3SJoseph Koshy 441ebccf1e3SJoseph Koshy static int 442ebccf1e3SJoseph Koshy amd_config_pmc(int cpu, int ri, struct pmc *pm) 443ebccf1e3SJoseph Koshy { 444ebccf1e3SJoseph Koshy struct pmc_hw *phw; 445ebccf1e3SJoseph Koshy 4466b8c8cd8SJoseph Koshy PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm); 4476b8c8cd8SJoseph Koshy 448ebccf1e3SJoseph Koshy KASSERT(cpu >= 0 && cpu < mp_ncpus, 449ebccf1e3SJoseph Koshy ("[amd,%d] illegal CPU value %d", __LINE__, cpu)); 450ebccf1e3SJoseph Koshy KASSERT(ri >= 0 && ri < AMD_NPMCS, 451ebccf1e3SJoseph Koshy ("[amd,%d] illegal row-index %d", __LINE__, ri)); 452ebccf1e3SJoseph Koshy 453ebccf1e3SJoseph Koshy phw = pmc_pcpu[cpu]->pc_hwpmcs[ri]; 454ebccf1e3SJoseph Koshy 455ebccf1e3SJoseph Koshy KASSERT(pm == NULL || phw->phw_pmc == NULL, 4566b8c8cd8SJoseph Koshy ("[amd,%d] pm=%p phw->pm=%p hwpmc not unconfigured", 4576b8c8cd8SJoseph Koshy __LINE__, pm, phw->phw_pmc)); 458ebccf1e3SJoseph Koshy 459ebccf1e3SJoseph Koshy phw->phw_pmc = pm; 460ebccf1e3SJoseph Koshy return 0; 461ebccf1e3SJoseph Koshy } 462ebccf1e3SJoseph Koshy 463ebccf1e3SJoseph Koshy /* 464c5153e19SJoseph Koshy * Retrieve a configured PMC pointer from hardware state. 465c5153e19SJoseph Koshy */ 466c5153e19SJoseph Koshy 467c5153e19SJoseph Koshy static int 468c5153e19SJoseph Koshy amd_get_config(int cpu, int ri, struct pmc **ppm) 469c5153e19SJoseph Koshy { 470c5153e19SJoseph Koshy *ppm = pmc_pcpu[cpu]->pc_hwpmcs[ri]->phw_pmc; 471c5153e19SJoseph Koshy 472c5153e19SJoseph Koshy return 0; 473c5153e19SJoseph Koshy } 474c5153e19SJoseph Koshy 475c5153e19SJoseph Koshy /* 476ebccf1e3SJoseph Koshy * Machine dependent actions taken during the context switch in of a 477ebccf1e3SJoseph Koshy * thread. 
478ebccf1e3SJoseph Koshy */ 479ebccf1e3SJoseph Koshy 480ebccf1e3SJoseph Koshy static int 4816b8c8cd8SJoseph Koshy amd_switch_in(struct pmc_cpu *pc, struct pmc_process *pp) 482ebccf1e3SJoseph Koshy { 483ebccf1e3SJoseph Koshy (void) pc; 484ebccf1e3SJoseph Koshy 4856b8c8cd8SJoseph Koshy PMCDBG(MDP,SWI,1, "pc=%p pp=%p enable-msr=%d", pc, pp, 486c5153e19SJoseph Koshy (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0); 4876b8c8cd8SJoseph Koshy 4886b8c8cd8SJoseph Koshy /* enable the RDPMC instruction if needed */ 489c5153e19SJoseph Koshy if (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) 490ebccf1e3SJoseph Koshy load_cr4(rcr4() | CR4_PCE); 4916b8c8cd8SJoseph Koshy 492ebccf1e3SJoseph Koshy return 0; 493ebccf1e3SJoseph Koshy } 494ebccf1e3SJoseph Koshy 495ebccf1e3SJoseph Koshy /* 496ebccf1e3SJoseph Koshy * Machine dependent actions taken during the context switch out of a 497ebccf1e3SJoseph Koshy * thread. 498ebccf1e3SJoseph Koshy */ 499ebccf1e3SJoseph Koshy 500ebccf1e3SJoseph Koshy static int 5016b8c8cd8SJoseph Koshy amd_switch_out(struct pmc_cpu *pc, struct pmc_process *pp) 502ebccf1e3SJoseph Koshy { 503ebccf1e3SJoseph Koshy (void) pc; 5046b8c8cd8SJoseph Koshy (void) pp; /* can be NULL */ 505ebccf1e3SJoseph Koshy 5066b8c8cd8SJoseph Koshy PMCDBG(MDP,SWO,1, "pc=%p pp=%p enable-msr=%d", pc, pp, pp ? 507c5153e19SJoseph Koshy (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) == 1 : 0); 5086b8c8cd8SJoseph Koshy 5096b8c8cd8SJoseph Koshy /* always turn off the RDPMC instruction */ 510ebccf1e3SJoseph Koshy load_cr4(rcr4() & ~CR4_PCE); 5116b8c8cd8SJoseph Koshy 512ebccf1e3SJoseph Koshy return 0; 513ebccf1e3SJoseph Koshy } 514ebccf1e3SJoseph Koshy 515ebccf1e3SJoseph Koshy /* 516ebccf1e3SJoseph Koshy * Check if a given allocation is feasible. 
517ebccf1e3SJoseph Koshy */ 518ebccf1e3SJoseph Koshy 519ebccf1e3SJoseph Koshy static int 520ebccf1e3SJoseph Koshy amd_allocate_pmc(int cpu, int ri, struct pmc *pm, 521ebccf1e3SJoseph Koshy const struct pmc_op_pmcallocate *a) 522ebccf1e3SJoseph Koshy { 523ebccf1e3SJoseph Koshy int i; 524ebccf1e3SJoseph Koshy uint32_t allowed_unitmask, caps, config, unitmask; 525ebccf1e3SJoseph Koshy enum pmc_event pe; 526ebccf1e3SJoseph Koshy const struct pmc_descr *pd; 527ebccf1e3SJoseph Koshy 528ebccf1e3SJoseph Koshy (void) cpu; 529ebccf1e3SJoseph Koshy 530ebccf1e3SJoseph Koshy KASSERT(cpu >= 0 && cpu < mp_ncpus, 531ebccf1e3SJoseph Koshy ("[amd,%d] illegal CPU value %d", __LINE__, cpu)); 532ebccf1e3SJoseph Koshy KASSERT(ri >= 0 && ri < AMD_NPMCS, 533ebccf1e3SJoseph Koshy ("[amd,%d] illegal row index %d", __LINE__, ri)); 534ebccf1e3SJoseph Koshy 535ebccf1e3SJoseph Koshy pd = &amd_pmcdesc[ri].pm_descr; 536ebccf1e3SJoseph Koshy 537ebccf1e3SJoseph Koshy /* check class match */ 538c5153e19SJoseph Koshy if (pd->pd_class != a->pm_class) 539ebccf1e3SJoseph Koshy return EINVAL; 540ebccf1e3SJoseph Koshy 541ebccf1e3SJoseph Koshy caps = pm->pm_caps; 542ebccf1e3SJoseph Koshy 543ebccf1e3SJoseph Koshy PMCDBG(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps); 544ebccf1e3SJoseph Koshy 545ebccf1e3SJoseph Koshy if ((pd->pd_caps & caps) != caps) 546ebccf1e3SJoseph Koshy return EPERM; 547ebccf1e3SJoseph Koshy if (pd->pd_class == PMC_CLASS_TSC) { 548ebccf1e3SJoseph Koshy /* TSC's are always allocated in system-wide counting mode */ 549ebccf1e3SJoseph Koshy if (a->pm_ev != PMC_EV_TSC_TSC || 550ebccf1e3SJoseph Koshy a->pm_mode != PMC_MODE_SC) 551ebccf1e3SJoseph Koshy return EINVAL; 552ebccf1e3SJoseph Koshy return 0; 553ebccf1e3SJoseph Koshy } 554ebccf1e3SJoseph Koshy 555ebccf1e3SJoseph Koshy KASSERT(pd->pd_class == AMD_PMC_CLASS, 556ebccf1e3SJoseph Koshy ("[amd,%d] Unknown PMC class (%d)", __LINE__, pd->pd_class)); 557ebccf1e3SJoseph Koshy 558ebccf1e3SJoseph Koshy pe = a->pm_ev; 559ebccf1e3SJoseph 
Koshy 560ebccf1e3SJoseph Koshy /* map ev to the correct event mask code */ 561ebccf1e3SJoseph Koshy config = allowed_unitmask = 0; 562ebccf1e3SJoseph Koshy for (i = 0; i < amd_event_codes_size; i++) 563ebccf1e3SJoseph Koshy if (amd_event_codes[i].pe_ev == pe) { 564ebccf1e3SJoseph Koshy config = 565ebccf1e3SJoseph Koshy AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code); 566ebccf1e3SJoseph Koshy allowed_unitmask = 567ebccf1e3SJoseph Koshy AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask); 568ebccf1e3SJoseph Koshy break; 569ebccf1e3SJoseph Koshy } 570ebccf1e3SJoseph Koshy if (i == amd_event_codes_size) 571ebccf1e3SJoseph Koshy return EINVAL; 572ebccf1e3SJoseph Koshy 573ebccf1e3SJoseph Koshy unitmask = a->pm_amd_config & AMD_PMC_UNITMASK; 574ebccf1e3SJoseph Koshy if (unitmask & ~allowed_unitmask) /* disallow reserved bits */ 575ebccf1e3SJoseph Koshy return EINVAL; 576ebccf1e3SJoseph Koshy 577ebccf1e3SJoseph Koshy if (unitmask && (caps & PMC_CAP_QUALIFIER)) 578ebccf1e3SJoseph Koshy config |= unitmask; 579ebccf1e3SJoseph Koshy 580ebccf1e3SJoseph Koshy if (caps & PMC_CAP_THRESHOLD) 581ebccf1e3SJoseph Koshy config |= a->pm_amd_config & AMD_PMC_COUNTERMASK; 582ebccf1e3SJoseph Koshy 583ebccf1e3SJoseph Koshy /* set at least one of the 'usr' or 'os' caps */ 584ebccf1e3SJoseph Koshy if (caps & PMC_CAP_USER) 585ebccf1e3SJoseph Koshy config |= AMD_PMC_USR; 586ebccf1e3SJoseph Koshy if (caps & PMC_CAP_SYSTEM) 587ebccf1e3SJoseph Koshy config |= AMD_PMC_OS; 588ebccf1e3SJoseph Koshy if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0) 589ebccf1e3SJoseph Koshy config |= (AMD_PMC_USR|AMD_PMC_OS); 590ebccf1e3SJoseph Koshy 591ebccf1e3SJoseph Koshy if (caps & PMC_CAP_EDGE) 592ebccf1e3SJoseph Koshy config |= AMD_PMC_EDGE; 593ebccf1e3SJoseph Koshy if (caps & PMC_CAP_INVERT) 594ebccf1e3SJoseph Koshy config |= AMD_PMC_INVERT; 595ebccf1e3SJoseph Koshy if (caps & PMC_CAP_INTERRUPT) 596ebccf1e3SJoseph Koshy config |= AMD_PMC_INT; 597ebccf1e3SJoseph Koshy 598ebccf1e3SJoseph Koshy 
pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */ 599ebccf1e3SJoseph Koshy 600ebccf1e3SJoseph Koshy PMCDBG(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, config); 601ebccf1e3SJoseph Koshy 602ebccf1e3SJoseph Koshy return 0; 603ebccf1e3SJoseph Koshy } 604ebccf1e3SJoseph Koshy 605ebccf1e3SJoseph Koshy /* 606ebccf1e3SJoseph Koshy * Release machine dependent state associated with a PMC. This is a 607ebccf1e3SJoseph Koshy * no-op on this architecture. 608ebccf1e3SJoseph Koshy * 609ebccf1e3SJoseph Koshy */ 610ebccf1e3SJoseph Koshy 611ebccf1e3SJoseph Koshy /* ARGSUSED0 */ 612ebccf1e3SJoseph Koshy static int 613ebccf1e3SJoseph Koshy amd_release_pmc(int cpu, int ri, struct pmc *pmc) 614ebccf1e3SJoseph Koshy { 615ebccf1e3SJoseph Koshy #if DEBUG 616ebccf1e3SJoseph Koshy const struct amd_descr *pd; 617ebccf1e3SJoseph Koshy #endif 618ebccf1e3SJoseph Koshy struct pmc_hw *phw; 619ebccf1e3SJoseph Koshy 620ebccf1e3SJoseph Koshy (void) pmc; 621ebccf1e3SJoseph Koshy 622ebccf1e3SJoseph Koshy KASSERT(cpu >= 0 && cpu < mp_ncpus, 623ebccf1e3SJoseph Koshy ("[amd,%d] illegal CPU value %d", __LINE__, cpu)); 624ebccf1e3SJoseph Koshy KASSERT(ri >= 0 && ri < AMD_NPMCS, 625ebccf1e3SJoseph Koshy ("[amd,%d] illegal row-index %d", __LINE__, ri)); 626ebccf1e3SJoseph Koshy 627ebccf1e3SJoseph Koshy phw = pmc_pcpu[cpu]->pc_hwpmcs[ri]; 628ebccf1e3SJoseph Koshy 629ebccf1e3SJoseph Koshy KASSERT(phw->phw_pmc == NULL, 630ebccf1e3SJoseph Koshy ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc)); 631ebccf1e3SJoseph Koshy 632ebccf1e3SJoseph Koshy #if DEBUG 633ebccf1e3SJoseph Koshy pd = &amd_pmcdesc[ri]; 634ebccf1e3SJoseph Koshy if (pd->pm_descr.pd_class == AMD_PMC_CLASS) 635ebccf1e3SJoseph Koshy KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel), 636ebccf1e3SJoseph Koshy ("[amd,%d] PMC %d released while active", __LINE__, ri)); 637ebccf1e3SJoseph Koshy #endif 638ebccf1e3SJoseph Koshy 639ebccf1e3SJoseph Koshy return 0; 640ebccf1e3SJoseph Koshy } 641ebccf1e3SJoseph Koshy 642ebccf1e3SJoseph Koshy /* 
 * start a PMC.
 *
 * Writes the saved event-select value (with the ENABLE bit set) to
 * the row's EVSEL MSR.  NOTE(review): wrmsr() acts on the *current*
 * CPU, so this presumably runs on CPU 'cpu' — confirm with caller.
 */

static int
amd_start_pmc(int cpu, int ri)
{
	uint32_t config;
	struct pmc *pm;
	struct pmc_hw *phw;
	const struct amd_descr *pd;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pm  = phw->phw_pmc;
	pd  = &amd_pmcdesc[ri];

	/* a PMC must be configured on this row before it is started */
	KASSERT(pm != NULL,
	    ("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
		cpu, ri));

	PMCDBG(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);

	if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
		return 0;	/* TSCs are always running */

	KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));

	/* starting an already-running counter would indicate a bug */
	KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
	    ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
	    ri, cpu, pd->pm_descr.pd_name));

	/* turn on the PMC ENABLE bit */
	config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;

PMCDBG(MDP,STA,2,"amd-start config=0x%x", config); 684ebccf1e3SJoseph Koshy 685ebccf1e3SJoseph Koshy wrmsr(pd->pm_evsel, config); 686ebccf1e3SJoseph Koshy return 0; 687ebccf1e3SJoseph Koshy } 688ebccf1e3SJoseph Koshy 689ebccf1e3SJoseph Koshy /* 690ebccf1e3SJoseph Koshy * Stop a PMC. 691ebccf1e3SJoseph Koshy */ 692ebccf1e3SJoseph Koshy 693ebccf1e3SJoseph Koshy static int 694ebccf1e3SJoseph Koshy amd_stop_pmc(int cpu, int ri) 695ebccf1e3SJoseph Koshy { 696ebccf1e3SJoseph Koshy struct pmc *pm; 697ebccf1e3SJoseph Koshy struct pmc_hw *phw; 698ebccf1e3SJoseph Koshy const struct amd_descr *pd; 699ebccf1e3SJoseph Koshy uint64_t config; 700ebccf1e3SJoseph Koshy 701ebccf1e3SJoseph Koshy KASSERT(cpu >= 0 && cpu < mp_ncpus, 702ebccf1e3SJoseph Koshy ("[amd,%d] illegal CPU value %d", __LINE__, cpu)); 703ebccf1e3SJoseph Koshy KASSERT(ri >= 0 && ri < AMD_NPMCS, 704ebccf1e3SJoseph Koshy ("[amd,%d] illegal row-index %d", __LINE__, ri)); 705ebccf1e3SJoseph Koshy 706ebccf1e3SJoseph Koshy phw = pmc_pcpu[cpu]->pc_hwpmcs[ri]; 707ebccf1e3SJoseph Koshy pm = phw->phw_pmc; 708ebccf1e3SJoseph Koshy pd = &amd_pmcdesc[ri]; 709ebccf1e3SJoseph Koshy 710ebccf1e3SJoseph Koshy KASSERT(pm != NULL, 711ebccf1e3SJoseph Koshy ("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__, 712ebccf1e3SJoseph Koshy cpu, ri)); 713ebccf1e3SJoseph Koshy 714ebccf1e3SJoseph Koshy /* can't stop a TSC */ 715ebccf1e3SJoseph Koshy if (pd->pm_descr.pd_class == PMC_CLASS_TSC) 716ebccf1e3SJoseph Koshy return 0; 717ebccf1e3SJoseph Koshy 718ebccf1e3SJoseph Koshy KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS, 719ebccf1e3SJoseph Koshy ("[amd,%d] unknown PMC class (%d)", __LINE__, 720ebccf1e3SJoseph Koshy pd->pm_descr.pd_class)); 721ebccf1e3SJoseph Koshy 722ebccf1e3SJoseph Koshy KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel), 723ebccf1e3SJoseph Koshy ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped", 724ebccf1e3SJoseph Koshy __LINE__, ri, cpu, pd->pm_descr.pd_name)); 725ebccf1e3SJoseph Koshy 726ebccf1e3SJoseph Koshy PMCDBG(MDP,STO,1,"amd-stop 
ri=%d", ri); 727ebccf1e3SJoseph Koshy 728ebccf1e3SJoseph Koshy /* turn off the PMC ENABLE bit */ 729ebccf1e3SJoseph Koshy config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE; 730ebccf1e3SJoseph Koshy wrmsr(pd->pm_evsel, config); 731ebccf1e3SJoseph Koshy return 0; 732ebccf1e3SJoseph Koshy } 733ebccf1e3SJoseph Koshy 734ebccf1e3SJoseph Koshy /* 735ebccf1e3SJoseph Koshy * Interrupt handler. This function needs to return '1' if the 736ebccf1e3SJoseph Koshy * interrupt was this CPU's PMCs or '0' otherwise. It is not allowed 737ebccf1e3SJoseph Koshy * to sleep or do anything a 'fast' interrupt handler is not allowed 738ebccf1e3SJoseph Koshy * to do. 739ebccf1e3SJoseph Koshy */ 740ebccf1e3SJoseph Koshy 741ebccf1e3SJoseph Koshy static int 742ebccf1e3SJoseph Koshy amd_intr(int cpu, uintptr_t eip) 743ebccf1e3SJoseph Koshy { 744ebccf1e3SJoseph Koshy int i, retval; 745ebccf1e3SJoseph Koshy enum pmc_mode mode; 746ebccf1e3SJoseph Koshy uint32_t perfctr; 747ebccf1e3SJoseph Koshy struct pmc *pm; 748ebccf1e3SJoseph Koshy struct pmc_cpu *pc; 749ebccf1e3SJoseph Koshy struct pmc_hw *phw; 750ebccf1e3SJoseph Koshy 751ebccf1e3SJoseph Koshy KASSERT(cpu >= 0 && cpu < mp_ncpus, 752ebccf1e3SJoseph Koshy ("[amd,%d] out of range CPU %d", __LINE__, cpu)); 753ebccf1e3SJoseph Koshy 754ebccf1e3SJoseph Koshy retval = 0; 755ebccf1e3SJoseph Koshy 756ebccf1e3SJoseph Koshy pc = pmc_pcpu[cpu]; 757ebccf1e3SJoseph Koshy 758ebccf1e3SJoseph Koshy /* 759ebccf1e3SJoseph Koshy * look for all PMCs that have interrupted: 760ebccf1e3SJoseph Koshy * - skip over the TSC [PMC#0] 761ebccf1e3SJoseph Koshy * - look for a PMC with a valid 'struct pmc' association 762ebccf1e3SJoseph Koshy * - look for a PMC in (a) sampling mode and (b) which has 763ebccf1e3SJoseph Koshy * overflowed. If found, we update the process's 764ebccf1e3SJoseph Koshy * histogram or send it a profiling signal by calling 765ebccf1e3SJoseph Koshy * the appropriate helper function. 
766ebccf1e3SJoseph Koshy */ 767ebccf1e3SJoseph Koshy 768ebccf1e3SJoseph Koshy for (i = 1; i < AMD_NPMCS; i++) { 769ebccf1e3SJoseph Koshy 770ebccf1e3SJoseph Koshy phw = pc->pc_hwpmcs[i]; 771ebccf1e3SJoseph Koshy perfctr = amd_pmcdesc[i].pm_perfctr; 772ebccf1e3SJoseph Koshy KASSERT(phw != NULL, ("[amd,%d] null PHW pointer", __LINE__)); 773ebccf1e3SJoseph Koshy 774ebccf1e3SJoseph Koshy if ((pm = phw->phw_pmc) == NULL || 775ebccf1e3SJoseph Koshy pm->pm_state != PMC_STATE_RUNNING) { 776ebccf1e3SJoseph Koshy atomic_add_int(&pmc_stats.pm_intr_ignored, 1); 777ebccf1e3SJoseph Koshy continue; 778ebccf1e3SJoseph Koshy } 779ebccf1e3SJoseph Koshy 780c5153e19SJoseph Koshy mode = PMC_TO_MODE(pm); 781ebccf1e3SJoseph Koshy if (PMC_IS_SAMPLING_MODE(mode) && 782ebccf1e3SJoseph Koshy AMD_PMC_HAS_OVERFLOWED(perfctr)) { 783ebccf1e3SJoseph Koshy atomic_add_int(&pmc_stats.pm_intr_processed, 1); 784ebccf1e3SJoseph Koshy if (PMC_IS_SYSTEM_MODE(mode)) 785ebccf1e3SJoseph Koshy pmc_update_histogram(phw, eip); 786ebccf1e3SJoseph Koshy else if (PMC_IS_VIRTUAL_MODE(mode)) 787ebccf1e3SJoseph Koshy pmc_send_signal(pm); 788ebccf1e3SJoseph Koshy retval = 1; 789ebccf1e3SJoseph Koshy } 790ebccf1e3SJoseph Koshy } 791ebccf1e3SJoseph Koshy return retval; 792ebccf1e3SJoseph Koshy } 793ebccf1e3SJoseph Koshy 794ebccf1e3SJoseph Koshy /* 795ebccf1e3SJoseph Koshy * describe a PMC 796ebccf1e3SJoseph Koshy */ 797ebccf1e3SJoseph Koshy static int 798ebccf1e3SJoseph Koshy amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc) 799ebccf1e3SJoseph Koshy { 800ebccf1e3SJoseph Koshy int error; 801ebccf1e3SJoseph Koshy size_t copied; 802ebccf1e3SJoseph Koshy const struct amd_descr *pd; 803ebccf1e3SJoseph Koshy struct pmc_hw *phw; 804ebccf1e3SJoseph Koshy 805ebccf1e3SJoseph Koshy KASSERT(cpu >= 0 && cpu < mp_ncpus, 806ebccf1e3SJoseph Koshy ("[amd,%d] illegal CPU %d", __LINE__, cpu)); 807ebccf1e3SJoseph Koshy KASSERT(ri >= 0 && ri < AMD_NPMCS, 808ebccf1e3SJoseph Koshy ("[amd,%d] row-index %d out of range", 
__LINE__, ri)); 809ebccf1e3SJoseph Koshy 810ebccf1e3SJoseph Koshy phw = pmc_pcpu[cpu]->pc_hwpmcs[ri]; 811ebccf1e3SJoseph Koshy pd = &amd_pmcdesc[ri]; 812ebccf1e3SJoseph Koshy 813ebccf1e3SJoseph Koshy if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name, 814ebccf1e3SJoseph Koshy PMC_NAME_MAX, &copied)) != 0) 815ebccf1e3SJoseph Koshy return error; 816ebccf1e3SJoseph Koshy 817ebccf1e3SJoseph Koshy pi->pm_class = pd->pm_descr.pd_class; 818ebccf1e3SJoseph Koshy 819ebccf1e3SJoseph Koshy if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) { 820ebccf1e3SJoseph Koshy pi->pm_enabled = TRUE; 821ebccf1e3SJoseph Koshy *ppmc = phw->phw_pmc; 822ebccf1e3SJoseph Koshy } else { 823ebccf1e3SJoseph Koshy pi->pm_enabled = FALSE; 824ebccf1e3SJoseph Koshy *ppmc = NULL; 825ebccf1e3SJoseph Koshy } 826ebccf1e3SJoseph Koshy 827ebccf1e3SJoseph Koshy return 0; 828ebccf1e3SJoseph Koshy } 829ebccf1e3SJoseph Koshy 830ebccf1e3SJoseph Koshy /* 831ebccf1e3SJoseph Koshy * i386 specific entry points 832ebccf1e3SJoseph Koshy */ 833ebccf1e3SJoseph Koshy 834ebccf1e3SJoseph Koshy /* 835ebccf1e3SJoseph Koshy * return the MSR address of the given PMC. 836ebccf1e3SJoseph Koshy */ 837ebccf1e3SJoseph Koshy 838ebccf1e3SJoseph Koshy static int 839ebccf1e3SJoseph Koshy amd_get_msr(int ri, uint32_t *msr) 840ebccf1e3SJoseph Koshy { 841ebccf1e3SJoseph Koshy KASSERT(ri >= 0 && ri < AMD_NPMCS, 842ebccf1e3SJoseph Koshy ("[amd,%d] ri %d out of range", __LINE__, ri)); 843ebccf1e3SJoseph Koshy 8446b8c8cd8SJoseph Koshy *msr = amd_pmcdesc[ri].pm_perfctr - AMD_PMC_PERFCTR_0; 845ebccf1e3SJoseph Koshy return 0; 846ebccf1e3SJoseph Koshy } 847ebccf1e3SJoseph Koshy 848ebccf1e3SJoseph Koshy /* 849ebccf1e3SJoseph Koshy * processor dependent initialization. 
850ebccf1e3SJoseph Koshy */ 851ebccf1e3SJoseph Koshy 852ebccf1e3SJoseph Koshy /* 853ebccf1e3SJoseph Koshy * Per-processor data structure 854ebccf1e3SJoseph Koshy * 855ebccf1e3SJoseph Koshy * [common stuff] 856ebccf1e3SJoseph Koshy * [5 struct pmc_hw pointers] 857ebccf1e3SJoseph Koshy * [5 struct pmc_hw structures] 858ebccf1e3SJoseph Koshy */ 859ebccf1e3SJoseph Koshy 860ebccf1e3SJoseph Koshy struct amd_cpu { 861ebccf1e3SJoseph Koshy struct pmc_cpu pc_common; 862ebccf1e3SJoseph Koshy struct pmc_hw *pc_hwpmcs[AMD_NPMCS]; 863ebccf1e3SJoseph Koshy struct pmc_hw pc_amdpmcs[AMD_NPMCS]; 864ebccf1e3SJoseph Koshy }; 865ebccf1e3SJoseph Koshy 866ebccf1e3SJoseph Koshy 867ebccf1e3SJoseph Koshy static int 868ebccf1e3SJoseph Koshy amd_init(int cpu) 869ebccf1e3SJoseph Koshy { 870ebccf1e3SJoseph Koshy int n; 871ebccf1e3SJoseph Koshy struct amd_cpu *pcs; 872ebccf1e3SJoseph Koshy struct pmc_hw *phw; 873ebccf1e3SJoseph Koshy 874ebccf1e3SJoseph Koshy KASSERT(cpu >= 0 && cpu < mp_ncpus, 875ebccf1e3SJoseph Koshy ("[amd,%d] insane cpu number %d", __LINE__, cpu)); 876ebccf1e3SJoseph Koshy 877ebccf1e3SJoseph Koshy PMCDBG(MDP,INI,1,"amd-init cpu=%d", cpu); 878ebccf1e3SJoseph Koshy 879ebccf1e3SJoseph Koshy MALLOC(pcs, struct amd_cpu *, sizeof(struct amd_cpu), M_PMC, 880ebccf1e3SJoseph Koshy M_WAITOK|M_ZERO); 881ebccf1e3SJoseph Koshy 882ebccf1e3SJoseph Koshy if (pcs == NULL) 883ebccf1e3SJoseph Koshy return ENOMEM; 884ebccf1e3SJoseph Koshy 885ebccf1e3SJoseph Koshy phw = &pcs->pc_amdpmcs[0]; 886ebccf1e3SJoseph Koshy 887ebccf1e3SJoseph Koshy /* 888ebccf1e3SJoseph Koshy * Initialize the per-cpu mutex and set the content of the 889ebccf1e3SJoseph Koshy * hardware descriptors to a known state. 
890ebccf1e3SJoseph Koshy */ 891ebccf1e3SJoseph Koshy 892ebccf1e3SJoseph Koshy for (n = 0; n < AMD_NPMCS; n++, phw++) { 893ebccf1e3SJoseph Koshy phw->phw_state = PMC_PHW_FLAG_IS_ENABLED | 894ebccf1e3SJoseph Koshy PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n); 895ebccf1e3SJoseph Koshy phw->phw_pmc = NULL; 896ebccf1e3SJoseph Koshy pcs->pc_hwpmcs[n] = phw; 897ebccf1e3SJoseph Koshy } 898ebccf1e3SJoseph Koshy 899ebccf1e3SJoseph Koshy /* Mark the TSC as shareable */ 900ebccf1e3SJoseph Koshy pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE; 901ebccf1e3SJoseph Koshy 902ebccf1e3SJoseph Koshy pmc_pcpu[cpu] = (struct pmc_cpu *) pcs; 903ebccf1e3SJoseph Koshy 904ebccf1e3SJoseph Koshy return 0; 905ebccf1e3SJoseph Koshy } 906ebccf1e3SJoseph Koshy 907ebccf1e3SJoseph Koshy 908ebccf1e3SJoseph Koshy /* 909ebccf1e3SJoseph Koshy * processor dependent cleanup prior to the KLD 910ebccf1e3SJoseph Koshy * being unloaded 911ebccf1e3SJoseph Koshy */ 912ebccf1e3SJoseph Koshy 913ebccf1e3SJoseph Koshy static int 914ebccf1e3SJoseph Koshy amd_cleanup(int cpu) 915ebccf1e3SJoseph Koshy { 916ebccf1e3SJoseph Koshy int i; 917ebccf1e3SJoseph Koshy uint32_t evsel; 918ebccf1e3SJoseph Koshy struct pmc_cpu *pcs; 919ebccf1e3SJoseph Koshy 920ebccf1e3SJoseph Koshy KASSERT(cpu >= 0 && cpu < mp_ncpus, 921ebccf1e3SJoseph Koshy ("[amd,%d] insane cpu number (%d)", __LINE__, cpu)); 922ebccf1e3SJoseph Koshy 923ebccf1e3SJoseph Koshy PMCDBG(MDP,INI,1,"amd-cleanup cpu=%d", cpu); 924ebccf1e3SJoseph Koshy 925ebccf1e3SJoseph Koshy /* 926ebccf1e3SJoseph Koshy * First, turn off all PMCs on this CPU. 
927ebccf1e3SJoseph Koshy */ 928ebccf1e3SJoseph Koshy 929ebccf1e3SJoseph Koshy for (i = 0; i < 4; i++) { /* XXX this loop is now not needed */ 930ebccf1e3SJoseph Koshy evsel = rdmsr(AMD_PMC_EVSEL_0 + i); 931ebccf1e3SJoseph Koshy evsel &= ~AMD_PMC_ENABLE; 932ebccf1e3SJoseph Koshy wrmsr(AMD_PMC_EVSEL_0 + i, evsel); 933ebccf1e3SJoseph Koshy } 934ebccf1e3SJoseph Koshy 935ebccf1e3SJoseph Koshy /* 936ebccf1e3SJoseph Koshy * Next, free up allocated space. 937ebccf1e3SJoseph Koshy */ 938ebccf1e3SJoseph Koshy 939ebccf1e3SJoseph Koshy pcs = pmc_pcpu[cpu]; 940ebccf1e3SJoseph Koshy 941ebccf1e3SJoseph Koshy #if DEBUG 942ebccf1e3SJoseph Koshy /* check the TSC */ 943ebccf1e3SJoseph Koshy KASSERT(pcs->pc_hwpmcs[0]->phw_pmc == NULL, 944ebccf1e3SJoseph Koshy ("[amd,%d] CPU%d,PMC0 still in use", __LINE__, cpu)); 945ebccf1e3SJoseph Koshy for (i = 1; i < AMD_NPMCS; i++) { 946ebccf1e3SJoseph Koshy KASSERT(pcs->pc_hwpmcs[i]->phw_pmc == NULL, 947ebccf1e3SJoseph Koshy ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i)); 948ebccf1e3SJoseph Koshy KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + (i-1)), 949ebccf1e3SJoseph Koshy ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i)); 950ebccf1e3SJoseph Koshy } 951ebccf1e3SJoseph Koshy #endif 952ebccf1e3SJoseph Koshy KASSERT(pcs != NULL, 953ebccf1e3SJoseph Koshy ("[amd,%d] null per-cpu state pointer (cpu%d)", __LINE__, cpu)); 954ebccf1e3SJoseph Koshy 955ebccf1e3SJoseph Koshy pmc_pcpu[cpu] = NULL; 956ebccf1e3SJoseph Koshy FREE(pcs, M_PMC); 957ebccf1e3SJoseph Koshy return 0; 958ebccf1e3SJoseph Koshy } 959ebccf1e3SJoseph Koshy 960ebccf1e3SJoseph Koshy /* 961ebccf1e3SJoseph Koshy * Initialize ourselves. 
962ebccf1e3SJoseph Koshy */ 963ebccf1e3SJoseph Koshy 964ebccf1e3SJoseph Koshy struct pmc_mdep * 965ebccf1e3SJoseph Koshy pmc_amd_initialize(void) 966ebccf1e3SJoseph Koshy { 967ebccf1e3SJoseph Koshy 968ebccf1e3SJoseph Koshy struct pmc_mdep *pmc_mdep; 969ebccf1e3SJoseph Koshy 970ebccf1e3SJoseph Koshy /* The presence of hardware performance counters on the AMD 971ebccf1e3SJoseph Koshy Athlon, Duron or later processors, is _not_ indicated by 972ebccf1e3SJoseph Koshy any of the processor feature flags set by the 'CPUID' 973ebccf1e3SJoseph Koshy instruction, so we only check the 'instruction family' 974ebccf1e3SJoseph Koshy field returned by CPUID for instruction family >= 6. This 975ebccf1e3SJoseph Koshy test needs to be be refined. */ 976ebccf1e3SJoseph Koshy 977ebccf1e3SJoseph Koshy if ((cpu_id & 0xF00) < 0x600) 978ebccf1e3SJoseph Koshy return NULL; 979ebccf1e3SJoseph Koshy 980ebccf1e3SJoseph Koshy MALLOC(pmc_mdep, struct pmc_mdep *, sizeof(struct pmc_mdep), 981ebccf1e3SJoseph Koshy M_PMC, M_WAITOK|M_ZERO); 982ebccf1e3SJoseph Koshy 983ebccf1e3SJoseph Koshy #if __i386__ 984ebccf1e3SJoseph Koshy pmc_mdep->pmd_cputype = PMC_CPU_AMD_K7; 985ebccf1e3SJoseph Koshy #elif __amd64__ 986ebccf1e3SJoseph Koshy pmc_mdep->pmd_cputype = PMC_CPU_AMD_K8; 987ebccf1e3SJoseph Koshy #else 988ebccf1e3SJoseph Koshy #error Unknown AMD CPU type. 
989ebccf1e3SJoseph Koshy #endif 990ebccf1e3SJoseph Koshy 991ebccf1e3SJoseph Koshy pmc_mdep->pmd_npmc = AMD_NPMCS; 992ebccf1e3SJoseph Koshy 993ebccf1e3SJoseph Koshy /* this processor has two classes of usable PMCs */ 994ebccf1e3SJoseph Koshy pmc_mdep->pmd_nclass = 2; 995c5153e19SJoseph Koshy 996c5153e19SJoseph Koshy /* TSC */ 997c5153e19SJoseph Koshy pmc_mdep->pmd_classes[0].pm_class = PMC_CLASS_TSC; 998c5153e19SJoseph Koshy pmc_mdep->pmd_classes[0].pm_caps = PMC_CAP_READ; 999c5153e19SJoseph Koshy pmc_mdep->pmd_classes[0].pm_width = 64; 1000c5153e19SJoseph Koshy 1001c5153e19SJoseph Koshy /* AMD K7/K8 PMCs */ 1002c5153e19SJoseph Koshy pmc_mdep->pmd_classes[1].pm_class = AMD_PMC_CLASS; 1003c5153e19SJoseph Koshy pmc_mdep->pmd_classes[1].pm_caps = AMD_PMC_CAPS; 1004c5153e19SJoseph Koshy pmc_mdep->pmd_classes[1].pm_width = 48; 1005c5153e19SJoseph Koshy 1006ebccf1e3SJoseph Koshy pmc_mdep->pmd_nclasspmcs[0] = 1; 1007ebccf1e3SJoseph Koshy pmc_mdep->pmd_nclasspmcs[1] = (AMD_NPMCS-1); 1008ebccf1e3SJoseph Koshy 1009ebccf1e3SJoseph Koshy pmc_mdep->pmd_init = amd_init; 1010ebccf1e3SJoseph Koshy pmc_mdep->pmd_cleanup = amd_cleanup; 1011ebccf1e3SJoseph Koshy pmc_mdep->pmd_switch_in = amd_switch_in; 1012ebccf1e3SJoseph Koshy pmc_mdep->pmd_switch_out = amd_switch_out; 1013ebccf1e3SJoseph Koshy pmc_mdep->pmd_read_pmc = amd_read_pmc; 1014ebccf1e3SJoseph Koshy pmc_mdep->pmd_write_pmc = amd_write_pmc; 1015ebccf1e3SJoseph Koshy pmc_mdep->pmd_config_pmc = amd_config_pmc; 1016c5153e19SJoseph Koshy pmc_mdep->pmd_get_config = amd_get_config; 1017ebccf1e3SJoseph Koshy pmc_mdep->pmd_allocate_pmc = amd_allocate_pmc; 1018ebccf1e3SJoseph Koshy pmc_mdep->pmd_release_pmc = amd_release_pmc; 1019ebccf1e3SJoseph Koshy pmc_mdep->pmd_start_pmc = amd_start_pmc; 1020ebccf1e3SJoseph Koshy pmc_mdep->pmd_stop_pmc = amd_stop_pmc; 1021ebccf1e3SJoseph Koshy pmc_mdep->pmd_intr = amd_intr; 1022ebccf1e3SJoseph Koshy pmc_mdep->pmd_describe = amd_describe; 1023ebccf1e3SJoseph Koshy pmc_mdep->pmd_get_msr = 
amd_get_msr; /* i386 */ 1024ebccf1e3SJoseph Koshy 1025ebccf1e3SJoseph Koshy PMCDBG(MDP,INI,0,"%s","amd-initialize"); 1026ebccf1e3SJoseph Koshy 1027ebccf1e3SJoseph Koshy return pmc_mdep; 1028ebccf1e3SJoseph Koshy } 1029