xref: /freebsd/sys/dev/hwpmc/hwpmc_amd.c (revision 6b8c8cd85fae1f4947956b06ec59b4f808951f29)
1ebccf1e3SJoseph Koshy /*-
2ebccf1e3SJoseph Koshy  * Copyright (c) 2003-2005 Joseph Koshy
3ebccf1e3SJoseph Koshy  * All rights reserved.
4ebccf1e3SJoseph Koshy  *
5ebccf1e3SJoseph Koshy  * Redistribution and use in source and binary forms, with or without
6ebccf1e3SJoseph Koshy  * modification, are permitted provided that the following conditions
7ebccf1e3SJoseph Koshy  * are met:
8ebccf1e3SJoseph Koshy  * 1. Redistributions of source code must retain the above copyright
9ebccf1e3SJoseph Koshy  *    notice, this list of conditions and the following disclaimer.
10ebccf1e3SJoseph Koshy  * 2. Redistributions in binary form must reproduce the above copyright
11ebccf1e3SJoseph Koshy  *    notice, this list of conditions and the following disclaimer in the
12ebccf1e3SJoseph Koshy  *    documentation and/or other materials provided with the distribution.
13ebccf1e3SJoseph Koshy  *
14ebccf1e3SJoseph Koshy  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15ebccf1e3SJoseph Koshy  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16ebccf1e3SJoseph Koshy  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17ebccf1e3SJoseph Koshy  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18ebccf1e3SJoseph Koshy  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19ebccf1e3SJoseph Koshy  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20ebccf1e3SJoseph Koshy  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21ebccf1e3SJoseph Koshy  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22ebccf1e3SJoseph Koshy  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23ebccf1e3SJoseph Koshy  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24ebccf1e3SJoseph Koshy  * SUCH DAMAGE.
25ebccf1e3SJoseph Koshy  *
26ebccf1e3SJoseph Koshy  */
27ebccf1e3SJoseph Koshy 
28ebccf1e3SJoseph Koshy #include <sys/cdefs.h>
29ebccf1e3SJoseph Koshy __FBSDID("$FreeBSD$");
30ebccf1e3SJoseph Koshy 
31ebccf1e3SJoseph Koshy /* Support for the AMD K7 and later processors */
32ebccf1e3SJoseph Koshy 
33ebccf1e3SJoseph Koshy #include <sys/param.h>
34ebccf1e3SJoseph Koshy #include <sys/lock.h>
35ebccf1e3SJoseph Koshy #include <sys/malloc.h>
36ebccf1e3SJoseph Koshy #include <sys/mutex.h>
377ad17ef9SMarcel Moolenaar #include <sys/pmc.h>
38ebccf1e3SJoseph Koshy #include <sys/smp.h>
39ebccf1e3SJoseph Koshy #include <sys/systm.h>
40ebccf1e3SJoseph Koshy 
41ebccf1e3SJoseph Koshy #include <machine/md_var.h>
42ebccf1e3SJoseph Koshy 
/* AMD K7 and K8 PMCs */

/* MSR addresses of the four event-select (EVSEL) registers. */
#define	AMD_PMC_EVSEL_0		0xC0010000
#define	AMD_PMC_EVSEL_1		0xC0010001
#define	AMD_PMC_EVSEL_2		0xC0010002
#define	AMD_PMC_EVSEL_3		0xC0010003

/* MSR addresses of the corresponding performance counter registers. */
#define	AMD_PMC_PERFCTR_0	0xC0010004
#define	AMD_PMC_PERFCTR_1	0xC0010005
#define	AMD_PMC_PERFCTR_2	0xC0010006
#define	AMD_PMC_PERFCTR_3	0xC0010007

/* Event-select codes that are valid on the K7. */
#define	K7_VALID_EVENT_CODE(c) (((c) >= 0x40 && (c) <= 0x47) ||		\
	((c) >= 0x80 && (c) <= 0x85) || ((c) >= 0xC0 && (c) <= 0xC7) ||	\
	((c) >= 0xCD && (c) <= 0xCF))

/* Capabilities supported by K7/K8 style programmable counters. */
#define AMD_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
	PMC_CAP_SYSTEM | PMC_CAP_EDGE | PMC_CAP_THRESHOLD | \
	PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INVERT | PMC_CAP_QUALIFIER)

/* reserved bits include bit 21 and the top two bits of the unit mask */
#define K7_PMC_RESERVED ((1 << 21) | (3 << 13))

/* On the K8 only bit 21 of the event selector is reserved. */
#define	K8_PMC_RESERVED (1 << 21)

/* A PMC is stopped when the enable bit in its EVSEL MSR is clear. */
#define AMD_PMC_IS_STOPPED(evsel) ((rdmsr((evsel)) & AMD_PMC_ENABLE) == 0)

/*
 * Sampling PMCs are loaded with the 2's complement of the desired
 * count (see amd_write_pmc()) and count upwards; bit 47 of the 48-bit
 * counter dropping to zero therefore indicates an overflow.
 */
#define AMD_PMC_HAS_OVERFLOWED(pmc) ((rdpmc(pmc) & (1ULL << 47)) == 0)
70ebccf1e3SJoseph Koshy 
/*
 * Compile-time selection of the PMC flavour: the i386 build drives K7
 * counters, the amd64 build drives K8 counters.  Both flavours use the
 * same EVSEL/PERFCTR MSR pair layout, so the rest of this file is
 * written against the arch-neutral AMD_* aliases defined below.
 */
#if	__i386__
#define	AMD_NPMCS		K7_NPMCS
#define	AMD_PMC_CLASS		PMC_CLASS_K7
#define	AMD_PMC_COUNTERMASK	K7_PMC_COUNTERMASK
#define	AMD_PMC_TO_COUNTER(x)	K7_PMC_TO_COUNTER(x)
#define	AMD_PMC_INVERT		K7_PMC_INVERT
#define	AMD_PMC_ENABLE		K7_PMC_ENABLE
#define	AMD_PMC_INT		K7_PMC_INT
#define	AMD_PMC_PC		K7_PMC_PC
#define	AMD_PMC_EDGE		K7_PMC_EDGE
#define	AMD_PMC_OS		K7_PMC_OS
#define	AMD_PMC_USR		K7_PMC_USR

/* Individual unit-mask bits (MOESI cache-state qualifiers). */
#define	AMD_PMC_UNITMASK_M	K7_PMC_UNITMASK_M
#define	AMD_PMC_UNITMASK_O	K7_PMC_UNITMASK_O
#define	AMD_PMC_UNITMASK_E	K7_PMC_UNITMASK_E
#define	AMD_PMC_UNITMASK_S	K7_PMC_UNITMASK_S
#define	AMD_PMC_UNITMASK_I	K7_PMC_UNITMASK_I

#define	AMD_PMC_UNITMASK	K7_PMC_UNITMASK
#define	AMD_PMC_EVENTMASK	K7_PMC_EVENTMASK
#define	AMD_PMC_TO_UNITMASK(x)	K7_PMC_TO_UNITMASK(x)
#define	AMD_PMC_TO_EVENTMASK(x)	K7_PMC_TO_EVENTMASK(x)
#define	AMD_VALID_BITS		K7_VALID_BITS

/* Prefix used when naming the programmable counter rows. */
#define	AMD_PMC_CLASS_NAME	"K7-"

#elif	__amd64__

#define	AMD_NPMCS		K8_NPMCS
#define	AMD_PMC_CLASS		PMC_CLASS_K8
#define	AMD_PMC_COUNTERMASK	K8_PMC_COUNTERMASK
#define	AMD_PMC_TO_COUNTER(x)	K8_PMC_TO_COUNTER(x)
#define	AMD_PMC_INVERT		K8_PMC_INVERT
#define	AMD_PMC_ENABLE		K8_PMC_ENABLE
#define	AMD_PMC_INT		K8_PMC_INT
#define	AMD_PMC_PC		K8_PMC_PC
#define	AMD_PMC_EDGE		K8_PMC_EDGE
#define	AMD_PMC_OS		K8_PMC_OS
#define	AMD_PMC_USR		K8_PMC_USR

/* Individual unit-mask bits (MOESI cache-state qualifiers). */
#define	AMD_PMC_UNITMASK_M	K8_PMC_UNITMASK_M
#define	AMD_PMC_UNITMASK_O	K8_PMC_UNITMASK_O
#define	AMD_PMC_UNITMASK_E	K8_PMC_UNITMASK_E
#define	AMD_PMC_UNITMASK_S	K8_PMC_UNITMASK_S
#define	AMD_PMC_UNITMASK_I	K8_PMC_UNITMASK_I

#define	AMD_PMC_UNITMASK	K8_PMC_UNITMASK
#define	AMD_PMC_EVENTMASK	K8_PMC_EVENTMASK
#define	AMD_PMC_TO_UNITMASK(x)	K8_PMC_TO_UNITMASK(x)
#define	AMD_PMC_TO_EVENTMASK(x)	K8_PMC_TO_EVENTMASK(x)
#define	AMD_VALID_BITS		K8_VALID_BITS

/* Prefix used when naming the programmable counter rows. */
#define	AMD_PMC_CLASS_NAME	"K8-"

#else
#error	Unsupported architecture.
#endif
129ebccf1e3SJoseph Koshy 
/*
 * AMD K7 & K8 PMCs: the machine-independent descriptor 'base class'
 * plus the MSR addresses needed to program this counter row.
 */
struct amd_descr {
	struct pmc_descr pm_descr;  /* "base class" */
	uint32_t	pm_evsel;   /* address of EVSEL register */
	uint32_t	pm_perfctr; /* address of PERFCTR register */
};
136ebccf1e3SJoseph Koshy 
/*
 * Descriptors for the AMD_NPMCS hardware rows: row 0 is the read-only
 * 64-bit TSC, rows 1-4 are the 48-bit programmable K7/K8 counters.
 */
static const struct amd_descr amd_pmcdesc[AMD_NPMCS] =
{
    {
	.pm_descr =
	{
		.pd_name  = "TSC",
		.pd_class = PMC_CLASS_TSC,
		.pd_caps  = PMC_CAP_READ,
		.pd_width = 64
	},
	.pm_evsel   = MSR_TSC,
	.pm_perfctr = 0	/* unused */
    },

    {
	.pm_descr =
	{
		.pd_name  = AMD_PMC_CLASS_NAME "0",
		.pd_class = AMD_PMC_CLASS,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_0,
	.pm_perfctr = AMD_PMC_PERFCTR_0
    },
    {
	.pm_descr =
	{
		.pd_name  = AMD_PMC_CLASS_NAME "1",
		.pd_class = AMD_PMC_CLASS,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_1,
	.pm_perfctr = AMD_PMC_PERFCTR_1
    },
    {
	.pm_descr =
	{
		.pd_name  = AMD_PMC_CLASS_NAME "2",
		.pd_class = AMD_PMC_CLASS,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_2,
	.pm_perfctr = AMD_PMC_PERFCTR_2
    },
    {
	.pm_descr =
	{
		.pd_name  = AMD_PMC_CLASS_NAME "3",
		.pd_class = AMD_PMC_CLASS,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_3,
	.pm_perfctr = AMD_PMC_PERFCTR_3
    }
};
196ebccf1e3SJoseph Koshy 
/*
 * Entry mapping a pmc_event enum onto the hardware event-select code
 * and the set of unit-mask bits that may accompany that event.
 */
struct amd_event_code_map {
	enum pmc_event	pe_ev;	 /* enum value */
	uint8_t		pe_code; /* encoded event mask */
	uint8_t		pe_mask; /* bits allowed in unit mask */
};
202ebccf1e3SJoseph Koshy 
/*
 * Translation table from pmc_event values to hardware event-select
 * codes.  pe_mask lists the unit-mask bits an event accepts; a mask
 * of 0 means the event takes no unit-mask qualifier (see
 * amd_allocate_pmc(), which rejects any unit-mask bit outside
 * pe_mask).  Only the table half matching the build architecture is
 * compiled in.
 */
const struct amd_event_code_map amd_event_codes[] = {
#if	__i386__
	/* K7 events */
	{ PMC_EV_K7_DC_ACCESSES, 		0x40, 0 },
	{ PMC_EV_K7_DC_MISSES,			0x41, 0 },
	{ PMC_EV_K7_DC_REFILLS_FROM_L2,		0x42, K7_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_DC_REFILLS_FROM_SYSTEM,	0x43, K7_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_DC_WRITEBACKS,		0x44, K7_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 },
	{ PMC_EV_K7_L1_AND_L2_DTLB_MISSES,	0x46, 0 },
	{ PMC_EV_K7_MISALIGNED_REFERENCES,	0x47, 0 },

	{ PMC_EV_K7_IC_FETCHES,			0x80, 0 },
	{ PMC_EV_K7_IC_MISSES,			0x81, 0 },

	{ PMC_EV_K7_L1_ITLB_MISSES,		0x84, 0 },
	{ PMC_EV_K7_L1_L2_ITLB_MISSES,		0x85, 0 },

	{ PMC_EV_K7_RETIRED_INSTRUCTIONS,	0xC0, 0 },
	{ PMC_EV_K7_RETIRED_OPS,		0xC1, 0 },
	{ PMC_EV_K7_RETIRED_BRANCHES,		0xC2, 0 },
	{ PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 },
	{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES, 	0xC4, 0 },
	{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 },
	{ PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 },
	{ PMC_EV_K7_RETIRED_RESYNC_BRANCHES,	0xC7, 0 },
	{ PMC_EV_K7_INTERRUPTS_MASKED_CYCLES,	0xCD, 0 },
	{ PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 },
	{ PMC_EV_K7_HARDWARE_INTERRUPTS,	0xCF, 0 }
#endif

#if	__amd64__
	/* K8 events */
	{ PMC_EV_K8_FP_DISPATCHED_FPU_OPS,		0x00, 0x3F },
	{ PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED,	0x01, 0x00 },
	{ PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS,	0x02, 0x00 },

	{ PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD, 		0x20, 0x7F },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
	  						0x21, 0x00 },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
	{ PMC_EV_K8_LS_BUFFER2_FULL,			0x23, 0x00 },
	{ PMC_EV_K8_LS_LOCKED_OPERATION,		0x24, 0x07 },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL,	0x25, 0x00 },
	{ PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS,	0x26, 0x00 },
	{ PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS,	0x27, 0x00 },

	{ PMC_EV_K8_DC_ACCESS,				0x40, 0x00 },
	{ PMC_EV_K8_DC_MISS,				0x41, 0x00 },
	{ PMC_EV_K8_DC_REFILL_FROM_L2,			0x42, 0x1F },
	{ PMC_EV_K8_DC_REFILL_FROM_SYSTEM,		0x43, 0x1F },
	{ PMC_EV_K8_DC_COPYBACK,			0x44, 0x1F },
	{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT,	0x45, 0x00 },
	{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS,	0x46, 0x00 },
	{ PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE,	0x47, 0x00 },
	{ PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL,	0x48, 0x00 },
	{ PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
	{ PMC_EV_K8_DC_ONE_BIT_ECC_ERROR,		0x4A, 0x03 },
	{ PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
	{ PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS,	0x4C, 0x03 },

	{ PMC_EV_K8_BU_CPU_CLK_UNHALTED,		0x76, 0x00 },
	{ PMC_EV_K8_BU_INTERNAL_L2_REQUEST,		0x7D, 0x1F },
	{ PMC_EV_K8_BU_FILL_REQUEST_L2_MISS,		0x7E, 0x07 },
	{ PMC_EV_K8_BU_FILL_INTO_L2,			0x7F, 0x03 },

	{ PMC_EV_K8_IC_FETCH,				0x80, 0x00 },
	{ PMC_EV_K8_IC_MISS,				0x81, 0x00 },
	{ PMC_EV_K8_IC_REFILL_FROM_L2,			0x82, 0x00 },
	{ PMC_EV_K8_IC_REFILL_FROM_SYSTEM,		0x83, 0x00 },
	{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT,	0x84, 0x00 },
	{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS,	0x85, 0x00 },
	{ PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
	{ PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL,		0x87, 0x00 },
	{ PMC_EV_K8_IC_RETURN_STACK_HIT,		0x88, 0x00 },
	{ PMC_EV_K8_IC_RETURN_STACK_OVERFLOW,		0x89, 0x00 },

	{ PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS,	0xC0, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_UOPS,			0xC1, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_BRANCHES,		0xC2, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED,	0xC3, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES,		0xC4, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS,	0xC6, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_RESYNCS,			0xC7, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS,		0xC8, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
							0xCA, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS,	0xCB, 0x0F },
	{ PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
							0xCC, 0x07 },
	{ PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES,	0xCD, 0x00 },
	{ PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
	{ PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS,	0xCF, 0x00 },

	{ PMC_EV_K8_FR_DECODER_EMPTY,			0xD0, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALLS,			0xD1, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
							0xD2, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD,	0xD4, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
							0xD5, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
							0xD6, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL,	0xD7, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL,	0xD8, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
							0xD9, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
							0xDA, 0x00 },
	{ PMC_EV_K8_FR_FPU_EXCEPTIONS,			0xDB, 0x0F },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0,	0xDC, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1,	0xDD, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2,	0xDE, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3,	0xDF, 0x00 },

	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x7 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
							0xE2, 0x00 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND,	0xE3, 0x07 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
	{ PMC_EV_K8_NB_SIZED_COMMANDS,			0xEB, 0x7F },
	{ PMC_EV_K8_NB_PROBE_RESULT,			0xEC, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS0_BANDWIDTH,		0xF6, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS1_BANDWIDTH,		0xF7, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS2_BANDWIDTH,		0xF8, 0x0F }
#endif

};
333ebccf1e3SJoseph Koshy 
334ebccf1e3SJoseph Koshy const int amd_event_codes_size =
335ebccf1e3SJoseph Koshy 	sizeof(amd_event_codes) / sizeof(amd_event_codes[0]);
336ebccf1e3SJoseph Koshy 
337ebccf1e3SJoseph Koshy /*
338ebccf1e3SJoseph Koshy  * read a pmc register
339ebccf1e3SJoseph Koshy  */
340ebccf1e3SJoseph Koshy 
341ebccf1e3SJoseph Koshy static int
342ebccf1e3SJoseph Koshy amd_read_pmc(int cpu, int ri, pmc_value_t *v)
343ebccf1e3SJoseph Koshy {
344ebccf1e3SJoseph Koshy 	enum pmc_mode mode;
345ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
346ebccf1e3SJoseph Koshy 	struct pmc *pm;
347ebccf1e3SJoseph Koshy 	const struct pmc_hw *phw;
348ebccf1e3SJoseph Koshy 	pmc_value_t tmp;
349ebccf1e3SJoseph Koshy 
350ebccf1e3SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < mp_ncpus,
351ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
352ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
353ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
354ebccf1e3SJoseph Koshy 
355ebccf1e3SJoseph Koshy 	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
356ebccf1e3SJoseph Koshy 	pd  = &amd_pmcdesc[ri];
357ebccf1e3SJoseph Koshy 	pm  = phw->phw_pmc;
358ebccf1e3SJoseph Koshy 
359ebccf1e3SJoseph Koshy 	KASSERT(pm != NULL,
360ebccf1e3SJoseph Koshy 	    ("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
361ebccf1e3SJoseph Koshy 		cpu, ri));
362ebccf1e3SJoseph Koshy 
363ebccf1e3SJoseph Koshy 	mode = pm->pm_mode;
364ebccf1e3SJoseph Koshy 
365ebccf1e3SJoseph Koshy 	PMCDBG(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);
366ebccf1e3SJoseph Koshy 
367ebccf1e3SJoseph Koshy 	/* Reading the TSC is a special case */
368ebccf1e3SJoseph Koshy 	if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
369ebccf1e3SJoseph Koshy 		KASSERT(PMC_IS_COUNTING_MODE(mode),
370ebccf1e3SJoseph Koshy 		    ("[amd,%d] TSC counter in non-counting mode", __LINE__));
371ebccf1e3SJoseph Koshy 		*v = rdtsc();
372ebccf1e3SJoseph Koshy 		PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
373ebccf1e3SJoseph Koshy 		return 0;
374ebccf1e3SJoseph Koshy 	}
375ebccf1e3SJoseph Koshy 
376ebccf1e3SJoseph Koshy 	KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
377ebccf1e3SJoseph Koshy 	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
378ebccf1e3SJoseph Koshy 		pd->pm_descr.pd_class));
379ebccf1e3SJoseph Koshy 
380ebccf1e3SJoseph Koshy 	tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */
381ebccf1e3SJoseph Koshy 	if (PMC_IS_SAMPLING_MODE(mode))
382ebccf1e3SJoseph Koshy 		*v = -tmp;
383ebccf1e3SJoseph Koshy 	else
384ebccf1e3SJoseph Koshy 		*v = tmp;
385ebccf1e3SJoseph Koshy 
386ebccf1e3SJoseph Koshy 	PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
387ebccf1e3SJoseph Koshy 
388ebccf1e3SJoseph Koshy 	return 0;
389ebccf1e3SJoseph Koshy }
390ebccf1e3SJoseph Koshy 
391ebccf1e3SJoseph Koshy /*
392ebccf1e3SJoseph Koshy  * Write a PMC MSR.
393ebccf1e3SJoseph Koshy  */
394ebccf1e3SJoseph Koshy 
395ebccf1e3SJoseph Koshy static int
396ebccf1e3SJoseph Koshy amd_write_pmc(int cpu, int ri, pmc_value_t v)
397ebccf1e3SJoseph Koshy {
398ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
399ebccf1e3SJoseph Koshy 	struct pmc *pm;
400ebccf1e3SJoseph Koshy 	const struct pmc_hw *phw;
401ebccf1e3SJoseph Koshy 	enum pmc_mode mode;
402ebccf1e3SJoseph Koshy 
403ebccf1e3SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < mp_ncpus,
404ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
405ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
406ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
407ebccf1e3SJoseph Koshy 
408ebccf1e3SJoseph Koshy 	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
409ebccf1e3SJoseph Koshy 	pd  = &amd_pmcdesc[ri];
410ebccf1e3SJoseph Koshy 	pm  = phw->phw_pmc;
411ebccf1e3SJoseph Koshy 
412ebccf1e3SJoseph Koshy 	KASSERT(pm != NULL,
413ebccf1e3SJoseph Koshy 	    ("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
414ebccf1e3SJoseph Koshy 		cpu, ri));
415ebccf1e3SJoseph Koshy 
416ebccf1e3SJoseph Koshy 	mode = pm->pm_mode;
417ebccf1e3SJoseph Koshy 
418ebccf1e3SJoseph Koshy 	if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
419ebccf1e3SJoseph Koshy 		return 0;
420ebccf1e3SJoseph Koshy 
421ebccf1e3SJoseph Koshy 	KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
422ebccf1e3SJoseph Koshy 	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
423ebccf1e3SJoseph Koshy 		pd->pm_descr.pd_class));
424ebccf1e3SJoseph Koshy 
425ebccf1e3SJoseph Koshy 	/* use 2's complement of the count for sampling mode PMCs */
426ebccf1e3SJoseph Koshy 	if (PMC_IS_SAMPLING_MODE(mode))
427ebccf1e3SJoseph Koshy 		v = -v;
428ebccf1e3SJoseph Koshy 
429ebccf1e3SJoseph Koshy 	PMCDBG(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);
430ebccf1e3SJoseph Koshy 
431ebccf1e3SJoseph Koshy 	/* write the PMC value */
432ebccf1e3SJoseph Koshy 	wrmsr(pd->pm_perfctr, v);
433ebccf1e3SJoseph Koshy 	return 0;
434ebccf1e3SJoseph Koshy }
435ebccf1e3SJoseph Koshy 
436ebccf1e3SJoseph Koshy /*
437ebccf1e3SJoseph Koshy  * configure hardware pmc according to the configuration recorded in
438ebccf1e3SJoseph Koshy  * pmc 'pm'.
439ebccf1e3SJoseph Koshy  */
440ebccf1e3SJoseph Koshy 
441ebccf1e3SJoseph Koshy static int
442ebccf1e3SJoseph Koshy amd_config_pmc(int cpu, int ri, struct pmc *pm)
443ebccf1e3SJoseph Koshy {
444ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
445ebccf1e3SJoseph Koshy 
4466b8c8cd8SJoseph Koshy 	PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
4476b8c8cd8SJoseph Koshy 
448ebccf1e3SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < mp_ncpus,
449ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
450ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
451ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
452ebccf1e3SJoseph Koshy 
453ebccf1e3SJoseph Koshy 	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
454ebccf1e3SJoseph Koshy 
455ebccf1e3SJoseph Koshy 	KASSERT(pm == NULL || phw->phw_pmc == NULL,
4566b8c8cd8SJoseph Koshy 	    ("[amd,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
4576b8c8cd8SJoseph Koshy 		__LINE__, pm, phw->phw_pmc));
458ebccf1e3SJoseph Koshy 
459ebccf1e3SJoseph Koshy 	phw->phw_pmc = pm;
460ebccf1e3SJoseph Koshy 	return 0;
461ebccf1e3SJoseph Koshy }
462ebccf1e3SJoseph Koshy 
463ebccf1e3SJoseph Koshy /*
464ebccf1e3SJoseph Koshy  * Machine dependent actions taken during the context switch in of a
465ebccf1e3SJoseph Koshy  * thread.
466ebccf1e3SJoseph Koshy  */
467ebccf1e3SJoseph Koshy 
468ebccf1e3SJoseph Koshy static int
4696b8c8cd8SJoseph Koshy amd_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
470ebccf1e3SJoseph Koshy {
471ebccf1e3SJoseph Koshy 	(void) pc;
472ebccf1e3SJoseph Koshy 
4736b8c8cd8SJoseph Koshy 	PMCDBG(MDP,SWI,1, "pc=%p pp=%p enable-msr=%d", pc, pp,
4746b8c8cd8SJoseph Koshy 	    (pp->pp_flags & PMC_FLAG_ENABLE_MSR_ACCESS) != 0);
4756b8c8cd8SJoseph Koshy 
4766b8c8cd8SJoseph Koshy 	/* enable the RDPMC instruction if needed */
4776b8c8cd8SJoseph Koshy 	if (pp->pp_flags & PMC_FLAG_ENABLE_MSR_ACCESS)
478ebccf1e3SJoseph Koshy 		load_cr4(rcr4() | CR4_PCE);
4796b8c8cd8SJoseph Koshy 
480ebccf1e3SJoseph Koshy 	return 0;
481ebccf1e3SJoseph Koshy }
482ebccf1e3SJoseph Koshy 
483ebccf1e3SJoseph Koshy /*
484ebccf1e3SJoseph Koshy  * Machine dependent actions taken during the context switch out of a
485ebccf1e3SJoseph Koshy  * thread.
486ebccf1e3SJoseph Koshy  */
487ebccf1e3SJoseph Koshy 
488ebccf1e3SJoseph Koshy static int
4896b8c8cd8SJoseph Koshy amd_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
490ebccf1e3SJoseph Koshy {
491ebccf1e3SJoseph Koshy 	(void) pc;
4926b8c8cd8SJoseph Koshy 	(void) pp;		/* can be NULL */
493ebccf1e3SJoseph Koshy 
4946b8c8cd8SJoseph Koshy 	PMCDBG(MDP,SWO,1, "pc=%p pp=%p enable-msr=%d", pc, pp, pp ?
4956b8c8cd8SJoseph Koshy 	    (pp->pp_flags & PMC_FLAG_ENABLE_MSR_ACCESS) == 1 : 0);
4966b8c8cd8SJoseph Koshy 
4976b8c8cd8SJoseph Koshy 	/* always turn off the RDPMC instruction */
498ebccf1e3SJoseph Koshy 	load_cr4(rcr4() & ~CR4_PCE);
4996b8c8cd8SJoseph Koshy 
500ebccf1e3SJoseph Koshy 	return 0;
501ebccf1e3SJoseph Koshy }
502ebccf1e3SJoseph Koshy 
/*
 * Check if a given allocation is feasible and, if so, compute the
 * EVSEL register value implementing it (saved in pm_amd_evsel).
 * Returns 0 on success, EINVAL/EPERM on an infeasible request.
 */

static int
amd_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	int i;
	uint32_t allowed_unitmask, caps, config, unitmask;
	enum pmc_event pe;
	const struct pmc_descr *pd;

	(void) cpu;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row index %d", __LINE__, ri));

	pd = &amd_pmcdesc[ri].pm_descr;

	/* check class match */
	if (pd->pd_class != pm->pm_class)
		return EINVAL;

	caps = pm->pm_caps;

	PMCDBG(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps);

	/* the row must support every capability requested */
	if ((pd->pd_caps & caps) != caps)
		return EPERM;
	if (pd->pd_class == PMC_CLASS_TSC) {
		/* TSC's are always allocated in system-wide counting mode */
		if (a->pm_ev != PMC_EV_TSC_TSC ||
		    a->pm_mode != PMC_MODE_SC)
			return EINVAL;
		return 0;
	}

	KASSERT(pd->pd_class == AMD_PMC_CLASS,
	    ("[amd,%d] Unknown PMC class (%d)", __LINE__, pd->pd_class));

	pe = a->pm_ev;

	/* map ev to the correct event mask code */
	config = allowed_unitmask = 0;
	for (i = 0; i < amd_event_codes_size; i++)
		if (amd_event_codes[i].pe_ev == pe) {
			config =
			    AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
			allowed_unitmask =
			    AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
			break;
		}
	if (i == amd_event_codes_size)	/* event not in the table */
		return EINVAL;

	unitmask = a->pm_amd_config & AMD_PMC_UNITMASK;
	if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
		return EINVAL;

	/* fold the caller's qualifier and threshold bits into EVSEL */
	if (unitmask && (caps & PMC_CAP_QUALIFIER))
		config |= unitmask;

	if (caps & PMC_CAP_THRESHOLD)
		config |= a->pm_amd_config & AMD_PMC_COUNTERMASK;

	/* set at least one of the 'usr' or 'os' caps */
	if (caps & PMC_CAP_USER)
		config |= AMD_PMC_USR;
	if (caps & PMC_CAP_SYSTEM)
		config |= AMD_PMC_OS;
	if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
		config |= (AMD_PMC_USR|AMD_PMC_OS);

	if (caps & PMC_CAP_EDGE)
		config |= AMD_PMC_EDGE;
	if (caps & PMC_CAP_INVERT)
		config |= AMD_PMC_INVERT;
	if (caps & PMC_CAP_INTERRUPT)
		config |= AMD_PMC_INT;

	pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */

	PMCDBG(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, config);

	return 0;
}
592ebccf1e3SJoseph Koshy 
/*
 * Release machine dependent state associated with a PMC.  This is a
 * no-op on this architecture.
 */
598ebccf1e3SJoseph Koshy 
599ebccf1e3SJoseph Koshy /* ARGSUSED0 */
600ebccf1e3SJoseph Koshy static int
601ebccf1e3SJoseph Koshy amd_release_pmc(int cpu, int ri, struct pmc *pmc)
602ebccf1e3SJoseph Koshy {
603ebccf1e3SJoseph Koshy #if	DEBUG
604ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
605ebccf1e3SJoseph Koshy #endif
606ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
607ebccf1e3SJoseph Koshy 
608ebccf1e3SJoseph Koshy 	(void) pmc;
609ebccf1e3SJoseph Koshy 
610ebccf1e3SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < mp_ncpus,
611ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
612ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
613ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
614ebccf1e3SJoseph Koshy 
615ebccf1e3SJoseph Koshy 	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
616ebccf1e3SJoseph Koshy 
617ebccf1e3SJoseph Koshy 	KASSERT(phw->phw_pmc == NULL,
618ebccf1e3SJoseph Koshy 	    ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
619ebccf1e3SJoseph Koshy 
620ebccf1e3SJoseph Koshy #if 	DEBUG
621ebccf1e3SJoseph Koshy 	pd = &amd_pmcdesc[ri];
622ebccf1e3SJoseph Koshy 	if (pd->pm_descr.pd_class == AMD_PMC_CLASS)
623ebccf1e3SJoseph Koshy 		KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
624ebccf1e3SJoseph Koshy 		    ("[amd,%d] PMC %d released while active", __LINE__, ri));
625ebccf1e3SJoseph Koshy #endif
626ebccf1e3SJoseph Koshy 
627ebccf1e3SJoseph Koshy 	return 0;
628ebccf1e3SJoseph Koshy }
629ebccf1e3SJoseph Koshy 
630ebccf1e3SJoseph Koshy /*
631ebccf1e3SJoseph Koshy  * start a PMC.
632ebccf1e3SJoseph Koshy  */
633ebccf1e3SJoseph Koshy 
634ebccf1e3SJoseph Koshy static int
635ebccf1e3SJoseph Koshy amd_start_pmc(int cpu, int ri)
636ebccf1e3SJoseph Koshy {
637ebccf1e3SJoseph Koshy 	uint32_t config;
638ebccf1e3SJoseph Koshy 	struct pmc *pm;
639ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
640ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
641ebccf1e3SJoseph Koshy 
642ebccf1e3SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < mp_ncpus,
643ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
644ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
645ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
646ebccf1e3SJoseph Koshy 
647ebccf1e3SJoseph Koshy 	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
648ebccf1e3SJoseph Koshy 	pm  = phw->phw_pmc;
649ebccf1e3SJoseph Koshy 	pd = &amd_pmcdesc[ri];
650ebccf1e3SJoseph Koshy 
651ebccf1e3SJoseph Koshy 	KASSERT(pm != NULL,
652ebccf1e3SJoseph Koshy 	    ("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
653ebccf1e3SJoseph Koshy 		cpu, ri));
654ebccf1e3SJoseph Koshy 
655ebccf1e3SJoseph Koshy 	PMCDBG(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);
656ebccf1e3SJoseph Koshy 
657ebccf1e3SJoseph Koshy 	if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
658ebccf1e3SJoseph Koshy 		return 0;	/* TSCs are always running */
659ebccf1e3SJoseph Koshy 
660ebccf1e3SJoseph Koshy 	KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
661ebccf1e3SJoseph Koshy 	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
662ebccf1e3SJoseph Koshy 		pd->pm_descr.pd_class));
663ebccf1e3SJoseph Koshy 
664ebccf1e3SJoseph Koshy 	KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
665ebccf1e3SJoseph Koshy 	    ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
666ebccf1e3SJoseph Koshy 	    ri, cpu, pd->pm_descr.pd_name));
667ebccf1e3SJoseph Koshy 
668ebccf1e3SJoseph Koshy 	/* turn on the PMC ENABLE bit */
669ebccf1e3SJoseph Koshy 	config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;
670ebccf1e3SJoseph Koshy 
671ebccf1e3SJoseph Koshy 	PMCDBG(MDP,STA,2,"amd-start config=0x%x", config);
672ebccf1e3SJoseph Koshy 
673ebccf1e3SJoseph Koshy 	wrmsr(pd->pm_evsel, config);
674ebccf1e3SJoseph Koshy 	return 0;
675ebccf1e3SJoseph Koshy }
676ebccf1e3SJoseph Koshy 
677ebccf1e3SJoseph Koshy /*
678ebccf1e3SJoseph Koshy  * Stop a PMC.
679ebccf1e3SJoseph Koshy  */
680ebccf1e3SJoseph Koshy 
681ebccf1e3SJoseph Koshy static int
682ebccf1e3SJoseph Koshy amd_stop_pmc(int cpu, int ri)
683ebccf1e3SJoseph Koshy {
684ebccf1e3SJoseph Koshy 	struct pmc *pm;
685ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
686ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
687ebccf1e3SJoseph Koshy 	uint64_t config;
688ebccf1e3SJoseph Koshy 
689ebccf1e3SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < mp_ncpus,
690ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
691ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
692ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
693ebccf1e3SJoseph Koshy 
694ebccf1e3SJoseph Koshy 	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
695ebccf1e3SJoseph Koshy 	pm  = phw->phw_pmc;
696ebccf1e3SJoseph Koshy 	pd  = &amd_pmcdesc[ri];
697ebccf1e3SJoseph Koshy 
698ebccf1e3SJoseph Koshy 	KASSERT(pm != NULL,
699ebccf1e3SJoseph Koshy 	    ("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
700ebccf1e3SJoseph Koshy 		cpu, ri));
701ebccf1e3SJoseph Koshy 
702ebccf1e3SJoseph Koshy 	/* can't stop a TSC */
703ebccf1e3SJoseph Koshy 	if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
704ebccf1e3SJoseph Koshy 		return 0;
705ebccf1e3SJoseph Koshy 
706ebccf1e3SJoseph Koshy 	KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
707ebccf1e3SJoseph Koshy 	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
708ebccf1e3SJoseph Koshy 		pd->pm_descr.pd_class));
709ebccf1e3SJoseph Koshy 
710ebccf1e3SJoseph Koshy 	KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
711ebccf1e3SJoseph Koshy 	    ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
712ebccf1e3SJoseph Koshy 		__LINE__, ri, cpu, pd->pm_descr.pd_name));
713ebccf1e3SJoseph Koshy 
714ebccf1e3SJoseph Koshy 	PMCDBG(MDP,STO,1,"amd-stop ri=%d", ri);
715ebccf1e3SJoseph Koshy 
716ebccf1e3SJoseph Koshy 	/* turn off the PMC ENABLE bit */
717ebccf1e3SJoseph Koshy 	config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;
718ebccf1e3SJoseph Koshy 	wrmsr(pd->pm_evsel, config);
719ebccf1e3SJoseph Koshy 	return 0;
720ebccf1e3SJoseph Koshy }
721ebccf1e3SJoseph Koshy 
/*
 * Interrupt handler.  This function needs to return '1' if the
 * interrupt was caused by this CPU's PMCs or '0' otherwise.  It is
 * not allowed to sleep or do anything a 'fast' interrupt handler is
 * not allowed to do.
 */
728ebccf1e3SJoseph Koshy 
729ebccf1e3SJoseph Koshy static int
730ebccf1e3SJoseph Koshy amd_intr(int cpu, uintptr_t eip)
731ebccf1e3SJoseph Koshy {
732ebccf1e3SJoseph Koshy 	int i, retval;
733ebccf1e3SJoseph Koshy 	enum pmc_mode mode;
734ebccf1e3SJoseph Koshy 	uint32_t perfctr;
735ebccf1e3SJoseph Koshy 	struct pmc *pm;
736ebccf1e3SJoseph Koshy 	struct pmc_cpu *pc;
737ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
738ebccf1e3SJoseph Koshy 
739ebccf1e3SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < mp_ncpus,
740ebccf1e3SJoseph Koshy 	    ("[amd,%d] out of range CPU %d", __LINE__, cpu));
741ebccf1e3SJoseph Koshy 
742ebccf1e3SJoseph Koshy 	retval = 0;
743ebccf1e3SJoseph Koshy 
744ebccf1e3SJoseph Koshy 	pc = pmc_pcpu[cpu];
745ebccf1e3SJoseph Koshy 
746ebccf1e3SJoseph Koshy 	/*
747ebccf1e3SJoseph Koshy 	 * look for all PMCs that have interrupted:
748ebccf1e3SJoseph Koshy 	 * - skip over the TSC [PMC#0]
749ebccf1e3SJoseph Koshy 	 * - look for a PMC with a valid 'struct pmc' association
750ebccf1e3SJoseph Koshy 	 * - look for a PMC in (a) sampling mode and (b) which has
751ebccf1e3SJoseph Koshy 	 *   overflowed.  If found, we update the process's
752ebccf1e3SJoseph Koshy 	 *   histogram or send it a profiling signal by calling
753ebccf1e3SJoseph Koshy 	 *   the appropriate helper function.
754ebccf1e3SJoseph Koshy 	 */
755ebccf1e3SJoseph Koshy 
756ebccf1e3SJoseph Koshy 	for (i = 1; i < AMD_NPMCS; i++) {
757ebccf1e3SJoseph Koshy 
758ebccf1e3SJoseph Koshy 		phw = pc->pc_hwpmcs[i];
759ebccf1e3SJoseph Koshy 		perfctr = amd_pmcdesc[i].pm_perfctr;
760ebccf1e3SJoseph Koshy 		KASSERT(phw != NULL, ("[amd,%d] null PHW pointer", __LINE__));
761ebccf1e3SJoseph Koshy 
762ebccf1e3SJoseph Koshy 		if ((pm = phw->phw_pmc) == NULL ||
763ebccf1e3SJoseph Koshy 		    pm->pm_state != PMC_STATE_RUNNING) {
764ebccf1e3SJoseph Koshy 			atomic_add_int(&pmc_stats.pm_intr_ignored, 1);
765ebccf1e3SJoseph Koshy 			continue;
766ebccf1e3SJoseph Koshy 		}
767ebccf1e3SJoseph Koshy 
768ebccf1e3SJoseph Koshy 		mode = pm->pm_mode;
769ebccf1e3SJoseph Koshy 		if (PMC_IS_SAMPLING_MODE(mode) &&
770ebccf1e3SJoseph Koshy 		    AMD_PMC_HAS_OVERFLOWED(perfctr)) {
771ebccf1e3SJoseph Koshy 			atomic_add_int(&pmc_stats.pm_intr_processed, 1);
772ebccf1e3SJoseph Koshy 			if (PMC_IS_SYSTEM_MODE(mode))
773ebccf1e3SJoseph Koshy 				pmc_update_histogram(phw, eip);
774ebccf1e3SJoseph Koshy 			else if (PMC_IS_VIRTUAL_MODE(mode))
775ebccf1e3SJoseph Koshy 				pmc_send_signal(pm);
776ebccf1e3SJoseph Koshy 			retval = 1;
777ebccf1e3SJoseph Koshy 		}
778ebccf1e3SJoseph Koshy 	}
779ebccf1e3SJoseph Koshy 	return retval;
780ebccf1e3SJoseph Koshy }
781ebccf1e3SJoseph Koshy 
782ebccf1e3SJoseph Koshy /*
783ebccf1e3SJoseph Koshy  * describe a PMC
784ebccf1e3SJoseph Koshy  */
785ebccf1e3SJoseph Koshy static int
786ebccf1e3SJoseph Koshy amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
787ebccf1e3SJoseph Koshy {
788ebccf1e3SJoseph Koshy 	int error;
789ebccf1e3SJoseph Koshy 	size_t copied;
790ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
791ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
792ebccf1e3SJoseph Koshy 
793ebccf1e3SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < mp_ncpus,
794ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU %d", __LINE__, cpu));
795ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
796ebccf1e3SJoseph Koshy 	    ("[amd,%d] row-index %d out of range", __LINE__, ri));
797ebccf1e3SJoseph Koshy 
798ebccf1e3SJoseph Koshy 	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
799ebccf1e3SJoseph Koshy 	pd  = &amd_pmcdesc[ri];
800ebccf1e3SJoseph Koshy 
801ebccf1e3SJoseph Koshy 	if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
802ebccf1e3SJoseph Koshy 		 PMC_NAME_MAX, &copied)) != 0)
803ebccf1e3SJoseph Koshy 		return error;
804ebccf1e3SJoseph Koshy 
805ebccf1e3SJoseph Koshy 	pi->pm_class = pd->pm_descr.pd_class;
806ebccf1e3SJoseph Koshy 	pi->pm_caps  = pd->pm_descr.pd_caps;
807ebccf1e3SJoseph Koshy 	pi->pm_width = pd->pm_descr.pd_width;
808ebccf1e3SJoseph Koshy 
809ebccf1e3SJoseph Koshy 	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
810ebccf1e3SJoseph Koshy 		pi->pm_enabled = TRUE;
811ebccf1e3SJoseph Koshy 		*ppmc          = phw->phw_pmc;
812ebccf1e3SJoseph Koshy 	} else {
813ebccf1e3SJoseph Koshy 		pi->pm_enabled = FALSE;
814ebccf1e3SJoseph Koshy 		*ppmc          = NULL;
815ebccf1e3SJoseph Koshy 	}
816ebccf1e3SJoseph Koshy 
817ebccf1e3SJoseph Koshy 	return 0;
818ebccf1e3SJoseph Koshy }
819ebccf1e3SJoseph Koshy 
820ebccf1e3SJoseph Koshy /*
821ebccf1e3SJoseph Koshy  * i386 specific entry points
822ebccf1e3SJoseph Koshy  */
823ebccf1e3SJoseph Koshy 
824ebccf1e3SJoseph Koshy /*
825ebccf1e3SJoseph Koshy  * return the MSR address of the given PMC.
826ebccf1e3SJoseph Koshy  */
827ebccf1e3SJoseph Koshy 
828ebccf1e3SJoseph Koshy static int
829ebccf1e3SJoseph Koshy amd_get_msr(int ri, uint32_t *msr)
830ebccf1e3SJoseph Koshy {
831ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
832ebccf1e3SJoseph Koshy 	    ("[amd,%d] ri %d out of range", __LINE__, ri));
833ebccf1e3SJoseph Koshy 
8346b8c8cd8SJoseph Koshy 	*msr = amd_pmcdesc[ri].pm_perfctr - AMD_PMC_PERFCTR_0;
835ebccf1e3SJoseph Koshy 	return 0;
836ebccf1e3SJoseph Koshy }
837ebccf1e3SJoseph Koshy 
838ebccf1e3SJoseph Koshy /*
839ebccf1e3SJoseph Koshy  * processor dependent initialization.
840ebccf1e3SJoseph Koshy  */
841ebccf1e3SJoseph Koshy 
842ebccf1e3SJoseph Koshy /*
843ebccf1e3SJoseph Koshy  * Per-processor data structure
844ebccf1e3SJoseph Koshy  *
845ebccf1e3SJoseph Koshy  * [common stuff]
846ebccf1e3SJoseph Koshy  * [5 struct pmc_hw pointers]
847ebccf1e3SJoseph Koshy  * [5 struct pmc_hw structures]
848ebccf1e3SJoseph Koshy  */
849ebccf1e3SJoseph Koshy 
struct amd_cpu {
	struct pmc_cpu	pc_common;	/* common state; first member, since
					 * amd_init casts this struct to a
					 * struct pmc_cpu * */
	struct pmc_hw	*pc_hwpmcs[AMD_NPMCS];	/* pointers into pc_amdpmcs */
	struct pmc_hw	pc_amdpmcs[AMD_NPMCS];	/* per-row hardware state */
};
855ebccf1e3SJoseph Koshy 
856ebccf1e3SJoseph Koshy 
857ebccf1e3SJoseph Koshy static int
858ebccf1e3SJoseph Koshy amd_init(int cpu)
859ebccf1e3SJoseph Koshy {
860ebccf1e3SJoseph Koshy 	int n;
861ebccf1e3SJoseph Koshy 	struct amd_cpu *pcs;
862ebccf1e3SJoseph Koshy 	struct pmc_hw  *phw;
863ebccf1e3SJoseph Koshy 
864ebccf1e3SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < mp_ncpus,
865ebccf1e3SJoseph Koshy 	    ("[amd,%d] insane cpu number %d", __LINE__, cpu));
866ebccf1e3SJoseph Koshy 
867ebccf1e3SJoseph Koshy 	PMCDBG(MDP,INI,1,"amd-init cpu=%d", cpu);
868ebccf1e3SJoseph Koshy 
869ebccf1e3SJoseph Koshy 	MALLOC(pcs, struct amd_cpu *, sizeof(struct amd_cpu), M_PMC,
870ebccf1e3SJoseph Koshy 	    M_WAITOK|M_ZERO);
871ebccf1e3SJoseph Koshy 
872ebccf1e3SJoseph Koshy 	if (pcs == NULL)
873ebccf1e3SJoseph Koshy 		return ENOMEM;
874ebccf1e3SJoseph Koshy 
875ebccf1e3SJoseph Koshy 	phw = &pcs->pc_amdpmcs[0];
876ebccf1e3SJoseph Koshy 
877ebccf1e3SJoseph Koshy 	/*
878ebccf1e3SJoseph Koshy 	 * Initialize the per-cpu mutex and set the content of the
879ebccf1e3SJoseph Koshy 	 * hardware descriptors to a known state.
880ebccf1e3SJoseph Koshy 	 */
881ebccf1e3SJoseph Koshy 
882ebccf1e3SJoseph Koshy 	for (n = 0; n < AMD_NPMCS; n++, phw++) {
883ebccf1e3SJoseph Koshy 		phw->phw_state 	  = PMC_PHW_FLAG_IS_ENABLED |
884ebccf1e3SJoseph Koshy 		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
885ebccf1e3SJoseph Koshy 		phw->phw_pmc	  = NULL;
886ebccf1e3SJoseph Koshy 		pcs->pc_hwpmcs[n] = phw;
887ebccf1e3SJoseph Koshy 	}
888ebccf1e3SJoseph Koshy 
889ebccf1e3SJoseph Koshy 	/* Mark the TSC as shareable */
890ebccf1e3SJoseph Koshy 	pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;
891ebccf1e3SJoseph Koshy 
892ebccf1e3SJoseph Koshy 	pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;
893ebccf1e3SJoseph Koshy 
894ebccf1e3SJoseph Koshy 	return 0;
895ebccf1e3SJoseph Koshy }
896ebccf1e3SJoseph Koshy 
897ebccf1e3SJoseph Koshy 
898ebccf1e3SJoseph Koshy /*
899ebccf1e3SJoseph Koshy  * processor dependent cleanup prior to the KLD
900ebccf1e3SJoseph Koshy  * being unloaded
901ebccf1e3SJoseph Koshy  */
902ebccf1e3SJoseph Koshy 
903ebccf1e3SJoseph Koshy static int
904ebccf1e3SJoseph Koshy amd_cleanup(int cpu)
905ebccf1e3SJoseph Koshy {
906ebccf1e3SJoseph Koshy 	int i;
907ebccf1e3SJoseph Koshy 	uint32_t evsel;
908ebccf1e3SJoseph Koshy 	struct pmc_cpu *pcs;
909ebccf1e3SJoseph Koshy 
910ebccf1e3SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < mp_ncpus,
911ebccf1e3SJoseph Koshy 	    ("[amd,%d] insane cpu number (%d)", __LINE__, cpu));
912ebccf1e3SJoseph Koshy 
913ebccf1e3SJoseph Koshy 	PMCDBG(MDP,INI,1,"amd-cleanup cpu=%d", cpu);
914ebccf1e3SJoseph Koshy 
915ebccf1e3SJoseph Koshy 	/*
916ebccf1e3SJoseph Koshy 	 * First, turn off all PMCs on this CPU.
917ebccf1e3SJoseph Koshy 	 */
918ebccf1e3SJoseph Koshy 
919ebccf1e3SJoseph Koshy 	for (i = 0; i < 4; i++) { /* XXX this loop is now not needed */
920ebccf1e3SJoseph Koshy 		evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
921ebccf1e3SJoseph Koshy 		evsel &= ~AMD_PMC_ENABLE;
922ebccf1e3SJoseph Koshy 		wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
923ebccf1e3SJoseph Koshy 	}
924ebccf1e3SJoseph Koshy 
925ebccf1e3SJoseph Koshy 	/*
926ebccf1e3SJoseph Koshy 	 * Next, free up allocated space.
927ebccf1e3SJoseph Koshy 	 */
928ebccf1e3SJoseph Koshy 
929ebccf1e3SJoseph Koshy 	pcs = pmc_pcpu[cpu];
930ebccf1e3SJoseph Koshy 
931ebccf1e3SJoseph Koshy #if	DEBUG
932ebccf1e3SJoseph Koshy 	/* check the TSC */
933ebccf1e3SJoseph Koshy 	KASSERT(pcs->pc_hwpmcs[0]->phw_pmc == NULL,
934ebccf1e3SJoseph Koshy 	    ("[amd,%d] CPU%d,PMC0 still in use", __LINE__, cpu));
935ebccf1e3SJoseph Koshy 	for (i = 1; i < AMD_NPMCS; i++) {
936ebccf1e3SJoseph Koshy 		KASSERT(pcs->pc_hwpmcs[i]->phw_pmc == NULL,
937ebccf1e3SJoseph Koshy 		    ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
938ebccf1e3SJoseph Koshy 		KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + (i-1)),
939ebccf1e3SJoseph Koshy 		    ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
940ebccf1e3SJoseph Koshy 	}
941ebccf1e3SJoseph Koshy #endif
942ebccf1e3SJoseph Koshy 	KASSERT(pcs != NULL,
943ebccf1e3SJoseph Koshy 	    ("[amd,%d] null per-cpu state pointer (cpu%d)", __LINE__, cpu));
944ebccf1e3SJoseph Koshy 
945ebccf1e3SJoseph Koshy 	pmc_pcpu[cpu] = NULL;
946ebccf1e3SJoseph Koshy 	FREE(pcs, M_PMC);
947ebccf1e3SJoseph Koshy 	return 0;
948ebccf1e3SJoseph Koshy }
949ebccf1e3SJoseph Koshy 
950ebccf1e3SJoseph Koshy /*
951ebccf1e3SJoseph Koshy  * Initialize ourselves.
952ebccf1e3SJoseph Koshy  */
953ebccf1e3SJoseph Koshy 
954ebccf1e3SJoseph Koshy struct pmc_mdep *
955ebccf1e3SJoseph Koshy pmc_amd_initialize(void)
956ebccf1e3SJoseph Koshy {
957ebccf1e3SJoseph Koshy 
958ebccf1e3SJoseph Koshy 	struct pmc_mdep *pmc_mdep;
959ebccf1e3SJoseph Koshy 
960ebccf1e3SJoseph Koshy 	/* The presence of hardware performance counters on the AMD
961ebccf1e3SJoseph Koshy 	   Athlon, Duron or later processors, is _not_ indicated by
962ebccf1e3SJoseph Koshy 	   any of the processor feature flags set by the 'CPUID'
963ebccf1e3SJoseph Koshy 	   instruction, so we only check the 'instruction family'
964ebccf1e3SJoseph Koshy 	   field returned by CPUID for instruction family >= 6. This
965ebccf1e3SJoseph Koshy 	   test needs to be be refined. */
966ebccf1e3SJoseph Koshy 
967ebccf1e3SJoseph Koshy 	if ((cpu_id & 0xF00) < 0x600)
968ebccf1e3SJoseph Koshy 		return NULL;
969ebccf1e3SJoseph Koshy 
970ebccf1e3SJoseph Koshy 	MALLOC(pmc_mdep, struct pmc_mdep *, sizeof(struct pmc_mdep),
971ebccf1e3SJoseph Koshy 	    M_PMC, M_WAITOK|M_ZERO);
972ebccf1e3SJoseph Koshy 
973ebccf1e3SJoseph Koshy #if	__i386__
974ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_cputype	   = PMC_CPU_AMD_K7;
975ebccf1e3SJoseph Koshy #elif	__amd64__
976ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_cputype	   = PMC_CPU_AMD_K8;
977ebccf1e3SJoseph Koshy #else
978ebccf1e3SJoseph Koshy #error	Unknown AMD CPU type.
979ebccf1e3SJoseph Koshy #endif
980ebccf1e3SJoseph Koshy 
981ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_npmc 	   = AMD_NPMCS;
982ebccf1e3SJoseph Koshy 
983ebccf1e3SJoseph Koshy 	/* this processor has two classes of usable PMCs */
984ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_nclass       = 2;
985ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_classes[0]   = PMC_CLASS_TSC;
986ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_classes[1]   = AMD_PMC_CLASS;
987ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_nclasspmcs[0] = 1;
988ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_nclasspmcs[1] = (AMD_NPMCS-1);
989ebccf1e3SJoseph Koshy 
990ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_init    	   = amd_init;
991ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_cleanup 	   = amd_cleanup;
992ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_switch_in    = amd_switch_in;
993ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_switch_out   = amd_switch_out;
994ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_read_pmc 	   = amd_read_pmc;
995ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_write_pmc    = amd_write_pmc;
996ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_config_pmc   = amd_config_pmc;
997ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_allocate_pmc = amd_allocate_pmc;
998ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_release_pmc  = amd_release_pmc;
999ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_start_pmc    = amd_start_pmc;
1000ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_stop_pmc     = amd_stop_pmc;
1001ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_intr	   = amd_intr;
1002ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_describe     = amd_describe;
1003ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_get_msr  	   = amd_get_msr; /* i386 */
1004ebccf1e3SJoseph Koshy 
1005ebccf1e3SJoseph Koshy 	PMCDBG(MDP,INI,0,"%s","amd-initialize");
1006ebccf1e3SJoseph Koshy 
1007ebccf1e3SJoseph Koshy 	return pmc_mdep;
1008ebccf1e3SJoseph Koshy }
1009