xref: /freebsd/sys/dev/hwpmc/hwpmc_amd.c (revision ebccf1e3a6b11b97cbf5f813dd76636e892a9035)
/*-
 * Copyright (c) 2003-2005 Joseph Koshy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Support for the AMD K7 and later processors */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/md_var.h>
#include <machine/pmc_mdep.h>
#include <machine/specialreg.h>

/* AMD K7 and K8 PMCs */

#define	AMD_PMC_EVSEL_0		0xC0010000
#define	AMD_PMC_EVSEL_1		0xC0010001
#define	AMD_PMC_EVSEL_2		0xC0010002
#define	AMD_PMC_EVSEL_3		0xC0010003

#define	AMD_PMC_PERFCTR_0	0xC0010004
#define	AMD_PMC_PERFCTR_1	0xC0010005
#define	AMD_PMC_PERFCTR_2	0xC0010006
#define	AMD_PMC_PERFCTR_3	0xC0010007

#define	K7_VALID_EVENT_CODE(c) (((c) >= 0x40 && (c) <= 0x47) ||		\
	((c) >= 0x80 && (c) <= 0x85) || ((c) >= 0xC0 && (c) <= 0xC7) ||	\
	((c) >= 0xCD && (c) <= 0xCF))

#define AMD_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
	PMC_CAP_SYSTEM | PMC_CAP_EDGE | PMC_CAP_THRESHOLD | \
	PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INVERT | PMC_CAP_QUALIFIER)

/* reserved bits include bit 21 and the top two bits of the unit mask */
#define K7_PMC_RESERVED ((1 << 21) | (3 << 13))

#define	K8_PMC_RESERVED (1 << 21)

#define AMD_PMC_IS_STOPPED(evsel) ((rdmsr((evsel)) & AMD_PMC_ENABLE) == 0)
#define AMD_PMC_HAS_OVERFLOWED(pmc) ((rdpmc(pmc) & (1ULL << 47)) == 0)
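
/*
 * A PMC is considered stopped when the ENABLE bit in its EVSEL MSR is
 * clear.  The overflow test relies on sampling PMCs being loaded with
 * the two's complement of their sampling count (see amd_write_pmc()
 * below): bit 47 of the 48-bit counter starts out set and clears once
 * the counter wraps, which is the condition AMD_PMC_HAS_OVERFLOWED()
 * checks for.
 */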

#if	__i386__
#define	AMD_NPMCS		K7_NPMCS
#define	AMD_PMC_CLASS		PMC_CLASS_K7
#define	AMD_PMC_COUNTERMASK	K7_PMC_COUNTERMASK
#define	AMD_PMC_TO_COUNTER(x)	K7_PMC_TO_COUNTER(x)
#define	AMD_PMC_INVERT		K7_PMC_INVERT
#define	AMD_PMC_ENABLE		K7_PMC_ENABLE
#define	AMD_PMC_INT		K7_PMC_INT
#define	AMD_PMC_PC		K7_PMC_PC
#define	AMD_PMC_EDGE		K7_PMC_EDGE
#define	AMD_PMC_OS		K7_PMC_OS
#define	AMD_PMC_USR		K7_PMC_USR

#define	AMD_PMC_UNITMASK_M	K7_PMC_UNITMASK_M
#define	AMD_PMC_UNITMASK_O	K7_PMC_UNITMASK_O
#define	AMD_PMC_UNITMASK_E	K7_PMC_UNITMASK_E
#define	AMD_PMC_UNITMASK_S	K7_PMC_UNITMASK_S
#define	AMD_PMC_UNITMASK_I	K7_PMC_UNITMASK_I

#define	AMD_PMC_UNITMASK	K7_PMC_UNITMASK
#define	AMD_PMC_EVENTMASK	K7_PMC_EVENTMASK
#define	AMD_PMC_TO_UNITMASK(x)	K7_PMC_TO_UNITMASK(x)
#define	AMD_PMC_TO_EVENTMASK(x)	K7_PMC_TO_EVENTMASK(x)
#define	AMD_VALID_BITS		K7_VALID_BITS

#define	AMD_PMC_CLASS_NAME	"K7-"

#elif	__amd64__

#define	AMD_NPMCS		K8_NPMCS
#define	AMD_PMC_CLASS		PMC_CLASS_K8
#define	AMD_PMC_COUNTERMASK	K8_PMC_COUNTERMASK
#define	AMD_PMC_TO_COUNTER(x)	K8_PMC_TO_COUNTER(x)
#define	AMD_PMC_INVERT		K8_PMC_INVERT
#define	AMD_PMC_ENABLE		K8_PMC_ENABLE
#define	AMD_PMC_INT		K8_PMC_INT
#define	AMD_PMC_PC		K8_PMC_PC
#define	AMD_PMC_EDGE		K8_PMC_EDGE
#define	AMD_PMC_OS		K8_PMC_OS
#define	AMD_PMC_USR		K8_PMC_USR

#define	AMD_PMC_UNITMASK_M	K8_PMC_UNITMASK_M
#define	AMD_PMC_UNITMASK_O	K8_PMC_UNITMASK_O
#define	AMD_PMC_UNITMASK_E	K8_PMC_UNITMASK_E
#define	AMD_PMC_UNITMASK_S	K8_PMC_UNITMASK_S
#define	AMD_PMC_UNITMASK_I	K8_PMC_UNITMASK_I

#define	AMD_PMC_UNITMASK	K8_PMC_UNITMASK
#define	AMD_PMC_EVENTMASK	K8_PMC_EVENTMASK
#define	AMD_PMC_TO_UNITMASK(x)	K8_PMC_TO_UNITMASK(x)
#define	AMD_PMC_TO_EVENTMASK(x)	K8_PMC_TO_EVENTMASK(x)
#define	AMD_VALID_BITS		K8_VALID_BITS

#define	AMD_PMC_CLASS_NAME	"K8-"

#else
#error	Unsupported architecture.
#endif
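
/*
 * The AMD_* aliases above resolve to the K7 definitions on i386 and to
 * the K8 definitions on amd64; the rest of this file is written only in
 * terms of the AMD_* names.
 */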

/* AMD K7 & K8 PMCs */
struct amd_descr {
	struct pmc_descr pm_descr;  /* "base class" */
	uint32_t	pm_evsel;   /* address of EVSEL register */
	uint32_t	pm_perfctr; /* address of PERFCTR register */
};

static const struct amd_descr amd_pmcdesc[AMD_NPMCS] =
{
    {
	.pm_descr =
	{
		.pd_name  = "TSC",
		.pd_class = PMC_CLASS_TSC,
		.pd_caps  = PMC_CAP_READ,
		.pd_width = 64
	},
	.pm_evsel   = MSR_TSC,
	.pm_perfctr = 0	/* unused */
    },

    {
	.pm_descr =
	{
		.pd_name  = AMD_PMC_CLASS_NAME "0",
		.pd_class = AMD_PMC_CLASS,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_0,
	.pm_perfctr = AMD_PMC_PERFCTR_0
    },
    {
	.pm_descr =
	{
		.pd_name  = AMD_PMC_CLASS_NAME "1",
		.pd_class = AMD_PMC_CLASS,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_1,
	.pm_perfctr = AMD_PMC_PERFCTR_1
    },
    {
	.pm_descr =
	{
		.pd_name  = AMD_PMC_CLASS_NAME "2",
		.pd_class = AMD_PMC_CLASS,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_2,
	.pm_perfctr = AMD_PMC_PERFCTR_2
    },
    {
	.pm_descr =
	{
		.pd_name  = AMD_PMC_CLASS_NAME "3",
		.pd_class = AMD_PMC_CLASS,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_3,
	.pm_perfctr = AMD_PMC_PERFCTR_3
    }
};
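
/*
 * Row 0 of the table above is the TSC (read-only, 64 bits wide); rows 1
 * through 4 describe the programmable 48-bit counters and their
 * EVSEL/PERFCTR MSR pairs.  The row index ('ri') used throughout this
 * file is an index into this table.
 */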

struct amd_event_code_map {
	enum pmc_event	pe_ev;	 /* enum value */
	uint8_t		pe_code; /* encoded event mask */
	uint8_t		pe_mask; /* bits allowed in unit mask */
};

const struct amd_event_code_map amd_event_codes[] = {
#if	__i386__
	{ PMC_EV_K7_DC_ACCESSES, 		0x40, 0 },
	{ PMC_EV_K7_DC_MISSES,			0x41, 0 },
	{ PMC_EV_K7_DC_REFILLS_FROM_L2,		0x42, K7_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_DC_REFILLS_FROM_SYSTEM,	0x43, K7_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_DC_WRITEBACKS,		0x44, K7_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 },
	{ PMC_EV_K7_L1_AND_L2_DTLB_MISSES,	0x46, 0 },
	{ PMC_EV_K7_MISALIGNED_REFERENCES,	0x47, 0 },

	{ PMC_EV_K7_IC_FETCHES,			0x80, 0 },
	{ PMC_EV_K7_IC_MISSES,			0x81, 0 },

	{ PMC_EV_K7_L1_ITLB_MISSES,		0x84, 0 },
	{ PMC_EV_K7_L1_L2_ITLB_MISSES,		0x85, 0 },

	{ PMC_EV_K7_RETIRED_INSTRUCTIONS,	0xC0, 0 },
	{ PMC_EV_K7_RETIRED_OPS,		0xC1, 0 },
	{ PMC_EV_K7_RETIRED_BRANCHES,		0xC2, 0 },
	{ PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 },
	{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES, 	0xC4, 0 },
	{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 },
	{ PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 },
	{ PMC_EV_K7_RETIRED_RESYNC_BRANCHES,	0xC7, 0 },
	{ PMC_EV_K7_INTERRUPTS_MASKED_CYCLES,	0xCD, 0 },
	{ PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 },
	{ PMC_EV_K7_HARDWARE_INTERRUPTS,	0xCF, 0 }
#endif

#if	__amd64__
	{ PMC_EV_K8_FP_DISPATCHED_FPU_OPS,		0x00, 0x3F },
	{ PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED,	0x01, 0x00 },
	{ PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS,	0x02, 0x00 },

	{ PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD, 		0x20, 0x7F },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
	  						0x21, 0x00 },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
	{ PMC_EV_K8_LS_BUFFER2_FULL,			0x23, 0x00 },
	{ PMC_EV_K8_LS_LOCKED_OPERATION,		0x24, 0x07 },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL,	0x25, 0x00 },
	{ PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS,	0x26, 0x00 },
	{ PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS,	0x27, 0x00 },

	{ PMC_EV_K8_DC_ACCESS,				0x40, 0x00 },
	{ PMC_EV_K8_DC_MISS,				0x41, 0x00 },
	{ PMC_EV_K8_DC_REFILL_FROM_L2,			0x42, 0x1F },
	{ PMC_EV_K8_DC_REFILL_FROM_SYSTEM,		0x43, 0x1F },
	{ PMC_EV_K8_DC_COPYBACK,			0x44, 0x1F },
	{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT,	0x45, 0x00 },
	{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS,	0x46, 0x00 },
	{ PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE,	0x47, 0x00 },
	{ PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL,	0x48, 0x00 },
	{ PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
	{ PMC_EV_K8_DC_ONE_BIT_ECC_ERROR,		0x4A, 0x03 },
	{ PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
	{ PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS,	0x4C, 0x03 },

	{ PMC_EV_K8_BU_CPU_CLK_UNHALTED,		0x76, 0x00 },
	{ PMC_EV_K8_BU_INTERNAL_L2_REQUEST,		0x7D, 0x1F },
	{ PMC_EV_K8_BU_FILL_REQUEST_L2_MISS,		0x7E, 0x07 },
	{ PMC_EV_K8_BU_FILL_INTO_L2,			0x7F, 0x03 },

	{ PMC_EV_K8_IC_FETCH,				0x80, 0x00 },
	{ PMC_EV_K8_IC_MISS,				0x81, 0x00 },
	{ PMC_EV_K8_IC_REFILL_FROM_L2,			0x82, 0x00 },
	{ PMC_EV_K8_IC_REFILL_FROM_SYSTEM,		0x83, 0x00 },
	{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT,	0x84, 0x00 },
	{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS,	0x85, 0x00 },
	{ PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
	{ PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL,		0x87, 0x00 },
	{ PMC_EV_K8_IC_RETURN_STACK_HIT,		0x88, 0x00 },
	{ PMC_EV_K8_IC_RETURN_STACK_OVERFLOW,		0x89, 0x00 },

	{ PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS,	0xC0, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_UOPS,			0xC1, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_BRANCHES,		0xC2, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED,	0xC3, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES,		0xC4, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS,	0xC6, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_RESYNCS,			0xC7, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS,		0xC8, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
							0xCA, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS,	0xCB, 0x0F },
	{ PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
							0xCC, 0x07 },
	{ PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES,	0xCD, 0x00 },
	{ PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
	{ PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS,	0xCF, 0x00 },

	{ PMC_EV_K8_FR_DECODER_EMPTY,			0xD0, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALLS,			0xD1, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
							0xD2, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD,	0xD4, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
							0xD5, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
							0xD6, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL,	0xD7, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL,	0xD8, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
							0xD9, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
							0xDA, 0x00 },
	{ PMC_EV_K8_FR_FPU_EXCEPTIONS,			0xDB, 0x0F },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0,	0xDC, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1,	0xDD, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2,	0xDE, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3,	0xDF, 0x00 },

	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x7 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
							0xE2, 0x00 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND,	0xE3, 0x07 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
	{ PMC_EV_K8_NB_SIZED_COMMANDS,			0xEB, 0x7F },
	{ PMC_EV_K8_NB_PROBE_RESULT,			0xEC, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS0_BANDWIDTH,		0xF6, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS1_BANDWIDTH,		0xF7, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS2_BANDWIDTH,		0xF8, 0x0F }
#endif

};

const int amd_event_codes_size =
	sizeof(amd_event_codes) / sizeof(amd_event_codes[0]);

/*
 * read a pmc register
 */

static int
amd_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	enum pmc_mode mode;
	const struct amd_descr *pd;
	struct pmc *pm;
	const struct pmc_hw *phw;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pd  = &amd_pmcdesc[ri];
	pm  = phw->phw_pmc;

	KASSERT(pm != NULL,
	    ("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
		cpu, ri));

	mode = pm->pm_mode;

	PMCDBG(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);

	/* Reading the TSC is a special case */
	if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
		KASSERT(PMC_IS_COUNTING_MODE(mode),
		    ("[amd,%d] TSC counter in non-counting mode", __LINE__));
		*v = rdtsc();
		PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
		return 0;
	}

	KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));

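	/*
	 * Sampling PMCs are loaded with the two's complement of the
	 * sampling count (see amd_write_pmc()), so the raw counter value
	 * is negated below to return a positive count to the caller.
	 */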
	tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */
	if (PMC_IS_SAMPLING_MODE(mode))
		*v = -tmp;
	else
		*v = tmp;

	PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);

	return 0;
}

/*
 * Write a PMC MSR.
 */

static int
amd_write_pmc(int cpu, int ri, pmc_value_t v)
{
	const struct amd_descr *pd;
	struct pmc *pm;
	const struct pmc_hw *phw;
	enum pmc_mode mode;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pd  = &amd_pmcdesc[ri];
	pm  = phw->phw_pmc;

	KASSERT(pm != NULL,
	    ("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
		cpu, ri));

	mode = pm->pm_mode;

	if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
		return 0;

	KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));

	/* use 2's complement of the count for sampling mode PMCs */
	if (PMC_IS_SAMPLING_MODE(mode))
		v = -v;
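	/*
	 * The counter thus starts at the negated count and counts upward;
	 * once the requested number of events has occurred it wraps past
	 * zero, which is the condition AMD_PMC_HAS_OVERFLOWED() tests and
	 * the point at which an overflow interrupt can be raised.
	 */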

	PMCDBG(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	/* write the PMC value */
	wrmsr(pd->pm_perfctr, v);
	return 0;
}

/*
 * configure hardware pmc according to the configuration recorded in
 * pmc 'pm'.
 */

static int
amd_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];

	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[amd,%d] hwpmc not unconfigured before re-config", __LINE__));

	phw->phw_pmc = pm;
	return 0;
}

/*
 * Machine dependent actions taken during the context switch in of a
 * thread.
 */

static int
amd_switch_in(struct pmc_cpu *pc)
{
	(void) pc;

	/* enable the RDPMC instruction */
	load_cr4(rcr4() | CR4_PCE);
	return 0;
}

/*
 * Machine dependent actions taken during the context switch out of a
 * thread.
 */

static int
amd_switch_out(struct pmc_cpu *pc)
{
	(void) pc;

	/* disallow RDPMC instruction */
	load_cr4(rcr4() & ~CR4_PCE);
	return 0;
}

/*
 * Check if a given allocation is feasible.
 */

static int
amd_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	int i;
	uint32_t allowed_unitmask, caps, config, unitmask;
	enum pmc_event pe;
	const struct pmc_descr *pd;

	(void) cpu;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row index %d", __LINE__, ri));

	pd = &amd_pmcdesc[ri].pm_descr;

	/* check class match */
	if (pd->pd_class != pm->pm_class)
		return EINVAL;

	caps = pm->pm_caps;

	PMCDBG(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps);

	if ((pd->pd_caps & caps) != caps)
		return EPERM;
	if (pd->pd_class == PMC_CLASS_TSC) {
		/* TSCs are always allocated in system-wide counting mode */
		if (a->pm_ev != PMC_EV_TSC_TSC ||
		    a->pm_mode != PMC_MODE_SC)
			return EINVAL;
		return 0;
	}

	KASSERT(pd->pd_class == AMD_PMC_CLASS,
	    ("[amd,%d] Unknown PMC class (%d)", __LINE__, pd->pd_class));

	pe = a->pm_ev;

	/* map ev to the correct event mask code */
	config = allowed_unitmask = 0;
	for (i = 0; i < amd_event_codes_size; i++)
		if (amd_event_codes[i].pe_ev == pe) {
			config =
			    AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
			allowed_unitmask =
			    AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
			break;
		}
	if (i == amd_event_codes_size)
		return EINVAL;

	unitmask = a->pm_amd_config & AMD_PMC_UNITMASK;
	if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
		return EINVAL;

	if (unitmask && (caps & PMC_CAP_QUALIFIER))
		config |= unitmask;

	if (caps & PMC_CAP_THRESHOLD)
		config |= a->pm_amd_config & AMD_PMC_COUNTERMASK;

	/* set at least one of the 'usr' or 'os' caps */
	if (caps & PMC_CAP_USER)
		config |= AMD_PMC_USR;
	if (caps & PMC_CAP_SYSTEM)
		config |= AMD_PMC_OS;
	if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
		config |= (AMD_PMC_USR|AMD_PMC_OS);

	if (caps & PMC_CAP_EDGE)
		config |= AMD_PMC_EDGE;
	if (caps & PMC_CAP_INVERT)
		config |= AMD_PMC_INVERT;
	if (caps & PMC_CAP_INTERRUPT)
		config |= AMD_PMC_INT;

	pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */

	PMCDBG(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, config);

	return 0;
}

/*
 * Release machine dependent state associated with a PMC.  This is a
 * no-op on this architecture.
 *
 */

/* ARGSUSED0 */
static int
amd_release_pmc(int cpu, int ri, struct pmc *pmc)
{
#if	DEBUG
	const struct amd_descr *pd;
#endif
	struct pmc_hw *phw;

	(void) pmc;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];

	KASSERT(phw->phw_pmc == NULL,
	    ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

#if 	DEBUG
	pd = &amd_pmcdesc[ri];
	if (pd->pm_descr.pd_class == AMD_PMC_CLASS)
		KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
		    ("[amd,%d] PMC %d released while active", __LINE__, ri));
#endif

	return 0;
}

/*
 * start a PMC.
 */

static int
amd_start_pmc(int cpu, int ri)
{
	uint32_t config;
	struct pmc *pm;
	struct pmc_hw *phw;
	const struct amd_descr *pd;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pm  = phw->phw_pmc;
	pd = &amd_pmcdesc[ri];

	KASSERT(pm != NULL,
	    ("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
		cpu, ri));

	PMCDBG(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);

	if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
		return 0;	/* TSCs are always running */

	KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));

	KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
	    ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
	    ri, cpu, pd->pm_descr.pd_name));

	/* turn on the PMC ENABLE bit */
	config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;

	PMCDBG(MDP,STA,2,"amd-start config=0x%x", config);

	wrmsr(pd->pm_evsel, config);
	return 0;
}

/*
 * Stop a PMC.
 */

static int
amd_stop_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct pmc_hw *phw;
	const struct amd_descr *pd;
	uint64_t config;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pm  = phw->phw_pmc;
	pd  = &amd_pmcdesc[ri];

	KASSERT(pm != NULL,
	    ("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
		cpu, ri));

	/* can't stop a TSC */
	if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
		return 0;

	KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));

	KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
	    ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
		__LINE__, ri, cpu, pd->pm_descr.pd_name));

	PMCDBG(MDP,STO,1,"amd-stop ri=%d", ri);

	/* turn off the PMC ENABLE bit */
	config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;
	wrmsr(pd->pm_evsel, config);
	return 0;
}

/*
 * Interrupt handler.  This function needs to return '1' if the
 * interrupt was caused by this CPU's PMCs or '0' otherwise.  It is not
 * allowed to sleep or do anything a 'fast' interrupt handler is not
 * allowed to do.
 */
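
/*
 * The K7/K8 PMCs do not provide a global overflow status register, so
 * the handler below polls each counter (via AMD_PMC_HAS_OVERFLOWED())
 * to discover which sampling PMCs have overflowed.
 */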

static int
amd_intr(int cpu, uintptr_t eip)
{
	int i, retval;
	enum pmc_mode mode;
	uint32_t perfctr;
	struct pmc *pm;
	struct pmc_cpu *pc;
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] out of range CPU %d", __LINE__, cpu));

	retval = 0;

	pc = pmc_pcpu[cpu];

	/*
	 * look for all PMCs that have interrupted:
	 * - skip over the TSC [PMC#0]
	 * - look for a PMC with a valid 'struct pmc' association
	 * - look for a PMC in (a) sampling mode and (b) which has
	 *   overflowed.  If found, we update the process's
	 *   histogram or send it a profiling signal by calling
	 *   the appropriate helper function.
	 */

	for (i = 1; i < AMD_NPMCS; i++) {

		phw = pc->pc_hwpmcs[i];
		perfctr = amd_pmcdesc[i].pm_perfctr;
		KASSERT(phw != NULL, ("[amd,%d] null PHW pointer", __LINE__));

		if ((pm = phw->phw_pmc) == NULL ||
		    pm->pm_state != PMC_STATE_RUNNING) {
			atomic_add_int(&pmc_stats.pm_intr_ignored, 1);
			continue;
		}

		mode = pm->pm_mode;
		if (PMC_IS_SAMPLING_MODE(mode) &&
		    AMD_PMC_HAS_OVERFLOWED(perfctr)) {
			atomic_add_int(&pmc_stats.pm_intr_processed, 1);
			if (PMC_IS_SYSTEM_MODE(mode))
				pmc_update_histogram(phw, eip);
			else if (PMC_IS_VIRTUAL_MODE(mode))
				pmc_send_signal(pm);
			retval = 1;
		}
	}
	return retval;
}

/*
 * describe a PMC
 */
static int
amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	size_t copied;
	const struct amd_descr *pd;
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] row-index %d out of range", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pd  = &amd_pmcdesc[ri];

	if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
		 PMC_NAME_MAX, &copied)) != 0)
		return error;

	pi->pm_class = pd->pm_descr.pd_class;
	pi->pm_caps  = pd->pm_descr.pd_caps;
	pi->pm_width = pd->pm_descr.pd_width;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return 0;
}

/*
 * i386 specific entry points
 */

/*
 * return the MSR address of the given PMC.
 */

static int
amd_get_msr(int ri, uint32_t *msr)
{
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] ri %d out of range", __LINE__, ri));

	*msr = amd_pmcdesc[ri].pm_perfctr;
	return 0;
}

/*
 * processor dependent initialization.
 */

/*
 * Per-processor data structure
 *
 * [common stuff]
 * [5 struct pmc_hw pointers]
 * [5 struct pmc_hw structures]
 */

struct amd_cpu {
	struct pmc_cpu	pc_common;
	struct pmc_hw	*pc_hwpmcs[AMD_NPMCS];
	struct pmc_hw	pc_amdpmcs[AMD_NPMCS];
};
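
/*
 * amd_init() points each pc_hwpmcs[] entry at the corresponding element
 * of pc_amdpmcs[], so the machine independent layer can reach the
 * hardware descriptors through 'struct pmc_cpu'.
 */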


static int
amd_init(int cpu)
{
	int n;
	struct amd_cpu *pcs;
	struct pmc_hw  *phw;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG(MDP,INI,1,"amd-init cpu=%d", cpu);

	MALLOC(pcs, struct amd_cpu *, sizeof(struct amd_cpu), M_PMC,
	    M_WAITOK|M_ZERO);

	if (pcs == NULL)
		return ENOMEM;

	phw = &pcs->pc_amdpmcs[0];

	/*
	 * Initialize the per-cpu mutex and set the content of the
	 * hardware descriptors to a known state.
	 */

	for (n = 0; n < AMD_NPMCS; n++, phw++) {
		phw->phw_state 	  = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
		phw->phw_pmc	  = NULL;
		pcs->pc_hwpmcs[n] = phw;
	}

	/* Mark the TSC as shareable */
	pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;

	pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;

	return 0;
}


/*
 * processor dependent cleanup prior to the KLD
 * being unloaded
 */

static int
amd_cleanup(int cpu)
{
	int i;
	uint32_t evsel;
	struct pmc_cpu *pcs;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] insane cpu number (%d)", __LINE__, cpu));

	PMCDBG(MDP,INI,1,"amd-cleanup cpu=%d", cpu);

	/*
	 * First, turn off all PMCs on this CPU.
	 */

	for (i = 0; i < 4; i++) { /* XXX this loop is now not needed */
		evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
		evsel &= ~AMD_PMC_ENABLE;
		wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
	}

	/*
	 * Next, free up allocated space.
	 */

	pcs = pmc_pcpu[cpu];

	KASSERT(pcs != NULL,
	    ("[amd,%d] null per-cpu state pointer (cpu%d)", __LINE__, cpu));

#if	DEBUG
	/* check the TSC */
	KASSERT(pcs->pc_hwpmcs[0]->phw_pmc == NULL,
	    ("[amd,%d] CPU%d,PMC0 still in use", __LINE__, cpu));
	for (i = 1; i < AMD_NPMCS; i++) {
		KASSERT(pcs->pc_hwpmcs[i]->phw_pmc == NULL,
		    ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
		KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + (i-1)),
		    ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
	}
#endif

	pmc_pcpu[cpu] = NULL;
	FREE(pcs, M_PMC);
	return 0;
}

/*
 * Initialize ourselves.
 */

struct pmc_mdep *
pmc_amd_initialize(void)
{

	struct pmc_mdep *pmc_mdep;

	/* The presence of hardware performance counters on the AMD
	   Athlon, Duron, or later processors is _not_ indicated by
	   any of the processor feature flags set by the 'CPUID'
	   instruction, so we only check the 'instruction family'
	   field returned by CPUID for an instruction family >= 6.  This
	   test needs to be refined. */

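	/*
	 * Bits 8..11 of cpu_id hold the base CPU family: 6 for the K7
	 * (Athlon/Duron) and 0xF for the K8, so families below 6 are
	 * rejected here.
	 */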
	if ((cpu_id & 0xF00) < 0x600)
		return NULL;

	MALLOC(pmc_mdep, struct pmc_mdep *, sizeof(struct pmc_mdep),
	    M_PMC, M_WAITOK|M_ZERO);

#if	__i386__
	pmc_mdep->pmd_cputype	   = PMC_CPU_AMD_K7;
#elif	__amd64__
	pmc_mdep->pmd_cputype	   = PMC_CPU_AMD_K8;
#else
#error	Unknown AMD CPU type.
#endif

	pmc_mdep->pmd_npmc 	   = AMD_NPMCS;

	/* this processor has two classes of usable PMCs */
	pmc_mdep->pmd_nclass       = 2;
	pmc_mdep->pmd_classes[0]   = PMC_CLASS_TSC;
	pmc_mdep->pmd_classes[1]   = AMD_PMC_CLASS;
	pmc_mdep->pmd_nclasspmcs[0] = 1;
	pmc_mdep->pmd_nclasspmcs[1] = (AMD_NPMCS-1);

	pmc_mdep->pmd_init    	   = amd_init;
	pmc_mdep->pmd_cleanup 	   = amd_cleanup;
	pmc_mdep->pmd_switch_in    = amd_switch_in;
	pmc_mdep->pmd_switch_out   = amd_switch_out;
	pmc_mdep->pmd_read_pmc 	   = amd_read_pmc;
	pmc_mdep->pmd_write_pmc    = amd_write_pmc;
	pmc_mdep->pmd_config_pmc   = amd_config_pmc;
	pmc_mdep->pmd_allocate_pmc = amd_allocate_pmc;
	pmc_mdep->pmd_release_pmc  = amd_release_pmc;
	pmc_mdep->pmd_start_pmc    = amd_start_pmc;
	pmc_mdep->pmd_stop_pmc     = amd_stop_pmc;
	pmc_mdep->pmd_intr	   = amd_intr;
	pmc_mdep->pmd_describe     = amd_describe;
	pmc_mdep->pmd_get_msr  	   = amd_get_msr; /* i386 */

	PMCDBG(MDP,INI,0,"%s","amd-initialize");

	return pmc_mdep;
}