xref: /freebsd/sys/dev/hwpmc/hwpmc_amd.c (revision 31610e34b7e39d573d927ea98346c88f23cabdab)
1ebccf1e3SJoseph Koshy /*-
2718cf2ccSPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3718cf2ccSPedro F. Giffuni  *
4122ccdc1SJoseph Koshy  * Copyright (c) 2003-2008 Joseph Koshy
5d07f36b0SJoseph Koshy  * Copyright (c) 2007 The FreeBSD Foundation
6ebccf1e3SJoseph Koshy  * All rights reserved.
7ebccf1e3SJoseph Koshy  *
8d07f36b0SJoseph Koshy  * Portions of this software were developed by A. Joseph Koshy under
9d07f36b0SJoseph Koshy  * sponsorship from the FreeBSD Foundation and Google, Inc.
10d07f36b0SJoseph Koshy  *
11ebccf1e3SJoseph Koshy  * Redistribution and use in source and binary forms, with or without
12ebccf1e3SJoseph Koshy  * modification, are permitted provided that the following conditions
13ebccf1e3SJoseph Koshy  * are met:
14ebccf1e3SJoseph Koshy  * 1. Redistributions of source code must retain the above copyright
15ebccf1e3SJoseph Koshy  *    notice, this list of conditions and the following disclaimer.
16ebccf1e3SJoseph Koshy  * 2. Redistributions in binary form must reproduce the above copyright
17ebccf1e3SJoseph Koshy  *    notice, this list of conditions and the following disclaimer in the
18ebccf1e3SJoseph Koshy  *    documentation and/or other materials provided with the distribution.
19ebccf1e3SJoseph Koshy  *
20ebccf1e3SJoseph Koshy  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21ebccf1e3SJoseph Koshy  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22ebccf1e3SJoseph Koshy  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ebccf1e3SJoseph Koshy  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24ebccf1e3SJoseph Koshy  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25ebccf1e3SJoseph Koshy  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26ebccf1e3SJoseph Koshy  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27ebccf1e3SJoseph Koshy  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28ebccf1e3SJoseph Koshy  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29ebccf1e3SJoseph Koshy  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30ebccf1e3SJoseph Koshy  * SUCH DAMAGE.
31ebccf1e3SJoseph Koshy  */
32ebccf1e3SJoseph Koshy 
33ebccf1e3SJoseph Koshy #include <sys/cdefs.h>
34ebccf1e3SJoseph Koshy __FBSDID("$FreeBSD$");
35ebccf1e3SJoseph Koshy 
36ebccf1e3SJoseph Koshy /* Support for the AMD K7 and later processors */
37ebccf1e3SJoseph Koshy 
38ebccf1e3SJoseph Koshy #include <sys/param.h>
39ebccf1e3SJoseph Koshy #include <sys/lock.h>
40ebccf1e3SJoseph Koshy #include <sys/malloc.h>
41ebccf1e3SJoseph Koshy #include <sys/mutex.h>
42c5445f8bSAndrew Gallatin #include <sys/pcpu.h>
437ad17ef9SMarcel Moolenaar #include <sys/pmc.h>
44122ccdc1SJoseph Koshy #include <sys/pmckern.h>
45ebccf1e3SJoseph Koshy #include <sys/smp.h>
46ebccf1e3SJoseph Koshy #include <sys/systm.h>
47ebccf1e3SJoseph Koshy 
48d07f36b0SJoseph Koshy #include <machine/cpu.h>
49f263522aSJoseph Koshy #include <machine/cpufunc.h>
50ebccf1e3SJoseph Koshy #include <machine/md_var.h>
51f263522aSJoseph Koshy #include <machine/specialreg.h>
52ebccf1e3SJoseph Koshy 
53680f1afdSJohn Baldwin #ifdef	HWPMC_DEBUG
54f263522aSJoseph Koshy enum pmc_class	amd_pmc_class;
55ebccf1e3SJoseph Koshy #endif
56ebccf1e3SJoseph Koshy 
57c5445f8bSAndrew Gallatin #define	OVERFLOW_WAIT_COUNT	50
58c5445f8bSAndrew Gallatin 
59c5445f8bSAndrew Gallatin DPCPU_DEFINE_STATIC(uint32_t, nmi_counter);
60c5445f8bSAndrew Gallatin 
61ebccf1e3SJoseph Koshy /* AMD K7 & K8 PMCs */
62ebccf1e3SJoseph Koshy struct amd_descr {
63ebccf1e3SJoseph Koshy 	struct pmc_descr pm_descr;  /* "base class" */
64ebccf1e3SJoseph Koshy 	uint32_t	pm_evsel;   /* address of EVSEL register */
65ebccf1e3SJoseph Koshy 	uint32_t	pm_perfctr; /* address of PERFCTR register */
66ebccf1e3SJoseph Koshy };
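
/*
 * For example (MSR numbers as documented in AMD's BKDG; noted here only
 * for illustration): a K8 descriptor for the first counter pairs
 * pm_evsel = AMD_PMC_EVSEL_0 (PERFEVTSEL0, MSR 0xC001_0000) with
 * pm_perfctr = AMD_PMC_PERFCTR_0 (PERFCTR0, MSR 0xC001_0004).
 */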
67ebccf1e3SJoseph Koshy 
68f263522aSJoseph Koshy static  struct amd_descr amd_pmcdesc[AMD_NPMCS] =
69ebccf1e3SJoseph Koshy {
70ebccf1e3SJoseph Koshy     {
71ebccf1e3SJoseph Koshy 	.pm_descr =
72ebccf1e3SJoseph Koshy 	{
73f263522aSJoseph Koshy 		.pd_name  = "",
74f263522aSJoseph Koshy 		.pd_class = -1,
75ebccf1e3SJoseph Koshy 		.pd_caps  = AMD_PMC_CAPS,
76ebccf1e3SJoseph Koshy 		.pd_width = 48
77ebccf1e3SJoseph Koshy 	},
78ebccf1e3SJoseph Koshy 	.pm_evsel   = AMD_PMC_EVSEL_0,
79ebccf1e3SJoseph Koshy 	.pm_perfctr = AMD_PMC_PERFCTR_0
80ebccf1e3SJoseph Koshy     },
81ebccf1e3SJoseph Koshy     {
82ebccf1e3SJoseph Koshy 	.pm_descr =
83ebccf1e3SJoseph Koshy 	{
84f263522aSJoseph Koshy 		.pd_name  = "",
85f263522aSJoseph Koshy 		.pd_class = -1,
86ebccf1e3SJoseph Koshy 		.pd_caps  = AMD_PMC_CAPS,
87ebccf1e3SJoseph Koshy 		.pd_width = 48
88ebccf1e3SJoseph Koshy 	},
89ebccf1e3SJoseph Koshy 	.pm_evsel   = AMD_PMC_EVSEL_1,
90ebccf1e3SJoseph Koshy 	.pm_perfctr = AMD_PMC_PERFCTR_1
91ebccf1e3SJoseph Koshy     },
92ebccf1e3SJoseph Koshy     {
93ebccf1e3SJoseph Koshy 	.pm_descr =
94ebccf1e3SJoseph Koshy 	{
95f263522aSJoseph Koshy 		.pd_name  = "",
96f263522aSJoseph Koshy 		.pd_class = -1,
97ebccf1e3SJoseph Koshy 		.pd_caps  = AMD_PMC_CAPS,
98ebccf1e3SJoseph Koshy 		.pd_width = 48
99ebccf1e3SJoseph Koshy 	},
100ebccf1e3SJoseph Koshy 	.pm_evsel   = AMD_PMC_EVSEL_2,
101ebccf1e3SJoseph Koshy 	.pm_perfctr = AMD_PMC_PERFCTR_2
102ebccf1e3SJoseph Koshy     },
103ebccf1e3SJoseph Koshy     {
104ebccf1e3SJoseph Koshy 	.pm_descr =
105ebccf1e3SJoseph Koshy 	{
106f263522aSJoseph Koshy 		.pd_name  = "",
107f263522aSJoseph Koshy 		.pd_class = -1,
108ebccf1e3SJoseph Koshy 		.pd_caps  = AMD_PMC_CAPS,
109ebccf1e3SJoseph Koshy 		.pd_width = 48
110ebccf1e3SJoseph Koshy 	},
111ebccf1e3SJoseph Koshy 	.pm_evsel   = AMD_PMC_EVSEL_3,
112ebccf1e3SJoseph Koshy 	.pm_perfctr = AMD_PMC_PERFCTR_3
113dacc43dfSMatt Macy      },
114dacc43dfSMatt Macy     {
115dacc43dfSMatt Macy 	.pm_descr =
116dacc43dfSMatt Macy 	{
117dacc43dfSMatt Macy 		.pd_name  = "",
118dacc43dfSMatt Macy 		.pd_class = -1,
119dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
120dacc43dfSMatt Macy 		.pd_width = 48
121dacc43dfSMatt Macy 	},
122dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_4,
123dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_4
124dacc43dfSMatt Macy     },
125dacc43dfSMatt Macy     {
126dacc43dfSMatt Macy 	.pm_descr =
127dacc43dfSMatt Macy 	{
128dacc43dfSMatt Macy 		.pd_name  = "",
129dacc43dfSMatt Macy 		.pd_class = -1,
130dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
131dacc43dfSMatt Macy 		.pd_width = 48
132dacc43dfSMatt Macy 	},
133dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_5,
134dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_5
135dacc43dfSMatt Macy     },
136dacc43dfSMatt Macy     {
137dacc43dfSMatt Macy 	.pm_descr =
138dacc43dfSMatt Macy 	{
139dacc43dfSMatt Macy 		.pd_name  = "",
140dacc43dfSMatt Macy 		.pd_class = -1,
141dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
142dacc43dfSMatt Macy 		.pd_width = 48
143dacc43dfSMatt Macy 	},
144dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_L3_0,
145dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_L3_0
146dacc43dfSMatt Macy     },
147dacc43dfSMatt Macy     {
148dacc43dfSMatt Macy 	.pm_descr =
149dacc43dfSMatt Macy 	{
150dacc43dfSMatt Macy 		.pd_name  = "",
151dacc43dfSMatt Macy 		.pd_class = -1,
152dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
153dacc43dfSMatt Macy 		.pd_width = 48
154dacc43dfSMatt Macy 	},
155dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_L3_1,
156dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_L3_1
157dacc43dfSMatt Macy     },
158dacc43dfSMatt Macy     {
159dacc43dfSMatt Macy 	.pm_descr =
160dacc43dfSMatt Macy 	{
161dacc43dfSMatt Macy 		.pd_name  = "",
162dacc43dfSMatt Macy 		.pd_class = -1,
163dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
164dacc43dfSMatt Macy 		.pd_width = 48
165dacc43dfSMatt Macy 	},
166dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_L3_2,
167dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_L3_2
168dacc43dfSMatt Macy     },
169dacc43dfSMatt Macy     {
170dacc43dfSMatt Macy 	.pm_descr =
171dacc43dfSMatt Macy 	{
172dacc43dfSMatt Macy 		.pd_name  = "",
173dacc43dfSMatt Macy 		.pd_class = -1,
174dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
175dacc43dfSMatt Macy 		.pd_width = 48
176dacc43dfSMatt Macy 	},
177dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_L3_3,
178dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_L3_3
179dacc43dfSMatt Macy     },
180dacc43dfSMatt Macy     {
181dacc43dfSMatt Macy 	.pm_descr =
182dacc43dfSMatt Macy 	{
183dacc43dfSMatt Macy 		.pd_name  = "",
184dacc43dfSMatt Macy 		.pd_class = -1,
185dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
186dacc43dfSMatt Macy 		.pd_width = 48
187dacc43dfSMatt Macy 	},
188dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_L3_4,
189dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_L3_4
190dacc43dfSMatt Macy     },
191dacc43dfSMatt Macy     {
192dacc43dfSMatt Macy 	.pm_descr =
193dacc43dfSMatt Macy 	{
194dacc43dfSMatt Macy 		.pd_name  = "",
195dacc43dfSMatt Macy 		.pd_class = -1,
196dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
197dacc43dfSMatt Macy 		.pd_width = 48
198dacc43dfSMatt Macy 	},
199dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_L3_5,
200dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_L3_5
201dacc43dfSMatt Macy     },
202dacc43dfSMatt Macy     {
203dacc43dfSMatt Macy 	.pm_descr =
204dacc43dfSMatt Macy 	{
205dacc43dfSMatt Macy 		.pd_name  = "",
206dacc43dfSMatt Macy 		.pd_class = -1,
207dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
208dacc43dfSMatt Macy 		.pd_width = 48
209dacc43dfSMatt Macy 	},
210dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_DF_0,
211dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_DF_0
212dacc43dfSMatt Macy     },
213dacc43dfSMatt Macy     {
214dacc43dfSMatt Macy 	.pm_descr =
215dacc43dfSMatt Macy 	{
216dacc43dfSMatt Macy 		.pd_name  = "",
217dacc43dfSMatt Macy 		.pd_class = -1,
218dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
219dacc43dfSMatt Macy 		.pd_width = 48
220dacc43dfSMatt Macy 	},
221dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_DF_1,
222dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_DF_1
223dacc43dfSMatt Macy     },
224dacc43dfSMatt Macy     {
225dacc43dfSMatt Macy 	.pm_descr =
226dacc43dfSMatt Macy 	{
227dacc43dfSMatt Macy 		.pd_name  = "",
228dacc43dfSMatt Macy 		.pd_class = -1,
229dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
230dacc43dfSMatt Macy 		.pd_width = 48
231dacc43dfSMatt Macy 	},
232dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_DF_2,
233dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_DF_2
234dacc43dfSMatt Macy     },
235dacc43dfSMatt Macy     {
236dacc43dfSMatt Macy 	.pm_descr =
237dacc43dfSMatt Macy 	{
238dacc43dfSMatt Macy 		.pd_name  = "",
239dacc43dfSMatt Macy 		.pd_class = -1,
240dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
241dacc43dfSMatt Macy 		.pd_width = 48
242dacc43dfSMatt Macy 	},
243dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_DF_3,
244dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_DF_3
245ebccf1e3SJoseph Koshy      }
246ebccf1e3SJoseph Koshy };
247ebccf1e3SJoseph Koshy 
248ebccf1e3SJoseph Koshy struct amd_event_code_map {
249ebccf1e3SJoseph Koshy 	enum pmc_event	pe_ev;	 /* enum value */
2501d3aa362SConrad Meyer 	uint16_t	pe_code; /* encoded event mask */
251ebccf1e3SJoseph Koshy 	uint8_t		pe_mask; /* bits allowed in unit mask */
252ebccf1e3SJoseph Koshy };
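
/*
 * Worked example (for illustration): the entry
 * { PMC_EV_K8_DC_REFILL_FROM_L2, 0x42, 0x1F } maps the symbolic event to
 * event-select code 0x42 and allows unit-mask bits 0x1F, i.e. the five
 * MOESI cache-state qualifiers.  A unit mask of 0x10 would be accepted
 * for this event, while 0x20 would fail the "disallow reserved bits"
 * check in amd_allocate_pmc() below.
 */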
253ebccf1e3SJoseph Koshy 
254ebccf1e3SJoseph Koshy const struct amd_event_code_map amd_event_codes[] = {
255f263522aSJoseph Koshy #if	defined(__i386__)	/* 32 bit Athlon (K7) only */
256ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_DC_ACCESSES, 		0x40, 0 },
257ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_DC_MISSES,			0x41, 0 },
258f263522aSJoseph Koshy 	{ PMC_EV_K7_DC_REFILLS_FROM_L2,		0x42, AMD_PMC_UNITMASK_MOESI },
259f263522aSJoseph Koshy 	{ PMC_EV_K7_DC_REFILLS_FROM_SYSTEM,	0x43, AMD_PMC_UNITMASK_MOESI },
260f263522aSJoseph Koshy 	{ PMC_EV_K7_DC_WRITEBACKS,		0x44, AMD_PMC_UNITMASK_MOESI },
261ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 },
262ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_L1_AND_L2_DTLB_MISSES,	0x46, 0 },
263ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_MISALIGNED_REFERENCES,	0x47, 0 },
264ebccf1e3SJoseph Koshy 
265ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_IC_FETCHES,			0x80, 0 },
266ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_IC_MISSES,			0x81, 0 },
267ebccf1e3SJoseph Koshy 
268ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_L1_ITLB_MISSES,		0x84, 0 },
269ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_L1_L2_ITLB_MISSES,		0x85, 0 },
270ebccf1e3SJoseph Koshy 
271ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_INSTRUCTIONS,	0xC0, 0 },
272ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_OPS,		0xC1, 0 },
273ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_BRANCHES,		0xC2, 0 },
274ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 },
275ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES, 	0xC4, 0 },
276ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 },
277ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 },
278ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_RESYNC_BRANCHES,	0xC7, 0 },
279ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_INTERRUPTS_MASKED_CYCLES,	0xCD, 0 },
280ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 },
281f263522aSJoseph Koshy 	{ PMC_EV_K7_HARDWARE_INTERRUPTS,	0xCF, 0 },
282ebccf1e3SJoseph Koshy #endif
283ebccf1e3SJoseph Koshy 
284ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FP_DISPATCHED_FPU_OPS,		0x00, 0x3F },
285ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED,	0x01, 0x00 },
286ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS,	0x02, 0x00 },
287ebccf1e3SJoseph Koshy 
288ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD, 		0x20, 0x7F },
289ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
290ebccf1e3SJoseph Koshy 	  						0x21, 0x00 },
291ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
292ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_BUFFER2_FULL,			0x23, 0x00 },
293ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_LOCKED_OPERATION,		0x24, 0x07 },
294ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL,	0x25, 0x00 },
295ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS,	0x26, 0x00 },
296ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS,	0x27, 0x00 },
297ebccf1e3SJoseph Koshy 
298ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_ACCESS,				0x40, 0x00 },
299ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_MISS,				0x41, 0x00 },
300ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_REFILL_FROM_L2,			0x42, 0x1F },
301ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_REFILL_FROM_SYSTEM,		0x43, 0x1F },
302ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_COPYBACK,			0x44, 0x1F },
303ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT,	0x45, 0x00 },
304ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS,	0x46, 0x00 },
305ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE,	0x47, 0x00 },
306ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL,	0x48, 0x00 },
307ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
308ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_ONE_BIT_ECC_ERROR,		0x4A, 0x03 },
309ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
310ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS,	0x4C, 0x03 },
311ebccf1e3SJoseph Koshy 
312ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_BU_CPU_CLK_UNHALTED,		0x76, 0x00 },
313ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_BU_INTERNAL_L2_REQUEST,		0x7D, 0x1F },
314ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_BU_FILL_REQUEST_L2_MISS,		0x7E, 0x07 },
315ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_BU_FILL_INTO_L2,			0x7F, 0x03 },
316ebccf1e3SJoseph Koshy 
317ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_FETCH,				0x80, 0x00 },
318ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_MISS,				0x81, 0x00 },
319ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_REFILL_FROM_L2,			0x82, 0x00 },
320ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_REFILL_FROM_SYSTEM,		0x83, 0x00 },
321ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT,	0x84, 0x00 },
322ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS,	0x85, 0x00 },
323ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
324ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL,		0x87, 0x00 },
325ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_RETURN_STACK_HIT,		0x88, 0x00 },
326ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_RETURN_STACK_OVERFLOW,		0x89, 0x00 },
327ebccf1e3SJoseph Koshy 
328ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS,	0xC0, 0x00 },
329ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_UOPS,			0xC1, 0x00 },
330ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_BRANCHES,		0xC2, 0x00 },
331ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED,	0xC3, 0x00 },
332ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES,		0xC4, 0x00 },
333ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
334ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS,	0xC6, 0x00 },
335ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_RESYNCS,			0xC7, 0x00 },
336ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS,		0xC8, 0x00 },
337ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
338ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
339ebccf1e3SJoseph Koshy 							0xCA, 0x00 },
340ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS,	0xCB, 0x0F },
341ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
342ebccf1e3SJoseph Koshy 							0xCC, 0x07 },
343ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES,	0xCD, 0x00 },
344ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
345ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS,	0xCF, 0x00 },
346ebccf1e3SJoseph Koshy 
347ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DECODER_EMPTY,			0xD0, 0x00 },
348ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALLS,			0xD1, 0x00 },
349ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
350ebccf1e3SJoseph Koshy 							0xD2, 0x00 },
351ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
352ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD,	0xD4, 0x00 },
353ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
354ebccf1e3SJoseph Koshy 							0xD5, 0x00 },
355ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
356ebccf1e3SJoseph Koshy 							0xD6, 0x00 },
357ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL,	0xD7, 0x00 },
358ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL,	0xD8, 0x00 },
359ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
360ebccf1e3SJoseph Koshy 							0xD9, 0x00 },
361ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
362ebccf1e3SJoseph Koshy 							0xDA, 0x00 },
363ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_FPU_EXCEPTIONS,			0xDB, 0x0F },
364ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0,	0xDC, 0x00 },
365ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1,	0xDD, 0x00 },
366ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2,	0xDE, 0x00 },
367ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3,	0xDF, 0x00 },
368ebccf1e3SJoseph Koshy 
369ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x7 },
370ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
371ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
372ebccf1e3SJoseph Koshy 							0xE2, 0x00 },
373ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND,	0xE3, 0x07 },
374ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
375ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_SIZED_COMMANDS,			0xEB, 0x7F },
376ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_PROBE_RESULT,			0xEC, 0x0F },
377ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_HT_BUS0_BANDWIDTH,		0xF6, 0x0F },
378ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_HT_BUS1_BANDWIDTH,		0xF7, 0x0F },
379ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_HT_BUS2_BANDWIDTH,		0xF8, 0x0F }
380ebccf1e3SJoseph Koshy 
381ebccf1e3SJoseph Koshy };
382ebccf1e3SJoseph Koshy 
383323b076eSPedro F. Giffuni const int amd_event_codes_size = nitems(amd_event_codes);
384ebccf1e3SJoseph Koshy 
385ebccf1e3SJoseph Koshy /*
386e829eb6dSJoseph Koshy  * Per-processor information
387e829eb6dSJoseph Koshy  */
388e829eb6dSJoseph Koshy 
389e829eb6dSJoseph Koshy struct amd_cpu {
390e829eb6dSJoseph Koshy 	struct pmc_hw	pc_amdpmcs[AMD_NPMCS];
391e829eb6dSJoseph Koshy };
392e829eb6dSJoseph Koshy 
393e829eb6dSJoseph Koshy static struct amd_cpu **amd_pcpu;
394e829eb6dSJoseph Koshy 
395e829eb6dSJoseph Koshy /*
396ebccf1e3SJoseph Koshy  * read a pmc register
397ebccf1e3SJoseph Koshy  */
398ebccf1e3SJoseph Koshy 
399ebccf1e3SJoseph Koshy static int
400ebccf1e3SJoseph Koshy amd_read_pmc(int cpu, int ri, pmc_value_t *v)
401ebccf1e3SJoseph Koshy {
402ebccf1e3SJoseph Koshy 	enum pmc_mode mode;
403ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
404ebccf1e3SJoseph Koshy 	struct pmc *pm;
405ebccf1e3SJoseph Koshy 	pmc_value_t tmp;
406ebccf1e3SJoseph Koshy 
407122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
408ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
409ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
410ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
411e829eb6dSJoseph Koshy 	KASSERT(amd_pcpu[cpu],
412e829eb6dSJoseph Koshy 	    ("[amd,%d] null per-cpu, cpu %d", __LINE__, cpu));
413ebccf1e3SJoseph Koshy 
414e829eb6dSJoseph Koshy 	pm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;
415ebccf1e3SJoseph Koshy 	pd = &amd_pmcdesc[ri];
416ebccf1e3SJoseph Koshy 
417ebccf1e3SJoseph Koshy 	KASSERT(pm != NULL,
418ebccf1e3SJoseph Koshy 	    ("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
419ebccf1e3SJoseph Koshy 		cpu, ri));
420ebccf1e3SJoseph Koshy 
421c5153e19SJoseph Koshy 	mode = PMC_TO_MODE(pm);
422ebccf1e3SJoseph Koshy 
4234a3690dfSJohn Baldwin 	PMCDBG2(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);
424ebccf1e3SJoseph Koshy 
425680f1afdSJohn Baldwin #ifdef	HWPMC_DEBUG
426f263522aSJoseph Koshy 	KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
427ebccf1e3SJoseph Koshy 	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
428ebccf1e3SJoseph Koshy 		pd->pm_descr.pd_class));
429f263522aSJoseph Koshy #endif
430ebccf1e3SJoseph Koshy 
431ebccf1e3SJoseph Koshy 	tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */
4324a3690dfSJohn Baldwin 	PMCDBG2(MDP,REA,2,"amd-read (pre-munge) id=%d -> %jd", ri, tmp);
43305e486c7SAdrian Chadd 	if (PMC_IS_SAMPLING_MODE(mode)) {
434e74c7ffcSJessica Clarke 		/*
435e74c7ffcSJessica Clarke 		 * Clamp value to 0 if the counter just overflowed,
436e74c7ffcSJessica Clarke 		 * otherwise the returned reload count would wrap to a
437e74c7ffcSJessica Clarke 		 * huge value.
438e74c7ffcSJessica Clarke 		 */
439e74c7ffcSJessica Clarke 		if ((tmp & (1ULL << 47)) == 0)
440e74c7ffcSJessica Clarke 			tmp = 0;
441e74c7ffcSJessica Clarke 		else {
44205e486c7SAdrian Chadd 			/* Sign extend 48 bit value to 64 bits. */
443e74c7ffcSJessica Clarke 			tmp = (pmc_value_t) ((int64_t)(tmp << 16) >> 16);
44405e486c7SAdrian Chadd 			tmp = AMD_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
44505e486c7SAdrian Chadd 		}
446e74c7ffcSJessica Clarke 	}
447ebccf1e3SJoseph Koshy 	*v = tmp;
448ebccf1e3SJoseph Koshy 
4494a3690dfSJohn Baldwin 	PMCDBG2(MDP,REA,2,"amd-read (post-munge) id=%d -> %jd", ri, *v);
450ebccf1e3SJoseph Koshy 
451ebccf1e3SJoseph Koshy 	return 0;
452ebccf1e3SJoseph Koshy }
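
/*
 * Illustrative sketch (not compiled into the driver): the arithmetic
 * behind the sampling-mode "munge" above.  The helper name is made up;
 * the clamping and sign extension mirror amd_read_pmc(), and the final
 * negation shows the intent of AMD_PERFCTR_VALUE_TO_RELOAD_COUNT() for
 * typical reload counts.
 */
#if 0
static pmc_value_t
example_raw_to_reload_count(uint64_t raw)
{
	int64_t sext;

	/* The counter already wrapped past zero: report 0 rather than a
	 * huge bogus reload count. */
	if ((raw & (1ULL << 47)) == 0)
		return (0);

	/* Sign-extend the 48-bit counter value, then negate it to recover
	 * the number of events remaining until the next overflow. */
	sext = (int64_t)(raw << 16) >> 16;
	return ((pmc_value_t)(-sext));
}
#endif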
453ebccf1e3SJoseph Koshy 
454ebccf1e3SJoseph Koshy /*
455ebccf1e3SJoseph Koshy  * Write a PMC MSR.
456ebccf1e3SJoseph Koshy  */
457ebccf1e3SJoseph Koshy 
458ebccf1e3SJoseph Koshy static int
459ebccf1e3SJoseph Koshy amd_write_pmc(int cpu, int ri, pmc_value_t v)
460ebccf1e3SJoseph Koshy {
461ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
462ebccf1e3SJoseph Koshy 	enum pmc_mode mode;
463e829eb6dSJoseph Koshy 	struct pmc *pm;
464ebccf1e3SJoseph Koshy 
465122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
466ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
467ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
468ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
469ebccf1e3SJoseph Koshy 
470e829eb6dSJoseph Koshy 	pm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;
471ebccf1e3SJoseph Koshy 	pd = &amd_pmcdesc[ri];
472ebccf1e3SJoseph Koshy 
473ebccf1e3SJoseph Koshy 	KASSERT(pm != NULL,
474ebccf1e3SJoseph Koshy 	    ("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
475ebccf1e3SJoseph Koshy 		cpu, ri));
476ebccf1e3SJoseph Koshy 
477c5153e19SJoseph Koshy 	mode = PMC_TO_MODE(pm);
478ebccf1e3SJoseph Koshy 
479680f1afdSJohn Baldwin #ifdef	HWPMC_DEBUG
480f263522aSJoseph Koshy 	KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
481ebccf1e3SJoseph Koshy 	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
482ebccf1e3SJoseph Koshy 		pd->pm_descr.pd_class));
483f263522aSJoseph Koshy #endif
484ebccf1e3SJoseph Koshy 
485ebccf1e3SJoseph Koshy 	/* use 2's complement of the count for sampling mode PMCs */
486ebccf1e3SJoseph Koshy 	if (PMC_IS_SAMPLING_MODE(mode))
487f263522aSJoseph Koshy 		v = AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
488ebccf1e3SJoseph Koshy 
4894a3690dfSJohn Baldwin 	PMCDBG3(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);
490ebccf1e3SJoseph Koshy 
491ebccf1e3SJoseph Koshy 	/* write the PMC value */
492ebccf1e3SJoseph Koshy 	wrmsr(pd->pm_perfctr, v);
493ebccf1e3SJoseph Koshy 	return 0;
494ebccf1e3SJoseph Koshy }
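
/*
 * Example (illustration only): writing a reload count of 10000 to a
 * sampling-mode PMC programs the 48-bit counter with its two's
 * complement, 0x1000000000000 - 10000, so the counter overflows and the
 * overflow interrupt fires after 10000 events of the selected type.
 */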
495ebccf1e3SJoseph Koshy 
496ebccf1e3SJoseph Koshy /*
497ebccf1e3SJoseph Koshy  * configure hardware pmc according to the configuration recorded in
498ebccf1e3SJoseph Koshy  * pmc 'pm'.
499ebccf1e3SJoseph Koshy  */
500ebccf1e3SJoseph Koshy 
501ebccf1e3SJoseph Koshy static int
502ebccf1e3SJoseph Koshy amd_config_pmc(int cpu, int ri, struct pmc *pm)
503ebccf1e3SJoseph Koshy {
504ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
505ebccf1e3SJoseph Koshy 
5064a3690dfSJohn Baldwin 	PMCDBG3(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
5076b8c8cd8SJoseph Koshy 
508122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
509ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
510ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
511ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
512ebccf1e3SJoseph Koshy 
513e829eb6dSJoseph Koshy 	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
514ebccf1e3SJoseph Koshy 
515ebccf1e3SJoseph Koshy 	KASSERT(pm == NULL || phw->phw_pmc == NULL,
5166b8c8cd8SJoseph Koshy 	    ("[amd,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
5176b8c8cd8SJoseph Koshy 		__LINE__, pm, phw->phw_pmc));
518ebccf1e3SJoseph Koshy 
519ebccf1e3SJoseph Koshy 	phw->phw_pmc = pm;
520ebccf1e3SJoseph Koshy 	return 0;
521ebccf1e3SJoseph Koshy }
522ebccf1e3SJoseph Koshy 
523ebccf1e3SJoseph Koshy /*
524c5153e19SJoseph Koshy  * Retrieve a configured PMC pointer from hardware state.
525c5153e19SJoseph Koshy  */
526c5153e19SJoseph Koshy 
527c5153e19SJoseph Koshy static int
528c5153e19SJoseph Koshy amd_get_config(int cpu, int ri, struct pmc **ppm)
529c5153e19SJoseph Koshy {
530e829eb6dSJoseph Koshy 	*ppm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;
531c5153e19SJoseph Koshy 
532c5153e19SJoseph Koshy 	return 0;
533c5153e19SJoseph Koshy }
534c5153e19SJoseph Koshy 
535c5153e19SJoseph Koshy /*
536ebccf1e3SJoseph Koshy  * Machine dependent actions taken during the context switch in of a
537ebccf1e3SJoseph Koshy  * thread.
538ebccf1e3SJoseph Koshy  */
539ebccf1e3SJoseph Koshy 
540ebccf1e3SJoseph Koshy static int
5416b8c8cd8SJoseph Koshy amd_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
542ebccf1e3SJoseph Koshy {
543ebccf1e3SJoseph Koshy 	(void) pc;
544ebccf1e3SJoseph Koshy 
5454a3690dfSJohn Baldwin 	PMCDBG3(MDP,SWI,1, "pc=%p pp=%p enable-msr=%d", pc, pp,
546c5153e19SJoseph Koshy 	    (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0);
5476b8c8cd8SJoseph Koshy 
5486b8c8cd8SJoseph Koshy 	/* enable the RDPMC instruction if needed */
549c5153e19SJoseph Koshy 	if (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS)
550ebccf1e3SJoseph Koshy 		load_cr4(rcr4() | CR4_PCE);
5516b8c8cd8SJoseph Koshy 
552ebccf1e3SJoseph Koshy 	return 0;
553ebccf1e3SJoseph Koshy }
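
/*
 * Sketch (not part of the driver): with CR4.PCE set as above, a thread
 * that owns a PMC can read it directly from user space with the RDPMC
 * instruction instead of making a system call.  The counter index and
 * the wrapper below are purely illustrative.
 */
#if 0
static inline uint64_t
example_rdpmc(uint32_t counter_index)
{
	uint32_t lo, hi;

	__asm __volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter_index));
	return ((uint64_t)hi << 32 | lo);
}
#endif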
554ebccf1e3SJoseph Koshy 
555ebccf1e3SJoseph Koshy /*
556ebccf1e3SJoseph Koshy  * Machine dependent actions taken during the context switch out of a
557ebccf1e3SJoseph Koshy  * thread.
558ebccf1e3SJoseph Koshy  */
559ebccf1e3SJoseph Koshy 
560ebccf1e3SJoseph Koshy static int
5616b8c8cd8SJoseph Koshy amd_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
562ebccf1e3SJoseph Koshy {
563ebccf1e3SJoseph Koshy 	(void) pc;
5646b8c8cd8SJoseph Koshy 	(void) pp;		/* can be NULL */
565ebccf1e3SJoseph Koshy 
5664a3690dfSJohn Baldwin 	PMCDBG3(MDP,SWO,1, "pc=%p pp=%p enable-msr=%d", pc, pp, pp ?
567c5153e19SJoseph Koshy 	    (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) == 1 : 0);
5686b8c8cd8SJoseph Koshy 
5696b8c8cd8SJoseph Koshy 	/* always turn off the RDPMC instruction */
570ebccf1e3SJoseph Koshy 	load_cr4(rcr4() & ~CR4_PCE);
5716b8c8cd8SJoseph Koshy 
572ebccf1e3SJoseph Koshy 	return 0;
573ebccf1e3SJoseph Koshy }
574ebccf1e3SJoseph Koshy 
575ebccf1e3SJoseph Koshy /*
576ebccf1e3SJoseph Koshy  * Check if a given allocation is feasible.
577ebccf1e3SJoseph Koshy  */
578ebccf1e3SJoseph Koshy 
579ebccf1e3SJoseph Koshy static int
580ebccf1e3SJoseph Koshy amd_allocate_pmc(int cpu, int ri, struct pmc *pm,
581ebccf1e3SJoseph Koshy     const struct pmc_op_pmcallocate *a)
582ebccf1e3SJoseph Koshy {
583ebccf1e3SJoseph Koshy 	int i;
584dacc43dfSMatt Macy 	uint64_t allowed_unitmask, caps, config, unitmask;
585ebccf1e3SJoseph Koshy 	enum pmc_event pe;
586ebccf1e3SJoseph Koshy 	const struct pmc_descr *pd;
587ebccf1e3SJoseph Koshy 
588ebccf1e3SJoseph Koshy 	(void) cpu;
589ebccf1e3SJoseph Koshy 
590122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
591ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
592ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
593ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row index %d", __LINE__, ri));
594ebccf1e3SJoseph Koshy 
595ebccf1e3SJoseph Koshy 	pd = &amd_pmcdesc[ri].pm_descr;
596ebccf1e3SJoseph Koshy 
597ebccf1e3SJoseph Koshy 	/* check class match */
598c5153e19SJoseph Koshy 	if (pd->pd_class != a->pm_class)
599ebccf1e3SJoseph Koshy 		return EINVAL;
600ebccf1e3SJoseph Koshy 
601ebccf1e3SJoseph Koshy 	caps = pm->pm_caps;
602ebccf1e3SJoseph Koshy 
6034a3690dfSJohn Baldwin 	PMCDBG2(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps);
604ebccf1e3SJoseph Koshy 
605dacc43dfSMatt Macy 	if ((ri >= 0 && ri < 6) && a->pm_md.pm_amd.pm_amd_sub_class != PMC_AMD_SUB_CLASS_CORE)
606dacc43dfSMatt Macy 		return EINVAL;
607dacc43dfSMatt Macy 	if ((ri >= 6 && ri < 12) && a->pm_md.pm_amd.pm_amd_sub_class != PMC_AMD_SUB_CLASS_L3_CACHE)
608dacc43dfSMatt Macy 		return EINVAL;
609dacc43dfSMatt Macy 	if ((ri >= 12 && ri < 16) && a->pm_md.pm_amd.pm_amd_sub_class != PMC_AMD_SUB_CLASS_DATA_FABRIC)
610dacc43dfSMatt Macy 		return EINVAL;
611dacc43dfSMatt Macy 
61281eb4dcfSMatt Macy 	if (strlen(pmc_cpuid) != 0) {
61381eb4dcfSMatt Macy 		pm->pm_md.pm_amd.pm_amd_evsel =
61481eb4dcfSMatt Macy 			a->pm_md.pm_amd.pm_amd_config;
61581eb4dcfSMatt Macy 		PMCDBG2(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, a->pm_md.pm_amd.pm_amd_config);
61681eb4dcfSMatt Macy 		return (0);
61781eb4dcfSMatt Macy 	}
618ebccf1e3SJoseph Koshy 
619ebccf1e3SJoseph Koshy 	pe = a->pm_ev;
620ebccf1e3SJoseph Koshy 
621ebccf1e3SJoseph Koshy 	/* map ev to the correct event mask code */
622ebccf1e3SJoseph Koshy 	config = allowed_unitmask = 0;
623ebccf1e3SJoseph Koshy 	for (i = 0; i < amd_event_codes_size; i++)
624ebccf1e3SJoseph Koshy 		if (amd_event_codes[i].pe_ev == pe) {
625ebccf1e3SJoseph Koshy 			config =
626ebccf1e3SJoseph Koshy 			    AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
627ebccf1e3SJoseph Koshy 			allowed_unitmask =
628ebccf1e3SJoseph Koshy 			    AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
629ebccf1e3SJoseph Koshy 			break;
630ebccf1e3SJoseph Koshy 		}
631ebccf1e3SJoseph Koshy 	if (i == amd_event_codes_size)
632ebccf1e3SJoseph Koshy 		return EINVAL;
633ebccf1e3SJoseph Koshy 
634f263522aSJoseph Koshy 	unitmask = a->pm_md.pm_amd.pm_amd_config & AMD_PMC_UNITMASK;
635ebccf1e3SJoseph Koshy 	if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
636ebccf1e3SJoseph Koshy 		return EINVAL;
637ebccf1e3SJoseph Koshy 
638ebccf1e3SJoseph Koshy 	if (unitmask && (caps & PMC_CAP_QUALIFIER))
639ebccf1e3SJoseph Koshy 		config |= unitmask;
640ebccf1e3SJoseph Koshy 
641ebccf1e3SJoseph Koshy 	if (caps & PMC_CAP_THRESHOLD)
642f263522aSJoseph Koshy 		config |= a->pm_md.pm_amd.pm_amd_config & AMD_PMC_COUNTERMASK;
643ebccf1e3SJoseph Koshy 
644ebccf1e3SJoseph Koshy 	/* set at least one of the 'usr' or 'os' caps */
645ebccf1e3SJoseph Koshy 	if (caps & PMC_CAP_USER)
646ebccf1e3SJoseph Koshy 		config |= AMD_PMC_USR;
647ebccf1e3SJoseph Koshy 	if (caps & PMC_CAP_SYSTEM)
648ebccf1e3SJoseph Koshy 		config |= AMD_PMC_OS;
649ebccf1e3SJoseph Koshy 	if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
650ebccf1e3SJoseph Koshy 		config |= (AMD_PMC_USR|AMD_PMC_OS);
651ebccf1e3SJoseph Koshy 
652ebccf1e3SJoseph Koshy 	if (caps & PMC_CAP_EDGE)
653ebccf1e3SJoseph Koshy 		config |= AMD_PMC_EDGE;
654ebccf1e3SJoseph Koshy 	if (caps & PMC_CAP_INVERT)
655ebccf1e3SJoseph Koshy 		config |= AMD_PMC_INVERT;
656ebccf1e3SJoseph Koshy 	if (caps & PMC_CAP_INTERRUPT)
657ebccf1e3SJoseph Koshy 		config |= AMD_PMC_INT;
658ebccf1e3SJoseph Koshy 
659ebccf1e3SJoseph Koshy 	pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */
660ebccf1e3SJoseph Koshy 
6614a3690dfSJohn Baldwin 	PMCDBG2(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, config);
662ebccf1e3SJoseph Koshy 
663ebccf1e3SJoseph Koshy 	return 0;
664ebccf1e3SJoseph Koshy }
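
/*
 * Worked example (illustrative fragment, not compiled, shown without
 * declarations): how an event-select value could be assembled for
 * PMC_EV_K8_DC_REFILL_FROM_L2, counting in both user and kernel mode,
 * with one of its allowed MOESI unit-mask bits and an interrupt on
 * overflow.  The macro names are the ones used by amd_allocate_pmc()
 * above.
 */
#if 0
	config  = AMD_PMC_TO_EVENTMASK(0x42);	/* DC refill from L2 */
	config |= AMD_PMC_TO_UNITMASK(0x10);	/* one allowed MOESI qualifier bit */
	config |= AMD_PMC_USR | AMD_PMC_OS;	/* count in user and kernel mode */
	config |= AMD_PMC_INT;			/* request an interrupt on overflow */
	/* amd_start_pmc() later ORs in AMD_PMC_ENABLE before the wrmsr(). */
#endif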
665ebccf1e3SJoseph Koshy 
666ebccf1e3SJoseph Koshy /*
667ebccf1e3SJoseph Koshy  * Release machine dependent state associated with a PMC.  This is a
668ebccf1e3SJoseph Koshy  * no-op on this architecture.
669ebccf1e3SJoseph Koshy  *
670ebccf1e3SJoseph Koshy  */
671ebccf1e3SJoseph Koshy 
672ebccf1e3SJoseph Koshy /* ARGSUSED0 */
673ebccf1e3SJoseph Koshy static int
674ebccf1e3SJoseph Koshy amd_release_pmc(int cpu, int ri, struct pmc *pmc)
675ebccf1e3SJoseph Koshy {
676680f1afdSJohn Baldwin #ifdef	HWPMC_DEBUG
677ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
678ebccf1e3SJoseph Koshy #endif
679aee6e7dcSMateusz Guzik 	struct pmc_hw *phw __diagused;
680ebccf1e3SJoseph Koshy 
681ebccf1e3SJoseph Koshy 	(void) pmc;
682ebccf1e3SJoseph Koshy 
683122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
684ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
685ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
686ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
687ebccf1e3SJoseph Koshy 
688e829eb6dSJoseph Koshy 	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
689ebccf1e3SJoseph Koshy 
690ebccf1e3SJoseph Koshy 	KASSERT(phw->phw_pmc == NULL,
691ebccf1e3SJoseph Koshy 	    ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
692ebccf1e3SJoseph Koshy 
693680f1afdSJohn Baldwin #ifdef	HWPMC_DEBUG
694ebccf1e3SJoseph Koshy 	pd = &amd_pmcdesc[ri];
695f263522aSJoseph Koshy 	if (pd->pm_descr.pd_class == amd_pmc_class)
696ebccf1e3SJoseph Koshy 		KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
697ebccf1e3SJoseph Koshy 		    ("[amd,%d] PMC %d released while active", __LINE__, ri));
698ebccf1e3SJoseph Koshy #endif
699ebccf1e3SJoseph Koshy 
700ebccf1e3SJoseph Koshy 	return 0;
701ebccf1e3SJoseph Koshy }
702ebccf1e3SJoseph Koshy 
703ebccf1e3SJoseph Koshy /*
704ebccf1e3SJoseph Koshy  * start a PMC.
705ebccf1e3SJoseph Koshy  */
706ebccf1e3SJoseph Koshy 
707ebccf1e3SJoseph Koshy static int
708ebccf1e3SJoseph Koshy amd_start_pmc(int cpu, int ri)
709ebccf1e3SJoseph Koshy {
710dacc43dfSMatt Macy 	uint64_t config;
711ebccf1e3SJoseph Koshy 	struct pmc *pm;
712ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
713ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
714ebccf1e3SJoseph Koshy 
715122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
716ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
717ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
718ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
719ebccf1e3SJoseph Koshy 
720e829eb6dSJoseph Koshy 	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
721ebccf1e3SJoseph Koshy 	pm  = phw->phw_pmc;
722ebccf1e3SJoseph Koshy 	pd = &amd_pmcdesc[ri];
723ebccf1e3SJoseph Koshy 
724ebccf1e3SJoseph Koshy 	KASSERT(pm != NULL,
725ebccf1e3SJoseph Koshy 	    ("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
726ebccf1e3SJoseph Koshy 		cpu, ri));
727ebccf1e3SJoseph Koshy 
7284a3690dfSJohn Baldwin 	PMCDBG2(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);
729ebccf1e3SJoseph Koshy 
730ebccf1e3SJoseph Koshy 	KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
731ebccf1e3SJoseph Koshy 	    ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
732ebccf1e3SJoseph Koshy 	    ri, cpu, pd->pm_descr.pd_name));
733ebccf1e3SJoseph Koshy 
734ebccf1e3SJoseph Koshy 	/* turn on the PMC ENABLE bit */
735ebccf1e3SJoseph Koshy 	config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;
736ebccf1e3SJoseph Koshy 
7374a3690dfSJohn Baldwin 	PMCDBG1(MDP,STA,2,"amd-start config=0x%x", config);
738ebccf1e3SJoseph Koshy 
739ebccf1e3SJoseph Koshy 	wrmsr(pd->pm_evsel, config);
740ebccf1e3SJoseph Koshy 	return 0;
741ebccf1e3SJoseph Koshy }
742ebccf1e3SJoseph Koshy 
743ebccf1e3SJoseph Koshy /*
744ebccf1e3SJoseph Koshy  * Stop a PMC.
745ebccf1e3SJoseph Koshy  */
746ebccf1e3SJoseph Koshy 
747ebccf1e3SJoseph Koshy static int
748ebccf1e3SJoseph Koshy amd_stop_pmc(int cpu, int ri)
749ebccf1e3SJoseph Koshy {
750ebccf1e3SJoseph Koshy 	struct pmc *pm;
751ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
752ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
753ebccf1e3SJoseph Koshy 	uint64_t config;
754c5445f8bSAndrew Gallatin 	int i;
755ebccf1e3SJoseph Koshy 
756122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
757ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
758ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
759ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
760ebccf1e3SJoseph Koshy 
761e829eb6dSJoseph Koshy 	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
762ebccf1e3SJoseph Koshy 	pm  = phw->phw_pmc;
763ebccf1e3SJoseph Koshy 	pd  = &amd_pmcdesc[ri];
764ebccf1e3SJoseph Koshy 
765ebccf1e3SJoseph Koshy 	KASSERT(pm != NULL,
766ebccf1e3SJoseph Koshy 	    ("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
767ebccf1e3SJoseph Koshy 		cpu, ri));
768ebccf1e3SJoseph Koshy 	KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
769ebccf1e3SJoseph Koshy 	    ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
770ebccf1e3SJoseph Koshy 		__LINE__, ri, cpu, pd->pm_descr.pd_name));
771ebccf1e3SJoseph Koshy 
7724a3690dfSJohn Baldwin 	PMCDBG1(MDP,STO,1,"amd-stop ri=%d", ri);
773ebccf1e3SJoseph Koshy 
774ebccf1e3SJoseph Koshy 	/* turn off the PMC ENABLE bit */
775ebccf1e3SJoseph Koshy 	config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;
776ebccf1e3SJoseph Koshy 	wrmsr(pd->pm_evsel, config);
777c5445f8bSAndrew Gallatin 
778c5445f8bSAndrew Gallatin 	/*
779c5445f8bSAndrew Gallatin 	 * Newer AMD processors can deliver the overflow NMI with some
780c5445f8bSAndrew Gallatin 	 * latency; an NMI that arrives after its PMC has been stopped goes
78104389c85SGordon Bergling 	 * unclaimed, causing a panic or messages depending on the kernel configuration.
782c5445f8bSAndrew Gallatin 	 */
783c5445f8bSAndrew Gallatin 
784c5445f8bSAndrew Gallatin 	/* Wait for the count to be reset */
785c5445f8bSAndrew Gallatin 	for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
786c5445f8bSAndrew Gallatin 		if (rdmsr(pd->pm_perfctr) & (1ULL << (pd->pm_descr.pd_width - 1)))
787c5445f8bSAndrew Gallatin 			break;
788c5445f8bSAndrew Gallatin 
789c5445f8bSAndrew Gallatin 		DELAY(1);
790c5445f8bSAndrew Gallatin 	}
791c5445f8bSAndrew Gallatin 
792ebccf1e3SJoseph Koshy 	return 0;
793ebccf1e3SJoseph Koshy }
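
/*
 * Timeline sketch for the wait loop above (illustrative): a sampling
 * counter armed with reload count R holds 2^48 - R, so bit 47 is set
 * while it counts.  At overflow the value wraps around zero (bit 47
 * clear) and the NMI fires.  If amd_stop_pmc() runs in that window it
 * spins, for up to OVERFLOW_WAIT_COUNT iterations, until amd_intr() has
 * reloaded the counter and bit 47 is set again.
 */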
794ebccf1e3SJoseph Koshy 
795ebccf1e3SJoseph Koshy /*
796ebccf1e3SJoseph Koshy  * Interrupt handler.  This function needs to return '1' if the
797ebccf1e3SJoseph Koshy  * interrupt was raised by this CPU's PMCs or '0' otherwise.  It is not allowed
798ebccf1e3SJoseph Koshy  * to sleep or do anything a 'fast' interrupt handler is not allowed
799ebccf1e3SJoseph Koshy  * to do.
800ebccf1e3SJoseph Koshy  */
801ebccf1e3SJoseph Koshy 
802ebccf1e3SJoseph Koshy static int
803eb7c9019SMatt Macy amd_intr(struct trapframe *tf)
804ebccf1e3SJoseph Koshy {
805eb7c9019SMatt Macy 	int i, error, retval, cpu;
806dacc43dfSMatt Macy 	uint64_t config, evsel, perfctr;
807ebccf1e3SJoseph Koshy 	struct pmc *pm;
808e829eb6dSJoseph Koshy 	struct amd_cpu *pac;
809f263522aSJoseph Koshy 	pmc_value_t v;
810c5445f8bSAndrew Gallatin 	uint32_t active = 0, count = 0;
81136c0fd9dSJoseph Koshy 
812eb7c9019SMatt Macy 	cpu = curcpu;
813122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
814ebccf1e3SJoseph Koshy 	    ("[amd,%d] out of range CPU %d", __LINE__, cpu));
815ebccf1e3SJoseph Koshy 
8164a3690dfSJohn Baldwin 	PMCDBG3(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
817d07f36b0SJoseph Koshy 	    TRAPF_USERMODE(tf));
818f263522aSJoseph Koshy 
819ebccf1e3SJoseph Koshy 	retval = 0;
820ebccf1e3SJoseph Koshy 
821e829eb6dSJoseph Koshy 	pac = amd_pcpu[cpu];
822ebccf1e3SJoseph Koshy 
823ebccf1e3SJoseph Koshy 	/*
824ebccf1e3SJoseph Koshy 	 * look for all PMCs that have interrupted:
825f263522aSJoseph Koshy 	 * - look for a running, sampling PMC which has overflowed
826f263522aSJoseph Koshy 	 *   and which has a valid 'struct pmc' association
827f263522aSJoseph Koshy 	 *
828f263522aSJoseph Koshy 	 * If found, we call a helper to process the interrupt.
829bebaef4aSJoseph Koshy 	 *
830c5445f8bSAndrew Gallatin 	 * PMCs interrupting at the same time are collapsed into
831c5445f8bSAndrew Gallatin 	 * a single interrupt. Check all the valid pmcs for
832c5445f8bSAndrew Gallatin 	 * overflow.
833ebccf1e3SJoseph Koshy 	 */
834ebccf1e3SJoseph Koshy 
835c5445f8bSAndrew Gallatin 	for (i = 0; i < AMD_CORE_NPMCS; i++) {
836f263522aSJoseph Koshy 
837dfd9bc23SJoseph Koshy 		if ((pm = pac->pc_amdpmcs[i].phw_pmc) == NULL ||
838f263522aSJoseph Koshy 		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
839ebccf1e3SJoseph Koshy 			continue;
840ebccf1e3SJoseph Koshy 		}
841ebccf1e3SJoseph Koshy 
842c5445f8bSAndrew Gallatin 		/* Consider pmc with valid handle as active */
843c5445f8bSAndrew Gallatin 		active++;
844c5445f8bSAndrew Gallatin 
845dfd9bc23SJoseph Koshy 		if (!AMD_PMC_HAS_OVERFLOWED(i))
846dfd9bc23SJoseph Koshy 			continue;
847dfd9bc23SJoseph Koshy 
848d07f36b0SJoseph Koshy 		retval = 1;	/* Found an interrupting PMC. */
849bebaef4aSJoseph Koshy 
850dfd9bc23SJoseph Koshy 		if (pm->pm_state != PMC_STATE_RUNNING)
851dfd9bc23SJoseph Koshy 			continue;
852dfd9bc23SJoseph Koshy 
853d07f36b0SJoseph Koshy 		/* Stop the PMC, reload count. */
854c5445f8bSAndrew Gallatin 		evsel	= amd_pmcdesc[i].pm_evsel;
855c5445f8bSAndrew Gallatin 		perfctr	= amd_pmcdesc[i].pm_perfctr;
856f263522aSJoseph Koshy 		v       = pm->pm_sc.pm_reloadcount;
857f263522aSJoseph Koshy 		config  = rdmsr(evsel);
858f263522aSJoseph Koshy 
859f263522aSJoseph Koshy 		KASSERT((config & ~AMD_PMC_ENABLE) ==
860f263522aSJoseph Koshy 		    (pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE),
861dacc43dfSMatt Macy 		    ("[amd,%d] config mismatch reg=0x%jx pm=0x%jx", __LINE__,
862dacc43dfSMatt Macy 			 (uintmax_t)config, (uintmax_t)pm->pm_md.pm_amd.pm_amd_evsel));
863f263522aSJoseph Koshy 
864f263522aSJoseph Koshy 		wrmsr(evsel, config & ~AMD_PMC_ENABLE);
865f263522aSJoseph Koshy 		wrmsr(perfctr, AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v));
866f263522aSJoseph Koshy 
867d07f36b0SJoseph Koshy 		/* Restart the counter if logging succeeded. */
868eb7c9019SMatt Macy 		error = pmc_process_interrupt(PMC_HR, pm, tf);
869f263522aSJoseph Koshy 		if (error == 0)
8703c1f73b1SAndriy Gapon 			wrmsr(evsel, config);
871ebccf1e3SJoseph Koshy 	}
872f263522aSJoseph Koshy 
873c5445f8bSAndrew Gallatin 	/*
874c5445f8bSAndrew Gallatin 	 * Due to NMI latency, there can be a scenario in which
875c5445f8bSAndrew Gallatin 	 * multiple PMCs get serviced in an earlier NMI and we
876c5445f8bSAndrew Gallatin 	 * do not find an overflow in the subsequent NMI.
877c5445f8bSAndrew Gallatin 	 *
878c5445f8bSAndrew Gallatin 	 * For such cases we keep a per-cpu count of active NMIs
879c5445f8bSAndrew Gallatin 	 * and compare it with min(active pmcs, 2) to determine
880c5445f8bSAndrew Gallatin 	 * if this NMI was for a pmc overflow which was serviced
881c5445f8bSAndrew Gallatin 	 * in an earlier request or should be ignored.
882c5445f8bSAndrew Gallatin 	 */
883c5445f8bSAndrew Gallatin 
884c5445f8bSAndrew Gallatin 	if (retval) {
885c5445f8bSAndrew Gallatin 		DPCPU_SET(nmi_counter, min(2, active));
886c5445f8bSAndrew Gallatin 	} else {
887c5445f8bSAndrew Gallatin 		if ((count = DPCPU_GET(nmi_counter))) {
888c5445f8bSAndrew Gallatin 			retval = 1;
889c5445f8bSAndrew Gallatin 			DPCPU_SET(nmi_counter, --count);
890c5445f8bSAndrew Gallatin 		}
891c5445f8bSAndrew Gallatin 	}
892c5445f8bSAndrew Gallatin 
893e6b475e0SMatt Macy 	if (retval)
894e6b475e0SMatt Macy 		counter_u64_add(pmc_stats.pm_intr_processed, 1);
895e6b475e0SMatt Macy 	else
896e6b475e0SMatt Macy 		counter_u64_add(pmc_stats.pm_intr_ignored, 1);
897fbf1556dSJoseph Koshy 
8983c1f73b1SAndriy Gapon 	PMCDBG1(MDP,INT,2, "retval=%d", retval);
899d07f36b0SJoseph Koshy 	return (retval);
900ebccf1e3SJoseph Koshy }
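
/*
 * Scenario sketch for the nmi_counter bookkeeping above (illustrative):
 * two sampling PMCs overflow almost simultaneously.  The first NMI finds
 * both overflowed and services both, then sets nmi_counter to
 * min(2, active) = 2.  The NMI latched for the second overflow arrives
 * next, finds nothing overflowed, and instead of being reported as
 * spurious is claimed here while nmi_counter is decremented.
 */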
901ebccf1e3SJoseph Koshy 
902ebccf1e3SJoseph Koshy /*
903ebccf1e3SJoseph Koshy  * describe a PMC
904ebccf1e3SJoseph Koshy  */
905ebccf1e3SJoseph Koshy static int
906ebccf1e3SJoseph Koshy amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
907ebccf1e3SJoseph Koshy {
908ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
909ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
910ebccf1e3SJoseph Koshy 
911122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
912ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU %d", __LINE__, cpu));
913ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
914ebccf1e3SJoseph Koshy 	    ("[amd,%d] row-index %d out of range", __LINE__, ri));
915ebccf1e3SJoseph Koshy 
916e829eb6dSJoseph Koshy 	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
917ebccf1e3SJoseph Koshy 	pd  = &amd_pmcdesc[ri];
918ebccf1e3SJoseph Koshy 
919*31610e34SMitchell Horne 	strlcpy(pi->pm_name, pd->pm_descr.pd_name, sizeof(pi->pm_name));
920ebccf1e3SJoseph Koshy 	pi->pm_class = pd->pm_descr.pd_class;
921ebccf1e3SJoseph Koshy 
922ebccf1e3SJoseph Koshy 	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
923ebccf1e3SJoseph Koshy 		pi->pm_enabled = TRUE;
924ebccf1e3SJoseph Koshy 		*ppmc          = phw->phw_pmc;
925ebccf1e3SJoseph Koshy 	} else {
926ebccf1e3SJoseph Koshy 		pi->pm_enabled = FALSE;
927ebccf1e3SJoseph Koshy 		*ppmc          = NULL;
928ebccf1e3SJoseph Koshy 	}
929ebccf1e3SJoseph Koshy 
930ebccf1e3SJoseph Koshy 	return 0;
931ebccf1e3SJoseph Koshy }
932ebccf1e3SJoseph Koshy 
933ebccf1e3SJoseph Koshy /*
934ebccf1e3SJoseph Koshy  * i386 specific entry points
935ebccf1e3SJoseph Koshy  */
936ebccf1e3SJoseph Koshy 
937ebccf1e3SJoseph Koshy /*
938ebccf1e3SJoseph Koshy  * return the MSR address of the given PMC.
939ebccf1e3SJoseph Koshy  */
940ebccf1e3SJoseph Koshy 
941ebccf1e3SJoseph Koshy static int
942ebccf1e3SJoseph Koshy amd_get_msr(int ri, uint32_t *msr)
943ebccf1e3SJoseph Koshy {
944ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
945ebccf1e3SJoseph Koshy 	    ("[amd,%d] ri %d out of range", __LINE__, ri));
946ebccf1e3SJoseph Koshy 
9476b8c8cd8SJoseph Koshy 	*msr = amd_pmcdesc[ri].pm_perfctr - AMD_PMC_PERFCTR_0;
948e829eb6dSJoseph Koshy 
949e829eb6dSJoseph Koshy 	return (0);
950ebccf1e3SJoseph Koshy }
951ebccf1e3SJoseph Koshy 
952ebccf1e3SJoseph Koshy /*
953ebccf1e3SJoseph Koshy  * processor dependent initialization.
954ebccf1e3SJoseph Koshy  */
955ebccf1e3SJoseph Koshy 
956ebccf1e3SJoseph Koshy static int
957e829eb6dSJoseph Koshy amd_pcpu_init(struct pmc_mdep *md, int cpu)
958ebccf1e3SJoseph Koshy {
959e829eb6dSJoseph Koshy 	int classindex, first_ri, n;
960e829eb6dSJoseph Koshy 	struct pmc_cpu *pc;
961e829eb6dSJoseph Koshy 	struct amd_cpu *pac;
962ebccf1e3SJoseph Koshy 	struct pmc_hw  *phw;
963ebccf1e3SJoseph Koshy 
964122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
965ebccf1e3SJoseph Koshy 	    ("[amd,%d] insane cpu number %d", __LINE__, cpu));
966ebccf1e3SJoseph Koshy 
9674a3690dfSJohn Baldwin 	PMCDBG1(MDP,INI,1,"amd-init cpu=%d", cpu);
968ebccf1e3SJoseph Koshy 
969e829eb6dSJoseph Koshy 	amd_pcpu[cpu] = pac = malloc(sizeof(struct amd_cpu), M_PMC,
970ebccf1e3SJoseph Koshy 	    M_WAITOK|M_ZERO);
971ebccf1e3SJoseph Koshy 
972ebccf1e3SJoseph Koshy 	/*
973e829eb6dSJoseph Koshy 	 * Set the content of the hardware descriptors to a known
974e829eb6dSJoseph Koshy 	 * state and initialize pointers in the MI per-cpu descriptor.
975ebccf1e3SJoseph Koshy 	 */
976e829eb6dSJoseph Koshy 	pc = pmc_pcpu[cpu];
977e829eb6dSJoseph Koshy #if	defined(__amd64__)
978e829eb6dSJoseph Koshy 	classindex = PMC_MDEP_CLASS_INDEX_K8;
979e829eb6dSJoseph Koshy #elif	defined(__i386__)
980e829eb6dSJoseph Koshy 	classindex = md->pmd_cputype == PMC_CPU_AMD_K8 ?
981e829eb6dSJoseph Koshy 	    PMC_MDEP_CLASS_INDEX_K8 : PMC_MDEP_CLASS_INDEX_K7;
982e829eb6dSJoseph Koshy #endif
983e829eb6dSJoseph Koshy 	first_ri = md->pmd_classdep[classindex].pcd_ri;
984ebccf1e3SJoseph Koshy 
985e829eb6dSJoseph Koshy 	KASSERT(pc != NULL, ("[amd,%d] NULL per-cpu pointer", __LINE__));
986e829eb6dSJoseph Koshy 
987e829eb6dSJoseph Koshy 	for (n = 0, phw = pac->pc_amdpmcs; n < AMD_NPMCS; n++, phw++) {
988ebccf1e3SJoseph Koshy 		phw->phw_state 	  = PMC_PHW_FLAG_IS_ENABLED |
989ebccf1e3SJoseph Koshy 		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
990ebccf1e3SJoseph Koshy 		phw->phw_pmc	  = NULL;
991e829eb6dSJoseph Koshy 		pc->pc_hwpmcs[n + first_ri]  = phw;
992ebccf1e3SJoseph Koshy 	}
993ebccf1e3SJoseph Koshy 
994e829eb6dSJoseph Koshy 	return (0);
995ebccf1e3SJoseph Koshy }
996ebccf1e3SJoseph Koshy 
997ebccf1e3SJoseph Koshy 
998ebccf1e3SJoseph Koshy /*
999ebccf1e3SJoseph Koshy  * processor dependent cleanup prior to the KLD
1000ebccf1e3SJoseph Koshy  * being unloaded
1001ebccf1e3SJoseph Koshy  */
1002ebccf1e3SJoseph Koshy 
1003ebccf1e3SJoseph Koshy static int
1004e829eb6dSJoseph Koshy amd_pcpu_fini(struct pmc_mdep *md, int cpu)
1005ebccf1e3SJoseph Koshy {
1006e829eb6dSJoseph Koshy 	int classindex, first_ri, i;
1007ebccf1e3SJoseph Koshy 	uint32_t evsel;
1008e829eb6dSJoseph Koshy 	struct pmc_cpu *pc;
1009e829eb6dSJoseph Koshy 	struct amd_cpu *pac;
1010ebccf1e3SJoseph Koshy 
1011122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
1012ebccf1e3SJoseph Koshy 	    ("[amd,%d] insane cpu number (%d)", __LINE__, cpu));
1013ebccf1e3SJoseph Koshy 
10144a3690dfSJohn Baldwin 	PMCDBG1(MDP,INI,1,"amd-cleanup cpu=%d", cpu);
1015ebccf1e3SJoseph Koshy 
1016ebccf1e3SJoseph Koshy 	/*
1017ebccf1e3SJoseph Koshy 	 * First, turn off all PMCs on this CPU.
1018ebccf1e3SJoseph Koshy 	 */
1019ebccf1e3SJoseph Koshy 	for (i = 0; i < 4; i++) { /* XXX this loop is now not needed */
1020ebccf1e3SJoseph Koshy 		evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
1021ebccf1e3SJoseph Koshy 		evsel &= ~AMD_PMC_ENABLE;
1022ebccf1e3SJoseph Koshy 		wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
1023ebccf1e3SJoseph Koshy 	}
1024ebccf1e3SJoseph Koshy 
1025ebccf1e3SJoseph Koshy 	/*
1026ebccf1e3SJoseph Koshy 	 * Next, free up allocated space.
1027ebccf1e3SJoseph Koshy 	 */
1028e829eb6dSJoseph Koshy 	if ((pac = amd_pcpu[cpu]) == NULL)
1029e829eb6dSJoseph Koshy 		return (0);
1030ebccf1e3SJoseph Koshy 
1031e829eb6dSJoseph Koshy 	amd_pcpu[cpu] = NULL;
1032ebccf1e3SJoseph Koshy 
1033680f1afdSJohn Baldwin #ifdef	HWPMC_DEBUG
1034e829eb6dSJoseph Koshy 	for (i = 0; i < AMD_NPMCS; i++) {
1035e829eb6dSJoseph Koshy 		KASSERT(pac->pc_amdpmcs[i].phw_pmc == NULL,
1036ebccf1e3SJoseph Koshy 		    ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
1037e67c0426SAndriy Gapon 		KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + i),
1038ebccf1e3SJoseph Koshy 		    ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
1039ebccf1e3SJoseph Koshy 	}
1040ebccf1e3SJoseph Koshy #endif
1041ebccf1e3SJoseph Koshy 
1042e829eb6dSJoseph Koshy 	pc = pmc_pcpu[cpu];
1043e829eb6dSJoseph Koshy 	KASSERT(pc != NULL, ("[amd,%d] NULL per-cpu state", __LINE__));
1044e829eb6dSJoseph Koshy 
1045e829eb6dSJoseph Koshy #if	defined(__amd64__)
1046e829eb6dSJoseph Koshy 	classindex = PMC_MDEP_CLASS_INDEX_K8;
1047e829eb6dSJoseph Koshy #elif	defined(__i386__)
1048e829eb6dSJoseph Koshy 	classindex = md->pmd_cputype == PMC_CPU_AMD_K8 ? PMC_MDEP_CLASS_INDEX_K8 :
1049e829eb6dSJoseph Koshy 	    PMC_MDEP_CLASS_INDEX_K7;
1050e829eb6dSJoseph Koshy #endif
1051e829eb6dSJoseph Koshy 	first_ri = md->pmd_classdep[classindex].pcd_ri;
1052e829eb6dSJoseph Koshy 
1053e829eb6dSJoseph Koshy 	/*
1054e829eb6dSJoseph Koshy 	 * Reset pointers in the MI 'per-cpu' state.
1055e829eb6dSJoseph Koshy 	 */
1056e829eb6dSJoseph Koshy 	for (i = 0; i < AMD_NPMCS; i++) {
1057e829eb6dSJoseph Koshy 		pc->pc_hwpmcs[i + first_ri] = NULL;
1058e829eb6dSJoseph Koshy 	}
1059e829eb6dSJoseph Koshy 
1060e829eb6dSJoseph Koshy 
1061e829eb6dSJoseph Koshy 	free(pac, M_PMC);
1062e829eb6dSJoseph Koshy 
1063e829eb6dSJoseph Koshy 	return (0);
1064ebccf1e3SJoseph Koshy }
1065ebccf1e3SJoseph Koshy 
1066ebccf1e3SJoseph Koshy /*
1067ebccf1e3SJoseph Koshy  * Initialize ourselves.
1068ebccf1e3SJoseph Koshy  */
1069ebccf1e3SJoseph Koshy 
1070ebccf1e3SJoseph Koshy struct pmc_mdep *
1071ebccf1e3SJoseph Koshy pmc_amd_initialize(void)
1072ebccf1e3SJoseph Koshy {
1073f5f9340bSFabien Thomas 	int classindex, error, i, ncpus;
1074e829eb6dSJoseph Koshy 	struct pmc_classdep *pcd;
1075f263522aSJoseph Koshy 	enum pmc_cputype cputype;
1076ebccf1e3SJoseph Koshy 	struct pmc_mdep *pmc_mdep;
1077e829eb6dSJoseph Koshy 	enum pmc_class class;
1078ef013ceeSRyan Moeller 	int family, model, stepping;
1079f263522aSJoseph Koshy 	char *name;
1080ebccf1e3SJoseph Koshy 
1081f263522aSJoseph Koshy 	/*
1082f263522aSJoseph Koshy 	 * The presence of hardware performance counters on the AMD
1083f263522aSJoseph Koshy 	 * Athlon, Duron or later processors, is _not_ indicated by
1084f263522aSJoseph Koshy 	 * any of the processor feature flags set by the 'CPUID'
1085f263522aSJoseph Koshy 	 * instruction, so we only check the 'instruction family'
1086f263522aSJoseph Koshy  * instruction, so we only check the processor family field
1087f263522aSJoseph Koshy  * returned by CPUID and require a family >= 6.
1088ebccf1e3SJoseph Koshy 
108954bad7c6SJoseph Koshy 	name = NULL;
1090ef013ceeSRyan Moeller 	family = CPUID_TO_FAMILY(cpu_id);
1091ef013ceeSRyan Moeller 	model = CPUID_TO_MODEL(cpu_id);
1092ef013ceeSRyan Moeller 	stepping = CPUID_TO_STEPPING(cpu_id);
1093ef013ceeSRyan Moeller 
1094ef013ceeSRyan Moeller 	if (family == 0x18)
10951791cad0SAlexander Motin 		snprintf(pmc_cpuid, sizeof(pmc_cpuid), "HygonGenuine-%d-%02X-%X",
1096ef013ceeSRyan Moeller 		    family, model, stepping);
1097ef013ceeSRyan Moeller 	else
1098ef013ceeSRyan Moeller 		snprintf(pmc_cpuid, sizeof(pmc_cpuid), "AuthenticAMD-%d-%02X-%X",
1099ef013ceeSRyan Moeller 		    family, model, stepping);
110081eb4dcfSMatt Macy 
1101f263522aSJoseph Koshy 	switch (cpu_id & 0xF00) {
1102e829eb6dSJoseph Koshy #if	defined(__i386__)
1103f263522aSJoseph Koshy 	case 0x600:		/* Athlon(tm) processor */
1104e829eb6dSJoseph Koshy 		classindex = PMC_MDEP_CLASS_INDEX_K7;
1105f263522aSJoseph Koshy 		cputype = PMC_CPU_AMD_K7;
1106f263522aSJoseph Koshy 		class = PMC_CLASS_K7;
1107f263522aSJoseph Koshy 		name = "K7";
1108f263522aSJoseph Koshy 		break;
1109e829eb6dSJoseph Koshy #endif
1110f263522aSJoseph Koshy 	case 0xF00:		/* Athlon64/Opteron processor */
1111e829eb6dSJoseph Koshy 		classindex = PMC_MDEP_CLASS_INDEX_K8;
1112f263522aSJoseph Koshy 		cputype = PMC_CPU_AMD_K8;
1113f263522aSJoseph Koshy 		class = PMC_CLASS_K8;
1114f263522aSJoseph Koshy 		name = "K8";
1115f263522aSJoseph Koshy 		break;
1116f263522aSJoseph Koshy 
1117b38c0519SDimitry Andric 	default:
111881eb4dcfSMatt Macy 		(void) printf("pmc: Unknown AMD CPU %x %d-%d.\n", cpu_id, (cpu_id & 0xF00) >> 8, model);
1119ebccf1e3SJoseph Koshy 		return NULL;
1120f263522aSJoseph Koshy 	}
1121f263522aSJoseph Koshy 
1122680f1afdSJohn Baldwin #ifdef	HWPMC_DEBUG
1123f263522aSJoseph Koshy 	amd_pmc_class = class;
1124f263522aSJoseph Koshy #endif
1125ebccf1e3SJoseph Koshy 
1126e829eb6dSJoseph Koshy 	/*
1127e829eb6dSJoseph Koshy 	 * Allocate space for pointers to PMC HW descriptors and for
1128e829eb6dSJoseph Koshy 	 * the MDEP structure used by MI code.
1129e829eb6dSJoseph Koshy 	 */
1130e829eb6dSJoseph Koshy 	amd_pcpu = malloc(sizeof(struct amd_cpu *) * pmc_cpu_max(), M_PMC,
1131e829eb6dSJoseph Koshy 	    M_WAITOK|M_ZERO);
1132e829eb6dSJoseph Koshy 
1133e829eb6dSJoseph Koshy 	/*
1134e829eb6dSJoseph Koshy 	 * These processors have two classes of PMCs: the TSC and
1135e829eb6dSJoseph Koshy 	 * programmable PMCs.
1136e829eb6dSJoseph Koshy 	 */
1137f5f9340bSFabien Thomas 	pmc_mdep = pmc_mdep_alloc(2);
1138ebccf1e3SJoseph Koshy 
1139f263522aSJoseph Koshy 	pmc_mdep->pmd_cputype = cputype;
1140ebccf1e3SJoseph Koshy 
1141e829eb6dSJoseph Koshy 	ncpus = pmc_cpu_max();
1142c5153e19SJoseph Koshy 
1143e829eb6dSJoseph Koshy 	/* Initialize the TSC. */
1144e829eb6dSJoseph Koshy 	error = pmc_tsc_initialize(pmc_mdep, ncpus);
1145e829eb6dSJoseph Koshy 	if (error)
1146e829eb6dSJoseph Koshy 		goto error;
1147c5153e19SJoseph Koshy 
1148e829eb6dSJoseph Koshy 	/* Initialize AMD K7 and K8 PMC handling. */
1149e829eb6dSJoseph Koshy 	pcd = &pmc_mdep->pmd_classdep[classindex];
1150c5153e19SJoseph Koshy 
1151e829eb6dSJoseph Koshy 	pcd->pcd_caps		= AMD_PMC_CAPS;
1152e829eb6dSJoseph Koshy 	pcd->pcd_class		= class;
1153e829eb6dSJoseph Koshy 	pcd->pcd_num		= AMD_NPMCS;
1154e829eb6dSJoseph Koshy 	pcd->pcd_ri		= pmc_mdep->pmd_npmc;
1155e829eb6dSJoseph Koshy 	pcd->pcd_width		= 48;
1156ebccf1e3SJoseph Koshy 
1157f263522aSJoseph Koshy 	/* fill in the correct pmc name and class */
1158e829eb6dSJoseph Koshy 	for (i = 0; i < AMD_NPMCS; i++) {
1159f263522aSJoseph Koshy 		(void) snprintf(amd_pmcdesc[i].pm_descr.pd_name,
1160f263522aSJoseph Koshy 		    sizeof(amd_pmcdesc[i].pm_descr.pd_name), "%s-%d",
11618cd64ec8SJoseph Koshy 		    name, i);
1162f263522aSJoseph Koshy 		amd_pmcdesc[i].pm_descr.pd_class = class;
1163f263522aSJoseph Koshy 	}
1164f263522aSJoseph Koshy 
1165e829eb6dSJoseph Koshy 	pcd->pcd_allocate_pmc	= amd_allocate_pmc;
1166e829eb6dSJoseph Koshy 	pcd->pcd_config_pmc	= amd_config_pmc;
1167e829eb6dSJoseph Koshy 	pcd->pcd_describe	= amd_describe;
1168e829eb6dSJoseph Koshy 	pcd->pcd_get_config	= amd_get_config;
1169e829eb6dSJoseph Koshy 	pcd->pcd_get_msr	= amd_get_msr;
1170e829eb6dSJoseph Koshy 	pcd->pcd_pcpu_fini	= amd_pcpu_fini;
1171e829eb6dSJoseph Koshy 	pcd->pcd_pcpu_init	= amd_pcpu_init;
1172e829eb6dSJoseph Koshy 	pcd->pcd_read_pmc	= amd_read_pmc;
1173e829eb6dSJoseph Koshy 	pcd->pcd_release_pmc	= amd_release_pmc;
1174e829eb6dSJoseph Koshy 	pcd->pcd_start_pmc	= amd_start_pmc;
1175e829eb6dSJoseph Koshy 	pcd->pcd_stop_pmc	= amd_stop_pmc;
1176e829eb6dSJoseph Koshy 	pcd->pcd_write_pmc	= amd_write_pmc;
1177e829eb6dSJoseph Koshy 
1178e829eb6dSJoseph Koshy 	pmc_mdep->pmd_pcpu_init = NULL;
1179e829eb6dSJoseph Koshy 	pmc_mdep->pmd_pcpu_fini = NULL;
1180e829eb6dSJoseph Koshy 	pmc_mdep->pmd_intr	= amd_intr;
1181ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_switch_in = amd_switch_in;
1182ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_switch_out = amd_switch_out;
1183e829eb6dSJoseph Koshy 
1184e829eb6dSJoseph Koshy 	pmc_mdep->pmd_npmc     += AMD_NPMCS;
1185ebccf1e3SJoseph Koshy 
11864a3690dfSJohn Baldwin 	PMCDBG0(MDP,INI,0,"amd-initialize");
1187ebccf1e3SJoseph Koshy 
1188e829eb6dSJoseph Koshy 	return (pmc_mdep);
1189e829eb6dSJoseph Koshy 
1190e829eb6dSJoseph Koshy   error:
1191e829eb6dSJoseph Koshy 	if (error) {
1192e829eb6dSJoseph Koshy 		free(pmc_mdep, M_PMC);
1193e829eb6dSJoseph Koshy 		pmc_mdep = NULL;
1194e829eb6dSJoseph Koshy 	}
1195e829eb6dSJoseph Koshy 
1196e829eb6dSJoseph Koshy 	return (NULL);
1197e829eb6dSJoseph Koshy }
1198e829eb6dSJoseph Koshy 
1199e829eb6dSJoseph Koshy /*
1200e829eb6dSJoseph Koshy  * Finalization code for AMD CPUs.
1201e829eb6dSJoseph Koshy  */
1202e829eb6dSJoseph Koshy 
1203e829eb6dSJoseph Koshy void
1204e829eb6dSJoseph Koshy pmc_amd_finalize(struct pmc_mdep *md)
1205e829eb6dSJoseph Koshy {
1206e829eb6dSJoseph Koshy #if	defined(INVARIANTS)
1207e829eb6dSJoseph Koshy 	int classindex, i, ncpus, pmcclass;
1208e829eb6dSJoseph Koshy #endif
1209e829eb6dSJoseph Koshy 
1210e829eb6dSJoseph Koshy 	pmc_tsc_finalize(md);
1211e829eb6dSJoseph Koshy 
1212e829eb6dSJoseph Koshy 	KASSERT(amd_pcpu != NULL, ("[amd,%d] NULL per-cpu array pointer",
1213e829eb6dSJoseph Koshy 	    __LINE__));
1214e829eb6dSJoseph Koshy 
1215e829eb6dSJoseph Koshy #if	defined(INVARIANTS)
1216e829eb6dSJoseph Koshy 	switch (md->pmd_cputype) {
1217e829eb6dSJoseph Koshy #if	defined(__i386__)
1218e829eb6dSJoseph Koshy 	case PMC_CPU_AMD_K7:
1219e829eb6dSJoseph Koshy 		classindex = PMC_MDEP_CLASS_INDEX_K7;
1220e829eb6dSJoseph Koshy 		pmcclass = PMC_CLASS_K7;
1221e829eb6dSJoseph Koshy 		break;
1222e829eb6dSJoseph Koshy #endif
1223e829eb6dSJoseph Koshy 	default:
1224e829eb6dSJoseph Koshy 		classindex = PMC_MDEP_CLASS_INDEX_K8;
1225e829eb6dSJoseph Koshy 		pmcclass = PMC_CLASS_K8;
1226e829eb6dSJoseph Koshy 	}
1227e829eb6dSJoseph Koshy 
1228e829eb6dSJoseph Koshy 	KASSERT(md->pmd_classdep[classindex].pcd_class == pmcclass,
1229e829eb6dSJoseph Koshy 	    ("[amd,%d] pmc class mismatch", __LINE__));
1230e829eb6dSJoseph Koshy 
1231e829eb6dSJoseph Koshy 	ncpus = pmc_cpu_max();
1232e829eb6dSJoseph Koshy 
1233e829eb6dSJoseph Koshy 	for (i = 0; i < ncpus; i++)
1234e829eb6dSJoseph Koshy 		KASSERT(amd_pcpu[i] == NULL, ("[amd,%d] non-null pcpu",
1235e829eb6dSJoseph Koshy 		    __LINE__));
1236e829eb6dSJoseph Koshy #endif
1237e829eb6dSJoseph Koshy 
1238e829eb6dSJoseph Koshy 	free(amd_pcpu, M_PMC);
1239e829eb6dSJoseph Koshy 	amd_pcpu = NULL;
1240ebccf1e3SJoseph Koshy }
1241