xref: /freebsd/sys/dev/hwpmc/hwpmc_amd.c (revision 1791cad0a99ffa1cba463ac97e4fde7cb78869e0)
1ebccf1e3SJoseph Koshy /*-
2718cf2ccSPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3718cf2ccSPedro F. Giffuni  *
4122ccdc1SJoseph Koshy  * Copyright (c) 2003-2008 Joseph Koshy
5d07f36b0SJoseph Koshy  * Copyright (c) 2007 The FreeBSD Foundation
6ebccf1e3SJoseph Koshy  * All rights reserved.
7ebccf1e3SJoseph Koshy  *
8d07f36b0SJoseph Koshy  * Portions of this software were developed by A. Joseph Koshy under
9d07f36b0SJoseph Koshy  * sponsorship from the FreeBSD Foundation and Google, Inc.
10d07f36b0SJoseph Koshy  *
11ebccf1e3SJoseph Koshy  * Redistribution and use in source and binary forms, with or without
12ebccf1e3SJoseph Koshy  * modification, are permitted provided that the following conditions
13ebccf1e3SJoseph Koshy  * are met:
14ebccf1e3SJoseph Koshy  * 1. Redistributions of source code must retain the above copyright
15ebccf1e3SJoseph Koshy  *    notice, this list of conditions and the following disclaimer.
16ebccf1e3SJoseph Koshy  * 2. Redistributions in binary form must reproduce the above copyright
17ebccf1e3SJoseph Koshy  *    notice, this list of conditions and the following disclaimer in the
18ebccf1e3SJoseph Koshy  *    documentation and/or other materials provided with the distribution.
19ebccf1e3SJoseph Koshy  *
20ebccf1e3SJoseph Koshy  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21ebccf1e3SJoseph Koshy  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22ebccf1e3SJoseph Koshy  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ebccf1e3SJoseph Koshy  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24ebccf1e3SJoseph Koshy  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25ebccf1e3SJoseph Koshy  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26ebccf1e3SJoseph Koshy  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27ebccf1e3SJoseph Koshy  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28ebccf1e3SJoseph Koshy  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29ebccf1e3SJoseph Koshy  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30ebccf1e3SJoseph Koshy  * SUCH DAMAGE.
31ebccf1e3SJoseph Koshy  */
32ebccf1e3SJoseph Koshy 
33ebccf1e3SJoseph Koshy #include <sys/cdefs.h>
34ebccf1e3SJoseph Koshy __FBSDID("$FreeBSD$");
35ebccf1e3SJoseph Koshy 
36ebccf1e3SJoseph Koshy /* Support for the AMD K7 and later processors */
37ebccf1e3SJoseph Koshy 
38ebccf1e3SJoseph Koshy #include <sys/param.h>
39ebccf1e3SJoseph Koshy #include <sys/lock.h>
40ebccf1e3SJoseph Koshy #include <sys/malloc.h>
41ebccf1e3SJoseph Koshy #include <sys/mutex.h>
42c5445f8bSAndrew Gallatin #include <sys/pcpu.h>
437ad17ef9SMarcel Moolenaar #include <sys/pmc.h>
44122ccdc1SJoseph Koshy #include <sys/pmckern.h>
45ebccf1e3SJoseph Koshy #include <sys/smp.h>
46ebccf1e3SJoseph Koshy #include <sys/systm.h>
47ebccf1e3SJoseph Koshy 
48d07f36b0SJoseph Koshy #include <machine/cpu.h>
49f263522aSJoseph Koshy #include <machine/cpufunc.h>
50ebccf1e3SJoseph Koshy #include <machine/md_var.h>
51f263522aSJoseph Koshy #include <machine/specialreg.h>
52ebccf1e3SJoseph Koshy 
53680f1afdSJohn Baldwin #ifdef	HWPMC_DEBUG
54f263522aSJoseph Koshy enum pmc_class	amd_pmc_class;
55ebccf1e3SJoseph Koshy #endif
56ebccf1e3SJoseph Koshy 
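/*
 * OVERFLOW_WAIT_COUNT bounds the DELAY(1) wait loop in amd_stop_pmc();
 * nmi_counter is the per-CPU count used by amd_intr() to claim NMIs
 * whose overflows were already serviced by an earlier NMI.
 */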
57c5445f8bSAndrew Gallatin #define	OVERFLOW_WAIT_COUNT	50
58c5445f8bSAndrew Gallatin 
59c5445f8bSAndrew Gallatin DPCPU_DEFINE_STATIC(uint32_t, nmi_counter);
60c5445f8bSAndrew Gallatin 
61ebccf1e3SJoseph Koshy /* AMD K7 & K8 PMCs */
62ebccf1e3SJoseph Koshy struct amd_descr {
63ebccf1e3SJoseph Koshy 	struct pmc_descr pm_descr;  /* "base class" */
64ebccf1e3SJoseph Koshy 	uint32_t	pm_evsel;   /* address of EVSEL register */
65ebccf1e3SJoseph Koshy 	uint32_t	pm_perfctr; /* address of PERFCTR register */
66ebccf1e3SJoseph Koshy };
67ebccf1e3SJoseph Koshy 
68f263522aSJoseph Koshy static  struct amd_descr amd_pmcdesc[AMD_NPMCS] =
69ebccf1e3SJoseph Koshy {
70ebccf1e3SJoseph Koshy     {
71ebccf1e3SJoseph Koshy 	.pm_descr =
72ebccf1e3SJoseph Koshy 	{
73f263522aSJoseph Koshy 		.pd_name  = "",
74f263522aSJoseph Koshy 		.pd_class = -1,
75ebccf1e3SJoseph Koshy 		.pd_caps  = AMD_PMC_CAPS,
76ebccf1e3SJoseph Koshy 		.pd_width = 48
77ebccf1e3SJoseph Koshy 	},
78ebccf1e3SJoseph Koshy 	.pm_evsel   = AMD_PMC_EVSEL_0,
79ebccf1e3SJoseph Koshy 	.pm_perfctr = AMD_PMC_PERFCTR_0
80ebccf1e3SJoseph Koshy     },
81ebccf1e3SJoseph Koshy     {
82ebccf1e3SJoseph Koshy 	.pm_descr =
83ebccf1e3SJoseph Koshy 	{
84f263522aSJoseph Koshy 		.pd_name  = "",
85f263522aSJoseph Koshy 		.pd_class = -1,
86ebccf1e3SJoseph Koshy 		.pd_caps  = AMD_PMC_CAPS,
87ebccf1e3SJoseph Koshy 		.pd_width = 48
88ebccf1e3SJoseph Koshy 	},
89ebccf1e3SJoseph Koshy 	.pm_evsel   = AMD_PMC_EVSEL_1,
90ebccf1e3SJoseph Koshy 	.pm_perfctr = AMD_PMC_PERFCTR_1
91ebccf1e3SJoseph Koshy     },
92ebccf1e3SJoseph Koshy     {
93ebccf1e3SJoseph Koshy 	.pm_descr =
94ebccf1e3SJoseph Koshy 	{
95f263522aSJoseph Koshy 		.pd_name  = "",
96f263522aSJoseph Koshy 		.pd_class = -1,
97ebccf1e3SJoseph Koshy 		.pd_caps  = AMD_PMC_CAPS,
98ebccf1e3SJoseph Koshy 		.pd_width = 48
99ebccf1e3SJoseph Koshy 	},
100ebccf1e3SJoseph Koshy 	.pm_evsel   = AMD_PMC_EVSEL_2,
101ebccf1e3SJoseph Koshy 	.pm_perfctr = AMD_PMC_PERFCTR_2
102ebccf1e3SJoseph Koshy     },
103ebccf1e3SJoseph Koshy     {
104ebccf1e3SJoseph Koshy 	.pm_descr =
105ebccf1e3SJoseph Koshy 	{
106f263522aSJoseph Koshy 		.pd_name  = "",
107f263522aSJoseph Koshy 		.pd_class = -1,
108ebccf1e3SJoseph Koshy 		.pd_caps  = AMD_PMC_CAPS,
109ebccf1e3SJoseph Koshy 		.pd_width = 48
110ebccf1e3SJoseph Koshy 	},
111ebccf1e3SJoseph Koshy 	.pm_evsel   = AMD_PMC_EVSEL_3,
112ebccf1e3SJoseph Koshy 	.pm_perfctr = AMD_PMC_PERFCTR_3
113dacc43dfSMatt Macy      },
114dacc43dfSMatt Macy     {
115dacc43dfSMatt Macy 	.pm_descr =
116dacc43dfSMatt Macy 	{
117dacc43dfSMatt Macy 		.pd_name  = "",
118dacc43dfSMatt Macy 		.pd_class = -1,
119dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
120dacc43dfSMatt Macy 		.pd_width = 48
121dacc43dfSMatt Macy 	},
122dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_4,
123dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_4
124dacc43dfSMatt Macy     },
125dacc43dfSMatt Macy     {
126dacc43dfSMatt Macy 	.pm_descr =
127dacc43dfSMatt Macy 	{
128dacc43dfSMatt Macy 		.pd_name  = "",
129dacc43dfSMatt Macy 		.pd_class = -1,
130dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
131dacc43dfSMatt Macy 		.pd_width = 48
132dacc43dfSMatt Macy 	},
133dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_5,
134dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_5
135dacc43dfSMatt Macy     },
136dacc43dfSMatt Macy     {
137dacc43dfSMatt Macy 	.pm_descr =
138dacc43dfSMatt Macy 	{
139dacc43dfSMatt Macy 		.pd_name  = "",
140dacc43dfSMatt Macy 		.pd_class = -1,
141dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
142dacc43dfSMatt Macy 		.pd_width = 48
143dacc43dfSMatt Macy 	},
144dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_L3_0,
145dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_L3_0
146dacc43dfSMatt Macy     },
147dacc43dfSMatt Macy     {
148dacc43dfSMatt Macy 	.pm_descr =
149dacc43dfSMatt Macy 	{
150dacc43dfSMatt Macy 		.pd_name  = "",
151dacc43dfSMatt Macy 		.pd_class = -1,
152dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
153dacc43dfSMatt Macy 		.pd_width = 48
154dacc43dfSMatt Macy 	},
155dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_L3_1,
156dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_L3_1
157dacc43dfSMatt Macy     },
158dacc43dfSMatt Macy     {
159dacc43dfSMatt Macy 	.pm_descr =
160dacc43dfSMatt Macy 	{
161dacc43dfSMatt Macy 		.pd_name  = "",
162dacc43dfSMatt Macy 		.pd_class = -1,
163dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
164dacc43dfSMatt Macy 		.pd_width = 48
165dacc43dfSMatt Macy 	},
166dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_L3_2,
167dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_L3_2
168dacc43dfSMatt Macy     },
169dacc43dfSMatt Macy     {
170dacc43dfSMatt Macy 	.pm_descr =
171dacc43dfSMatt Macy 	{
172dacc43dfSMatt Macy 		.pd_name  = "",
173dacc43dfSMatt Macy 		.pd_class = -1,
174dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
175dacc43dfSMatt Macy 		.pd_width = 48
176dacc43dfSMatt Macy 	},
177dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_L3_3,
178dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_L3_3
179dacc43dfSMatt Macy     },
180dacc43dfSMatt Macy     {
181dacc43dfSMatt Macy 	.pm_descr =
182dacc43dfSMatt Macy 	{
183dacc43dfSMatt Macy 		.pd_name  = "",
184dacc43dfSMatt Macy 		.pd_class = -1,
185dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
186dacc43dfSMatt Macy 		.pd_width = 48
187dacc43dfSMatt Macy 	},
188dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_L3_4,
189dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_L3_4
190dacc43dfSMatt Macy     },
191dacc43dfSMatt Macy     {
192dacc43dfSMatt Macy 	.pm_descr =
193dacc43dfSMatt Macy 	{
194dacc43dfSMatt Macy 		.pd_name  = "",
195dacc43dfSMatt Macy 		.pd_class = -1,
196dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
197dacc43dfSMatt Macy 		.pd_width = 48
198dacc43dfSMatt Macy 	},
199dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_L3_5,
200dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_L3_5
201dacc43dfSMatt Macy     },
202dacc43dfSMatt Macy     {
203dacc43dfSMatt Macy 	.pm_descr =
204dacc43dfSMatt Macy 	{
205dacc43dfSMatt Macy 		.pd_name  = "",
206dacc43dfSMatt Macy 		.pd_class = -1,
207dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
208dacc43dfSMatt Macy 		.pd_width = 48
209dacc43dfSMatt Macy 	},
210dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_DF_0,
211dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_DF_0
212dacc43dfSMatt Macy     },
213dacc43dfSMatt Macy     {
214dacc43dfSMatt Macy 	.pm_descr =
215dacc43dfSMatt Macy 	{
216dacc43dfSMatt Macy 		.pd_name  = "",
217dacc43dfSMatt Macy 		.pd_class = -1,
218dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
219dacc43dfSMatt Macy 		.pd_width = 48
220dacc43dfSMatt Macy 	},
221dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_DF_1,
222dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_DF_1
223dacc43dfSMatt Macy     },
224dacc43dfSMatt Macy     {
225dacc43dfSMatt Macy 	.pm_descr =
226dacc43dfSMatt Macy 	{
227dacc43dfSMatt Macy 		.pd_name  = "",
228dacc43dfSMatt Macy 		.pd_class = -1,
229dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
230dacc43dfSMatt Macy 		.pd_width = 48
231dacc43dfSMatt Macy 	},
232dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_DF_2,
233dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_DF_2
234dacc43dfSMatt Macy     },
235dacc43dfSMatt Macy     {
236dacc43dfSMatt Macy 	.pm_descr =
237dacc43dfSMatt Macy 	{
238dacc43dfSMatt Macy 		.pd_name  = "",
239dacc43dfSMatt Macy 		.pd_class = -1,
240dacc43dfSMatt Macy 		.pd_caps  = AMD_PMC_CAPS,
241dacc43dfSMatt Macy 		.pd_width = 48
242dacc43dfSMatt Macy 	},
243dacc43dfSMatt Macy 	.pm_evsel   = AMD_PMC_EVSEL_EP_DF_3,
244dacc43dfSMatt Macy 	.pm_perfctr = AMD_PMC_PERFCTR_EP_DF_3
245ebccf1e3SJoseph Koshy      }
246ebccf1e3SJoseph Koshy };
247ebccf1e3SJoseph Koshy 
248ebccf1e3SJoseph Koshy struct amd_event_code_map {
249ebccf1e3SJoseph Koshy 	enum pmc_event	pe_ev;	 /* enum value */
2501d3aa362SConrad Meyer 	uint16_t	pe_code; /* encoded event mask */
251ebccf1e3SJoseph Koshy 	uint8_t		pe_mask; /* bits allowed in unit mask */
252ebccf1e3SJoseph Koshy };
253ebccf1e3SJoseph Koshy 
254ebccf1e3SJoseph Koshy const struct amd_event_code_map amd_event_codes[] = {
255f263522aSJoseph Koshy #if	defined(__i386__)	/* 32 bit Athlon (K7) only */
256ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_DC_ACCESSES, 		0x40, 0 },
257ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_DC_MISSES,			0x41, 0 },
258f263522aSJoseph Koshy 	{ PMC_EV_K7_DC_REFILLS_FROM_L2,		0x42, AMD_PMC_UNITMASK_MOESI },
259f263522aSJoseph Koshy 	{ PMC_EV_K7_DC_REFILLS_FROM_SYSTEM,	0x43, AMD_PMC_UNITMASK_MOESI },
260f263522aSJoseph Koshy 	{ PMC_EV_K7_DC_WRITEBACKS,		0x44, AMD_PMC_UNITMASK_MOESI },
261ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 },
262ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_L1_AND_L2_DTLB_MISSES,	0x46, 0 },
263ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_MISALIGNED_REFERENCES,	0x47, 0 },
264ebccf1e3SJoseph Koshy 
265ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_IC_FETCHES,			0x80, 0 },
266ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_IC_MISSES,			0x81, 0 },
267ebccf1e3SJoseph Koshy 
268ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_L1_ITLB_MISSES,		0x84, 0 },
269ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_L1_L2_ITLB_MISSES,		0x85, 0 },
270ebccf1e3SJoseph Koshy 
271ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_INSTRUCTIONS,	0xC0, 0 },
272ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_OPS,		0xC1, 0 },
273ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_BRANCHES,		0xC2, 0 },
274ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 },
275ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES, 	0xC4, 0 },
276ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 },
277ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 },
278ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_RETIRED_RESYNC_BRANCHES,	0xC7, 0 },
279ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_INTERRUPTS_MASKED_CYCLES,	0xCD, 0 },
280ebccf1e3SJoseph Koshy 	{ PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 },
281f263522aSJoseph Koshy 	{ PMC_EV_K7_HARDWARE_INTERRUPTS,	0xCF, 0 },
282ebccf1e3SJoseph Koshy #endif
283ebccf1e3SJoseph Koshy 
284ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FP_DISPATCHED_FPU_OPS,		0x00, 0x3F },
285ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED,	0x01, 0x00 },
286ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS,	0x02, 0x00 },
287ebccf1e3SJoseph Koshy 
288ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD, 		0x20, 0x7F },
289ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
290ebccf1e3SJoseph Koshy 	  						0x21, 0x00 },
291ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
292ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_BUFFER2_FULL,			0x23, 0x00 },
293ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_LOCKED_OPERATION,		0x24, 0x07 },
294ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL,	0x25, 0x00 },
295ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS,	0x26, 0x00 },
296ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS,	0x27, 0x00 },
297ebccf1e3SJoseph Koshy 
298ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_ACCESS,				0x40, 0x00 },
299ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_MISS,				0x41, 0x00 },
300ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_REFILL_FROM_L2,			0x42, 0x1F },
301ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_REFILL_FROM_SYSTEM,		0x43, 0x1F },
302ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_COPYBACK,			0x44, 0x1F },
303ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT,	0x45, 0x00 },
304ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS,	0x46, 0x00 },
305ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE,	0x47, 0x00 },
306ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL,	0x48, 0x00 },
307ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
308ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_ONE_BIT_ECC_ERROR,		0x4A, 0x03 },
309ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
310ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS,	0x4C, 0x03 },
311ebccf1e3SJoseph Koshy 
312ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_BU_CPU_CLK_UNHALTED,		0x76, 0x00 },
313ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_BU_INTERNAL_L2_REQUEST,		0x7D, 0x1F },
314ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_BU_FILL_REQUEST_L2_MISS,		0x7E, 0x07 },
315ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_BU_FILL_INTO_L2,			0x7F, 0x03 },
316ebccf1e3SJoseph Koshy 
317ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_FETCH,				0x80, 0x00 },
318ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_MISS,				0x81, 0x00 },
319ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_REFILL_FROM_L2,			0x82, 0x00 },
320ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_REFILL_FROM_SYSTEM,		0x83, 0x00 },
321ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT,	0x84, 0x00 },
322ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS,	0x85, 0x00 },
323ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
324ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL,		0x87, 0x00 },
325ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_RETURN_STACK_HIT,		0x88, 0x00 },
326ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_IC_RETURN_STACK_OVERFLOW,		0x89, 0x00 },
327ebccf1e3SJoseph Koshy 
328ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS,	0xC0, 0x00 },
329ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_UOPS,			0xC1, 0x00 },
330ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_BRANCHES,		0xC2, 0x00 },
331ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED,	0xC3, 0x00 },
332ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES,		0xC4, 0x00 },
333ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
334ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS,	0xC6, 0x00 },
335ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_RESYNCS,			0xC7, 0x00 },
336ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS,		0xC8, 0x00 },
337ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
338ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
339ebccf1e3SJoseph Koshy 							0xCA, 0x00 },
340ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS,	0xCB, 0x0F },
341ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
342ebccf1e3SJoseph Koshy 							0xCC, 0x07 },
343ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES,	0xCD, 0x00 },
344ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
345ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS,	0xCF, 0x00 },
346ebccf1e3SJoseph Koshy 
347ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DECODER_EMPTY,			0xD0, 0x00 },
348ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALLS,			0xD1, 0x00 },
349ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
350ebccf1e3SJoseph Koshy 							0xD2, 0x00 },
351ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
352ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD,	0xD4, 0x00 },
353ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
354ebccf1e3SJoseph Koshy 							0xD5, 0x00 },
355ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
356ebccf1e3SJoseph Koshy 							0xD6, 0x00 },
357ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL,	0xD7, 0x00 },
358ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL,	0xD8, 0x00 },
359ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
360ebccf1e3SJoseph Koshy 							0xD9, 0x00 },
361ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
362ebccf1e3SJoseph Koshy 							0xDA, 0x00 },
363ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_FPU_EXCEPTIONS,			0xDB, 0x0F },
364ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0,	0xDC, 0x00 },
365ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1,	0xDD, 0x00 },
366ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2,	0xDE, 0x00 },
367ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3,	0xDF, 0x00 },
368ebccf1e3SJoseph Koshy 
369ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x7 },
370ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
371ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
372ebccf1e3SJoseph Koshy 							0xE2, 0x00 },
373ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND,	0xE3, 0x07 },
374ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
375ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_SIZED_COMMANDS,			0xEB, 0x7F },
376ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_PROBE_RESULT,			0xEC, 0x0F },
377ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_HT_BUS0_BANDWIDTH,		0xF6, 0x0F },
378ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_HT_BUS1_BANDWIDTH,		0xF7, 0x0F },
379ebccf1e3SJoseph Koshy 	{ PMC_EV_K8_NB_HT_BUS2_BANDWIDTH,		0xF8, 0x0F }
380ebccf1e3SJoseph Koshy 
381ebccf1e3SJoseph Koshy };
382ebccf1e3SJoseph Koshy 
383323b076eSPedro F. Giffuni const int amd_event_codes_size = nitems(amd_event_codes);
384ebccf1e3SJoseph Koshy 
385ebccf1e3SJoseph Koshy /*
386e829eb6dSJoseph Koshy  * Per-processor information
387e829eb6dSJoseph Koshy  */
388e829eb6dSJoseph Koshy 
389e829eb6dSJoseph Koshy struct amd_cpu {
390e829eb6dSJoseph Koshy 	struct pmc_hw	pc_amdpmcs[AMD_NPMCS];
391e829eb6dSJoseph Koshy };
392e829eb6dSJoseph Koshy 
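/*
 * amd_pcpu[] is indexed by CPU number; the array itself is allocated in
 * pmc_amd_initialize() and the per-CPU entries in amd_pcpu_init().
 */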
393e829eb6dSJoseph Koshy static struct amd_cpu **amd_pcpu;
394e829eb6dSJoseph Koshy 
395e829eb6dSJoseph Koshy /*
396ebccf1e3SJoseph Koshy  * read a pmc register
397ebccf1e3SJoseph Koshy  */
398ebccf1e3SJoseph Koshy 
399ebccf1e3SJoseph Koshy static int
400ebccf1e3SJoseph Koshy amd_read_pmc(int cpu, int ri, pmc_value_t *v)
401ebccf1e3SJoseph Koshy {
402ebccf1e3SJoseph Koshy 	enum pmc_mode mode;
403ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
404ebccf1e3SJoseph Koshy 	struct pmc *pm;
405ebccf1e3SJoseph Koshy 	pmc_value_t tmp;
406ebccf1e3SJoseph Koshy 
407122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
408ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
409ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
410ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
411e829eb6dSJoseph Koshy 	KASSERT(amd_pcpu[cpu],
412e829eb6dSJoseph Koshy 	    ("[amd,%d] null per-cpu, cpu %d", __LINE__, cpu));
413ebccf1e3SJoseph Koshy 
414e829eb6dSJoseph Koshy 	pm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;
415ebccf1e3SJoseph Koshy 	pd = &amd_pmcdesc[ri];
416ebccf1e3SJoseph Koshy 
417ebccf1e3SJoseph Koshy 	KASSERT(pm != NULL,
418ebccf1e3SJoseph Koshy 	    ("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
419ebccf1e3SJoseph Koshy 		cpu, ri));
420ebccf1e3SJoseph Koshy 
421c5153e19SJoseph Koshy 	mode = PMC_TO_MODE(pm);
422ebccf1e3SJoseph Koshy 
4234a3690dfSJohn Baldwin 	PMCDBG2(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);
424ebccf1e3SJoseph Koshy 
425680f1afdSJohn Baldwin #ifdef	HWPMC_DEBUG
426f263522aSJoseph Koshy 	KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
427ebccf1e3SJoseph Koshy 	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
428ebccf1e3SJoseph Koshy 		pd->pm_descr.pd_class));
429f263522aSJoseph Koshy #endif
430ebccf1e3SJoseph Koshy 
431ebccf1e3SJoseph Koshy 	tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */
4324a3690dfSJohn Baldwin 	PMCDBG2(MDP,REA,2,"amd-read (pre-munge) id=%d -> %jd", ri, tmp);
43305e486c7SAdrian Chadd 	if (PMC_IS_SAMPLING_MODE(mode)) {
43405e486c7SAdrian Chadd 		/* Sign extend 48 bit value to 64 bits. */
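		/*
		 * The counter is 48 bits wide: shifting left by 16 and then
		 * arithmetic-shifting right by 16 replicates bit 47 into
		 * bits 48..63, so a wrapped (negative) count becomes a
		 * negative 64-bit value before it is converted back into a
		 * reload count.
		 */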
43505e486c7SAdrian Chadd 		tmp = (pmc_value_t) (((int64_t) tmp << 16) >> 16);
43605e486c7SAdrian Chadd 		tmp = AMD_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
43705e486c7SAdrian Chadd 	}
438ebccf1e3SJoseph Koshy 	*v = tmp;
439ebccf1e3SJoseph Koshy 
4404a3690dfSJohn Baldwin 	PMCDBG2(MDP,REA,2,"amd-read (post-munge) id=%d -> %jd", ri, *v);
441ebccf1e3SJoseph Koshy 
442ebccf1e3SJoseph Koshy 	return 0;
443ebccf1e3SJoseph Koshy }
444ebccf1e3SJoseph Koshy 
445ebccf1e3SJoseph Koshy /*
446ebccf1e3SJoseph Koshy  * Write a PMC MSR.
447ebccf1e3SJoseph Koshy  */
448ebccf1e3SJoseph Koshy 
449ebccf1e3SJoseph Koshy static int
450ebccf1e3SJoseph Koshy amd_write_pmc(int cpu, int ri, pmc_value_t v)
451ebccf1e3SJoseph Koshy {
452ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
453ebccf1e3SJoseph Koshy 	enum pmc_mode mode;
454e829eb6dSJoseph Koshy 	struct pmc *pm;
455ebccf1e3SJoseph Koshy 
456122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
457ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
458ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
459ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
460ebccf1e3SJoseph Koshy 
461e829eb6dSJoseph Koshy 	pm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;
462ebccf1e3SJoseph Koshy 	pd = &amd_pmcdesc[ri];
463ebccf1e3SJoseph Koshy 
464ebccf1e3SJoseph Koshy 	KASSERT(pm != NULL,
465ebccf1e3SJoseph Koshy 	    ("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
466ebccf1e3SJoseph Koshy 		cpu, ri));
467ebccf1e3SJoseph Koshy 
468c5153e19SJoseph Koshy 	mode = PMC_TO_MODE(pm);
469ebccf1e3SJoseph Koshy 
470680f1afdSJohn Baldwin #ifdef	HWPMC_DEBUG
471f263522aSJoseph Koshy 	KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
472ebccf1e3SJoseph Koshy 	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
473ebccf1e3SJoseph Koshy 		pd->pm_descr.pd_class));
474f263522aSJoseph Koshy #endif
475ebccf1e3SJoseph Koshy 
476ebccf1e3SJoseph Koshy 	/* use 2's complement of the count for sampling mode PMCs */
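	/*
	 * Informally: for a reload count N the counter is written with
	 * the 48-bit two's complement of N (2^48 - N), so it overflows
	 * and interrupts after N further events; e.g. N = 1000 is
	 * written as 0xFFFFFFFFFC18.
	 */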
477ebccf1e3SJoseph Koshy 	if (PMC_IS_SAMPLING_MODE(mode))
478f263522aSJoseph Koshy 		v = AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
479ebccf1e3SJoseph Koshy 
4804a3690dfSJohn Baldwin 	PMCDBG3(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);
481ebccf1e3SJoseph Koshy 
482ebccf1e3SJoseph Koshy 	/* write the PMC value */
483ebccf1e3SJoseph Koshy 	wrmsr(pd->pm_perfctr, v);
484ebccf1e3SJoseph Koshy 	return 0;
485ebccf1e3SJoseph Koshy }
486ebccf1e3SJoseph Koshy 
487ebccf1e3SJoseph Koshy /*
488ebccf1e3SJoseph Koshy  * configure hardware pmc according to the configuration recorded in
489ebccf1e3SJoseph Koshy  * pmc 'pm'.
490ebccf1e3SJoseph Koshy  */
491ebccf1e3SJoseph Koshy 
492ebccf1e3SJoseph Koshy static int
493ebccf1e3SJoseph Koshy amd_config_pmc(int cpu, int ri, struct pmc *pm)
494ebccf1e3SJoseph Koshy {
495ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
496ebccf1e3SJoseph Koshy 
4974a3690dfSJohn Baldwin 	PMCDBG3(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
4986b8c8cd8SJoseph Koshy 
499122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
500ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
501ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
502ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
503ebccf1e3SJoseph Koshy 
504e829eb6dSJoseph Koshy 	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
505ebccf1e3SJoseph Koshy 
506ebccf1e3SJoseph Koshy 	KASSERT(pm == NULL || phw->phw_pmc == NULL,
5076b8c8cd8SJoseph Koshy 	    ("[amd,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
5086b8c8cd8SJoseph Koshy 		__LINE__, pm, phw->phw_pmc));
509ebccf1e3SJoseph Koshy 
510ebccf1e3SJoseph Koshy 	phw->phw_pmc = pm;
511ebccf1e3SJoseph Koshy 	return 0;
512ebccf1e3SJoseph Koshy }
513ebccf1e3SJoseph Koshy 
514ebccf1e3SJoseph Koshy /*
515c5153e19SJoseph Koshy  * Retrieve a configured PMC pointer from hardware state.
516c5153e19SJoseph Koshy  */
517c5153e19SJoseph Koshy 
518c5153e19SJoseph Koshy static int
519c5153e19SJoseph Koshy amd_get_config(int cpu, int ri, struct pmc **ppm)
520c5153e19SJoseph Koshy {
521e829eb6dSJoseph Koshy 	*ppm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;
522c5153e19SJoseph Koshy 
523c5153e19SJoseph Koshy 	return 0;
524c5153e19SJoseph Koshy }
525c5153e19SJoseph Koshy 
526c5153e19SJoseph Koshy /*
527ebccf1e3SJoseph Koshy  * Machine dependent actions taken during the context switch in of a
528ebccf1e3SJoseph Koshy  * thread.
529ebccf1e3SJoseph Koshy  */
530ebccf1e3SJoseph Koshy 
531ebccf1e3SJoseph Koshy static int
5326b8c8cd8SJoseph Koshy amd_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
533ebccf1e3SJoseph Koshy {
534ebccf1e3SJoseph Koshy 	(void) pc;
535ebccf1e3SJoseph Koshy 
5364a3690dfSJohn Baldwin 	PMCDBG3(MDP,SWI,1, "pc=%p pp=%p enable-msr=%d", pc, pp,
537c5153e19SJoseph Koshy 	    (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0);
5386b8c8cd8SJoseph Koshy 
5396b8c8cd8SJoseph Koshy 	/* enable the RDPMC instruction if needed */
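	/*
	 * CR4.PCE allows the RDPMC instruction to be executed at any
	 * privilege level; it is set only for processes that requested
	 * direct counter access and is cleared again in amd_switch_out().
	 */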
540c5153e19SJoseph Koshy 	if (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS)
541ebccf1e3SJoseph Koshy 		load_cr4(rcr4() | CR4_PCE);
5426b8c8cd8SJoseph Koshy 
543ebccf1e3SJoseph Koshy 	return 0;
544ebccf1e3SJoseph Koshy }
545ebccf1e3SJoseph Koshy 
546ebccf1e3SJoseph Koshy /*
547ebccf1e3SJoseph Koshy  * Machine dependent actions taken during the context switch out of a
548ebccf1e3SJoseph Koshy  * thread.
549ebccf1e3SJoseph Koshy  */
550ebccf1e3SJoseph Koshy 
551ebccf1e3SJoseph Koshy static int
5526b8c8cd8SJoseph Koshy amd_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
553ebccf1e3SJoseph Koshy {
554ebccf1e3SJoseph Koshy 	(void) pc;
5556b8c8cd8SJoseph Koshy 	(void) pp;		/* can be NULL */
556ebccf1e3SJoseph Koshy 
5574a3690dfSJohn Baldwin 	PMCDBG3(MDP,SWO,1, "pc=%p pp=%p enable-msr=%d", pc, pp, pp ?
558c5153e19SJoseph Koshy 	    (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) == 1 : 0);
5596b8c8cd8SJoseph Koshy 
5606b8c8cd8SJoseph Koshy 	/* always turn off the RDPMC instruction */
561ebccf1e3SJoseph Koshy 	load_cr4(rcr4() & ~CR4_PCE);
5626b8c8cd8SJoseph Koshy 
563ebccf1e3SJoseph Koshy 	return 0;
564ebccf1e3SJoseph Koshy }
565ebccf1e3SJoseph Koshy 
566ebccf1e3SJoseph Koshy /*
567ebccf1e3SJoseph Koshy  * Check if a given allocation is feasible.
568ebccf1e3SJoseph Koshy  */
569ebccf1e3SJoseph Koshy 
570ebccf1e3SJoseph Koshy static int
571ebccf1e3SJoseph Koshy amd_allocate_pmc(int cpu, int ri, struct pmc *pm,
572ebccf1e3SJoseph Koshy     const struct pmc_op_pmcallocate *a)
573ebccf1e3SJoseph Koshy {
574ebccf1e3SJoseph Koshy 	int i;
575dacc43dfSMatt Macy 	uint64_t allowed_unitmask, caps, config, unitmask;
576ebccf1e3SJoseph Koshy 	enum pmc_event pe;
577ebccf1e3SJoseph Koshy 	const struct pmc_descr *pd;
578ebccf1e3SJoseph Koshy 
579ebccf1e3SJoseph Koshy 	(void) cpu;
580ebccf1e3SJoseph Koshy 
581122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
582ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
583ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
584ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row index %d", __LINE__, ri));
585ebccf1e3SJoseph Koshy 
586ebccf1e3SJoseph Koshy 	pd = &amd_pmcdesc[ri].pm_descr;
587ebccf1e3SJoseph Koshy 
588ebccf1e3SJoseph Koshy 	/* check class match */
589c5153e19SJoseph Koshy 	if (pd->pd_class != a->pm_class)
590ebccf1e3SJoseph Koshy 		return EINVAL;
591ebccf1e3SJoseph Koshy 
592ebccf1e3SJoseph Koshy 	caps = pm->pm_caps;
593ebccf1e3SJoseph Koshy 
5944a3690dfSJohn Baldwin 	PMCDBG2(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps);
595ebccf1e3SJoseph Koshy 
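	/*
	 * Row indices mirror the layout of amd_pmcdesc[] above: 0..5 are
	 * the core counters, 6..11 the L3 counters and 12..15 the data
	 * fabric counters; the requested sub-class must match the group
	 * the row index falls in.
	 */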
596dacc43dfSMatt Macy 	if (ri >= 0 && ri < 6 && a->pm_md.pm_amd.pm_amd_sub_class != PMC_AMD_SUB_CLASS_CORE)
597dacc43dfSMatt Macy 		return EINVAL;
598dacc43dfSMatt Macy 	if (ri >= 6 && ri < 12 && a->pm_md.pm_amd.pm_amd_sub_class != PMC_AMD_SUB_CLASS_L3_CACHE)
599dacc43dfSMatt Macy 		return EINVAL;
600dacc43dfSMatt Macy 	if (ri >= 12 && ri < 16 && a->pm_md.pm_amd.pm_amd_sub_class != PMC_AMD_SUB_CLASS_DATA_FABRIC)
601dacc43dfSMatt Macy 		return EINVAL;
602dacc43dfSMatt Macy 
603ebccf1e3SJoseph Koshy 	if ((pd->pd_caps & caps) != caps)
604ebccf1e3SJoseph Koshy 		return EPERM;
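	/*
	 * If pmc_cpuid was set at initialization time (Family 17h and
	 * 18h processors, see pmc_amd_initialize()), the K7/K8 event
	 * table below is bypassed and the caller-supplied event encoding
	 * is programmed verbatim.
	 */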
60581eb4dcfSMatt Macy 	if (strlen(pmc_cpuid) != 0) {
60681eb4dcfSMatt Macy 		pm->pm_md.pm_amd.pm_amd_evsel =
60781eb4dcfSMatt Macy 			a->pm_md.pm_amd.pm_amd_config;
60881eb4dcfSMatt Macy 		PMCDBG2(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, a->pm_md.pm_amd.pm_amd_config);
60981eb4dcfSMatt Macy 		return (0);
61081eb4dcfSMatt Macy 	}
611ebccf1e3SJoseph Koshy 
612ebccf1e3SJoseph Koshy 	pe = a->pm_ev;
613ebccf1e3SJoseph Koshy 
614ebccf1e3SJoseph Koshy 	/* map ev to the correct event mask code */
615ebccf1e3SJoseph Koshy 	config = allowed_unitmask = 0;
616ebccf1e3SJoseph Koshy 	for (i = 0; i < amd_event_codes_size; i++)
617ebccf1e3SJoseph Koshy 		if (amd_event_codes[i].pe_ev == pe) {
618ebccf1e3SJoseph Koshy 			config =
619ebccf1e3SJoseph Koshy 			    AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
620ebccf1e3SJoseph Koshy 			allowed_unitmask =
621ebccf1e3SJoseph Koshy 			    AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
622ebccf1e3SJoseph Koshy 			break;
623ebccf1e3SJoseph Koshy 		}
624ebccf1e3SJoseph Koshy 	if (i == amd_event_codes_size)
625ebccf1e3SJoseph Koshy 		return EINVAL;
626ebccf1e3SJoseph Koshy 
627f263522aSJoseph Koshy 	unitmask = a->pm_md.pm_amd.pm_amd_config & AMD_PMC_UNITMASK;
628ebccf1e3SJoseph Koshy 	if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
629ebccf1e3SJoseph Koshy 		return EINVAL;
630ebccf1e3SJoseph Koshy 
631ebccf1e3SJoseph Koshy 	if (unitmask && (caps & PMC_CAP_QUALIFIER))
632ebccf1e3SJoseph Koshy 		config |= unitmask;
633ebccf1e3SJoseph Koshy 
634ebccf1e3SJoseph Koshy 	if (caps & PMC_CAP_THRESHOLD)
635f263522aSJoseph Koshy 		config |= a->pm_md.pm_amd.pm_amd_config & AMD_PMC_COUNTERMASK;
636ebccf1e3SJoseph Koshy 
637ebccf1e3SJoseph Koshy 	/* set at least one of the 'usr' or 'os' caps */
638ebccf1e3SJoseph Koshy 	if (caps & PMC_CAP_USER)
639ebccf1e3SJoseph Koshy 		config |= AMD_PMC_USR;
640ebccf1e3SJoseph Koshy 	if (caps & PMC_CAP_SYSTEM)
641ebccf1e3SJoseph Koshy 		config |= AMD_PMC_OS;
642ebccf1e3SJoseph Koshy 	if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
643ebccf1e3SJoseph Koshy 		config |= (AMD_PMC_USR|AMD_PMC_OS);
644ebccf1e3SJoseph Koshy 
645ebccf1e3SJoseph Koshy 	if (caps & PMC_CAP_EDGE)
646ebccf1e3SJoseph Koshy 		config |= AMD_PMC_EDGE;
647ebccf1e3SJoseph Koshy 	if (caps & PMC_CAP_INVERT)
648ebccf1e3SJoseph Koshy 		config |= AMD_PMC_INVERT;
649ebccf1e3SJoseph Koshy 	if (caps & PMC_CAP_INTERRUPT)
650ebccf1e3SJoseph Koshy 		config |= AMD_PMC_INT;
651ebccf1e3SJoseph Koshy 
652ebccf1e3SJoseph Koshy 	pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */
653ebccf1e3SJoseph Koshy 
6544a3690dfSJohn Baldwin 	PMCDBG2(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, config);
655ebccf1e3SJoseph Koshy 
656ebccf1e3SJoseph Koshy 	return 0;
657ebccf1e3SJoseph Koshy }
658ebccf1e3SJoseph Koshy 
659ebccf1e3SJoseph Koshy /*
660ebccf1e3SJoseph Koshy  * Release machine dependent state associated with a PMC.  This is a
661ebccf1e3SJoseph Koshy  * no-op on this architecture.
662ebccf1e3SJoseph Koshy  *
663ebccf1e3SJoseph Koshy  */
664ebccf1e3SJoseph Koshy 
665ebccf1e3SJoseph Koshy /* ARGSUSED0 */
666ebccf1e3SJoseph Koshy static int
667ebccf1e3SJoseph Koshy amd_release_pmc(int cpu, int ri, struct pmc *pmc)
668ebccf1e3SJoseph Koshy {
669680f1afdSJohn Baldwin #ifdef	HWPMC_DEBUG
670ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
671ebccf1e3SJoseph Koshy #endif
672ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
673ebccf1e3SJoseph Koshy 
674ebccf1e3SJoseph Koshy 	(void) pmc;
675ebccf1e3SJoseph Koshy 
676122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
677ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
678ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
679ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
680ebccf1e3SJoseph Koshy 
681e829eb6dSJoseph Koshy 	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
682ebccf1e3SJoseph Koshy 
683ebccf1e3SJoseph Koshy 	KASSERT(phw->phw_pmc == NULL,
684ebccf1e3SJoseph Koshy 	    ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
685ebccf1e3SJoseph Koshy 
686680f1afdSJohn Baldwin #ifdef	HWPMC_DEBUG
687ebccf1e3SJoseph Koshy 	pd = &amd_pmcdesc[ri];
688f263522aSJoseph Koshy 	if (pd->pm_descr.pd_class == amd_pmc_class)
689ebccf1e3SJoseph Koshy 		KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
690ebccf1e3SJoseph Koshy 		    ("[amd,%d] PMC %d released while active", __LINE__, ri));
691ebccf1e3SJoseph Koshy #endif
692ebccf1e3SJoseph Koshy 
693ebccf1e3SJoseph Koshy 	return 0;
694ebccf1e3SJoseph Koshy }
695ebccf1e3SJoseph Koshy 
696ebccf1e3SJoseph Koshy /*
697ebccf1e3SJoseph Koshy  * start a PMC.
698ebccf1e3SJoseph Koshy  */
699ebccf1e3SJoseph Koshy 
700ebccf1e3SJoseph Koshy static int
701ebccf1e3SJoseph Koshy amd_start_pmc(int cpu, int ri)
702ebccf1e3SJoseph Koshy {
703dacc43dfSMatt Macy 	uint64_t config;
704ebccf1e3SJoseph Koshy 	struct pmc *pm;
705ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
706ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
707ebccf1e3SJoseph Koshy 
708122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
709ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
710ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
711ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
712ebccf1e3SJoseph Koshy 
713e829eb6dSJoseph Koshy 	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
714ebccf1e3SJoseph Koshy 	pm  = phw->phw_pmc;
715ebccf1e3SJoseph Koshy 	pd = &amd_pmcdesc[ri];
716ebccf1e3SJoseph Koshy 
717ebccf1e3SJoseph Koshy 	KASSERT(pm != NULL,
718ebccf1e3SJoseph Koshy 	    ("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
719ebccf1e3SJoseph Koshy 		cpu, ri));
720ebccf1e3SJoseph Koshy 
7214a3690dfSJohn Baldwin 	PMCDBG2(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);
722ebccf1e3SJoseph Koshy 
723ebccf1e3SJoseph Koshy 	KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
724ebccf1e3SJoseph Koshy 	    ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
725ebccf1e3SJoseph Koshy 	    ri, cpu, pd->pm_descr.pd_name));
726ebccf1e3SJoseph Koshy 
727ebccf1e3SJoseph Koshy 	/* turn on the PMC ENABLE bit */
728ebccf1e3SJoseph Koshy 	config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;
729ebccf1e3SJoseph Koshy 
7304a3690dfSJohn Baldwin 	PMCDBG1(MDP,STA,2,"amd-start config=0x%x", config);
731ebccf1e3SJoseph Koshy 
732ebccf1e3SJoseph Koshy 	wrmsr(pd->pm_evsel, config);
733ebccf1e3SJoseph Koshy 	return 0;
734ebccf1e3SJoseph Koshy }
735ebccf1e3SJoseph Koshy 
736ebccf1e3SJoseph Koshy /*
737ebccf1e3SJoseph Koshy  * Stop a PMC.
738ebccf1e3SJoseph Koshy  */
739ebccf1e3SJoseph Koshy 
740ebccf1e3SJoseph Koshy static int
741ebccf1e3SJoseph Koshy amd_stop_pmc(int cpu, int ri)
742ebccf1e3SJoseph Koshy {
743ebccf1e3SJoseph Koshy 	struct pmc *pm;
744ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
745ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
746ebccf1e3SJoseph Koshy 	uint64_t config;
747c5445f8bSAndrew Gallatin 	int i;
748ebccf1e3SJoseph Koshy 
749122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
750ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
751ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
752ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal row-index %d", __LINE__, ri));
753ebccf1e3SJoseph Koshy 
754e829eb6dSJoseph Koshy 	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
755ebccf1e3SJoseph Koshy 	pm  = phw->phw_pmc;
756ebccf1e3SJoseph Koshy 	pd  = &amd_pmcdesc[ri];
757ebccf1e3SJoseph Koshy 
758ebccf1e3SJoseph Koshy 	KASSERT(pm != NULL,
759ebccf1e3SJoseph Koshy 	    ("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
760ebccf1e3SJoseph Koshy 		cpu, ri));
761ebccf1e3SJoseph Koshy 	KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
762ebccf1e3SJoseph Koshy 	    ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
763ebccf1e3SJoseph Koshy 		__LINE__, ri, cpu, pd->pm_descr.pd_name));
764ebccf1e3SJoseph Koshy 
7654a3690dfSJohn Baldwin 	PMCDBG1(MDP,STO,1,"amd-stop ri=%d", ri);
766ebccf1e3SJoseph Koshy 
767ebccf1e3SJoseph Koshy 	/* turn off the PMC ENABLE bit */
768ebccf1e3SJoseph Koshy 	config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;
769ebccf1e3SJoseph Koshy 	wrmsr(pd->pm_evsel, config);
770c5445f8bSAndrew Gallatin 
771c5445f8bSAndrew Gallatin 	/*
772c5445f8bSAndrew Gallatin 	 * Due to NMI latency on newer AMD processors, NMI
773c5445f8bSAndrew Gallatin 	 * interrupts are ignored, which leads to a panic or to
774c5445f8bSAndrew Gallatin 	 * messages, depending on the kernel configuration.
775c5445f8bSAndrew Gallatin 	 */
776c5445f8bSAndrew Gallatin 
777c5445f8bSAndrew Gallatin 	/* Wait for the count to be reset */
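	/*
	 * Informal reading of the check below: for a sampling PMC the
	 * high-order bit (bit pd_width - 1) is set while the counter
	 * holds a negative reload value and clear once it has wrapped
	 * past zero, so a set bit indicates no overflow is pending.
	 */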
778c5445f8bSAndrew Gallatin 	for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
779c5445f8bSAndrew Gallatin 		if (rdmsr(pd->pm_perfctr) & (1ULL << (pd->pm_descr.pd_width - 1)))
780c5445f8bSAndrew Gallatin 			break;
781c5445f8bSAndrew Gallatin 
782c5445f8bSAndrew Gallatin 		DELAY(1);
783c5445f8bSAndrew Gallatin 	}
784c5445f8bSAndrew Gallatin 
785ebccf1e3SJoseph Koshy 	return 0;
786ebccf1e3SJoseph Koshy }
787ebccf1e3SJoseph Koshy 
788ebccf1e3SJoseph Koshy /*
789ebccf1e3SJoseph Koshy  * Interrupt handler.  This function needs to return '1' if the
790ebccf1e3SJoseph Koshy  * interrupt was caused by this CPU's PMCs or '0' otherwise.  It is
791ebccf1e3SJoseph Koshy  * not allowed to sleep or do anything a 'fast' interrupt handler is
792ebccf1e3SJoseph Koshy  * not allowed to do.
793ebccf1e3SJoseph Koshy  */
794ebccf1e3SJoseph Koshy 
795ebccf1e3SJoseph Koshy static int
796eb7c9019SMatt Macy amd_intr(struct trapframe *tf)
797ebccf1e3SJoseph Koshy {
798eb7c9019SMatt Macy 	int i, error, retval, cpu;
799dacc43dfSMatt Macy 	uint64_t config, evsel, perfctr;
800ebccf1e3SJoseph Koshy 	struct pmc *pm;
801e829eb6dSJoseph Koshy 	struct amd_cpu *pac;
802f263522aSJoseph Koshy 	pmc_value_t v;
803c5445f8bSAndrew Gallatin 	uint32_t active = 0, count = 0;
80436c0fd9dSJoseph Koshy 
805eb7c9019SMatt Macy 	cpu = curcpu;
806122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
807ebccf1e3SJoseph Koshy 	    ("[amd,%d] out of range CPU %d", __LINE__, cpu));
808ebccf1e3SJoseph Koshy 
8094a3690dfSJohn Baldwin 	PMCDBG3(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
810d07f36b0SJoseph Koshy 	    TRAPF_USERMODE(tf));
811f263522aSJoseph Koshy 
812ebccf1e3SJoseph Koshy 	retval = 0;
813ebccf1e3SJoseph Koshy 
814e829eb6dSJoseph Koshy 	pac = amd_pcpu[cpu];
815ebccf1e3SJoseph Koshy 
816ebccf1e3SJoseph Koshy 	/*
817ebccf1e3SJoseph Koshy 	 * look for all PMCs that have interrupted:
818f263522aSJoseph Koshy 	 * - look for a running, sampling PMC which has overflowed
819f263522aSJoseph Koshy 	 *   and which has a valid 'struct pmc' association
820f263522aSJoseph Koshy 	 *
821f263522aSJoseph Koshy 	 * If found, we call a helper to process the interrupt.
822bebaef4aSJoseph Koshy 	 *
823c5445f8bSAndrew Gallatin 	 * PMCs interrupting at the same time are collapsed into
824c5445f8bSAndrew Gallatin 	 * a single interrupt. Check all the valid pmcs for
825c5445f8bSAndrew Gallatin 	 * overflow.
826ebccf1e3SJoseph Koshy 	 */
827ebccf1e3SJoseph Koshy 
828c5445f8bSAndrew Gallatin 	for (i = 0; i < AMD_CORE_NPMCS; i++) {
829f263522aSJoseph Koshy 
830dfd9bc23SJoseph Koshy 		if ((pm = pac->pc_amdpmcs[i].phw_pmc) == NULL ||
831f263522aSJoseph Koshy 		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
832ebccf1e3SJoseph Koshy 			continue;
833ebccf1e3SJoseph Koshy 		}
834ebccf1e3SJoseph Koshy 
835c5445f8bSAndrew Gallatin 		/* Consider pmc with valid handle as active */
836c5445f8bSAndrew Gallatin 		active++;
837c5445f8bSAndrew Gallatin 
838dfd9bc23SJoseph Koshy 		if (!AMD_PMC_HAS_OVERFLOWED(i))
839dfd9bc23SJoseph Koshy 			continue;
840dfd9bc23SJoseph Koshy 
841d07f36b0SJoseph Koshy 		retval = 1;	/* Found an interrupting PMC. */
842bebaef4aSJoseph Koshy 
843dfd9bc23SJoseph Koshy 		if (pm->pm_state != PMC_STATE_RUNNING)
844dfd9bc23SJoseph Koshy 			continue;
845dfd9bc23SJoseph Koshy 
846d07f36b0SJoseph Koshy 		/* Stop the PMC, reload count. */
847c5445f8bSAndrew Gallatin 		evsel	= amd_pmcdesc[i].pm_evsel;
848c5445f8bSAndrew Gallatin 		perfctr	= amd_pmcdesc[i].pm_perfctr;
849f263522aSJoseph Koshy 		v       = pm->pm_sc.pm_reloadcount;
850f263522aSJoseph Koshy 		config  = rdmsr(evsel);
851f263522aSJoseph Koshy 
852f263522aSJoseph Koshy 		KASSERT((config & ~AMD_PMC_ENABLE) ==
853f263522aSJoseph Koshy 		    (pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE),
854dacc43dfSMatt Macy 		    ("[amd,%d] config mismatch reg=0x%jx pm=0x%jx", __LINE__,
855dacc43dfSMatt Macy 			 (uintmax_t)config, (uintmax_t)pm->pm_md.pm_amd.pm_amd_evsel));
856f263522aSJoseph Koshy 
857f263522aSJoseph Koshy 		wrmsr(evsel, config & ~AMD_PMC_ENABLE);
858f263522aSJoseph Koshy 		wrmsr(perfctr, AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v));
859f263522aSJoseph Koshy 
860d07f36b0SJoseph Koshy 		/* Restart the counter if logging succeeded. */
861eb7c9019SMatt Macy 		error = pmc_process_interrupt(PMC_HR, pm, tf);
862f263522aSJoseph Koshy 		if (error == 0)
8633c1f73b1SAndriy Gapon 			wrmsr(evsel, config);
864ebccf1e3SJoseph Koshy 	}
865f263522aSJoseph Koshy 
866c5445f8bSAndrew Gallatin 	/*
867c5445f8bSAndrew Gallatin 	 * Due to NMI latency, there can be a scenario in which
868c5445f8bSAndrew Gallatin 	 * multiple PMCs get serviced in an earlier NMI and we do
869c5445f8bSAndrew Gallatin 	 * not find an overflow in the subsequent NMI.
870c5445f8bSAndrew Gallatin 	 *
871c5445f8bSAndrew Gallatin 	 * For such cases we keep a per-CPU count of active NMIs
872c5445f8bSAndrew Gallatin 	 * and compare it with min(active PMCs, 2) to determine
873c5445f8bSAndrew Gallatin 	 * whether this NMI was for a PMC overflow that was already
874c5445f8bSAndrew Gallatin 	 * serviced by an earlier NMI or should be ignored.
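	 *
	 * For example, if two counters overflow at almost the same time
	 * and both are serviced from the first NMI, the second NMI will
	 * find no overflow; the saved count lets it be claimed here
	 * instead of being reported as an unclaimed NMI.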
875c5445f8bSAndrew Gallatin 	 */
876c5445f8bSAndrew Gallatin 
877c5445f8bSAndrew Gallatin 	if (retval) {
878c5445f8bSAndrew Gallatin 		DPCPU_SET(nmi_counter, min(2, active));
879c5445f8bSAndrew Gallatin 	} else {
880c5445f8bSAndrew Gallatin 		if ((count = DPCPU_GET(nmi_counter))) {
881c5445f8bSAndrew Gallatin 			retval = 1;
882c5445f8bSAndrew Gallatin 			DPCPU_SET(nmi_counter, --count);
883c5445f8bSAndrew Gallatin 		}
884c5445f8bSAndrew Gallatin 	}
885c5445f8bSAndrew Gallatin 
886e6b475e0SMatt Macy 	if (retval)
887e6b475e0SMatt Macy 		counter_u64_add(pmc_stats.pm_intr_processed, 1);
888e6b475e0SMatt Macy 	else
889e6b475e0SMatt Macy 		counter_u64_add(pmc_stats.pm_intr_ignored, 1);
890fbf1556dSJoseph Koshy 
8913c1f73b1SAndriy Gapon 	PMCDBG1(MDP,INT,2, "retval=%d", retval);
892d07f36b0SJoseph Koshy 	return (retval);
893ebccf1e3SJoseph Koshy }
894ebccf1e3SJoseph Koshy 
895ebccf1e3SJoseph Koshy /*
896ebccf1e3SJoseph Koshy  * describe a PMC
897ebccf1e3SJoseph Koshy  */
898ebccf1e3SJoseph Koshy static int
899ebccf1e3SJoseph Koshy amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
900ebccf1e3SJoseph Koshy {
901ebccf1e3SJoseph Koshy 	int error;
902ebccf1e3SJoseph Koshy 	size_t copied;
903ebccf1e3SJoseph Koshy 	const struct amd_descr *pd;
904ebccf1e3SJoseph Koshy 	struct pmc_hw *phw;
905ebccf1e3SJoseph Koshy 
906122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
907ebccf1e3SJoseph Koshy 	    ("[amd,%d] illegal CPU %d", __LINE__, cpu));
908ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
909ebccf1e3SJoseph Koshy 	    ("[amd,%d] row-index %d out of range", __LINE__, ri));
910ebccf1e3SJoseph Koshy 
911e829eb6dSJoseph Koshy 	phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
912ebccf1e3SJoseph Koshy 	pd  = &amd_pmcdesc[ri];
913ebccf1e3SJoseph Koshy 
914ebccf1e3SJoseph Koshy 	if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
915ebccf1e3SJoseph Koshy 		 PMC_NAME_MAX, &copied)) != 0)
916ebccf1e3SJoseph Koshy 		return error;
917ebccf1e3SJoseph Koshy 
918ebccf1e3SJoseph Koshy 	pi->pm_class = pd->pm_descr.pd_class;
919ebccf1e3SJoseph Koshy 
920ebccf1e3SJoseph Koshy 	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
921ebccf1e3SJoseph Koshy 		pi->pm_enabled = TRUE;
922ebccf1e3SJoseph Koshy 		*ppmc          = phw->phw_pmc;
923ebccf1e3SJoseph Koshy 	} else {
924ebccf1e3SJoseph Koshy 		pi->pm_enabled = FALSE;
925ebccf1e3SJoseph Koshy 		*ppmc          = NULL;
926ebccf1e3SJoseph Koshy 	}
927ebccf1e3SJoseph Koshy 
928ebccf1e3SJoseph Koshy 	return 0;
929ebccf1e3SJoseph Koshy }
930ebccf1e3SJoseph Koshy 
931ebccf1e3SJoseph Koshy /*
932ebccf1e3SJoseph Koshy  * i386 specific entry points
933ebccf1e3SJoseph Koshy  */
934ebccf1e3SJoseph Koshy 
935ebccf1e3SJoseph Koshy /*
936ebccf1e3SJoseph Koshy  * return the MSR address of the given PMC, as an offset from AMD_PMC_PERFCTR_0.
937ebccf1e3SJoseph Koshy  */
938ebccf1e3SJoseph Koshy 
939ebccf1e3SJoseph Koshy static int
940ebccf1e3SJoseph Koshy amd_get_msr(int ri, uint32_t *msr)
941ebccf1e3SJoseph Koshy {
942ebccf1e3SJoseph Koshy 	KASSERT(ri >= 0 && ri < AMD_NPMCS,
943ebccf1e3SJoseph Koshy 	    ("[amd,%d] ri %d out of range", __LINE__, ri));
944ebccf1e3SJoseph Koshy 
9456b8c8cd8SJoseph Koshy 	*msr = amd_pmcdesc[ri].pm_perfctr - AMD_PMC_PERFCTR_0;
946e829eb6dSJoseph Koshy 
947e829eb6dSJoseph Koshy 	return (0);
948ebccf1e3SJoseph Koshy }
949ebccf1e3SJoseph Koshy 
950ebccf1e3SJoseph Koshy /*
951ebccf1e3SJoseph Koshy  * processor dependent initialization.
952ebccf1e3SJoseph Koshy  */
953ebccf1e3SJoseph Koshy 
954ebccf1e3SJoseph Koshy static int
955e829eb6dSJoseph Koshy amd_pcpu_init(struct pmc_mdep *md, int cpu)
956ebccf1e3SJoseph Koshy {
957e829eb6dSJoseph Koshy 	int classindex, first_ri, n;
958e829eb6dSJoseph Koshy 	struct pmc_cpu *pc;
959e829eb6dSJoseph Koshy 	struct amd_cpu *pac;
960ebccf1e3SJoseph Koshy 	struct pmc_hw  *phw;
961ebccf1e3SJoseph Koshy 
962122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
963ebccf1e3SJoseph Koshy 	    ("[amd,%d] insane cpu number %d", __LINE__, cpu));
964ebccf1e3SJoseph Koshy 
9654a3690dfSJohn Baldwin 	PMCDBG1(MDP,INI,1,"amd-init cpu=%d", cpu);
966ebccf1e3SJoseph Koshy 
967e829eb6dSJoseph Koshy 	amd_pcpu[cpu] = pac = malloc(sizeof(struct amd_cpu), M_PMC,
968ebccf1e3SJoseph Koshy 	    M_WAITOK|M_ZERO);
969ebccf1e3SJoseph Koshy 
970ebccf1e3SJoseph Koshy 	/*
971e829eb6dSJoseph Koshy 	 * Set the content of the hardware descriptors to a known
972e829eb6dSJoseph Koshy 	 * state and initialize pointers in the MI per-cpu descriptor.
973ebccf1e3SJoseph Koshy 	 */
974e829eb6dSJoseph Koshy 	pc = pmc_pcpu[cpu];
975e829eb6dSJoseph Koshy #if	defined(__amd64__)
976e829eb6dSJoseph Koshy 	classindex = PMC_MDEP_CLASS_INDEX_K8;
977e829eb6dSJoseph Koshy #elif	defined(__i386__)
978e829eb6dSJoseph Koshy 	classindex = md->pmd_cputype == PMC_CPU_AMD_K8 ?
979e829eb6dSJoseph Koshy 	    PMC_MDEP_CLASS_INDEX_K8 : PMC_MDEP_CLASS_INDEX_K7;
980e829eb6dSJoseph Koshy #endif
981e829eb6dSJoseph Koshy 	first_ri = md->pmd_classdep[classindex].pcd_ri;
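	/*
	 * pcd_ri is the first global row index assigned to this class,
	 * so this class's hardware descriptors are installed starting at
	 * that offset in pc->pc_hwpmcs[] below.
	 */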
982ebccf1e3SJoseph Koshy 
983e829eb6dSJoseph Koshy 	KASSERT(pc != NULL, ("[amd,%d] NULL per-cpu pointer", __LINE__));
984e829eb6dSJoseph Koshy 
985e829eb6dSJoseph Koshy 	for (n = 0, phw = pac->pc_amdpmcs; n < AMD_NPMCS; n++, phw++) {
986ebccf1e3SJoseph Koshy 		phw->phw_state 	  = PMC_PHW_FLAG_IS_ENABLED |
987ebccf1e3SJoseph Koshy 		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
988ebccf1e3SJoseph Koshy 		phw->phw_pmc	  = NULL;
989e829eb6dSJoseph Koshy 		pc->pc_hwpmcs[n + first_ri]  = phw;
990ebccf1e3SJoseph Koshy 	}
991ebccf1e3SJoseph Koshy 
992e829eb6dSJoseph Koshy 	return (0);
993ebccf1e3SJoseph Koshy }
994ebccf1e3SJoseph Koshy 
995ebccf1e3SJoseph Koshy 
996ebccf1e3SJoseph Koshy /*
997ebccf1e3SJoseph Koshy  * processor dependent cleanup prior to the KLD
998ebccf1e3SJoseph Koshy  * being unloaded
999ebccf1e3SJoseph Koshy  */
1000ebccf1e3SJoseph Koshy 
1001ebccf1e3SJoseph Koshy static int
1002e829eb6dSJoseph Koshy amd_pcpu_fini(struct pmc_mdep *md, int cpu)
1003ebccf1e3SJoseph Koshy {
1004e829eb6dSJoseph Koshy 	int classindex, first_ri, i;
1005ebccf1e3SJoseph Koshy 	uint32_t evsel;
1006e829eb6dSJoseph Koshy 	struct pmc_cpu *pc;
1007e829eb6dSJoseph Koshy 	struct amd_cpu *pac;
1008ebccf1e3SJoseph Koshy 
1009122ccdc1SJoseph Koshy 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
1010ebccf1e3SJoseph Koshy 	    ("[amd,%d] insane cpu number (%d)", __LINE__, cpu));
1011ebccf1e3SJoseph Koshy 
10124a3690dfSJohn Baldwin 	PMCDBG1(MDP,INI,1,"amd-cleanup cpu=%d", cpu);
1013ebccf1e3SJoseph Koshy 
1014ebccf1e3SJoseph Koshy 	/*
1015ebccf1e3SJoseph Koshy 	 * First, turn off all PMCs on this CPU.
1016ebccf1e3SJoseph Koshy 	 */
1017ebccf1e3SJoseph Koshy 	for (i = 0; i < 4; i++) { /* XXX this loop is now not needed */
1018ebccf1e3SJoseph Koshy 		evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
1019ebccf1e3SJoseph Koshy 		evsel &= ~AMD_PMC_ENABLE;
1020ebccf1e3SJoseph Koshy 		wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
1021ebccf1e3SJoseph Koshy 	}
1022ebccf1e3SJoseph Koshy 
1023ebccf1e3SJoseph Koshy 	/*
1024ebccf1e3SJoseph Koshy 	 * Next, free up allocated space.
1025ebccf1e3SJoseph Koshy 	 */
1026e829eb6dSJoseph Koshy 	if ((pac = amd_pcpu[cpu]) == NULL)
1027e829eb6dSJoseph Koshy 		return (0);
1028ebccf1e3SJoseph Koshy 
1029e829eb6dSJoseph Koshy 	amd_pcpu[cpu] = NULL;
1030ebccf1e3SJoseph Koshy 
1031680f1afdSJohn Baldwin #ifdef	HWPMC_DEBUG
1032e829eb6dSJoseph Koshy 	for (i = 0; i < AMD_NPMCS; i++) {
1033e829eb6dSJoseph Koshy 		KASSERT(pac->pc_amdpmcs[i].phw_pmc == NULL,
1034ebccf1e3SJoseph Koshy 		    ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
1035e67c0426SAndriy Gapon 		KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + i),
1036ebccf1e3SJoseph Koshy 		    ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
1037ebccf1e3SJoseph Koshy 	}
1038ebccf1e3SJoseph Koshy #endif
1039ebccf1e3SJoseph Koshy 
1040e829eb6dSJoseph Koshy 	pc = pmc_pcpu[cpu];
1041e829eb6dSJoseph Koshy 	KASSERT(pc != NULL, ("[amd,%d] NULL per-cpu state", __LINE__));
1042e829eb6dSJoseph Koshy 
1043e829eb6dSJoseph Koshy #if	defined(__amd64__)
1044e829eb6dSJoseph Koshy 	classindex = PMC_MDEP_CLASS_INDEX_K8;
1045e829eb6dSJoseph Koshy #elif	defined(__i386__)
1046e829eb6dSJoseph Koshy 	classindex = md->pmd_cputype == PMC_CPU_AMD_K8 ? PMC_MDEP_CLASS_INDEX_K8 :
1047e829eb6dSJoseph Koshy 	    PMC_MDEP_CLASS_INDEX_K7;
1048e829eb6dSJoseph Koshy #endif
1049e829eb6dSJoseph Koshy 	first_ri = md->pmd_classdep[classindex].pcd_ri;
1050e829eb6dSJoseph Koshy 
1051e829eb6dSJoseph Koshy 	/*
1052e829eb6dSJoseph Koshy 	 * Reset pointers in the MI 'per-cpu' state.
1053e829eb6dSJoseph Koshy 	 */
1054e829eb6dSJoseph Koshy 	for (i = 0; i < AMD_NPMCS; i++) {
1055e829eb6dSJoseph Koshy 		pc->pc_hwpmcs[i + first_ri] = NULL;
1056e829eb6dSJoseph Koshy 	}
1057e829eb6dSJoseph Koshy 
1058e829eb6dSJoseph Koshy 
1059e829eb6dSJoseph Koshy 	free(pac, M_PMC);
1060e829eb6dSJoseph Koshy 
1061e829eb6dSJoseph Koshy 	return (0);
1062ebccf1e3SJoseph Koshy }
1063ebccf1e3SJoseph Koshy 
1064ebccf1e3SJoseph Koshy /*
1065ebccf1e3SJoseph Koshy  * Initialize ourselves.
1066ebccf1e3SJoseph Koshy  */
1067ebccf1e3SJoseph Koshy 
1068ebccf1e3SJoseph Koshy struct pmc_mdep *
1069ebccf1e3SJoseph Koshy pmc_amd_initialize(void)
1070ebccf1e3SJoseph Koshy {
1071f5f9340bSFabien Thomas 	int classindex, error, i, ncpus;
1072e829eb6dSJoseph Koshy 	struct pmc_classdep *pcd;
1073f263522aSJoseph Koshy 	enum pmc_cputype cputype;
1074ebccf1e3SJoseph Koshy 	struct pmc_mdep *pmc_mdep;
1075e829eb6dSJoseph Koshy 	enum pmc_class class;
1076*1791cad0SAlexander Motin 	int model, stepping;
1077f263522aSJoseph Koshy 	char *name;
1078ebccf1e3SJoseph Koshy 
1079f263522aSJoseph Koshy 	/*
1080f263522aSJoseph Koshy 	 * The presence of hardware performance counters on the AMD
1081f263522aSJoseph Koshy 	 * Athlon, Duron or later processors is _not_ indicated by
1082f263522aSJoseph Koshy 	 * any of the processor feature flags set by the 'CPUID'
1083f263522aSJoseph Koshy 	 * instruction, so we only check the 'instruction family'
1084f263522aSJoseph Koshy 	 * field returned by CPUID for instruction family >= 6.
1085f263522aSJoseph Koshy 	 */
1086ebccf1e3SJoseph Koshy 
108754bad7c6SJoseph Koshy 	name = NULL;
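	/*
	 * CPUID signature layout: the extended model (bits 19:16) and the
	 * base model (bits 7:4) combine into an 8-bit model number and the
	 * stepping is in bits 3:0; e.g. a cpu_id of 0x00870F10 yields
	 * model 0x71, stepping 0.
	 */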
108881eb4dcfSMatt Macy 	model = ((cpu_id & 0xF0000) >> 12) | ((cpu_id & 0xF0) >> 4);
1089*1791cad0SAlexander Motin 	stepping = cpu_id & 0xF;
109081eb4dcfSMatt Macy 	if (CPUID_TO_FAMILY(cpu_id) == 0x17)
1091*1791cad0SAlexander Motin 		snprintf(pmc_cpuid, sizeof(pmc_cpuid), "AuthenticAMD-%d-%02X-%X",
1092*1791cad0SAlexander Motin 				 CPUID_TO_FAMILY(cpu_id), model, stepping);
109353071ed1SKonstantin Belousov 	if (CPUID_TO_FAMILY(cpu_id) == 0x18)
1094*1791cad0SAlexander Motin 		snprintf(pmc_cpuid, sizeof(pmc_cpuid), "HygonGenuine-%d-%02X-%X",
1095*1791cad0SAlexander Motin 				 CPUID_TO_FAMILY(cpu_id), model, stepping);
109681eb4dcfSMatt Macy 
1097f263522aSJoseph Koshy 	switch (cpu_id & 0xF00) {
1098e829eb6dSJoseph Koshy #if	defined(__i386__)
1099f263522aSJoseph Koshy 	case 0x600:		/* Athlon(tm) processor */
1100e829eb6dSJoseph Koshy 		classindex = PMC_MDEP_CLASS_INDEX_K7;
1101f263522aSJoseph Koshy 		cputype = PMC_CPU_AMD_K7;
1102f263522aSJoseph Koshy 		class = PMC_CLASS_K7;
1103f263522aSJoseph Koshy 		name = "K7";
1104f263522aSJoseph Koshy 		break;
1105e829eb6dSJoseph Koshy #endif
1106f263522aSJoseph Koshy 	case 0xF00:		/* Athlon64/Opteron processor */
1107e829eb6dSJoseph Koshy 		classindex = PMC_MDEP_CLASS_INDEX_K8;
1108f263522aSJoseph Koshy 		cputype = PMC_CPU_AMD_K8;
1109f263522aSJoseph Koshy 		class = PMC_CLASS_K8;
1110f263522aSJoseph Koshy 		name = "K8";
1111f263522aSJoseph Koshy 		break;
1112f263522aSJoseph Koshy 
1113b38c0519SDimitry Andric 	default:
111481eb4dcfSMatt Macy 		(void) printf("pmc: Unknown AMD CPU %x %d-%d.\n", cpu_id, (cpu_id & 0xF00) >> 8, model);
1115ebccf1e3SJoseph Koshy 		return NULL;
1116f263522aSJoseph Koshy 	}
1117f263522aSJoseph Koshy 
1118680f1afdSJohn Baldwin #ifdef	HWPMC_DEBUG
1119f263522aSJoseph Koshy 	amd_pmc_class = class;
1120f263522aSJoseph Koshy #endif
1121ebccf1e3SJoseph Koshy 
1122e829eb6dSJoseph Koshy 	/*
1123e829eb6dSJoseph Koshy 	 * Allocate space for pointers to PMC HW descriptors and for
1124e829eb6dSJoseph Koshy 	 * the MDEP structure used by MI code.
1125e829eb6dSJoseph Koshy 	 */
1126e829eb6dSJoseph Koshy 	amd_pcpu = malloc(sizeof(struct amd_cpu *) * pmc_cpu_max(), M_PMC,
1127e829eb6dSJoseph Koshy 	    M_WAITOK|M_ZERO);
1128e829eb6dSJoseph Koshy 
1129e829eb6dSJoseph Koshy 	/*
1130e829eb6dSJoseph Koshy 	 * These processors have two classes of PMCs: the TSC and
1131e829eb6dSJoseph Koshy 	 * programmable PMCs.
1132e829eb6dSJoseph Koshy 	 */
1133f5f9340bSFabien Thomas 	pmc_mdep = pmc_mdep_alloc(2);
1134ebccf1e3SJoseph Koshy 
1135f263522aSJoseph Koshy 	pmc_mdep->pmd_cputype = cputype;
1136ebccf1e3SJoseph Koshy 
1137e829eb6dSJoseph Koshy 	ncpus = pmc_cpu_max();
1138c5153e19SJoseph Koshy 
1139e829eb6dSJoseph Koshy 	/* Initialize the TSC. */
1140e829eb6dSJoseph Koshy 	error = pmc_tsc_initialize(pmc_mdep, ncpus);
1141e829eb6dSJoseph Koshy 	if (error)
1142e829eb6dSJoseph Koshy 		goto error;
1143c5153e19SJoseph Koshy 
1144e829eb6dSJoseph Koshy 	/* Initialize AMD K7 and K8 PMC handling. */
1145e829eb6dSJoseph Koshy 	pcd = &pmc_mdep->pmd_classdep[classindex];
1146c5153e19SJoseph Koshy 
1147e829eb6dSJoseph Koshy 	pcd->pcd_caps		= AMD_PMC_CAPS;
1148e829eb6dSJoseph Koshy 	pcd->pcd_class		= class;
1149e829eb6dSJoseph Koshy 	pcd->pcd_num		= AMD_NPMCS;
1150e829eb6dSJoseph Koshy 	pcd->pcd_ri		= pmc_mdep->pmd_npmc;
1151e829eb6dSJoseph Koshy 	pcd->pcd_width		= 48;
1152ebccf1e3SJoseph Koshy 
1153f263522aSJoseph Koshy 	/* fill in the correct pmc name and class */
1154e829eb6dSJoseph Koshy 	for (i = 0; i < AMD_NPMCS; i++) {
1155f263522aSJoseph Koshy 		(void) snprintf(amd_pmcdesc[i].pm_descr.pd_name,
1156f263522aSJoseph Koshy 		    sizeof(amd_pmcdesc[i].pm_descr.pd_name), "%s-%d",
11578cd64ec8SJoseph Koshy 		    name, i);
1158f263522aSJoseph Koshy 		amd_pmcdesc[i].pm_descr.pd_class = class;
1159f263522aSJoseph Koshy 	}
1160f263522aSJoseph Koshy 
1161e829eb6dSJoseph Koshy 	pcd->pcd_allocate_pmc	= amd_allocate_pmc;
1162e829eb6dSJoseph Koshy 	pcd->pcd_config_pmc	= amd_config_pmc;
1163e829eb6dSJoseph Koshy 	pcd->pcd_describe	= amd_describe;
1164e829eb6dSJoseph Koshy 	pcd->pcd_get_config	= amd_get_config;
1165e829eb6dSJoseph Koshy 	pcd->pcd_get_msr	= amd_get_msr;
1166e829eb6dSJoseph Koshy 	pcd->pcd_pcpu_fini	= amd_pcpu_fini;
1167e829eb6dSJoseph Koshy 	pcd->pcd_pcpu_init	= amd_pcpu_init;
1168e829eb6dSJoseph Koshy 	pcd->pcd_read_pmc	= amd_read_pmc;
1169e829eb6dSJoseph Koshy 	pcd->pcd_release_pmc	= amd_release_pmc;
1170e829eb6dSJoseph Koshy 	pcd->pcd_start_pmc	= amd_start_pmc;
1171e829eb6dSJoseph Koshy 	pcd->pcd_stop_pmc	= amd_stop_pmc;
1172e829eb6dSJoseph Koshy 	pcd->pcd_write_pmc	= amd_write_pmc;
1173e829eb6dSJoseph Koshy 
1174e829eb6dSJoseph Koshy 	pmc_mdep->pmd_pcpu_init = NULL;
1175e829eb6dSJoseph Koshy 	pmc_mdep->pmd_pcpu_fini = NULL;
1176e829eb6dSJoseph Koshy 	pmc_mdep->pmd_intr	= amd_intr;
1177ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_switch_in = amd_switch_in;
1178ebccf1e3SJoseph Koshy 	pmc_mdep->pmd_switch_out = amd_switch_out;
1179e829eb6dSJoseph Koshy 
1180e829eb6dSJoseph Koshy 	pmc_mdep->pmd_npmc     += AMD_NPMCS;
1181ebccf1e3SJoseph Koshy 
11824a3690dfSJohn Baldwin 	PMCDBG0(MDP,INI,0,"amd-initialize");
1183ebccf1e3SJoseph Koshy 
1184e829eb6dSJoseph Koshy 	return (pmc_mdep);
1185e829eb6dSJoseph Koshy 
1186e829eb6dSJoseph Koshy   error:
1187e829eb6dSJoseph Koshy 	if (error) {
1188e829eb6dSJoseph Koshy 		free(pmc_mdep, M_PMC);
1189e829eb6dSJoseph Koshy 		pmc_mdep = NULL;
1190e829eb6dSJoseph Koshy 	}
1191e829eb6dSJoseph Koshy 
1192e829eb6dSJoseph Koshy 	return (NULL);
1193e829eb6dSJoseph Koshy }
1194e829eb6dSJoseph Koshy 
1195e829eb6dSJoseph Koshy /*
1196e829eb6dSJoseph Koshy  * Finalization code for AMD CPUs.
1197e829eb6dSJoseph Koshy  */
1198e829eb6dSJoseph Koshy 
1199e829eb6dSJoseph Koshy void
1200e829eb6dSJoseph Koshy pmc_amd_finalize(struct pmc_mdep *md)
1201e829eb6dSJoseph Koshy {
1202e829eb6dSJoseph Koshy #if	defined(INVARIANTS)
1203e829eb6dSJoseph Koshy 	int classindex, i, ncpus, pmcclass;
1204e829eb6dSJoseph Koshy #endif
1205e829eb6dSJoseph Koshy 
1206e829eb6dSJoseph Koshy 	pmc_tsc_finalize(md);
1207e829eb6dSJoseph Koshy 
1208e829eb6dSJoseph Koshy 	KASSERT(amd_pcpu != NULL, ("[amd,%d] NULL per-cpu array pointer",
1209e829eb6dSJoseph Koshy 	    __LINE__));
1210e829eb6dSJoseph Koshy 
1211e829eb6dSJoseph Koshy #if	defined(INVARIANTS)
1212e829eb6dSJoseph Koshy 	switch (md->pmd_cputype) {
1213e829eb6dSJoseph Koshy #if	defined(__i386__)
1214e829eb6dSJoseph Koshy 	case PMC_CPU_AMD_K7:
1215e829eb6dSJoseph Koshy 		classindex = PMC_MDEP_CLASS_INDEX_K7;
1216e829eb6dSJoseph Koshy 		pmcclass = PMC_CLASS_K7;
1217e829eb6dSJoseph Koshy 		break;
1218e829eb6dSJoseph Koshy #endif
1219e829eb6dSJoseph Koshy 	default:
1220e829eb6dSJoseph Koshy 		classindex = PMC_MDEP_CLASS_INDEX_K8;
1221e829eb6dSJoseph Koshy 		pmcclass = PMC_CLASS_K8;
1222e829eb6dSJoseph Koshy 	}
1223e829eb6dSJoseph Koshy 
1224e829eb6dSJoseph Koshy 	KASSERT(md->pmd_classdep[classindex].pcd_class == pmcclass,
1225e829eb6dSJoseph Koshy 	    ("[amd,%d] pmc class mismatch", __LINE__));
1226e829eb6dSJoseph Koshy 
1227e829eb6dSJoseph Koshy 	ncpus = pmc_cpu_max();
1228e829eb6dSJoseph Koshy 
1229e829eb6dSJoseph Koshy 	for (i = 0; i < ncpus; i++)
1230e829eb6dSJoseph Koshy 		KASSERT(amd_pcpu[i] == NULL, ("[amd,%d] non-null pcpu",
1231e829eb6dSJoseph Koshy 		    __LINE__));
1232e829eb6dSJoseph Koshy #endif
1233e829eb6dSJoseph Koshy 
1234e829eb6dSJoseph Koshy 	free(amd_pcpu, M_PMC);
1235e829eb6dSJoseph Koshy 	amd_pcpu = NULL;
1236ebccf1e3SJoseph Koshy }
1237