/*-
 * Copyright (c) 2003-2005 Joseph Koshy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Support for the AMD K7 and later processors */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/md_var.h>
#include <machine/pmc_mdep.h>
#include <machine/specialreg.h>

/* AMD K7 and K8 PMCs */

#define	AMD_PMC_EVSEL_0		0xC0010000
#define	AMD_PMC_EVSEL_1		0xC0010001
#define	AMD_PMC_EVSEL_2		0xC0010002
#define	AMD_PMC_EVSEL_3		0xC0010003

#define	AMD_PMC_PERFCTR_0	0xC0010004
#define	AMD_PMC_PERFCTR_1	0xC0010005
#define	AMD_PMC_PERFCTR_2	0xC0010006
#define	AMD_PMC_PERFCTR_3	0xC0010007
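
/*
 * The EVSEL and PERFCTR MSRs are numbered contiguously, so the
 * register addresses for counter 'i' could also be computed instead
 * of tabulated.  Hypothetical helpers (illustrative only, not used
 * by this driver) would look like:
 */
#if 0
#define	AMD_PMC_EVSEL(i)	(AMD_PMC_EVSEL_0 + (i))
#define	AMD_PMC_PERFCTR(i)	(AMD_PMC_PERFCTR_0 + (i))
#endif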

#define	K7_VALID_EVENT_CODE(c) (((c) >= 0x40 && (c) <= 0x47) ||		\
	((c) >= 0x80 && (c) <= 0x85) || ((c) >= 0xC0 && (c) <= 0xC7) ||	\
	((c) >= 0xCD && (c) <= 0xCF))

#define AMD_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
	PMC_CAP_SYSTEM | PMC_CAP_EDGE | PMC_CAP_THRESHOLD | \
	PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INVERT | PMC_CAP_QUALIFIER)

/* reserved bits include bit 21 and the top two bits of the unit mask */
#define K7_PMC_RESERVED ((1 << 21) | (3 << 13))

#define	K8_PMC_RESERVED (1 << 21)
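
/*
 * For reference, the K7/K8 EVSEL registers are laid out roughly as
 * follows (per AMD's processor documentation; the K7 and K8 macros
 * in <machine/pmc_mdep.h> are authoritative):
 *
 *	bits  7:0	event select
 *	bits 15:8	unit mask
 *	bit  16		USR: count events in user mode
 *	bit  17		OS:  count events in kernel mode
 *	bit  18		E:   edge detect
 *	bit  19		PC:  pin control
 *	bit  20		INT: enable APIC interrupt on overflow
 *	bit  22		EN:  enable the counter
 *	bit  23		INV: invert counter-mask comparison
 *	bits 31:24	counter mask (threshold)
 */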

#define AMD_PMC_IS_STOPPED(evsel) ((rdmsr((evsel)) & AMD_PMC_ENABLE) == 0)
#define AMD_PMC_HAS_OVERFLOWED(pmc) ((rdpmc(pmc) & (1ULL << 47)) == 0)
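
/*
 * Overflow convention, by example: the counters are 48 bits wide and
 * sampling PMCs are loaded with the two's complement of the sampling
 * count (see amd_write_pmc() below).  A count of 4096 is stored as
 * 2^48 - 4096, so bit 47 stays set until the counter rolls over 4096
 * increments later, at which point AMD_PMC_HAS_OVERFLOWED() reads it
 * as clear.
 */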

#if	__i386__
#define	AMD_NPMCS		K7_NPMCS
#define	AMD_PMC_CLASS		PMC_CLASS_K7
#define	AMD_PMC_COUNTERMASK	K7_PMC_COUNTERMASK
#define	AMD_PMC_TO_COUNTER(x)	K7_PMC_TO_COUNTER(x)
#define	AMD_PMC_INVERT		K7_PMC_INVERT
#define	AMD_PMC_ENABLE		K7_PMC_ENABLE
#define	AMD_PMC_INT		K7_PMC_INT
#define	AMD_PMC_PC		K7_PMC_PC
#define	AMD_PMC_EDGE		K7_PMC_EDGE
#define	AMD_PMC_OS		K7_PMC_OS
#define	AMD_PMC_USR		K7_PMC_USR

#define	AMD_PMC_UNITMASK_M	K7_PMC_UNITMASK_M
#define	AMD_PMC_UNITMASK_O	K7_PMC_UNITMASK_O
#define	AMD_PMC_UNITMASK_E	K7_PMC_UNITMASK_E
#define	AMD_PMC_UNITMASK_S	K7_PMC_UNITMASK_S
#define	AMD_PMC_UNITMASK_I	K7_PMC_UNITMASK_I

#define	AMD_PMC_UNITMASK	K7_PMC_UNITMASK
#define	AMD_PMC_EVENTMASK	K7_PMC_EVENTMASK
#define	AMD_PMC_TO_UNITMASK(x)	K7_PMC_TO_UNITMASK(x)
#define	AMD_PMC_TO_EVENTMASK(x)	K7_PMC_TO_EVENTMASK(x)
#define	AMD_VALID_BITS		K7_VALID_BITS

#define	AMD_PMC_CLASS_NAME	"K7-"

#elif	__amd64__

#define	AMD_NPMCS		K8_NPMCS
#define	AMD_PMC_CLASS		PMC_CLASS_K8
#define	AMD_PMC_COUNTERMASK	K8_PMC_COUNTERMASK
#define	AMD_PMC_TO_COUNTER(x)	K8_PMC_TO_COUNTER(x)
#define	AMD_PMC_INVERT		K8_PMC_INVERT
#define	AMD_PMC_ENABLE		K8_PMC_ENABLE
#define	AMD_PMC_INT		K8_PMC_INT
#define	AMD_PMC_PC		K8_PMC_PC
#define	AMD_PMC_EDGE		K8_PMC_EDGE
#define	AMD_PMC_OS		K8_PMC_OS
#define	AMD_PMC_USR		K8_PMC_USR

#define	AMD_PMC_UNITMASK_M	K8_PMC_UNITMASK_M
#define	AMD_PMC_UNITMASK_O	K8_PMC_UNITMASK_O
#define	AMD_PMC_UNITMASK_E	K8_PMC_UNITMASK_E
#define	AMD_PMC_UNITMASK_S	K8_PMC_UNITMASK_S
#define	AMD_PMC_UNITMASK_I	K8_PMC_UNITMASK_I

#define	AMD_PMC_UNITMASK	K8_PMC_UNITMASK
#define	AMD_PMC_EVENTMASK	K8_PMC_EVENTMASK
#define	AMD_PMC_TO_UNITMASK(x)	K8_PMC_TO_UNITMASK(x)
#define	AMD_PMC_TO_EVENTMASK(x)	K8_PMC_TO_EVENTMASK(x)
#define	AMD_VALID_BITS		K8_VALID_BITS

#define	AMD_PMC_CLASS_NAME	"K8-"

#else
#error	Unsupported architecture.
#endif

/* AMD K7 & K8 PMC descriptors */
struct amd_descr {
	struct pmc_descr pm_descr;  /* "base class" */
	uint32_t	pm_evsel;   /* address of EVSEL register */
	uint32_t	pm_perfctr; /* address of PERFCTR register */
};

static const struct amd_descr amd_pmcdesc[AMD_NPMCS] =
{
    {
	.pm_descr =
	{
		.pd_name  = "TSC",
		.pd_class = PMC_CLASS_TSC,
		.pd_caps  = PMC_CAP_READ,
		.pd_width = 64
	},
	.pm_evsel   = MSR_TSC,
	.pm_perfctr = 0	/* unused */
    },

    {
	.pm_descr =
	{
		.pd_name  = AMD_PMC_CLASS_NAME "0",
		.pd_class = AMD_PMC_CLASS,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_0,
	.pm_perfctr = AMD_PMC_PERFCTR_0
    },
    {
	.pm_descr =
	{
		.pd_name  = AMD_PMC_CLASS_NAME "1",
		.pd_class = AMD_PMC_CLASS,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_1,
	.pm_perfctr = AMD_PMC_PERFCTR_1
    },
    {
	.pm_descr =
	{
		.pd_name  = AMD_PMC_CLASS_NAME "2",
		.pd_class = AMD_PMC_CLASS,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_2,
	.pm_perfctr = AMD_PMC_PERFCTR_2
    },
    {
	.pm_descr =
	{
		.pd_name  = AMD_PMC_CLASS_NAME "3",
		.pd_class = AMD_PMC_CLASS,
		.pd_caps  = AMD_PMC_CAPS,
		.pd_width = 48
	},
	.pm_evsel   = AMD_PMC_EVSEL_3,
	.pm_perfctr = AMD_PMC_PERFCTR_3
    }
};

struct amd_event_code_map {
	enum pmc_event	pe_ev;	 /* enum value */
	uint8_t		pe_code; /* encoded event code */
	uint8_t		pe_mask; /* bits allowed in unit mask */
};

const struct amd_event_code_map amd_event_codes[] = {
#if	__i386__
	{ PMC_EV_K7_DC_ACCESSES,		0x40, 0 },
	{ PMC_EV_K7_DC_MISSES,			0x41, 0 },
	{ PMC_EV_K7_DC_REFILLS_FROM_L2,		0x42, K7_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_DC_REFILLS_FROM_SYSTEM,	0x43, K7_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_DC_WRITEBACKS,		0x44, K7_PMC_UNITMASK_MOESI },
	{ PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 },
	{ PMC_EV_K7_L1_AND_L2_DTLB_MISSES,	0x46, 0 },
	{ PMC_EV_K7_MISALIGNED_REFERENCES,	0x47, 0 },

	{ PMC_EV_K7_IC_FETCHES,			0x80, 0 },
	{ PMC_EV_K7_IC_MISSES,			0x81, 0 },

	{ PMC_EV_K7_L1_ITLB_MISSES,		0x84, 0 },
	{ PMC_EV_K7_L1_L2_ITLB_MISSES,		0x85, 0 },

	{ PMC_EV_K7_RETIRED_INSTRUCTIONS,	0xC0, 0 },
	{ PMC_EV_K7_RETIRED_OPS,		0xC1, 0 },
	{ PMC_EV_K7_RETIRED_BRANCHES,		0xC2, 0 },
	{ PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 },
	{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES,	0xC4, 0 },
	{ PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 },
	{ PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 },
	{ PMC_EV_K7_RETIRED_RESYNC_BRANCHES,	0xC7, 0 },
	{ PMC_EV_K7_INTERRUPTS_MASKED_CYCLES,	0xCD, 0 },
	{ PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 },
	{ PMC_EV_K7_HARDWARE_INTERRUPTS,	0xCF, 0 }
#endif

#if	__amd64__
	{ PMC_EV_K8_FP_DISPATCHED_FPU_OPS,		0x00, 0x3F },
	{ PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED,	0x01, 0x00 },
	{ PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS,	0x02, 0x00 },

	{ PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD,		0x20, 0x7F },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
							0x21, 0x00 },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
	{ PMC_EV_K8_LS_BUFFER2_FULL,			0x23, 0x00 },
	{ PMC_EV_K8_LS_LOCKED_OPERATION,		0x24, 0x07 },
	{ PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL,	0x25, 0x00 },
	{ PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS,	0x26, 0x00 },
	{ PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS,	0x27, 0x00 },

	{ PMC_EV_K8_DC_ACCESS,				0x40, 0x00 },
	{ PMC_EV_K8_DC_MISS,				0x41, 0x00 },
	{ PMC_EV_K8_DC_REFILL_FROM_L2,			0x42, 0x1F },
	{ PMC_EV_K8_DC_REFILL_FROM_SYSTEM,		0x43, 0x1F },
	{ PMC_EV_K8_DC_COPYBACK,			0x44, 0x1F },
	{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT,	0x45, 0x00 },
	{ PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS,	0x46, 0x00 },
	{ PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE,	0x47, 0x00 },
	{ PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL,	0x48, 0x00 },
	{ PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
	{ PMC_EV_K8_DC_ONE_BIT_ECC_ERROR,		0x4A, 0x03 },
	{ PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
	{ PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS,	0x4C, 0x03 },

	{ PMC_EV_K8_BU_CPU_CLK_UNHALTED,		0x76, 0x00 },
	{ PMC_EV_K8_BU_INTERNAL_L2_REQUEST,		0x7D, 0x1F },
	{ PMC_EV_K8_BU_FILL_REQUEST_L2_MISS,		0x7E, 0x07 },
	{ PMC_EV_K8_BU_FILL_INTO_L2,			0x7F, 0x03 },

	{ PMC_EV_K8_IC_FETCH,				0x80, 0x00 },
	{ PMC_EV_K8_IC_MISS,				0x81, 0x00 },
	{ PMC_EV_K8_IC_REFILL_FROM_L2,			0x82, 0x00 },
	{ PMC_EV_K8_IC_REFILL_FROM_SYSTEM,		0x83, 0x00 },
	{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT,	0x84, 0x00 },
	{ PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS,	0x85, 0x00 },
	{ PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
	{ PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL,		0x87, 0x00 },
	{ PMC_EV_K8_IC_RETURN_STACK_HIT,		0x88, 0x00 },
	{ PMC_EV_K8_IC_RETURN_STACK_OVERFLOW,		0x89, 0x00 },

	{ PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS,	0xC0, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_UOPS,			0xC1, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_BRANCHES,		0xC2, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED,	0xC3, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES,		0xC4, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS,	0xC6, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_RESYNCS,			0xC7, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS,		0xC8, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
							0xCA, 0x00 },
	{ PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS,	0xCB, 0x0F },
	{ PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
							0xCC, 0x07 },
	{ PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES,	0xCD, 0x00 },
	{ PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
	{ PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS,	0xCF, 0x00 },

	{ PMC_EV_K8_FR_DECODER_EMPTY,			0xD0, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALLS,			0xD1, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
							0xD2, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD,	0xD4, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
							0xD5, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
							0xD6, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL,	0xD7, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL,	0xD8, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
							0xD9, 0x00 },
	{ PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
							0xDA, 0x00 },
	{ PMC_EV_K8_FR_FPU_EXCEPTIONS,			0xDB, 0x0F },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0,	0xDC, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1,	0xDD, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2,	0xDE, 0x00 },
	{ PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3,	0xDF, 0x00 },

	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x07 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
							0xE2, 0x00 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND,	0xE3, 0x07 },
	{ PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
	{ PMC_EV_K8_NB_SIZED_COMMANDS,			0xEB, 0x7F },
	{ PMC_EV_K8_NB_PROBE_RESULT,			0xEC, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS0_BANDWIDTH,		0xF6, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS1_BANDWIDTH,		0xF7, 0x0F },
	{ PMC_EV_K8_NB_HT_BUS2_BANDWIDTH,		0xF8, 0x0F }
#endif

};

const int amd_event_codes_size =
	sizeof(amd_event_codes) / sizeof(amd_event_codes[0]);

/*
 * Read a PMC register.
 */

static int
amd_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	enum pmc_mode mode;
	const struct amd_descr *pd;
	struct pmc *pm;
	const struct pmc_hw *phw;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pd  = &amd_pmcdesc[ri];
	pm  = phw->phw_pmc;

	KASSERT(pm != NULL,
	    ("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
		cpu, ri));

	mode = pm->pm_mode;

	PMCDBG(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);

	/* Reading the TSC is a special case */
	if (pd->pm_descr.pd_class == PMC_CLASS_TSC) {
		KASSERT(PMC_IS_COUNTING_MODE(mode),
		    ("[amd,%d] TSC counter in non-counting mode", __LINE__));
		*v = rdtsc();
		PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);
		return 0;
	}

	KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));

	tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */
	if (PMC_IS_SAMPLING_MODE(mode))
		*v = -tmp;
	else
		*v = tmp;

	PMCDBG(MDP,REA,2,"amd-read id=%d -> %jd", ri, *v);

	return 0;
}

/*
 * Write a PMC MSR.
 */

static int
amd_write_pmc(int cpu, int ri, pmc_value_t v)
{
	const struct amd_descr *pd;
	struct pmc *pm;
	const struct pmc_hw *phw;
	enum pmc_mode mode;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pd  = &amd_pmcdesc[ri];
	pm  = phw->phw_pmc;

	KASSERT(pm != NULL,
	    ("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
		cpu, ri));

	mode = pm->pm_mode;

	if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
		return 0;

	KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));

	/* use 2's complement of the count for sampling mode PMCs */
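	/*
	 * For example, writing a sampling count of 4096 stores -4096;
	 * in the 48 bits the counter implements, this is 2^48 - 4096,
	 * so the PMC overflows (and can interrupt) after 4096 events.
	 */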
	if (PMC_IS_SAMPLING_MODE(mode))
		v = -v;

	PMCDBG(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	/* write the PMC value */
	wrmsr(pd->pm_perfctr, v);
	return 0;
}

/*
 * Configure a hardware PMC according to the configuration recorded
 * in 'pm'.
 */

static int
amd_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];

	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[amd,%d] hwpmc not unconfigured before re-config", __LINE__));

	phw->phw_pmc = pm;
	return 0;
}

/*
 * Machine dependent actions taken during the context switch in of a
 * thread.
 */

static int
amd_switch_in(struct pmc_cpu *pc)
{
	(void) pc;

	/* enable the RDPMC instruction */
	load_cr4(rcr4() | CR4_PCE);
	return 0;
}

/*
 * Machine dependent actions taken during the context switch out of a
 * thread.
 */

static int
amd_switch_out(struct pmc_cpu *pc)
{
	(void) pc;

	/* disallow RDPMC instruction */
	load_cr4(rcr4() & ~CR4_PCE);
	return 0;
}
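
/*
 * With CR4.PCE set, a thread may read its own PMCs from user space
 * with the RDPMC instruction.  An illustrative userland fragment (a
 * sketch only; 'counter' selects the PMC, the result is EDX:EAX):
 */
#if 0
static uint64_t
read_pmc(uint32_t counter)
{
	uint32_t hi, lo;

	__asm __volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return (((uint64_t) hi << 32) | lo);
}
#endif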

/*
 * Check if a given allocation is feasible.
 */

static int
amd_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	int i;
	uint32_t allowed_unitmask, caps, config, unitmask;
	enum pmc_event pe;
	const struct pmc_descr *pd;

	(void) cpu;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row index %d", __LINE__, ri));

	pd = &amd_pmcdesc[ri].pm_descr;

	/* check class match */
	if (pd->pd_class != pm->pm_class)
		return EINVAL;

	caps = pm->pm_caps;

	PMCDBG(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps);

	if ((pd->pd_caps & caps) != caps)
		return EPERM;
	if (pd->pd_class == PMC_CLASS_TSC) {
		/* TSCs are always allocated in system-wide counting mode */
		if (a->pm_ev != PMC_EV_TSC_TSC ||
		    a->pm_mode != PMC_MODE_SC)
			return EINVAL;
		return 0;
	}

	KASSERT(pd->pd_class == AMD_PMC_CLASS,
	    ("[amd,%d] Unknown PMC class (%d)", __LINE__, pd->pd_class));

	pe = a->pm_ev;

	/* map ev to the correct event mask code */
	config = allowed_unitmask = 0;
	for (i = 0; i < amd_event_codes_size; i++)
		if (amd_event_codes[i].pe_ev == pe) {
			config =
			    AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
			allowed_unitmask =
			    AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
			break;
		}
	if (i == amd_event_codes_size)
		return EINVAL;

	unitmask = a->pm_amd_config & AMD_PMC_UNITMASK;
	if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
		return EINVAL;

	if (unitmask && (caps & PMC_CAP_QUALIFIER))
		config |= unitmask;

	if (caps & PMC_CAP_THRESHOLD)
		config |= a->pm_amd_config & AMD_PMC_COUNTERMASK;

	/* set at least one of the 'usr' or 'os' caps */
	if (caps & PMC_CAP_USER)
		config |= AMD_PMC_USR;
	if (caps & PMC_CAP_SYSTEM)
		config |= AMD_PMC_OS;
	if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
		config |= (AMD_PMC_USR|AMD_PMC_OS);

	if (caps & PMC_CAP_EDGE)
		config |= AMD_PMC_EDGE;
	if (caps & PMC_CAP_INVERT)
		config |= AMD_PMC_INVERT;
	if (caps & PMC_CAP_INTERRUPT)
		config |= AMD_PMC_INT;

	pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */

	PMCDBG(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, config);

	return 0;
}
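
/*
 * For example (illustrative values only): an allocation for the K8
 * event with code 0xC0 (retired x86 instructions), qualified to count
 * user-mode events and to interrupt on overflow, composes
 *
 *	config = AMD_PMC_TO_EVENTMASK(0xC0) | AMD_PMC_USR | AMD_PMC_INT;
 *
 * amd_start_pmc() below ORs in AMD_PMC_ENABLE before writing the
 * EVSEL register.
 */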

/*
 * Release machine dependent state associated with a PMC.  This is a
 * no-op on this architecture.
 */

/* ARGSUSED0 */
static int
amd_release_pmc(int cpu, int ri, struct pmc *pmc)
{
#if	DEBUG
	const struct amd_descr *pd;
#endif
	struct pmc_hw *phw;

	(void) pmc;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];

	KASSERT(phw->phw_pmc == NULL,
	    ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

#if	DEBUG
	pd = &amd_pmcdesc[ri];
	if (pd->pm_descr.pd_class == AMD_PMC_CLASS)
		KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
		    ("[amd,%d] PMC %d released while active", __LINE__, ri));
#endif

	return 0;
}

/*
 * Start a PMC.
 */

static int
amd_start_pmc(int cpu, int ri)
{
	uint32_t config;
	struct pmc *pm;
	struct pmc_hw *phw;
	const struct amd_descr *pd;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pm  = phw->phw_pmc;
	pd  = &amd_pmcdesc[ri];

	KASSERT(pm != NULL,
	    ("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
		cpu, ri));

	PMCDBG(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);

	if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
		return 0;	/* TSCs are always running */

	KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));

	KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
	    ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
	    ri, cpu, pd->pm_descr.pd_name));

	/* turn on the PMC ENABLE bit */
	config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;

	PMCDBG(MDP,STA,2,"amd-start config=0x%x", config);

	wrmsr(pd->pm_evsel, config);
	return 0;
}

/*
 * Stop a PMC.
 */

static int
amd_stop_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct pmc_hw *phw;
	const struct amd_descr *pd;
	uint64_t config;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] illegal row-index %d", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pm  = phw->phw_pmc;
	pd  = &amd_pmcdesc[ri];

	KASSERT(pm != NULL,
	    ("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
		cpu, ri));

	/* can't stop a TSC */
	if (pd->pm_descr.pd_class == PMC_CLASS_TSC)
		return 0;

	KASSERT(pd->pm_descr.pd_class == AMD_PMC_CLASS,
	    ("[amd,%d] unknown PMC class (%d)", __LINE__,
		pd->pm_descr.pd_class));

	KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
	    ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
		__LINE__, ri, cpu, pd->pm_descr.pd_name));

	PMCDBG(MDP,STO,1,"amd-stop ri=%d", ri);

	/* turn off the PMC ENABLE bit */
	config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;
	wrmsr(pd->pm_evsel, config);
	return 0;
}

/*
 * Interrupt handler.  This function needs to return '1' if the
 * interrupt was caused by this CPU's PMCs or '0' otherwise.  It is
 * not allowed to sleep or do anything a 'fast' interrupt handler is
 * not allowed to do.
 */

static int
amd_intr(int cpu, uintptr_t eip)
{
	int i, retval;
	enum pmc_mode mode;
	struct pmc *pm;
	struct pmc_cpu *pc;
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] out of range CPU %d", __LINE__, cpu));

	retval = 0;

	pc = pmc_pcpu[cpu];

	/*
	 * look for all PMCs that have interrupted:
	 * - skip over the TSC [PMC#0]
	 * - look for a PMC with a valid 'struct pmc' association
	 * - look for a PMC in (a) sampling mode and (b) which has
	 *   overflowed.  If found, we update the process's
	 *   histogram or send it a profiling signal by calling
	 *   the appropriate helper function.
	 */

	for (i = 1; i < AMD_NPMCS; i++) {

		phw = pc->pc_hwpmcs[i];
		KASSERT(phw != NULL, ("[amd,%d] null PHW pointer", __LINE__));

		if ((pm = phw->phw_pmc) == NULL ||
		    pm->pm_state != PMC_STATE_RUNNING) {
			atomic_add_int(&pmc_stats.pm_intr_ignored, 1);
			continue;
		}

		/*
		 * RDPMC takes a counter index, not a PERFCTR MSR
		 * address; row 'i' maps to hardware counter 'i - 1'
		 * since row 0 is the TSC.
		 */
		mode = pm->pm_mode;
		if (PMC_IS_SAMPLING_MODE(mode) &&
		    AMD_PMC_HAS_OVERFLOWED(i - 1)) {
			atomic_add_int(&pmc_stats.pm_intr_processed, 1);
			if (PMC_IS_SYSTEM_MODE(mode))
				pmc_update_histogram(phw, eip);
			else if (PMC_IS_VIRTUAL_MODE(mode))
				pmc_send_signal(pm);
			retval = 1;
		}
	}
	return retval;
}

/*
 * Describe a PMC.
 */
static int
amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	size_t copied;
	const struct amd_descr *pd;
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] row-index %d out of range", __LINE__, ri));

	phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
	pd  = &amd_pmcdesc[ri];

	if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
		 PMC_NAME_MAX, &copied)) != 0)
		return error;

	pi->pm_class = pd->pm_descr.pd_class;
	pi->pm_caps  = pd->pm_descr.pd_caps;
	pi->pm_width = pd->pm_descr.pd_width;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return 0;
}

/*
 * i386 specific entry points
 */

/*
 * Return the MSR address of the given PMC.
 */

static int
amd_get_msr(int ri, uint32_t *msr)
{
	KASSERT(ri >= 0 && ri < AMD_NPMCS,
	    ("[amd,%d] ri %d out of range", __LINE__, ri));

	*msr = amd_pmcdesc[ri].pm_perfctr;
	return 0;
}

/*
 * Processor dependent initialization.
 */

/*
 * Per-processor data structure
 *
 * [common stuff]
 * [5 struct pmc_hw pointers]
 * [5 struct pmc_hw structures]
 */

struct amd_cpu {
	struct pmc_cpu	pc_common;
	struct pmc_hw	*pc_hwpmcs[AMD_NPMCS];
	struct pmc_hw	pc_amdpmcs[AMD_NPMCS];
};
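
/*
 * Note that 'pc_common' must remain the first member: amd_init()
 * publishes this structure through pmc_pcpu[] by casting the
 * 'struct amd_cpu *' to 'struct pmc_cpu *'.
 */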


static int
amd_init(int cpu)
{
	int n;
	struct amd_cpu *pcs;
	struct pmc_hw  *phw;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG(MDP,INI,1,"amd-init cpu=%d", cpu);

	/* an M_WAITOK allocation cannot fail, so no NULL check is needed */
	MALLOC(pcs, struct amd_cpu *, sizeof(struct amd_cpu), M_PMC,
	    M_WAITOK|M_ZERO);

	phw = &pcs->pc_amdpmcs[0];

	/*
	 * Set the content of the hardware descriptors to a known
	 * state.
	 */

	for (n = 0; n < AMD_NPMCS; n++, phw++) {
		phw->phw_state	  = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
		phw->phw_pmc	  = NULL;
		pcs->pc_hwpmcs[n] = phw;
	}

	/* Mark the TSC as shareable */
	pcs->pc_hwpmcs[0]->phw_state |= PMC_PHW_FLAG_IS_SHAREABLE;

	pmc_pcpu[cpu] = (struct pmc_cpu *) pcs;

	return 0;
}


/*
 * Processor dependent cleanup prior to the KLD being unloaded.
 */

static int
amd_cleanup(int cpu)
{
	int i;
	uint32_t evsel;
	struct pmc_cpu *pcs;

	KASSERT(cpu >= 0 && cpu < mp_ncpus,
	    ("[amd,%d] insane cpu number (%d)", __LINE__, cpu));

	PMCDBG(MDP,INI,1,"amd-cleanup cpu=%d", cpu);

	/*
	 * First, turn off all PMCs on this CPU.
	 */

	for (i = 0; i < 4; i++) { /* XXX this loop is no longer needed */
		evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
		evsel &= ~AMD_PMC_ENABLE;
		wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
	}

	/*
	 * Next, free up allocated space.
	 */

	pcs = pmc_pcpu[cpu];

	KASSERT(pcs != NULL,
	    ("[amd,%d] null per-cpu state pointer (cpu%d)", __LINE__, cpu));

#if	DEBUG
	/* check the TSC */
	KASSERT(pcs->pc_hwpmcs[0]->phw_pmc == NULL,
	    ("[amd,%d] CPU%d,PMC0 still in use", __LINE__, cpu));
	for (i = 1; i < AMD_NPMCS; i++) {
		KASSERT(pcs->pc_hwpmcs[i]->phw_pmc == NULL,
		    ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
		KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + (i-1)),
		    ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
	}
#endif

	pmc_pcpu[cpu] = NULL;
	FREE(pcs, M_PMC);
	return 0;
}

/*
 * Initialize ourselves.
 */

struct pmc_mdep *
pmc_amd_initialize(void)
{

	struct pmc_mdep *pmc_mdep;

	/*
	 * The presence of hardware performance counters on the AMD
	 * Athlon, Duron or later processors is _not_ indicated by any
	 * of the processor feature flags set by the 'CPUID'
	 * instruction, so we only check that the 'instruction family'
	 * field returned by CPUID is >= 6.  This test needs to be
	 * refined.
	 */
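
	/*
	 * Example (illustrative cpu_id values): an Athlon XP reporting
	 * 0x6A0 (family 6, model 10) passes the check below, while a
	 * K6 reporting 0x580 (family 5, model 8) is rejected.
	 */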

	if ((cpu_id & 0xF00) < 0x600)
		return NULL;

	MALLOC(pmc_mdep, struct pmc_mdep *, sizeof(struct pmc_mdep),
	    M_PMC, M_WAITOK|M_ZERO);

#if	__i386__
	pmc_mdep->pmd_cputype	   = PMC_CPU_AMD_K7;
#elif	__amd64__
	pmc_mdep->pmd_cputype	   = PMC_CPU_AMD_K8;
#else
#error	Unknown AMD CPU type.
#endif

	pmc_mdep->pmd_npmc	   = AMD_NPMCS;

	/* this processor has two classes of usable PMCs */
	pmc_mdep->pmd_nclass       = 2;
	pmc_mdep->pmd_classes[0]   = PMC_CLASS_TSC;
	pmc_mdep->pmd_classes[1]   = AMD_PMC_CLASS;
	pmc_mdep->pmd_nclasspmcs[0] = 1;
	pmc_mdep->pmd_nclasspmcs[1] = (AMD_NPMCS-1);

	pmc_mdep->pmd_init	   = amd_init;
	pmc_mdep->pmd_cleanup	   = amd_cleanup;
	pmc_mdep->pmd_switch_in    = amd_switch_in;
	pmc_mdep->pmd_switch_out   = amd_switch_out;
	pmc_mdep->pmd_read_pmc	   = amd_read_pmc;
	pmc_mdep->pmd_write_pmc    = amd_write_pmc;
	pmc_mdep->pmd_config_pmc   = amd_config_pmc;
	pmc_mdep->pmd_allocate_pmc = amd_allocate_pmc;
	pmc_mdep->pmd_release_pmc  = amd_release_pmc;
	pmc_mdep->pmd_start_pmc    = amd_start_pmc;
	pmc_mdep->pmd_stop_pmc     = amd_stop_pmc;
	pmc_mdep->pmd_intr	   = amd_intr;
	pmc_mdep->pmd_describe     = amd_describe;
	pmc_mdep->pmd_get_msr	   = amd_get_msr; /* i386 */

	PMCDBG(MDP,INI,0,"%s","amd-initialize");

	return pmc_mdep;
}