xref: /titanic_50/usr/src/uts/intel/pcbe/opteron_pcbe.c (revision 14ea4bb737263733ad80a36b4f73f681c30a6b45)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Performance Counter Back-End for AMD Opteron and AMD Athlon 64 processors.
 */

#include <sys/cpuvar.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpc_pcbe.h>
#include <sys/kmem.h>
#include <sys/sdt.h>
#include <sys/modctl.h>
#include <sys/errno.h>
#include <sys/debug.h>
#include <sys/archsystm.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

static int opt_pcbe_init(void);
static uint_t opt_pcbe_ncounters(void);
static const char *opt_pcbe_impl_name(void);
static const char *opt_pcbe_cpuref(void);
static char *opt_pcbe_list_events(uint_t picnum);
static char *opt_pcbe_list_attrs(void);
static uint64_t opt_pcbe_event_coverage(char *event);
static uint64_t opt_pcbe_overflow_bitmap(void);
static int opt_pcbe_configure(uint_t picnum, char *event, uint64_t preset,
    uint32_t flags, uint_t nattrs, kcpc_attr_t *attrs, void **data,
    void *token);
static void opt_pcbe_program(void *token);
static void opt_pcbe_allstop(void);
static void opt_pcbe_sample(void *token);
static void opt_pcbe_free(void *config);

static pcbe_ops_t opt_pcbe_ops = {
	PCBE_VER_1,
	CPC_CAP_OVERFLOW_INTERRUPT,
	opt_pcbe_ncounters,
	opt_pcbe_impl_name,
	opt_pcbe_cpuref,
	opt_pcbe_list_events,
	opt_pcbe_list_attrs,
	opt_pcbe_event_coverage,
	opt_pcbe_overflow_bitmap,
	opt_pcbe_configure,
	opt_pcbe_program,
	opt_pcbe_allstop,
	opt_pcbe_sample,
	opt_pcbe_free
};

/*
 * Define offsets and masks for the fields in the Performance
 * Event-Select (PES) registers.
 */
#define	OPT_PES_CMASK_SHIFT	24
#define	OPT_PES_CMASK_MASK	0xFF
#define	OPT_PES_INV_SHIFT	23
#define	OPT_PES_ENABLE_SHIFT	22
#define	OPT_PES_INT_SHIFT	20
#define	OPT_PES_PC_SHIFT	19
#define	OPT_PES_EDGE_SHIFT	18
#define	OPT_PES_OS_SHIFT	17
#define	OPT_PES_USR_SHIFT	16
#define	OPT_PES_UMASK_SHIFT	8
#define	OPT_PES_UMASK_MASK	0xFF

#define	OPT_PES_INV		(1 << OPT_PES_INV_SHIFT)
#define	OPT_PES_ENABLE		(1 << OPT_PES_ENABLE_SHIFT)
#define	OPT_PES_INT		(1 << OPT_PES_INT_SHIFT)
#define	OPT_PES_PC		(1 << OPT_PES_PC_SHIFT)
#define	OPT_PES_EDGE		(1 << OPT_PES_EDGE_SHIFT)
#define	OPT_PES_OS		(1 << OPT_PES_OS_SHIFT)
#define	OPT_PES_USR		(1 << OPT_PES_USR_SHIFT)
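
/*
 * opt_pcbe_configure() below builds a complete event-select value by OR-ing
 * an event code from the tables that follow into these fields. For example,
 * counting "DC_miss" (event code 0x41) in both user and kernel mode with an
 * overflow interrupt corresponds to
 *
 *	(0x41 | OPT_PES_USR | OPT_PES_OS | OPT_PES_INT)
 *
 * The enable bit (OPT_PES_ENABLE) is OR-ed in separately by
 * opt_pcbe_program() so that all counters can be started together.
 */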

typedef struct _opt_pcbe_config {
	uint8_t		opt_picno;	/* Counter number: 0, 1, 2, or 3 */
	uint64_t	opt_evsel;	/* Event Selection register */
	uint64_t	opt_rawpic;	/* Raw counter value */
} opt_pcbe_config_t;

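/*
 * nullcfgs provides an all-zero configuration for each counter.
 * opt_pcbe_program() starts from these entries so that any counter without
 * an active configuration is written with a zero event-select (leaving it
 * disabled) and a zero count.
 */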
opt_pcbe_config_t nullcfgs[4] = {
	{ 0, 0, 0 },
	{ 1, 0, 0 },
	{ 2, 0, 0 },
	{ 3, 0, 0 }
};

typedef struct _opt_event {
	char		*name;
	uint8_t		emask;		/* Event mask setting */
	uint8_t		umask_valid;	/* Mask of unreserved UNIT_MASK bits */
} opt_event_t;

/*
 * Base MSR addresses for the PerfEvtSel registers and the counters themselves.
 * Add counter number to base address to get corresponding MSR address.
 */
#define	PES_BASE_ADDR	0xC0010000
#define	PIC_BASE_ADDR	0xC0010004

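/*
 * The performance counters are 48 bits wide; preset values and raw counts
 * are masked with MASK48 so that software arithmetic matches the width of
 * the hardware counters.
 */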
#define	MASK48		0xFFFFFFFFFFFF

#define	EV_END {NULL, 0, 0}

#define	OPT_cmn_events							\
	{ "FP_dispatched_fpu_ops",			0x0, 0x1F },	\
	{ "FP_cycles_no_fpu_ops_retired",		0x1, 0x0 },	\
	{ "FP_dispatched_fpu_ops_ff",			0x2, 0x0 },	\
	{ "LS_seg_reg_load",				0x20, 0x7F },	\
	{ "LS_uarch_resync_self_modify",		0x21, 0x0 },	\
	{ "LS_uarch_resync_snoop",			0x22, 0x0 },	\
	{ "LS_buffer_2_full",				0x23, 0x0 },	\
	{ "LS_locked_operation",			0x24, 0x7 },	\
	{ "LS_retired_cflush",				0x26, 0x0 },	\
	{ "LS_retired_cpuid",				0x27, 0x0 },	\
	{ "DC_access",					0x40, 0x0 },	\
	{ "DC_miss",					0x41, 0x0 },	\
	{ "DC_refill_from_L2",				0x42, 0x1F },	\
	{ "DC_refill_from_system",			0x43, 0x1F },	\
	{ "DC_copyback",				0x44, 0x1F },	\
	{ "DC_dtlb_L1_miss_L2_hit",			0x45, 0x0 },	\
	{ "DC_dtlb_L1_miss_L2_miss",			0x46, 0x0 },	\
	{ "DC_misaligned_data_ref",			0x47, 0x0 },	\
	{ "DC_uarch_late_cancel_access",		0x48, 0x0 },	\
	{ "DC_uarch_early_cancel_access",		0x49, 0x0 },	\
	{ "DC_1bit_ecc_error_found",			0x4A, 0x3 },	\
	{ "DC_dispatched_prefetch_instr",		0x4B, 0x7 },	\
	{ "DC_dcache_accesses_by_locks",		0x4C, 0x2 },	\
	{ "BU_memory_requests",				0x65, 0x83 },	\
	{ "BU_data_prefetch",				0x67, 0x3 },	\
	{ "BU_system_read_responses",			0x6C, 0x7 },	\
	{ "BU_quadwords_written_to_system",		0x6D, 0x1 },	\
	{ "BU_cpu_clk_unhalted",			0x76, 0x0 },	\
	{ "BU_internal_L2_req",				0x7D, 0x1F },	\
	{ "BU_fill_req_missed_L2",			0x7E, 0x7 },	\
	{ "BU_fill_into_L2",				0x7F, 0x1 },	\
	{ "IC_fetch",					0x80, 0x0 },	\
	{ "IC_miss",					0x81, 0x0 },	\
	{ "IC_refill_from_L2",				0x82, 0x0 },	\
	{ "IC_refill_from_system",			0x83, 0x0 },	\
	{ "IC_itlb_L1_miss_L2_hit",			0x84, 0x0 },	\
	{ "IC_itlb_L1_miss_L2_miss",			0x85, 0x0 },	\
	{ "IC_uarch_resync_snoop",			0x86, 0x0 },	\
	{ "IC_instr_fetch_stall",			0x87, 0x0 },	\
	{ "IC_return_stack_hit",			0x88, 0x0 },	\
	{ "IC_return_stack_overflow",			0x89, 0x0 },	\
	{ "FR_retired_x86_instr_w_excp_intr",		0xC0, 0x0 },	\
	{ "FR_retired_uops",				0xC1, 0x0 },	\
	{ "FR_retired_branches_w_excp_intr",		0xC2, 0x0 },	\
	{ "FR_retired_branches_mispred",		0xC3, 0x0 },	\
	{ "FR_retired_taken_branches",			0xC4, 0x0 },	\
	{ "FR_retired_taken_branches_mispred",		0xC5, 0x0 },	\
	{ "FR_retired_far_ctl_transfer",		0xC6, 0x0 },	\
	{ "FR_retired_resyncs",				0xC7, 0x0 },	\
	{ "FR_retired_near_rets",			0xC8, 0x0 },	\
	{ "FR_retired_near_rets_mispred",		0xC9, 0x0 },	\
	{ "FR_retired_taken_branches_mispred_addr_miscop",	0xCA, 0x0 },\
	{ "FR_retired_fpu_instr",			0xCB, 0xF },	\
	{ "FR_retired_fastpath_double_op_instr",	0xCC, 0x7 },	\
	{ "FR_intr_masked_cycles",			0xCD, 0x0 },	\
	{ "FR_intr_masked_while_pending_cycles",	0xCE, 0x0 },	\
	{ "FR_taken_hardware_intrs",			0xCF, 0x0 },	\
	{ "FR_nothing_to_dispatch",			0xD0, 0x0 },	\
	{ "FR_dispatch_stalls",				0xD1, 0x0 },	\
	{ "FR_dispatch_stall_branch_abort_to_retire",	0xD2, 0x0 },	\
	{ "FR_dispatch_stall_serialization",		0xD3, 0x0 },	\
	{ "FR_dispatch_stall_segment_load",		0xD4, 0x0 },	\
	{ "FR_dispatch_stall_reorder_buffer_full",	0xD5, 0x0 },	\
	{ "FR_dispatch_stall_resv_stations_full",	0xD6, 0x0 },	\
	{ "FR_dispatch_stall_fpu_full",			0xD7, 0x0 },	\
	{ "FR_dispatch_stall_ls_full",			0xD8, 0x0 },	\
	{ "FR_dispatch_stall_waiting_all_quiet",	0xD9, 0x0 },	\
	{ "FR_dispatch_stall_far_ctl_trsfr_resync_branc_pend",	0xDA, 0x0 },\
	{ "FR_fpu_exception",				0xDB, 0xF },	\
	{ "FR_num_brkpts_dr0",				0xDC, 0x0 },	\
	{ "FR_num_brkpts_dr1",				0xDD, 0x0 },	\
	{ "FR_num_brkpts_dr2",				0xDE, 0x0 },	\
	{ "FR_num_brkpts_dr3",				0xDF, 0x0 },	\
	{ "NB_mem_ctrlr_page_access",			0xE0, 0x7 },	\
	{ "NB_mem_ctrlr_page_table_overflow",		0xE1, 0x0 },	\
	{ "NB_mem_ctrlr_turnaround",			0xE3, 0x7 },	\
	{ "NB_mem_ctrlr_bypass_counter_saturation",	0xE4, 0xF },	\
	{ "NB_ECC_errors",				0xE8, 0x80 },	\
	{ "NB_sized_commands",				0xEB, 0x7F },	\
	{ "NB_probe_result",				0xEC, 0x7F },	\
	{ "NB_gart_events",				0xEE, 0x7 },	\
	{ "NB_ht_bus0_bandwidth",			0xF6, 0xF },	\
	{ "NB_ht_bus1_bandwidth",			0xF7, 0xF },	\
	{ "NB_ht_bus2_bandwidth",			0xF8, 0xF }

#define	OPT_RevD_events							\
	{ "NB_sized_blocks",				0xE5, 0x3C }

#define	OPT_RevE_events							\
	{ "NB_cpu_io_to_mem_io",			0xE9, 0xFF },	\
	{ "NB_cache_block_commands",			0xEA, 0x3D }

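/*
 * opt_pcbe_init() selects one of the tables below based on the chip revision
 * reported by cpuid_getchiprev(): revision D parts get the RevD events,
 * revisions E, F and G get both the RevD and RevE events, and anything else
 * falls back to the common table.
 */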
static opt_event_t opt_events_cmn[] = {
	OPT_cmn_events,
	EV_END
};

static opt_event_t opt_events_rev_D[] = {
	OPT_cmn_events,
	OPT_RevD_events,
	EV_END
};

static opt_event_t opt_events_rev_E[] = {
	OPT_cmn_events,
	OPT_RevD_events,
	OPT_RevE_events,
	EV_END
};

static char	*evlist;
static size_t	evlist_sz;
static opt_event_t *opt_events;

#define	BITS(v, u, l)   \
	(((v) >> (l)) & ((1 << (1 + (u) - (l))) - 1))

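/* Family 0xF (15) is the AMD K8 family: Opteron and Athlon 64. */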
#define	OPTERON_FAMILY	15

static int
opt_pcbe_init(void)
{
	opt_event_t		*evp;
	uint32_t		rev;

	/*
	 * Make sure this really _is_ an Opteron or Athlon 64 system. The
	 * kernel loads this module based on its name in the module directory,
	 * but it could have been renamed.
	 */
	if (cpuid_getvendor(CPU) != X86_VENDOR_AMD ||
	    cpuid_getfamily(CPU) != OPTERON_FAMILY)
		return (-1);

	/*
	 * Figure out processor revision here and assign appropriate
	 * event configuration.
	 */

	rev = cpuid_getchiprev(CPU);

	if (!X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_F_REV_D)) {
		opt_events = opt_events_cmn;
	} else if (X86_CHIPREV_MATCH(rev, X86_CHIPREV_AMD_F_REV_D)) {
		opt_events = opt_events_rev_D;
	} else if (X86_CHIPREV_MATCH(rev, X86_CHIPREV_AMD_F_REV_E) ||
	    X86_CHIPREV_MATCH(rev, X86_CHIPREV_AMD_F_REV_F) ||
	    X86_CHIPREV_MATCH(rev, X86_CHIPREV_AMD_F_REV_G)) {
		opt_events = opt_events_rev_E;
	}

	if (opt_events == NULL)
		opt_events = opt_events_cmn;

	/*
	 * Construct the event list.
	 *
	 * First pass:  Calculate the size needed. We'll need one additional
	 *		byte for the terminating NUL written by the last
	 *		strcat.
	 *
	 * Second pass: Copy the strings.
	 */
	for (evp = opt_events; evp->name != NULL; evp++)
		evlist_sz += strlen(evp->name) + 1;

	evlist = kmem_alloc(evlist_sz + 1, KM_SLEEP);
	evlist[0] = '\0';

	for (evp = opt_events; evp->name != NULL; evp++) {
		(void) strcat(evlist, evp->name);
		(void) strcat(evlist, ",");
	}
	/*
	 * Remove trailing comma.
	 */
	evlist[evlist_sz - 1] = '\0';

	return (0);
}

static uint_t
opt_pcbe_ncounters(void)
{
	return (4);
}

static const char *
opt_pcbe_impl_name(void)
{
	return ("AMD Opteron & Athlon64");
}

static const char *
opt_pcbe_cpuref(void)
{
	return ("See Chapter 10 of the \"BIOS and Kernel Developer's Guide "
	    "for the AMD Athlon 64 and AMD Opteron Processors,\" "
	    "AMD publication #26094");
}

/*ARGSUSED*/
static char *
opt_pcbe_list_events(uint_t picnum)
{
	return (evlist);
}

static char *
opt_pcbe_list_attrs(void)
{
	return ("edge,pc,inv,cmask,umask");
}

/*ARGSUSED*/
static uint64_t
opt_pcbe_event_coverage(char *event)
{
	/*
	 * Fortunately, all counters can count all events.
	 */
	return (0xF);
}

static uint64_t
opt_pcbe_overflow_bitmap(void)
{
	/*
	 * Unfortunately, this chip cannot detect which counter overflowed, so
	 * we must act as if they all did.
	 */
	return (0xF);
}

static opt_event_t *
find_event(char *name)
{
	opt_event_t	*evp;

	for (evp = opt_events; evp->name != NULL; evp++)
		if (strcmp(name, evp->name) == 0)
			return (evp);

	return (NULL);
}

/*ARGSUSED*/
static int
opt_pcbe_configure(uint_t picnum, char *event, uint64_t preset, uint32_t flags,
    uint_t nattrs, kcpc_attr_t *attrs, void **data, void *token)
{
	opt_pcbe_config_t	*cfg;
	opt_event_t		*evp;
	opt_event_t		ev_raw = { "raw", 0, 0xFF };
	int			i;
	uint32_t		evsel = 0;

	/*
	 * If we've been handed an existing configuration, we need only preset
	 * the counter value.
	 */
	if (*data != NULL) {
		cfg = *data;
		cfg->opt_rawpic = preset & MASK48;
		return (0);
	}

	if (picnum >= 4)
		return (CPC_INVALID_PICNUM);

	if ((evp = find_event(event)) == NULL) {
		long tmp;

		/*
		 * If ddi_strtol() likes this event, use it as a raw event code.
		 */
		if (ddi_strtol(event, NULL, 0, &tmp) != 0)
			return (CPC_INVALID_EVENT);

		ev_raw.emask = tmp;
		evp = &ev_raw;
	}

	evsel |= evp->emask;

	if (flags & CPC_COUNT_USER)
		evsel |= OPT_PES_USR;
	if (flags & CPC_COUNT_SYSTEM)
		evsel |= OPT_PES_OS;
	if (flags & CPC_OVF_NOTIFY_EMT)
		evsel |= OPT_PES_INT;

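	/*
	 * Fold any attributes into the event-select value. Checks of the
	 * form (val | mask) != mask reject attribute values with bits set
	 * outside the field's valid mask.
	 */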
	for (i = 0; i < nattrs; i++) {
		if (strcmp(attrs[i].ka_name, "edge") == 0) {
			if (attrs[i].ka_val != 0)
				evsel |= OPT_PES_EDGE;
		} else if (strcmp(attrs[i].ka_name, "pc") == 0) {
			if (attrs[i].ka_val != 0)
				evsel |= OPT_PES_PC;
		} else if (strcmp(attrs[i].ka_name, "inv") == 0) {
			if (attrs[i].ka_val != 0)
				evsel |= OPT_PES_INV;
		} else if (strcmp(attrs[i].ka_name, "cmask") == 0) {
			if ((attrs[i].ka_val | OPT_PES_CMASK_MASK) !=
			    OPT_PES_CMASK_MASK)
				return (CPC_ATTRIBUTE_OUT_OF_RANGE);
			evsel |= attrs[i].ka_val << OPT_PES_CMASK_SHIFT;
		} else if (strcmp(attrs[i].ka_name, "umask") == 0) {
			if ((attrs[i].ka_val | evp->umask_valid) !=
			    evp->umask_valid)
				return (CPC_ATTRIBUTE_OUT_OF_RANGE);
			evsel |= attrs[i].ka_val << OPT_PES_UMASK_SHIFT;
		} else
			return (CPC_INVALID_ATTRIBUTE);
	}

	cfg = kmem_alloc(sizeof (*cfg), KM_SLEEP);

	cfg->opt_picno = picnum;
	cfg->opt_evsel = evsel;
	cfg->opt_rawpic = preset & MASK48;

	*data = cfg;
	return (0);
}

static void
opt_pcbe_program(void *token)
{
	opt_pcbe_config_t	*cfgs[4] = { &nullcfgs[0], &nullcfgs[1],
						&nullcfgs[2], &nullcfgs[3] };
	opt_pcbe_config_t	*pcfg = NULL;
	int			i;
	uint32_t		curcr4 = getcr4();

	/*
	 * Allow nonprivileged code to read the performance counters if
	 * desired.
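	 * CR4.PCE controls whether the rdpmc instruction may be executed
	 * outside of kernel mode.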
	 */
	if (kcpc_allow_nonpriv(token))
		setcr4(curcr4 | CR4_PCE);
	else
		setcr4(curcr4 & ~CR4_PCE);

	/*
	 * Query kernel for all configs which will be co-programmed.
	 */
	do {
		pcfg = (opt_pcbe_config_t *)kcpc_next_config(token, pcfg, NULL);

		if (pcfg != NULL) {
			ASSERT(pcfg->opt_picno < 4);
			cfgs[pcfg->opt_picno] = pcfg;
		}
	} while (pcfg != NULL);

	/*
	 * Program in two loops. The first loop configures and presets each
	 * counter; the second enables them. This ensures that the counters
	 * are all enabled as closely together in time as possible.
	 */

	for (i = 0; i < 4; i++) {
		wrmsr(PES_BASE_ADDR + i, cfgs[i]->opt_evsel);
		wrmsr(PIC_BASE_ADDR + i, cfgs[i]->opt_rawpic);
	}

	for (i = 0; i < 4; i++) {
		wrmsr(PES_BASE_ADDR + i, cfgs[i]->opt_evsel |
		    (uint64_t)OPT_PES_ENABLE);
	}
}

static void
opt_pcbe_allstop(void)
{
	int		i;

	for (i = 0; i < 4; i++)
		wrmsr(PES_BASE_ADDR + i, 0ULL);

	/*
	 * Disable non-privileged access to the counter registers.
	 */
	setcr4((uint32_t)getcr4() & ~CR4_PCE);
}

static void
opt_pcbe_sample(void *token)
{
	opt_pcbe_config_t	*cfgs[4] = { NULL, NULL, NULL, NULL };
	opt_pcbe_config_t	*pcfg = NULL;
	int			i;
	uint64_t		curpic[4];
	uint64_t		*addrs[4];
	uint64_t		*tmp;
	int64_t			diff;

	for (i = 0; i < 4; i++)
		curpic[i] = rdmsr(PIC_BASE_ADDR + i);

	/*
	 * Query kernel for all configs which are co-programmed.
	 */
	do {
		pcfg = (opt_pcbe_config_t *)kcpc_next_config(token, pcfg, &tmp);

		if (pcfg != NULL) {
			ASSERT(pcfg->opt_picno < 4);
			cfgs[pcfg->opt_picno] = pcfg;
			addrs[pcfg->opt_picno] = tmp;
		}
	} while (pcfg != NULL);

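	/*
	 * The counters are 48 bits wide, so each delta is computed modulo
	 * 2^48; this remains correct provided a counter has not advanced
	 * by 2^48 or more between samples.
	 */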
	for (i = 0; i < 4; i++) {
		if (cfgs[i] == NULL)
			continue;

		diff = (curpic[i] - cfgs[i]->opt_rawpic) & MASK48;
		*addrs[i] += diff;
		DTRACE_PROBE4(opt__pcbe__sample, int, i, uint64_t, *addrs[i],
		    uint64_t, curpic[i], uint64_t, cfgs[i]->opt_rawpic);
		cfgs[i]->opt_rawpic = *addrs[i] & MASK48;
	}
}

static void
opt_pcbe_free(void *config)
{
	kmem_free(config, sizeof (opt_pcbe_config_t));
}

static struct modlpcbe modlpcbe = {
	&mod_pcbeops,
	"AMD Performance Counters v%I%",
	&opt_pcbe_ops
};

static struct modlinkage modl = {
	MODREV_1,
	&modlpcbe,
};

int
_init(void)
{
	int ret;

	if (opt_pcbe_init() != 0)
		return (ENOTSUP);

	if ((ret = mod_install(&modl)) != 0)
		kmem_free(evlist, evlist_sz + 1);

	return (ret);
}

int
_fini(void)
{
	int ret;

	if ((ret = mod_remove(&modl)) == 0)
		kmem_free(evlist, evlist_sz + 1);
	return (ret);
}

int
_info(struct modinfo *mi)
{
	return (mod_info(&modl, mi));
}