/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains preset event names from the Performance Application
 * Programming Interface v3.5 which included the following notice:
 *
 *                             Copyright (c) 2005,6
 *                           Innovative Computing Labs
 *                         Computer Science Department,
 *                            University of Tennessee,
 *                                 Knoxville, TN.
 *                              All Rights Reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *    * Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *	notice, this list of conditions and the following disclaimer in the
 *	documentation and/or other materials provided with the distribution.
 *    * Neither the name of the University of Tennessee nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *	this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * This open source software license conforms to the BSD License template.
 */

/*
 * Performance Counter Back-End for AMD Opteron, AMD Athlon 64, and
 * AMD Family 10h processors.
 */

#include <sys/cpuvar.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpc_pcbe.h>
#include <sys/kmem.h>
#include <sys/sdt.h>
#include <sys/modctl.h>
#include <sys/errno.h>
#include <sys/debug.h>
#include <sys/archsystm.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

static int opt_pcbe_init(void);
static uint_t opt_pcbe_ncounters(void);
static const char *opt_pcbe_impl_name(void);
static const char *opt_pcbe_cpuref(void);
static char *opt_pcbe_list_events(uint_t picnum);
static char *opt_pcbe_list_attrs(void);
static uint64_t opt_pcbe_event_coverage(char *event);
static uint64_t opt_pcbe_overflow_bitmap(void);
static int opt_pcbe_configure(uint_t picnum, char *event, uint64_t preset,
    uint32_t flags, uint_t nattrs, kcpc_attr_t *attrs, void **data,
    void *token);
static void opt_pcbe_program(void *token);
static void opt_pcbe_allstop(void);
static void opt_pcbe_sample(void *token);
static void opt_pcbe_free(void *config);

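/*
 * Operations vector exported to the kernel CPC (CPU performance counter)
 * framework.
 */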
static pcbe_ops_t opt_pcbe_ops = {
	PCBE_VER_1,
	CPC_CAP_OVERFLOW_INTERRUPT,
	opt_pcbe_ncounters,
	opt_pcbe_impl_name,
	opt_pcbe_cpuref,
	opt_pcbe_list_events,
	opt_pcbe_list_attrs,
	opt_pcbe_event_coverage,
	opt_pcbe_overflow_bitmap,
	opt_pcbe_configure,
	opt_pcbe_program,
	opt_pcbe_allstop,
	opt_pcbe_sample,
	opt_pcbe_free
};

/*
 * Define offsets and masks for the fields in the Performance
 * Event-Select (PES) registers.
 */
#define	OPT_PES_HOST_SHIFT	41
#define	OPT_PES_GUEST_SHIFT	40
#define	OPT_PES_CMASK_SHIFT	24
#define	OPT_PES_CMASK_MASK	0xFF
#define	OPT_PES_INV_SHIFT	23
#define	OPT_PES_ENABLE_SHIFT	22
#define	OPT_PES_INT_SHIFT	20
#define	OPT_PES_PC_SHIFT	19
#define	OPT_PES_EDGE_SHIFT	18
#define	OPT_PES_OS_SHIFT	17
#define	OPT_PES_USR_SHIFT	16
#define	OPT_PES_UMASK_SHIFT	8
#define	OPT_PES_UMASK_MASK	0xFF

#define	OPT_PES_INV		(1ULL << OPT_PES_INV_SHIFT)
#define	OPT_PES_ENABLE		(1ULL << OPT_PES_ENABLE_SHIFT)
#define	OPT_PES_INT		(1ULL << OPT_PES_INT_SHIFT)
#define	OPT_PES_PC		(1ULL << OPT_PES_PC_SHIFT)
#define	OPT_PES_EDGE		(1ULL << OPT_PES_EDGE_SHIFT)
#define	OPT_PES_OS		(1ULL << OPT_PES_OS_SHIFT)
#define	OPT_PES_USR		(1ULL << OPT_PES_USR_SHIFT)
#define	OPT_PES_HOST		(1ULL << OPT_PES_HOST_SHIFT)
#define	OPT_PES_GUEST		(1ULL << OPT_PES_GUEST_SHIFT)

typedef struct _opt_pcbe_config {
	uint8_t		opt_picno;	/* Counter number: 0, 1, 2, or 3 */
	uint64_t	opt_evsel;	/* Event Selection register */
	uint64_t	opt_rawpic;	/* Raw counter value */
} opt_pcbe_config_t;

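/*
 * Null configurations: one per counter, with an all-zero event-select value.
 * opt_pcbe_program() uses these for any counter that has no active request;
 * with the USR and OS bits clear, such a counter counts nothing.
 */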
opt_pcbe_config_t nullcfgs[4] = {
	{ 0, 0, 0 },
	{ 1, 0, 0 },
	{ 2, 0, 0 },
	{ 3, 0, 0 }
};

typedef struct _amd_event {
	char		*name;
	uint16_t	emask;		/* Event mask setting */
	uint8_t		umask_valid;	/* Mask of unreserved UNIT_MASK bits */
} amd_event_t;

typedef struct _amd_generic_event {
	char *name;
	char *event;
	uint8_t umask;
} amd_generic_event_t;

/*
 * Base MSR addresses for the PerfEvtSel registers and the counters themselves.
 * Add counter number to base address to get corresponding MSR address.
 */
#define	PES_BASE_ADDR	0xC0010000
#define	PIC_BASE_ADDR	0xC0010004

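/*
 * The performance counters are 48 bits wide; raw counter values are always
 * masked down to 48 bits.
 */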
#define	MASK48		0xFFFFFFFFFFFF

#define	EV_END {NULL, 0, 0}
#define	GEN_EV_END {NULL, NULL, 0 }

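/*
 * Event tables.  Each entry gives the platform event name, the event-select
 * code programmed into the PES register, and the set of unit-mask bits that
 * are valid (unreserved) for that event.
 */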
#define	AMD_cmn_events							\
	{ "FP_dispatched_fpu_ops",			0x0, 0x3F },	\
	{ "FP_cycles_no_fpu_ops_retired",		0x1, 0x0 },	\
	{ "FP_dispatched_fpu_ops_ff",			0x2, 0x0 },	\
	{ "LS_seg_reg_load",				0x20, 0x7F },	\
	{ "LS_uarch_resync_self_modify",		0x21, 0x0 },	\
	{ "LS_uarch_resync_snoop",			0x22, 0x0 },	\
	{ "LS_buffer_2_full",				0x23, 0x0 },	\
	{ "LS_retired_cflush",				0x26, 0x0 },	\
	{ "LS_retired_cpuid",				0x27, 0x0 },	\
	{ "DC_access",					0x40, 0x0 },	\
	{ "DC_miss",					0x41, 0x0 },	\
	{ "DC_refill_from_L2",				0x42, 0x1F },	\
	{ "DC_refill_from_system",			0x43, 0x1F },	\
	{ "DC_misaligned_data_ref",			0x47, 0x0 },	\
	{ "DC_uarch_late_cancel_access",		0x48, 0x0 },	\
	{ "DC_uarch_early_cancel_access",		0x49, 0x0 },	\
	{ "DC_dispatched_prefetch_instr",		0x4B, 0x7 },	\
	{ "DC_dcache_accesses_by_locks",		0x4C, 0x2 },	\
	{ "BU_memory_requests",				0x65, 0x83},	\
	{ "BU_data_prefetch",				0x67, 0x3 },	\
	{ "BU_cpu_clk_unhalted",			0x76, 0x0 },	\
	{ "IC_fetch",					0x80, 0x0 },	\
	{ "IC_miss",					0x81, 0x0 },	\
	{ "IC_refill_from_L2",				0x82, 0x0 },	\
	{ "IC_refill_from_system",			0x83, 0x0 },	\
	{ "IC_itlb_L1_miss_L2_hit",			0x84, 0x0 },	\
	{ "IC_uarch_resync_snoop",			0x86, 0x0 },	\
	{ "IC_instr_fetch_stall",			0x87, 0x0 },	\
	{ "IC_return_stack_hit",			0x88, 0x0 },	\
	{ "IC_return_stack_overflow",			0x89, 0x0 },	\
	{ "FR_retired_x86_instr_w_excp_intr",		0xC0, 0x0 },	\
	{ "FR_retired_uops",				0xC1, 0x0 },	\
	{ "FR_retired_branches_w_excp_intr",		0xC2, 0x0 },	\
	{ "FR_retired_branches_mispred",		0xC3, 0x0 },	\
	{ "FR_retired_taken_branches",			0xC4, 0x0 },	\
	{ "FR_retired_taken_branches_mispred",		0xC5, 0x0 },	\
	{ "FR_retired_far_ctl_transfer",		0xC6, 0x0 },	\
	{ "FR_retired_resyncs",				0xC7, 0x0 },	\
	{ "FR_retired_near_rets",			0xC8, 0x0 },	\
	{ "FR_retired_near_rets_mispred",		0xC9, 0x0 },	\
	{ "FR_retired_taken_branches_mispred_addr_miscomp",	0xCA, 0x0 },\
	{ "FR_retired_fastpath_double_op_instr",	0xCC, 0x7 },	\
	{ "FR_intr_masked_cycles",			0xCD, 0x0 },	\
	{ "FR_intr_masked_while_pending_cycles",	0xCE, 0x0 },	\
	{ "FR_taken_hardware_intrs",			0xCF, 0x0 },	\
	{ "FR_nothing_to_dispatch",			0xD0, 0x0 },	\
	{ "FR_dispatch_stalls",				0xD1, 0x0 },	\
	{ "FR_dispatch_stall_branch_abort_to_retire",	0xD2, 0x0 },	\
	{ "FR_dispatch_stall_serialization",		0xD3, 0x0 },	\
	{ "FR_dispatch_stall_segment_load",		0xD4, 0x0 },	\
	{ "FR_dispatch_stall_reorder_buffer_full",	0xD5, 0x0 },	\
	{ "FR_dispatch_stall_resv_stations_full",	0xD6, 0x0 },	\
	{ "FR_dispatch_stall_fpu_full",			0xD7, 0x0 },	\
	{ "FR_dispatch_stall_ls_full",			0xD8, 0x0 },	\
	{ "FR_dispatch_stall_waiting_all_quiet",	0xD9, 0x0 },	\
	{ "FR_dispatch_stall_far_ctl_trsfr_resync_branch_pend",	0xDA, 0x0 },\
	{ "FR_fpu_exception",				0xDB, 0xF },	\
	{ "FR_num_brkpts_dr0",				0xDC, 0x0 },	\
	{ "FR_num_brkpts_dr1",				0xDD, 0x0 },	\
	{ "FR_num_brkpts_dr2",				0xDE, 0x0 },	\
	{ "FR_num_brkpts_dr3",				0xDF, 0x0 },	\
	{ "NB_mem_ctrlr_bypass_counter_saturation",	0xE4, 0xF }

#define	OPT_events							\
	{ "LS_locked_operation",			0x24, 0x7 },	\
	{ "DC_copyback",				0x44, 0x1F },	\
	{ "DC_dtlb_L1_miss_L2_hit",			0x45, 0x0 },	\
	{ "DC_dtlb_L1_miss_L2_miss",			0x46, 0x0 },	\
	{ "DC_1bit_ecc_error_found",			0x4A, 0x3 },	\
	{ "BU_system_read_responses",			0x6C, 0x7 },	\
	{ "BU_quadwords_written_to_system",		0x6D, 0x1 },	\
	{ "BU_internal_L2_req",				0x7D, 0x1F },	\
	{ "BU_fill_req_missed_L2",			0x7E, 0x7 },	\
	{ "BU_fill_into_L2",				0x7F, 0x1 },	\
	{ "IC_itlb_L1_miss_L2_miss",			0x85, 0x0 },	\
	{ "FR_retired_fpu_instr",			0xCB, 0xF },	\
	{ "NB_mem_ctrlr_page_access",			0xE0, 0x7 },	\
	{ "NB_mem_ctrlr_page_table_overflow",		0xE1, 0x0 },	\
	{ "NB_mem_ctrlr_turnaround",			0xE3, 0x7 },	\
	{ "NB_ECC_errors",				0xE8, 0x80},	\
	{ "NB_sized_commands",				0xEB, 0x7F },	\
	{ "NB_probe_result",				0xEC, 0x7F},	\
	{ "NB_gart_events",				0xEE, 0x7 },	\
	{ "NB_ht_bus0_bandwidth",			0xF6, 0xF },	\
	{ "NB_ht_bus1_bandwidth",			0xF7, 0xF },	\
	{ "NB_ht_bus2_bandwidth",			0xF8, 0xF }

#define	OPT_RevD_events							\
	{ "NB_sized_blocks",				0xE5, 0x3C }

#define	OPT_RevE_events							\
	{ "NB_cpu_io_to_mem_io",			0xE9, 0xFF},	\
	{ "NB_cache_block_commands",			0xEA, 0x3D}

#define	AMD_FAMILY_10h_cmn_events					\
	{ "FP_retired_sse_ops",				0x3,   0x7F},	\
	{ "FP_retired_move_ops",			0x4,   0xF},	\
	{ "FP_retired_serialize_ops",			0x5,   0xF},	\
	{ "FP_serialize_ops_cycles",			0x6,   0x3},	\
	{ "DC_copyback",				0x44,  0x7F },	\
	{ "DC_dtlb_L1_miss_L2_hit",			0x45,  0x3 },	\
	{ "DC_dtlb_L1_miss_L2_miss",			0x46,  0x7 },	\
	{ "DC_1bit_ecc_error_found",			0x4A,  0xF },	\
	{ "DC_dtlb_L1_hit",				0x4D,  0x7 },	\
	{ "BU_system_read_responses",			0x6C,  0x17 },	\
	{ "BU_octwords_written_to_system",		0x6D,  0x1 },	\
	{ "BU_internal_L2_req",				0x7D,  0x3F },	\
	{ "BU_fill_req_missed_L2",			0x7E,  0xF },	\
	{ "BU_fill_into_L2",				0x7F,  0x3 },	\
	{ "IC_itlb_L1_miss_L2_miss",			0x85,  0x3 },	\
	{ "IC_eviction",				0x8B,  0x0 },	\
	{ "IC_cache_lines_invalidate",			0x8C,  0xF },	\
	{ "IC_itlb_reload",				0x99,  0x0 },	\
	{ "IC_itlb_reload_aborted",			0x9A,  0x0 },	\
	{ "FR_retired_mmx_sse_fp_instr",		0xCB,  0x7 },	\
	{ "NB_mem_ctrlr_page_access",			0xE0,  0xFF },	\
	{ "NB_mem_ctrlr_page_table_overflow",		0xE1,  0x3 },	\
	{ "NB_mem_ctrlr_turnaround",			0xE3,  0x3F },	\
	{ "NB_thermal_status",				0xE8,  0x7C},	\
	{ "NB_sized_commands",				0xEB,  0x3F },	\
	{ "NB_probe_results_upstream_req",		0xEC,  0xFF},	\
	{ "NB_gart_events",				0xEE,  0xFF },	\
	{ "NB_ht_bus0_bandwidth",			0xF6,  0xBF },	\
	{ "NB_ht_bus1_bandwidth",			0xF7,  0xBF },	\
	{ "NB_ht_bus2_bandwidth",			0xF8,  0xBF },	\
	{ "NB_ht_bus3_bandwidth",			0x1F9, 0xBF },	\
	{ "LS_locked_operation",			0x24,  0xF },	\
	{ "LS_cancelled_store_to_load_fwd_ops",		0x2A,  0x7 },	\
	{ "LS_smi_received",				0x2B,  0x0 },	\
	{ "LS_ineffective_prefetch",			0x52,  0x9 },	\
	{ "LS_global_tlb_flush",			0x54,  0x0 },	\
	{ "NB_mem_ctrlr_dram_cmd_slots_missed",		0xE2,  0x3 },	\
	{ "NB_mem_ctrlr_req",				0x1F0, 0xFF },	\
	{ "CB_cpu_to_dram_req_to_target",		0x1E0, 0xFF },	\
	{ "CB_io_to_dram_req_to_target",		0x1E1, 0xFF },	\
	{ "CB_cpu_read_cmd_latency_to_target_0_to_3",	0x1E2, 0xFF },	\
	{ "CB_cpu_read_cmd_req_to_target_0_to_3",	0x1E3, 0xFF },	\
	{ "CB_cpu_read_cmd_latency_to_target_4_to_7",	0x1E4, 0xFF },	\
	{ "CB_cpu_read_cmd_req_to_target_4_to_7",	0x1E5, 0xFF },	\
	{ "CB_cpu_cmd_latency_to_target_0_to_7",	0x1E6, 0xFF },	\
	{ "CB_cpu_req_to_target_0_to_7",		0x1E7, 0xFF },	\
	{ "L3_read_req",				0x4E0, 0xF7 },	\
	{ "L3_miss",					0x4E1, 0xF7 },	\
	{ "L3_l2_eviction_l3_fill",			0x4E2, 0xFF },	\
	{ "L3_eviction",				0x4E3, 0xF  }

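/*
 * Generic (PAPI) event tables.  Each entry maps a generic event name onto a
 * platform event and the unit mask to program with it.
 */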
#define	AMD_cmn_generic_events						\
	{ "PAPI_br_ins",	"FR_retired_branches_w_excp_intr", 0x0 },\
	{ "PAPI_br_msp",	"FR_retired_branches_mispred",	0x0 },	\
	{ "PAPI_br_tkn",	"FR_retired_taken_branches",	0x0 },	\
	{ "PAPI_fp_ops",	"FP_dispatched_fpu_ops",	0x3 },	\
	{ "PAPI_fad_ins",	"FP_dispatched_fpu_ops",	0x1 },	\
	{ "PAPI_fml_ins",	"FP_dispatched_fpu_ops",	0x2 },	\
	{ "PAPI_fpu_idl",	"FP_cycles_no_fpu_ops_retired",	0x0 },	\
	{ "PAPI_tot_cyc",	"BU_cpu_clk_unhalted",		0x0 },	\
	{ "PAPI_tot_ins",	"FR_retired_x86_instr_w_excp_intr", 0x0 }, \
	{ "PAPI_l1_dca",	"DC_access",			0x0 },	\
	{ "PAPI_l1_dcm",	"DC_miss",			0x0 },	\
	{ "PAPI_l1_ldm",	"DC_refill_from_L2",		0xe },	\
	{ "PAPI_l1_stm",	"DC_refill_from_L2",		0x10 },	\
	{ "PAPI_l1_ica",	"IC_fetch",			0x0 },	\
	{ "PAPI_l1_icm",	"IC_miss",			0x0 },	\
	{ "PAPI_l1_icr",	"IC_fetch",			0x0 },	\
	{ "PAPI_l2_dch",	"DC_refill_from_L2",		0x1e },	\
	{ "PAPI_l2_dcm",	"DC_refill_from_system",	0x1e },	\
	{ "PAPI_l2_dcr",	"DC_refill_from_L2",		0xe },	\
	{ "PAPI_l2_dcw",	"DC_refill_from_L2",		0x10 },	\
	{ "PAPI_l2_ich",	"IC_refill_from_L2",		0x0 },	\
	{ "PAPI_l2_icm",	"IC_refill_from_system",	0x0 },	\
	{ "PAPI_l2_ldm",	"DC_refill_from_system",	0xe },	\
	{ "PAPI_l2_stm",	"DC_refill_from_system",	0x10 },	\
	{ "PAPI_res_stl",	"FR_dispatch_stalls",		0x0 },	\
	{ "PAPI_stl_icy",	"FR_nothing_to_dispatch",	0x0 },	\
	{ "PAPI_hw_int",	"FR_taken_hardware_intrs",	0x0 }

#define	OPT_cmn_generic_events						\
	{ "PAPI_tlb_dm",	"DC_dtlb_L1_miss_L2_miss",	0x0 },	\
	{ "PAPI_tlb_im",	"IC_itlb_L1_miss_L2_miss",	0x0 },	\
	{ "PAPI_fp_ins",	"FR_retired_fpu_instr",		0xd },	\
	{ "PAPI_vec_ins",	"FR_retired_fpu_instr",		0x4 }

#define	AMD_FAMILY_10h_generic_events					\
	{ "PAPI_tlb_dm",	"DC_dtlb_L1_miss_L2_miss",	0x7 },	\
	{ "PAPI_tlb_im",	"IC_itlb_L1_miss_L2_miss",	0x3 },	\
	{ "PAPI_l3_dcr",	"L3_read_req",			0xf1 }, \
	{ "PAPI_l3_icr",	"L3_read_req",			0xf2 }, \
	{ "PAPI_l3_tcr",	"L3_read_req",			0xf7 }, \
	{ "PAPI_l3_stm",	"L3_miss",			0xf4 }, \
	{ "PAPI_l3_ldm",	"L3_miss",			0xf3 }, \
	{ "PAPI_l3_tcm",	"L3_miss",			0xf7 }

static amd_event_t opt_events[] = {
	AMD_cmn_events,
	OPT_events,
	EV_END
};

static amd_event_t opt_events_rev_D[] = {
	AMD_cmn_events,
	OPT_events,
	OPT_RevD_events,
	EV_END
};

static amd_event_t opt_events_rev_E[] = {
	AMD_cmn_events,
	OPT_events,
	OPT_RevD_events,
	OPT_RevE_events,
	EV_END
};

static amd_event_t family_10h_events[] = {
	AMD_cmn_events,
	OPT_RevE_events,
	AMD_FAMILY_10h_cmn_events,
	EV_END
};

static amd_generic_event_t opt_generic_events[] = {
	AMD_cmn_generic_events,
	OPT_cmn_generic_events,
	GEN_EV_END
};

static amd_generic_event_t family_10h_generic_events[] = {
	AMD_cmn_generic_events,
	AMD_FAMILY_10h_generic_events,
	GEN_EV_END
};

static char	*evlist;
static size_t	evlist_sz;
static amd_event_t *amd_events = NULL;
static uint_t amd_family;
static amd_generic_event_t *amd_generic_events = NULL;

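/* Extract the inclusive bit field [u:l] from v. */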
#define	BITS(v, u, l)   \
	(((v) >> (l)) & ((1 << (1 + (u) - (l))) - 1))

#define	OPTERON_FAMILY	0xf
#define	AMD_FAMILY_10H	0x10

static int
opt_pcbe_init(void)
{
	amd_event_t		*evp;
	amd_generic_event_t	*gevp;
	uint32_t		rev;

	amd_family = cpuid_getfamily(CPU);

	/*
	 * Make sure this really _is_ a supported AMD processor (an Opteron/
	 * Athlon 64 family 0Fh or a Family 10h part). The kernel loads this
	 * module based on its name in the module directory, but it could have
	 * been renamed.
	 */
	if (cpuid_getvendor(CPU) != X86_VENDOR_AMD ||
	    (amd_family != OPTERON_FAMILY && amd_family != AMD_FAMILY_10H))
		return (-1);

	/*
	 * Figure out processor revision here and assign appropriate
	 * event configuration.
	 */

	rev = cpuid_getchiprev(CPU);

	if (amd_family == OPTERON_FAMILY) {
		amd_generic_events = opt_generic_events;
		if (!X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_F_REV_D)) {
			amd_events = opt_events;
		} else if (X86_CHIPREV_MATCH(rev, X86_CHIPREV_AMD_F_REV_D)) {
			amd_events = opt_events_rev_D;
		} else if (X86_CHIPREV_MATCH(rev, X86_CHIPREV_AMD_F_REV_E) ||
		    X86_CHIPREV_MATCH(rev, X86_CHIPREV_AMD_F_REV_F) ||
		    X86_CHIPREV_MATCH(rev, X86_CHIPREV_AMD_F_REV_G)) {
			amd_events = opt_events_rev_E;
		} else {
			amd_events = opt_events;
		}
	} else {
		amd_events = family_10h_events;
		amd_generic_events = family_10h_generic_events;
	}

	/*
	 * Construct event list.
	 *
	 * First pass:  Calculate size needed. We'll need one additional byte
	 *		for the terminating NUL written by the last strcat.
	 *
	 * Second pass: Copy strings.
	 */
	for (evp = amd_events; evp->name != NULL; evp++)
		evlist_sz += strlen(evp->name) + 1;

	for (gevp = amd_generic_events; gevp->name != NULL; gevp++)
		evlist_sz += strlen(gevp->name) + 1;

	evlist = kmem_alloc(evlist_sz + 1, KM_SLEEP);
	evlist[0] = '\0';

	for (evp = amd_events; evp->name != NULL; evp++) {
		(void) strcat(evlist, evp->name);
		(void) strcat(evlist, ",");
	}

	for (gevp = amd_generic_events; gevp->name != NULL; gevp++) {
		(void) strcat(evlist, gevp->name);
		(void) strcat(evlist, ",");
	}

	/*
	 * Remove trailing comma.
	 */
	evlist[evlist_sz - 1] = '\0';

	return (0);
}

static uint_t
opt_pcbe_ncounters(void)
{
	return (4);
}

static const char *
opt_pcbe_impl_name(void)
{
	if (amd_family == OPTERON_FAMILY) {
		return ("AMD Opteron & Athlon64");
	} else if (amd_family == AMD_FAMILY_10H) {
		return ("AMD Family 10h");
	} else {
		return ("Unknown AMD processor");
	}
}

static const char *
opt_pcbe_cpuref(void)
{
	if (amd_family == OPTERON_FAMILY) {
		return ("See Chapter 10 of the \"BIOS and Kernel Developer's"
		" Guide for the AMD Athlon 64 and AMD Opteron Processors,\" "
		"AMD publication #26094");
	} else if (amd_family == AMD_FAMILY_10H) {
		return ("See section 3.15 of the \"BIOS and Kernel "
		"Developer's Guide (BKDG) For AMD Family 10h Processors,\" "
		"AMD publication #31116");
	} else {
		return ("Unknown AMD processor");
	}
}

/*ARGSUSED*/
static char *
opt_pcbe_list_events(uint_t picnum)
{
	return (evlist);
}

static char *
opt_pcbe_list_attrs(void)
{
	return ("edge,pc,inv,cmask,umask");
}

/*ARGSUSED*/
static uint64_t
opt_pcbe_event_coverage(char *event)
{
	/*
	 * Fortunately, all counters can count all events.
	 */
	return (0xF);
}

static uint64_t
opt_pcbe_overflow_bitmap(void)
{
	/*
	 * Unfortunately, this chip cannot detect which counter overflowed, so
	 * we must act as if they all did.
	 */
	return (0xF);
}

static amd_generic_event_t *
find_generic_event(char *name)
{
	amd_generic_event_t	*gevp;

	for (gevp = amd_generic_events; gevp->name != NULL; gevp++)
		if (strcmp(name, gevp->name) == 0)
			return (gevp);

	return (NULL);
}

static amd_event_t *
find_event(char *name)
{
	amd_event_t		*evp;

	for (evp = amd_events; evp->name != NULL; evp++)
		if (strcmp(name, evp->name) == 0)
			return (evp);

	return (NULL);
}

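/*
 * Build a per-counter configuration for the requested event, counting flags,
 * and attributes.  The result describes how the event-select register for
 * the chosen counter will be programmed.
 */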
/*ARGSUSED*/
static int
opt_pcbe_configure(uint_t picnum, char *event, uint64_t preset, uint32_t flags,
    uint_t nattrs, kcpc_attr_t *attrs, void **data, void *token)
{
	opt_pcbe_config_t	*cfg;
	amd_event_t		*evp;
	amd_event_t		ev_raw = { "raw", 0, 0xFF };
	amd_generic_event_t	*gevp;
	int			i;
	uint64_t		evsel = 0, evsel_tmp = 0;

	/*
	 * If we've been handed an existing configuration, we need only preset
	 * the counter value.
	 */
	if (*data != NULL) {
		cfg = *data;
		cfg->opt_rawpic = preset & MASK48;
		return (0);
	}

	if (picnum >= 4)
		return (CPC_INVALID_PICNUM);

	if ((evp = find_event(event)) == NULL) {
		if ((gevp = find_generic_event(event)) != NULL) {
			evp = find_event(gevp->event);
			ASSERT(evp != NULL);

			if (nattrs > 0)
				return (CPC_ATTRIBUTE_OUT_OF_RANGE);

			evsel |= gevp->umask << OPT_PES_UMASK_SHIFT;
		} else {
			long tmp;

			/*
			 * If ddi_strtol() likes this event, use it as a raw
			 * event code.
			 */
			if (ddi_strtol(event, NULL, 0, &tmp) != 0)
				return (CPC_INVALID_EVENT);

			ev_raw.emask = tmp;
			evp = &ev_raw;
		}
	}

	/*
	 * Configuration of EventSelect register for family 10h processors.
	 */
	if (amd_family == AMD_FAMILY_10H) {

		/* Clear the GuestOnly and HostOnly bits */
		evsel &= ~OPT_PES_HOST;
		evsel &= ~OPT_PES_GUEST;

		/* Set bits [35:32] for extended part of Event Select field */
		evsel_tmp = evp->emask & 0x0f00;
		evsel |= evsel_tmp << 24;
	}

	evsel |= evp->emask & 0x00ff;

	if (flags & CPC_COUNT_USER)
		evsel |= OPT_PES_USR;
	if (flags & CPC_COUNT_SYSTEM)
		evsel |= OPT_PES_OS;
	if (flags & CPC_OVF_NOTIFY_EMT)
		evsel |= OPT_PES_INT;

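	/*
	 * Apply the user-specified attributes.  The cmask value must fit in
	 * the 8-bit counter-mask field, and the umask value may only set bits
	 * that are valid (unreserved) for the chosen event.
	 */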
	for (i = 0; i < nattrs; i++) {
		if (strcmp(attrs[i].ka_name, "edge") == 0) {
			if (attrs[i].ka_val != 0)
				evsel |= OPT_PES_EDGE;
		} else if (strcmp(attrs[i].ka_name, "pc") == 0) {
			if (attrs[i].ka_val != 0)
				evsel |= OPT_PES_PC;
		} else if (strcmp(attrs[i].ka_name, "inv") == 0) {
			if (attrs[i].ka_val != 0)
				evsel |= OPT_PES_INV;
		} else if (strcmp(attrs[i].ka_name, "cmask") == 0) {
			if ((attrs[i].ka_val | OPT_PES_CMASK_MASK) !=
			    OPT_PES_CMASK_MASK)
				return (CPC_ATTRIBUTE_OUT_OF_RANGE);
			evsel |= attrs[i].ka_val << OPT_PES_CMASK_SHIFT;
		} else if (strcmp(attrs[i].ka_name, "umask") == 0) {
			if ((attrs[i].ka_val | evp->umask_valid) !=
			    evp->umask_valid)
				return (CPC_ATTRIBUTE_OUT_OF_RANGE);
			evsel |= attrs[i].ka_val << OPT_PES_UMASK_SHIFT;
		} else
			return (CPC_INVALID_ATTRIBUTE);
	}

	cfg = kmem_alloc(sizeof (*cfg), KM_SLEEP);

	cfg->opt_picno = picnum;
	cfg->opt_evsel = evsel;
	cfg->opt_rawpic = preset & MASK48;

	*data = cfg;
	return (0);
}

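/*
 * Program all four counters with the configurations that the kernel has
 * associated with the given CPC context; counters with no active request
 * receive a null configuration that counts nothing.
 */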
static void
opt_pcbe_program(void *token)
{
	opt_pcbe_config_t	*cfgs[4] = { &nullcfgs[0], &nullcfgs[1],
						&nullcfgs[2], &nullcfgs[3] };
	opt_pcbe_config_t	*pcfg = NULL;
	int			i;
	ulong_t			curcr4 = getcr4();

	/*
	 * Allow nonprivileged code to read the performance counters if desired.
	 */
	if (kcpc_allow_nonpriv(token))
		setcr4(curcr4 | CR4_PCE);
	else
		setcr4(curcr4 & ~CR4_PCE);

	/*
	 * Query kernel for all configs which will be co-programmed.
	 */
	do {
		pcfg = (opt_pcbe_config_t *)kcpc_next_config(token, pcfg, NULL);

		if (pcfg != NULL) {
			ASSERT(pcfg->opt_picno < 4);
			cfgs[pcfg->opt_picno] = pcfg;
		}
	} while (pcfg != NULL);

	/*
	 * Program in two loops. The first loop configures and presets each
	 * counter, and the second loop enables the counters. This ensures
	 * that the counters are all enabled as closely together in time as
	 * possible.
	 */

	for (i = 0; i < 4; i++) {
		wrmsr(PES_BASE_ADDR + i, cfgs[i]->opt_evsel);
		wrmsr(PIC_BASE_ADDR + i, cfgs[i]->opt_rawpic);
	}

	for (i = 0; i < 4; i++) {
		wrmsr(PES_BASE_ADDR + i, cfgs[i]->opt_evsel | OPT_PES_ENABLE);
	}
}

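/*
 * Stop all counters by clearing every performance event-select register.
 */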
static void
opt_pcbe_allstop(void)
{
	int		i;

	for (i = 0; i < 4; i++)
		wrmsr(PES_BASE_ADDR + i, 0ULL);

	/*
	 * Disable non-privileged access to the counter registers.
	 */
	setcr4(getcr4() & ~CR4_PCE);
}

static void
opt_pcbe_sample(void *token)
{
	opt_pcbe_config_t	*cfgs[4] = { NULL, NULL, NULL, NULL };
	opt_pcbe_config_t	*pcfg = NULL;
	int			i;
	uint64_t		curpic[4];
	uint64_t		*addrs[4];
	uint64_t		*tmp;
	int64_t			diff;

	for (i = 0; i < 4; i++)
		curpic[i] = rdmsr(PIC_BASE_ADDR + i);

	/*
	 * Query kernel for all configs which are co-programmed.
	 */
	do {
		pcfg = (opt_pcbe_config_t *)kcpc_next_config(token, pcfg, &tmp);

		if (pcfg != NULL) {
			ASSERT(pcfg->opt_picno < 4);
			cfgs[pcfg->opt_picno] = pcfg;
			addrs[pcfg->opt_picno] = tmp;
		}
	} while (pcfg != NULL);

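	/*
	 * For each active counter, compute how far the 48-bit hardware counter
	 * has advanced since it was last programmed (the mask handles
	 * wraparound), add that delta to the kernel's virtualized 64-bit
	 * count, and resynchronize opt_rawpic with the new value.
	 */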
	for (i = 0; i < 4; i++) {
		if (cfgs[i] == NULL)
			continue;

		diff = (curpic[i] - cfgs[i]->opt_rawpic) & MASK48;
		*addrs[i] += diff;
		DTRACE_PROBE4(opt__pcbe__sample, int, i, uint64_t, *addrs[i],
		    uint64_t, curpic[i], uint64_t, cfgs[i]->opt_rawpic);
		cfgs[i]->opt_rawpic = *addrs[i] & MASK48;
	}
}

static void
opt_pcbe_free(void *config)
{
	kmem_free(config, sizeof (opt_pcbe_config_t));
}


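/*
 * Module linkage: register this back-end with the kernel as a loadable
 * PCBE module.
 */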
static struct modlpcbe modlpcbe = {
	&mod_pcbeops,
	"AMD Performance Counters",
	&opt_pcbe_ops
};

static struct modlinkage modl = {
	MODREV_1,
	&modlpcbe,
};

int
_init(void)
{
	int ret;

	if (opt_pcbe_init() != 0)
		return (ENOTSUP);

	if ((ret = mod_install(&modl)) != 0)
		kmem_free(evlist, evlist_sz + 1);

	return (ret);
}

int
_fini(void)
{
	int ret;

	if ((ret = mod_remove(&modl)) == 0)
		kmem_free(evlist, evlist_sz + 1);
	return (ret);
}

int
_info(struct modinfo *mi)
{
	return (mod_info(&modl, mi));
}