xref: /illumos-gate/usr/src/uts/intel/pcbe/opteron_pcbe.c (revision 02ac56e010f18fc0c5aafe47377586d8ba8c897c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * This file contains preset event names from the Performance Application
28  * Programming Interface v3.5 which included the following notice:
29  *
30  *                             Copyright (c) 2005,6
31  *                           Innovative Computing Labs
32  *                         Computer Science Department,
33  *                            University of Tennessee,
34  *                                 Knoxville, TN.
35  *                              All Rights Reserved.
36  *
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions are met:
40  *
41  *    * Redistributions of source code must retain the above copyright notice,
42  *      this list of conditions and the following disclaimer.
43  *    * Redistributions in binary form must reproduce the above copyright
44  *	notice, this list of conditions and the following disclaimer in the
45  *	documentation and/or other materials provided with the distribution.
46  *    * Neither the name of the University of Tennessee nor the names of its
47  *      contributors may be used to endorse or promote products derived from
48  *	this software without specific prior written permission.
49  *
50  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
51  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
54  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
55  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
56  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
57  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
58  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
59  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
60  * POSSIBILITY OF SUCH DAMAGE.
61  *
62  *
63  * This open source software license conforms to the BSD License template.
64  */
65 
66 /*
67  * Portions Copyright 2009 Advanced Micro Devices, Inc.
68  * Copyright 2019 Joyent, Inc.
69  * Copyright 2024 Oxide Computer Company
70  */
71 
72 /*
 * Performance Counter Back-End for AMD Opteron, AMD Athlon 64, and Zen-era
 * processors.
75  */
76 
77 #include <sys/cpuvar.h>
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/cpc_pcbe.h>
81 #include <sys/kmem.h>
82 #include <sys/sdt.h>
83 #include <sys/modctl.h>
84 #include <sys/errno.h>
85 #include <sys/debug.h>
86 #include <sys/archsystm.h>
87 #include <sys/x86_archext.h>
88 #include <sys/privregs.h>
89 #include <sys/ddi.h>
90 #include <sys/sunddi.h>
91 
92 #include "opteron_pcbe_table.h"
93 #include <opteron_pcbe_cpcgen.h>
94 
95 static int opt_pcbe_init(void);
96 static uint_t opt_pcbe_ncounters(void);
97 static const char *opt_pcbe_impl_name(void);
98 static const char *opt_pcbe_cpuref(void);
99 static char *opt_pcbe_list_events(uint_t picnum);
100 static char *opt_pcbe_list_attrs(void);
101 static uint64_t opt_pcbe_event_coverage(char *event);
102 static uint64_t opt_pcbe_overflow_bitmap(void);
103 static int opt_pcbe_configure(uint_t picnum, char *event, uint64_t preset,
104     uint32_t flags, uint_t nattrs, kcpc_attr_t *attrs, void **data,
105     void *token);
106 static void opt_pcbe_program(void *token);
107 static void opt_pcbe_allstop(void);
108 static void opt_pcbe_sample(void *token);
109 static void opt_pcbe_free(void *config);
110 
111 static pcbe_ops_t opt_pcbe_ops = {
112 	PCBE_VER_1,
113 	CPC_CAP_OVERFLOW_INTERRUPT,
114 	opt_pcbe_ncounters,
115 	opt_pcbe_impl_name,
116 	opt_pcbe_cpuref,
117 	opt_pcbe_list_events,
118 	opt_pcbe_list_attrs,
119 	opt_pcbe_event_coverage,
120 	opt_pcbe_overflow_bitmap,
121 	opt_pcbe_configure,
122 	opt_pcbe_program,
123 	opt_pcbe_allstop,
124 	opt_pcbe_sample,
125 	opt_pcbe_free
126 };
127 
128 /*
129  * Base MSR addresses for the PerfEvtSel registers and the counters themselves.
130  * Add counter number to base address to get corresponding MSR address.
131  */
132 #define	PES_BASE_ADDR	0xC0010000
133 #define	PIC_BASE_ADDR	0xC0010004
134 
135 /*
 * Base MSR addresses for the extended PerfEvtSel registers and counters. The
 * event select and counter registers are interleaved, so the counter number
 * must be multiplied by two to obtain the offset from each base address.
139  */
140 #define	PES_EXT_BASE_ADDR	0xC0010200
141 #define	PIC_EXT_BASE_ADDR	0xC0010201
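
/*
 * For example, counter 2 is at MSR 0xC0010002 (PerfEvtSel) and 0xC0010006
 * (counter) in the legacy layout, but at MSR 0xC0010204 (PerfEvtSel) and
 * 0xC0010205 (counter) in the extended, interleaved layout.
 */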
142 
143 /*
 * The number of counters available depends on which CPU features are present.
145  */
146 #define	OPT_PCBE_DEF_NCOUNTERS	4
147 #define	OPT_PCBE_EXT_NCOUNTERS	6
148 
149 /*
150  * Define offsets and masks for the fields in the Performance
151  * Event-Select (PES) registers.
152  */
153 #define	OPT_PES_HOST_SHIFT	41
154 #define	OPT_PES_GUEST_SHIFT	40
155 #define	OPT_PES_EVSELHI_SHIFT	32
156 #define	OPT_PES_CMASK_SHIFT	24
157 #define	OPT_PES_CMASK_MASK	0xFF
158 #define	OPT_PES_INV_SHIFT	23
159 #define	OPT_PES_ENABLE_SHIFT	22
160 #define	OPT_PES_INT_SHIFT	20
161 #define	OPT_PES_PC_SHIFT	19
162 #define	OPT_PES_EDGE_SHIFT	18
163 #define	OPT_PES_OS_SHIFT	17
164 #define	OPT_PES_USR_SHIFT	16
165 #define	OPT_PES_UMASK_SHIFT	8
166 #define	OPT_PES_UMASK_MASK	0xFF
167 
168 #define	OPT_PES_INV		(1ULL << OPT_PES_INV_SHIFT)
169 #define	OPT_PES_ENABLE		(1ULL << OPT_PES_ENABLE_SHIFT)
170 #define	OPT_PES_INT		(1ULL << OPT_PES_INT_SHIFT)
171 #define	OPT_PES_PC		(1ULL << OPT_PES_PC_SHIFT)
172 #define	OPT_PES_EDGE		(1ULL << OPT_PES_EDGE_SHIFT)
173 #define	OPT_PES_OS		(1ULL << OPT_PES_OS_SHIFT)
174 #define	OPT_PES_USR		(1ULL << OPT_PES_USR_SHIFT)
175 #define	OPT_PES_HOST		(1ULL << OPT_PES_HOST_SHIFT)
176 #define	OPT_PES_GUEST		(1ULL << OPT_PES_GUEST_SHIFT)
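
/*
 * Putting the fields above together, the PerfEvtSel layout used here is:
 * EventSelect[7:0] in bits [7:0], the unit mask in bits [15:8], the
 * USR/OS/EDGE/PC/INT/ENABLE/INV flags in bits 16-23 (bit 21 is reserved),
 * CntMask in bits [31:24], EventSelect[11:8] in bits [35:32], and the
 * GuestOnly/HostOnly qualifiers in bits 40 and 41. As an illustrative
 * example, event code 0x1C0 counted in both user and kernel mode is
 * programmed as 0x1004300C0 once the enable bit is set.
 */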
177 
178 typedef struct _opt_pcbe_config {
	uint8_t		opt_picno;	/* Counter number: 0 .. opd_ncounters - 1 */
180 	uint64_t	opt_evsel;	/* Event Selection register */
181 	uint64_t	opt_rawpic;	/* Raw counter value */
182 } opt_pcbe_config_t;
183 
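/*
 * Null configurations: opt_pcbe_program() falls back to these for any counter
 * that has no active configuration, so that counter's event select and count
 * MSRs are simply programmed with zeroed values.
 */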
184 opt_pcbe_config_t nullcfgs[OPT_PCBE_EXT_NCOUNTERS] = {
185 	{ 0, 0, 0 },
186 	{ 1, 0, 0 },
187 	{ 2, 0, 0 },
188 	{ 3, 0, 0 },
189 	{ 4, 0, 0 },
190 	{ 5, 0, 0 },
191 };
192 
193 typedef uint64_t (*opt_pcbe_addr_f)(uint_t);
194 
195 typedef struct opt_pcbe_data {
196 	uint_t		opd_ncounters;
197 	uint_t		opd_cmask;
198 	opt_pcbe_addr_f	opd_pesf;
199 	opt_pcbe_addr_f	opd_picf;
200 } opt_pcbe_data_t;
201 
202 opt_pcbe_data_t opd;
203 
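/* The performance counters are 48 bits wide. */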
204 #define	MASK48		0xFFFFFFFFFFFF
205 
206 #define	EV_END {NULL, 0}
207 #define	GEN_EV_END {NULL, NULL, 0 }
208 
/*
 * The following macros are used to define tables of events that are used by
 * various families and some generic classes of events.
 *
 * When programming a performance counter, there are two different values that
 * we need to set:
 *
 *   o Event - Determines the general class of event that is being counted.
 *   o Unit  - A further breakdown that selects a more specific variant of the
 *             event.
 *
 * Prior to the introduction of family 17h support, all family-specific events
 * were programmed based on their event code alone. The generic events, which
 * provide PAPI mappings, specified an additional unit mask.
 *
 * Starting with family 17h, CPU performance counters default to using both the
 * unit mask and the event select. Each generic event is an alias for a
 * specific event/unit pair, which is why its unit field is always zero. In
 * addition, the naming of events in family 17h and later follows AMD's
 * documentation. While this is a departure from what people are used to,
 * matching the literature that users are directed to reference is considered
 * more valuable.
 */
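
/*
 * For example, the generic alias { "PAPI_fp_ops", "FP_dispatched_fpu_ops",
 * 0x3 } below counts FP_dispatched_fpu_ops (event 0x0) with a unit mask of
 * 0x3, while the family 17h and later generic aliases carry no unit mask of
 * their own because the cpcgen-generated event they name already encodes it.
 */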
231 
232 #define	AMD_cmn_events						\
233 	{ "FP_dispatched_fpu_ops",			0x0 },	\
234 	{ "FP_cycles_no_fpu_ops_retired",		0x1 },	\
235 	{ "FP_dispatched_fpu_ops_ff",			0x2 },	\
236 	{ "LS_seg_reg_load",				0x20 },	\
237 	{ "LS_uarch_resync_self_modify",		0x21 },	\
238 	{ "LS_uarch_resync_snoop",			0x22 },	\
239 	{ "LS_buffer_2_full",				0x23 },	\
240 	{ "LS_locked_operation",			0x24 },	\
241 	{ "LS_retired_cflush",				0x26 },	\
242 	{ "LS_retired_cpuid",				0x27 },	\
243 	{ "DC_access",					0x40 },	\
244 	{ "DC_miss",					0x41 },	\
245 	{ "DC_refill_from_L2",				0x42 },	\
246 	{ "DC_refill_from_system",			0x43 },	\
247 	{ "DC_copyback",				0x44 },	\
248 	{ "DC_dtlb_L1_miss_L2_hit",			0x45 },	\
249 	{ "DC_dtlb_L1_miss_L2_miss",			0x46 },	\
250 	{ "DC_misaligned_data_ref",			0x47 },	\
251 	{ "DC_uarch_late_cancel_access",		0x48 },	\
252 	{ "DC_uarch_early_cancel_access",		0x49 },	\
253 	{ "DC_1bit_ecc_error_found",			0x4A },	\
254 	{ "DC_dispatched_prefetch_instr",		0x4B },	\
255 	{ "DC_dcache_accesses_by_locks",		0x4C },	\
256 	{ "BU_memory_requests",				0x65 },	\
257 	{ "BU_data_prefetch",				0x67 },	\
258 	{ "BU_system_read_responses",			0x6C },	\
259 	{ "BU_cpu_clk_unhalted",			0x76 },	\
260 	{ "BU_internal_L2_req",				0x7D },	\
261 	{ "BU_fill_req_missed_L2",			0x7E },	\
262 	{ "BU_fill_into_L2",				0x7F },	\
263 	{ "IC_fetch",					0x80 },	\
264 	{ "IC_miss",					0x81 },	\
265 	{ "IC_refill_from_L2",				0x82 },	\
266 	{ "IC_refill_from_system",			0x83 },	\
267 	{ "IC_itlb_L1_miss_L2_hit",			0x84 },	\
268 	{ "IC_itlb_L1_miss_L2_miss",			0x85 },	\
269 	{ "IC_uarch_resync_snoop",			0x86 },	\
270 	{ "IC_instr_fetch_stall",			0x87 },	\
271 	{ "IC_return_stack_hit",			0x88 },	\
272 	{ "IC_return_stack_overflow",			0x89 },	\
273 	{ "FR_retired_x86_instr_w_excp_intr",		0xC0 },	\
274 	{ "FR_retired_uops",				0xC1 },	\
275 	{ "FR_retired_branches_w_excp_intr",		0xC2 },	\
276 	{ "FR_retired_branches_mispred",		0xC3 },	\
277 	{ "FR_retired_taken_branches",			0xC4 },	\
278 	{ "FR_retired_taken_branches_mispred",		0xC5 },	\
279 	{ "FR_retired_far_ctl_transfer",		0xC6 },	\
280 	{ "FR_retired_resyncs",				0xC7 },	\
281 	{ "FR_retired_near_rets",			0xC8 },	\
282 	{ "FR_retired_near_rets_mispred",		0xC9 },	\
283 	{ "FR_retired_taken_branches_mispred_addr_miscomp",	0xCA },\
284 	{ "FR_retired_fastpath_double_op_instr",	0xCC },	\
285 	{ "FR_intr_masked_cycles",			0xCD },	\
286 	{ "FR_intr_masked_while_pending_cycles",	0xCE },	\
287 	{ "FR_taken_hardware_intrs",			0xCF },	\
288 	{ "FR_nothing_to_dispatch",			0xD0 },	\
289 	{ "FR_dispatch_stalls",				0xD1 },	\
290 	{ "FR_dispatch_stall_branch_abort_to_retire",	0xD2 },	\
291 	{ "FR_dispatch_stall_serialization",		0xD3 },	\
292 	{ "FR_dispatch_stall_segment_load",		0xD4 },	\
293 	{ "FR_dispatch_stall_reorder_buffer_full",	0xD5 },	\
294 	{ "FR_dispatch_stall_resv_stations_full",	0xD6 },	\
295 	{ "FR_dispatch_stall_fpu_full",			0xD7 },	\
296 	{ "FR_dispatch_stall_ls_full",			0xD8 },	\
297 	{ "FR_dispatch_stall_waiting_all_quiet",	0xD9 },	\
298 	{ "FR_dispatch_stall_far_ctl_trsfr_resync_branch_pend",	0xDA },\
299 	{ "FR_fpu_exception",				0xDB },	\
300 	{ "FR_num_brkpts_dr0",				0xDC },	\
301 	{ "FR_num_brkpts_dr1",				0xDD },	\
302 	{ "FR_num_brkpts_dr2",				0xDE },	\
303 	{ "FR_num_brkpts_dr3",				0xDF },	\
304 	{ "NB_mem_ctrlr_page_access",			0xE0 },	\
305 	{ "NB_mem_ctrlr_turnaround",			0xE3 },	\
306 	{ "NB_mem_ctrlr_bypass_counter_saturation",	0xE4 },	\
307 	{ "NB_cpu_io_to_mem_io",			0xE9 },	\
308 	{ "NB_cache_block_commands",			0xEA },	\
309 	{ "NB_sized_commands",				0xEB },	\
310 	{ "NB_ht_bus0_bandwidth",			0xF6 }
311 
312 #define	AMD_FAMILY_f_events					\
313 	{ "BU_quadwords_written_to_system",		0x6D },	\
314 	{ "FR_retired_fpu_instr",			0xCB },	\
315 	{ "NB_mem_ctrlr_page_table_overflow",		0xE1 },	\
316 	{ "NB_sized_blocks",				0xE5 },	\
317 	{ "NB_ECC_errors",				0xE8 },	\
318 	{ "NB_probe_result",				0xEC },	\
319 	{ "NB_gart_events",				0xEE },	\
320 	{ "NB_ht_bus1_bandwidth",			0xF7 },	\
321 	{ "NB_ht_bus2_bandwidth",			0xF8 }
322 
323 #define	AMD_FAMILY_10h_events					\
324 	{ "FP_retired_sse_ops",				0x3 },	\
325 	{ "FP_retired_move_ops",			0x4 },	\
326 	{ "FP_retired_serialize_ops",			0x5 },	\
327 	{ "FP_serialize_ops_cycles",			0x6 },	\
328 	{ "LS_cancelled_store_to_load_fwd_ops",		0x2A },	\
329 	{ "LS_smi_received",				0x2B },	\
330 	{ "DC_dtlb_L1_hit",				0x4D },	\
331 	{ "LS_ineffective_prefetch",			0x52 },	\
332 	{ "LS_global_tlb_flush",			0x54 },	\
333 	{ "BU_octwords_written_to_system",		0x6D },	\
334 	{ "Page_size_mismatches",			0x165 },	\
335 	{ "IC_eviction",				0x8B },	\
336 	{ "IC_cache_lines_invalidate",			0x8C },	\
337 	{ "IC_itlb_reload",				0x99 },	\
338 	{ "IC_itlb_reload_aborted",			0x9A },	\
339 	{ "FR_retired_mmx_sse_fp_instr",		0xCB },	\
340 	{ "Retired_x87_fp_ops",				0x1C0 },	\
341 	{ "IBS_ops_tagged",				0x1CF },	\
342 	{ "LFENCE_inst_retired",			0x1D3 },	\
343 	{ "SFENCE_inst_retired",			0x1D4 },	\
344 	{ "MFENCE_inst_retired",			0x1D5 },	\
345 	{ "NB_mem_ctrlr_page_table_overflow",		0xE1 },	\
346 	{ "NB_mem_ctrlr_dram_cmd_slots_missed",		0xE2 },	\
347 	{ "NB_thermal_status",				0xE8 },	\
348 	{ "NB_probe_results_upstream_req",		0xEC },	\
349 	{ "NB_gart_events",				0xEE },	\
350 	{ "NB_mem_ctrlr_req",				0x1F0 },	\
351 	{ "CB_cpu_to_dram_req_to_target",		0x1E0 },	\
352 	{ "CB_io_to_dram_req_to_target",		0x1E1 },	\
353 	{ "CB_cpu_read_cmd_latency_to_target_0_to_3",	0x1E2 },	\
354 	{ "CB_cpu_read_cmd_req_to_target_0_to_3",	0x1E3 },	\
355 	{ "CB_cpu_read_cmd_latency_to_target_4_to_7",	0x1E4 },	\
356 	{ "CB_cpu_read_cmd_req_to_target_4_to_7",	0x1E5 },	\
357 	{ "CB_cpu_cmd_latency_to_target_0_to_7",	0x1E6 },	\
358 	{ "CB_cpu_req_to_target_0_to_7",		0x1E7 },	\
359 	{ "NB_ht_bus1_bandwidth",			0xF7 },	\
360 	{ "NB_ht_bus2_bandwidth",			0xF8 },	\
361 	{ "NB_ht_bus3_bandwidth",			0x1F9 },	\
362 	{ "L3_read_req",				0x4E0 },	\
363 	{ "L3_miss",					0x4E1 },	\
364 	{ "L3_l2_eviction_l3_fill",			0x4E2 },	\
365 	{ "L3_eviction",				0x4E3 }
366 
367 #define	AMD_FAMILY_11h_events					\
368 	{ "BU_quadwords_written_to_system",		0x6D },	\
369 	{ "FR_retired_mmx_fp_instr",			0xCB },	\
370 	{ "NB_mem_ctrlr_page_table_events",		0xE1 },	\
371 	{ "NB_thermal_status",				0xE8 },	\
372 	{ "NB_probe_results_upstream_req",		0xEC },	\
373 	{ "NB_dev_events",				0xEE },	\
374 	{ "NB_mem_ctrlr_req",				0x1F0 }
375 
376 #define	AMD_cmn_generic_events						\
377 	{ "PAPI_br_ins",	"FR_retired_branches_w_excp_intr", 0x0 },\
378 	{ "PAPI_br_msp",	"FR_retired_branches_mispred",	0x0 },	\
379 	{ "PAPI_br_tkn",	"FR_retired_taken_branches",	0x0 },	\
380 	{ "PAPI_fp_ops",	"FP_dispatched_fpu_ops",	0x3 },	\
381 	{ "PAPI_fad_ins",	"FP_dispatched_fpu_ops",	0x1 },	\
382 	{ "PAPI_fml_ins",	"FP_dispatched_fpu_ops",	0x2 },	\
383 	{ "PAPI_fpu_idl",	"FP_cycles_no_fpu_ops_retired",	0x0 },	\
384 	{ "PAPI_tot_cyc",	"BU_cpu_clk_unhalted",		0x0 },	\
385 	{ "PAPI_tot_ins",	"FR_retired_x86_instr_w_excp_intr", 0x0 }, \
386 	{ "PAPI_l1_dca",	"DC_access",			0x0 },	\
387 	{ "PAPI_l1_dcm",	"DC_miss",			0x0 },	\
388 	{ "PAPI_l1_ldm",	"DC_refill_from_L2",		0xe },	\
389 	{ "PAPI_l1_stm",	"DC_refill_from_L2",		0x10 },	\
390 	{ "PAPI_l1_ica",	"IC_fetch",			0x0 },	\
391 	{ "PAPI_l1_icm",	"IC_miss",			0x0 },	\
392 	{ "PAPI_l1_icr",	"IC_fetch",			0x0 },	\
393 	{ "PAPI_l2_dch",	"DC_refill_from_L2",		0x1e },	\
394 	{ "PAPI_l2_dcm",	"DC_refill_from_system",	0x1e },	\
395 	{ "PAPI_l2_dcr",	"DC_refill_from_L2",		0xe },	\
396 	{ "PAPI_l2_dcw",	"DC_refill_from_L2",		0x10 },	\
397 	{ "PAPI_l2_ich",	"IC_refill_from_L2",		0x0 },	\
398 	{ "PAPI_l2_icm",	"IC_refill_from_system",	0x0 },	\
399 	{ "PAPI_l2_ldm",	"DC_refill_from_system",	0xe },	\
400 	{ "PAPI_l2_stm",	"DC_refill_from_system",	0x10 },	\
401 	{ "PAPI_res_stl",	"FR_dispatch_stalls",		0x0 },	\
402 	{ "PAPI_stl_icy",	"FR_nothing_to_dispatch",	0x0 },	\
403 	{ "PAPI_hw_int",	"FR_taken_hardware_intrs",	0x0 }
404 
405 #define	OPT_cmn_generic_events						\
406 	{ "PAPI_tlb_dm",	"DC_dtlb_L1_miss_L2_miss",	0x0 },	\
407 	{ "PAPI_tlb_im",	"IC_itlb_L1_miss_L2_miss",	0x0 },	\
408 	{ "PAPI_fp_ins",	"FR_retired_fpu_instr",		0xd },	\
409 	{ "PAPI_vec_ins",	"FR_retired_fpu_instr",		0x4 }
410 
411 #define	AMD_FAMILY_10h_generic_events					\
412 	{ "PAPI_tlb_dm",	"DC_dtlb_L1_miss_L2_miss",	0x7 },	\
413 	{ "PAPI_tlb_im",	"IC_itlb_L1_miss_L2_miss",	0x3 },	\
414 	{ "PAPI_l3_dcr",	"L3_read_req",			0xf1 }, \
415 	{ "PAPI_l3_icr",	"L3_read_req",			0xf2 }, \
416 	{ "PAPI_l3_tcr",	"L3_read_req",			0xf7 }, \
417 	{ "PAPI_l3_stm",	"L3_miss",			0xf4 }, \
418 	{ "PAPI_l3_ldm",	"L3_miss",			0xf3 }, \
419 	{ "PAPI_l3_tcm",	"L3_miss",			0xf7 }
420 
421 static const amd_event_t family_f_events[] = {
422 	AMD_cmn_events,
423 	AMD_FAMILY_f_events,
424 	EV_END
425 };
426 
427 static const amd_event_t family_10h_events[] = {
428 	AMD_cmn_events,
429 	AMD_FAMILY_10h_events,
430 	EV_END
431 };
432 
433 static const amd_event_t family_11h_events[] = {
434 	AMD_cmn_events,
435 	AMD_FAMILY_11h_events,
436 	EV_END
437 };
438 
439 static const amd_generic_event_t opt_generic_events[] = {
440 	AMD_cmn_generic_events,
441 	OPT_cmn_generic_events,
442 	GEN_EV_END
443 };
444 
445 static const amd_generic_event_t family_10h_generic_events[] = {
446 	AMD_cmn_generic_events,
447 	AMD_FAMILY_10h_generic_events,
448 	GEN_EV_END
449 };
450 
451 /*
 * For family 17h and later (families 17h, 19h, and 1Ah), the cpcgen utility
 * generates all of our events, including ones that need specific unit codes,
 * so we leave all unit codes out of these tables. Zen 1 through Zen 5 each
 * support different event sets.
456  */
457 static const amd_generic_event_t family_17h_zen1_papi_events[] = {
458 	{ "PAPI_br_cn",		"ExRetCond" },
459 	{ "PAPI_br_ins",	"ExRetBrn" },
460 	{ "PAPI_fpu_idl",	"FpSchedEmpty" },
461 	{ "PAPI_tot_cyc",	"LsNotHaltedCyc" },
462 	{ "PAPI_tot_ins",	"ExRetInstr" },
463 	{ "PAPI_tlb_dm",	"LsL1DTlbMiss" },
464 	{ "PAPI_tlb_im",	"BpL1TlbMissL2Miss" },
465 	GEN_EV_END
466 };
467 
468 static const amd_generic_event_t family_17h_zen2_papi_events[] = {
469 	{ "PAPI_br_cn",		"ExRetCond" },
470 	{ "PAPI_br_ins",	"ExRetBrn" },
471 	{ "PAPI_tot_cyc",	"LsNotHaltedCyc" },
472 	{ "PAPI_tot_ins",	"ExRetInstr" },
473 	{ "PAPI_tlb_dm",	"LsL1DTlbMiss" },
474 	{ "PAPI_tlb_im",	"BpL1TlbMissL2Miss" },
475 	GEN_EV_END
476 };
477 
478 static const amd_generic_event_t family_19h_zen3_papi_events[] = {
479 	{ "PAPI_br_cn",		"ExRetCond" },
480 	{ "PAPI_br_ins",	"ExRetBrn" },
481 	{ "PAPI_tot_cyc",	"LsNotHaltedCyc" },
482 	{ "PAPI_tot_ins",	"ExRetInstr" },
483 	{ "PAPI_tlb_dm",	"LsL1DTlbMiss" },
484 	{ "PAPI_tlb_im",	"BpL1TlbMissL2TlbMiss" },
485 	GEN_EV_END
486 };
487 
488 static const amd_generic_event_t family_19h_zen4_papi_events[] = {
489 	{ "PAPI_br_cn",		"ExRetCond" },
490 	{ "PAPI_br_ins",	"ExRetBrn" },
491 	{ "PAPI_tot_cyc",	"LsNotHaltedCyc" },
492 	{ "PAPI_tot_ins",	"ExRetInstr" },
493 	{ "PAPI_tlb_dm",	"LsL1DTlbMiss" },
494 	{ "PAPI_tlb_im",	"BpL1TlbMissL2TlbMiss" },
495 	GEN_EV_END
496 };
497 
498 static const amd_generic_event_t family_1ah_zen5_papi_events[] = {
499 	{ "PAPI_br_cn",		"Retired_Conditional_Branch_Instructions" },
500 	{ "PAPI_br_ins",	"Retired_Branch_Instructions" },
501 	{ "PAPI_br_msp",
502 		"Retired_Conditional_Branch_Instructions_Mispredicted" },
503 	{ "PAPI_br_ucn",	"Retired_Unconditional_Branch_Instructions" },
504 	{ "PAPI_tot_cyc",	"Cycles_Not_in_Halt" },
505 	{ "PAPI_tot_ins",	"Retired_Instructions" },
506 	{ "PAPI_hw_int",	"Interrupts_Taken" },
507 	{ "PAPI_tlb_sd",	"TLB_Flush_Events" },
508 	GEN_EV_END
509 };
510 
511 static char	*evlist;
512 static size_t	evlist_sz;
513 static const amd_event_t *amd_events = NULL;
514 static uint_t amd_family, amd_model;
515 static const amd_generic_event_t *amd_generic_events = NULL;
516 
517 static char amd_fam_f_rev_ae_bkdg[] = "See \"BIOS and Kernel Developer's "
518 "Guide for AMD Athlon 64 and AMD Opteron Processors\" (AMD publication 26094)";
519 static char amd_fam_f_NPT_bkdg[] = "See \"BIOS and Kernel Developer's Guide "
520 "for AMD NPT Family 0Fh Processors\" (AMD publication 32559)";
521 static char amd_fam_10h_bkdg[] = "See \"BIOS and Kernel Developer's Guide "
522 "(BKDG) For AMD Family 10h Processors\" (AMD publication 31116)";
523 static char amd_fam_11h_bkdg[] = "See \"BIOS and Kernel Developer's Guide "
524 "(BKDG) For AMD Family 11h Processors\" (AMD publication 41256)";
525 static char amd_fam_17h_zen1_reg[] = "See \"Open-Source Register Reference For "
526 "AMD Family 17h Processors Models 00h-2Fh\" (AMD publication 56255) and "
527 "amd_f17h_zen1_events(3CPC)";
528 static char amd_fam_17h_zen2_reg[] = "See \"Preliminary Processor Programming "
529 "Reference (PPR) for AMD Family 17h Model 31h, Revision B0 Processors\" "
530 "(AMD publication 55803), \"Processor Programming Reference (PPR) for AMD "
531 "Family 17h Model 71h, Revision B0 Processors\" (AMD publication 56176), and "
532 "amd_f17h_zen2_events(3CPC)";
533 static char amd_fam_19h_zen3_reg[] = "See \"Preliminary Processor Programming "
534 "Reference (PPR) for AMD Family 19h Model 01h, Revision B1 Processors Volume "
535 "1 of 2\" (AMD publication 55898), \"Processor Programming Reference (PPR) "
536 "for AMD Family 19h Model 21h, Revision B0 Processors\" (AMD publication "
537 "56214), and amd_f19h_zen3_events(3CPC)";
538 static char amd_fam_19h_zen4_reg[] = "See \"Processor Programming Reference "
539 "(PPR) for AMD Family 19h Model 11h, Revision B1 Processors Volume 1 of 6\" "
540 "(AMD publication 55901), \"Processor Programming Reference (PPR) for AMD "
541 "Family 19h Model 61h, Revision B1 Processors\" (AMD publication 56713), "
542 "\"Processor Programming Reference (PPR) for AMD Family 19h Model 70h, "
543 "Revision A0 Processors\" (AMD publication 57019), and "
544 "amd_f19h_zen4_events(3CPC)";
545 static char amd_fam_1ah_zen5_reg[] = "See \"Performance Monitor Counters "
546 "for AMD Family 1Ah Model 00h-Fh Processors\" (AMD publication 58550) and "
547 "amd_f1ah_zen5_events(3CPC)";
548 
549 static char amd_pcbe_impl_name[64];
550 static char *amd_pcbe_cpuref;
551 
552 
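/* Extract bits [u:l] (inclusive) of v. */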
553 #define	BITS(v, u, l)   \
554 	(((v) >> (l)) & ((1 << (1 + (u) - (l))) - 1))
555 
556 static uint64_t
557 opt_pcbe_pes_addr(uint_t counter)
558 {
559 	ASSERT3U(counter, <, opd.opd_ncounters);
560 	return (PES_BASE_ADDR + counter);
561 }
562 
563 static uint64_t
564 opt_pcbe_pes_ext_addr(uint_t counter)
565 {
566 	ASSERT3U(counter, <, opd.opd_ncounters);
567 	return (PES_EXT_BASE_ADDR + 2 * counter);
568 }
569 
570 static uint64_t
571 opt_pcbe_pic_addr(uint_t counter)
572 {
573 	ASSERT3U(counter, <, opd.opd_ncounters);
574 	return (PIC_BASE_ADDR + counter);
575 }
576 
577 static uint64_t
578 opt_pcbe_pic_ext_addr(uint_t counter)
579 {
580 	ASSERT3U(counter, <, opd.opd_ncounters);
581 	return (PIC_EXT_BASE_ADDR + 2 * counter);
582 }
583 
584 static int
585 opt_pcbe_init(void)
586 {
587 	const amd_event_t		*evp;
588 	const amd_generic_event_t	*gevp;
589 	x86_uarchrev_t			uarchrev;
590 
591 	amd_family = cpuid_getfamily(CPU);
592 	amd_model = cpuid_getmodel(CPU);
593 	uarchrev = cpuid_getuarchrev(CPU);
594 
595 	/*
	 * Make sure this really _is_ a supported AMD or Hygon system. The
	 * kernel loads this module based on its name in the module directory,
	 * but it could have been renamed.
599 	 */
600 	if ((cpuid_getvendor(CPU) != X86_VENDOR_AMD || amd_family < 0xf) &&
601 	    cpuid_getvendor(CPU) != X86_VENDOR_HYGON)
602 		return (-1);
603 
604 	if (amd_family == 0xf) {
605 		/* Some tools expect this string for family 0fh */
606 		(void) snprintf(amd_pcbe_impl_name, sizeof (amd_pcbe_impl_name),
607 		    "AMD Opteron & Athlon64");
608 	} else {
609 		(void) snprintf(amd_pcbe_impl_name, sizeof (amd_pcbe_impl_name),
610 		    "%s Family %02xh",
611 		    cpuid_getvendor(CPU) == X86_VENDOR_HYGON ? "Hygon" : "AMD",
612 		    amd_family);
613 	}
614 
615 	/*
616 	 * Determine whether or not the extended counter set is supported on
617 	 * this processor.
618 	 */
619 	if (is_x86_feature(x86_featureset, X86FSET_AMD_PCEC)) {
620 		opd.opd_ncounters = OPT_PCBE_EXT_NCOUNTERS;
621 		opd.opd_pesf = opt_pcbe_pes_ext_addr;
622 		opd.opd_picf = opt_pcbe_pic_ext_addr;
623 	} else {
624 		opd.opd_ncounters = OPT_PCBE_DEF_NCOUNTERS;
625 		opd.opd_pesf = opt_pcbe_pes_addr;
626 		opd.opd_picf = opt_pcbe_pic_addr;
627 	}
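	/* Bitmask with one bit set for each available counter. */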
628 	opd.opd_cmask = (1 << opd.opd_ncounters) - 1;
629 
630 	/*
631 	 * Figure out processor revision here and assign appropriate
632 	 * event configuration.
633 	 */
634 	switch (uarchrev_uarch(uarchrev)) {
635 	case X86_UARCH_AMD_LEGACY:
636 		switch (amd_family) {
637 		case 0xf: {
638 			x86_chiprev_t rev;
639 
640 			rev = cpuid_getchiprev(CPU);
641 
642 			if (chiprev_at_least(rev,
643 			    X86_CHIPREV_AMD_LEGACY_F_REV_F)) {
644 				amd_pcbe_cpuref = amd_fam_f_NPT_bkdg;
645 			} else {
646 				amd_pcbe_cpuref = amd_fam_f_rev_ae_bkdg;
647 			}
648 			amd_events = family_f_events;
649 			amd_generic_events = opt_generic_events;
650 			break;
651 		}
652 		case 0x10:
653 			amd_pcbe_cpuref = amd_fam_10h_bkdg;
654 			amd_events = family_10h_events;
655 			amd_generic_events = family_10h_generic_events;
656 			break;
657 		case 0x11:
658 			amd_pcbe_cpuref = amd_fam_11h_bkdg;
659 			amd_events = family_11h_events;
660 			amd_generic_events = opt_generic_events;
661 			break;
662 		default:
663 			return (-1);
664 		}
665 		break;
666 	case X86_UARCH_AMD_ZEN1:
667 	case X86_UARCH_AMD_ZENPLUS:
668 		amd_pcbe_cpuref = amd_fam_17h_zen1_reg;
669 		amd_events = opteron_pcbe_f17h_zen1_events;
670 		amd_generic_events = family_17h_zen1_papi_events;
671 		break;
672 	case X86_UARCH_AMD_ZEN2:
673 		amd_pcbe_cpuref = amd_fam_17h_zen2_reg;
674 		amd_events = opteron_pcbe_f17h_zen2_events;
675 		amd_generic_events = family_17h_zen2_papi_events;
676 		break;
677 	case X86_UARCH_AMD_ZEN3:
678 		amd_pcbe_cpuref = amd_fam_19h_zen3_reg;
679 		amd_events = opteron_pcbe_f19h_zen3_events;
680 		amd_generic_events = family_19h_zen3_papi_events;
681 		break;
682 	case X86_UARCH_AMD_ZEN4:
683 		amd_pcbe_cpuref = amd_fam_19h_zen4_reg;
684 		amd_events = opteron_pcbe_f19h_zen4_events;
685 		amd_generic_events = family_19h_zen4_papi_events;
686 		break;
687 	case X86_UARCH_AMD_ZEN5:
688 		amd_pcbe_cpuref = amd_fam_1ah_zen5_reg;
689 		amd_events = opteron_pcbe_f1ah_zen5_events;
690 		amd_generic_events = family_1ah_zen5_papi_events;
691 		break;
692 	default:
693 		/*
		 * Different families assign different meanings to events and,
		 * even worse (like family 15h), have different constraints
		 * around programming these values.
697 		 */
698 		return (-1);
699 	}
700 
701 	/*
702 	 * Construct event list.
703 	 *
	 * First pass:  Calculate size needed. We'll need an additional byte
	 *		for the terminating NUL written by the last strcat.
706 	 *
707 	 * Second pass: Copy strings.
708 	 */
709 	for (evp = amd_events; evp->name != NULL; evp++)
710 		evlist_sz += strlen(evp->name) + 1;
711 
712 	for (gevp = amd_generic_events; gevp->name != NULL; gevp++)
713 		evlist_sz += strlen(gevp->name) + 1;
714 
715 	evlist = kmem_alloc(evlist_sz + 1, KM_SLEEP);
716 	evlist[0] = '\0';
717 
718 	for (evp = amd_events; evp->name != NULL; evp++) {
719 		(void) strcat(evlist, evp->name);
720 		(void) strcat(evlist, ",");
721 	}
722 
723 	for (gevp = amd_generic_events; gevp->name != NULL; gevp++) {
724 		(void) strcat(evlist, gevp->name);
725 		(void) strcat(evlist, ",");
726 	}
727 
728 	/*
729 	 * Remove trailing comma.
730 	 */
731 	evlist[evlist_sz - 1] = '\0';
732 
733 	return (0);
734 }
735 
736 static uint_t
737 opt_pcbe_ncounters(void)
738 {
739 	return (opd.opd_ncounters);
740 }
741 
742 static const char *
743 opt_pcbe_impl_name(void)
744 {
745 	return (amd_pcbe_impl_name);
746 }
747 
748 static const char *
749 opt_pcbe_cpuref(void)
750 {
752 	return (amd_pcbe_cpuref);
753 }
754 
755 /*ARGSUSED*/
756 static char *
757 opt_pcbe_list_events(uint_t picnum)
758 {
759 	return (evlist);
760 }
761 
762 static char *
763 opt_pcbe_list_attrs(void)
764 {
765 	return ("edge,pc,inv,cmask,umask");
766 }
767 
768 static const amd_generic_event_t *
769 find_generic_event(char *name)
770 {
771 	const amd_generic_event_t	*gevp;
772 
773 	for (gevp = amd_generic_events; gevp->name != NULL; gevp++)
774 		if (strcmp(name, gevp->name) == 0)
775 			return (gevp);
776 
777 	return (NULL);
778 }
779 
780 static const amd_event_t *
781 find_event(char *name)
782 {
783 	const amd_event_t	*evp;
784 
785 	for (evp = amd_events; evp->name != NULL; evp++)
786 		if (strcmp(name, evp->name) == 0)
787 			return (evp);
788 
789 	return (NULL);
790 }
791 
792 /*ARGSUSED*/
793 static uint64_t
794 opt_pcbe_event_coverage(char *event)
795 {
796 	/*
	 * Check whether the counter event is supported.
798 	 */
799 	if (find_event(event) == NULL && find_generic_event(event) == NULL)
800 		return (0);
801 
802 	/*
803 	 * Fortunately, all counters can count all events.
804 	 */
805 	return (opd.opd_cmask);
806 }
807 
808 static uint64_t
809 opt_pcbe_overflow_bitmap(void)
810 {
811 	/*
812 	 * Unfortunately, this chip cannot detect which counter overflowed, so
813 	 * we must act as if they all did.
814 	 */
815 	return (opd.opd_cmask);
816 }
817 
818 /*ARGSUSED*/
819 static int
820 opt_pcbe_configure(uint_t picnum, char *event, uint64_t preset, uint32_t flags,
821     uint_t nattrs, kcpc_attr_t *attrs, void **data, void *token)
822 {
823 	opt_pcbe_config_t		*cfg;
824 	const amd_event_t		*evp;
825 	amd_event_t			ev_raw = { "raw", 0};
826 	const amd_generic_event_t	*gevp;
827 	int				i;
828 	uint64_t			evsel = 0, evsel_tmp = 0;
829 
830 	/*
831 	 * If we've been handed an existing configuration, we need only preset
832 	 * the counter value.
833 	 */
834 	if (*data != NULL) {
835 		cfg = *data;
836 		cfg->opt_rawpic = preset & MASK48;
837 		return (0);
838 	}
839 
840 	if (picnum >= opd.opd_ncounters)
841 		return (CPC_INVALID_PICNUM);
842 
843 	if ((evp = find_event(event)) == NULL) {
844 		if ((gevp = find_generic_event(event)) != NULL) {
845 			evp = find_event(gevp->event);
846 			ASSERT(evp != NULL);
847 
848 			if (nattrs > 0)
849 				return (CPC_ATTRIBUTE_OUT_OF_RANGE);
850 
851 			evsel |= gevp->umask << OPT_PES_UMASK_SHIFT;
852 		} else {
853 			long tmp;
854 
855 			/*
856 			 * If ddi_strtol() likes this event, use it as a raw
857 			 * event code.
858 			 */
859 			if (ddi_strtol(event, NULL, 0, &tmp) != 0)
860 				return (CPC_INVALID_EVENT);
861 
862 			ev_raw.emask = tmp;
863 			evp = &ev_raw;
864 		}
865 	}
866 
867 	/*
	 * Configuration of the EventSelect register. While certain bits might
	 * not be supported on some families (e.g. Guest/Host on family 11h),
	 * setting those bits is harmless.
871 	 */
872 
	/* Clear both the GuestOnly and HostOnly bits */
874 	evsel &= ~OPT_PES_HOST;
875 	evsel &= ~OPT_PES_GUEST;
876 
877 	/* Set bits [35:32] for extended part of Event Select field */
	evsel_tmp = (evp->emask & 0x0f00) >> 8;
879 	evsel |= evsel_tmp << OPT_PES_EVSELHI_SHIFT;
880 
881 	evsel |= evp->emask & 0x00ff;
882 	evsel |= evp->unit << OPT_PES_UMASK_SHIFT;
883 
884 	if (flags & CPC_COUNT_USER)
885 		evsel |= OPT_PES_USR;
886 	if (flags & CPC_COUNT_SYSTEM)
887 		evsel |= OPT_PES_OS;
888 	if (flags & CPC_OVF_NOTIFY_EMT)
889 		evsel |= OPT_PES_INT;
890 
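	/*
	 * Fold any user-specified attributes into the event select value,
	 * checking that the cmask and umask values fit in their 8-bit fields.
	 */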
891 	for (i = 0; i < nattrs; i++) {
892 		if (strcmp(attrs[i].ka_name, "edge") == 0) {
893 			if (attrs[i].ka_val != 0)
894 				evsel |= OPT_PES_EDGE;
895 		} else if (strcmp(attrs[i].ka_name, "pc") == 0) {
896 			if (attrs[i].ka_val != 0)
897 				evsel |= OPT_PES_PC;
898 		} else if (strcmp(attrs[i].ka_name, "inv") == 0) {
899 			if (attrs[i].ka_val != 0)
900 				evsel |= OPT_PES_INV;
901 		} else if (strcmp(attrs[i].ka_name, "cmask") == 0) {
902 			if ((attrs[i].ka_val | OPT_PES_CMASK_MASK) !=
903 			    OPT_PES_CMASK_MASK)
904 				return (CPC_ATTRIBUTE_OUT_OF_RANGE);
905 			evsel |= attrs[i].ka_val << OPT_PES_CMASK_SHIFT;
906 		} else if (strcmp(attrs[i].ka_name, "umask") == 0) {
907 			if ((attrs[i].ka_val | OPT_PES_UMASK_MASK) !=
908 			    OPT_PES_UMASK_MASK)
909 				return (CPC_ATTRIBUTE_OUT_OF_RANGE);
910 			evsel |= attrs[i].ka_val << OPT_PES_UMASK_SHIFT;
911 		} else
912 			return (CPC_INVALID_ATTRIBUTE);
913 	}
914 
915 	cfg = kmem_alloc(sizeof (*cfg), KM_SLEEP);
916 
917 	cfg->opt_picno = picnum;
918 	cfg->opt_evsel = evsel;
919 	cfg->opt_rawpic = preset & MASK48;
920 
921 	*data = cfg;
922 	return (0);
923 }
924 
925 static void
926 opt_pcbe_program(void *token)
927 {
928 	opt_pcbe_config_t	*cfgs[OPT_PCBE_EXT_NCOUNTERS] = { &nullcfgs[0],
929 						&nullcfgs[1], &nullcfgs[2],
930 						&nullcfgs[3], &nullcfgs[4],
931 						&nullcfgs[5] };
932 	opt_pcbe_config_t	*pcfg = NULL;
933 	int			i;
934 	ulong_t			curcr4 = getcr4();
935 
936 	/*
937 	 * Allow nonprivileged code to read the performance counters if desired.
938 	 */
939 	if (kcpc_allow_nonpriv(token))
940 		setcr4(curcr4 | CR4_PCE);
941 	else
942 		setcr4(curcr4 & ~CR4_PCE);
943 
944 	/*
945 	 * Query kernel for all configs which will be co-programmed.
946 	 */
947 	do {
948 		pcfg = (opt_pcbe_config_t *)kcpc_next_config(token, pcfg, NULL);
949 
950 		if (pcfg != NULL) {
951 			ASSERT(pcfg->opt_picno < opd.opd_ncounters);
952 			cfgs[pcfg->opt_picno] = pcfg;
953 		}
954 	} while (pcfg != NULL);
955 
956 	/*
	 * Program in two loops. The first loop configures and presets each
	 * counter, and the second enables the counters. This ensures that the
	 * counters are all enabled as closely together in time as possible.
960 	 */
961 
962 	for (i = 0; i < opd.opd_ncounters; i++) {
963 		wrmsr(opd.opd_pesf(i), cfgs[i]->opt_evsel);
964 		wrmsr(opd.opd_picf(i), cfgs[i]->opt_rawpic);
965 	}
966 
967 	for (i = 0; i < opd.opd_ncounters; i++) {
968 		wrmsr(opd.opd_pesf(i), cfgs[i]->opt_evsel |
969 		    (uint64_t)(uintptr_t)OPT_PES_ENABLE);
970 	}
971 }
972 
973 static void
974 opt_pcbe_allstop(void)
975 {
976 	int		i;
977 
978 	for (i = 0; i < opd.opd_ncounters; i++)
979 		wrmsr(opd.opd_pesf(i), 0ULL);
980 
981 	/*
982 	 * Disable non-privileged access to the counter registers.
983 	 */
984 	setcr4(getcr4() & ~CR4_PCE);
985 }
986 
987 static void
988 opt_pcbe_sample(void *token)
989 {
990 	opt_pcbe_config_t	*cfgs[OPT_PCBE_EXT_NCOUNTERS] = { NULL, NULL,
991 						NULL, NULL, NULL, NULL };
992 	opt_pcbe_config_t	*pcfg = NULL;
993 	int			i;
994 	uint64_t		curpic[OPT_PCBE_EXT_NCOUNTERS];
995 	uint64_t		*addrs[OPT_PCBE_EXT_NCOUNTERS];
996 	uint64_t		*tmp;
997 	int64_t			diff;
998 
999 	for (i = 0; i < opd.opd_ncounters; i++)
1000 		curpic[i] = rdmsr(opd.opd_picf(i));
1001 
1002 	/*
1003 	 * Query kernel for all configs which are co-programmed.
1004 	 */
1005 	do {
1006 		pcfg = (opt_pcbe_config_t *)kcpc_next_config(token, pcfg, &tmp);
1007 
1008 		if (pcfg != NULL) {
1009 			ASSERT3U(pcfg->opt_picno, <, opd.opd_ncounters);
1010 			cfgs[pcfg->opt_picno] = pcfg;
1011 			addrs[pcfg->opt_picno] = tmp;
1012 		}
1013 	} while (pcfg != NULL);
1014 
1015 	for (i = 0; i < opd.opd_ncounters; i++) {
1016 		if (cfgs[i] == NULL)
1017 			continue;
1018 
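		/*
		 * The counters are 48 bits wide; masking the difference to 48
		 * bits accounts for the counter having wrapped since it was
		 * last programmed or sampled.
		 */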
1019 		diff = (curpic[i] - cfgs[i]->opt_rawpic) & MASK48;
1020 		*addrs[i] += diff;
1021 		DTRACE_PROBE4(opt__pcbe__sample, int, i, uint64_t, *addrs[i],
1022 		    uint64_t, curpic[i], uint64_t, cfgs[i]->opt_rawpic);
1023 		cfgs[i]->opt_rawpic = *addrs[i] & MASK48;
1024 	}
1025 }
1026 
1027 static void
1028 opt_pcbe_free(void *config)
1029 {
1030 	kmem_free(config, sizeof (opt_pcbe_config_t));
1031 }
1032 
1033 
1034 static struct modlpcbe modlpcbe = {
1035 	&mod_pcbeops,
1036 	"AMD Performance Counters",
1037 	&opt_pcbe_ops
1038 };
1039 
1040 static struct modlinkage modl = {
1041 	MODREV_1,
1042 	&modlpcbe,
1043 };
1044 
1045 int
1046 _init(void)
1047 {
1048 	int ret;
1049 
1050 	if (opt_pcbe_init() != 0)
1051 		return (ENOTSUP);
1052 
1053 	if ((ret = mod_install(&modl)) != 0)
1054 		kmem_free(evlist, evlist_sz + 1);
1055 
1056 	return (ret);
1057 }
1058 
1059 int
1060 _fini(void)
1061 {
1062 	int ret;
1063 
1064 	if ((ret = mod_remove(&modl)) == 0)
1065 		kmem_free(evlist, evlist_sz + 1);
1066 	return (ret);
1067 }
1068 
1069 int
1070 _info(struct modinfo *mi)
1071 {
1072 	return (mod_info(&modl, mi));
1073 }
1074