1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /*
27 * This file contains preset event names from the Performance Application
28 * Programming Interface v3.5 which included the following notice:
29 *
30 * Copyright (c) 2005,6
31 * Innovative Computing Labs
32 * Computer Science Department,
33 * University of Tennessee,
34 * Knoxville, TN.
35 * All Rights Reserved.
36 *
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions are met:
40 *
41 * * Redistributions of source code must retain the above copyright notice,
42 * this list of conditions and the following disclaimer.
43 * * Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * * Neither the name of the University of Tennessee nor the names of its
47 * contributors may be used to endorse or promote products derived from
48 * this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
51 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
54 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
55 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
56 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
57 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
58 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
59 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
60 * POSSIBILITY OF SUCH DAMAGE.
61 *
62 *
63 * This open source software license conforms to the BSD License template.
64 */
65
66 /*
67 * Portions Copyright 2009 Advanced Micro Devices, Inc.
68 * Copyright 2019 Joyent, Inc.
69 * Copyright 2024 Oxide Computer Company
70 */
71
72 /*
73 * Performance Counter Back-End for AMD Opteron, AMD Athlon 64, and Zen
74 * era processors.
75 */
76
77 #include <sys/cpuvar.h>
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/cpc_pcbe.h>
81 #include <sys/kmem.h>
82 #include <sys/sdt.h>
83 #include <sys/modctl.h>
84 #include <sys/errno.h>
85 #include <sys/debug.h>
86 #include <sys/archsystm.h>
87 #include <sys/x86_archext.h>
88 #include <sys/privregs.h>
89 #include <sys/ddi.h>
90 #include <sys/sunddi.h>
91
92 #include "opteron_pcbe_table.h"
93 #include <opteron_pcbe_cpcgen.h>
94
95 static int opt_pcbe_init(void);
96 static uint_t opt_pcbe_ncounters(void);
97 static const char *opt_pcbe_impl_name(void);
98 static const char *opt_pcbe_cpuref(void);
99 static char *opt_pcbe_list_events(uint_t picnum);
100 static char *opt_pcbe_list_attrs(void);
101 static uint64_t opt_pcbe_event_coverage(char *event);
102 static uint64_t opt_pcbe_overflow_bitmap(void);
103 static int opt_pcbe_configure(uint_t picnum, char *event, uint64_t preset,
104 uint32_t flags, uint_t nattrs, kcpc_attr_t *attrs, void **data,
105 void *token);
106 static void opt_pcbe_program(void *token);
107 static void opt_pcbe_allstop(void);
108 static void opt_pcbe_sample(void *token);
109 static void opt_pcbe_free(void *config);
110
/*
 * Callback vector handed to the kernel CPC framework; entries must stay in
 * pcbe_ops_t order. opt_pcbe_init() is invoked separately at module load.
 */
static pcbe_ops_t opt_pcbe_ops = {
	PCBE_VER_1,
	CPC_CAP_OVERFLOW_INTERRUPT,
	opt_pcbe_ncounters,
	opt_pcbe_impl_name,
	opt_pcbe_cpuref,
	opt_pcbe_list_events,
	opt_pcbe_list_attrs,
	opt_pcbe_event_coverage,
	opt_pcbe_overflow_bitmap,
	opt_pcbe_configure,
	opt_pcbe_program,
	opt_pcbe_allstop,
	opt_pcbe_sample,
	opt_pcbe_free
};
127
128 /*
129 * Base MSR addresses for the PerfEvtSel registers and the counters themselves.
130 * Add counter number to base address to get corresponding MSR address.
131 */
132 #define PES_BASE_ADDR 0xC0010000
133 #define PIC_BASE_ADDR 0xC0010004
134
135 /*
136 * Base MSR addresses for the PerfEvtSel registers and counters. The counter and
137 * event select registers are interleaved, so one needs to multiply the counter
138 * number by two to determine what they should be set to.
139 */
140 #define PES_EXT_BASE_ADDR 0xC0010200
141 #define PIC_EXT_BASE_ADDR 0xC0010201
142
143 /*
144 * The number of counters present depends on which CPU features are present.
145 */
146 #define OPT_PCBE_DEF_NCOUNTERS 4
147 #define OPT_PCBE_EXT_NCOUNTERS 6
148
149 /*
150 * Define offsets and masks for the fields in the Performance
151 * Event-Select (PES) registers.
152 */
153 #define OPT_PES_HOST_SHIFT 41
154 #define OPT_PES_GUEST_SHIFT 40
155 #define OPT_PES_EVSELHI_SHIFT 32
156 #define OPT_PES_CMASK_SHIFT 24
157 #define OPT_PES_CMASK_MASK 0xFF
158 #define OPT_PES_INV_SHIFT 23
159 #define OPT_PES_ENABLE_SHIFT 22
160 #define OPT_PES_INT_SHIFT 20
161 #define OPT_PES_PC_SHIFT 19
162 #define OPT_PES_EDGE_SHIFT 18
163 #define OPT_PES_OS_SHIFT 17
164 #define OPT_PES_USR_SHIFT 16
165 #define OPT_PES_UMASK_SHIFT 8
166 #define OPT_PES_UMASK_MASK 0xFF
167
168 #define OPT_PES_INV (1ULL << OPT_PES_INV_SHIFT)
169 #define OPT_PES_ENABLE (1ULL << OPT_PES_ENABLE_SHIFT)
170 #define OPT_PES_INT (1ULL << OPT_PES_INT_SHIFT)
171 #define OPT_PES_PC (1ULL << OPT_PES_PC_SHIFT)
172 #define OPT_PES_EDGE (1ULL << OPT_PES_EDGE_SHIFT)
173 #define OPT_PES_OS (1ULL << OPT_PES_OS_SHIFT)
174 #define OPT_PES_USR (1ULL << OPT_PES_USR_SHIFT)
175 #define OPT_PES_HOST (1ULL << OPT_PES_HOST_SHIFT)
176 #define OPT_PES_GUEST (1ULL << OPT_PES_GUEST_SHIFT)
177
/*
 * Per-counter configuration built by opt_pcbe_configure() and handed back to
 * us by the kernel through the opaque 'data' pointers.
 */
typedef struct _opt_pcbe_config {
	uint8_t opt_picno;	/* Counter number: 0 .. opd_ncounters - 1 */
				/* (up to 5 with the extended counter set) */
	uint64_t opt_evsel;	/* Event Selection register */
	uint64_t opt_rawpic;	/* Raw counter value */
} opt_pcbe_config_t;
183
/*
 * Zeroed configurations used by opt_pcbe_program() to explicitly disable any
 * counter that has no active request (evsel 0 leaves the enable bit clear).
 */
opt_pcbe_config_t nullcfgs[OPT_PCBE_EXT_NCOUNTERS] = {
	{ 0, 0, 0 },
	{ 1, 0, 0 },
	{ 2, 0, 0 },
	{ 3, 0, 0 },
	{ 4, 0, 0 },
	{ 5, 0, 0 },
};
192
/* Maps a counter number to the MSR address of one of its registers. */
typedef uint64_t (*opt_pcbe_addr_f)(uint_t);

/*
 * Description of the counter hardware on this CPU, filled in once by
 * opt_pcbe_init() depending on whether the extended counter set is present.
 */
typedef struct opt_pcbe_data {
	uint_t opd_ncounters;		/* number of hardware counters */
	uint_t opd_cmask;		/* bitmask covering all counters */
	opt_pcbe_addr_f opd_pesf;	/* PerfEvtSel MSR address function */
	opt_pcbe_addr_f opd_picf;	/* counter MSR address function */
} opt_pcbe_data_t;

opt_pcbe_data_t opd;
203
204 #define MASK48 0xFFFFFFFFFFFF
205
206 #define EV_END {NULL, 0}
207 #define GEN_EV_END {NULL, NULL, 0 }
208
209 /*
210 * The following Macros are used to define tables of events that are used by
211 * various families and some generic classes of events.
212 *
213 * When programming a performance counter there are two different values that we
214 * need to set:
215 *
216 * o Event - Determines the general class of event that is being used.
217 * o Unit - A further breakdown that gives more specific value.
218 *
219 * Prior to the introduction of family 17h support, all family specific events
220 * were programmed based on their event. The generic events, which tried to
221 * provide PAPI mappings to events specified an additional unit mask.
222 *
223 * Starting with Family 17h, CPU performance counters default to using both the
224 * unit mask and the event select. Generic events are always aliases to a
225 * specific event/unit pair, hence why the units for them are always zero. In
226 * addition, the naming of events in family 17h has been changed to reflect
227 * AMD's guide. While this is a departure from what people are used to, it is
228 * believed that matching the more detailed literature that folks are told to
229 * reference is more valuable.
230 */
231
232 #define AMD_cmn_events \
233 { "FP_dispatched_fpu_ops", 0x0 }, \
234 { "FP_cycles_no_fpu_ops_retired", 0x1 }, \
235 { "FP_dispatched_fpu_ops_ff", 0x2 }, \
236 { "LS_seg_reg_load", 0x20 }, \
237 { "LS_uarch_resync_self_modify", 0x21 }, \
238 { "LS_uarch_resync_snoop", 0x22 }, \
239 { "LS_buffer_2_full", 0x23 }, \
240 { "LS_locked_operation", 0x24 }, \
241 { "LS_retired_cflush", 0x26 }, \
242 { "LS_retired_cpuid", 0x27 }, \
243 { "DC_access", 0x40 }, \
244 { "DC_miss", 0x41 }, \
245 { "DC_refill_from_L2", 0x42 }, \
246 { "DC_refill_from_system", 0x43 }, \
247 { "DC_copyback", 0x44 }, \
248 { "DC_dtlb_L1_miss_L2_hit", 0x45 }, \
249 { "DC_dtlb_L1_miss_L2_miss", 0x46 }, \
250 { "DC_misaligned_data_ref", 0x47 }, \
251 { "DC_uarch_late_cancel_access", 0x48 }, \
252 { "DC_uarch_early_cancel_access", 0x49 }, \
253 { "DC_1bit_ecc_error_found", 0x4A }, \
254 { "DC_dispatched_prefetch_instr", 0x4B }, \
255 { "DC_dcache_accesses_by_locks", 0x4C }, \
256 { "BU_memory_requests", 0x65 }, \
257 { "BU_data_prefetch", 0x67 }, \
258 { "BU_system_read_responses", 0x6C }, \
259 { "BU_cpu_clk_unhalted", 0x76 }, \
260 { "BU_internal_L2_req", 0x7D }, \
261 { "BU_fill_req_missed_L2", 0x7E }, \
262 { "BU_fill_into_L2", 0x7F }, \
263 { "IC_fetch", 0x80 }, \
264 { "IC_miss", 0x81 }, \
265 { "IC_refill_from_L2", 0x82 }, \
266 { "IC_refill_from_system", 0x83 }, \
267 { "IC_itlb_L1_miss_L2_hit", 0x84 }, \
268 { "IC_itlb_L1_miss_L2_miss", 0x85 }, \
269 { "IC_uarch_resync_snoop", 0x86 }, \
270 { "IC_instr_fetch_stall", 0x87 }, \
271 { "IC_return_stack_hit", 0x88 }, \
272 { "IC_return_stack_overflow", 0x89 }, \
273 { "FR_retired_x86_instr_w_excp_intr", 0xC0 }, \
274 { "FR_retired_uops", 0xC1 }, \
275 { "FR_retired_branches_w_excp_intr", 0xC2 }, \
276 { "FR_retired_branches_mispred", 0xC3 }, \
277 { "FR_retired_taken_branches", 0xC4 }, \
278 { "FR_retired_taken_branches_mispred", 0xC5 }, \
279 { "FR_retired_far_ctl_transfer", 0xC6 }, \
280 { "FR_retired_resyncs", 0xC7 }, \
281 { "FR_retired_near_rets", 0xC8 }, \
282 { "FR_retired_near_rets_mispred", 0xC9 }, \
283 { "FR_retired_taken_branches_mispred_addr_miscomp", 0xCA },\
284 { "FR_retired_fastpath_double_op_instr", 0xCC }, \
285 { "FR_intr_masked_cycles", 0xCD }, \
286 { "FR_intr_masked_while_pending_cycles", 0xCE }, \
287 { "FR_taken_hardware_intrs", 0xCF }, \
288 { "FR_nothing_to_dispatch", 0xD0 }, \
289 { "FR_dispatch_stalls", 0xD1 }, \
290 { "FR_dispatch_stall_branch_abort_to_retire", 0xD2 }, \
291 { "FR_dispatch_stall_serialization", 0xD3 }, \
292 { "FR_dispatch_stall_segment_load", 0xD4 }, \
293 { "FR_dispatch_stall_reorder_buffer_full", 0xD5 }, \
294 { "FR_dispatch_stall_resv_stations_full", 0xD6 }, \
295 { "FR_dispatch_stall_fpu_full", 0xD7 }, \
296 { "FR_dispatch_stall_ls_full", 0xD8 }, \
297 { "FR_dispatch_stall_waiting_all_quiet", 0xD9 }, \
298 { "FR_dispatch_stall_far_ctl_trsfr_resync_branch_pend", 0xDA },\
299 { "FR_fpu_exception", 0xDB }, \
300 { "FR_num_brkpts_dr0", 0xDC }, \
301 { "FR_num_brkpts_dr1", 0xDD }, \
302 { "FR_num_brkpts_dr2", 0xDE }, \
303 { "FR_num_brkpts_dr3", 0xDF }, \
304 { "NB_mem_ctrlr_page_access", 0xE0 }, \
305 { "NB_mem_ctrlr_turnaround", 0xE3 }, \
306 { "NB_mem_ctrlr_bypass_counter_saturation", 0xE4 }, \
307 { "NB_cpu_io_to_mem_io", 0xE9 }, \
308 { "NB_cache_block_commands", 0xEA }, \
309 { "NB_sized_commands", 0xEB }, \
310 { "NB_ht_bus0_bandwidth", 0xF6 }
311
312 #define AMD_FAMILY_f_events \
313 { "BU_quadwords_written_to_system", 0x6D }, \
314 { "FR_retired_fpu_instr", 0xCB }, \
315 { "NB_mem_ctrlr_page_table_overflow", 0xE1 }, \
316 { "NB_sized_blocks", 0xE5 }, \
317 { "NB_ECC_errors", 0xE8 }, \
318 { "NB_probe_result", 0xEC }, \
319 { "NB_gart_events", 0xEE }, \
320 { "NB_ht_bus1_bandwidth", 0xF7 }, \
321 { "NB_ht_bus2_bandwidth", 0xF8 }
322
323 #define AMD_FAMILY_10h_events \
324 { "FP_retired_sse_ops", 0x3 }, \
325 { "FP_retired_move_ops", 0x4 }, \
326 { "FP_retired_serialize_ops", 0x5 }, \
327 { "FP_serialize_ops_cycles", 0x6 }, \
328 { "LS_cancelled_store_to_load_fwd_ops", 0x2A }, \
329 { "LS_smi_received", 0x2B }, \
330 { "DC_dtlb_L1_hit", 0x4D }, \
331 { "LS_ineffective_prefetch", 0x52 }, \
332 { "LS_global_tlb_flush", 0x54 }, \
333 { "BU_octwords_written_to_system", 0x6D }, \
334 { "Page_size_mismatches", 0x165 }, \
335 { "IC_eviction", 0x8B }, \
336 { "IC_cache_lines_invalidate", 0x8C }, \
337 { "IC_itlb_reload", 0x99 }, \
338 { "IC_itlb_reload_aborted", 0x9A }, \
339 { "FR_retired_mmx_sse_fp_instr", 0xCB }, \
340 { "Retired_x87_fp_ops", 0x1C0 }, \
341 { "IBS_ops_tagged", 0x1CF }, \
342 { "LFENCE_inst_retired", 0x1D3 }, \
343 { "SFENCE_inst_retired", 0x1D4 }, \
344 { "MFENCE_inst_retired", 0x1D5 }, \
345 { "NB_mem_ctrlr_page_table_overflow", 0xE1 }, \
346 { "NB_mem_ctrlr_dram_cmd_slots_missed", 0xE2 }, \
347 { "NB_thermal_status", 0xE8 }, \
348 { "NB_probe_results_upstream_req", 0xEC }, \
349 { "NB_gart_events", 0xEE }, \
350 { "NB_mem_ctrlr_req", 0x1F0 }, \
351 { "CB_cpu_to_dram_req_to_target", 0x1E0 }, \
352 { "CB_io_to_dram_req_to_target", 0x1E1 }, \
353 { "CB_cpu_read_cmd_latency_to_target_0_to_3", 0x1E2 }, \
354 { "CB_cpu_read_cmd_req_to_target_0_to_3", 0x1E3 }, \
355 { "CB_cpu_read_cmd_latency_to_target_4_to_7", 0x1E4 }, \
356 { "CB_cpu_read_cmd_req_to_target_4_to_7", 0x1E5 }, \
357 { "CB_cpu_cmd_latency_to_target_0_to_7", 0x1E6 }, \
358 { "CB_cpu_req_to_target_0_to_7", 0x1E7 }, \
359 { "NB_ht_bus1_bandwidth", 0xF7 }, \
360 { "NB_ht_bus2_bandwidth", 0xF8 }, \
361 { "NB_ht_bus3_bandwidth", 0x1F9 }, \
362 { "L3_read_req", 0x4E0 }, \
363 { "L3_miss", 0x4E1 }, \
364 { "L3_l2_eviction_l3_fill", 0x4E2 }, \
365 { "L3_eviction", 0x4E3 }
366
367 #define AMD_FAMILY_11h_events \
368 { "BU_quadwords_written_to_system", 0x6D }, \
369 { "FR_retired_mmx_fp_instr", 0xCB }, \
370 { "NB_mem_ctrlr_page_table_events", 0xE1 }, \
371 { "NB_thermal_status", 0xE8 }, \
372 { "NB_probe_results_upstream_req", 0xEC }, \
373 { "NB_dev_events", 0xEE }, \
374 { "NB_mem_ctrlr_req", 0x1F0 }
375
376 #define AMD_cmn_generic_events \
377 { "PAPI_br_ins", "FR_retired_branches_w_excp_intr", 0x0 },\
378 { "PAPI_br_msp", "FR_retired_branches_mispred", 0x0 }, \
379 { "PAPI_br_tkn", "FR_retired_taken_branches", 0x0 }, \
380 { "PAPI_fp_ops", "FP_dispatched_fpu_ops", 0x3 }, \
381 { "PAPI_fad_ins", "FP_dispatched_fpu_ops", 0x1 }, \
382 { "PAPI_fml_ins", "FP_dispatched_fpu_ops", 0x2 }, \
383 { "PAPI_fpu_idl", "FP_cycles_no_fpu_ops_retired", 0x0 }, \
384 { "PAPI_tot_cyc", "BU_cpu_clk_unhalted", 0x0 }, \
385 { "PAPI_tot_ins", "FR_retired_x86_instr_w_excp_intr", 0x0 }, \
386 { "PAPI_l1_dca", "DC_access", 0x0 }, \
387 { "PAPI_l1_dcm", "DC_miss", 0x0 }, \
388 { "PAPI_l1_ldm", "DC_refill_from_L2", 0xe }, \
389 { "PAPI_l1_stm", "DC_refill_from_L2", 0x10 }, \
390 { "PAPI_l1_ica", "IC_fetch", 0x0 }, \
391 { "PAPI_l1_icm", "IC_miss", 0x0 }, \
392 { "PAPI_l1_icr", "IC_fetch", 0x0 }, \
393 { "PAPI_l2_dch", "DC_refill_from_L2", 0x1e }, \
394 { "PAPI_l2_dcm", "DC_refill_from_system", 0x1e }, \
395 { "PAPI_l2_dcr", "DC_refill_from_L2", 0xe }, \
396 { "PAPI_l2_dcw", "DC_refill_from_L2", 0x10 }, \
397 { "PAPI_l2_ich", "IC_refill_from_L2", 0x0 }, \
398 { "PAPI_l2_icm", "IC_refill_from_system", 0x0 }, \
399 { "PAPI_l2_ldm", "DC_refill_from_system", 0xe }, \
400 { "PAPI_l2_stm", "DC_refill_from_system", 0x10 }, \
401 { "PAPI_res_stl", "FR_dispatch_stalls", 0x0 }, \
402 { "PAPI_stl_icy", "FR_nothing_to_dispatch", 0x0 }, \
403 { "PAPI_hw_int", "FR_taken_hardware_intrs", 0x0 }
404
405 #define OPT_cmn_generic_events \
406 { "PAPI_tlb_dm", "DC_dtlb_L1_miss_L2_miss", 0x0 }, \
407 { "PAPI_tlb_im", "IC_itlb_L1_miss_L2_miss", 0x0 }, \
408 { "PAPI_fp_ins", "FR_retired_fpu_instr", 0xd }, \
409 { "PAPI_vec_ins", "FR_retired_fpu_instr", 0x4 }
410
411 #define AMD_FAMILY_10h_generic_events \
412 { "PAPI_tlb_dm", "DC_dtlb_L1_miss_L2_miss", 0x7 }, \
413 { "PAPI_tlb_im", "IC_itlb_L1_miss_L2_miss", 0x3 }, \
414 { "PAPI_l3_dcr", "L3_read_req", 0xf1 }, \
415 { "PAPI_l3_icr", "L3_read_req", 0xf2 }, \
416 { "PAPI_l3_tcr", "L3_read_req", 0xf7 }, \
417 { "PAPI_l3_stm", "L3_miss", 0xf4 }, \
418 { "PAPI_l3_ldm", "L3_miss", 0xf3 }, \
419 { "PAPI_l3_tcm", "L3_miss", 0xf7 }
420
/*
 * Assembled event tables for the pre-Zen families: the events common to all
 * of them plus each family's own additions, terminated by EV_END. The
 * matching generic (PAPI) alias tables follow the same pattern with
 * GEN_EV_END.
 */
static const amd_event_t family_f_events[] = {
	AMD_cmn_events,
	AMD_FAMILY_f_events,
	EV_END
};

static const amd_event_t family_10h_events[] = {
	AMD_cmn_events,
	AMD_FAMILY_10h_events,
	EV_END
};

static const amd_event_t family_11h_events[] = {
	AMD_cmn_events,
	AMD_FAMILY_11h_events,
	EV_END
};

static const amd_generic_event_t opt_generic_events[] = {
	AMD_cmn_generic_events,
	OPT_cmn_generic_events,
	GEN_EV_END
};

static const amd_generic_event_t family_10h_generic_events[] = {
	AMD_cmn_generic_events,
	AMD_FAMILY_10h_generic_events,
	GEN_EV_END
};
450
451 /*
452 * For Family 17h and Family 19h, the cpcgen utility generates all of our events
453 * including ones that need specific unit codes, therefore we leave all unit
454 * codes out of these. Zen 1, Zen 2, and Zen 3 have different event sets that
455 * they support.
456 */
/* Zen 1 / Zen+ (family 17h) PAPI aliases onto cpcgen-generated events. */
static const amd_generic_event_t family_17h_zen1_papi_events[] = {
	{ "PAPI_br_cn",		"ExRetCond" },
	{ "PAPI_br_ins",	"ExRetBrn" },
	{ "PAPI_fpu_idl",	"FpSchedEmpty" },
	{ "PAPI_tot_cyc",	"LsNotHaltedCyc" },
	{ "PAPI_tot_ins",	"ExRetInstr" },
	{ "PAPI_tlb_dm",	"LsL1DTlbMiss" },
	{ "PAPI_tlb_im",	"BpL1TlbMissL2Miss" },
	GEN_EV_END
};

/* Zen 2 (family 17h) PAPI aliases; no FpSchedEmpty equivalent. */
static const amd_generic_event_t family_17h_zen2_papi_events[] = {
	{ "PAPI_br_cn",		"ExRetCond" },
	{ "PAPI_br_ins",	"ExRetBrn" },
	{ "PAPI_tot_cyc",	"LsNotHaltedCyc" },
	{ "PAPI_tot_ins",	"ExRetInstr" },
	{ "PAPI_tlb_dm",	"LsL1DTlbMiss" },
	{ "PAPI_tlb_im",	"BpL1TlbMissL2Miss" },
	GEN_EV_END
};

/* Zen 3 (family 19h) PAPI aliases; note the renamed ITLB miss event. */
static const amd_generic_event_t family_19h_zen3_papi_events[] = {
	{ "PAPI_br_cn",		"ExRetCond" },
	{ "PAPI_br_ins",	"ExRetBrn" },
	{ "PAPI_tot_cyc",	"LsNotHaltedCyc" },
	{ "PAPI_tot_ins",	"ExRetInstr" },
	{ "PAPI_tlb_dm",	"LsL1DTlbMiss" },
	{ "PAPI_tlb_im",	"BpL1TlbMissL2TlbMiss" },
	GEN_EV_END
};

/* Zen 4 (family 19h) PAPI aliases. */
static const amd_generic_event_t family_19h_zen4_papi_events[] = {
	{ "PAPI_br_cn",		"ExRetCond" },
	{ "PAPI_br_ins",	"ExRetBrn" },
	{ "PAPI_tot_cyc",	"LsNotHaltedCyc" },
	{ "PAPI_tot_ins",	"ExRetInstr" },
	{ "PAPI_tlb_dm",	"LsL1DTlbMiss" },
	{ "PAPI_tlb_im",	"BpL1TlbMissL2TlbMiss" },
	GEN_EV_END
};

/* Zen 5 (family 1Ah) PAPI aliases; event names follow AMD pub. 58550. */
static const amd_generic_event_t family_1ah_zen5_papi_events[] = {
	{ "PAPI_br_cn", "Retired_Conditional_Branch_Instructions" },
	{ "PAPI_br_ins", "Retired_Branch_Instructions" },
	{ "PAPI_br_msp",
	    "Retired_Conditional_Branch_Instructions_Mispredicted" },
	{ "PAPI_br_ucn", "Retired_Unconditional_Branch_Instructions" },
	{ "PAPI_tot_cyc", "Cycles_Not_in_Halt" },
	{ "PAPI_tot_ins", "Retired_Instructions" },
	{ "PAPI_hw_int", "Interrupts_Taken" },
	{ "PAPI_tlb_sd", "TLB_Flush_Events" },
	GEN_EV_END
};
510
/* Comma-separated list of supported event names, built in opt_pcbe_init(). */
static char *evlist;
static size_t evlist_sz;
/* Event and generic-event tables selected for the running CPU at init. */
static const amd_event_t *amd_events = NULL;
static uint_t amd_family, amd_model;
static const amd_generic_event_t *amd_generic_events = NULL;
516
517 static char amd_fam_f_rev_ae_bkdg[] = "See \"BIOS and Kernel Developer's "
518 "Guide for AMD Athlon 64 and AMD Opteron Processors\" (AMD publication 26094)";
519 static char amd_fam_f_NPT_bkdg[] = "See \"BIOS and Kernel Developer's Guide "
520 "for AMD NPT Family 0Fh Processors\" (AMD publication 32559)";
521 static char amd_fam_10h_bkdg[] = "See \"BIOS and Kernel Developer's Guide "
522 "(BKDG) For AMD Family 10h Processors\" (AMD publication 31116)";
523 static char amd_fam_11h_bkdg[] = "See \"BIOS and Kernel Developer's Guide "
524 "(BKDG) For AMD Family 11h Processors\" (AMD publication 41256)";
525 static char amd_fam_17h_zen1_reg[] = "See \"Open-Source Register Reference For "
526 "AMD Family 17h Processors Models 00h-2Fh\" (AMD publication 56255) and "
527 "amd_f17h_zen1_events(3CPC)";
528 static char amd_fam_17h_zen2_reg[] = "See \"Preliminary Processor Programming "
529 "Reference (PPR) for AMD Family 17h Model 31h, Revision B0 Processors\" "
530 "(AMD publication 55803), \"Processor Programming Reference (PPR) for AMD "
531 "Family 17h Model 71h, Revision B0 Processors\" (AMD publication 56176), and "
532 "amd_f17h_zen2_events(3CPC)";
533 static char amd_fam_19h_zen3_reg[] = "See \"Preliminary Processor Programming "
534 "Reference (PPR) for AMD Family 19h Model 01h, Revision B1 Processors Volume "
535 "1 of 2\" (AMD publication 55898), \"Processor Programming Reference (PPR) "
536 "for AMD Family 19h Model 21h, Revision B0 Processors\" (AMD publication "
537 "56214), and amd_f19h_zen3_events(3CPC)";
538 static char amd_fam_19h_zen4_reg[] = "See \"Processor Programming Reference "
539 "(PPR) for AMD Family 19h Model 11h, Revision B1 Processors Volume 1 of 6\" "
540 "(AMD publication 55901), \"Processor Programming Reference (PPR) for AMD "
541 "Family 19h Model 61h, Revision B1 Processors\" (AMD publication 56713), "
542 "\"Processor Programming Reference (PPR) for AMD Family 19h Model 70h, "
543 "Revision A0 Processors\" (AMD publication 57019), and "
544 "amd_f19h_zen4_events(3CPC)";
545 static char amd_fam_1ah_zen5_reg[] = "See \"Performance Monitor Counters "
546 "for AMD Family 1Ah Model 00h-Fh Processors\" (AMD publication 58550) and "
547 "amd_f1ah_zen5_events(3CPC)";
548
549 static char amd_pcbe_impl_name[64];
550 static char *amd_pcbe_cpuref;
551
552
553 #define BITS(v, u, l) \
554 (((v) >> (l)) & ((1 << (1 + (u) - (l))) - 1))
555
/*
 * MSR address of the legacy PerfEvtSel register for 'counter'; these
 * registers are contiguous starting at PES_BASE_ADDR.
 */
static uint64_t
opt_pcbe_pes_addr(uint_t counter)
{
	ASSERT3U(counter, <, opd.opd_ncounters);
	return (PES_BASE_ADDR + counter);
}
562
/*
 * MSR address of the extended PerfEvtSel register for 'counter'; extended
 * select and count registers are interleaved, hence the stride of two.
 */
static uint64_t
opt_pcbe_pes_ext_addr(uint_t counter)
{
	ASSERT3U(counter, <, opd.opd_ncounters);
	return (PES_EXT_BASE_ADDR + 2 * counter);
}
569
/*
 * MSR address of the legacy counter register for 'counter'; these registers
 * are contiguous starting at PIC_BASE_ADDR.
 */
static uint64_t
opt_pcbe_pic_addr(uint_t counter)
{
	ASSERT3U(counter, <, opd.opd_ncounters);
	return (PIC_BASE_ADDR + counter);
}
576
/*
 * MSR address of the extended counter register for 'counter'; interleaved
 * with the extended select registers, hence the stride of two.
 */
static uint64_t
opt_pcbe_pic_ext_addr(uint_t counter)
{
	ASSERT3U(counter, <, opd.opd_ncounters);
	return (PIC_EXT_BASE_ADDR + 2 * counter);
}
583
/*
 * Back-end initialization: verify that this is a supported AMD (or Hygon)
 * processor, size the counter set, select the event tables and reference
 * string for the detected microarchitecture, and build the event-name list.
 * Returns 0 on success or -1 when the CPU is unsupported (which causes the
 * module load to fail).
 */
static int
opt_pcbe_init(void)
{
	const amd_event_t *evp;
	const amd_generic_event_t *gevp;
	x86_uarchrev_t uarchrev;

	amd_family = cpuid_getfamily(CPU);
	amd_model = cpuid_getmodel(CPU);
	uarchrev = cpuid_getuarchrev(CPU);

	/*
	 * Make sure this really _is_ an Opteron or Athlon 64 system. The kernel
	 * loads this module based on its name in the module directory, but it
	 * could have been renamed.
	 */
	if ((cpuid_getvendor(CPU) != X86_VENDOR_AMD || amd_family < 0xf) &&
	    cpuid_getvendor(CPU) != X86_VENDOR_HYGON)
		return (-1);

	if (amd_family == 0xf) {
		/* Some tools expect this string for family 0fh */
		(void) snprintf(amd_pcbe_impl_name, sizeof (amd_pcbe_impl_name),
		    "AMD Opteron & Athlon64");
	} else {
		(void) snprintf(amd_pcbe_impl_name, sizeof (amd_pcbe_impl_name),
		    "%s Family %02xh",
		    cpuid_getvendor(CPU) == X86_VENDOR_HYGON ? "Hygon" : "AMD",
		    amd_family);
	}

	/*
	 * Determine whether or not the extended counter set is supported on
	 * this processor.
	 *
	 * If access to counters beyond the 6 defined for OPT_PCBE_EXT_NCOUNTERS
	 * are added here, the logic in HMA for saving/restoring host CPC state
	 * will also need to be updated. See: os/hma.c
	 */
	if (is_x86_feature(x86_featureset, X86FSET_AMD_PCEC)) {
		opd.opd_ncounters = OPT_PCBE_EXT_NCOUNTERS;
		opd.opd_pesf = opt_pcbe_pes_ext_addr;
		opd.opd_picf = opt_pcbe_pic_ext_addr;
	} else {
		opd.opd_ncounters = OPT_PCBE_DEF_NCOUNTERS;
		opd.opd_pesf = opt_pcbe_pes_addr;
		opd.opd_picf = opt_pcbe_pic_addr;
	}
	/* One bit per counter; used for coverage and overflow reporting. */
	opd.opd_cmask = (1 << opd.opd_ncounters) - 1;

	/*
	 * Figure out processor revision here and assign appropriate
	 * event configuration.
	 */
	switch (uarchrev_uarch(uarchrev)) {
	case X86_UARCH_AMD_LEGACY:
		switch (amd_family) {
		case 0xf: {
			x86_chiprev_t rev;

			rev = cpuid_getchiprev(CPU);

			/* Rev F (NPT) parts have their own BKDG. */
			if (chiprev_at_least(rev,
			    X86_CHIPREV_AMD_LEGACY_F_REV_F)) {
				amd_pcbe_cpuref = amd_fam_f_NPT_bkdg;
			} else {
				amd_pcbe_cpuref = amd_fam_f_rev_ae_bkdg;
			}
			amd_events = family_f_events;
			amd_generic_events = opt_generic_events;
			break;
		}
		case 0x10:
			amd_pcbe_cpuref = amd_fam_10h_bkdg;
			amd_events = family_10h_events;
			amd_generic_events = family_10h_generic_events;
			break;
		case 0x11:
			amd_pcbe_cpuref = amd_fam_11h_bkdg;
			amd_events = family_11h_events;
			amd_generic_events = opt_generic_events;
			break;
		default:
			return (-1);
		}
		break;
	case X86_UARCH_AMD_ZEN1:
	case X86_UARCH_AMD_ZENPLUS:
		amd_pcbe_cpuref = amd_fam_17h_zen1_reg;
		amd_events = opteron_pcbe_f17h_zen1_events;
		amd_generic_events = family_17h_zen1_papi_events;
		break;
	case X86_UARCH_AMD_ZEN2:
		amd_pcbe_cpuref = amd_fam_17h_zen2_reg;
		amd_events = opteron_pcbe_f17h_zen2_events;
		amd_generic_events = family_17h_zen2_papi_events;
		break;
	case X86_UARCH_AMD_ZEN3:
		amd_pcbe_cpuref = amd_fam_19h_zen3_reg;
		amd_events = opteron_pcbe_f19h_zen3_events;
		amd_generic_events = family_19h_zen3_papi_events;
		break;
	case X86_UARCH_AMD_ZEN4:
		amd_pcbe_cpuref = amd_fam_19h_zen4_reg;
		amd_events = opteron_pcbe_f19h_zen4_events;
		amd_generic_events = family_19h_zen4_papi_events;
		break;
	case X86_UARCH_AMD_ZEN5:
		amd_pcbe_cpuref = amd_fam_1ah_zen5_reg;
		amd_events = opteron_pcbe_f1ah_zen5_events;
		amd_generic_events = family_1ah_zen5_papi_events;
		break;
	default:
		/*
		 * Different families have different meanings on events and even
		 * worse (like family 15h), different constraints around
		 * programming these values.
		 */
		return (-1);
	}

	/*
	 * Construct event list.
	 *
	 * First pass: Calculate size needed. We'll need an additional byte
	 * for the NULL pointer during the last strcat.
	 *
	 * Second pass: Copy strings.
	 */
	for (evp = amd_events; evp->name != NULL; evp++)
		evlist_sz += strlen(evp->name) + 1;

	for (gevp = amd_generic_events; gevp->name != NULL; gevp++)
		evlist_sz += strlen(gevp->name) + 1;

	evlist = kmem_alloc(evlist_sz + 1, KM_SLEEP);
	evlist[0] = '\0';

	for (evp = amd_events; evp->name != NULL; evp++) {
		(void) strcat(evlist, evp->name);
		(void) strcat(evlist, ",");
	}

	for (gevp = amd_generic_events; gevp->name != NULL; gevp++) {
		(void) strcat(evlist, gevp->name);
		(void) strcat(evlist, ",");
	}

	/*
	 * Remove trailing comma.
	 */
	evlist[evlist_sz - 1] = '\0';

	return (0);
}
739
/* Number of hardware counters available on this CPU (set at init). */
static uint_t
opt_pcbe_ncounters(void)
{
	return (opd.opd_ncounters);
}
745
/* Implementation name reported to CPC consumers (built in opt_pcbe_init()). */
static const char *
opt_pcbe_impl_name(void)
{
	return (amd_pcbe_impl_name);
}
751
/* Reference-documentation string for the detected processor family. */
static const char *
opt_pcbe_cpuref(void)
{

	return (amd_pcbe_cpuref);
}
758
/*
 * Event-name list for a counter. All counters support all events, so the
 * same list is returned regardless of picnum.
 */
/*ARGSUSED*/
static char *
opt_pcbe_list_events(uint_t picnum)
{
	return (evlist);
}
765
/*
 * Names of the attributes accepted by opt_pcbe_configure(), as a
 * comma-separated list.
 */
static char *
opt_pcbe_list_attrs(void)
{
	static char attr_list[] = "edge,pc,inv,cmask,umask";

	return (attr_list);
}
771
772 static const amd_generic_event_t *
find_generic_event(char * name)773 find_generic_event(char *name)
774 {
775 const amd_generic_event_t *gevp;
776
777 for (gevp = amd_generic_events; gevp->name != NULL; gevp++)
778 if (strcmp(name, gevp->name) == 0)
779 return (gevp);
780
781 return (NULL);
782 }
783
784 static const amd_event_t *
find_event(char * name)785 find_event(char *name)
786 {
787 const amd_event_t *evp;
788
789 for (evp = amd_events; evp->name != NULL; evp++)
790 if (strcmp(name, evp->name) == 0)
791 return (evp);
792
793 return (NULL);
794 }
795
796 /*ARGSUSED*/
797 static uint64_t
opt_pcbe_event_coverage(char * event)798 opt_pcbe_event_coverage(char *event)
799 {
800 /*
801 * Check whether counter event is supported
802 */
803 if (find_event(event) == NULL && find_generic_event(event) == NULL)
804 return (0);
805
806 /*
807 * Fortunately, all counters can count all events.
808 */
809 return (opd.opd_cmask);
810 }
811
/* Bitmap of counters that may have overflowed on an interrupt. */
static uint64_t
opt_pcbe_overflow_bitmap(void)
{
	/*
	 * Unfortunately, this chip cannot detect which counter overflowed, so
	 * we must act as if they all did.
	 */
	return (opd.opd_cmask);
}
821
822 /*ARGSUSED*/
823 static int
opt_pcbe_configure(uint_t picnum,char * event,uint64_t preset,uint32_t flags,uint_t nattrs,kcpc_attr_t * attrs,void ** data,void * token)824 opt_pcbe_configure(uint_t picnum, char *event, uint64_t preset, uint32_t flags,
825 uint_t nattrs, kcpc_attr_t *attrs, void **data, void *token)
826 {
827 opt_pcbe_config_t *cfg;
828 const amd_event_t *evp;
829 amd_event_t ev_raw = { "raw", 0};
830 const amd_generic_event_t *gevp;
831 int i;
832 uint64_t evsel = 0, evsel_tmp = 0;
833
834 /*
835 * If we've been handed an existing configuration, we need only preset
836 * the counter value.
837 */
838 if (*data != NULL) {
839 cfg = *data;
840 cfg->opt_rawpic = preset & MASK48;
841 return (0);
842 }
843
844 if (picnum >= opd.opd_ncounters)
845 return (CPC_INVALID_PICNUM);
846
847 if ((evp = find_event(event)) == NULL) {
848 if ((gevp = find_generic_event(event)) != NULL) {
849 evp = find_event(gevp->event);
850 ASSERT(evp != NULL);
851
852 if (nattrs > 0)
853 return (CPC_ATTRIBUTE_OUT_OF_RANGE);
854
855 evsel |= gevp->umask << OPT_PES_UMASK_SHIFT;
856 } else {
857 long tmp;
858
859 /*
860 * If ddi_strtol() likes this event, use it as a raw
861 * event code.
862 */
863 if (ddi_strtol(event, NULL, 0, &tmp) != 0)
864 return (CPC_INVALID_EVENT);
865
866 ev_raw.emask = tmp;
867 evp = &ev_raw;
868 }
869 }
870
871 /*
872 * Configuration of EventSelect register. While on some families
873 * certain bits might not be supported (e.g. Guest/Host on family
874 * 11h), setting these bits is harmless
875 */
876
877 /* Set GuestOnly bit to 0 and HostOnly bit to 1 */
878 evsel &= ~OPT_PES_HOST;
879 evsel &= ~OPT_PES_GUEST;
880
881 /* Set bits [35:32] for extended part of Event Select field */
882 evsel_tmp = evp->emask & 0x0f00;
883 evsel |= evsel_tmp << OPT_PES_EVSELHI_SHIFT;
884
885 evsel |= evp->emask & 0x00ff;
886 evsel |= evp->unit << OPT_PES_UMASK_SHIFT;
887
888 if (flags & CPC_COUNT_USER)
889 evsel |= OPT_PES_USR;
890 if (flags & CPC_COUNT_SYSTEM)
891 evsel |= OPT_PES_OS;
892 if (flags & CPC_OVF_NOTIFY_EMT)
893 evsel |= OPT_PES_INT;
894
895 for (i = 0; i < nattrs; i++) {
896 if (strcmp(attrs[i].ka_name, "edge") == 0) {
897 if (attrs[i].ka_val != 0)
898 evsel |= OPT_PES_EDGE;
899 } else if (strcmp(attrs[i].ka_name, "pc") == 0) {
900 if (attrs[i].ka_val != 0)
901 evsel |= OPT_PES_PC;
902 } else if (strcmp(attrs[i].ka_name, "inv") == 0) {
903 if (attrs[i].ka_val != 0)
904 evsel |= OPT_PES_INV;
905 } else if (strcmp(attrs[i].ka_name, "cmask") == 0) {
906 if ((attrs[i].ka_val | OPT_PES_CMASK_MASK) !=
907 OPT_PES_CMASK_MASK)
908 return (CPC_ATTRIBUTE_OUT_OF_RANGE);
909 evsel |= attrs[i].ka_val << OPT_PES_CMASK_SHIFT;
910 } else if (strcmp(attrs[i].ka_name, "umask") == 0) {
911 if ((attrs[i].ka_val | OPT_PES_UMASK_MASK) !=
912 OPT_PES_UMASK_MASK)
913 return (CPC_ATTRIBUTE_OUT_OF_RANGE);
914 evsel |= attrs[i].ka_val << OPT_PES_UMASK_SHIFT;
915 } else
916 return (CPC_INVALID_ATTRIBUTE);
917 }
918
919 cfg = kmem_alloc(sizeof (*cfg), KM_SLEEP);
920
921 cfg->opt_picno = picnum;
922 cfg->opt_evsel = evsel;
923 cfg->opt_rawpic = preset & MASK48;
924
925 *data = cfg;
926 return (0);
927 }
928
/*
 * Program the hardware performance counters with every configuration the
 * kernel has associated with this CPC context (token).  Counters with no
 * active request are programmed with the all-zero null config, which leaves
 * them disabled.
 */
static void
opt_pcbe_program(void *token)
{
	/* Default every slot to the disabled null config. */
	opt_pcbe_config_t *cfgs[OPT_PCBE_EXT_NCOUNTERS] = { &nullcfgs[0],
						&nullcfgs[1], &nullcfgs[2],
						&nullcfgs[3], &nullcfgs[4],
						&nullcfgs[5] };
	opt_pcbe_config_t *pcfg = NULL;
	int i;
	ulong_t curcr4 = getcr4();

	/*
	 * Allow nonprivileged code to read the performance counters if desired.
	 */
	if (kcpc_allow_nonpriv(token))
		setcr4(curcr4 | CR4_PCE);
	else
		setcr4(curcr4 & ~CR4_PCE);

	/*
	 * Query kernel for all configs which will be co-programmed.
	 */
	do {
		pcfg = (opt_pcbe_config_t *)kcpc_next_config(token, pcfg, NULL);

		if (pcfg != NULL) {
			ASSERT(pcfg->opt_picno < opd.opd_ncounters);
			cfgs[pcfg->opt_picno] = pcfg;
		}
	} while (pcfg != NULL);

	/*
	 * Program in two loops. The first configures and presets the counter,
	 * and the second loop enables the counters. This ensures that the
	 * counters are all enabled as closely together in time as possible.
	 */

	for (i = 0; i < opd.opd_ncounters; i++) {
		/* Event-select register (enable bit still clear). */
		wrmsr(opd.opd_pesf(i), cfgs[i]->opt_evsel);
		/* Preset the counter register to the saved raw value. */
		wrmsr(opd.opd_picf(i), cfgs[i]->opt_rawpic);
	}

	for (i = 0; i < opd.opd_ncounters; i++) {
		/* Rewrite each event-select with the enable bit set. */
		wrmsr(opd.opd_pesf(i), cfgs[i]->opt_evsel |
		    (uint64_t)(uintptr_t)OPT_PES_ENABLE);
	}
}
976
977 static void
opt_pcbe_allstop(void)978 opt_pcbe_allstop(void)
979 {
980 int i;
981
982 for (i = 0; i < opd.opd_ncounters; i++)
983 wrmsr(opd.opd_pesf(i), 0ULL);
984
985 /*
986 * Disable non-privileged access to the counter registers.
987 */
988 setcr4(getcr4() & ~CR4_PCE);
989 }
990
/*
 * Sample the hardware counters for every config co-programmed under this
 * CPC context and accumulate the deltas into the kernel-supplied 64-bit
 * virtual counters.
 */
static void
opt_pcbe_sample(void *token)
{
	opt_pcbe_config_t *cfgs[OPT_PCBE_EXT_NCOUNTERS] = { NULL, NULL,
						NULL, NULL, NULL, NULL };
	opt_pcbe_config_t *pcfg = NULL;
	int i;
	uint64_t curpic[OPT_PCBE_EXT_NCOUNTERS];	/* current raw MSR values */
	uint64_t *addrs[OPT_PCBE_EXT_NCOUNTERS];	/* per-pic 64-bit accumulators */
	uint64_t *tmp;
	int64_t diff;

	/* Snapshot every counter register up front. */
	for (i = 0; i < opd.opd_ncounters; i++)
		curpic[i] = rdmsr(opd.opd_picf(i));

	/*
	 * Query kernel for all configs which are co-programmed.
	 */
	do {
		/* tmp receives the kernel's accumulator for this config. */
		pcfg = (opt_pcbe_config_t *)kcpc_next_config(token, pcfg, &tmp);

		if (pcfg != NULL) {
			ASSERT3U(pcfg->opt_picno, <, opd.opd_ncounters);
			cfgs[pcfg->opt_picno] = pcfg;
			addrs[pcfg->opt_picno] = tmp;
		}
	} while (pcfg != NULL);

	for (i = 0; i < opd.opd_ncounters; i++) {
		/* Skip counters with no active config on this context. */
		if (cfgs[i] == NULL)
			continue;

		/*
		 * Counters are 48 bits wide; masking the subtraction with
		 * MASK48 yields the correct unsigned delta even if the
		 * hardware counter wrapped since the last sample.
		 */
		diff = (curpic[i] - cfgs[i]->opt_rawpic) & MASK48;
		*addrs[i] += diff;
		DTRACE_PROBE4(opt__pcbe__sample, int, i, uint64_t, *addrs[i],
		    uint64_t, curpic[i], uint64_t, cfgs[i]->opt_rawpic);
		/* Remember the new baseline (truncated to counter width). */
		cfgs[i]->opt_rawpic = *addrs[i] & MASK48;
	}
}
1030
/*
 * Release a per-counter configuration previously allocated by the
 * configure entry point.
 */
static void
opt_pcbe_free(void *config)
{
	kmem_free(config, sizeof (opt_pcbe_config_t));
}
1036
1037
/* PCBE module linkage: registers opt_pcbe_ops with the CPC framework. */
static struct modlpcbe modlpcbe = {
	&mod_pcbeops,
	"AMD Performance Counters",
	&opt_pcbe_ops
};
1043
/* Overall module linkage passed to mod_install()/mod_remove(). */
static struct modlinkage modl = {
	MODREV_1,
	&modlpcbe,
};
1048
1049 int
_init(void)1050 _init(void)
1051 {
1052 int ret;
1053
1054 if (opt_pcbe_init() != 0)
1055 return (ENOTSUP);
1056
1057 if ((ret = mod_install(&modl)) != 0)
1058 kmem_free(evlist, evlist_sz + 1);
1059
1060 return (ret);
1061 }
1062
1063 int
_fini(void)1064 _fini(void)
1065 {
1066 int ret;
1067
1068 if ((ret = mod_remove(&modl)) == 0)
1069 kmem_free(evlist, evlist_sz + 1);
1070 return (ret);
1071 }
1072
/* Module information entry point; delegates to the module framework. */
int
_info(struct modinfo *mi)
{
	return (mod_info(&modl, mi));
}
1078