xref: /freebsd/lib/libpmc/libpmc_pmu_util.c (revision ccdc986607d3a12def360e6c6d01b44f64cb0868)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/sysctl.h>
#include <stddef.h>
#include <stdlib.h>
#include <limits.h>
#include <string.h>
#include <pmc.h>
#include <pmclog.h>
#include <assert.h>
#include <libpmcstat.h>
#include "pmu-events/pmu-events.h"

#if defined(__amd64__) || defined(__i386__)
struct pmu_alias {
	const char *pa_alias;
	const char *pa_name;
};

typedef enum {
	PMU_INVALID,
	PMU_INTEL,
	PMU_AMD,
} pmu_mfr_t;

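/*
 * Tables mapping the convenience/legacy event aliases accepted on the
 * command line to the canonical event names used in the pmu-events tables.
 */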
static struct pmu_alias pmu_intel_alias_table[] = {
	{"UNHALTED_CORE_CYCLES", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
	{"UNHALTED-CORE-CYCLES", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
	{"LLC_MISSES", "LONGEST_LAT_CACHE.MISS"},
	{"LLC-MISSES", "LONGEST_LAT_CACHE.MISS"},
	{"LLC_REFERENCE", "LONGEST_LAT_CACHE.REFERENCE"},
	{"LLC-REFERENCE", "LONGEST_LAT_CACHE.REFERENCE"},
	{"LLC_MISS_RHITM", "mem_load_l3_miss_retired.remote_hitm"},
	{"LLC-MISS-RHITM", "mem_load_l3_miss_retired.remote_hitm"},
	{"RESOURCE_STALL", "RESOURCE_STALLS.ANY"},
	{"RESOURCE_STALLS_ANY", "RESOURCE_STALLS.ANY"},
	{"BRANCH_INSTRUCTION_RETIRED", "BR_INST_RETIRED.ALL_BRANCHES"},
	{"BRANCH-INSTRUCTION-RETIRED", "BR_INST_RETIRED.ALL_BRANCHES"},
	{"BRANCH_MISSES_RETIRED", "BR_MISP_RETIRED.ALL_BRANCHES"},
	{"BRANCH-MISSES-RETIRED", "BR_MISP_RETIRED.ALL_BRANCHES"},
	{"cycles", "tsc-tsc"},
	{"unhalted-cycles", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
	{"instructions", "inst-retired.any_p"},
	{"branch-mispredicts", "br_misp_retired.all_branches"},
	{"branches", "br_inst_retired.all_branches"},
	{"interrupts", "hw_interrupts.received"},
	{"ic-misses", "frontend_retired.l1i_miss"},
	{NULL, NULL},
};

static struct pmu_alias pmu_amd_alias_table[] = {
	{"UNHALTED_CORE_CYCLES", "ls_not_halted_cyc"},
	{"UNHALTED-CORE-CYCLES", "ls_not_halted_cyc"},
	{NULL, NULL},
};

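/*
 * Determine the CPU vendor from the kern.hwpmc.cpuid sysctl string so that
 * the matching alias table can be selected.
 */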
static pmu_mfr_t
pmu_events_mfr(void)
{
	char *buf;
	size_t s;
	pmu_mfr_t mfr;

	if (sysctlbyname("kern.hwpmc.cpuid", (void *)NULL, &s,
	    (void *)NULL, 0) == -1)
		return (PMU_INVALID);
	if ((buf = malloc(s + 1)) == NULL)
		return (PMU_INVALID);
	if (sysctlbyname("kern.hwpmc.cpuid", buf, &s,
	    (void *)NULL, 0) == -1) {
		free(buf);
		return (PMU_INVALID);
	}
	if (strcasestr(buf, "AuthenticAMD") != NULL)
		mfr = PMU_AMD;
	else if (strcasestr(buf, "GenuineIntel") != NULL)
		mfr = PMU_INTEL;
	else
		mfr = PMU_INVALID;
	free(buf);
	return (mfr);
}

/*
 * The Intel fixed mode counters are:
 *	"inst_retired.any",
 *	"cpu_clk_unhalted.thread",
 *	"cpu_clk_unhalted.thread_any",
 *	"cpu_clk_unhalted.ref_tsc"
 */

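/*
 * Map a user-supplied alias to its canonical event name for the detected
 * vendor; the input name is returned unchanged when no alias matches.
 */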
static const char *
pmu_alias_get(const char *name)
{
	pmu_mfr_t mfr;
	struct pmu_alias *pa;
	struct pmu_alias *pmu_alias_table;

	if ((mfr = pmu_events_mfr()) == PMU_INVALID)
		return (name);
	if (mfr == PMU_AMD)
		pmu_alias_table = pmu_amd_alias_table;
	else if (mfr == PMU_INTEL)
		pmu_alias_table = pmu_intel_alias_table;
	else
		return (name);

	for (pa = pmu_alias_table; pa->pa_alias != NULL; pa++)
		if (strcasecmp(name, pa->pa_alias) == 0)
			return (pa->pa_name);

	return (name);
}

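/*
 * Decoded form of the "key=value,..." event encoding strings found in the
 * pmu-events tables.
 */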
struct pmu_event_desc {
	uint64_t ped_period;
	uint64_t ped_offcore_rsp;
	uint64_t ped_l3_thread;
	uint64_t ped_l3_slice;
	uint32_t ped_event;
	uint32_t ped_frontend;
	uint32_t ped_ldlat;
	uint32_t ped_config1;
	int16_t	ped_umask;
	uint8_t	ped_cmask;
	uint8_t	ped_any;
	uint8_t	ped_inv;
	uint8_t	ped_edge;
	uint8_t	ped_fc_mask;
	uint8_t	ped_ch_mask;
};

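/*
 * Find the pmu_events_map entry whose cpuid matches the given string, or
 * the running CPU (via kern.hwpmc.cpuid) when cpuid is NULL.
 */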
static const struct pmu_events_map *
pmu_events_map_get(const char *cpuid)
{
	size_t s;
	char buf[64];
	const struct pmu_events_map *pme;

	if (cpuid != NULL) {
		strlcpy(buf, cpuid, sizeof(buf));
	} else {
		if (sysctlbyname("kern.hwpmc.cpuid", (void *)NULL, &s,
		    (void *)NULL, 0) == -1)
			return (NULL);
		if (s > sizeof(buf))
			return (NULL);
		if (sysctlbyname("kern.hwpmc.cpuid", buf, &s,
		    (void *)NULL, 0) == -1)
			return (NULL);
	}
	for (pme = pmu_events_map; pme->cpuid != NULL; pme++)
		if (strcmp(buf, pme->cpuid) == 0)
			return (pme);
	return (NULL);
}

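/*
 * Look up an event by name in the table for the given (or current) CPU,
 * optionally returning its index within the table via *idx.
 */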
static const struct pmu_event *
pmu_event_get(const char *cpuid, const char *event_name, int *idx)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;
	int i;

	if ((pme = pmu_events_map_get(cpuid)) == NULL)
		return (NULL);
	for (i = 0, pe = pme->table; pe->name || pe->desc || pe->event; pe++, i++) {
		if (pe->name == NULL)
			continue;
		if (strcasecmp(pe->name, event_name) == 0) {
			if (idx)
				*idx = i;
			return (pe);
		}
	}
	return (NULL);
}

int
pmc_pmu_idx_get_by_event(const char *cpuid, const char *event)
{
	int idx;
	const char *realname;

	realname = pmu_alias_get(event);
	if (pmu_event_get(cpuid, realname, &idx) == NULL)
		return (-1);
	return (idx);
}

const char *
pmc_pmu_event_get_by_idx(const char *cpuid, int idx)
{
	const struct pmu_events_map *pme;

	if ((pme = pmu_events_map_get(cpuid)) == NULL)
		return (NULL);
	assert(pme->table[idx].name);
	return (pme->table[idx].name);
}

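/*
 * Parse a comma-separated "key=value" event string into a pmu_event_desc.
 * Unrecognized keys are ignored, or reported when the PMUDEBUG environment
 * variable is set to "true".
 */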
static int
pmu_parse_event(struct pmu_event_desc *ped, const char *eventin)
{
	char *event;
	char *kvp, *key, *value, *r;
	char *debug;

	if ((event = strdup(eventin)) == NULL)
		return (ENOMEM);
	r = event;
	bzero(ped, sizeof(*ped));
	ped->ped_period = DEFAULT_SAMPLE_COUNT;
	ped->ped_umask = -1;
	while ((kvp = strsep(&event, ",")) != NULL) {
		key = strsep(&kvp, "=");
		if (key == NULL)
			abort();
		value = kvp;
		if (strcmp(key, "umask") == 0)
			ped->ped_umask = strtol(value, NULL, 16);
		else if (strcmp(key, "event") == 0)
			ped->ped_event = strtol(value, NULL, 16);
		else if (strcmp(key, "period") == 0)
			ped->ped_period = strtoull(value, NULL, 10);
		else if (strcmp(key, "offcore_rsp") == 0)
			ped->ped_offcore_rsp = strtoull(value, NULL, 16);
		else if (strcmp(key, "any") == 0)
			ped->ped_any = strtol(value, NULL, 10);
		else if (strcmp(key, "cmask") == 0)
			ped->ped_cmask = strtol(value, NULL, 10);
		else if (strcmp(key, "inv") == 0)
			ped->ped_inv = strtol(value, NULL, 10);
		else if (strcmp(key, "edge") == 0)
			ped->ped_edge = strtol(value, NULL, 10);
		else if (strcmp(key, "frontend") == 0)
			ped->ped_frontend = strtol(value, NULL, 16);
		else if (strcmp(key, "ldlat") == 0)
			ped->ped_ldlat = strtol(value, NULL, 16);
		else if (strcmp(key, "fc_mask") == 0)
			ped->ped_fc_mask = strtol(value, NULL, 16);
		else if (strcmp(key, "ch_mask") == 0)
			ped->ped_ch_mask = strtol(value, NULL, 16);
		else if (strcmp(key, "config1") == 0)
			ped->ped_config1 = strtol(value, NULL, 16);
		else if (strcmp(key, "l3_thread_mask") == 0)
			ped->ped_l3_thread = strtoull(value, NULL, 16);
		else if (strcmp(key, "l3_slice_mask") == 0)
			ped->ped_l3_slice = strtoull(value, NULL, 16);
		else {
			debug = getenv("PMUDEBUG");
			if (debug != NULL && strcmp(debug, "true") == 0 && value != NULL)
				printf("unrecognized kvpair: %s:%s\n", key, value);
		}
	}
	free(r);
	return (0);
}

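/*
 * Return the default sampling period for the named event, or
 * DEFAULT_SAMPLE_COUNT if the event (or its period) cannot be resolved.
 */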
uint64_t
pmc_pmu_sample_rate_get(const char *event_name)
{
	const struct pmu_event *pe;
	struct pmu_event_desc ped;

	event_name = pmu_alias_get(event_name);
	if ((pe = pmu_event_get(NULL, event_name, NULL)) == NULL)
		return (DEFAULT_SAMPLE_COUNT);
	if (pe->alias && (pe = pmu_event_get(NULL, pe->alias, NULL)) == NULL)
		return (DEFAULT_SAMPLE_COUNT);
	if (pe->event == NULL)
		return (DEFAULT_SAMPLE_COUNT);
	if (pmu_parse_event(&ped, pe->event))
		return (DEFAULT_SAMPLE_COUNT);
	return (ped.ped_period);
}

int
pmc_pmu_enabled(void)
{

	return (pmu_events_map_get(NULL) != NULL);
}

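/*
 * Print the names of all events known for the current CPU, optionally
 * restricted to those whose name contains event_name.
 */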
void
pmc_pmu_print_counters(const char *event_name)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;
	struct pmu_event_desc ped;
	char *debug;
	int do_debug;

	debug = getenv("PMUDEBUG");
	do_debug = 0;

	if (debug != NULL && strcmp(debug, "true") == 0)
		do_debug = 1;
	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (event_name != NULL && strcasestr(pe->name, event_name) == NULL)
			continue;
		printf("\t%s\n", pe->name);
		if (do_debug)
			pmu_parse_event(&ped, pe->event);
	}
}

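/*
 * Print "name:<tab>description" for every event whose name contains ev.
 */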
void
pmc_pmu_print_counter_desc(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) != NULL &&
		    pe->desc != NULL)
			printf("%s:\t%s\n", pe->name, pe->desc);
	}
}

void
pmc_pmu_print_counter_desc_long(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) != NULL) {
			if (pe->long_desc != NULL)
				printf("%s:\n%s\n", pe->name, pe->long_desc);
			else if (pe->desc != NULL)
				printf("%s:\t%s\n", pe->name, pe->desc);
		}
	}
}

void
pmc_pmu_print_counter_full(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) == NULL)
			continue;
		printf("name: %s\n", pe->name);
		if (pe->long_desc != NULL)
			printf("desc: %s\n", pe->long_desc);
		else if (pe->desc != NULL)
			printf("desc: %s\n", pe->desc);
		if (pe->event != NULL)
			printf("event: %s\n", pe->event);
		if (pe->topic != NULL)
			printf("topic: %s\n", pe->topic);
		if (pe->pmu != NULL)
			printf("pmu: %s\n", pe->pmu);
		if (pe->unit != NULL)
			printf("unit: %s\n", pe->unit);
		if (pe->perpkg != NULL)
			printf("perpkg: %s\n", pe->perpkg);
		if (pe->metric_expr != NULL)
			printf("metric_expr: %s\n", pe->metric_expr);
		if (pe->metric_name != NULL)
			printf("metric_name: %s\n", pe->metric_name);
		if (pe->metric_group != NULL)
			printf("metric_group: %s\n", pe->metric_group);
	}
}

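/*
 * Fill in the AMD (PMC_CLASS_K8) machine-dependent allocation arguments
 * from a parsed event description.
 */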
static int
pmc_pmu_amd_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
	struct pmu_event_desc *ped)
{
	struct pmc_md_amd_op_pmcallocate *amd;
	const struct pmu_event *pe;
	int idx = -1;

	amd = &pm->pm_md.pm_amd;
	if (ped->ped_umask > 0) {
		pm->pm_caps |= PMC_CAP_QUALIFIER;
		amd->pm_amd_config |= AMD_PMC_TO_UNITMASK(ped->ped_umask);
	}
	pm->pm_class = PMC_CLASS_K8;
	pe = pmu_event_get(NULL, event_name, &idx);

	if (pe->topic != NULL && strcmp("l3cache", pe->topic) == 0) {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_L3_CACHE;
		amd->pm_amd_config |= AMD_PMC_TO_L3SLICE(ped->ped_l3_slice);
		amd->pm_amd_config |= AMD_PMC_TO_L3CORE(ped->ped_l3_thread);
	} else if (pe->topic != NULL && strcmp("data fabric", pe->topic) == 0) {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK_DF(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_DATA_FABRIC;
	} else {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_CORE;
		if ((pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0 ||
		    (pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) ==
		    (PMC_CAP_USER|PMC_CAP_SYSTEM))
			amd->pm_amd_config |= (AMD_PMC_USR | AMD_PMC_OS);
		else if (pm->pm_caps & PMC_CAP_USER)
			amd->pm_amd_config |= AMD_PMC_USR;
		else if (pm->pm_caps & PMC_CAP_SYSTEM)
			amd->pm_amd_config |= AMD_PMC_OS;
		if (ped->ped_edge)
			amd->pm_amd_config |= AMD_PMC_EDGE;
		if (ped->ped_inv)
			amd->pm_amd_config |= AMD_PMC_INVERT;
		if (pm->pm_caps & PMC_CAP_INTERRUPT)
			amd->pm_amd_config |= AMD_PMC_INT;
	}
	return (0);
}

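/*
 * Fill in the Intel machine-dependent allocation arguments, selecting the
 * fixed-function (IAF), uncore (UCP), or programmable (IAP) class as
 * appropriate.
 */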
static int
pmc_pmu_intel_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
	struct pmu_event_desc *ped)
{
	struct pmc_md_iap_op_pmcallocate *iap;
	int isfixed;

	isfixed = 0;
	iap = &pm->pm_md.pm_iap;
	if (strcasestr(event_name, "UNC_") == event_name ||
	    strcasestr(event_name, "uncore") != NULL) {
		pm->pm_class = PMC_CLASS_UCP;
		pm->pm_caps |= PMC_CAP_QUALIFIER;
	} else if ((ped->ped_umask == -1) ||
	    (ped->ped_event == 0x0 && ped->ped_umask == 0x3)) {
		pm->pm_class = PMC_CLASS_IAF;
	} else {
		pm->pm_class = PMC_CLASS_IAP;
		pm->pm_caps |= PMC_CAP_QUALIFIER;
	}
	iap->pm_iap_config |= IAP_EVSEL(ped->ped_event);
	if (ped->ped_umask > 0)
		iap->pm_iap_config |= IAP_UMASK(ped->ped_umask);
	iap->pm_iap_config |= IAP_CMASK(ped->ped_cmask);
	iap->pm_iap_rsp = ped->ped_offcore_rsp;

	if ((pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0 ||
	    (pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) ==
	    (PMC_CAP_USER|PMC_CAP_SYSTEM))
		iap->pm_iap_config |= (IAP_USR | IAP_OS);
	else if (pm->pm_caps & PMC_CAP_USER)
		iap->pm_iap_config |= IAP_USR;
	else if (pm->pm_caps & PMC_CAP_SYSTEM)
		iap->pm_iap_config |= IAP_OS;
	if (ped->ped_edge)
		iap->pm_iap_config |= IAP_EDGE;
	if (ped->ped_any)
		iap->pm_iap_config |= IAP_ANY;
	if (ped->ped_inv)
		iap->pm_iap_config |= IAP_INV;
	if (pm->pm_caps & PMC_CAP_INTERRUPT)
		iap->pm_iap_config |= IAP_INT;
	return (0);
}

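/*
 * Translate an event name into a struct pmc_op_pmcallocate request for the
 * running CPU.
 */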
int
pmc_pmu_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm)
{
	const struct pmu_event *pe;
	struct pmu_event_desc ped;
	pmu_mfr_t mfr;
	int idx = -1;

	if ((mfr = pmu_events_mfr()) == PMU_INVALID)
		return (ENOENT);

	bzero(&pm->pm_md, sizeof(pm->pm_md));
	pm->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	event_name = pmu_alias_get(event_name);
	if ((pe = pmu_event_get(NULL, event_name, &idx)) == NULL)
		return (ENOENT);
	if (pe->alias && (pe = pmu_event_get(NULL, pe->alias, &idx)) == NULL)
		return (ENOENT);
	assert(idx >= 0);
	pm->pm_ev = idx;

	if (pe->event == NULL)
		return (ENOENT);
	if (pmu_parse_event(&ped, pe->event))
		return (ENOENT);

	if (mfr == PMU_INTEL)
		return (pmc_pmu_intel_pmcallocate(event_name, pm, &ped));
	else
		return (pmc_pmu_amd_pmcallocate(event_name, pm, &ped));
}

/*
 * Counters used in stat mode; ultimately we rely on the AMD event tables
 * using the same names as these Intel events.
 */
static const char *stat_mode_cntrs[] = {
	"cpu_clk_unhalted.thread",
	"inst_retired.any",
	"br_inst_retired.all_branches",
	"br_misp_retired.all_branches",
	"longest_lat_cache.reference",
	"longest_lat_cache.miss",
};

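/*
 * Return the list of counters used for stat mode, provided pmu-events
 * support is available for this CPU.
 */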
int
pmc_pmu_stat_mode(const char ***cntrs)
{
	if (pmc_pmu_enabled()) {
		*cntrs = stat_mode_cntrs;
		return (0);
	}
	return (EOPNOTSUPP);
}

#else

uint64_t
pmc_pmu_sample_rate_get(const char *event_name __unused)
{
	return (DEFAULT_SAMPLE_COUNT);
}

void
pmc_pmu_print_counters(const char *event_name __unused)
{
}

void
pmc_pmu_print_counter_desc(const char *e __unused)
{
}

void
pmc_pmu_print_counter_desc_long(const char *e __unused)
{
}

void
pmc_pmu_print_counter_full(const char *e __unused)
{
}

int
pmc_pmu_enabled(void)
{
	return (0);
}

int
pmc_pmu_pmcallocate(const char *e __unused, struct pmc_op_pmcallocate *p __unused)
{
	return (EOPNOTSUPP);
}

const char *
pmc_pmu_event_get_by_idx(const char *c __unused, int idx __unused)
{
	return (NULL);
}

int
pmc_pmu_stat_mode(const char ***a __unused)
{
	return (EOPNOTSUPP);
}

int
pmc_pmu_idx_get_by_event(const char *c __unused, const char *e __unused)
{
	return (-1);
}

#endif