xref: /freebsd/lib/libpmc/libpmc_pmu_util.c (revision 882f88ff77194ef2eea005232780468404438788)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/sysctl.h>
#include <stddef.h>
#include <stdlib.h>
#include <limits.h>
#include <regex.h>
#include <string.h>
#include <pmc.h>
#include <pmclog.h>
#include <assert.h>
#include <libpmcstat.h>
#include "pmu-events/pmu-events.h"

#if defined(__amd64__) || defined(__i386__)
struct pmu_alias {
	const char *pa_alias;
	const char *pa_name;
};

typedef enum {
	PMU_INVALID,
	PMU_INTEL,
	PMU_AMD,
} pmu_mfr_t;

static struct pmu_alias pmu_intel_alias_table[] = {
	{"UNHALTED_CORE_CYCLES", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
	{"UNHALTED-CORE-CYCLES", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
	{"LLC_MISSES", "LONGEST_LAT_CACHE.MISS"},
	{"LLC-MISSES", "LONGEST_LAT_CACHE.MISS"},
	{"LLC_REFERENCE", "LONGEST_LAT_CACHE.REFERENCE"},
	{"LLC-REFERENCE", "LONGEST_LAT_CACHE.REFERENCE"},
	{"LLC_MISS_RHITM", "mem_load_l3_miss_retired.remote_hitm"},
	{"LLC-MISS-RHITM", "mem_load_l3_miss_retired.remote_hitm"},
	{"RESOURCE_STALL", "RESOURCE_STALLS.ANY"},
	{"RESOURCE_STALLS_ANY", "RESOURCE_STALLS.ANY"},
	{"BRANCH_INSTRUCTION_RETIRED", "BR_INST_RETIRED.ALL_BRANCHES"},
	{"BRANCH-INSTRUCTION-RETIRED", "BR_INST_RETIRED.ALL_BRANCHES"},
	{"BRANCH_MISSES_RETIRED", "BR_MISP_RETIRED.ALL_BRANCHES"},
	{"BRANCH-MISSES-RETIRED", "BR_MISP_RETIRED.ALL_BRANCHES"},
	{"cycles", "tsc-tsc"},
	{"unhalted-cycles", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
	{"instructions", "inst-retired.any_p"},
	{"branch-mispredicts", "br_misp_retired.all_branches"},
	{"branches", "br_inst_retired.all_branches"},
	{"interrupts", "hw_interrupts.received"},
	{"ic-misses", "frontend_retired.l1i_miss"},
	{NULL, NULL},
};

static struct pmu_alias pmu_amd_alias_table[] = {
	{"UNHALTED_CORE_CYCLES", "ls_not_halted_cyc"},
	{"UNHALTED-CORE-CYCLES", "ls_not_halted_cyc"},
	{NULL, NULL},
};

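/*
 * Determine the CPU vendor by inspecting the kern.hwpmc.cpuid sysctl string.
 */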
static pmu_mfr_t
pmu_events_mfr(void)
{
	char *buf;
	size_t s;
	pmu_mfr_t mfr;

	if (sysctlbyname("kern.hwpmc.cpuid", (void *)NULL, &s,
	    (void *)NULL, 0) == -1)
		return (PMU_INVALID);
	if ((buf = malloc(s + 1)) == NULL)
		return (PMU_INVALID);
	if (sysctlbyname("kern.hwpmc.cpuid", buf, &s,
	    (void *)NULL, 0) == -1) {
		free(buf);
		return (PMU_INVALID);
	}
	if (strcasestr(buf, "AuthenticAMD") != NULL)
		mfr = PMU_AMD;
	else if (strcasestr(buf, "GenuineIntel") != NULL)
		mfr = PMU_INTEL;
	else
		mfr = PMU_INVALID;
	free(buf);
	return (mfr);
}

/*
 * The Intel fixed mode counters are:
 *	"inst_retired.any",
 *	"cpu_clk_unhalted.thread",
 *	"cpu_clk_unhalted.thread_any",
 *	"cpu_clk_unhalted.ref_tsc",
 */

static const char *
pmu_alias_get(const char *name)
{
	pmu_mfr_t mfr;
	struct pmu_alias *pa;
	struct pmu_alias *pmu_alias_table;

	if ((mfr = pmu_events_mfr()) == PMU_INVALID)
		return (name);
	if (mfr == PMU_AMD)
		pmu_alias_table = pmu_amd_alias_table;
	else if (mfr == PMU_INTEL)
		pmu_alias_table = pmu_intel_alias_table;
	else
		return (name);

	for (pa = pmu_alias_table; pa->pa_alias != NULL; pa++)
		if (strcasecmp(name, pa->pa_alias) == 0)
			return (pa->pa_name);

	return (name);
}

struct pmu_event_desc {
	uint64_t ped_period;
	uint64_t ped_offcore_rsp;
	uint64_t ped_l3_thread;
	uint64_t ped_l3_slice;
	uint32_t ped_event;
	uint32_t ped_frontend;
	uint32_t ped_ldlat;
	uint32_t ped_config1;
	int16_t	ped_umask;
	uint8_t	ped_cmask;
	uint8_t	ped_any;
	uint8_t	ped_inv;
	uint8_t	ped_edge;
	uint8_t	ped_fc_mask;
	uint8_t	ped_ch_mask;
};

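/*
 * Find the pmu-events table for the running CPU by matching its id string
 * (from the kern.hwpmc.cpuid sysctl, or the caller-supplied "cpuid") against
 * the regular expression patterns in pmu_events_map.
 */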
static const struct pmu_events_map *
pmu_events_map_get(const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	size_t s, len;
	char buf[64];
	int match;
	const struct pmu_events_map *pme;

	if (cpuid != NULL) {
		strlcpy(buf, cpuid, sizeof(buf));
	} else {
		if (sysctlbyname("kern.hwpmc.cpuid", (void *)NULL, &s,
		    (void *)NULL, 0) == -1)
			return (NULL);
		if (sysctlbyname("kern.hwpmc.cpuid", buf, &s,
		    (void *)NULL, 0) == -1)
			return (NULL);
	}
	for (pme = pmu_events_map; pme->cpuid != NULL; pme++) {
		if (regcomp(&re, pme->cpuid, REG_EXTENDED) != 0) {
			printf("regex '%s' failed to compile, ignoring\n",
			    pme->cpuid);
			continue;
		}
		match = regexec(&re, buf, 1, pmatch, 0);
		regfree(&re);
		if (match == 0) {
			len = pmatch[0].rm_eo - pmatch[0].rm_so;
			if (len == strlen(buf))
				return (pme);
		}
	}
	return (NULL);
}

static const struct pmu_event *
pmu_event_get(const char *cpuid, const char *event_name, int *idx)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;
	int i;

	if ((pme = pmu_events_map_get(cpuid)) == NULL)
		return (NULL);
	for (i = 0, pe = pme->table; pe->name || pe->desc || pe->event; pe++, i++) {
		if (pe->name == NULL)
			continue;
		if (strcasecmp(pe->name, event_name) == 0) {
			if (idx)
				*idx = i;
			return (pe);
		}
	}
	return (NULL);
}

int
pmc_pmu_idx_get_by_event(const char *cpuid, const char *event)
{
	int idx;
	const char *realname;

	realname = pmu_alias_get(event);
	if (pmu_event_get(cpuid, realname, &idx) == NULL)
		return (-1);
	return (idx);
}

const char *
pmc_pmu_event_get_by_idx(const char *cpuid, int idx)
{
	const struct pmu_events_map *pme;

	if ((pme = pmu_events_map_get(cpuid)) == NULL)
		return (NULL);
	assert(pme->table[idx].name);
	return (pme->table[idx].name);
}

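/*
 * Parse the comma-separated "key=value" encoding used by the pmu-events
 * tables into a pmu_event_desc.  The strings are of the general form
 * "event=0xd1,umask=0x20,period=100007" (illustrative; the exact keys
 * present vary by event).
 */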
static int
pmu_parse_event(struct pmu_event_desc *ped, const char *eventin)
{
	char *event;
	char *kvp, *key, *value, *r;
	char *debug;

	if ((event = strdup(eventin)) == NULL)
		return (ENOMEM);
	r = event;
	bzero(ped, sizeof(*ped));
	ped->ped_period = DEFAULT_SAMPLE_COUNT;
	ped->ped_umask = -1;
	while ((kvp = strsep(&event, ",")) != NULL) {
		key = strsep(&kvp, "=");
		if (key == NULL)
			abort();
		value = kvp;
		if (strcmp(key, "umask") == 0)
			ped->ped_umask = strtol(value, NULL, 16);
		else if (strcmp(key, "event") == 0)
			ped->ped_event = strtol(value, NULL, 16);
		else if (strcmp(key, "period") == 0)
			ped->ped_period = strtol(value, NULL, 10);
		else if (strcmp(key, "offcore_rsp") == 0)
			ped->ped_offcore_rsp = strtol(value, NULL, 16);
		else if (strcmp(key, "any") == 0)
			ped->ped_any = strtol(value, NULL, 10);
		else if (strcmp(key, "cmask") == 0)
			ped->ped_cmask = strtol(value, NULL, 10);
		else if (strcmp(key, "inv") == 0)
			ped->ped_inv = strtol(value, NULL, 10);
		else if (strcmp(key, "edge") == 0)
			ped->ped_edge = strtol(value, NULL, 10);
		else if (strcmp(key, "frontend") == 0)
			ped->ped_frontend = strtol(value, NULL, 16);
		else if (strcmp(key, "ldlat") == 0)
			ped->ped_ldlat = strtol(value, NULL, 16);
		else if (strcmp(key, "fc_mask") == 0)
			ped->ped_fc_mask = strtol(value, NULL, 16);
		else if (strcmp(key, "ch_mask") == 0)
			ped->ped_ch_mask = strtol(value, NULL, 16);
		else if (strcmp(key, "config1") == 0)
			ped->ped_config1 = strtol(value, NULL, 16);
		else if (strcmp(key, "l3_thread_mask") == 0)
			ped->ped_l3_thread = strtol(value, NULL, 16);
		else if (strcmp(key, "l3_slice_mask") == 0)
			ped->ped_l3_slice = strtol(value, NULL, 16);
		else {
			debug = getenv("PMUDEBUG");
			if (debug != NULL && strcmp(debug, "true") == 0 && value != NULL)
				printf("unrecognized kvpair: %s:%s\n", key, value);
		}
	}
	free(r);
	return (0);
}

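/*
 * Return the default sampling period for an event, falling back to
 * DEFAULT_SAMPLE_COUNT when the event or its period is unknown.
 */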
uint64_t
pmc_pmu_sample_rate_get(const char *event_name)
{
	const struct pmu_event *pe;
	struct pmu_event_desc ped;

	event_name = pmu_alias_get(event_name);
	if ((pe = pmu_event_get(NULL, event_name, NULL)) == NULL)
		return (DEFAULT_SAMPLE_COUNT);
	if (pe->alias && (pe = pmu_event_get(NULL, pe->alias, NULL)) == NULL)
		return (DEFAULT_SAMPLE_COUNT);
	if (pe->event == NULL)
		return (DEFAULT_SAMPLE_COUNT);
	if (pmu_parse_event(&ped, pe->event))
		return (DEFAULT_SAMPLE_COUNT);
	return (ped.ped_period);
}

int
pmc_pmu_enabled(void)
{

	return (pmu_events_map_get(NULL) != NULL);
}

void
pmc_pmu_print_counters(const char *event_name)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;
	struct pmu_event_desc ped;
	char *debug;
	int do_debug;

	debug = getenv("PMUDEBUG");
	do_debug = 0;

	if (debug != NULL && strcmp(debug, "true") == 0)
		do_debug = 1;
	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (event_name != NULL && strcasestr(pe->name, event_name) == NULL)
			continue;
		printf("\t%s\n", pe->name);
		if (do_debug)
			pmu_parse_event(&ped, pe->event);
	}
}

void
pmc_pmu_print_counter_desc(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) != NULL &&
		    pe->desc != NULL)
			printf("%s:\t%s\n", pe->name, pe->desc);
	}
}

void
pmc_pmu_print_counter_desc_long(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) != NULL) {
			if (pe->long_desc != NULL)
				printf("%s:\n%s\n", pe->name, pe->long_desc);
			else if (pe->desc != NULL)
				printf("%s:\t%s\n", pe->name, pe->desc);
		}
	}
}

void
pmc_pmu_print_counter_full(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) == NULL)
			continue;
		printf("name: %s\n", pe->name);
		if (pe->long_desc != NULL)
			printf("desc: %s\n", pe->long_desc);
		else if (pe->desc != NULL)
			printf("desc: %s\n", pe->desc);
		if (pe->event != NULL)
			printf("event: %s\n", pe->event);
		if (pe->topic != NULL)
			printf("topic: %s\n", pe->topic);
		if (pe->pmu != NULL)
			printf("pmu: %s\n", pe->pmu);
		if (pe->unit != NULL)
			printf("unit: %s\n", pe->unit);
		if (pe->perpkg != NULL)
			printf("perpkg: %s\n", pe->perpkg);
		if (pe->metric_expr != NULL)
			printf("metric_expr: %s\n", pe->metric_expr);
		if (pe->metric_name != NULL)
			printf("metric_name: %s\n", pe->metric_name);
		if (pe->metric_group != NULL)
			printf("metric_group: %s\n", pe->metric_group);
	}
}

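/*
 * Translate a parsed event into an AMD PMC allocation.  L3-cache and
 * data-fabric events use dedicated sub-classes; everything else is
 * programmed onto the core counters.
 */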
static int
pmc_pmu_amd_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
	struct pmu_event_desc *ped)
{
	struct pmc_md_amd_op_pmcallocate *amd;
	const struct pmu_event *pe;
	int idx = -1;

	amd = &pm->pm_md.pm_amd;
	if (ped->ped_umask > 0) {
		pm->pm_caps |= PMC_CAP_QUALIFIER;
		amd->pm_amd_config |= AMD_PMC_TO_UNITMASK(ped->ped_umask);
	}
	pm->pm_class = PMC_CLASS_K8;
	pe = pmu_event_get(NULL, event_name, &idx);

	if (pe != NULL && pe->topic != NULL &&
	    strcmp("l3cache", pe->topic) == 0) {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_L3_CACHE;
		amd->pm_amd_config |= AMD_PMC_TO_L3SLICE(ped->ped_l3_slice);
		amd->pm_amd_config |= AMD_PMC_TO_L3CORE(ped->ped_l3_thread);
	} else if (pe != NULL && pe->topic != NULL &&
	    strcmp("data fabric", pe->topic) == 0) {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK_DF(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_DATA_FABRIC;
	} else {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_CORE;
		if ((pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0 ||
		    (pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) ==
		    (PMC_CAP_USER|PMC_CAP_SYSTEM))
			amd->pm_amd_config |= (AMD_PMC_USR | AMD_PMC_OS);
		else if (pm->pm_caps & PMC_CAP_USER)
			amd->pm_amd_config |= AMD_PMC_USR;
		else if (pm->pm_caps & PMC_CAP_SYSTEM)
			amd->pm_amd_config |= AMD_PMC_OS;
		if (ped->ped_edge)
			amd->pm_amd_config |= AMD_PMC_EDGE;
		if (ped->ped_inv)
			amd->pm_amd_config |= AMD_PMC_INVERT;
		if (pm->pm_caps & PMC_CAP_INTERRUPT)
			amd->pm_amd_config |= AMD_PMC_INT;
	}
	return (0);
}

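/*
 * Translate a parsed event into an Intel PMC allocation: uncore ("UNC_*")
 * events use PMC_CLASS_UCP, the fixed-mode counters listed above use
 * PMC_CLASS_IAF, and everything else uses the programmable PMC_CLASS_IAP
 * counters.
 */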
static int
pmc_pmu_intel_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
	struct pmu_event_desc *ped)
{
	struct pmc_md_iap_op_pmcallocate *iap;
	int isfixed;

	isfixed = 0;
	iap = &pm->pm_md.pm_iap;
	if (strcasestr(event_name, "UNC_") == event_name ||
	    strcasestr(event_name, "uncore") != NULL) {
		pm->pm_class = PMC_CLASS_UCP;
		pm->pm_caps |= PMC_CAP_QUALIFIER;
	} else if ((ped->ped_umask == -1) ||
	    (ped->ped_event == 0x0 && ped->ped_umask == 0x3)) {
		pm->pm_class = PMC_CLASS_IAF;
	} else {
		pm->pm_class = PMC_CLASS_IAP;
		pm->pm_caps |= PMC_CAP_QUALIFIER;
	}
	iap->pm_iap_config |= IAP_EVSEL(ped->ped_event);
	if (ped->ped_umask > 0)
		iap->pm_iap_config |= IAP_UMASK(ped->ped_umask);
	iap->pm_iap_config |= IAP_CMASK(ped->ped_cmask);
	iap->pm_iap_rsp = ped->ped_offcore_rsp;

	if ((pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0 ||
	    (pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) ==
	    (PMC_CAP_USER|PMC_CAP_SYSTEM))
		iap->pm_iap_config |= (IAP_USR | IAP_OS);
	else if (pm->pm_caps & PMC_CAP_USER)
		iap->pm_iap_config |= IAP_USR;
	else if (pm->pm_caps & PMC_CAP_SYSTEM)
		iap->pm_iap_config |= IAP_OS;
	if (ped->ped_edge)
		iap->pm_iap_config |= IAP_EDGE;
	if (ped->ped_any)
		iap->pm_iap_config |= IAP_ANY;
	if (ped->ped_inv)
		iap->pm_iap_config |= IAP_INV;
	if (pm->pm_caps & PMC_CAP_INTERRUPT)
		iap->pm_iap_config |= IAP_INT;
	return (0);
}

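/*
 * Fill in a pmc_op_pmcallocate request for the named event: resolve any
 * alias, look the event up in the pmu-events table, parse its encoding and
 * hand off to the vendor-specific helper above.
 */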
int
pmc_pmu_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm)
{
	const struct pmu_event *pe;
	struct pmu_event_desc ped;
	pmu_mfr_t mfr;
	int idx = -1;

	if ((mfr = pmu_events_mfr()) == PMU_INVALID)
		return (ENOENT);

	bzero(&pm->pm_md, sizeof(pm->pm_md));
	pm->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	event_name = pmu_alias_get(event_name);
	if ((pe = pmu_event_get(NULL, event_name, &idx)) == NULL)
		return (ENOENT);
	if (pe->alias && (pe = pmu_event_get(NULL, pe->alias, &idx)) == NULL)
		return (ENOENT);
	assert(idx >= 0);
	pm->pm_ev = idx;

	if (pe->event == NULL)
		return (ENOENT);
	if (pmu_parse_event(&ped, pe->event))
		return (ENOENT);

	if (mfr == PMU_INTEL)
		return (pmc_pmu_intel_pmcallocate(event_name, pm, &ped));
	else
		return (pmc_pmu_amd_pmcallocate(event_name, pm, &ped));
}

/*
 * Counters used for "stat" mode; ultimately we rely on the AMD event
 * tables naming these the same way.
 */
static const char *stat_mode_cntrs[] = {
	"cpu_clk_unhalted.thread",
	"inst_retired.any",
	"br_inst_retired.all_branches",
	"br_misp_retired.all_branches",
	"longest_lat_cache.reference",
	"longest_lat_cache.miss",
};

int
pmc_pmu_stat_mode(const char ***cntrs)
{
	if (pmc_pmu_enabled()) {
		*cntrs = stat_mode_cntrs;
		return (0);
	}
	return (EOPNOTSUPP);
}

#else

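/*
 * Stubs for platforms without pmu-events support.
 */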
uint64_t
pmc_pmu_sample_rate_get(const char *event_name __unused)
{
	return (DEFAULT_SAMPLE_COUNT);
}

void
pmc_pmu_print_counters(const char *event_name __unused)
{
}

void
pmc_pmu_print_counter_desc(const char *e __unused)
{
}

void
pmc_pmu_print_counter_desc_long(const char *e __unused)
{
}

void
pmc_pmu_print_counter_full(const char *e __unused)
{
}

int
pmc_pmu_enabled(void)
{
	return (0);
}

int
pmc_pmu_pmcallocate(const char *e __unused, struct pmc_op_pmcallocate *p __unused)
{
	return (EOPNOTSUPP);
}

const char *
pmc_pmu_event_get_by_idx(const char *c __unused, int idx __unused)
{
	return (NULL);
}

int
pmc_pmu_stat_mode(const char ***a __unused)
{
	return (EOPNOTSUPP);
}

int
pmc_pmu_idx_get_by_event(const char *c __unused, const char *e __unused)
{
	return (-1);
}

#endif