xref: /freebsd/lib/libpmc/libpmc_pmu_util.c (revision 62cfcf62f627e5093fb37026a6d8c98e4d2ef04c)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/sysctl.h>
#include <stddef.h>
#include <stdlib.h>
#include <limits.h>
#include <regex.h>
#include <string.h>
#include <pmc.h>
#include <pmclog.h>
#include <assert.h>
#include <libpmcstat.h>
#include "pmu-events/pmu-events.h"

#if defined(__amd64__) || defined(__i386__)
struct pmu_alias {
	const char *pa_alias;
	const char *pa_name;
};

typedef enum {
	PMU_INVALID,
	PMU_INTEL,
	PMU_AMD,
} pmu_mfr_t;

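/*
 * Tables mapping generic, "perf"-style event aliases to the vendor-specific
 * event names used in the pmu-events tables.  Lookups are case-insensitive.
 */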
static struct pmu_alias pmu_intel_alias_table[] = {
	{"UNHALTED_CORE_CYCLES", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
	{"UNHALTED-CORE-CYCLES", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
	{"LLC_MISSES", "LONGEST_LAT_CACHE.MISS"},
	{"LLC-MISSES", "LONGEST_LAT_CACHE.MISS"},
	{"LLC_REFERENCE", "LONGEST_LAT_CACHE.REFERENCE"},
	{"LLC-REFERENCE", "LONGEST_LAT_CACHE.REFERENCE"},
	{"LLC_MISS_RHITM", "mem_load_l3_miss_retired.remote_hitm"},
	{"LLC-MISS-RHITM", "mem_load_l3_miss_retired.remote_hitm"},
	{"RESOURCE_STALL", "RESOURCE_STALLS.ANY"},
	{"RESOURCE_STALLS_ANY", "RESOURCE_STALLS.ANY"},
	{"BRANCH_INSTRUCTION_RETIRED", "BR_INST_RETIRED.ALL_BRANCHES"},
	{"BRANCH-INSTRUCTION-RETIRED", "BR_INST_RETIRED.ALL_BRANCHES"},
	{"BRANCH_MISSES_RETIRED", "BR_MISP_RETIRED.ALL_BRANCHES"},
	{"BRANCH-MISSES-RETIRED", "BR_MISP_RETIRED.ALL_BRANCHES"},
	{"cycles", "tsc-tsc"},
	{"unhalted-cycles", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
	{"instructions", "inst-retired.any_p"},
	{"branch-mispredicts", "br_misp_retired.all_branches"},
	{"branches", "br_inst_retired.all_branches"},
	{"interrupts", "hw_interrupts.received"},
	{"ic-misses", "frontend_retired.l1i_miss"},
	{NULL, NULL},
};

static struct pmu_alias pmu_amd_alias_table[] = {
	{"UNHALTED_CORE_CYCLES", "ls_not_halted_cyc"},
	{"UNHALTED-CORE-CYCLES", "ls_not_halted_cyc"},
	{NULL, NULL},
};

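/*
 * Determine the CPU vendor from the kern.hwpmc.cpuid sysctl so that the
 * proper alias table and allocation path can be selected.
 */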
static pmu_mfr_t
pmu_events_mfr(void)
{
	char *buf;
	size_t s;
	pmu_mfr_t mfr;

	if (sysctlbyname("kern.hwpmc.cpuid", (void *)NULL, &s,
	    (void *)NULL, 0) == -1)
		return (PMU_INVALID);
	if ((buf = malloc(s + 1)) == NULL)
		return (PMU_INVALID);
	if (sysctlbyname("kern.hwpmc.cpuid", buf, &s,
	    (void *)NULL, 0) == -1) {
		free(buf);
		return (PMU_INVALID);
	}
	if (strcasestr(buf, "AuthenticAMD") != NULL ||
	    strcasestr(buf, "HygonGenuine") != NULL)
		mfr = PMU_AMD;
	else if (strcasestr(buf, "GenuineIntel") != NULL)
		mfr = PMU_INTEL;
	else
		mfr = PMU_INVALID;
	free(buf);
	return (mfr);
}

/*
 * The Intel fixed mode counters are:
 *	"inst_retired.any",
 *	"cpu_clk_unhalted.thread",
 *	"cpu_clk_unhalted.thread_any",
 *	"cpu_clk_unhalted.ref_tsc"
 */

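/*
 * Translate a user-supplied alias into the vendor event name it stands for;
 * the name is returned unchanged if no alias matches.
 */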
static const char *
pmu_alias_get(const char *name)
{
	pmu_mfr_t mfr;
	struct pmu_alias *pa;
	struct pmu_alias *pmu_alias_table;

	if ((mfr = pmu_events_mfr()) == PMU_INVALID)
		return (name);
	if (mfr == PMU_AMD)
		pmu_alias_table = pmu_amd_alias_table;
	else if (mfr == PMU_INTEL)
		pmu_alias_table = pmu_intel_alias_table;
	else
		return (name);

	for (pa = pmu_alias_table; pa->pa_alias != NULL; pa++)
		if (strcasecmp(name, pa->pa_alias) == 0)
			return (pa->pa_name);

	return (name);
}

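/*
 * Decoded form of a pmu-events "event" string, i.e. the key=value pairs
 * parsed out by pmu_parse_event() below.
 */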
struct pmu_event_desc {
	uint64_t ped_period;
	uint64_t ped_offcore_rsp;
	uint64_t ped_l3_thread;
	uint64_t ped_l3_slice;
	uint32_t ped_event;
	uint32_t ped_frontend;
	uint32_t ped_ldlat;
	uint32_t ped_config1;
	int16_t	ped_umask;
	uint8_t	ped_cmask;
	uint8_t	ped_any;
	uint8_t	ped_inv;
	uint8_t	ped_edge;
	uint8_t	ped_fc_mask;
	uint8_t	ped_ch_mask;
};

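/*
 * Find the pmu_events_map entry whose cpuid regular expression matches the
 * given cpuid string (or the value of kern.hwpmc.cpuid if cpuid is NULL).
 */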
static const struct pmu_events_map *
pmu_events_map_get(const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	size_t s, len;
	char buf[64];
	int match;
	const struct pmu_events_map *pme;

	if (cpuid != NULL) {
		strlcpy(buf, cpuid, sizeof(buf));
	} else {
		if (sysctlbyname("kern.hwpmc.cpuid", (void *)NULL, &s,
		    (void *)NULL, 0) == -1)
			return (NULL);
		if (s >= sizeof(buf))
			return (NULL);
		if (sysctlbyname("kern.hwpmc.cpuid", buf, &s,
		    (void *)NULL, 0) == -1)
			return (NULL);
	}
	for (pme = pmu_events_map; pme->cpuid != NULL; pme++) {
		if (regcomp(&re, pme->cpuid, REG_EXTENDED) != 0) {
			printf("regex '%s' failed to compile, ignoring\n",
			    pme->cpuid);
			continue;
		}
		match = regexec(&re, buf, 1, pmatch, 0);
		regfree(&re);
		if (match == 0) {
			len = pmatch[0].rm_eo - pmatch[0].rm_so;
			if (len == strlen(buf))
				return (pme);
		}
	}
	return (NULL);
}

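/*
 * Look up an event by name in the table selected by pmu_events_map_get();
 * optionally return its index within that table via idx.
 */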
static const struct pmu_event *
pmu_event_get(const char *cpuid, const char *event_name, int *idx)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;
	int i;

	if ((pme = pmu_events_map_get(cpuid)) == NULL)
		return (NULL);
	for (i = 0, pe = pme->table; pe->name || pe->desc || pe->event; pe++, i++) {
		if (pe->name == NULL)
			continue;
		if (strcasecmp(pe->name, event_name) == 0) {
			if (idx)
				*idx = i;
			return (pe);
		}
	}
	return (NULL);
}

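/*
 * Public lookup helpers: map an event name (or alias) to its table index
 * and back.
 */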
int
pmc_pmu_idx_get_by_event(const char *cpuid, const char *event)
{
	int idx;
	const char *realname;

	realname = pmu_alias_get(event);
	if (pmu_event_get(cpuid, realname, &idx) == NULL)
		return (-1);
	return (idx);
}

const char *
pmc_pmu_event_get_by_idx(const char *cpuid, int idx)
{
	const struct pmu_events_map *pme;

	if ((pme = pmu_events_map_get(cpuid)) == NULL)
		return (NULL);
	assert(pme->table[idx].name);
	return (pme->table[idx].name);
}

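/*
 * Parse a pmu-events "event" string of comma-separated key=value pairs
 * (event, umask, period, cmask, ...) into a pmu_event_desc.  Unrecognized
 * keys are reported only when PMUDEBUG=true is set in the environment.
 */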
static int
pmu_parse_event(struct pmu_event_desc *ped, const char *eventin)
{
	char *event;
	char *kvp, *key, *value, *r;
	char *debug;

	if ((event = strdup(eventin)) == NULL)
		return (ENOMEM);
	r = event;
	bzero(ped, sizeof(*ped));
	ped->ped_period = DEFAULT_SAMPLE_COUNT;
	ped->ped_umask = -1;
	while ((kvp = strsep(&event, ",")) != NULL) {
		key = strsep(&kvp, "=");
		if (key == NULL)
			abort();
		value = kvp;
		if (strcmp(key, "umask") == 0)
			ped->ped_umask = strtol(value, NULL, 16);
		else if (strcmp(key, "event") == 0)
			ped->ped_event = strtol(value, NULL, 16);
		else if (strcmp(key, "period") == 0)
			ped->ped_period = strtol(value, NULL, 10);
		else if (strcmp(key, "offcore_rsp") == 0)
			ped->ped_offcore_rsp = strtol(value, NULL, 16);
		else if (strcmp(key, "any") == 0)
			ped->ped_any = strtol(value, NULL, 10);
		else if (strcmp(key, "cmask") == 0)
			ped->ped_cmask = strtol(value, NULL, 10);
		else if (strcmp(key, "inv") == 0)
			ped->ped_inv = strtol(value, NULL, 10);
		else if (strcmp(key, "edge") == 0)
			ped->ped_edge = strtol(value, NULL, 10);
		else if (strcmp(key, "frontend") == 0)
			ped->ped_frontend = strtol(value, NULL, 16);
		else if (strcmp(key, "ldlat") == 0)
			ped->ped_ldlat = strtol(value, NULL, 16);
		else if (strcmp(key, "fc_mask") == 0)
			ped->ped_fc_mask = strtol(value, NULL, 16);
		else if (strcmp(key, "ch_mask") == 0)
			ped->ped_ch_mask = strtol(value, NULL, 16);
		else if (strcmp(key, "config1") == 0)
			ped->ped_config1 = strtol(value, NULL, 16);
		else if (strcmp(key, "l3_thread_mask") == 0)
			ped->ped_l3_thread = strtol(value, NULL, 16);
		else if (strcmp(key, "l3_slice_mask") == 0)
			ped->ped_l3_slice = strtol(value, NULL, 16);
		else {
			debug = getenv("PMUDEBUG");
			if (debug != NULL && strcmp(debug, "true") == 0 && value != NULL)
				printf("unrecognized kvpair: %s:%s\n", key, value);
		}
	}
	free(r);
	return (0);
}

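/*
 * Return the default sampling period for an event, falling back to
 * DEFAULT_SAMPLE_COUNT when the event (or its period) is unknown.
 */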
uint64_t
pmc_pmu_sample_rate_get(const char *event_name)
{
	const struct pmu_event *pe;
	struct pmu_event_desc ped;

	event_name = pmu_alias_get(event_name);
	if ((pe = pmu_event_get(NULL, event_name, NULL)) == NULL)
		return (DEFAULT_SAMPLE_COUNT);
	if (pe->alias && (pe = pmu_event_get(NULL, pe->alias, NULL)) == NULL)
		return (DEFAULT_SAMPLE_COUNT);
	if (pe->event == NULL)
		return (DEFAULT_SAMPLE_COUNT);
	if (pmu_parse_event(&ped, pe->event))
		return (DEFAULT_SAMPLE_COUNT);
	return (ped.ped_period);
}

int
pmc_pmu_enabled(void)
{

	return (pmu_events_map_get(NULL) != NULL);
}

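/*
 * Listing helpers: print counter names, short and long descriptions, and
 * the full pmu-events record for events matching an optional substring.
 */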
void
pmc_pmu_print_counters(const char *event_name)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;
	struct pmu_event_desc ped;
	char *debug;
	int do_debug;

	debug = getenv("PMUDEBUG");
	do_debug = 0;

	if (debug != NULL && strcmp(debug, "true") == 0)
		do_debug = 1;
	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (event_name != NULL && strcasestr(pe->name, event_name) == NULL)
			continue;
		printf("\t%s\n", pe->name);
		if (do_debug)
			pmu_parse_event(&ped, pe->event);
	}
}

void
pmc_pmu_print_counter_desc(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) != NULL &&
		    pe->desc != NULL)
			printf("%s:\t%s\n", pe->name, pe->desc);
	}
}

void
pmc_pmu_print_counter_desc_long(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) != NULL) {
			if (pe->long_desc != NULL)
				printf("%s:\n%s\n", pe->name, pe->long_desc);
			else if (pe->desc != NULL)
				printf("%s:\t%s\n", pe->name, pe->desc);
		}
	}
}

void
pmc_pmu_print_counter_full(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) == NULL)
			continue;
		printf("name: %s\n", pe->name);
		if (pe->long_desc != NULL)
			printf("desc: %s\n", pe->long_desc);
		else if (pe->desc != NULL)
			printf("desc: %s\n", pe->desc);
		if (pe->event != NULL)
			printf("event: %s\n", pe->event);
		if (pe->topic != NULL)
			printf("topic: %s\n", pe->topic);
		if (pe->pmu != NULL)
			printf("pmu: %s\n", pe->pmu);
		if (pe->unit != NULL)
			printf("unit: %s\n", pe->unit);
		if (pe->perpkg != NULL)
			printf("perpkg: %s\n", pe->perpkg);
		if (pe->metric_expr != NULL)
			printf("metric_expr: %s\n", pe->metric_expr);
		if (pe->metric_name != NULL)
			printf("metric_name: %s\n", pe->metric_name);
		if (pe->metric_group != NULL)
			printf("metric_group: %s\n", pe->metric_group);
	}
}

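/*
 * Fill in the MD (AMD K8-class) portion of a pmcallocate request from a
 * parsed event descriptor, selecting the core, L3 cache, or data fabric
 * sub-class based on the event's topic.
 */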
static int
pmc_pmu_amd_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
	struct pmu_event_desc *ped)
{
	struct pmc_md_amd_op_pmcallocate *amd;
	const struct pmu_event *pe;
	int idx = -1;

	amd = &pm->pm_md.pm_amd;
	if (ped->ped_umask > 0) {
		pm->pm_caps |= PMC_CAP_QUALIFIER;
		amd->pm_amd_config |= AMD_PMC_TO_UNITMASK(ped->ped_umask);
	}
	pm->pm_class = PMC_CLASS_K8;
	pe = pmu_event_get(NULL, event_name, &idx);

	if (pe != NULL && pe->topic != NULL &&
	    strcmp("l3cache", pe->topic) == 0) {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_L3_CACHE;
		amd->pm_amd_config |= AMD_PMC_TO_L3SLICE(ped->ped_l3_slice);
		amd->pm_amd_config |= AMD_PMC_TO_L3CORE(ped->ped_l3_thread);
	} else if (pe != NULL && pe->topic != NULL &&
	    strcmp("data fabric", pe->topic) == 0) {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK_DF(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_DATA_FABRIC;
	} else {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_CORE;
		if ((pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0 ||
		    (pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) ==
		    (PMC_CAP_USER|PMC_CAP_SYSTEM))
			amd->pm_amd_config |= (AMD_PMC_USR | AMD_PMC_OS);
		else if (pm->pm_caps & PMC_CAP_USER)
			amd->pm_amd_config |= AMD_PMC_USR;
		else if (pm->pm_caps & PMC_CAP_SYSTEM)
			amd->pm_amd_config |= AMD_PMC_OS;
		if (ped->ped_edge)
			amd->pm_amd_config |= AMD_PMC_EDGE;
		if (ped->ped_inv)
			amd->pm_amd_config |= AMD_PMC_INVERT;
		if (pm->pm_caps & PMC_CAP_INTERRUPT)
			amd->pm_amd_config |= AMD_PMC_INT;
	}
	return (0);
}

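/*
 * Fill in the MD (Intel) portion of a pmcallocate request: uncore events
 * go to the UCP class, fixed-function counters to IAF, and everything else
 * to the programmable IAP class.
 */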
static int
pmc_pmu_intel_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
	struct pmu_event_desc *ped)
{
	struct pmc_md_iap_op_pmcallocate *iap;
	int isfixed;

	isfixed = 0;
	iap = &pm->pm_md.pm_iap;
	if (strcasestr(event_name, "UNC_") == event_name ||
	    strcasestr(event_name, "uncore") != NULL) {
		pm->pm_class = PMC_CLASS_UCP;
		pm->pm_caps |= PMC_CAP_QUALIFIER;
	} else if ((ped->ped_umask == -1) ||
	    (ped->ped_event == 0x0 && ped->ped_umask == 0x3)) {
		pm->pm_class = PMC_CLASS_IAF;
	} else {
		pm->pm_class = PMC_CLASS_IAP;
		pm->pm_caps |= PMC_CAP_QUALIFIER;
	}
	iap->pm_iap_config |= IAP_EVSEL(ped->ped_event);
	if (ped->ped_umask > 0)
		iap->pm_iap_config |= IAP_UMASK(ped->ped_umask);
	iap->pm_iap_config |= IAP_CMASK(ped->ped_cmask);
	iap->pm_iap_rsp = ped->ped_offcore_rsp;

	if ((pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0 ||
	    (pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) ==
	    (PMC_CAP_USER|PMC_CAP_SYSTEM))
		iap->pm_iap_config |= (IAP_USR | IAP_OS);
	else if (pm->pm_caps & PMC_CAP_USER)
		iap->pm_iap_config |= IAP_USR;
	else if (pm->pm_caps & PMC_CAP_SYSTEM)
		iap->pm_iap_config |= IAP_OS;
	if (ped->ped_edge)
		iap->pm_iap_config |= IAP_EDGE;
	if (ped->ped_any)
		iap->pm_iap_config |= IAP_ANY;
	if (ped->ped_inv)
		iap->pm_iap_config |= IAP_INV;
	if (pm->pm_caps & PMC_CAP_INTERRUPT)
		iap->pm_iap_config |= IAP_INT;
	return (0);
}

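/*
 * Top-level pmcallocate hook: resolve the event name through the alias and
 * pmu-events tables, parse its descriptor, and dispatch to the
 * vendor-specific allocation routine.
 */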
int
pmc_pmu_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm)
{
	const struct pmu_event *pe;
	struct pmu_event_desc ped;
	pmu_mfr_t mfr;
	int idx = -1;

	if ((mfr = pmu_events_mfr()) == PMU_INVALID)
		return (ENOENT);

	bzero(&pm->pm_md, sizeof(pm->pm_md));
	pm->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	event_name = pmu_alias_get(event_name);
	if ((pe = pmu_event_get(NULL, event_name, &idx)) == NULL)
		return (ENOENT);
	if (pe->alias && (pe = pmu_event_get(NULL, pe->alias, &idx)) == NULL)
		return (ENOENT);
	assert(idx >= 0);
	pm->pm_ev = idx;

	if (pe->event == NULL)
		return (ENOENT);
	if (pmu_parse_event(&ped, pe->event))
		return (ENOENT);

	if (mfr == PMU_INTEL)
		return (pmc_pmu_intel_pmcallocate(event_name, pm, &ped));
	else
		return (pmc_pmu_amd_pmcallocate(event_name, pm, &ped));
}

/*
 * Default set of counters used in stat mode.  These are Intel event names;
 * ultimately we rely on AMD calling theirs the same.
 */
static const char *stat_mode_cntrs[] = {
	"cpu_clk_unhalted.thread",
	"inst_retired.any",
	"br_inst_retired.all_branches",
	"br_misp_retired.all_branches",
	"longest_lat_cache.reference",
	"longest_lat_cache.miss",
};

int
pmc_pmu_stat_mode(const char ***cntrs)
{
	if (pmc_pmu_enabled()) {
		*cntrs = stat_mode_cntrs;
		return (0);
	}
	return (EOPNOTSUPP);
}

#else

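/*
 * Stubs for platforms without pmu-events support.
 */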
uint64_t
pmc_pmu_sample_rate_get(const char *event_name __unused)
{
	return (DEFAULT_SAMPLE_COUNT);
}

void
pmc_pmu_print_counters(const char *event_name __unused)
{
}

void
pmc_pmu_print_counter_desc(const char *e __unused)
{
}

void
pmc_pmu_print_counter_desc_long(const char *e __unused)
{
}

void
pmc_pmu_print_counter_full(const char *e __unused)
{
}

int
pmc_pmu_enabled(void)
{
	return (0);
}

int
pmc_pmu_pmcallocate(const char *e __unused, struct pmc_op_pmcallocate *p __unused)
{
	return (EOPNOTSUPP);
}

const char *
pmc_pmu_event_get_by_idx(const char *c __unused, int idx __unused)
{
	return (NULL);
}

int
pmc_pmu_stat_mode(const char ***a __unused)
{
	return (EOPNOTSUPP);
}

int
pmc_pmu_idx_get_by_event(const char *c __unused, const char *e __unused)
{
	return (-1);
}

#endif