/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy
 * Copyright (c) 2021, The FreeBSD Foundation
 *
 * Portions of this software were developed by Mitchell Horne
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/pmc.h>
#include <sys/sysctl.h>
#include <stddef.h>
#include <stdlib.h>
#include <limits.h>
#include <regex.h>
#include <string.h>
#include <pmc.h>
#include <pmclog.h>
#include <assert.h>
#include <libpmcstat.h>
#include "pmu-events/pmu-events.h"

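/*
 * Aliases map the generic event names accepted by pmc(3) consumers onto the
 * vendor-specific event names found in the pmu-events tables.
 */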
struct pmu_alias {
	const char *pa_alias;
	const char *pa_name;
};

#if defined(__amd64__) || defined(__i386__)
typedef enum {
	PMU_INVALID,
	PMU_INTEL,
	PMU_AMD,
} pmu_mfr_t;

static struct pmu_alias pmu_intel_alias_table[] = {
	{"UNHALTED_CORE_CYCLES", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
	{"UNHALTED-CORE-CYCLES", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
	{"LLC_MISSES", "LONGEST_LAT_CACHE.MISS"},
	{"LLC-MISSES", "LONGEST_LAT_CACHE.MISS"},
	{"LLC_REFERENCE", "LONGEST_LAT_CACHE.REFERENCE"},
	{"LLC-REFERENCE", "LONGEST_LAT_CACHE.REFERENCE"},
	{"LLC_MISS_RHITM", "mem_load_l3_miss_retired.remote_hitm"},
	{"LLC-MISS-RHITM", "mem_load_l3_miss_retired.remote_hitm"},
	{"RESOURCE_STALL", "RESOURCE_STALLS.ANY"},
	{"RESOURCE_STALLS_ANY", "RESOURCE_STALLS.ANY"},
	{"BRANCH_INSTRUCTION_RETIRED", "BR_INST_RETIRED.ALL_BRANCHES"},
	{"BRANCH-INSTRUCTION-RETIRED", "BR_INST_RETIRED.ALL_BRANCHES"},
	{"BRANCH_MISSES_RETIRED", "BR_MISP_RETIRED.ALL_BRANCHES"},
	{"BRANCH-MISSES-RETIRED", "BR_MISP_RETIRED.ALL_BRANCHES"},
	{"unhalted-cycles", "CPU_CLK_UNHALTED.THREAD_P_ANY"},
	{"instructions", "inst_retired.any_p"},
	{"branch-mispredicts", "br_misp_retired.all_branches"},
	{"branches", "br_inst_retired.all_branches"},
	{"interrupts", "hw_interrupts.received"},
	{"ic-misses", "frontend_retired.l1i_miss"},
	{NULL, NULL},
};

static struct pmu_alias pmu_amd_alias_table[] = {
	{"UNHALTED_CORE_CYCLES", "ls_not_halted_cyc"},
	{"UNHALTED-CORE-CYCLES", "ls_not_halted_cyc"},
	{"LLC_MISSES", "l3_comb_clstr_state.request_miss"},
	{"LLC-MISSES", "l3_comb_clstr_state.request_miss"},
	{"LLC_REFERENCE", "l3_request_g1.caching_l3_cache_accesses"},
	{"LLC-REFERENCE", "l3_request_g1.caching_l3_cache_accesses"},
	{"BRANCH_INSTRUCTION_RETIRED", "ex_ret_brn"},
	{"BRANCH-INSTRUCTION-RETIRED", "ex_ret_brn"},
	{"BRANCH_MISSES_RETIRED", "ex_ret_brn_misp"},
	{"BRANCH-MISSES-RETIRED", "ex_ret_brn_misp"},
	{"unhalted-cycles", "ls_not_halted_cyc"},
	{"instructions", "ex_ret_instr"},
	{"branch-mispredicts", "ex_ret_brn_misp"},
	{"branches", "ex_ret_brn"},
	{"interrupts", "ls_int_taken"}, /* Not on amdzen1 */
	{NULL, NULL},
};

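/*
 * Determine the CPU vendor from the kern.hwpmc.cpuid sysctl string.
 */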
static pmu_mfr_t
pmu_events_mfr(void)
{
	char buf[PMC_CPUID_LEN];
	size_t s = sizeof(buf);
	pmu_mfr_t mfr;

	if (sysctlbyname("kern.hwpmc.cpuid", buf, &s,
	    (void *)NULL, 0) == -1)
		return (PMU_INVALID);
	if (strcasestr(buf, "AuthenticAMD") != NULL ||
	    strcasestr(buf, "HygonGenuine") != NULL)
		mfr = PMU_AMD;
	else if (strcasestr(buf, "GenuineIntel") != NULL)
		mfr = PMU_INTEL;
	else
		mfr = PMU_INVALID;
	return (mfr);
}

/*
 * The Intel fixed mode counters are:
 *	"inst_retired.any",
 *	"cpu_clk_unhalted.thread",
 *	"cpu_clk_unhalted.thread_any",
 *	"cpu_clk_unhalted.ref_tsc",
 */

static const char *
pmu_alias_get(const char *name)
{
	pmu_mfr_t mfr;
	struct pmu_alias *pa;
	struct pmu_alias *pmu_alias_table;

	if ((mfr = pmu_events_mfr()) == PMU_INVALID)
		return (name);
	if (mfr == PMU_AMD)
		pmu_alias_table = pmu_amd_alias_table;
	else if (mfr == PMU_INTEL)
		pmu_alias_table = pmu_intel_alias_table;
	else
		return (name);

	for (pa = pmu_alias_table; pa->pa_alias != NULL; pa++)
		if (strcasecmp(name, pa->pa_alias) == 0)
			return (pa->pa_name);

	return (name);
}
#elif defined(__powerpc64__)

static const char *
pmu_alias_get(const char *name)
{
	return (name);
}

#elif defined(__aarch64__)

static struct pmu_alias pmu_armv8_alias_table[] = {
	{NULL, NULL},
};

static const char *
pmu_alias_get(const char *name)
{
	struct pmu_alias *pa;

	for (pa = pmu_armv8_alias_table; pa->pa_alias != NULL; pa++)
		if (strcasecmp(name, pa->pa_alias) == 0)
			return (pa->pa_name);

	return (name);
}

#else

static const char *
pmu_alias_get(const char *name)
{

	return (name);
}
#endif

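/*
 * Parsed representation of the "event" string attached to a pmu-events
 * table entry: the event code, unit mask, default sample period and the
 * assorted modifier bits encoded there as "key=value" pairs.
 */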
struct pmu_event_desc {
	uint64_t ped_period;
	uint64_t ped_offcore_rsp;
	uint64_t ped_l3_thread;
	uint64_t ped_l3_slice;
	uint32_t ped_event;
	uint32_t ped_frontend;
	uint32_t ped_ldlat;
	uint32_t ped_config1;
	int16_t	ped_umask;
	uint8_t	ped_cmask;
	uint8_t	ped_any;
	uint8_t	ped_inv;
	uint8_t	ped_edge;
	uint8_t	ped_fc_mask;
	uint8_t	ped_ch_mask;
};

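/*
 * Match a CPU identification string (the caller-supplied cpuid, or the
 * value of kern.hwpmc.cpuid when cpuid is NULL) against the regular
 * expressions in pmu_events_map and return the corresponding event table,
 * or NULL if no table applies.
 */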
static const struct pmu_events_map *
pmu_events_map_get(const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	char buf[PMC_CPUID_LEN];
	size_t s = sizeof(buf);
	int match;
	const struct pmu_events_map *pme;

	if (cpuid != NULL) {
		strlcpy(buf, cpuid, s);
	} else {
		if (sysctlbyname("kern.hwpmc.cpuid", buf, &s,
		    (void *)NULL, 0) == -1)
			return (NULL);
	}
	for (pme = pmu_events_map; pme->cpuid != NULL; pme++) {
		if (regcomp(&re, pme->cpuid, REG_EXTENDED) != 0) {
			printf("regex '%s' failed to compile, ignoring\n",
			    pme->cpuid);
			continue;
		}
		match = regexec(&re, buf, 1, pmatch, 0);
		regfree(&re);
		if (match == 0) {
			if (pmatch[0].rm_so == 0 && (buf[pmatch[0].rm_eo] == 0
			    || buf[pmatch[0].rm_eo] == '-'))
				return (pme);
		}
	}
	return (NULL);
}

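/*
 * Look up an event by name in the table for the given CPU, optionally
 * returning its index within that table via 'idx'.
 */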
static const struct pmu_event *
pmu_event_get(const char *cpuid, const char *event_name, int *idx)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;
	int i;

	if ((pme = pmu_events_map_get(cpuid)) == NULL)
		return (NULL);
	for (i = 0, pe = pme->table; pe->name || pe->desc || pe->event; pe++, i++) {
		if (pe->name == NULL)
			continue;
		if (strcasecmp(pe->name, event_name) == 0) {
			if (idx)
				*idx = i;
			return (pe);
		}
	}
	return (NULL);
}

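/*
 * Return the table index of the named event (resolving aliases first), or
 * -1 if the event is not known for this CPU.
 */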
int
pmc_pmu_idx_get_by_event(const char *cpuid, const char *event)
{
	int idx;
	const char *realname;

	realname = pmu_alias_get(event);
	if (pmu_event_get(cpuid, realname, &idx) == NULL)
		return (-1);
	return (idx);
}

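/*
 * Return the canonical event name stored at the given table index.
 */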
const char *
pmc_pmu_event_get_by_idx(const char *cpuid, int idx)
{
	const struct pmu_events_map *pme;

	if ((pme = pmu_events_map_get(cpuid)) == NULL)
		return (NULL);
	assert(pme->table[idx].name);
	return (pme->table[idx].name);
}

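/*
 * Parse a comma-separated "key=value" event specification from the
 * pmu-events tables (for example, a string of the form
 * "event=0xc2,umask=0x1,period=2000003") into a pmu_event_desc.
 * Unrecognized keys are ignored, or reported when the PMUDEBUG
 * environment variable is set to "true".
 */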
static int
pmu_parse_event(struct pmu_event_desc *ped, const char *eventin)
{
	char *event;
	char *kvp, *key, *value, *r;
	char *debug;

	if ((event = strdup(eventin)) == NULL)
		return (ENOMEM);
	r = event;
	bzero(ped, sizeof(*ped));
	ped->ped_period = DEFAULT_SAMPLE_COUNT;
	ped->ped_umask = -1;
	while ((kvp = strsep(&event, ",")) != NULL) {
		key = strsep(&kvp, "=");
		if (key == NULL)
			abort();
		value = kvp;
		if (strcmp(key, "umask") == 0)
			ped->ped_umask = strtol(value, NULL, 16);
		else if (strcmp(key, "event") == 0)
			ped->ped_event = strtol(value, NULL, 16);
		else if (strcmp(key, "period") == 0)
			ped->ped_period = strtol(value, NULL, 10);
		else if (strcmp(key, "offcore_rsp") == 0)
			ped->ped_offcore_rsp = strtol(value, NULL, 16);
		else if (strcmp(key, "any") == 0)
			ped->ped_any = strtol(value, NULL, 10);
		else if (strcmp(key, "cmask") == 0)
			ped->ped_cmask = strtol(value, NULL, 10);
		else if (strcmp(key, "inv") == 0)
			ped->ped_inv = strtol(value, NULL, 10);
		else if (strcmp(key, "edge") == 0)
			ped->ped_edge = strtol(value, NULL, 10);
		else if (strcmp(key, "frontend") == 0)
			ped->ped_frontend = strtol(value, NULL, 16);
		else if (strcmp(key, "ldlat") == 0)
			ped->ped_ldlat = strtol(value, NULL, 16);
		else if (strcmp(key, "fc_mask") == 0)
			ped->ped_fc_mask = strtol(value, NULL, 16);
		else if (strcmp(key, "ch_mask") == 0)
			ped->ped_ch_mask = strtol(value, NULL, 16);
		else if (strcmp(key, "config1") == 0)
			ped->ped_config1 = strtol(value, NULL, 16);
		else if (strcmp(key, "l3_thread_mask") == 0)
			ped->ped_l3_thread = strtol(value, NULL, 16);
		else if (strcmp(key, "l3_slice_mask") == 0)
			ped->ped_l3_slice = strtol(value, NULL, 16);
		else {
			debug = getenv("PMUDEBUG");
			if (debug != NULL && strcmp(debug, "true") == 0 && value != NULL)
				printf("unrecognized kvpair: %s:%s\n", key, value);
		}
	}
	free(r);
	return (0);
}

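/*
 * Return the default sample period for the named event, falling back to
 * DEFAULT_SAMPLE_COUNT when the event is unknown or carries no period.
 */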
uint64_t
pmc_pmu_sample_rate_get(const char *event_name)
{
	const struct pmu_event *pe;
	struct pmu_event_desc ped;

	event_name = pmu_alias_get(event_name);
	if ((pe = pmu_event_get(NULL, event_name, NULL)) == NULL)
		return (DEFAULT_SAMPLE_COUNT);
	if (pe->event == NULL)
		return (DEFAULT_SAMPLE_COUNT);
	if (pmu_parse_event(&ped, pe->event))
		return (DEFAULT_SAMPLE_COUNT);
	return (ped.ped_period);
}

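/*
 * Return non-zero if a pmu-events table exists for the running CPU.
 */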
int
pmc_pmu_enabled(void)
{

	return (pmu_events_map_get(NULL) != NULL);
}

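/*
 * Print the names of all events for this CPU, or only those whose name
 * contains 'event_name' when it is non-NULL.
 */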
void
pmc_pmu_print_counters(const char *event_name)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;
	struct pmu_event_desc ped;
	char *debug;
	int do_debug;

	debug = getenv("PMUDEBUG");
	do_debug = 0;

	if (debug != NULL && strcmp(debug, "true") == 0)
		do_debug = 1;
	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (event_name != NULL && strcasestr(pe->name, event_name) == NULL)
			continue;
		printf("\t%s\n", pe->name);
		if (do_debug)
			pmu_parse_event(&ped, pe->event);
	}
}

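/*
 * Print the short description of every event whose name contains 'ev'.
 */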
void
pmc_pmu_print_counter_desc(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) != NULL &&
		    pe->desc != NULL)
			printf("%s:\t%s\n", pe->name, pe->desc);
	}
}

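/*
 * Print the long description (or the short one when no long description
 * exists) of every event whose name contains 'ev'.
 */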
void
pmc_pmu_print_counter_desc_long(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) != NULL) {
			if (pe->long_desc != NULL)
				printf("%s:\n%s\n", pe->name, pe->long_desc);
			else if (pe->desc != NULL)
				printf("%s:\t%s\n", pe->name, pe->desc);
		}
	}
}

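/*
 * Print every available field of every event whose name contains 'ev'.
 */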
void
pmc_pmu_print_counter_full(const char *ev)
{
	const struct pmu_events_map *pme;
	const struct pmu_event *pe;

	if ((pme = pmu_events_map_get(NULL)) == NULL)
		return;
	for (pe = pme->table; pe->name || pe->desc || pe->event; pe++) {
		if (pe->name == NULL)
			continue;
		if (strcasestr(pe->name, ev) == NULL)
			continue;
		printf("name: %s\n", pe->name);
		if (pe->long_desc != NULL)
			printf("desc: %s\n", pe->long_desc);
		else if (pe->desc != NULL)
			printf("desc: %s\n", pe->desc);
		if (pe->event != NULL)
			printf("event: %s\n", pe->event);
		if (pe->topic != NULL)
			printf("topic: %s\n", pe->topic);
		if (pe->pmu != NULL)
			printf("pmu: %s\n", pe->pmu);
		if (pe->unit != NULL)
			printf("unit: %s\n", pe->unit);
		if (pe->perpkg != NULL)
			printf("perpkg: %s\n", pe->perpkg);
		if (pe->metric_expr != NULL)
			printf("metric_expr: %s\n", pe->metric_expr);
		if (pe->metric_name != NULL)
			printf("metric_name: %s\n", pe->metric_name);
		if (pe->metric_group != NULL)
			printf("metric_group: %s\n", pe->metric_group);
	}
}

#if defined(__amd64__) || defined(__i386__)
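/*
 * Fill in the AMD machine-dependent parts of a pmcallocate request from a
 * parsed event description, selecting the core, L3 cache or data fabric
 * sub-class based on the event's topic.
 */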
static int
pmc_pmu_amd_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
	struct pmu_event_desc *ped)
{
	struct pmc_md_amd_op_pmcallocate *amd;
	const struct pmu_event *pe;
	int idx = -1;

	amd = &pm->pm_md.pm_amd;
	if (ped->ped_umask > 0) {
		pm->pm_caps |= PMC_CAP_QUALIFIER;
		amd->pm_amd_config |= AMD_PMC_TO_UNITMASK(ped->ped_umask);
	}
	pm->pm_class = PMC_CLASS_K8;
	pe = pmu_event_get(NULL, event_name, &idx);

	if (pe != NULL && pe->topic != NULL &&
	    strcmp(pe->topic, "l3cache") == 0) {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_L3_CACHE;
		amd->pm_amd_config |= AMD_PMC_TO_L3SLICE(ped->ped_l3_slice);
		amd->pm_amd_config |= AMD_PMC_TO_L3CORE(ped->ped_l3_thread);
	} else if (pe != NULL && pe->topic != NULL &&
	    strcmp(pe->topic, "data fabric") == 0) {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK_DF(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_DATA_FABRIC;
	} else {
		amd->pm_amd_config |= AMD_PMC_TO_EVENTMASK(ped->ped_event);
		amd->pm_amd_sub_class = PMC_AMD_SUB_CLASS_CORE;
		if ((pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0 ||
		    (pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) ==
		    (PMC_CAP_USER|PMC_CAP_SYSTEM))
			amd->pm_amd_config |= (AMD_PMC_USR | AMD_PMC_OS);
		else if (pm->pm_caps & PMC_CAP_USER)
			amd->pm_amd_config |= AMD_PMC_USR;
		else if (pm->pm_caps & PMC_CAP_SYSTEM)
			amd->pm_amd_config |= AMD_PMC_OS;
		if (ped->ped_edge)
			amd->pm_amd_config |= AMD_PMC_EDGE;
		if (ped->ped_inv)
			amd->pm_amd_config |= AMD_PMC_INVERT;
		if (pm->pm_caps & PMC_CAP_INTERRUPT)
			amd->pm_amd_config |= AMD_PMC_INT;
	}
	return (0);
}

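/*
 * Fill in the Intel machine-dependent parts of a pmcallocate request from a
 * parsed event description, selecting the uncore, fixed-function or
 * programmable counter class as appropriate.
 */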
static int
pmc_pmu_intel_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm,
	struct pmu_event_desc *ped)
{
	struct pmc_md_iap_op_pmcallocate *iap;

	iap = &pm->pm_md.pm_iap;
	if (strcasestr(event_name, "UNC_") == event_name ||
	    strcasestr(event_name, "uncore") != NULL) {
		pm->pm_class = PMC_CLASS_UCP;
		pm->pm_caps |= PMC_CAP_QUALIFIER;
	} else if ((ped->ped_umask == -1) ||
	    (ped->ped_event == 0x0 && ped->ped_umask == 0x3)) {
		pm->pm_class = PMC_CLASS_IAF;
	} else {
		pm->pm_class = PMC_CLASS_IAP;
		pm->pm_caps |= PMC_CAP_QUALIFIER;
	}
	iap->pm_iap_config |= IAP_EVSEL(ped->ped_event);
	if (ped->ped_umask > 0)
		iap->pm_iap_config |= IAP_UMASK(ped->ped_umask);
	iap->pm_iap_config |= IAP_CMASK(ped->ped_cmask);
	iap->pm_iap_rsp = ped->ped_offcore_rsp;

	if ((pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0 ||
	    (pm->pm_caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) ==
	    (PMC_CAP_USER|PMC_CAP_SYSTEM))
		iap->pm_iap_config |= (IAP_USR | IAP_OS);
	else if (pm->pm_caps & PMC_CAP_USER)
		iap->pm_iap_config |= IAP_USR;
	else if (pm->pm_caps & PMC_CAP_SYSTEM)
		iap->pm_iap_config |= IAP_OS;
	if (ped->ped_edge)
		iap->pm_iap_config |= IAP_EDGE;
	if (ped->ped_any)
		iap->pm_iap_config |= IAP_ANY;
	if (ped->ped_inv)
		iap->pm_iap_config |= IAP_INV;
	if (pm->pm_caps & PMC_CAP_INTERRUPT)
		iap->pm_iap_config |= IAP_INT;
	return (0);
}

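/*
 * Fill in the event index and machine-dependent fields of a pmcallocate
 * request for the named event on the running CPU.  The caller supplies the
 * mode, CPU and flags and submits the PMC_OP_PMCALLOCATE request itself;
 * a minimal sketch of a caller (hypothetical values, assuming pmc_init()
 * has succeeded) would zero a struct pmc_op_pmcallocate, set pm_mode and
 * pm_cpu, call pmc_pmu_pmcallocate("LLC_MISSES", &pm) and, on success,
 * issue the allocation request using the returned pm_ev/pm_class/pm_md.
 */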
int
pmc_pmu_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm)
{
	const struct pmu_event *pe;
	struct pmu_event_desc ped;
	pmu_mfr_t mfr;
	int idx = -1;

	if ((mfr = pmu_events_mfr()) == PMU_INVALID)
		return (ENOENT);

	bzero(&pm->pm_md, sizeof(pm->pm_md));
	pm->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	event_name = pmu_alias_get(event_name);
	if ((pe = pmu_event_get(NULL, event_name, &idx)) == NULL)
		return (ENOENT);
	assert(idx >= 0);
	pm->pm_ev = idx;

	if (pe->event == NULL)
		return (ENOENT);
	if (pmu_parse_event(&ped, pe->event))
		return (ENOENT);

	if (mfr == PMU_INTEL)
		return (pmc_pmu_intel_pmcallocate(event_name, pm, &ped));
	else
		return (pmc_pmu_amd_pmcallocate(event_name, pm, &ped));
}

#elif defined(__powerpc64__)

int
pmc_pmu_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm)
{
	const struct pmu_event *pe;
	struct pmu_event_desc ped;
	int idx = -1;

	bzero(&pm->pm_md, sizeof(pm->pm_md));
	pm->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	event_name = pmu_alias_get(event_name);

	if ((pe = pmu_event_get(NULL, event_name, &idx)) == NULL)
		return (ENOENT);
	if (pe->event == NULL)
		return (ENOENT);
	if (pmu_parse_event(&ped, pe->event))
		return (ENOENT);

	assert(idx >= 0);
	pm->pm_ev = idx;
	pm->pm_md.pm_event = ped.ped_event;
	pm->pm_class = PMC_CLASS_POWER8;
	return (0);
}

#elif defined(__aarch64__)

int
pmc_pmu_pmcallocate(const char *event_name, struct pmc_op_pmcallocate *pm)
{
	const struct pmu_event *pe;
	struct pmu_event_desc ped;
	int idx = -1;

	event_name = pmu_alias_get(event_name);
	if ((pe = pmu_event_get(NULL, event_name, &idx)) == NULL)
		return (ENOENT);
	if (pe->event == NULL)
		return (ENOENT);
	if (pmu_parse_event(&ped, pe->event))
		return (ENOENT);

	assert(idx >= 0);
	pm->pm_ev = idx;
	pm->pm_md.pm_md_config = ped.ped_event;
	pm->pm_md.pm_md_flags |= PM_MD_RAW_EVENT;
	pm->pm_class = PMC_CLASS_ARMV8;
	pm->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);

	return (0);
}

#else

int
pmc_pmu_pmcallocate(const char *e __unused, struct pmc_op_pmcallocate *p __unused)
{
	return (EOPNOTSUPP);
}
#endif
655