pmu.c (e80b500370e71b8cd7dd64be4080cee0a3e5068f) vs. pmu.c (628eaa4e877af8230ef7326d378e15d511c506ba)
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/list.h>
3#include <linux/compiler.h>
4#include <linux/string.h>
5#include <linux/zalloc.h>
6#include <linux/ctype.h>
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/list.h>
3#include <linux/compiler.h>
4#include <linux/string.h>
5#include <linux/zalloc.h>
6#include <linux/ctype.h>
7#include <subcmd/pager.h>
8#include <sys/types.h>
7#include <sys/types.h>
9#include <errno.h>
10#include <fcntl.h>
11#include <sys/stat.h>
12#include <unistd.h>
13#include <stdio.h>
14#include <stdbool.h>
8#include <fcntl.h>
9#include <sys/stat.h>
10#include <unistd.h>
11#include <stdio.h>
12#include <stdbool.h>
15#include <stdarg.h>
16#include <dirent.h>
17#include <api/fs/fs.h>
18#include <locale.h>
13#include <dirent.h>
14#include <api/fs/fs.h>
15#include <locale.h>
19#include <regex.h>
20#include <perf/cpumap.h>
21#include <fnmatch.h>
22#include <math.h>
23#include "debug.h"
24#include "evsel.h"
25#include "pmu.h"
26#include "pmus.h"
27#include "pmu-bison.h"
28#include "pmu-flex.h"
29#include "parse-events.h"
30#include "print-events.h"
31#include "header.h"
32#include "string2.h"
33#include "strbuf.h"
34#include "fncache.h"
16#include <fnmatch.h>
17#include <math.h>
18#include "debug.h"
19#include "evsel.h"
20#include "pmu.h"
21#include "pmus.h"
22#include "pmu-bison.h"
23#include "pmu-flex.h"
24#include "parse-events.h"
25#include "print-events.h"
26#include "header.h"
27#include "string2.h"
28#include "strbuf.h"
29#include "fncache.h"
35#include "pmu-hybrid.h"
36#include "util/evsel_config.h"
37
38struct perf_pmu perf_pmu__fake;
39
40/**
41 * struct perf_pmu_format - Values from a format file read from
42 * <sysfs>/devices/cpu/format/ held in struct perf_pmu.
43 *

--- 11 unchanged lines hidden ---

55 */
56 int value;
57 /** @bits: Which config bits are set by this format value. */
58 DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
59 /** @list: Element on list within struct perf_pmu. */
60 struct list_head list;
61};
62
30#include "util/evsel_config.h"
31
32struct perf_pmu perf_pmu__fake;
33
34/**
35 * struct perf_pmu_format - Values from a format file read from
36 * <sysfs>/devices/cpu/format/ held in struct perf_pmu.
37 *

--- 11 unchanged lines hidden ---

49 */
50 int value;
51 /** @bits: Which config bits are set by this format value. */
52 DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
53 /** @list: Element on list within struct perf_pmu. */
54 struct list_head list;
55};
56
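/*
 * Editor's note (illustrative example, not part of the original pmu.c): a
 * format file such as <sysfs>/bus/event_source/devices/cpu/format/event
 * usually holds a single line like "config:0-7".  Parsing it produces
 * roughly:
 *
 *	struct perf_pmu_format fmt = {
 *		.name  = "event",			// file name
 *		.value = PERF_PMU_FORMAT_VALUE_CONFIG,	// "config"
 *		// .bits has bits 0..7 set, so a later "event=0x3c" term is
 *		// packed into attr->config bits 0-7.
 *	};
 *
 * Exact file names and bit ranges differ per PMU and architecture.
 */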
63static bool hybrid_scanned;
64
65static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name);
66
67/*
68 * Parse & process all the sysfs attributes located under
69 * the directory referred to by the 'dirfd' parameter.
70 */
71int perf_pmu__format_parse(int dirfd, struct list_head *head)
72{
73 struct dirent *evt_ent;
74 DIR *format_dir;

--- 477 unchanged lines hidden ---

552 */
553 cloned->weak = true;
554 list_add_tail(&cloned->list, &list);
555 }
556 list_splice(&list, terms);
557 return 0;
558}
559
57/*
58 * Parse & process all the sysfs attributes located under
59 * the directory referred to by the 'dirfd' parameter.
60 */
61int perf_pmu__format_parse(int dirfd, struct list_head *head)
62{
63 struct dirent *evt_ent;
64 DIR *format_dir;

--- 477 unchanged lines hidden ---

542 */
543 cloned->weak = true;
544 list_add_tail(&cloned->list, &list);
545 }
546 list_splice(&list, terms);
547 return 0;
548}
549
560/* Add all pmus in sysfs to pmu list: */
561static void pmu_read_sysfs(void)
562{
563 int fd;
564 DIR *dir;
565 struct dirent *dent;
566
567 fd = perf_pmu__event_source_devices_fd();
568 if (fd < 0)
569 return;
570
571 dir = fdopendir(fd);
572 if (!dir)
573 return;
574
575 while ((dent = readdir(dir))) {
576 if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
577 continue;
578 /* add to static LIST_HEAD(pmus): */
579 perf_pmu__find2(fd, dent->d_name);
580 }
581
582 closedir(dir);
583}
584
585/*
586 * Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64)
587 * may have a "cpus" file.
588 */
550/*
551 * Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64)
552 * may have a "cpus" file.
553 */
589static struct perf_cpu_map *pmu_cpumask(int dirfd, const char *name)
554static struct perf_cpu_map *pmu_cpumask(int dirfd, const char *name, bool is_core)
590{
591 struct perf_cpu_map *cpus;
592 const char *templates[] = {
593 "cpumask",
594 "cpus",
595 NULL
596 };
597 const char **template;

--- 7 unchanged lines hidden ---

605 if (!file)
606 continue;
607 cpus = perf_cpu_map__read(file);
608 fclose(file);
609 if (cpus)
610 return cpus;
611 }
612
555{
556 struct perf_cpu_map *cpus;
557 const char *templates[] = {
558 "cpumask",
559 "cpus",
560 NULL
561 };
562 const char **template;

--- 7 unchanged lines hidden ---

570 if (!file)
571 continue;
572 cpus = perf_cpu_map__read(file);
573 fclose(file);
574 if (cpus)
575 return cpus;
576 }
577
613 return NULL;
578 /* Nothing found, for core PMUs assume this means all CPUs. */
579 return is_core ? perf_cpu_map__get(cpu_map__online()) : NULL;
614}
615
616static bool pmu_is_uncore(int dirfd, const char *name)
617{
618 int fd;
619
580}
581
582static bool pmu_is_uncore(int dirfd, const char *name)
583{
584 int fd;
585
620 if (perf_pmu__hybrid_mounted(name))
621 return false;
622
623 fd = perf_pmu__pathname_fd(dirfd, name, "cpumask", O_PATH);
624 if (fd < 0)
625 return false;
626
627 close(fd);
628 return true;
629}
630

--- 7 unchanged lines hidden ---

638 if (filename__read_str(path, &str, &len) < 0)
639 return NULL;
640
641 str[len - 1] = 0; /* remove line feed */
642
643 return str;
644}
645
586 fd = perf_pmu__pathname_fd(dirfd, name, "cpumask", O_PATH);
587 if (fd < 0)
588 return false;
589
590 close(fd);
591 return true;
592}
593

--- 7 unchanged lines hidden ---

601 if (filename__read_str(path, &str, &len) < 0)
602 return NULL;
603
604 str[len - 1] = 0; /* remove line feed */
605
606 return str;
607}
608
646/*
647 * Core PMU devices may have a name other than "cpu" in sysfs on some
648 * platforms.
649 * Look for possible sysfs files to identify the ARM core device.
609/**
610 * is_sysfs_pmu_core() - Core PMU devices may have a name other than "cpu" in
611 * sysfs on some platforms, such as ARM or Intel hybrid. Look for a
612 * possible "cpus" file in sysfs to identify whether this is a
613 * core device.
614 * @name: The PMU name such as "cpu_atom".
650 */
615 */
651static int is_arm_pmu_core(const char *name)
616static int is_sysfs_pmu_core(const char *name)
652{
653 char path[PATH_MAX];
654
655 if (!perf_pmu__pathname_scnprintf(path, sizeof(path), name, "cpus"))
656 return 0;
657 return file_available(path);
658}
659

--- 112 unchanged lines hidden ---

772
773 res = true;
774out:
775 free(str);
776 return res;
777}
778
779struct pmu_add_cpu_aliases_map_data {
617{
618 char path[PATH_MAX];
619
620 if (!perf_pmu__pathname_scnprintf(path, sizeof(path), name, "cpus"))
621 return 0;
622 return file_available(path);
623}
624
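/*
 * Editor's sketch (assumed values, not in the original file): for a hybrid
 * PMU named "cpu_atom" the check above boils down to:
 *
 *	char path[PATH_MAX];
 *
 *	perf_pmu__pathname_scnprintf(path, sizeof(path), "cpu_atom", "cpus");
 *	// On a typical mount, path is
 *	// "/sys/bus/event_source/devices/cpu_atom/cpus"; file_available()
 *	// then reports whether that file exists.
 */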

--- 112 unchanged lines hidden ---

737
738 res = true;
739out:
740 free(str);
741 return res;
742}
743
744struct pmu_add_cpu_aliases_map_data {
745 /* List being added to. */
780 struct list_head *head;
746 struct list_head *head;
781 const char *name;
782 const char *cpu_name;
747 /* If a pmu_event lacks a given PMU, the default PMU name is used. */
748 char *default_pmu_name;
749 /* The PMU that we're searching for events for. */
783 struct perf_pmu *pmu;
784};
785
786static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe,
787 const struct pmu_events_table *table __maybe_unused,
788 void *vdata)
789{
790 struct pmu_add_cpu_aliases_map_data *data = vdata;
750 struct perf_pmu *pmu;
751};
752
753static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe,
754 const struct pmu_events_table *table __maybe_unused,
755 void *vdata)
756{
757 struct pmu_add_cpu_aliases_map_data *data = vdata;
791 const char *pname = pe->pmu ? pe->pmu : data->cpu_name;
758 const char *pname = pe->pmu ?: data->default_pmu_name;
792
759
793 if (data->pmu->is_uncore && pmu_uncore_alias_match(pname, data->name))
794 goto new_alias;
795
796 if (strcmp(pname, data->name))
797 return 0;
798
799new_alias:
800 /* need type casts to override 'const' */
801 __perf_pmu__new_alias(data->head, -1, (char *)pe->name, (char *)pe->desc,
802 (char *)pe->event, pe);
760 if (!strcmp(pname, data->pmu->name) ||
761 (data->pmu->is_uncore && pmu_uncore_alias_match(pname, data->pmu->name))) {
762 /* need type casts to override 'const' */
763 __perf_pmu__new_alias(data->head, -1, (char *)pe->name, (char *)pe->desc,
764 (char *)pe->event, pe);
765 }
803 return 0;
804}
805
806/*
766 return 0;
767}
768
769/*
807 * From the pmu_events_map, find the table of PMU events that corresponds
808 * to the current running CPU. Then, add all PMU events from that table
809 * as aliases.
770 * From the pmu_events_table, find the events that correspond to the given
771 * PMU and add them to the list 'head'.
810 */
811void pmu_add_cpu_aliases_table(struct list_head *head, struct perf_pmu *pmu,
772 */
773void pmu_add_cpu_aliases_table(struct list_head *head, struct perf_pmu *pmu,
812 const struct pmu_events_table *table)
774 const struct pmu_events_table *table)
813{
814 struct pmu_add_cpu_aliases_map_data data = {
815 .head = head,
775{
776 struct pmu_add_cpu_aliases_map_data data = {
777 .head = head,
816 .name = pmu->name,
817 .cpu_name = is_arm_pmu_core(pmu->name) ? pmu->name : "cpu",
778 .default_pmu_name = perf_pmus__default_pmu_name(),
818 .pmu = pmu,
819 };
820
821 pmu_events_table_for_each_event(table, pmu_add_cpu_aliases_map_callback, &data);
779 .pmu = pmu,
780 };
781
782 pmu_events_table_for_each_event(table, pmu_add_cpu_aliases_map_callback, &data);
783 free(data.default_pmu_name);
822}
823
824static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
825{
826 const struct pmu_events_table *table;
827
828 table = perf_pmu__find_events_table(pmu);
829 if (!table)

--- 63 unchanged lines hidden ---

893static int pmu_max_precise(int dirfd, struct perf_pmu *pmu)
894{
895 int max_precise = -1;
896
897 perf_pmu__scan_file_at(pmu, dirfd, "caps/max_precise", "%d", &max_precise);
898 return max_precise;
899}
900
784}
785
786static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
787{
788 const struct pmu_events_table *table;
789
790 table = perf_pmu__find_events_table(pmu);
791 if (!table)

--- 63 unchanged lines hidden ---

855static int pmu_max_precise(int dirfd, struct perf_pmu *pmu)
856{
857 int max_precise = -1;
858
859 perf_pmu__scan_file_at(pmu, dirfd, "caps/max_precise", "%d", &max_precise);
860 return max_precise;
861}
862
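/*
 * Editor's note (illustrative, not part of the original file): the
 * "caps/max_precise" file holds a single integer, so a caller can also use
 * the non-dirfd variant:
 *
 *	int max_precise = -1;
 *
 *	perf_pmu__scan_file(pmu, "caps/max_precise", "%d", &max_precise);
 *	// e.g. 3 on many x86 CPUs, meaning precise_ip up to ":ppp" is
 *	// supported; -1 is kept when the capability file is absent.
 */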
901static struct perf_pmu *pmu_lookup(int dirfd, const char *lookup_name)
863struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name)
902{
903 struct perf_pmu *pmu;
904 LIST_HEAD(format);
905 LIST_HEAD(aliases);
906 __u32 type;
907 char *name = pmu_find_real_name(lookup_name);
864{
865 struct perf_pmu *pmu;
866 LIST_HEAD(format);
867 LIST_HEAD(aliases);
868 __u32 type;
869 char *name = pmu_find_real_name(lookup_name);
908 bool is_hybrid = perf_pmu__hybrid_mounted(name);
909 char *alias_name;
910
911 /*
870 char *alias_name;
871
872 /*
912 * Check the pmu name for hybrid: a "cpu_*" pmu that is not hybrid may be invalid in sysfs
913 */
914 if (!strncmp(name, "cpu_", 4) && !is_hybrid)
915 return NULL;
916
917 /*
918 * The pmu data we store & need consists of the pmu
919 * type value and format definitions. Load both right
920 * now.
921 */
922 if (pmu_format(dirfd, name, &format))
923 return NULL;
924
925 /*
926 * Check the aliases first to avoid unnecessary work.
927 */
928 if (pmu_aliases(dirfd, name, &aliases))
929 return NULL;
930
931 pmu = zalloc(sizeof(*pmu));
932 if (!pmu)
933 return NULL;
934
873 * The pmu data we store & need consists of the pmu
874 * type value and format definitions. Load both right
875 * now.
876 */
877 if (pmu_format(dirfd, name, &format))
878 return NULL;
879
880 /*
881 * Check the aliases first to avoid unnecessary work.
882 */
883 if (pmu_aliases(dirfd, name, &aliases))
884 return NULL;
885
886 pmu = zalloc(sizeof(*pmu));
887 if (!pmu)
888 return NULL;
889
935 pmu->cpus = pmu_cpumask(dirfd, name);
890 pmu->is_core = is_pmu_core(name);
891 pmu->cpus = pmu_cpumask(dirfd, name, pmu->is_core);
936 pmu->name = strdup(name);
892 pmu->name = strdup(name);
937
938 if (!pmu->name)
939 goto err;
940
941 /* Read type, and ensure that type value is successfully assigned (return 1) */
942 if (perf_pmu__scan_file_at(pmu, dirfd, "type", "%u", &type) != 1)
943 goto err;
944
945 alias_name = pmu_find_alias_name(name);

--- 11 unchanged lines hidden ---

957 pmu_add_cpu_aliases(&aliases, pmu);
958 pmu_add_sys_aliases(&aliases, pmu);
959
960 INIT_LIST_HEAD(&pmu->format);
961 INIT_LIST_HEAD(&pmu->aliases);
962 INIT_LIST_HEAD(&pmu->caps);
963 list_splice(&format, &pmu->format);
964 list_splice(&aliases, &pmu->aliases);
893 if (!pmu->name)
894 goto err;
895
896 /* Read type, and ensure that type value is successfully assigned (return 1) */
897 if (perf_pmu__scan_file_at(pmu, dirfd, "type", "%u", &type) != 1)
898 goto err;
899
900 alias_name = pmu_find_alias_name(name);

--- 11 unchanged lines hidden ---

912 pmu_add_cpu_aliases(&aliases, pmu);
913 pmu_add_sys_aliases(&aliases, pmu);
914
915 INIT_LIST_HEAD(&pmu->format);
916 INIT_LIST_HEAD(&pmu->aliases);
917 INIT_LIST_HEAD(&pmu->caps);
918 list_splice(&format, &pmu->format);
919 list_splice(&aliases, &pmu->aliases);
965 list_add_tail(&pmu->list, &pmus);
920 list_add_tail(&pmu->list, pmus);
966
921
967 if (is_hybrid)
968 list_add_tail(&pmu->hybrid_list, &perf_pmu__hybrid_pmus);
969 else
970 INIT_LIST_HEAD(&pmu->hybrid_list);
971
972 pmu->default_config = perf_pmu__get_default_config(pmu);
973
974 return pmu;
975err:
976 zfree(&pmu->name);
977 free(pmu);
978 return NULL;
979}
980
922 pmu->default_config = perf_pmu__get_default_config(pmu);
923
924 return pmu;
925err:
926 zfree(&pmu->name);
927 free(pmu);
928 return NULL;
929}
930
931/* Creates the PMU when sysfs scanning fails. */
932struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pmus)
933{
934 struct perf_pmu *pmu = zalloc(sizeof(*pmu));
935
936 if (!pmu)
937 return NULL;
938
939 pmu->name = strdup("cpu");
940 if (!pmu->name) {
941 free(pmu);
942 return NULL;
943 }
944
945 pmu->is_core = true;
946 pmu->type = PERF_TYPE_RAW;
947 pmu->cpus = cpu_map__online();
948
949 INIT_LIST_HEAD(&pmu->format);
950 INIT_LIST_HEAD(&pmu->aliases);
951 INIT_LIST_HEAD(&pmu->caps);
952 list_add_tail(&pmu->list, core_pmus);
953 return pmu;
954}
955
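/*
 * Editor's sketch (assumed caller, not part of the original file): the
 * placeholder is intended for when sysfs/procfs cannot be read, e.g.
 *
 *	LIST_HEAD(core_pmus);
 *	int dirfd = perf_pmu__event_source_devices_fd();
 *	struct perf_pmu *pmu = perf_pmu__lookup(&core_pmus, dirfd, "cpu");
 *
 *	if (!pmu)
 *		pmu = perf_pmu__create_placeholder_core_pmu(&core_pmus);
 *	// The placeholder advertises PERF_TYPE_RAW and all online CPUs so
 *	// that legacy hardware events can still be parsed.
 */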
981void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu)
982{
983 struct perf_pmu_format *format;
984
956void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu)
957{
958 struct perf_pmu_format *format;
959
960 if (pmu->formats_checked)
961 return;
962
963 pmu->formats_checked = true;
964
985 /* fake pmu doesn't have format list */
986 if (pmu == &perf_pmu__fake)
987 return;
988
989 list_for_each_entry(format, &pmu->format, list)
990 if (format->value >= PERF_PMU_FORMAT_VALUE_CONFIG_END) {
991 pr_warning("WARNING: '%s' format '%s' requires 'perf_event_attr::config%d' "
992 "which is not supported by this version of perf!\n",
993 pmu->name, format->name, format->value);
994 return;
995 }
996}
997
965 /* fake pmu doesn't have format list */
966 if (pmu == &perf_pmu__fake)
967 return;
968
969 list_for_each_entry(format, &pmu->format, list)
970 if (format->value >= PERF_PMU_FORMAT_VALUE_CONFIG_END) {
971 pr_warning("WARNING: '%s' format '%s' requires 'perf_event_attr::config%d' "
972 "which is not supported by this version of perf!\n",
973 pmu->name, format->name, format->value);
974 return;
975 }
976}
977
998static struct perf_pmu *pmu_find(const char *name)
999{
1000 struct perf_pmu *pmu;
1001
1002 list_for_each_entry(pmu, &pmus, list) {
1003 if (!strcmp(pmu->name, name) ||
1004 (pmu->alias_name && !strcmp(pmu->alias_name, name)))
1005 return pmu;
1006 }
1007
1008 return NULL;
1009}
1010
1011struct perf_pmu *perf_pmu__find_by_type(unsigned int type)
1012{
1013 struct perf_pmu *pmu;
1014
1015 list_for_each_entry(pmu, &pmus, list)
1016 if (pmu->type == type)
1017 return pmu;
1018
1019 return NULL;
1020}
1021
1022struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu)
1023{
1024 /*
1025 * pmu iterator: If pmu is NULL, we start at the beginning,
1026 * otherwise return the next pmu. Returns NULL on end.
1027 */
1028 if (!pmu) {
1029 pmu_read_sysfs();
1030 pmu = list_prepare_entry(pmu, &pmus, list);
1031 }
1032 list_for_each_entry_continue(pmu, &pmus, list)
1033 return pmu;
1034 return NULL;
1035}
1036
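/*
 * Editor's note (usage illustration, not part of the original file): the
 * usual iteration pattern over all PMUs is
 *
 *	struct perf_pmu *pmu = NULL;
 *
 *	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
 *		// First call triggers pmu_read_sysfs() and returns the first
 *		// list entry; later calls return the following entries.
 *	}
 */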
1037struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
1038{
1039 struct perf_pmu *pmu = NULL;
1040
1041 if (evsel->pmu)
1042 return evsel->pmu;
1043
1044 while ((pmu = perf_pmu__scan(pmu)) != NULL) {
1045 if (pmu->type == evsel->core.attr.type)
1046 break;
1047 }
1048
1049 ((struct evsel *)evsel)->pmu = pmu;
1050 return pmu;
1051}
1052
1053bool evsel__is_aux_event(const struct evsel *evsel)
1054{
1055 struct perf_pmu *pmu = evsel__find_pmu(evsel);
1056
1057 return pmu && pmu->auxtrace;
1058}
1059
1060/*

--- 20 unchanged lines hidden ---

1081 if (bits & user_bits)
1082 return;
1083
1084 /* Otherwise replace it */
1085 evsel->core.attr.config &= ~bits;
1086 evsel->core.attr.config |= field_prep(bits, val);
1087}
1088
978bool evsel__is_aux_event(const struct evsel *evsel)
979{
980 struct perf_pmu *pmu = evsel__find_pmu(evsel);
981
982 return pmu && pmu->auxtrace;
983}
984
985/*

--- 20 unchanged lines hidden ---

1006 if (bits & user_bits)
1007 return;
1008
1009 /* Otherwise replace it */
1010 evsel->core.attr.config &= ~bits;
1011 evsel->core.attr.config |= field_prep(bits, val);
1012}
1013
1089struct perf_pmu *perf_pmu__find(const char *name)
1090{
1091 struct perf_pmu *pmu;
1092 int dirfd;
1093
1094 /*
1095 * Once a PMU is loaded it stays in the list,
1096 * which keeps us from reading/parsing
1097 * the pmu format definitions more than once.
1098 */
1099 pmu = pmu_find(name);
1100 if (pmu)
1101 return pmu;
1102
1103 dirfd = perf_pmu__event_source_devices_fd();
1104 pmu = pmu_lookup(dirfd, name);
1105 close(dirfd);
1106
1107 return pmu;
1108}
1109
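/*
 * Editor's note (usage illustration, not part of the original file): lookups
 * after the first are cheap because the parsed PMU is cached on the list:
 *
 *	struct perf_pmu *cpu_pmu = perf_pmu__find("cpu");	// parses sysfs
 *	struct perf_pmu *again   = perf_pmu__find("cpu");	// list hit
 *
 *	// cpu_pmu == again; NULL is returned if no such PMU exists.
 */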
1110static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
1111{
1112 struct perf_pmu *pmu;
1113
1114 /*
1115 * Once a PMU is loaded it stays in the list,
1116 * which keeps us from reading/parsing
1117 * the pmu format definitions more than once.
1118 */
1119 pmu = pmu_find(name);
1120 if (pmu)
1121 return pmu;
1122
1123 return pmu_lookup(dirfd, name);
1124}
1125
1126static struct perf_pmu_format *
1127pmu_find_format(struct list_head *formats, const char *name)
1128{
1129 struct perf_pmu_format *format;
1130
1131 list_for_each_entry(format, formats, list)
1132 if (!strcmp(format->name, name))
1133 return format;

--- 259 unchanged lines hidden ---

1393 * 2) pmu format definitions - specified by pmu parameter
1394 */
1395int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
1396 struct list_head *head_terms,
1397 struct parse_events_error *err)
1398{
1399 bool zero = !!pmu->default_config;
1400
1014static struct perf_pmu_format *
1015pmu_find_format(struct list_head *formats, const char *name)
1016{
1017 struct perf_pmu_format *format;
1018
1019 list_for_each_entry(format, formats, list)
1020 if (!strcmp(format->name, name))
1021 return format;

--- 259 unchanged lines hidden ---

1281 * 2) pmu format definitions - specified by pmu parameter
1282 */
1283int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
1284 struct list_head *head_terms,
1285 struct parse_events_error *err)
1286{
1287 bool zero = !!pmu->default_config;
1288
1401 attr->type = pmu->type;
1402 return perf_pmu__config_terms(pmu->name, &pmu->format, attr,
1403 head_terms, zero, err);
1404}
1405
1406static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
1407 struct parse_events_term *term)
1408{
1409 struct perf_pmu_alias *alias;

--- 138 unchanged lines hidden (view full) ---

1548
1549 list_for_each_entry_safe(fmt, tmp, formats, list) {
1550 list_del(&fmt->list);
1551 zfree(&fmt->name);
1552 free(fmt);
1553 }
1554}
1555
1289 return perf_pmu__config_terms(pmu->name, &pmu->format, attr,
1290 head_terms, zero, err);
1291}
1292
1293static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
1294 struct parse_events_term *term)
1295{
1296 struct perf_pmu_alias *alias;

--- 138 unchanged lines hidden (view full) ---

1435
1436 list_for_each_entry_safe(fmt, tmp, formats, list) {
1437 list_del(&fmt->list);
1438 zfree(&fmt->name);
1439 free(fmt);
1440 }
1441}
1442
1556static int sub_non_neg(int a, int b)
1443bool is_pmu_core(const char *name)
1557{
1444{
1558 if (b > a)
1559 return 0;
1560 return a - b;
1445 return !strcmp(name, "cpu") || !strcmp(name, "cpum_cf") || is_sysfs_pmu_core(name);
1561}
1562
1446}
1447
1563static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
1564 const struct perf_pmu_alias *alias)
1448bool perf_pmu__supports_legacy_cache(const struct perf_pmu *pmu)
1565{
1449{
1566 struct parse_events_term *term;
1567 int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name);
1568
1569 list_for_each_entry(term, &alias->terms, list) {
1570 if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
1571 used += snprintf(buf + used, sub_non_neg(len, used),
1572 ",%s=%s", term->config,
1573 term->val.str);
1574 }
1575
1576 if (sub_non_neg(len, used) > 0) {
1577 buf[used] = '/';
1578 used++;
1579 }
1580 if (sub_non_neg(len, used) > 0) {
1581 buf[used] = '\0';
1582 used++;
1583 } else
1584 buf[len - 1] = '\0';
1585
1586 return buf;
1450 return pmu->is_core;
1587}
1588
1451}
1452
1589/** Struct for ordering events as output in perf list. */
1590struct sevent {
1591 /** PMU for event. */
1592 const struct perf_pmu *pmu;
1593 /**
1594 * Optional event for name, desc, etc. If not present then this is a
1595 * selectable PMU and the event name is shown as "//".
1596 */
1597 const struct perf_pmu_alias *event;
1598 /** Is the PMU for the CPU? */
1599 bool is_cpu;
1600};
1601
1602static int cmp_sevent(const void *a, const void *b)
1453bool perf_pmu__auto_merge_stats(const struct perf_pmu *pmu)
1603{
1454{
1604 const struct sevent *as = a;
1605 const struct sevent *bs = b;
1606 const char *a_pmu_name = NULL, *b_pmu_name = NULL;
1607 const char *a_name = "//", *a_desc = NULL, *a_topic = "";
1608 const char *b_name = "//", *b_desc = NULL, *b_topic = "";
1609 int ret;
1610
1611 if (as->event) {
1612 a_name = as->event->name;
1613 a_desc = as->event->desc;
1614 a_topic = as->event->topic ?: "";
1615 a_pmu_name = as->event->pmu_name;
1616 }
1617 if (bs->event) {
1618 b_name = bs->event->name;
1619 b_desc = bs->event->desc;
1620 b_topic = bs->event->topic ?: "";
1621 b_pmu_name = bs->event->pmu_name;
1622 }
1623 /* Put extra events last. */
1624 if (!!a_desc != !!b_desc)
1625 return !!a_desc - !!b_desc;
1626
1627 /* Order by topics. */
1628 ret = strcmp(a_topic, b_topic);
1629 if (ret)
1630 return ret;
1631
1632 /* Order CPU core events to be first */
1633 if (as->is_cpu != bs->is_cpu)
1634 return as->is_cpu ? -1 : 1;
1635
1636 /* Order by PMU name. */
1637 if (as->pmu != bs->pmu) {
1638 a_pmu_name = a_pmu_name ?: (as->pmu->name ?: "");
1639 b_pmu_name = b_pmu_name ?: (bs->pmu->name ?: "");
1640 ret = strcmp(a_pmu_name, b_pmu_name);
1641 if (ret)
1642 return ret;
1643 }
1644
1645 /* Order by event name. */
1646 return strcmp(a_name, b_name);
1455 return !pmu->is_core || perf_pmus__num_core_pmus() == 1;
1647}
1648
1456}
1457
1649bool is_pmu_core(const char *name)
1458bool perf_pmu__have_event(const struct perf_pmu *pmu, const char *name)
1650{
1459{
1651 return !strcmp(name, "cpu") || is_arm_pmu_core(name);
1652}
1460 struct perf_pmu_alias *alias;
1653
1461
1654static bool pmu_alias_is_duplicate(struct sevent *alias_a,
1655 struct sevent *alias_b)
1656{
1657 const char *a_pmu_name = NULL, *b_pmu_name = NULL;
1658 const char *a_name = "//", *b_name = "//";
1659
1660
1661 if (alias_a->event) {
1662 a_name = alias_a->event->name;
1663 a_pmu_name = alias_a->event->pmu_name;
1462 list_for_each_entry(alias, &pmu->aliases, list) {
1463 if (!strcmp(alias->name, name))
1464 return true;
1664 }
1465 }
1665 if (alias_b->event) {
1666 b_name = alias_b->event->name;
1667 b_pmu_name = alias_b->event->pmu_name;
1668 }
1669
1670 /* Different names -> never duplicates */
1671 if (strcmp(a_name, b_name))
1672 return false;
1673
1674 /* Don't remove duplicates for different PMUs */
1675 a_pmu_name = a_pmu_name ?: (alias_a->pmu->name ?: "");
1676 b_pmu_name = b_pmu_name ?: (alias_b->pmu->name ?: "");
1677 return strcmp(a_pmu_name, b_pmu_name) == 0;
1466 return false;
1678}
1679
1467}
1468
1680void print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
1469bool perf_pmu__is_software(const struct perf_pmu *pmu)
1681{
1470{
1682 struct perf_pmu *pmu;
1683 struct perf_pmu_alias *event;
1684 char buf[1024];
1685 int printed = 0;
1686 int len, j;
1687 struct sevent *aliases;
1688
1689 pmu = NULL;
1690 len = 0;
1691 while ((pmu = perf_pmu__scan(pmu)) != NULL) {
1692 list_for_each_entry(event, &pmu->aliases, list)
1693 len++;
1694 if (pmu->selectable)
1695 len++;
1471 if (pmu->is_core || pmu->is_uncore || pmu->auxtrace)
1472 return false;
1473 switch (pmu->type) {
1474 case PERF_TYPE_HARDWARE: return false;
1475 case PERF_TYPE_SOFTWARE: return true;
1476 case PERF_TYPE_TRACEPOINT: return true;
1477 case PERF_TYPE_HW_CACHE: return false;
1478 case PERF_TYPE_RAW: return false;
1479 case PERF_TYPE_BREAKPOINT: return true;
1480 default: break;
1696 }
1481 }
1697 aliases = zalloc(sizeof(struct sevent) * len);
1698 if (!aliases) {
1699 pr_err("FATAL: not enough memory to print PMU events\n");
1700 return;
1701 }
1702 pmu = NULL;
1703 j = 0;
1704 while ((pmu = perf_pmu__scan(pmu)) != NULL) {
1705 bool is_cpu = is_pmu_core(pmu->name) || perf_pmu__is_hybrid(pmu->name);
1706
1707 list_for_each_entry(event, &pmu->aliases, list) {
1708 aliases[j].event = event;
1709 aliases[j].pmu = pmu;
1710 aliases[j].is_cpu = is_cpu;
1711 j++;
1712 }
1713 if (pmu->selectable) {
1714 aliases[j].event = NULL;
1715 aliases[j].pmu = pmu;
1716 aliases[j].is_cpu = is_cpu;
1717 j++;
1718 }
1719 }
1720 len = j;
1721 qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
1722 for (j = 0; j < len; j++) {
1723 const char *name, *alias = NULL, *scale_unit = NULL,
1724 *desc = NULL, *long_desc = NULL,
1725 *encoding_desc = NULL, *topic = NULL,
1726 *pmu_name = NULL;
1727 bool deprecated = false;
1728 size_t buf_used;
1729
1730 /* Skip duplicates */
1731 if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
1732 continue;
1733
1734 if (!aliases[j].event) {
1735 /* A selectable event. */
1736 pmu_name = aliases[j].pmu->name;
1737 buf_used = snprintf(buf, sizeof(buf), "%s//", pmu_name) + 1;
1738 name = buf;
1739 } else {
1740 if (aliases[j].event->desc) {
1741 name = aliases[j].event->name;
1742 buf_used = 0;
1743 } else {
1744 name = format_alias(buf, sizeof(buf), aliases[j].pmu,
1745 aliases[j].event);
1746 if (aliases[j].is_cpu) {
1747 alias = name;
1748 name = aliases[j].event->name;
1749 }
1750 buf_used = strlen(buf) + 1;
1751 }
1752 pmu_name = aliases[j].event->pmu_name ?: (aliases[j].pmu->name ?: "");
1753 if (strlen(aliases[j].event->unit) || aliases[j].event->scale != 1.0) {
1754 scale_unit = buf + buf_used;
1755 buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
1756 "%G%s", aliases[j].event->scale,
1757 aliases[j].event->unit) + 1;
1758 }
1759 desc = aliases[j].event->desc;
1760 long_desc = aliases[j].event->long_desc;
1761 topic = aliases[j].event->topic;
1762 encoding_desc = buf + buf_used;
1763 buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
1764 "%s/%s/", pmu_name, aliases[j].event->str) + 1;
1765 deprecated = aliases[j].event->deprecated;
1766 }
1767 print_cb->print_event(print_state,
1768 pmu_name,
1769 topic,
1770 name,
1771 alias,
1772 scale_unit,
1773 deprecated,
1774 "Kernel PMU event",
1775 desc,
1776 long_desc,
1777 encoding_desc);
1778 }
1779 if (printed && pager_in_use())
1780 printf("\n");
1781
1782 zfree(&aliases);
1783 return;
1482 return !strcmp(pmu->name, "kprobe") || !strcmp(pmu->name, "uprobe");
1784}
1785
1483}
1484
1786bool pmu_have_event(const char *pname, const char *name)
1787{
1788 struct perf_pmu *pmu;
1789 struct perf_pmu_alias *alias;
1790
1791 pmu = NULL;
1792 while ((pmu = perf_pmu__scan(pmu)) != NULL) {
1793 if (strcmp(pname, pmu->name))
1794 continue;
1795 list_for_each_entry(alias, &pmu->aliases, list)
1796 if (!strcmp(alias->name, name))
1797 return true;
1798 }
1799 return false;
1800}
1801
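/*
 * Editor's sketch (event name assumed, not part of the original file):
 * checking whether a named PMU exposes a particular event alias:
 *
 *	if (pmu_have_event("cpu", "cycles-ct"))
 *		use_tsx_aware_events();	// hypothetical caller
 */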
1802FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name)
1803{
1804 char path[PATH_MAX];
1805
1806 if (!perf_pmu__pathname_scnprintf(path, sizeof(path), pmu->name, name) ||
1807 !file_available(path))
1808 return NULL;
1809

--- 152 unchanged lines hidden ---

1962 }
1963
1964 closedir(caps_dir);
1965
1966 pmu->caps_initialized = true;
1967 return pmu->nr_caps;
1968}
1969
1485FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name)
1486{
1487 char path[PATH_MAX];
1488
1489 if (!perf_pmu__pathname_scnprintf(path, sizeof(path), pmu->name, name) ||
1490 !file_available(path))
1491 return NULL;
1492

--- 152 unchanged lines hidden ---

1645 }
1646
1647 closedir(caps_dir);
1648
1649 pmu->caps_initialized = true;
1650 return pmu->nr_caps;
1651}
1652
1970void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
1971 const char *name)
1653static void perf_pmu__compute_config_masks(struct perf_pmu *pmu)
1972{
1973 struct perf_pmu_format *format;
1654{
1655 struct perf_pmu_format *format;
1974 __u64 masks = 0, bits;
1975 char buf[100];
1976 unsigned int i;
1977
1656
1657 if (pmu->config_masks_computed)
1658 return;
1659
1978 list_for_each_entry(format, &pmu->format, list) {
1660 list_for_each_entry(format, &pmu->format, list) {
1979 if (format->value != PERF_PMU_FORMAT_VALUE_CONFIG)
1661 unsigned int i;
1662 __u64 *mask;
1663
1664 if (format->value >= PERF_PMU_FORMAT_VALUE_CONFIG_END)
1980 continue;
1981
1665 continue;
1666
1667 pmu->config_masks_present = true;
1668 mask = &pmu->config_masks[format->value];
1669
1982 for_each_set_bit(i, format->bits, PERF_PMU_FORMAT_BITS)
1670 for_each_set_bit(i, format->bits, PERF_PMU_FORMAT_BITS)
1983 masks |= 1ULL << i;
1671 *mask |= 1ULL << i;
1984 }
1672 }
1673 pmu->config_masks_computed = true;
1674}
1985
1675
1676void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
1677 const char *name, int config_num,
1678 const char *config_name)
1679{
1680 __u64 bits;
1681 char buf[100];
1682
1683 perf_pmu__compute_config_masks(pmu);
1684
1986 /*
1987 * Kernel doesn't export any valid format bits.
1988 */
1685 /*
1686 * Kernel doesn't export any valid format bits.
1687 */
1989 if (masks == 0)
1688 if (!pmu->config_masks_present)
1990 return;
1991
1689 return;
1690
1992 bits = config & ~masks;
1691 bits = config & ~pmu->config_masks[config_num];
1993 if (bits == 0)
1994 return;
1995
1996 bitmap_scnprintf((unsigned long *)&bits, sizeof(bits) * 8, buf, sizeof(buf));
1997
1692 if (bits == 0)
1693 return;
1694
1695 bitmap_scnprintf((unsigned long *)&bits, sizeof(bits) * 8, buf, sizeof(buf));
1696
1998 pr_warning("WARNING: event '%s' not valid (bits %s of config "
1697 pr_warning("WARNING: event '%s' not valid (bits %s of %s "
1999 "'%llx' not supported by kernel)!\n",
1698 "'%llx' not supported by kernel)!\n",
2000 name ?: "N/A", buf, config);
1699 name ?: "N/A", buf, config_name, config);
2001}
2002
1700}
1701
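/*
 * Editor's note (illustrative values, using the newer five-argument
 * signature; not part of the original file): the masks come from the sysfs
 * format files.  If the only entry targeting config is "event" with contents
 * "config:0-7", the mask is 0xff, so
 *
 *	perf_pmu__warn_invalid_config(pmu, 0x1ff, "myevent",
 *				      PERF_PMU_FORMAT_VALUE_CONFIG, "config");
 *
 * warns that bit 8 is unsupported, while a config of 0x3c passes silently.
 */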
2003bool perf_pmu__has_hybrid(void)
2004{
2005 if (!hybrid_scanned) {
2006 hybrid_scanned = true;
2007 perf_pmu__scan(NULL);
2008 }
2009
2010 return !list_empty(&perf_pmu__hybrid_pmus);
2011}
2012
2013int perf_pmu__match(char *pattern, char *name, char *tok)
2014{
2015 if (!name)
2016 return -1;
2017
2018 if (fnmatch(pattern, name, 0))
2019 return -1;
2020
2021 if (tok && !perf_pmu__match_ignoring_suffix(name, tok))
2022 return -1;
2023
2024 return 0;
2025}
2026
1702int perf_pmu__match(char *pattern, char *name, char *tok)
1703{
1704 if (!name)
1705 return -1;
1706
1707 if (fnmatch(pattern, name, 0))
1708 return -1;
1709
1710 if (tok && !perf_pmu__match_ignoring_suffix(name, tok))
1711 return -1;
1712
1713 return 0;
1714}
1715
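/*
 * Editor's sketch (PMU names assumed, not part of the original file):
 * perf_pmu__match() combines fnmatch()-style globbing with suffix-tolerant
 * name matching, e.g.
 *
 *	perf_pmu__match("uncore_cha_*", "uncore_cha_2", "uncore_cha") == 0
 *	perf_pmu__match("uncore_cha_*", "uncore_imc_0", "uncore_cha") == -1
 *
 * so a metric can address every instance of an uncore PMU family.
 */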
2027int perf_pmu__cpus_match(struct perf_pmu *pmu, struct perf_cpu_map *cpus,
2028 struct perf_cpu_map **mcpus_ptr,
2029 struct perf_cpu_map **ucpus_ptr)
2030{
2031 struct perf_cpu_map *pmu_cpus = pmu->cpus;
2032 struct perf_cpu_map *matched_cpus, *unmatched_cpus;
2033 struct perf_cpu cpu;
2034 int i, matched_nr = 0, unmatched_nr = 0;
2035
2036 matched_cpus = perf_cpu_map__default_new();
2037 if (!matched_cpus)
2038 return -1;
2039
2040 unmatched_cpus = perf_cpu_map__default_new();
2041 if (!unmatched_cpus) {
2042 perf_cpu_map__put(matched_cpus);
2043 return -1;
2044 }
2045
2046 perf_cpu_map__for_each_cpu(cpu, i, cpus) {
2047 if (!perf_cpu_map__has(pmu_cpus, cpu))
2048 RC_CHK_ACCESS(unmatched_cpus)->map[unmatched_nr++] = cpu;
2049 else
2050 RC_CHK_ACCESS(matched_cpus)->map[matched_nr++] = cpu;
2051 }
2052
2053 perf_cpu_map__set_nr(unmatched_cpus, unmatched_nr);
2054 perf_cpu_map__set_nr(matched_cpus, matched_nr);
2055 *mcpus_ptr = matched_cpus;
2056 *ucpus_ptr = unmatched_cpus;
2057 return 0;
2058}
2059
2060double __weak perf_pmu__cpu_slots_per_cycle(void)
2061{
2062 return NAN;
2063}
2064
2065int perf_pmu__event_source_devices_scnprintf(char *pathname, size_t size)
2066{
2067 const char *sysfs = sysfs__mountpoint();

--- 37 unchanged lines hidden ---

2105int perf_pmu__pathname_fd(int dirfd, const char *pmu_name, const char *filename, int flags)
2106{
2107 char path[PATH_MAX];
2108
2109 scnprintf(path, sizeof(path), "%s/%s", pmu_name, filename);
2110 return openat(dirfd, path, flags);
2111}
2112
1716double __weak perf_pmu__cpu_slots_per_cycle(void)
1717{
1718 return NAN;
1719}
1720
1721int perf_pmu__event_source_devices_scnprintf(char *pathname, size_t size)
1722{
1723 const char *sysfs = sysfs__mountpoint();

--- 37 unchanged lines hidden ---

1761int perf_pmu__pathname_fd(int dirfd, const char *pmu_name, const char *filename, int flags)
1762{
1763 char path[PATH_MAX];
1764
1765 scnprintf(path, sizeof(path), "%s/%s", pmu_name, filename);
1766 return openat(dirfd, path, flags);
1767}
1768
2113static void perf_pmu__delete(struct perf_pmu *pmu)
1769void perf_pmu__delete(struct perf_pmu *pmu)
2114{
2115 perf_pmu__del_formats(&pmu->format);
2116 perf_pmu__del_aliases(pmu);
2117 perf_pmu__del_caps(pmu);
2118
2119 perf_cpu_map__put(pmu->cpus);
2120
2121 zfree(&pmu->default_config);
2122 zfree(&pmu->name);
2123 zfree(&pmu->alias_name);
2124 free(pmu);
2125}
1770{
1771 perf_pmu__del_formats(&pmu->format);
1772 perf_pmu__del_aliases(pmu);
1773 perf_pmu__del_caps(pmu);
1774
1775 perf_cpu_map__put(pmu->cpus);
1776
1777 zfree(&pmu->default_config);
1778 zfree(&pmu->name);
1779 zfree(&pmu->alias_name);
1780 free(pmu);
1781}
2126
2127void perf_pmu__destroy(void)
2128{
2129 struct perf_pmu *pmu, *tmp;
2130
2131 list_for_each_entry_safe(pmu, tmp, &pmus, list) {
2132 list_del(&pmu->list);
2133 list_del(&pmu->hybrid_list);
2134
2135 perf_pmu__delete(pmu);
2136 }
2137}