// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <dirent.h>
#include <stdbool.h>
#include <linux/coresight-pmu.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include "../../../util/auxtrace.h"
#include "../../../util/debug.h"
#include "../../../util/evlist.h"
#include "../../../util/pmu.h"
#include "cs-etm.h"
#include "arm-spe.h"
#include "hisi-ptt.h"

static struct perf_pmu **find_all_arm_spe_pmus(int *nr_spes, int *err)
{
	struct perf_pmu **arm_spe_pmus = NULL;
	int ret, i, nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	/* arm_spe_xxxxxxxxx\0 */
	char arm_spe_pmu_name[sizeof(ARM_SPE_PMU_NAME) + 10];

	arm_spe_pmus = zalloc(sizeof(struct perf_pmu *) * nr_cpus);
	if (!arm_spe_pmus) {
		pr_err("spes alloc failed\n");
		*err = -ENOMEM;
		return NULL;
	}

	for (i = 0; i < nr_cpus; i++) {
		ret = sprintf(arm_spe_pmu_name, "%s%d", ARM_SPE_PMU_NAME, i);
		if (ret < 0) {
			pr_err("sprintf failed\n");
			*err = -ENOMEM;
			return NULL;
		}

		arm_spe_pmus[*nr_spes] = perf_pmu__find(arm_spe_pmu_name);
		if (arm_spe_pmus[*nr_spes]) {
			pr_debug2("%s %d: arm_spe_pmu %d type %d name %s\n",
				  __func__, __LINE__, *nr_spes,
				  arm_spe_pmus[*nr_spes]->type,
				  arm_spe_pmus[*nr_spes]->name);
			(*nr_spes)++;
		}
	}

	return arm_spe_pmus;
}

static struct perf_pmu **find_all_hisi_ptt_pmus(int *nr_ptts, int *err)
{
	const char *sysfs = sysfs__mountpoint();
	struct perf_pmu **hisi_ptt_pmus = NULL;
	struct dirent *dent;
	char path[PATH_MAX];
	DIR *dir = NULL;
	int idx = 0;

	snprintf(path, PATH_MAX, "%s" EVENT_SOURCE_DEVICE_PATH, sysfs);
	dir = opendir(path);
	if (!dir) {
		pr_err("can't read directory '%s'\n", EVENT_SOURCE_DEVICE_PATH);
		*err = -EINVAL;
		return NULL;
	}

	while ((dent = readdir(dir))) {
		if (strstr(dent->d_name, HISI_PTT_PMU_NAME))
			(*nr_ptts)++;
	}

	if (!(*nr_ptts))
		goto out;

	hisi_ptt_pmus = zalloc(sizeof(struct perf_pmu *) * (*nr_ptts));
	if (!hisi_ptt_pmus) {
		pr_err("hisi_ptt alloc failed\n");
		*err = -ENOMEM;
		goto out;
	}

	rewinddir(dir);
	while ((dent = readdir(dir))) {
		if (strstr(dent->d_name, HISI_PTT_PMU_NAME) && idx < *nr_ptts) {
			hisi_ptt_pmus[idx] = perf_pmu__find(dent->d_name);
			if (hisi_ptt_pmus[idx])
				idx++;
		}
	}

out:
	closedir(dir);
	return hisi_ptt_pmus;
}

static struct perf_pmu *find_pmu_for_event(struct perf_pmu **pmus,
					   int pmu_nr, struct evsel *evsel)
{
	int i;

	if (!pmus)
		return NULL;

	for (i = 0; i < pmu_nr; i++) {
		if (evsel->core.attr.type == pmus[i]->type)
			return pmus[i];
	}

	return NULL;
}

struct auxtrace_record
*auxtrace_record__init(struct evlist *evlist, int *err)
{
	struct perf_pmu *cs_etm_pmu = NULL;
	struct perf_pmu **arm_spe_pmus = NULL;
	struct perf_pmu **hisi_ptt_pmus = NULL;
	struct evsel *evsel;
	struct perf_pmu *found_etm = NULL;
	struct perf_pmu *found_spe = NULL;
	struct perf_pmu *found_ptt = NULL;
	int auxtrace_event_cnt = 0;
	int nr_spes = 0;
	int nr_ptts = 0;

	if (!evlist)
		return NULL;

	cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
	arm_spe_pmus = find_all_arm_spe_pmus(&nr_spes, err);
	hisi_ptt_pmus = find_all_hisi_ptt_pmus(&nr_ptts, err);

	evlist__for_each_entry(evlist, evsel) {
		if (cs_etm_pmu && !found_etm)
			found_etm = find_pmu_for_event(&cs_etm_pmu, 1, evsel);

		if (arm_spe_pmus && !found_spe)
			found_spe = find_pmu_for_event(arm_spe_pmus, nr_spes, evsel);

		if (hisi_ptt_pmus && !found_ptt)
			found_ptt = find_pmu_for_event(hisi_ptt_pmus, nr_ptts, evsel);
	}

	free(arm_spe_pmus);
	free(hisi_ptt_pmus);

	if (found_etm)
		auxtrace_event_cnt++;

	if (found_spe)
		auxtrace_event_cnt++;

	if (found_ptt)
		auxtrace_event_cnt++;

	if (auxtrace_event_cnt > 1) {
		pr_err("Concurrent AUX trace operation not currently supported\n");
		*err = -EOPNOTSUPP;
		return NULL;
	}

	if (found_etm)
		return cs_etm_record_init(err);

#if defined(__aarch64__)
	if (found_spe)
		return arm_spe_recording_init(err, found_spe);

	if (found_ptt)
		return hisi_ptt_recording_init(err, found_ptt);
#endif

	/*
	 * Clear 'err' even if we haven't found an event - that way perf
	 * record can still be used even if tracers aren't present. The NULL
	 * return value will take care of telling the infrastructure HW tracing
	 * isn't available.
	 */
	*err = 0;
	return NULL;
}

#if defined(__arm__)
u64 compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 result;

	__asm__ __volatile__(
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&pc->aux_head), "Qo" (pc->aux_head)
	);

	return result;
}

int compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;

	/* Ensure all reads are done before we write the tail out */
	smp_mb();

	__asm__ __volatile__(
"	strd	%2, %H2, [%1]"
	: "=Qo" (pc->aux_tail)
	: "r" (&pc->aux_tail), "r" (tail)
	);

	return 0;
}
#endif