// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <api/fs/fs.h>
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/coresight-pmu.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "cs-etm.h"
#include "../../../util/debug.h"
#include "../../../util/record.h"
#include "../../../util/auxtrace.h"
#include "../../../util/cpumap.h"
#include "../../../util/event.h"
#include "../../../util/evlist.h"
#include "../../../util/evsel.h"
#include "../../../util/perf_api_probe.h"
#include "../../../util/evsel_config.h"
#include "../../../util/pmus.h"
#include "../../../util/cs-etm.h"
#include <internal/lib.h> // page_size
#include "../../../util/session.h"

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <sys/stat.h>

struct cs_etm_recording {
	struct auxtrace_record	itr;
	struct perf_pmu		*cs_etm_pmu;
	struct evlist		*evlist;
	bool			snapshot_mode;
	size_t			snapshot_size;
};

static const char * const metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
	[CS_ETM_ETMCCER]	= "mgmt/etmccer",
	[CS_ETM_ETMIDR]		= "mgmt/etmidr",
};

static const char * const metadata_etmv4_ro[] = {
	[CS_ETMV4_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETMV4_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETMV4_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETMV4_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETMV4_TRCAUTHSTATUS]	= "mgmt/trcauthstatus",
	[CS_ETMV4_TS_SOURCE]		= "ts_source",
};

static const char * const metadata_ete_ro[] = {
	[CS_ETE_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETE_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETE_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETE_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETE_TRCAUTHSTATUS]		= "mgmt/trcauthstatus",
	[CS_ETE_TRCDEVARCH]		= "mgmt/trcdevarch",
	[CS_ETE_TS_SOURCE]		= "ts_source",
};

enum cs_etm_version { CS_NOT_PRESENT, CS_ETMV3, CS_ETMV4, CS_ETE };

static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu);
static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val);
static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path);

static enum cs_etm_version cs_etm_get_version(struct perf_pmu *cs_etm_pmu,
					      struct perf_cpu cpu)
{
	if (cs_etm_is_ete(cs_etm_pmu, cpu))
		return CS_ETE;
	else if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]))
		return CS_ETMV4;
	else if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMCCER]))
		return CS_ETMV3;

	return CS_NOT_PRESENT;
}
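
/*
 * Illustration of the probing above for a hypothetical system (exact sysfs
 * contents vary by hardware and kernel; paths are relative to the PMU's
 * sysfs directory, /sys/bus/event_source/devices/cs_etm/):
 *
 *   cpu0/trcidr/trcidr0  exists                      -> at least ETMv4
 *   cpu0/mgmt/trcdevarch exists and decodes to ETE   -> upgraded to CS_ETE
 *   cpu0/mgmt/etmccer    exists (and no trcidr0)     -> CS_ETMV3
 *   none of the above                                -> CS_NOT_PRESENT
 */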
static int cs_etm_validate_context_id(struct perf_pmu *cs_etm_pmu, struct evsel *evsel,
				      struct perf_cpu cpu)
{
	int err;
	__u64 val;
	u64 contextid = evsel->core.attr.config &
		(perf_pmu__format_bits(cs_etm_pmu, "contextid") |
		 perf_pmu__format_bits(cs_etm_pmu, "contextid1") |
		 perf_pmu__format_bits(cs_etm_pmu, "contextid2"));

	if (!contextid)
		return 0;

	/* Not supported in ETMv3 */
	if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_ETMV3) {
		pr_err("%s: contextid not supported in ETMv3, disable with %s/contextid=0/\n",
		       CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* Get a handle on TRCIDR2 */
	err = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2], &val);
	if (err)
		return err;

	if (contextid &
	    perf_pmu__format_bits(cs_etm_pmu, "contextid1")) {
		/*
		 * TRCIDR2.CIDSIZE, bit [9-5], indicates whether contextID
		 * tracing is supported:
		 *  0b00000 Context ID tracing is not supported.
		 *  0b00100 Maximum of 32-bit Context ID size.
		 *  All other values are reserved.
		 */
		if (BMVAL(val, 5, 9) != 0x4) {
			pr_err("%s: CONTEXTIDR_EL1 isn't supported, disable with %s/contextid1=0/\n",
			       CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
			return -EINVAL;
		}
	}

	if (contextid &
	    perf_pmu__format_bits(cs_etm_pmu, "contextid2")) {
		/*
		 * TRCIDR2.VMIDOPT[30:29] != 0 and
		 * TRCIDR2.VMIDSIZE[14:10] == 0b00100 (32bit virtual contextid)
		 * We can't support CONTEXTIDR in VMID if the size of the
		 * virtual context id is < 32bit.
		 * Any value of VMIDSIZE >= 4 (i.e, > 32bit) is fine for us.
		 */
		if (!BMVAL(val, 29, 30) || BMVAL(val, 10, 14) < 4) {
			pr_err("%s: CONTEXTIDR_EL2 isn't supported, disable with %s/contextid2=0/\n",
			       CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
			return -EINVAL;
		}
	}

	return 0;
}

static int cs_etm_validate_timestamp(struct perf_pmu *cs_etm_pmu, struct evsel *evsel,
				     struct perf_cpu cpu)
{
	int err;
	__u64 val;

	if (!(evsel->core.attr.config &
	      perf_pmu__format_bits(cs_etm_pmu, "timestamp")))
		return 0;

	if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_ETMV3) {
		pr_err("%s: timestamp not supported in ETMv3, disable with %s/timestamp=0/\n",
		       CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* Get a handle on TRCIDR0 */
	err = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0], &val);
	if (err)
		return err;

	/*
	 * TRCIDR0.TSSIZE, bit [28-24], indicates whether global timestamping
	 * is supported:
	 *  0b00000 Global timestamping is not implemented
	 *  0b00110 Implementation supports a maximum timestamp of 48bits.
	 *  0b01000 Implementation supports a maximum timestamp of 64bits.
	 */
	val &= GENMASK(28, 24);
	if (!val)
		return -EINVAL;

	return 0;
}
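
/*
 * Worked example of the TRCIDR2 checks above, using an illustrative value
 * (0x20001080 is assumed purely for the example, not taken from any
 * particular implementation):
 *
 *   CIDSIZE  = BMVAL(0x20001080, 5, 9)   = 0b00100 -> 32-bit Context ID
 *   VMIDSIZE = BMVAL(0x20001080, 10, 14) = 0b00100 -> 32-bit VMID
 *   VMIDOPT  = BMVAL(0x20001080, 29, 30) = 0b01    -> VMID can carry CONTEXTIDR_EL2
 *
 * A CPU reporting this value would pass both the contextid1 and contextid2
 * validation.
 */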
static struct perf_pmu *cs_etm_get_pmu(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);

	return ptr->cs_etm_pmu;
}

/*
 * Check whether the requested timestamp and contextid options should be
 * available on all requested CPUs and if not, tell the user how to override.
 * The kernel will silently disable any unavailable options so a warning here
 * first is better. In theory the kernel could still disable the option for
 * some other reason so this is best effort only.
 */
static int cs_etm_validate_config(struct perf_pmu *cs_etm_pmu,
				  struct evsel *evsel)
{
	int idx, err = 0;
	struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
	struct perf_cpu_map *intersect_cpus;
	struct perf_cpu cpu;

	/*
	 * Validate the options on each CPU the trace can run on. In the
	 * per-cpu case, those are the requested CPUs that are online. In the
	 * per-thread case the CPU map holds the "any" CPU value; the traced
	 * program may then run on any CPU, so validate against all online
	 * CPUs rather than skipping validation.
	 */
	if (!perf_cpu_map__has_any_cpu(event_cpus)) {
		struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();

		intersect_cpus = perf_cpu_map__intersect(event_cpus, online_cpus);
		perf_cpu_map__put(online_cpus);
	} else {
		intersect_cpus = perf_cpu_map__new_online_cpus();
	}

	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, intersect_cpus) {
		if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_NOT_PRESENT) {
			pr_err("%s: Not found on CPU %d. Check hardware and firmware support and that all CoreSight drivers are loaded\n",
			       CORESIGHT_ETM_PMU_NAME, cpu.cpu);
			err = -EINVAL;
			break;
		}
		err = cs_etm_validate_context_id(cs_etm_pmu, evsel, cpu);
		if (err)
			break;

		err = cs_etm_validate_timestamp(cs_etm_pmu, evsel, cpu);
		if (err)
			break;
	}

	perf_cpu_map__put(intersect_cpus);
	return err;
}

static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
					 struct record_opts *opts,
					 const char *str)
{
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;
	ptr->snapshot_size = snapshot_size;

	return 0;
}

static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
				struct evsel *evsel)
{
	char msg[BUFSIZ], path[PATH_MAX], *sink;
	struct evsel_config_term *term;
	int ret = -EINVAL;
	u32 hash;

	/* The lower 32 bits of config2 carry the sink hash - already set? */
	if (evsel->core.attr.config2 & GENMASK(31, 0))
		return 0;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type != EVSEL__CONFIG_TERM_DRV_CFG)
			continue;

		sink = term->val.str;
		snprintf(path, PATH_MAX, "sinks/%s", sink);

		ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
		if (ret != 1) {
			if (errno == ENOENT)
				pr_err("Couldn't find sink \"%s\" on event %s\n"
				       "Missing kernel or device support?\n\n"
				       "Hint: An appropriate sink will be picked automatically if one isn't specified.\n",
				       sink, evsel__name(evsel));
			else
				pr_err("Failed to set sink \"%s\" on event %s with %d (%s)\n",
				       sink, evsel__name(evsel), errno,
				       str_error_r(errno, msg, sizeof(msg)));
			return ret;
		}

		evsel->core.attr.config2 |= hash;
		return 0;
	}

	/*
	 * No sink was provided on the command line - allow the CoreSight
	 * system to look for a default
	 */
	return 0;
}
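
/*
 * Illustrative usage of the sink config term handled above (the sink name
 * tmc_etr0 is only an example; real names are system-specific and listed
 * under /sys/bus/event_source/devices/cs_etm/sinks/):
 *
 *   perf record -e cs_etm/@tmc_etr0/u --per-thread -- ls
 *
 * If no @sink term is given, the CoreSight core picks a default sink.
 */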
static int cs_etm_recording_options(struct auxtrace_record *itr,
				    struct evlist *evlist,
				    struct record_opts *opts)
{
	int ret;
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct evsel *evsel, *cs_etm_evsel = NULL;
	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
	bool privileged = perf_event_paranoid_check(-1);
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == cs_etm_pmu->type) {
			if (cs_etm_evsel) {
				pr_err("There may be only one %s event\n",
				       CORESIGHT_ETM_PMU_NAME);
				return -EINVAL;
			}
			cs_etm_evsel = evsel;
		}
	}

	/* No need to continue if no event of interest was found */
	if (!cs_etm_evsel)
		return 0;

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	if (!record_opts__no_switch_events(opts) &&
	    perf_can_record_switch_events())
		opts->record_switch_events = true;

	cs_etm_evsel->needs_auxtrace_mmap = true;
	opts->full_auxtrace = true;

	ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);
	if (ret)
		return ret;

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with %s\n",
		       CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* We are in snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * No size was given to '-S' or '-m,', so go with
		 * the default
		 */
		if (!opts->auxtrace_snapshot_size &&
		    !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages =
							KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}

		/*
		 * '-m,xyz' was specified but no snapshot size, so make the
		 * snapshot size as big as the auxtrace mmap area.
		 */
		if (!opts->auxtrace_snapshot_size) {
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		}

		/*
		 * '-Sxyz' was specified but no auxtrace mmap area, so make the
		 * auxtrace mmap area big enough to fit the requested snapshot
		 * size.
		 */
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}

		/* Snapshot size can't be bigger than the auxtrace area */
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/* Something went wrong somewhere - this shouldn't happen */
		if (!opts->auxtrace_snapshot_size ||
		    !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
	}

	/* Buffer sizes weren't specified with '-m,xyz' so give some defaults */
	if (!opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	if (opts->auxtrace_snapshot_mode)
		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
			  opts->auxtrace_snapshot_size);
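
	/*
	 * Illustrative snapshot-mode invocation driving the sizing logic
	 * above (sizes and the traced command are examples only):
	 *
	 *   perf record -e cs_etm//u -S -m,4M -- ./app &
	 *   kill -USR2 $!   # each SIGUSR2 dumps one snapshot of the AUX area
	 *
	 * '-S' with no size plus '-m,4M' makes the snapshot size default to
	 * the 4MiB AUX mmap area computed above.
	 */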
	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace
	 * event must come first.
	 */
	evlist__to_front(evlist, cs_etm_evsel);

	/*
	 * Get the CPU on the sample - need it to associate trace ID in the
	 * AUX_OUTPUT_HW_ID event, and the AUX event for per-cpu mmaps.
	 */
	evsel__set_sample_bit(cs_etm_evsel, CPU);

	/*
	 * Also for the case of per-cpu mmaps, the contextID is needed in
	 * order to be notified when a context switch happens.
	 */
	if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
		evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
					   "timestamp", 1);
		evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
					   "contextid", 1);
	}

	/*
	 * When the option '--timestamp' or '-T' is enabled, the PERF_SAMPLE_TIME
	 * bit is set for all events. In this case, always enable Arm CoreSight
	 * timestamp tracing.
	 */
	if (opts->sample_time_set)
		evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
					   "timestamp", 1);

	/* Add a dummy event to keep tracking */
	err = parse_event(evlist, "dummy:u");
	if (err)
		goto out;
	evsel = evlist__last(evlist);
	evlist__set_tracking_event(evlist, evsel);
	evsel->core.attr.freq = 0;
	evsel->core.attr.sample_period = 1;

	/* In per-cpu case, always need the time of mmap events etc */
	if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus))
		evsel__set_sample_bit(evsel, TIME);

	err = cs_etm_validate_config(cs_etm_pmu, cs_etm_evsel);
out:
	return err;
}

static u64 cs_etm_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct evlist *evlist = ptr->evlist;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == cs_etm_pmu->type) {
			/*
			 * Variable perf_event_attr::config is assigned to
			 * ETMv3/PTM. The bit fields have been made to match
			 * the ETMv3.5 ETMCR register specification. See the
			 * PMU_FORMAT_ATTR() declarations in
			 * drivers/hwtracing/coresight/coresight-etm-perf.c
			 * for details.
			 */
			config = evsel->core.attr.config;
			break;
		}
	}

	return config;
}

#ifndef BIT
#define BIT(N) (1UL << (N))
#endif

static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	u64 config_opts = 0;

	/*
	 * The perf event variable config bits represent both
	 * the command line options and register programming
	 * bits in ETMv3/PTM. For ETMv4 we must remap options
	 * to real bits
	 */
	config_opts = cs_etm_get_config(itr);
	if (config_opts & BIT(ETM_OPT_CYCACC))
		config |= BIT(ETM4_CFG_BIT_CYCACC);
	if (config_opts & BIT(ETM_OPT_CTXTID))
		config |= BIT(ETM4_CFG_BIT_CTXTID);
	if (config_opts & BIT(ETM_OPT_TS))
		config |= BIT(ETM4_CFG_BIT_TS);
	if (config_opts & BIT(ETM_OPT_RETSTK))
		config |= BIT(ETM4_CFG_BIT_RETSTK);
	if (config_opts & BIT(ETM_OPT_CTXTID2))
		config |= BIT(ETM4_CFG_BIT_VMID) |
			  BIT(ETM4_CFG_BIT_VMID_OPT);
	if (config_opts & BIT(ETM_OPT_BRANCH_BROADCAST))
		config |= BIT(ETM4_CFG_BIT_BB);

	return config;
}
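
/*
 * For example, 'perf record -e cs_etm/cycacc,timestamp/u ...' sets the
 * ETM_OPT_CYCACC and ETM_OPT_TS bits in attr.config using the ETMv3/ETMCR
 * layout, which the remap above turns into the ETM4_CFG_BIT_CYCACC and
 * ETM4_CFG_BIT_TS bits of the TRCCONFIGR image saved in the metadata.
 */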
static size_t
cs_etm_info_priv_size(struct auxtrace_record *itr,
		      struct evlist *evlist)
{
	int idx;
	int etmv3 = 0, etmv4 = 0, ete = 0;
	struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
	struct perf_cpu_map *intersect_cpus;
	struct perf_cpu cpu;
	struct perf_pmu *cs_etm_pmu = cs_etm_get_pmu(itr);

	if (!perf_cpu_map__has_any_cpu(event_cpus)) {
		/* The cpu map is not "any" CPU, we have specific CPUs to work with */
		struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();

		intersect_cpus = perf_cpu_map__intersect(event_cpus, online_cpus);
		perf_cpu_map__put(online_cpus);
	} else {
		/* Event can be "any" CPU so count all online CPUs */
		intersect_cpus = perf_cpu_map__new_online_cpus();
	}
	/* Count number of each type of ETM. Don't count if that CPU has CS_NOT_PRESENT. */
	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, intersect_cpus) {
		enum cs_etm_version v = cs_etm_get_version(cs_etm_pmu, cpu);

		ete += v == CS_ETE;
		etmv4 += v == CS_ETMV4;
		etmv3 += v == CS_ETMV3;
	}
	perf_cpu_map__put(intersect_cpus);

	return (CS_ETM_HEADER_SIZE +
		(ete * CS_ETE_PRIV_SIZE) +
		(etmv4 * CS_ETMV4_PRIV_SIZE) +
		(etmv3 * CS_ETMV3_PRIV_SIZE));
}

static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val)
{
	char pmu_path[PATH_MAX];
	int scan;

	/* Get RO metadata from sysfs */
	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);

	scan = perf_pmu__scan_file(pmu, pmu_path, "%llx", val);
	if (scan != 1) {
		pr_err("%s: error reading: %s\n", __func__, pmu_path);
		return -EINVAL;
	}

	return 0;
}

static int cs_etm_get_ro_signed(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path,
				__u64 *out_val)
{
	char pmu_path[PATH_MAX];
	int scan;
	int val = 0;

	/* Get RO metadata from sysfs */
	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);

	scan = perf_pmu__scan_file(pmu, pmu_path, "%d", &val);
	if (scan != 1) {
		pr_err("%s: error reading: %s\n", __func__, pmu_path);
		return -EINVAL;
	}

	*out_val = (__u64) val;
	return 0;
}

static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path)
{
	char pmu_path[PATH_MAX];

	/* Get RO metadata from sysfs */
	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);

	return perf_pmu__file_exists(pmu, pmu_path);
}

#define TRCDEVARCH_ARCHPART_SHIFT 0
#define TRCDEVARCH_ARCHPART_MASK  GENMASK(11, 0)
#define TRCDEVARCH_ARCHPART(x)    (((x) & TRCDEVARCH_ARCHPART_MASK) >> TRCDEVARCH_ARCHPART_SHIFT)

#define TRCDEVARCH_ARCHVER_SHIFT 12
#define TRCDEVARCH_ARCHVER_MASK  GENMASK(15, 12)
#define TRCDEVARCH_ARCHVER(x)    (((x) & TRCDEVARCH_ARCHVER_MASK) >> TRCDEVARCH_ARCHVER_SHIFT)

static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu)
{
	__u64 trcdevarch;

	if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH]))
		return false;

	if (cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH], &trcdevarch))
		return false;

	/*
	 * ETE if ARCHVER is 5 (ARCHVER is 4 for ETM) and ARCHPART is 0xA13.
	 * See ETM_DEVARCH_ETE_ARCH in coresight-etm4x.h
	 */
	return TRCDEVARCH_ARCHVER(trcdevarch) == 5 && TRCDEVARCH_ARCHPART(trcdevarch) == 0xA13;
}
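
/*
 * Example decode for the check above, assuming an illustrative ETE
 * TRCDEVARCH value of 0x47705a13:
 *
 *   TRCDEVARCH_ARCHPART(0x47705a13) = 0xa13 -> ETM family
 *   TRCDEVARCH_ARCHVER(0x47705a13)  = 5     -> ETE (4 would mean ETMv4)
 *
 * so cs_etm_is_ete() returns true for such a CPU.
 */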
static __u64 cs_etm_get_legacy_trace_id(struct perf_cpu cpu)
{
	return CORESIGHT_LEGACY_CPU_TRACE_ID(cpu.cpu);
}
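
/*
 * The legacy scheme hands out fixed per-CPU trace IDs (see
 * CORESIGHT_LEGACY_CPU_TRACE_ID() in coresight-pmu.h; e.g. CPU 0 -> 0x10,
 * CPU 1 -> 0x12). The headers below also set CORESIGHT_TRACE_ID_UNUSED_FLAG
 * so that newer kernels, which allocate trace IDs dynamically and advertise
 * them via PERF_RECORD_AUX_OUTPUT_HW_ID, can override these placeholders.
 */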
static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr, struct perf_cpu cpu)
{
	struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* Get trace configuration register */
	data[CS_ETMV4_TRCCONFIGR] = cs_etmv4_get_config(itr);
	/* traceID set to legacy version, in case new perf running on older system */
	data[CS_ETMV4_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) |
				     CORESIGHT_TRACE_ID_UNUSED_FLAG;

	/* Get read-only information from sysfs */
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0],
		      &data[CS_ETMV4_TRCIDR0]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR1],
		      &data[CS_ETMV4_TRCIDR1]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2],
		      &data[CS_ETMV4_TRCIDR2]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR8],
		      &data[CS_ETMV4_TRCIDR8]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCAUTHSTATUS],
		      &data[CS_ETMV4_TRCAUTHSTATUS]);

	/* Kernels older than 5.19 may not expose ts_source */
	if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE]) ||
	    cs_etm_get_ro_signed(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE],
				 &data[CS_ETMV4_TS_SOURCE])) {
		pr_debug3("[%03d] pmu file 'ts_source' not found. Fallback to safe value (-1)\n",
			  cpu.cpu);
		data[CS_ETMV4_TS_SOURCE] = (__u64) -1;
	}
}

static void cs_etm_save_ete_header(__u64 data[], struct auxtrace_record *itr, struct perf_cpu cpu)
{
	struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* Get trace configuration register */
	data[CS_ETE_TRCCONFIGR] = cs_etmv4_get_config(itr);
	/* traceID set to legacy version, in case new perf running on older system */
	data[CS_ETE_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG;

	/* Get read-only information from sysfs */
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR0], &data[CS_ETE_TRCIDR0]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR1], &data[CS_ETE_TRCIDR1]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR2], &data[CS_ETE_TRCIDR2]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR8], &data[CS_ETE_TRCIDR8]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCAUTHSTATUS],
		      &data[CS_ETE_TRCAUTHSTATUS]);
	/* ETE uses the same registers as ETMv4 plus TRCDEVARCH */
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH],
		      &data[CS_ETE_TRCDEVARCH]);

	/* Kernels older than 5.19 may not expose ts_source */
	if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE]) ||
	    cs_etm_get_ro_signed(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE],
				 &data[CS_ETE_TS_SOURCE])) {
		pr_debug3("[%03d] pmu file 'ts_source' not found. Fallback to safe value (-1)\n",
			  cpu.cpu);
		data[CS_ETE_TS_SOURCE] = (__u64) -1;
	}
}

static void cs_etm_get_metadata(struct perf_cpu cpu, u32 *offset,
				struct auxtrace_record *itr,
				struct perf_record_auxtrace_info *info)
{
	u32 increment, nr_trc_params;
	u64 magic;
	struct perf_pmu *cs_etm_pmu = cs_etm_get_pmu(itr);

	/* First see what kind of tracer this cpu is affined to */
	switch (cs_etm_get_version(cs_etm_pmu, cpu)) {
	case CS_ETE:
		magic = __perf_cs_ete_magic;
		cs_etm_save_ete_header(&info->priv[*offset], itr, cpu);

		/* How much space was used */
		increment = CS_ETE_PRIV_MAX;
		nr_trc_params = CS_ETE_PRIV_MAX - CS_ETM_COMMON_BLK_MAX_V1;
		break;

	case CS_ETMV4:
		magic = __perf_cs_etmv4_magic;
		cs_etm_save_etmv4_header(&info->priv[*offset], itr, cpu);

		/* How much space was used */
		increment = CS_ETMV4_PRIV_MAX;
		nr_trc_params = CS_ETMV4_PRIV_MAX - CS_ETMV4_TRCCONFIGR;
		break;

	case CS_ETMV3:
		magic = __perf_cs_etmv3_magic;
		/* Get configuration register */
		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* traceID set to legacy value in case new perf running on old system */
		info->priv[*offset + CS_ETM_ETMTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) |
							   CORESIGHT_TRACE_ID_UNUSED_FLAG;
		/* Get read-only information from sysfs */
		cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMCCER],
			      &info->priv[*offset + CS_ETM_ETMCCER]);
		cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMIDR],
			      &info->priv[*offset + CS_ETM_ETMIDR]);

		/* How much space was used */
		increment = CS_ETM_PRIV_MAX;
		nr_trc_params = CS_ETM_PRIV_MAX - CS_ETM_ETMCR;
		break;

	default:
	case CS_NOT_PRESENT:
		/* Unreachable, CPUs already validated in cs_etm_validate_config() */
		assert(false);
		return;
	}

	/* Build generic header portion */
	info->priv[*offset + CS_ETM_MAGIC] = magic;
	info->priv[*offset + CS_ETM_CPU] = cpu.cpu;
	info->priv[*offset + CS_ETM_NR_TRC_PARAMS] = nr_trc_params;
	/* Where the next CPU entry should start from */
	*offset += increment;
}
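
/*
 * Illustrative layout of info->priv[] as filled in below, assuming a
 * hypothetical two-CPU system where both CPUs have ETE (indices are u64
 * slots):
 *
 *   [CS_HEADER_VERSION]       header version
 *   [CS_PMU_TYPE_CPUS]        PMU type << 32 | number of CPUs
 *   [CS_ETM_SNAPSHOT]         snapshot mode flag
 *   [3...]                    CPU 0 block: MAGIC, CPU, NR_TRC_PARAMS, then
 *                             the CS_ETE_* registers saved above
 *   [3 + CS_ETE_PRIV_MAX...]  the same block again for CPU 1
 */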
static int cs_etm_info_fill(struct auxtrace_record *itr,
			    struct perf_session *session,
			    struct perf_record_auxtrace_info *info,
			    size_t priv_size)
{
	int i;
	u32 offset;
	u64 nr_cpu, type;
	struct perf_cpu_map *cpu_map;
	struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct perf_cpu cpu;
	int err = -EINVAL;

	if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
		goto out;

	if (!session->evlist->core.nr_mmaps)
		goto out;

	/* If the cpu_map has the "any" CPU all online CPUs are involved */
	if (perf_cpu_map__has_any_cpu(event_cpus)) {
		cpu_map = online_cpus;
	} else {
		/* Make sure all specified CPUs are online */
		perf_cpu_map__for_each_cpu(cpu, i, event_cpus) {
			if (!perf_cpu_map__has(online_cpus, cpu))
				goto out;
		}

		cpu_map = event_cpus;
	}

	nr_cpu = perf_cpu_map__nr(cpu_map);
	/* Get PMU type as dynamically assigned by the core */
	type = cs_etm_pmu->type;

	/* First fill out the session header */
	info->type = PERF_AUXTRACE_CS_ETM;
	info->priv[CS_HEADER_VERSION] = CS_HEADER_CURRENT_VERSION;
	info->priv[CS_PMU_TYPE_CPUS] = type << 32;
	info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
	info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;

	offset = CS_ETM_SNAPSHOT + 1;

	perf_cpu_map__for_each_cpu(cpu, i, cpu_map) {
		assert(offset < priv_size);
		cs_etm_get_metadata(cpu, &offset, itr, info);
	}

	err = 0;
out:
	perf_cpu_map__put(online_cpus);
	return err;
}

static int cs_etm_snapshot_start(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
			return evsel__disable(evsel);
	}
	return -EINVAL;
}

static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
			return evsel__enable(evsel);
	}
	return -EINVAL;
}

static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
{
	/* Build a 64-bit random reference from two 32-bit rand() halves */
	return (((u64) rand() <<  0) & 0x00000000FFFFFFFFull) |
	       (((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
}

static void cs_etm_recording_free(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	free(ptr);
}

struct auxtrace_record *cs_etm_record_init(int *err)
{
	struct perf_pmu *cs_etm_pmu;
	struct cs_etm_recording *ptr;

	cs_etm_pmu = perf_pmus__find(CORESIGHT_ETM_PMU_NAME);

	if (!cs_etm_pmu) {
		*err = -EINVAL;
		goto out;
	}

	ptr = zalloc(sizeof(struct cs_etm_recording));
	if (!ptr) {
		*err = -ENOMEM;
		goto out;
	}

	ptr->cs_etm_pmu			= cs_etm_pmu;
	ptr->itr.pmu			= cs_etm_pmu;
	ptr->itr.parse_snapshot_options	= cs_etm_parse_snapshot_options;
	ptr->itr.recording_options	= cs_etm_recording_options;
	ptr->itr.info_priv_size		= cs_etm_info_priv_size;
	ptr->itr.info_fill		= cs_etm_info_fill;
	ptr->itr.snapshot_start		= cs_etm_snapshot_start;
	ptr->itr.snapshot_finish	= cs_etm_snapshot_finish;
	ptr->itr.reference		= cs_etm_reference;
	ptr->itr.free			= cs_etm_recording_free;
	ptr->itr.read_finish		= auxtrace_record__read_finish;

	*err = 0;
	return &ptr->itr;
out:
	return NULL;
}

/*
 * Set a default config to enable the user changed config tracking mechanism
 * (CFG_CHG and evsel__set_config_if_unset()). If no default is set then user
 * changes aren't tracked.
 */
void
cs_etm_get_default_config(const struct perf_pmu *pmu __maybe_unused,
			  struct perf_event_attr *attr)
{
	attr->sample_period = 1;
}