// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <api/fs/fs.h>
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/coresight-pmu.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "cs-etm.h"
#include "../../../util/debug.h"
#include "../../../util/record.h"
#include "../../../util/auxtrace.h"
#include "../../../util/cpumap.h"
#include "../../../util/event.h"
#include "../../../util/evlist.h"
#include "../../../util/evsel.h"
#include "../../../util/perf_api_probe.h"
#include "../../../util/evsel_config.h"
#include "../../../util/pmus.h"
#include "../../../util/cs-etm.h"
#include <internal/lib.h> // page_size
#include "../../../util/session.h"

#include <errno.h>
#include <stdlib.h>
#include <sys/stat.h>

/*
 * Per-session state for CoreSight ETM AUX trace recording.  The embedded
 * auxtrace_record 'itr' carries the callbacks invoked by the perf record
 * core; container_of() is used throughout to get back to this struct.
 */
struct cs_etm_recording {
	struct auxtrace_record	itr;		/* generic auxtrace callbacks */
	struct perf_pmu		*cs_etm_pmu;	/* the cs_etm PMU found at init */
	struct evlist		*evlist;	/* set in cs_etm_recording_options() */
	bool			snapshot_mode;	/* true when snapshot mode requested */
	size_t			snapshot_size;	/* snapshot size in bytes, 0 = default */
};

/*
 * sysfs file names (relative to the per-CPU PMU directory) of the read-only
 * ETMv3 registers that are saved into the perf.data auxtrace info header.
 */
static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
	[CS_ETM_ETMCCER]	= "mgmt/etmccer",
	[CS_ETM_ETMIDR]		= "mgmt/etmidr",
};

/* Read-only sysfs metadata files for ETMv4 tracers */
static const char * const metadata_etmv4_ro[] = {
	[CS_ETMV4_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETMV4_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETMV4_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETMV4_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETMV4_TRCAUTHSTATUS]	= "mgmt/trcauthstatus",
	[CS_ETMV4_TS_SOURCE]		= "ts_source",
};

/* Read-only sysfs metadata files for ETE tracers (ETMv4 set plus TRCDEVARCH) */
static const char * const metadata_ete_ro[] = {
	[CS_ETE_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETE_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETE_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETE_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETE_TRCAUTHSTATUS]		= "mgmt/trcauthstatus",
	[CS_ETE_TRCDEVARCH]		= "mgmt/trcdevarch",
	[CS_ETE_TS_SOURCE]		= "ts_source",
};

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, struct perf_cpu cpu);
static bool cs_etm_is_ete(struct auxtrace_record *itr, struct perf_cpu cpu);
static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val);
static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path);

/*
 * Validate that the "contextid"/"contextid1"/"contextid2" config bits
 * requested for this event are supported by the tracer on 'cpu', printing
 * an actionable error when they are not.  Returns 0 on success or a
 * negative error code.
 */
static int cs_etm_validate_context_id(struct auxtrace_record *itr, struct evsel *evsel,
				      struct perf_cpu cpu)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	int err;
	__u64 val;
	/* attr.config bits covered by any of the three contextid format terms */
	u64 contextid = evsel->core.attr.config &
			(perf_pmu__format_bits(cs_etm_pmu, "contextid") |
			 perf_pmu__format_bits(cs_etm_pmu, "contextid1") |
			 perf_pmu__format_bits(cs_etm_pmu, "contextid2"));

	/* Nothing requested, nothing to validate */
	if (!contextid)
		return 0;

	/* Not supported in etmv3 */
	if (!cs_etm_is_etmv4(itr, cpu)) {
		pr_err("%s: contextid not supported in ETMv3, disable with %s/contextid=0/\n",
		       CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* Get a handle on TRCIDR2 */
	err = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2], &val);
	if (err)
		return err;

	if (contextid &
	    perf_pmu__format_bits(cs_etm_pmu, "contextid1")) {
		/*
		 * TRCIDR2.CIDSIZE, bit [9-5], indicates whether contextID
		 * tracing is supported:
		 * 0b00000 Context ID tracing is not supported.
		 * 0b00100 Maximum of 32-bit Context ID size.
		 * All other values are reserved.
110 */ 111 if (BMVAL(val, 5, 9) != 0x4) { 112 pr_err("%s: CONTEXTIDR_EL1 isn't supported, disable with %s/contextid1=0/\n", 113 CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME); 114 return -EINVAL; 115 } 116 } 117 118 if (contextid & 119 perf_pmu__format_bits(cs_etm_pmu, "contextid2")) { 120 /* 121 * TRCIDR2.VMIDOPT[30:29] != 0 and 122 * TRCIDR2.VMIDSIZE[14:10] == 0b00100 (32bit virtual contextid) 123 * We can't support CONTEXTIDR in VMID if the size of the 124 * virtual context id is < 32bit. 125 * Any value of VMIDSIZE >= 4 (i.e, > 32bit) is fine for us. 126 */ 127 if (!BMVAL(val, 29, 30) || BMVAL(val, 10, 14) < 4) { 128 pr_err("%s: CONTEXTIDR_EL2 isn't supported, disable with %s/contextid2=0/\n", 129 CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME); 130 return -EINVAL; 131 } 132 } 133 134 return 0; 135 } 136 137 static int cs_etm_validate_timestamp(struct auxtrace_record *itr, struct evsel *evsel, 138 struct perf_cpu cpu) 139 { 140 struct cs_etm_recording *ptr = 141 container_of(itr, struct cs_etm_recording, itr); 142 struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; 143 int err; 144 __u64 val; 145 146 if (!(evsel->core.attr.config & 147 perf_pmu__format_bits(cs_etm_pmu, "timestamp"))) 148 return 0; 149 150 if (!cs_etm_is_etmv4(itr, cpu)) { 151 pr_err("%s: timestamp not supported in ETMv3, disable with %s/timestamp=0/\n", 152 CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME); 153 return -EINVAL; 154 } 155 156 /* Get a handle on TRCIRD0 */ 157 err = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0], &val); 158 if (err) 159 return err; 160 161 /* 162 * TRCIDR0.TSSIZE, bit [28-24], indicates whether global timestamping 163 * is supported: 164 * 0b00000 Global timestamping is not implemented 165 * 0b00110 Implementation supports a maximum timestamp of 48bits. 166 * 0b01000 Implementation supports a maximum timestamp of 64bits. 
167 */ 168 val &= GENMASK(28, 24); 169 if (!val) { 170 return -EINVAL; 171 } 172 173 return 0; 174 } 175 176 /* 177 * Check whether the requested timestamp and contextid options should be 178 * available on all requested CPUs and if not, tell the user how to override. 179 * The kernel will silently disable any unavailable options so a warning here 180 * first is better. In theory the kernel could still disable the option for 181 * some other reason so this is best effort only. 182 */ 183 static int cs_etm_validate_config(struct auxtrace_record *itr, 184 struct evsel *evsel) 185 { 186 int idx, err = 0; 187 struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus; 188 struct perf_cpu_map *intersect_cpus; 189 struct perf_cpu cpu; 190 191 /* 192 * Set option of each CPU we have. In per-cpu case, do the validation 193 * for CPUs to work with. In per-thread case, the CPU map has the "any" 194 * CPU value. Since the traced program can run on any CPUs in this case, 195 * thus don't skip validation. 
	 */
	if (!perf_cpu_map__has_any_cpu(event_cpus)) {
		/* Specific CPUs requested: only validate the online ones among them */
		struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();

		intersect_cpus = perf_cpu_map__intersect(event_cpus, online_cpus);
		perf_cpu_map__put(online_cpus);
	} else {
		/* "any" CPU: the workload may run anywhere, so check all online CPUs */
		intersect_cpus = perf_cpu_map__new_online_cpus();
	}

	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, intersect_cpus) {
		err = cs_etm_validate_context_id(itr, evsel, cpu);
		if (err)
			break;

		err = cs_etm_validate_timestamp(itr, evsel, cpu);
		if (err)
			break;
	}

	perf_cpu_map__put(intersect_cpus);
	return err;
}

/*
 * Parse the argument given to the snapshot option: an optional buffer size.
 * Returns 0 on success, -1 when the string is malformed or out of range.
 */
static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
					 struct record_opts *opts,
					 const char *str)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		/* Reject trailing garbage and sizes that don't fit in a size_t */
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;
	ptr->snapshot_size = snapshot_size;

	return 0;
}

/*
 * Apply a sink requested via the event's drv_cfg config term by writing the
 * sink's sysfs hash into attr.config2.  When no sink term was given, config2
 * is left alone so the CoreSight system can pick a default sink.
 */
static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
				struct evsel *evsel)
{
	char msg[BUFSIZ], path[PATH_MAX], *sink;
	struct evsel_config_term *term;
	int ret = -EINVAL;
	u32 hash;

	/* A sink hash is already programmed into config2, nothing to do */
	if (evsel->core.attr.config2 & GENMASK(31, 0))
		return 0;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type != EVSEL__CONFIG_TERM_DRV_CFG)
			continue;

		sink = term->val.str;
		snprintf(path, PATH_MAX, "sinks/%s", sink);

		/* Each sink is exposed as a sysfs file containing its hash */
		ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
		if (ret != 1) {
			if (errno == ENOENT)
				pr_err("Couldn't find sink \"%s\" on event %s\n"
				       "Missing kernel or device support?\n\n"
				       "Hint: An appropriate sink will be picked automatically if one isn't specified.\n",
				       sink, evsel__name(evsel));
			else
				pr_err("Failed to set sink \"%s\" on event %s with %d (%s)\n",
				       sink, evsel__name(evsel), errno,
				       str_error_r(errno, msg, sizeof(msg)));
			return ret;
		}

		evsel->core.attr.config2 |= hash;
		return 0;
	}

	/*
	 * No sink was provided on the command line - allow the CoreSight
	 * system to look for a default
	 */
	return 0;
}

/*
 * auxtrace_record::recording_options callback: configure the cs_etm event
 * and the record options (sink, buffer sizes, snapshot sizing, sample bits,
 * tracking event) before the events are opened.  Returns 0 on success or a
 * negative error code.
 */
static int cs_etm_recording_options(struct auxtrace_record *itr,
				    struct evlist *evlist,
				    struct record_opts *opts)
{
	int ret;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct evsel *evsel, *cs_etm_evsel = NULL;
	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
	bool privileged = perf_event_paranoid_check(-1);
	int err = 0;

	/* Find the (single) cs_etm event, if any */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == cs_etm_pmu->type) {
			if (cs_etm_evsel) {
				pr_err("There may be only one %s event\n",
				       CORESIGHT_ETM_PMU_NAME);
				return -EINVAL;
			}
			cs_etm_evsel = evsel;
		}
	}

	/* no need to continue if at least one event of interest was found */
	if (!cs_etm_evsel)
		return 0;

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	if (!record_opts__no_switch_events(opts) &&
	    perf_can_record_switch_events())
		opts->record_switch_events = true;

	cs_etm_evsel->needs_auxtrace_mmap = true;
	opts->full_auxtrace = true;

	ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);
	if (ret)
		return ret;

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with %s\n",
		       CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* we are in snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * No size were given to '-S' or '-m,', so go with
		 * the default
		 */
		if (!opts->auxtrace_snapshot_size &&
		    !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages =
						KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}

		/*
		 * '-m,xyz' was specified but no snapshot size, so make the
		 * snapshot size as big as the auxtrace mmap area.
		 */
		if (!opts->auxtrace_snapshot_size) {
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		}

		/*
		 * -Sxyz was specified but no auxtrace mmap area, so make the
		 * auxtrace mmap area big enough to fit the requested snapshot
		 * size.
		 */
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}

		/* Snapshot size can't be bigger than the auxtrace area */
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/* Something went wrong somewhere - this shouldn't happen */
		if (!opts->auxtrace_snapshot_size ||
		    !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
	}

	/* Buffer sizes weren't specified with '-m,xyz' so give some defaults */
	if (!opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	if (opts->auxtrace_snapshot_mode)
		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
			  opts->auxtrace_snapshot_size);

	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace
	 * event must come first.
	 */
	evlist__to_front(evlist, cs_etm_evsel);

	/*
	 * get the CPU on the sample - need it to associate trace ID in the
	 * AUX_OUTPUT_HW_ID event, and the AUX event for per-cpu mmaps.
	 */
	evsel__set_sample_bit(cs_etm_evsel, CPU);

	/*
	 * Also the case of per-cpu mmaps, need the contextID in order to be notified
	 * when a context switch happened.
	 */
	if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
		evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
					   "timestamp", 1);
		evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
					   "contextid", 1);
	}

	/*
	 * When the option '--timestamp' or '-T' is enabled, the PERF_SAMPLE_TIME
	 * bit is set for all events. In this case, always enable Arm CoreSight
	 * timestamp tracing.
	 */
	if (opts->sample_time_set)
		evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
					   "timestamp", 1);

	/* Add dummy event to keep tracking */
	err = parse_event(evlist, "dummy:u");
	if (err)
		goto out;
	evsel = evlist__last(evlist);
	evlist__set_tracking_event(evlist, evsel);
	evsel->core.attr.freq = 0;
	evsel->core.attr.sample_period = 1;

	/* In per-cpu case, always need the time of mmap events etc */
	if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus))
		evsel__set_sample_bit(evsel, TIME);

	err = cs_etm_validate_config(itr, cs_etm_evsel);
out:
	return err;
}

/*
 * Return attr.config of the cs_etm event in the recorded evlist, or 0 when
 * no such event is present.
 */
static u64 cs_etm_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct evlist *evlist = ptr->evlist;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == cs_etm_pmu->type) {
			/*
			 * Variable perf_event_attr::config is assigned to
			 * ETMv3/PTM. The bit fields have been made to match
			 * the ETMv3.5 ETRMCR register specification. See the
			 * PMU_FORMAT_ATTR() declarations in
			 * drivers/hwtracing/coresight/coresight-perf.c for
			 * details.
			 */
			config = evsel->core.attr.config;
			break;
		}
	}

	return config;
}

#ifndef BIT
#define BIT(N) (1UL << (N))
#endif

/* Translate the event's option bits into an ETMv4 TRCCONFIGR value */
static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	u64 config_opts = 0;

	/*
	 * The perf event variable config bits represent both
	 * the command line options and register programming
	 * bits in ETMv3/PTM.
For ETMv4 we must remap options
	 * to real bits
	 */
	config_opts = cs_etm_get_config(itr);
	if (config_opts & BIT(ETM_OPT_CYCACC))
		config |= BIT(ETM4_CFG_BIT_CYCACC);
	if (config_opts & BIT(ETM_OPT_CTXTID))
		config |= BIT(ETM4_CFG_BIT_CTXTID);
	if (config_opts & BIT(ETM_OPT_TS))
		config |= BIT(ETM4_CFG_BIT_TS);
	if (config_opts & BIT(ETM_OPT_RETSTK))
		config |= BIT(ETM4_CFG_BIT_RETSTK);
	/* contextid2 maps to both the VMID and VMID_OPT config bits */
	if (config_opts & BIT(ETM_OPT_CTXTID2))
		config |= BIT(ETM4_CFG_BIT_VMID) |
			  BIT(ETM4_CFG_BIT_VMID_OPT);
	if (config_opts & BIT(ETM_OPT_BRANCH_BROADCAST))
		config |= BIT(ETM4_CFG_BIT_BB);

	return config;
}

/*
 * auxtrace_record::info_priv_size callback: number of bytes of private data
 * needed in the auxtrace_info header - a common header plus one per-CPU
 * block whose size depends on the tracer flavour (ETMv3/ETMv4/ETE).
 */
static size_t
cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
		      struct evlist *evlist __maybe_unused)
{
	int idx;
	int etmv3 = 0, etmv4 = 0, ete = 0;
	struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
	struct perf_cpu_map *intersect_cpus;
	struct perf_cpu cpu;

	if (!perf_cpu_map__has_any_cpu(event_cpus)) {
		/* cpu map is not "any" CPU , we have specific CPUs to work with */
		struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();

		intersect_cpus = perf_cpu_map__intersect(event_cpus, online_cpus);
		perf_cpu_map__put(online_cpus);
	} else {
		/* Event can be "any" CPU so count all online CPUs.
		 */
		intersect_cpus = perf_cpu_map__new_online_cpus();
	}
	/* Count each flavour of tracer among the CPUs to be recorded */
	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, intersect_cpus) {
		if (cs_etm_is_ete(itr, cpu))
			ete++;
		else if (cs_etm_is_etmv4(itr, cpu))
			etmv4++;
		else
			etmv3++;
	}
	perf_cpu_map__put(intersect_cpus);

	return (CS_ETM_HEADER_SIZE +
	       (ete * CS_ETE_PRIV_SIZE) +
	       (etmv4 * CS_ETMV4_PRIV_SIZE) +
	       (etmv3 * CS_ETMV3_PRIV_SIZE));
}

/* A CPU's tracer is treated as ETMv4 if it exposes the ETMv4 TRCIDR0 file */
static bool cs_etm_is_etmv4(struct auxtrace_record *itr, struct perf_cpu cpu)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* Take any of the RO files for ETMv4 and see if it present */
	return cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
}

/*
 * Read one per-CPU read-only register value (hex format) from sysfs into
 * *val.  Returns 0 on success, -EINVAL when the file can't be read/parsed.
 */
static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val)
{
	char pmu_path[PATH_MAX];
	int scan;

	/* Get RO metadata from sysfs */
	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);

	scan = perf_pmu__scan_file(pmu, pmu_path, "%llx", val);
	if (scan != 1) {
		pr_err("%s: error reading: %s\n", __func__, pmu_path);
		return -EINVAL;
	}

	return 0;
}

/*
 * Like cs_etm_get_ro() but for sysfs files holding a signed decimal value
 * (e.g. ts_source); the int is widened into *out_val preserving the sign
 * bit pattern.
 */
static int cs_etm_get_ro_signed(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path,
				__u64 *out_val)
{
	char pmu_path[PATH_MAX];
	int scan;
	int val = 0;

	/* Get RO metadata from sysfs */
	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);

	scan = perf_pmu__scan_file(pmu, pmu_path, "%d", &val);
	if (scan != 1) {
		pr_err("%s: error reading: %s\n", __func__, pmu_path);
		return -EINVAL;
	}

	*out_val = (__u64) val;
	return 0;
}

/* Check whether a per-CPU sysfs metadata file exists for this PMU */
static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path)
{
	char pmu_path[PATH_MAX];

	/* Get RO metadata from sysfs */
	snprintf(pmu_path,
		 PATH_MAX, "cpu%d/%s", cpu.cpu, path);

	return perf_pmu__file_exists(pmu, pmu_path);
}

/* TRCDEVARCH.ARCHPART, bits [11:0]: architecture part number */
#define TRCDEVARCH_ARCHPART_SHIFT 0
#define TRCDEVARCH_ARCHPART_MASK  GENMASK(11, 0)
#define TRCDEVARCH_ARCHPART(x)    (((x) & TRCDEVARCH_ARCHPART_MASK) >> TRCDEVARCH_ARCHPART_SHIFT)

/* TRCDEVARCH.ARCHVER, bits [15:12]: architecture version */
#define TRCDEVARCH_ARCHVER_SHIFT 12
#define TRCDEVARCH_ARCHVER_MASK  GENMASK(15, 12)
#define TRCDEVARCH_ARCHVER(x)    (((x) & TRCDEVARCH_ARCHVER_MASK) >> TRCDEVARCH_ARCHVER_SHIFT)

/* Decide whether this CPU's tracer is an ETE by decoding TRCDEVARCH */
static bool cs_etm_is_ete(struct auxtrace_record *itr, struct perf_cpu cpu)
{
	struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	__u64 trcdevarch;

	/* Without a TRCDEVARCH file we cannot identify an ETE */
	if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH]))
		return false;

	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH], &trcdevarch);
	/*
	 * ETE if ARCHVER is 5 (ARCHVER is 4 for ETM) and ARCHPART is 0xA13.
	 * See ETM_DEVARCH_ETE_ARCH in coresight-etm4x.h
	 */
	return TRCDEVARCH_ARCHVER(trcdevarch) == 5 && TRCDEVARCH_ARCHPART(trcdevarch) == 0xA13;
}

/* Legacy (pre trace-ID allocation) CPU number to trace ID mapping */
static __u64 cs_etm_get_legacy_trace_id(struct perf_cpu cpu)
{
	return CORESIGHT_LEGACY_CPU_TRACE_ID(cpu.cpu);
}

/*
 * Fill the ETMv4 per-CPU block of the auxtrace_info header: the config
 * register value derived from the event, a legacy trace ID, and the
 * read-only ID/authentication registers read from sysfs.
 */
static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr, struct perf_cpu cpu)
{
	struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* Get trace configuration register */
	data[CS_ETMV4_TRCCONFIGR] = cs_etmv4_get_config(itr);
	/* traceID set to legacy version, in case new perf running on older system */
	data[CS_ETMV4_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) |
				     CORESIGHT_TRACE_ID_UNUSED_FLAG;

	/* Get read-only information from sysFS */
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0],
		      &data[CS_ETMV4_TRCIDR0]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR1],
		      &data[CS_ETMV4_TRCIDR1]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2],
		      &data[CS_ETMV4_TRCIDR2]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR8],
		      &data[CS_ETMV4_TRCIDR8]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCAUTHSTATUS],
		      &data[CS_ETMV4_TRCAUTHSTATUS]);

	/* Kernels older than 5.19 may not expose ts_source */
	if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE]) ||
	    cs_etm_get_ro_signed(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE],
				 &data[CS_ETMV4_TS_SOURCE])) {
		pr_debug3("[%03d] pmu file 'ts_source' not found. Fallback to safe value (-1)\n",
			  cpu.cpu);
		data[CS_ETMV4_TS_SOURCE] = (__u64) -1;
	}
}

/*
 * Fill the ETE per-CPU block of the auxtrace_info header.  Same registers
 * as the ETMv4 block plus TRCDEVARCH.
 */
static void cs_etm_save_ete_header(__u64 data[], struct auxtrace_record *itr, struct perf_cpu cpu)
{
	struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* Get trace configuration register */
	data[CS_ETE_TRCCONFIGR] = cs_etmv4_get_config(itr);
	/* traceID set to legacy version, in case new perf running on older system */
	data[CS_ETE_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG;

	/* Get read-only information from sysFS */
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR0], &data[CS_ETE_TRCIDR0]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR1], &data[CS_ETE_TRCIDR1]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR2], &data[CS_ETE_TRCIDR2]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR8], &data[CS_ETE_TRCIDR8]);
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCAUTHSTATUS],
		      &data[CS_ETE_TRCAUTHSTATUS]);
	/* ETE uses the same registers as ETMv4 plus TRCDEVARCH */
	cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH],
		      &data[CS_ETE_TRCDEVARCH]);

	/* Kernels older than 5.19 may not expose ts_source */
	if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE]) ||
	    cs_etm_get_ro_signed(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE],
				 &data[CS_ETE_TS_SOURCE])) {
		pr_debug3("[%03d] pmu file 'ts_source' not found. Fallback to safe value (-1)\n",
			  cpu.cpu);
		data[CS_ETE_TS_SOURCE] = (__u64) -1;
	}
}

/*
 * Write one CPU's metadata block (magic, cpu number, parameter count, then
 * the flavour-specific register values) at *offset within info->priv, then
 * advance *offset to where the next CPU's block starts.
 */
static void cs_etm_get_metadata(struct perf_cpu cpu, u32 *offset,
				struct auxtrace_record *itr,
				struct perf_record_auxtrace_info *info)
{
	u32 increment, nr_trc_params;
	u64 magic;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* first see what kind of tracer this cpu is affined to */
	if (cs_etm_is_ete(itr, cpu)) {
		magic = __perf_cs_ete_magic;
		cs_etm_save_ete_header(&info->priv[*offset], itr, cpu);

		/* How much space was used */
		increment = CS_ETE_PRIV_MAX;
		nr_trc_params = CS_ETE_PRIV_MAX - CS_ETM_COMMON_BLK_MAX_V1;
	} else if (cs_etm_is_etmv4(itr, cpu)) {
		magic = __perf_cs_etmv4_magic;
		cs_etm_save_etmv4_header(&info->priv[*offset], itr, cpu);

		/* How much space was used */
		increment = CS_ETMV4_PRIV_MAX;
		nr_trc_params = CS_ETMV4_PRIV_MAX - CS_ETMV4_TRCCONFIGR;
	} else {
		magic = __perf_cs_etmv3_magic;
		/* Get configuration register */
		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* traceID set to legacy value in case new perf running on old system */
		info->priv[*offset + CS_ETM_ETMTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) |
							   CORESIGHT_TRACE_ID_UNUSED_FLAG;
		/* Get read-only information from sysFS */
		cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMCCER],
			      &info->priv[*offset + CS_ETM_ETMCCER]);
		cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMIDR],
			      &info->priv[*offset + CS_ETM_ETMIDR]);

		/* How much space was used */
		increment = CS_ETM_PRIV_MAX;
		nr_trc_params = CS_ETM_PRIV_MAX - CS_ETM_ETMCR;
	}

	/* Build generic header portion */
	info->priv[*offset + CS_ETM_MAGIC] = magic;
	info->priv[*offset + CS_ETM_CPU] = cpu.cpu;
	info->priv[*offset + CS_ETM_NR_TRC_PARAMS] = nr_trc_params;
751 /* Where the next CPU entry should start from */ 752 *offset += increment; 753 } 754 755 static int cs_etm_info_fill(struct auxtrace_record *itr, 756 struct perf_session *session, 757 struct perf_record_auxtrace_info *info, 758 size_t priv_size) 759 { 760 int i; 761 u32 offset; 762 u64 nr_cpu, type; 763 struct perf_cpu_map *cpu_map; 764 struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus; 765 struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus(); 766 struct cs_etm_recording *ptr = 767 container_of(itr, struct cs_etm_recording, itr); 768 struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; 769 struct perf_cpu cpu; 770 771 if (priv_size != cs_etm_info_priv_size(itr, session->evlist)) 772 return -EINVAL; 773 774 if (!session->evlist->core.nr_mmaps) 775 return -EINVAL; 776 777 /* If the cpu_map has the "any" CPU all online CPUs are involved */ 778 if (perf_cpu_map__has_any_cpu(event_cpus)) { 779 cpu_map = online_cpus; 780 } else { 781 /* Make sure all specified CPUs are online */ 782 perf_cpu_map__for_each_cpu(cpu, i, event_cpus) { 783 if (!perf_cpu_map__has(online_cpus, cpu)) 784 return -EINVAL; 785 } 786 787 cpu_map = event_cpus; 788 } 789 790 nr_cpu = perf_cpu_map__nr(cpu_map); 791 /* Get PMU type as dynamically assigned by the core */ 792 type = cs_etm_pmu->type; 793 794 /* First fill out the session header */ 795 info->type = PERF_AUXTRACE_CS_ETM; 796 info->priv[CS_HEADER_VERSION] = CS_HEADER_CURRENT_VERSION; 797 info->priv[CS_PMU_TYPE_CPUS] = type << 32; 798 info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu; 799 info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode; 800 801 offset = CS_ETM_SNAPSHOT + 1; 802 803 perf_cpu_map__for_each_cpu(cpu, i, cpu_map) { 804 assert(offset < priv_size); 805 cs_etm_get_metadata(cpu, &offset, itr, info); 806 } 807 808 perf_cpu_map__put(online_cpus); 809 810 return 0; 811 } 812 813 static int cs_etm_snapshot_start(struct auxtrace_record *itr) 814 { 815 struct cs_etm_recording *ptr = 816 container_of(itr, struct 
cs_etm_recording, itr);
	struct evsel *evsel;

	/* Find the cs_etm event and disable it */
	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
			return evsel__disable(evsel);
	}
	return -EINVAL;
}

/* Snapshot finish callback: find the cs_etm event and re-enable it */
static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
			return evsel__enable(evsel);
	}
	return -EINVAL;
}

/* Build a pseudo-random 64-bit reference value from two rand() calls */
static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
{
	return (((u64) rand() <<  0) & 0x00000000FFFFFFFFull) |
	       (((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
}

/* Release the recording state allocated by cs_etm_record_init() */
static void cs_etm_recording_free(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	free(ptr);
}

/*
 * Allocate and initialise the CS ETM auxtrace_record, wiring up all the
 * callbacks used by perf record.  On failure returns NULL and sets *err:
 * -EINVAL when no cs_etm PMU is present, -ENOMEM on allocation failure.
 */
struct auxtrace_record *cs_etm_record_init(int *err)
{
	struct perf_pmu *cs_etm_pmu;
	struct cs_etm_recording *ptr;

	cs_etm_pmu = perf_pmus__find(CORESIGHT_ETM_PMU_NAME);

	if (!cs_etm_pmu) {
		*err = -EINVAL;
		goto out;
	}

	ptr = zalloc(sizeof(struct cs_etm_recording));
	if (!ptr) {
		*err = -ENOMEM;
		goto out;
	}

	ptr->cs_etm_pmu = cs_etm_pmu;
	ptr->itr.pmu = cs_etm_pmu;
	ptr->itr.parse_snapshot_options = cs_etm_parse_snapshot_options;
	ptr->itr.recording_options = cs_etm_recording_options;
	ptr->itr.info_priv_size = cs_etm_info_priv_size;
	ptr->itr.info_fill = cs_etm_info_fill;
	ptr->itr.snapshot_start = cs_etm_snapshot_start;
	ptr->itr.snapshot_finish = cs_etm_snapshot_finish;
	ptr->itr.reference = cs_etm_reference;
	ptr->itr.free = cs_etm_recording_free;
	ptr->itr.read_finish = auxtrace_record__read_finish;

	*err = 0;
	return &ptr->itr;
out:
	return NULL;
}

/*
 * Set a
default config to enable the user changed config tracking mechanism
 * (CFG_CHG and evsel__set_config_if_unset()). If no default is set then user
 * changes aren't tracked.
 */
void
cs_etm_get_default_config(const struct perf_pmu *pmu __maybe_unused,
			  struct perf_event_attr *attr)
{
	/* Any non-zero default makes later user overrides detectable */
	attr->sample_period = 1;
}