Lines Matching +full:cpu +full:-centric (excerpts from tools/perf/builtin-record.c)
1 // SPDX-License-Identifier: GPL-2.0
3 * builtin-record.c
6 * (or a CPU, or a PID) into the perf.data output file - for
11 #include "util/build-id.h"
12 #include <subcmd/parse-options.h>
14 #include "util/parse-events.h"
38 #include "util/parse-branch-options.h"
39 #include "util/parse-regs-options.h"
42 #include "util/perf-hooks.h"
43 #include "util/cpu-set-sched.h"
44 #include "util/synthetic-events.h"
45 #include "util/time-utils.h"
47 #include "util/bpf-event.h"
54 #include "util/bpf-filter.h"
146 "undefined", "cpu", "core", "package", "numa", "user"
199 "SYS", "NODE", "CPU"
220 return rec->opts.threads_spec; in record__threads_enabled()
225 return rec->switch_output.signal && in switch_output_signal()
231 return rec->switch_output.size && in switch_output_size()
233 (rec->bytes_written >= rec->switch_output.size); in switch_output_size()
238 return rec->switch_output.time && in switch_output_time()
244 return rec->bytes_written + rec->thread_bytes_written; in record__bytes_written()
249 return rec->output_max_size && in record__output_max_size_exceeded()
250 (record__bytes_written(rec) >= rec->output_max_size); in record__output_max_size_exceeded()
256 struct perf_data_file *file = &rec->session->data->file; in record__write()
258 if (map && map->file) in record__write()
259 file = map->file; in record__write()
263 return -1; in record__write()
266 if (map && map->file) { in record__write()
267 thread->bytes_written += size; in record__write()
268 rec->thread_bytes_written += size; in record__write()
270 rec->bytes_written += size; in record__write()
297 cblock->aio_fildes = trace_fd; in record__aio_write()
298 cblock->aio_buf = buf; in record__aio_write()
299 cblock->aio_nbytes = size; in record__aio_write()
300 cblock->aio_offset = off; in record__aio_write()
301 cblock->aio_sigevent.sigev_notify = SIGEV_NONE; in record__aio_write()
308 cblock->aio_fildes = -1; in record__aio_write()
336 rem_size = cblock->aio_nbytes - written; in record__aio_complete()
339 cblock->aio_fildes = -1; in record__aio_complete()
341 * md->refcount is incremented in record__aio_pushfn() for in record__aio_complete()
345 perf_mmap__put(&md->core); in record__aio_complete()
353 rem_off = cblock->aio_offset + written; in record__aio_complete()
354 rem_buf = (void *)(cblock->aio_buf + written); in record__aio_complete()
355 record__aio_write(cblock, cblock->aio_fildes, in record__aio_complete()
365 struct aiocb **aiocb = md->aio.aiocb; in record__aio_sync()
366 struct aiocb *cblocks = md->aio.cblocks; in record__aio_sync()
372 for (i = 0; i < md->aio.nr_cblocks; ++i) { in record__aio_sync()
373 if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) { in record__aio_sync()
389 return -1; in record__aio_sync()
391 while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) { in record__aio_sync()
409 * map->core.base data pointed to by buf is copied into a free map->aio.data[] buffer in record__aio_pushfn()
414 * the kernel buffer earlier than other per-cpu kernel buffers are handled. in record__aio_pushfn()
418 * part of data from map->start till the upper bound and then the remainder in record__aio_pushfn()
422 if (record__comp_enabled(aio->rec)) { in record__aio_pushfn()
423 ssize_t compressed = zstd_compress(aio->rec->session, NULL, aio->data + aio->size, in record__aio_pushfn()
424 mmap__mmap_len(map) - aio->size, in record__aio_pushfn()
431 memcpy(aio->data + aio->size, buf, size); in record__aio_pushfn()
434 if (!aio->size) { in record__aio_pushfn()
436 * Increment map->refcount to guard map->aio.data[] buffer in record__aio_pushfn()
439 * map->aio.data[] buffer is complete. in record__aio_pushfn()
445 perf_mmap__get(&map->core); in record__aio_pushfn()
448 aio->size += size; in record__aio_pushfn()
456 int trace_fd = rec->session->data->file.fd; in record__aio_push()
460 * Call record__aio_sync() to wait till map->aio.data[] buffer in record__aio_push()
465 aio.data = map->aio.data[idx]; in record__aio_push()
467 if (ret != 0) /* ret > 0 - no data, ret < 0 - error */ in record__aio_push()
470 rec->samples++; in record__aio_push()
471 ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off); in record__aio_push()
474 rec->bytes_written += aio.size; in record__aio_push()
479 * Decrement map->refcount incremented in record__aio_pushfn() in record__aio_push()
481 * map->refcount is decremented in record__aio_complete() after in record__aio_push()
484 perf_mmap__put(&map->core); in record__aio_push()
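The record__aio_*() helpers above build directly on POSIX AIO. A minimal standalone sketch of the same pattern: queue one write with SIGEV_NONE notification, then poll it to completion the way record__aio_sync() and record__aio_complete() do. The file name and buffer are illustrative, not perf code; link with -lrt.

	#include <aio.h>
	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		static const char buf[] = "hello, aio\n";
		struct aiocb cb = { 0 };
		const struct aiocb *list[1] = { &cb };
		int fd = open("/tmp/aio-demo.out", O_WRONLY | O_CREAT | O_TRUNC, 0644);

		if (fd < 0)
			return 1;

		cb.aio_fildes = fd;				/* cf. cblock->aio_fildes = trace_fd */
		cb.aio_buf = (volatile void *)buf;
		cb.aio_nbytes = sizeof(buf) - 1;
		cb.aio_offset = 0;
		cb.aio_sigevent.sigev_notify = SIGEV_NONE;	/* completion is polled, not signalled */

		if (aio_write(&cb))
			return 1;

		/* Wait for the request, as record__aio_sync() does for all cblocks. */
		while (aio_error(&cb) == EINPROGRESS)
			aio_suspend(list, 1, NULL);

		printf("written: %zd bytes\n", aio_return(&cb));
		close(fd);
		return 0;
	}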
503 struct evlist *evlist = rec->evlist; in record__aio_mmap_read_sync()
504 struct mmap *maps = evlist->mmap; in record__aio_mmap_read_sync()
509 for (i = 0; i < evlist->core.nr_mmaps; i++) { in record__aio_mmap_read_sync()
512 if (map->core.base) in record__aio_mmap_read_sync()
524 struct record_opts *opts = (struct record_opts *)opt->value; in record__aio_parse()
527 opts->nr_cblocks = 0; in record__aio_parse()
530 opts->nr_cblocks = strtol(str, NULL, 0); in record__aio_parse()
531 if (!opts->nr_cblocks) in record__aio_parse()
532 opts->nr_cblocks = nr_cblocks_default; in record__aio_parse()
543 return -1; in record__aio_push()
548 return -1; in record__aio_get_pos()
562 return rec->opts.nr_cblocks > 0; in record__aio_enabled()
571 struct record_opts *opts = (struct record_opts *)opt->value; in record__mmap_flush_parse()
584 opts->mmap_flush = parse_tag_value(str, tags); in record__mmap_flush_parse()
585 if (opts->mmap_flush == (int)-1) in record__mmap_flush_parse()
586 opts->mmap_flush = strtol(str, NULL, 0); in record__mmap_flush_parse()
589 if (!opts->mmap_flush) in record__mmap_flush_parse()
590 opts->mmap_flush = MMAP_FLUSH_DEFAULT; in record__mmap_flush_parse()
592 flush_max = evlist__mmap_size(opts->mmap_pages); in record__mmap_flush_parse()
594 if (opts->mmap_flush > flush_max) in record__mmap_flush_parse()
595 opts->mmap_flush = flush_max; in record__mmap_flush_parse()
605 struct record_opts *opts = opt->value; in record__parse_comp_level()
608 opts->comp_level = 0; in record__parse_comp_level()
611 opts->comp_level = strtol(str, NULL, 0); in record__parse_comp_level()
612 if (!opts->comp_level) in record__parse_comp_level()
613 opts->comp_level = comp_level_default; in record__parse_comp_level()
623 return rec->opts.comp_level > 0; in record__comp_enabled()
632 return record__write(rec, NULL, event, event->header.size); in process_synthesized_event()
655 struct perf_record_compressed2 *event = map->data; in record__pushfn()
658 ssize_t compressed = zstd_compress(rec->session, map, map->data, in record__pushfn()
665 thread->samples++; in record__pushfn()
671 event->data_size = compressed - sizeof(struct perf_record_compressed2); in record__pushfn()
672 event->header.size = PERF_ALIGN(compressed, sizeof(u64)); in record__pushfn()
673 padding = event->header.size - compressed; in record__pushfn()
678 thread->samples++; in record__pushfn()
682 static volatile sig_atomic_t signr = -1;
685 static volatile sig_atomic_t done_fd = -1;
726 if (signr == -1) in record__sig_exit()
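done_fd above is an eventfd used so that signal context can wake the main poll loop (see evlist__add_wakeup_eventfd() further down). The wake-up mechanism in isolation, with illustrative names and SIGINT standing in for perf's signals:

	#include <poll.h>
	#include <signal.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/eventfd.h>
	#include <unistd.h>

	static volatile sig_atomic_t done;
	static int done_fd = -1;

	static void sig_handler(int sig)
	{
		uint64_t one = 1;

		(void)sig;
		done = 1;
		/* write(2) is async-signal-safe; this unblocks poll() below. */
		(void)write(done_fd, &one, sizeof(one));
	}

	int main(void)
	{
		struct pollfd pfd;

		done_fd = eventfd(0, EFD_NONBLOCK);
		if (done_fd < 0)
			return 1;
		signal(SIGINT, sig_handler);
		pfd.fd = done_fd;
		pfd.events = POLLIN;

		while (!done)
			poll(&pfd, 1, -1);	/* returns once the handler writes */

		puts("done");
		close(done_fd);
		return 0;
	}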
741 struct perf_data *data = &rec->data; in record__process_auxtrace()
751 if (file_offset == -1) in record__process_auxtrace()
752 return -1; in record__process_auxtrace()
753 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index, in record__process_auxtrace()
762 padding = 8 - padding; in record__process_auxtrace()
764 record__write(rec, map, event, event->header.size); in record__process_auxtrace()
778 ret = auxtrace_mmap__read(map, rec->itr, in record__auxtrace_mmap_read()
779 perf_session__env(rec->session), in record__auxtrace_mmap_read()
780 &rec->tool, in record__auxtrace_mmap_read()
786 rec->samples++; in record__auxtrace_mmap_read()
796 ret = auxtrace_mmap__read_snapshot(map, rec->itr, in record__auxtrace_mmap_read_snapshot()
797 perf_session__env(rec->session), in record__auxtrace_mmap_read_snapshot()
798 &rec->tool, in record__auxtrace_mmap_read_snapshot()
800 rec->opts.auxtrace_snapshot_size); in record__auxtrace_mmap_read_snapshot()
805 rec->samples++; in record__auxtrace_mmap_read_snapshot()
815 for (i = 0; i < rec->evlist->core.nr_mmaps; i++) { in record__auxtrace_read_snapshot_all()
816 struct mmap *map = &rec->evlist->mmap[i]; in record__auxtrace_read_snapshot_all()
818 if (!map->auxtrace_mmap.base) in record__auxtrace_read_snapshot_all()
822 rc = -1; in record__auxtrace_read_snapshot_all()
836 if (auxtrace_record__snapshot_finish(rec->itr, on_exit)) in record__read_auxtrace_snapshot()
849 auxtrace_record__snapshot_start(rec->itr)) in record__auxtrace_snapshot_exit()
850 return -1; in record__auxtrace_snapshot_exit()
854 return -1; in record__auxtrace_snapshot_exit()
863 if ((rec->opts.auxtrace_snapshot_opts || rec->opts.auxtrace_sample_opts) in record__auxtrace_init()
866 return -EINVAL; in record__auxtrace_init()
869 if (!rec->itr) { in record__auxtrace_init()
870 rec->itr = auxtrace_record__init(rec->evlist, &err); in record__auxtrace_init()
875 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts, in record__auxtrace_init()
876 rec->opts.auxtrace_snapshot_opts); in record__auxtrace_init()
880 err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts, in record__auxtrace_init()
881 rec->opts.auxtrace_sample_opts); in record__auxtrace_init()
885 err = auxtrace_parse_aux_action(rec->evlist); in record__auxtrace_init()
889 return auxtrace_parse_filters(rec->evlist); in record__auxtrace_init()
932 if (evsel->core.attr.text_poke) in record__config_text_poke()
938 return -ENOMEM; in record__config_text_poke()
940 evsel->core.attr.text_poke = 1; in record__config_text_poke()
941 evsel->core.attr.ksymbol = 1; in record__config_text_poke()
942 evsel->immediate = true; in record__config_text_poke()
950 return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts); in record__config_off_cpu()
955 struct evlist *evlist = rec->evlist; in record__tracking_system_wide()
959 * If non-dummy evsel exists, system_wide sideband is needed to in record__tracking_system_wide()
974 struct record_opts *opts = &rec->opts; in record__config_tracking_events()
975 struct evlist *evlist = rec->evlist; in record__config_tracking_events()
984 if (opts->target.initial_delay || target__has_cpu(&opts->target) || in record__config_tracking_events()
991 if (!!opts->target.cpu_list && record__tracking_system_wide(rec)) in record__config_tracking_events()
996 return -ENOMEM; in record__config_tracking_events()
1002 if (opts->target.initial_delay && !evsel->immediate && in record__config_tracking_events()
1003 !target__has_cpu(&opts->target)) in record__config_tracking_events()
1004 evsel->core.attr.enable_on_exec = 1; in record__config_tracking_events()
1006 evsel->immediate = 1; in record__config_tracking_events()
1017 scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir); in record__kcore_readable()
1034 snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir); in record__kcore_copy()
1045 thread_data->pipes.msg[0] = -1; in record__thread_data_init_pipes()
1046 thread_data->pipes.msg[1] = -1; in record__thread_data_init_pipes()
1047 thread_data->pipes.ack[0] = -1; in record__thread_data_init_pipes()
1048 thread_data->pipes.ack[1] = -1; in record__thread_data_init_pipes()
1053 if (pipe(thread_data->pipes.msg)) in record__thread_data_open_pipes()
1054 return -EINVAL; in record__thread_data_open_pipes()
1056 if (pipe(thread_data->pipes.ack)) { in record__thread_data_open_pipes()
1057 close(thread_data->pipes.msg[0]); in record__thread_data_open_pipes()
1058 thread_data->pipes.msg[0] = -1; in record__thread_data_open_pipes()
1059 close(thread_data->pipes.msg[1]); in record__thread_data_open_pipes()
1060 thread_data->pipes.msg[1] = -1; in record__thread_data_open_pipes()
1061 return -EINVAL; in record__thread_data_open_pipes()
1065 thread_data->pipes.msg[0], thread_data->pipes.msg[1], in record__thread_data_open_pipes()
1066 thread_data->pipes.ack[0], thread_data->pipes.ack[1]); in record__thread_data_open_pipes()
1073 if (thread_data->pipes.msg[0] != -1) { in record__thread_data_close_pipes()
1074 close(thread_data->pipes.msg[0]); in record__thread_data_close_pipes()
1075 thread_data->pipes.msg[0] = -1; in record__thread_data_close_pipes()
1077 if (thread_data->pipes.msg[1] != -1) { in record__thread_data_close_pipes()
1078 close(thread_data->pipes.msg[1]); in record__thread_data_close_pipes()
1079 thread_data->pipes.msg[1] = -1; in record__thread_data_close_pipes()
1081 if (thread_data->pipes.ack[0] != -1) { in record__thread_data_close_pipes()
1082 close(thread_data->pipes.ack[0]); in record__thread_data_close_pipes()
1083 thread_data->pipes.ack[0] = -1; in record__thread_data_close_pipes()
1085 if (thread_data->pipes.ack[1] != -1) { in record__thread_data_close_pipes()
1086 close(thread_data->pipes.ack[1]); in record__thread_data_close_pipes()
1087 thread_data->pipes.ack[1] = -1; in record__thread_data_close_pipes()
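The msg/ack pipe pairs opened and closed above implement a plain parent/worker handshake. A self-contained sketch of that pattern follows; names and message bytes are made up, and note that perf signals termination by closing the msg pipe (the worker sees POLLHUP) rather than by writing a byte as done here.

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static int msg_pipe[2], ack_pipe[2];

	static void *worker(void *arg)
	{
		char cmd, ack = 'A';

		(void)arg;
		(void)write(ack_pipe[1], &ack, 1);	/* "started" */
		(void)read(msg_pipe[0], &cmd, 1);	/* wait for the stop message */
		(void)write(ack_pipe[1], &ack, 1);	/* acknowledge shutdown */
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		char cmd = 'T', ack;

		if (pipe(msg_pipe) || pipe(ack_pipe))
			return 1;
		pthread_create(&t, NULL, worker, NULL);
		(void)read(ack_pipe[0], &ack, 1);	/* wait until the worker is up */
		(void)write(msg_pipe[1], &cmd, 1);	/* tell it to stop */
		(void)read(ack_pipe[0], &ack, 1);	/* cf. record__terminate_thread() */
		pthread_join(t, NULL);
		puts("worker stopped");
		return 0;
	}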
1093 return cpu_map__is_dummy(evlist->core.user_requested_cpus); in evlist__per_thread()
1098 int m, tm, nr_mmaps = evlist->core.nr_mmaps; in record__thread_data_init_maps()
1099 struct mmap *mmap = evlist->mmap; in record__thread_data_init_maps()
1100 struct mmap *overwrite_mmap = evlist->overwrite_mmap; in record__thread_data_init_maps()
1101 struct perf_cpu_map *cpus = evlist->core.all_cpus; in record__thread_data_init_maps()
1105 thread_data->nr_mmaps = nr_mmaps; in record__thread_data_init_maps()
1107 thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits, in record__thread_data_init_maps()
1108 thread_data->mask->maps.nbits); in record__thread_data_init_maps()
1110 thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *)); in record__thread_data_init_maps()
1111 if (!thread_data->maps) in record__thread_data_init_maps()
1112 return -ENOMEM; in record__thread_data_init_maps()
1115 thread_data->overwrite_maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *)); in record__thread_data_init_maps()
1116 if (!thread_data->overwrite_maps) { in record__thread_data_init_maps()
1117 zfree(&thread_data->maps); in record__thread_data_init_maps()
1118 return -ENOMEM; in record__thread_data_init_maps()
1122 thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps); in record__thread_data_init_maps()
1124 for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) { in record__thread_data_init_maps()
1126 test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) { in record__thread_data_init_maps()
1127 if (thread_data->maps) { in record__thread_data_init_maps()
1128 thread_data->maps[tm] = &mmap[m]; in record__thread_data_init_maps()
1129 pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n", in record__thread_data_init_maps()
1130 thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m); in record__thread_data_init_maps()
1132 if (thread_data->overwrite_maps) { in record__thread_data_init_maps()
1133 thread_data->overwrite_maps[tm] = &overwrite_mmap[m]; in record__thread_data_init_maps()
1134 pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n", in record__thread_data_init_maps()
1135 thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m); in record__thread_data_init_maps()
1149 fdarray__init(&thread_data->pollfd, 64); in record__thread_data_init_pollfd()
1151 for (tm = 0; tm < thread_data->nr_mmaps; tm++) { in record__thread_data_init_pollfd()
1152 map = thread_data->maps ? thread_data->maps[tm] : NULL; in record__thread_data_init_pollfd()
1153 overwrite_map = thread_data->overwrite_maps ? in record__thread_data_init_pollfd()
1154 thread_data->overwrite_maps[tm] : NULL; in record__thread_data_init_pollfd()
1156 for (f = 0; f < evlist->core.pollfd.nr; f++) { in record__thread_data_init_pollfd()
1157 void *ptr = evlist->core.pollfd.priv[f].ptr; in record__thread_data_init_pollfd()
1160 pos = fdarray__dup_entry_from(&thread_data->pollfd, f, in record__thread_data_init_pollfd()
1161 &evlist->core.pollfd); in record__thread_data_init_pollfd()
1164 pr_debug2("thread_data[%p]: pollfd[%d] <- event_fd=%d\n", in record__thread_data_init_pollfd()
1165 thread_data, pos, evlist->core.pollfd.entries[f].fd); in record__thread_data_init_pollfd()
1176 struct record_thread *thread_data = rec->thread_data; in record__free_thread_data()
1181 for (t = 0; t < rec->nr_threads; t++) { in record__free_thread_data()
1188 zfree(&rec->thread_data); in record__free_thread_data()
1195 size_t x = rec->index_map_cnt; in record__map_thread_evlist_pollfd_indexes()
1197 if (realloc_array_as_needed(rec->index_map, rec->index_map_sz, x, NULL)) in record__map_thread_evlist_pollfd_indexes()
1198 return -ENOMEM; in record__map_thread_evlist_pollfd_indexes()
1199 rec->index_map[x].evlist_pollfd_index = evlist_pollfd_index; in record__map_thread_evlist_pollfd_indexes()
1200 rec->index_map[x].thread_pollfd_index = thread_pollfd_index; in record__map_thread_evlist_pollfd_indexes()
1201 rec->index_map_cnt += 1; in record__map_thread_evlist_pollfd_indexes()
1209 struct pollfd *e_entries = evlist->core.pollfd.entries; in record__update_evlist_pollfd_from_thread()
1210 struct pollfd *t_entries = thread_data->pollfd.entries; in record__update_evlist_pollfd_from_thread()
1214 for (i = 0; i < rec->index_map_cnt; i++) { in record__update_evlist_pollfd_from_thread()
1215 int e_pos = rec->index_map[i].evlist_pollfd_index; in record__update_evlist_pollfd_from_thread()
1216 int t_pos = rec->index_map[i].thread_pollfd_index; in record__update_evlist_pollfd_from_thread()
1221 err = -EINVAL; in record__update_evlist_pollfd_from_thread()
1233 struct fdarray *fda = &evlist->core.pollfd; in record__dup_non_perf_events()
1236 for (i = 0; i < fda->nr; i++) { in record__dup_non_perf_events()
1237 if (!(fda->priv[i].flags & fdarray_flag__non_perf_event)) in record__dup_non_perf_events()
1239 ret = fdarray__dup_entry_from(&thread_data->pollfd, i, fda); in record__dup_non_perf_events()
1244 pr_debug2("thread_data[%p]: pollfd[%d] <- non_perf_event fd=%d\n", in record__dup_non_perf_events()
1245 thread_data, ret, fda->entries[i].fd); in record__dup_non_perf_events()
1260 rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data))); in record__alloc_thread_data()
1261 if (!rec->thread_data) { in record__alloc_thread_data()
1263 return -ENOMEM; in record__alloc_thread_data()
1265 thread_data = rec->thread_data; in record__alloc_thread_data()
1267 for (t = 0; t < rec->nr_threads; t++) in record__alloc_thread_data()
1270 for (t = 0; t < rec->nr_threads; t++) { in record__alloc_thread_data()
1272 thread_data[t].mask = &rec->thread_masks[t]; in record__alloc_thread_data()
1284 thread_data[t].tid = -1; in record__alloc_thread_data()
1297 pr_debug2("thread_data[%p]: pollfd[%d] <- ctl_fd=%d\n", in record__alloc_thread_data()
1307 thread_data[t].ctlfd_pos = -1; /* Not used */ in record__alloc_thread_data()
1323 struct record_opts *opts = &rec->opts; in record__mmap_evlist()
1324 bool auxtrace_overwrite = opts->auxtrace_snapshot_mode || in record__mmap_evlist()
1325 opts->auxtrace_sample_mode; in record__mmap_evlist()
1328 if (opts->affinity != PERF_AFFINITY_SYS) in record__mmap_evlist()
1331 if (evlist__mmap_ex(evlist, opts->mmap_pages, in record__mmap_evlist()
1332 opts->auxtrace_mmap_pages, in record__mmap_evlist()
1334 opts->nr_cblocks, opts->affinity, in record__mmap_evlist()
1335 opts->mmap_flush, opts->comp_level) < 0) { in record__mmap_evlist()
1340 "or try again with a smaller value of -m/--mmap_pages.\n" in record__mmap_evlist()
1342 opts->mmap_pages, opts->auxtrace_mmap_pages); in record__mmap_evlist()
1343 return -errno; in record__mmap_evlist()
1348 return -errno; in record__mmap_evlist()
1350 return -EINVAL; in record__mmap_evlist()
1354 if (evlist__initialize_ctlfd(evlist, opts->ctl_fd, opts->ctl_fd_ack)) in record__mmap_evlist()
1355 return -1; in record__mmap_evlist()
1362 ret = perf_data__create_dir(&rec->data, evlist->core.nr_mmaps); in record__mmap_evlist()
1364 pr_err("Failed to create data directory: %s\n", strerror(-ret)); in record__mmap_evlist()
1367 for (i = 0; i < evlist->core.nr_mmaps; i++) { in record__mmap_evlist()
1368 if (evlist->mmap) in record__mmap_evlist()
1369 evlist->mmap[i].file = &rec->data.dir.files[i]; in record__mmap_evlist()
1370 if (evlist->overwrite_mmap) in record__mmap_evlist()
1371 evlist->overwrite_mmap[i].file = &rec->data.dir.files[i]; in record__mmap_evlist()
1380 return record__mmap_evlist(rec, rec->evlist); in record__mmap()
1387 struct evlist *evlist = rec->evlist; in record__open()
1388 struct perf_session *session = rec->session; in record__open()
1389 struct record_opts *opts = &rec->opts; in record__open()
1394 if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) { in record__open()
1395 if (evsel__fallback(pos, &opts->target, errno, msg, sizeof(msg))) { in record__open()
1401 pos->core.leader != &pos->core && in record__open()
1402 pos->weak_group) { in record__open()
1406 rc = -errno; in record__open()
1407 evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg)); in record__open()
1424 if (evlist__apply_filters(evlist, &pos, &opts->target)) { in record__open()
1426 pos->filter ?: "BPF", evsel__name(pos), errno, in record__open()
1428 rc = -1; in record__open()
1436 session->evlist = evlist; in record__open()
1444 if (rec->evlist->first_sample_time == 0) in set_timestamp_boundary()
1445 rec->evlist->first_sample_time = sample_time; in set_timestamp_boundary()
1448 rec->evlist->last_sample_time = sample_time; in set_timestamp_boundary()
1459 set_timestamp_boundary(rec, sample->time); in process_sample_event()
1461 if (rec->buildid_all) in process_sample_event()
1464 rec->samples++; in process_sample_event()
1470 struct perf_session *session = rec->session; in process_buildids()
1472 if (perf_data__size(&rec->data) == 0) in process_buildids()
1477 * dso->long_name to a real pathname it found. In this case in process_buildids()
1481 * rather than build-id path (in debug directory). in process_buildids()
1482 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551 in process_buildids()
1487 * If --buildid-all is given, it marks all DSOs regardless of hits, in process_buildids()
1492 if (rec->buildid_all && !rec->timestamp_boundary) in process_buildids()
1493 rec->tool.sample = process_event_sample_stub; in process_buildids()
1514 " relocation symbol.\n", machine->pid); in perf_event__synthesize_guest_os()
1524 " relocation symbol.\n", machine->pid); in perf_event__synthesize_guest_os()
1539 if (rec->opts.affinity != PERF_AFFINITY_SYS && in record__adjust_affinity()
1540 !bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits, in record__adjust_affinity()
1541 thread->mask->affinity.nbits)) { in record__adjust_affinity()
1542 bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits); in record__adjust_affinity()
1543 bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits, in record__adjust_affinity()
1544 map->affinity_mask.bits, thread->mask->affinity.nbits); in record__adjust_affinity()
1545 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity), in record__adjust_affinity()
1546 (cpu_set_t *)thread->mask->affinity.bits); in record__adjust_affinity()
1548 pr_debug("threads[%d]: running on cpu%d: ", thread->tid, sched_getcpu()); in record__adjust_affinity()
1549 mmap_cpu_mask__scnprintf(&thread->mask->affinity, "affinity"); in record__adjust_affinity()
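record__adjust_affinity() rebinds the reading thread with sched_setaffinity(), driven by perf's own bitmap helpers. The same system call via the stock cpu_set_t API, assuming CPU 0 as the target:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		cpu_set_t set;

		CPU_ZERO(&set);
		CPU_SET(0, &set);	/* perf derives the target from the mmap's affinity mask */
		if (sched_setaffinity(0, sizeof(set), &set))	/* pid 0 == calling thread */
			return 1;
		printf("now running on cpu%d\n", sched_getcpu());
		return 0;
	}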
1560 event->header.size += increment; in process_comp_header()
1564 event->header.type = PERF_RECORD_COMPRESSED2; in process_comp_header()
1565 event->header.size = size; in process_comp_header()
1574 size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed2) - 1; in zstd_compress()
1575 struct zstd_data *zstd_data = &session->zstd_data; in zstd_compress()
1577 if (map && map->file) in zstd_compress()
1578 zstd_data = &map->zstd_data; in zstd_compress()
1585 if (map && map->file) { in zstd_compress()
1586 thread->bytes_transferred += src_size; in zstd_compress()
1587 thread->bytes_compressed += compressed; in zstd_compress()
1589 session->bytes_transferred += src_size; in zstd_compress()
1590 session->bytes_compressed += compressed; in zstd_compress()
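zstd_compress() above routes through perf's zstd_data wrapper. The underlying operation, one-shot zstd compression plus the transferred/compressed bookkeeping, looks like this in plain libzstd (an assumed external dependency, not perf code):

	#include <stdio.h>
	#include <stdlib.h>
	#include <zstd.h>

	int main(void)
	{
		const char src[] = "perf.data perf.data perf.data perf.data";
		size_t src_size = sizeof(src);
		size_t dst_cap = ZSTD_compressBound(src_size);
		void *dst = malloc(dst_cap);
		size_t compressed;

		if (!dst)
			return 1;

		compressed = ZSTD_compress(dst, dst_cap, src, src_size, /*level=*/1);
		if (ZSTD_isError(compressed)) {
			fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(compressed));
			return 1;
		}

		/* Mirrors the bytes_transferred/bytes_compressed accounting above. */
		printf("transferred=%zu compressed=%zu ratio=%.2f\n",
		       src_size, compressed, (double)src_size / compressed);
		free(dst);
		return 0;
	}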
1599 u64 bytes_written = rec->bytes_written; in record__mmap_read_evlist()
1604 int trace_fd = rec->data.file.fd; in record__mmap_read_evlist()
1610 nr_mmaps = thread->nr_mmaps; in record__mmap_read_evlist()
1611 maps = overwrite ? thread->overwrite_maps : thread->maps; in record__mmap_read_evlist()
1616 if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING) in record__mmap_read_evlist()
1626 if (map->core.base) { in record__mmap_read_evlist()
1629 flush = map->core.flush; in record__mmap_read_evlist()
1630 map->core.flush = 1; in record__mmap_read_evlist()
1635 map->core.flush = flush; in record__mmap_read_evlist()
1636 rc = -1; in record__mmap_read_evlist()
1643 map->core.flush = flush; in record__mmap_read_evlist()
1644 rc = -1; in record__mmap_read_evlist()
1649 map->core.flush = flush; in record__mmap_read_evlist()
1652 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode && in record__mmap_read_evlist()
1653 !rec->opts.auxtrace_sample_mode && in record__mmap_read_evlist()
1655 rc = -1; in record__mmap_read_evlist()
1668 * because per-cpu maps and files have data in record__mmap_read_evlist()
1671 if (!record__threads_enabled(rec) && bytes_written != rec->bytes_written) in record__mmap_read_evlist()
1684 err = record__mmap_read_evlist(rec, rec->evlist, false, synch); in record__mmap_read_all()
1688 return record__mmap_read_evlist(rec, rec->evlist, true, synch); in record__mmap_read_all()
1694 struct perf_mmap *map = fda->priv[fd].ptr; in record__thread_munmap_filtered()
1708 thread->tid = gettid(); in record__thread()
1710 err = write(thread->pipes.ack[1], &msg, sizeof(msg)); in record__thread()
1711 if (err == -1) in record__thread()
1713 thread->tid, strerror(errno)); in record__thread()
1715 pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu()); in record__thread()
1717 pollfd = &thread->pollfd; in record__thread()
1718 ctlfd_pos = thread->ctlfd_pos; in record__thread()
1721 unsigned long long hits = thread->samples; in record__thread()
1723 if (record__mmap_read_all(thread->rec, false) < 0 || terminate) in record__thread()
1726 if (hits == thread->samples) { in record__thread()
1728 err = fdarray__poll(pollfd, -1); in record__thread()
1735 thread->waking++; in record__thread()
1742 if (pollfd->entries[ctlfd_pos].revents & POLLHUP) { in record__thread()
1744 close(thread->pipes.msg[0]); in record__thread()
1745 thread->pipes.msg[0] = -1; in record__thread()
1746 pollfd->entries[ctlfd_pos].fd = -1; in record__thread()
1747 pollfd->entries[ctlfd_pos].events = 0; in record__thread()
1750 pollfd->entries[ctlfd_pos].revents = 0; in record__thread()
1752 record__mmap_read_all(thread->rec, true); in record__thread()
1754 err = write(thread->pipes.ack[1], &msg, sizeof(msg)); in record__thread()
1755 if (err == -1) in record__thread()
1757 thread->tid, strerror(errno)); in record__thread()
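record__thread() above is a poll(2)-driven loop that treats POLLHUP on its control descriptor as the terminate signal. The bare pattern, reduced to a single pipe with illustrative names:

	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int ctl[2];
		char byte;

		if (pipe(ctl))
			return 1;
		close(ctl[1]);	/* writer gone: poll() will report POLLHUP */

		struct pollfd pfd = { .fd = ctl[0], .events = POLLIN };

		for (;;) {
			if (poll(&pfd, 1, -1) < 0)
				break;
			if (pfd.revents & POLLHUP) {	/* control side closed: terminate */
				puts("terminate");
				break;
			}
			if (pfd.revents & POLLIN)
				(void)read(ctl[0], &byte, 1);
			pfd.revents = 0;
		}
		close(ctl[0]);
		return 0;
	}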
1764 struct perf_session *session = rec->session; in record__init_features()
1768 perf_header__set_feat(&session->header, feat); in record__init_features()
1770 if (rec->no_buildid) in record__init_features()
1771 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); in record__init_features()
1773 if (!have_tracepoints(&rec->evlist->core.entries)) in record__init_features()
1774 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); in record__init_features()
1776 if (!rec->opts.branch_stack) in record__init_features()
1777 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); in record__init_features()
1779 if (!rec->opts.full_auxtrace) in record__init_features()
1780 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); in record__init_features()
1782 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns)) in record__init_features()
1783 perf_header__clear_feat(&session->header, HEADER_CLOCKID); in record__init_features()
1785 if (!rec->opts.use_clockid) in record__init_features()
1786 perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA); in record__init_features()
1789 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); in record__init_features()
1792 perf_header__clear_feat(&session->header, HEADER_COMPRESSED); in record__init_features()
1794 perf_header__clear_feat(&session->header, HEADER_STAT); in record__init_features()
1801 struct perf_data *data = &rec->data; in record__finish_output()
1804 if (data->is_pipe) { in record__finish_output()
1806 data->file.size = rec->bytes_written; in record__finish_output()
1810 rec->session->header.data_size += rec->bytes_written; in record__finish_output()
1811 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR); in record__finish_output()
1813 for (i = 0; i < data->dir.nr; i++) in record__finish_output()
1814 data->dir.files[i].size = lseek(data->dir.files[i].fd, 0, SEEK_CUR); in record__finish_output()
1818 if (!rec->no_buildid) { in record__finish_output()
1821 if (rec->buildid_all) in record__finish_output()
1822 perf_session__dsos_hit_all(rec->session); in record__finish_output()
1824 perf_session__write_header(rec->session, rec->evlist, fd, true); in record__finish_output()
1833 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP; in record__synthesize_workload()
1835 if (rec->opts.tail_synthesize != tail) in record__synthesize_workload()
1838 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid); in record__synthesize_workload()
1840 return -1; in record__synthesize_workload()
1842 err = perf_event__synthesize_thread_map(&rec->tool, thread_map, in record__synthesize_workload()
1844 &rec->session->machines.host, in record__synthesize_workload()
1846 rec->opts.sample_address); in record__synthesize_workload()
1853 if (rec->opts.tail_synthesize != tail) in write_finished_init()
1864 struct perf_data *data = &rec->data; in record__switch_output()
1876 if (target__none(&rec->opts.target)) in record__switch_output()
1879 rec->samples = 0; in record__switch_output()
1884 return -EINVAL; in record__switch_output()
1888 rec->session->header.data_offset, in record__switch_output()
1891 rec->bytes_written = 0; in record__switch_output()
1892 rec->session->header.data_size = 0; in record__switch_output()
1897 data->path, timestamp); in record__switch_output()
1900 if (rec->switch_output.num_files) { in record__switch_output()
1901 int n = rec->switch_output.cur_file + 1; in record__switch_output()
1903 if (n >= rec->switch_output.num_files) in record__switch_output()
1905 rec->switch_output.cur_file = n; in record__switch_output()
1906 if (rec->switch_output.filenames[n]) { in record__switch_output()
1907 remove(rec->switch_output.filenames[n]); in record__switch_output()
1908 zfree(&rec->switch_output.filenames[n]); in record__switch_output()
1910 rec->switch_output.filenames[n] = new_filename; in record__switch_output()
1920 * In 'perf record --switch-output' without -a, in record__switch_output()
1928 if (target__none(&rec->opts.target)) in record__switch_output()
1945 lost->lost = lost_count; in __record__save_lost_samples()
1946 if (evsel->core.ids) { in __record__save_lost_samples()
1947 sid = xyarray__entry(evsel->core.sample_id, cpu_idx, thread_idx); in __record__save_lost_samples()
1948 sample.id = sid->id; in __record__save_lost_samples()
1952 evsel->core.attr.sample_type, &sample); in __record__save_lost_samples()
1953 lost->header.size = sizeof(*lost) + id_hdr_size; in __record__save_lost_samples()
1954 lost->header.misc = misc_flag; in __record__save_lost_samples()
1955 record__write(rec, NULL, lost, lost->header.size); in __record__save_lost_samples()
1961 struct perf_session *session = rec->session; in record__read_lost_samples()
1966 if (session->evlist == NULL) in record__read_lost_samples()
1969 evlist__for_each_entry(session->evlist, evsel) { in record__read_lost_samples()
1970 struct xyarray *xy = evsel->core.sample_id; in record__read_lost_samples()
1973 if (xy == NULL || evsel->core.fd == NULL) in record__read_lost_samples()
1975 if (xyarray__max_x(evsel->core.fd) != xyarray__max_x(xy) || in record__read_lost_samples()
1976 xyarray__max_y(evsel->core.fd) != xyarray__max_y(xy)) { in record__read_lost_samples()
1985 if (perf_evsel__read(&evsel->core, x, y, &count) < 0) { in record__read_lost_samples()
2020 workload_exec_errno = info->si_value.sival_int; in workload_exec_failed_signal()
2031 if (evlist->mmap && evlist->mmap[0].core.base) in evlist__pick_pc()
2032 return evlist->mmap[0].core.base; in evlist__pick_pc()
2033 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base) in evlist__pick_pc()
2034 return evlist->overwrite_mmap[0].core.base; in evlist__pick_pc()
2041 const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist); in record__pick_pc()
2049 struct perf_session *session = rec->session; in record__synthesize()
2050 struct machine *machine = &session->machines.host; in record__synthesize()
2051 struct perf_data *data = &rec->data; in record__synthesize()
2052 struct record_opts *opts = &rec->opts; in record__synthesize()
2053 struct perf_tool *tool = &rec->tool; in record__synthesize()
2057 if (rec->opts.tail_synthesize != tail) in record__synthesize()
2060 if (data->is_pipe) { in record__synthesize()
2066 rec->bytes_written += err; in record__synthesize()
2077 session->evlist, machine); in record__synthesize()
2081 if (rec->opts.full_auxtrace) { in record__synthesize()
2082 err = perf_event__synthesize_auxtrace_info(rec->itr, tool, in record__synthesize()
2088 if (!evlist__exclude_kernel(rec->evlist)) { in record__synthesize()
2103 machines__process_guests(&session->machines, in record__synthesize()
2107 err = perf_event__synthesize_extra_attr(&rec->tool, in record__synthesize()
2108 rec->evlist, in record__synthesize()
2110 data->is_pipe); in record__synthesize()
2114 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads, in record__synthesize()
2122 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus, in record__synthesize()
2125 pr_err("Couldn't synthesize cpu map.\n"); in record__synthesize()
2136 if (rec->opts.synth & PERF_SYNTH_CGROUP) { in record__synthesize()
2145 if (rec->opts.nr_threads_synthesize > 1) { in record__synthesize()
2151 if (rec->opts.synth & PERF_SYNTH_TASK) { in record__synthesize()
2152 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP; in record__synthesize()
2154 err = __machine__synthesize_threads(machine, tool, &opts->target, in record__synthesize()
2155 rec->evlist->core.threads, in record__synthesize()
2156 f, needs_mmap, opts->sample_address, in record__synthesize()
2157 rec->opts.nr_threads_synthesize); in record__synthesize()
2160 if (rec->opts.nr_threads_synthesize > 1) { in record__synthesize()
2172 perf_event__synthesize_final_bpf_metadata(rec->session, in record__synthesize_final_bpf_metadata()
2180 pthread_kill(rec->thread_id, SIGUSR2); in record__process_signal_event()
2186 struct record_opts *opts = &rec->opts; in record__setup_sb_evlist()
2188 if (rec->sb_evlist != NULL) { in record__setup_sb_evlist()
2190 * We get here if --switch-output-event populated the in record__setup_sb_evlist()
2194 evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec); in record__setup_sb_evlist()
2195 rec->thread_id = pthread_self(); in record__setup_sb_evlist()
2198 if (!opts->no_bpf_event) { in record__setup_sb_evlist()
2199 if (rec->sb_evlist == NULL) { in record__setup_sb_evlist()
2200 rec->sb_evlist = evlist__new(); in record__setup_sb_evlist()
2202 if (rec->sb_evlist == NULL) { in record__setup_sb_evlist()
2204 return -1; in record__setup_sb_evlist()
2208 if (evlist__add_bpf_sb_event(rec->sb_evlist, perf_session__env(rec->session))) { in record__setup_sb_evlist()
2210 return -1; in record__setup_sb_evlist()
2214 if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) { in record__setup_sb_evlist()
2216 opts->no_bpf_event = true; in record__setup_sb_evlist()
2224 struct perf_session *session = rec->session; in record__init_clock()
2230 if (!rec->opts.use_clockid) in record__init_clock()
2233 if (rec->opts.use_clockid && rec->opts.clockid_res_ns) in record__init_clock()
2234 env->clock.clockid_res_ns = rec->opts.clockid_res_ns; in record__init_clock()
2236 env->clock.clockid = rec->opts.clockid; in record__init_clock()
2240 return -1; in record__init_clock()
2243 if (clock_gettime(rec->opts.clockid, &ref_clockid)) { in record__init_clock()
2245 return -1; in record__init_clock()
2251 env->clock.tod_ns = ref; in record__init_clock()
2256 env->clock.clockid_ns = ref; in record__init_clock()
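record__init_clock() stores paired wall-clock (tod_ns) and session-clock (clockid_ns) references so timestamps can be correlated later. The capture step in isolation, assuming CLOCK_MONOTONIC stands in for the session clockid:

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec tod, ref;

		if (clock_gettime(CLOCK_REALTIME, &tod) || clock_gettime(CLOCK_MONOTONIC, &ref))
			return 1;

		/* Same u64-nanosecond encoding as env->clock.tod_ns / clockid_ns. */
		unsigned long long tod_ns = (unsigned long long)tod.tv_sec * 1000000000ULL + tod.tv_nsec;
		unsigned long long ref_ns = (unsigned long long)ref.tv_sec * 1000000000ULL + ref.tv_nsec;

		printf("tod_ns=%llu clockid_ns=%llu\n", tod_ns, ref_ns);
		return 0;
	}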
2265 if (auxtrace_record__snapshot_start(rec->itr)) in hit_auxtrace_snapshot_trigger()
2274 pid_t tid = thread_data->tid; in record__terminate_thread()
2276 close(thread_data->pipes.msg[1]); in record__terminate_thread()
2277 thread_data->pipes.msg[1] = -1; in record__terminate_thread()
2278 err = read(thread_data->pipes.ack[0], &ack, sizeof(ack)); in record__terminate_thread()
2283 thread->tid, tid); in record__terminate_thread()
2290 int t, tt, err, ret = 0, nr_threads = rec->nr_threads; in record__start_threads()
2291 struct record_thread *thread_data = rec->thread_data; in record__start_threads()
2304 return -1; in record__start_threads()
2315 MMAP_CPU_MASK_BYTES(&(thread_data[t].mask->affinity)), in record__start_threads()
2316 (cpu_set_t *)(thread_data[t].mask->affinity.bits)); in record__start_threads()
2322 ret = -1; in record__start_threads()
2328 pr_debug2("threads[%d]: sent %s\n", rec->thread_data[t].tid, in record__start_threads()
2332 thread->tid, rec->thread_data[t].tid); in record__start_threads()
2335 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity), in record__start_threads()
2336 (cpu_set_t *)thread->mask->affinity.bits); in record__start_threads()
2338 pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu()); in record__start_threads()
2345 ret = -1; in record__start_threads()
2354 struct record_thread *thread_data = rec->thread_data; in record__stop_threads()
2356 for (t = 1; t < rec->nr_threads; t++) in record__stop_threads()
2359 for (t = 0; t < rec->nr_threads; t++) { in record__stop_threads()
2360 rec->samples += thread_data[t].samples; in record__stop_threads()
2363 rec->session->bytes_transferred += thread_data[t].bytes_transferred; in record__stop_threads()
2364 rec->session->bytes_compressed += thread_data[t].bytes_compressed; in record__stop_threads()
2381 struct record_thread *thread_data = rec->thread_data; in record__waking()
2383 for (t = 0; t < rec->nr_threads; t++) in record__waking()
2394 struct perf_tool *tool = &rec->tool; in __cmd_record()
2395 struct record_opts *opts = &rec->opts; in __cmd_record()
2396 struct perf_data *data = &rec->data; in __cmd_record()
2410 if (rec->opts.record_cgroup) { in __cmd_record()
2413 return -1; in __cmd_record()
2417 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) { in __cmd_record()
2419 if (rec->opts.auxtrace_snapshot_mode) in __cmd_record()
2421 if (rec->switch_output.enabled) in __cmd_record()
2428 tool->sample = process_sample_event; in __cmd_record()
2429 tool->fork = perf_event__process_fork; in __cmd_record()
2430 tool->exit = perf_event__process_exit; in __cmd_record()
2431 tool->comm = perf_event__process_comm; in __cmd_record()
2432 tool->namespaces = perf_event__process_namespaces; in __cmd_record()
2433 tool->mmap = build_id__process_mmap; in __cmd_record()
2434 tool->mmap2 = build_id__process_mmap2; in __cmd_record()
2435 tool->itrace_start = process_timestamp_boundary; in __cmd_record()
2436 tool->aux = process_timestamp_boundary; in __cmd_record()
2437 tool->namespace_events = rec->opts.record_namespaces; in __cmd_record()
2438 tool->cgroup_events = rec->opts.record_cgroup; in __cmd_record()
2446 if (perf_data__is_pipe(&rec->data)) { in __cmd_record()
2448 return -1; in __cmd_record()
2450 if (rec->opts.full_auxtrace) { in __cmd_record()
2452 return -1; in __cmd_record()
2457 rec->session = session; in __cmd_record()
2459 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) { in __cmd_record()
2461 return -1; in __cmd_record()
2467 status = -1; in __cmd_record()
2470 err = evlist__add_wakeup_eventfd(rec->evlist, done_fd); in __cmd_record()
2478 env->comp_type = PERF_COMP_ZSTD; in __cmd_record()
2479 env->comp_level = rec->opts.comp_level; in __cmd_record()
2481 if (rec->opts.kcore && in __cmd_record()
2482 !record__kcore_readable(&session->machines.host)) { in __cmd_record()
2484 return -1; in __cmd_record()
2488 return -1; in __cmd_record()
2493 err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe, in __cmd_record()
2508 if (data->is_pipe && rec->evlist->core.nr_entries == 1) in __cmd_record()
2509 rec->opts.sample_id = true; in __cmd_record()
2511 if (rec->timestamp_filename && perf_data__is_pipe(data)) { in __cmd_record()
2512 rec->timestamp_filename = false; in __cmd_record()
2513 pr_warning("WARNING: --timestamp-filename option is not available in pipe mode.\n"); in __cmd_record()
2520 evlist__uniquify_evsel_names(rec->evlist, &stat_config); in __cmd_record()
2522 evlist__config(rec->evlist, opts, &callchain_param); in __cmd_record()
2527 err = -1; in __cmd_record()
2532 env->comp_mmap_len = session->evlist->core.mmap_len; in __cmd_record()
2534 if (rec->opts.kcore) { in __cmd_record()
2535 err = record__kcore_copy(&session->machines.host, data); in __cmd_record()
2546 if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) { in __cmd_record()
2548 rec->tool.ordered_events = false; in __cmd_record()
2551 if (evlist__nr_groups(rec->evlist) == 0) in __cmd_record()
2552 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC); in __cmd_record()
2554 if (data->is_pipe) { in __cmd_record()
2559 err = perf_session__write_header(session, rec->evlist, fd, false); in __cmd_record()
2564 err = -1; in __cmd_record()
2565 if (!rec->no_buildid in __cmd_record()
2566 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) { in __cmd_record()
2568 "Use --no-buildid to profile anyway.\n"); in __cmd_record()
2572 if (!evlist__needs_bpf_sb_event(rec->evlist)) in __cmd_record()
2573 opts->no_bpf_event = true; in __cmd_record()
2583 if (rec->realtime_prio) { in __cmd_record()
2586 param.sched_priority = rec->realtime_prio; in __cmd_record()
2589 err = -1; in __cmd_record()
2602 if (!target__none(&opts->target) && !opts->target.initial_delay) in __cmd_record()
2603 evlist__enable(rec->evlist); in __cmd_record()
2606 * offcpu-time does not call execve, so enable_on_exec wouldn't work in __cmd_record()
2609 if (rec->off_cpu) in __cmd_record()
2610 evlist__enable_evsel(rec->evlist, (char *)OFFCPU_EVENT); in __cmd_record()
2616 struct machine *machine = &session->machines.host; in __cmd_record()
2620 event = malloc(sizeof(event->comm) + machine->id_hdr_size); in __cmd_record()
2622 err = -ENOMEM; in __cmd_record()
2633 rec->evlist->workload.pid, in __cmd_record()
2638 if (tgid == -1) in __cmd_record()
2641 event = malloc(sizeof(event->namespaces) + in __cmd_record()
2643 machine->id_hdr_size); in __cmd_record()
2645 err = -ENOMEM; in __cmd_record()
2653 rec->evlist->workload.pid, in __cmd_record()
2658 evlist__start_workload(rec->evlist); in __cmd_record()
2661 if (opts->target.initial_delay) { in __cmd_record()
2663 if (opts->target.initial_delay > 0) { in __cmd_record()
2664 usleep(opts->target.initial_delay * USEC_PER_MSEC); in __cmd_record()
2665 evlist__enable(rec->evlist); in __cmd_record()
2670 err = event_enable_timer__start(rec->evlist->eet); in __cmd_record()
2691 unsigned long long hits = thread->samples; in __cmd_record()
2694 * rec->evlist->bkw_mmap_state can be in __cmd_record()
2696 * hits != rec->samples in previous round. in __cmd_record()
2702 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING); in __cmd_record()
2707 err = -1; in __cmd_record()
2717 err = -1; in __cmd_record()
2732 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING) in __cmd_record()
2741 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING); in __cmd_record()
2746 thread->waking = 0; in __cmd_record()
2755 /* re-arm the alarm */ in __cmd_record()
2756 if (rec->switch_output.time) in __cmd_record()
2757 alarm(rec->switch_output.time); in __cmd_record()
2760 if (hits == thread->samples) { in __cmd_record()
2763 err = fdarray__poll(&thread->pollfd, -1); in __cmd_record()
2770 thread->waking++; in __cmd_record()
2772 if (fdarray__filter(&thread->pollfd, POLLERR | POLLHUP, in __cmd_record()
2776 err = record__update_evlist_pollfd_from_thread(rec, rec->evlist, thread); in __cmd_record()
2781 if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) { in __cmd_record()
2785 evlist__ctlfd_ack(rec->evlist); in __cmd_record()
2801 err = event_enable_timer__process(rec->evlist->eet); in __cmd_record()
2814 if (done && !disabled && !target__none(&opts->target)) { in __cmd_record()
2816 evlist__disable(rec->evlist); in __cmd_record()
2826 if (opts->auxtrace_snapshot_on_exit) in __cmd_record()
2834 evlist__format_evsels(rec->evlist, &sb, 2048); in __cmd_record()
2839 err = -1; in __cmd_record()
2849 if (target__none(&rec->opts.target)) in __cmd_record()
2857 evlist__finalize_ctlfd(rec->evlist); in __cmd_record()
2860 if (rec->session->bytes_transferred && rec->session->bytes_compressed) { in __cmd_record()
2861 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed; in __cmd_record()
2862 env->comp_ratio = ratio + 0.5; in __cmd_record()
2869 kill(rec->evlist->workload.pid, SIGTERM); in __cmd_record()
2882 if (rec->off_cpu) in __cmd_record()
2883 rec->bytes_written += off_cpu_write(rec->session); in __cmd_record()
2888 rec->samples = 0; in __cmd_record()
2891 if (!rec->timestamp_filename) { in __cmd_record()
2906 const char *postfix = rec->timestamp_filename ? in __cmd_record()
2909 if (rec->samples && !rec->opts.full_auxtrace) in __cmd_record()
2911 " (%" PRIu64 " samples)", rec->samples); in __cmd_record()
2917 data->path, postfix, samples); in __cmd_record()
2920 rec->session->bytes_transferred / 1024.0 / 1024.0, in __cmd_record()
2930 done_fd = -1; in __cmd_record()
2935 zstd_fini(&session->zstd_data); in __cmd_record()
2936 if (!opts->no_bpf_event) in __cmd_record()
2937 evlist__stop_sb_thread(rec->sb_evlist); in __cmd_record()
2947 pr_debug("callchain: type %s\n", str[callchain->record_mode]); in callchain_debug()
2949 if (callchain->record_mode == CALLCHAIN_DWARF) in callchain_debug()
2951 callchain->dump_size); in callchain_debug()
2959 callchain->enabled = !unset; in record_opts__parse_callchain()
2961 /* --no-call-graph */ in record_opts__parse_callchain()
2963 callchain->record_mode = CALLCHAIN_NONE; in record_opts__parse_callchain()
2971 if (callchain->record_mode == CALLCHAIN_DWARF) in record_opts__parse_callchain()
2972 record->sample_address = true; in record_opts__parse_callchain()
2983 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset); in record_parse_callchain_opt()
2990 struct callchain_param *callchain = opt->value; in record_callchain_opt()
2992 callchain->enabled = true; in record_callchain_opt()
2994 if (callchain->record_mode == CALLCHAIN_NONE) in record_callchain_opt()
2995 callchain->record_mode = CALLCHAIN_FP; in record_callchain_opt()
3005 if (!strcmp(var, "record.build-id")) { in perf_record_config()
3007 rec->no_buildid_cache = false; in perf_record_config()
3008 else if (!strcmp(value, "no-cache")) in perf_record_config()
3009 rec->no_buildid_cache = true; in perf_record_config()
3011 rec->no_buildid = true; in perf_record_config()
3013 rec->buildid_mmap = true; in perf_record_config()
3014 else if (!strcmp(value, "no-mmap")) in perf_record_config()
3015 rec->buildid_mmap = false; in perf_record_config()
3017 return -1; in perf_record_config()
3020 if (!strcmp(var, "record.call-graph")) { in perf_record_config()
3021 var = "call-graph.record-mode"; in perf_record_config()
3026 rec->opts.nr_cblocks = strtol(value, NULL, 0); in perf_record_config()
3027 if (!rec->opts.nr_cblocks) in perf_record_config()
3028 rec->opts.nr_cblocks = nr_cblocks_default; in perf_record_config()
3032 rec->debuginfod.urls = strdup(value); in perf_record_config()
3033 if (!rec->debuginfod.urls) in perf_record_config()
3034 return -ENOMEM; in perf_record_config()
3035 rec->debuginfod.set = true; in perf_record_config()
3043 struct record *rec = (struct record *)opt->value; in record__parse_event_enable_time()
3045 return evlist__parse_event_enable_time(rec->evlist, &rec->opts, str, unset); in record__parse_event_enable_time()
3050 struct record_opts *opts = (struct record_opts *)opt->value; in record__parse_affinity()
3056 opts->affinity = PERF_AFFINITY_NODE; in record__parse_affinity()
3057 else if (!strcasecmp(str, "cpu")) in record__parse_affinity()
3058 opts->affinity = PERF_AFFINITY_CPU; in record__parse_affinity()
3065 mask->nbits = nr_bits; in record__mmap_cpu_mask_alloc()
3066 mask->bits = bitmap_zalloc(mask->nbits); in record__mmap_cpu_mask_alloc()
3067 if (!mask->bits) in record__mmap_cpu_mask_alloc()
3068 return -ENOMEM; in record__mmap_cpu_mask_alloc()
3075 bitmap_free(mask->bits); in record__mmap_cpu_mask_free()
3076 mask->nbits = 0; in record__mmap_cpu_mask_free()
3083 ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits); in record__thread_mask_alloc()
3085 mask->affinity.bits = NULL; in record__thread_mask_alloc()
3089 ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits); in record__thread_mask_alloc()
3091 record__mmap_cpu_mask_free(&mask->maps); in record__thread_mask_alloc()
3092 mask->maps.bits = NULL; in record__thread_mask_alloc()
3100 record__mmap_cpu_mask_free(&mask->maps); in record__thread_mask_free()
3101 record__mmap_cpu_mask_free(&mask->affinity); in record__thread_mask_free()
3107 struct record_opts *opts = opt->value; in record__parse_threads()
3110 opts->threads_spec = THREAD_SPEC__CPU; in record__parse_threads()
3114 opts->threads_user_spec = strdup(str); in record__parse_threads()
3115 if (!opts->threads_user_spec) in record__parse_threads()
3116 return -ENOMEM; in record__parse_threads()
3117 opts->threads_spec = THREAD_SPEC__USER; in record__parse_threads()
3121 opts->threads_spec = s; in record__parse_threads()
3127 if (opts->threads_spec == THREAD_SPEC__USER) in record__parse_threads()
3128 pr_debug("threads_spec: %s\n", opts->threads_user_spec); in record__parse_threads()
3130 pr_debug("threads_spec: %s\n", thread_spec_tags[opts->threads_spec]); in record__parse_threads()
3138 unsigned long *s = (unsigned long *)opt->value; in parse_output_max_size()
3154 if (val != (unsigned long) -1) { in parse_output_max_size()
3159 return -1; in parse_output_max_size()
3166 struct record_opts *opts = opt->value; in record__parse_mmap_pages()
3172 return -EINVAL; in record__parse_mmap_pages()
3176 return -ENOMEM; in record__parse_mmap_pages()
3186 opts->mmap_pages = mmap_pages; in record__parse_mmap_pages()
3198 opts->auxtrace_mmap_pages = mmap_pages; in record__parse_mmap_pages()
3209 struct record_opts *opts = opt->value; in record__parse_off_cpu_thresh()
3214 return -EINVAL; in record__parse_off_cpu_thresh()
3220 return -EINVAL; in record__parse_off_cpu_thresh()
3222 opts->off_cpu_thresh_ns = off_cpu_thresh_ms * NSEC_PER_MSEC; in record__parse_off_cpu_thresh()
3235 struct record_opts *opts = opt->value; in parse_control_option()
3237 return evlist__parse_control(str, &opts->ctl_fd, &opts->ctl_fd_ack, &opts->ctl_fd_close); in parse_control_option()
3242 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages); in switch_output_size_warn()
3243 struct switch_output *s = &rec->switch_output; in switch_output_size_warn()
3247 if (s->size < wakeup_size) { in switch_output_size_warn()
3251 pr_warning("WARNING: switch-output data size lower than " in switch_output_size_warn()
3259 struct switch_output *s = &rec->switch_output; in switch_output_setup()
3277 * If we're using --switch-output-events, then we imply its in switch_output_setup()
3278 * --switch-output=signal, as we'll send a SIGUSR2 from the side band in switch_output_setup()
3281 if (rec->switch_output_event_set) { in switch_output_setup()
3283 pr_warning("WARNING: --switch-output-event option is not available in parallel streaming mode.\n"); in switch_output_setup()
3289 if (!s->set) in switch_output_setup()
3293 pr_warning("WARNING: --switch-output option is not available in parallel streaming mode.\n"); in switch_output_setup()
3297 if (!strcmp(s->str, "signal")) { in switch_output_setup()
3299 s->signal = true; in switch_output_setup()
3300 pr_debug("switch-output with SIGUSR2 signal\n"); in switch_output_setup()
3304 val = parse_tag_value(s->str, tags_size); in switch_output_setup()
3305 if (val != (unsigned long) -1) { in switch_output_setup()
3306 s->size = val; in switch_output_setup()
3307 pr_debug("switch-output with %s size threshold\n", s->str); in switch_output_setup()
3311 val = parse_tag_value(s->str, tags_time); in switch_output_setup()
3312 if (val != (unsigned long) -1) { in switch_output_setup()
3313 s->time = val; in switch_output_setup()
3314 pr_debug("switch-output with %s time threshold (%lu seconds)\n", in switch_output_setup()
3315 s->str, s->time); in switch_output_setup()
3319 return -1; in switch_output_setup()
3322 rec->timestamp_filename = true; in switch_output_setup()
3323 s->enabled = true; in switch_output_setup()
3325 if (s->size && !rec->opts.no_buffering) in switch_output_setup()
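switch_output_setup() leans on parse_tag_value() to decode the "<size>"/"<time>" forms. A hypothetical standalone equivalent of the size case (parse_tag and its tag table here are made up for illustration; perf's real helper lives elsewhere in the tree):

	#include <stdio.h>
	#include <stdlib.h>

	struct tag { char name; unsigned long mult; };

	static unsigned long parse_tag(const char *str, const struct tag *tags)
	{
		char *end;
		unsigned long val = strtoul(str, &end, 10);

		if (end == str)
			return (unsigned long)-1;	/* no digits at all */
		for (; tags->name; tags++)
			if (*end == tags->name && end[1] == '\0')
				return val * tags->mult;
		return (unsigned long)-1;		/* unknown or missing suffix */
	}

	int main(void)
	{
		static const struct tag size_tags[] = {
			{ 'B', 1 }, { 'K', 1 << 10 }, { 'M', 1 << 20 }, { 'G', 1 << 30 }, { 0, 0 },
		};

		printf("%lu\n", parse_tag("100M", size_tags));	/* 104857600 */
		return 0;
	}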
3333 "perf record [<options>] -- <command> [<options>]",
3345 if (!(event->header.misc & PERF_RECORD_MISC_USER)) in build_id__process_mmap()
3357 if (!(event->header.misc & PERF_RECORD_MISC_USER)) in build_id__process_mmap2()
3370 set_timestamp_boundary(rec, sample->time); in process_timestamp_boundary()
3378 struct record_opts *opts = opt->value; in parse_record_synth_option()
3382 return -1; in parse_record_synth_option()
3384 opts->synth = parse_synth_opt(p); in parse_record_synth_option()
3387 if (opts->synth < 0) { in parse_record_synth_option()
3389 return -1; in parse_record_synth_option()
3398 * builtin-script, leave it here.
3417 .ctl_fd = -1,
3418 .ctl_fd_ack = -1,
3439 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
3441 * from builtin-record.c, i.e. use record_opts,
3453 "\t\t\t Use perf report --latency for latency-centric profile."),
3454 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
3463 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
3465 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
3467 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
3468 "system-wide collection from all CPUs"),
3469 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
3474 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
3477 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
3478 "synthesize non-sample events at the end of output"),
3480 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
3481 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
3486 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
3489 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
3493 NULL, "enables call-graph recording" ,
3495 OPT_CALLBACK(0, "call-graph", &record.opts,
3504 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
3506 OPT_BOOLEAN(0, "data-page-size", &record.opts.sample_data_page_size,
3508 OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
3510 OPT_BOOLEAN(0, "sample-mem-info", &record.opts.sample_data_src,
3512 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
3513 OPT_BOOLEAN(0, "sample-identifier", &record.opts.sample_identifier,
3520 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
3522 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
3525 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
3532 … "ms to wait before starting measurement after program start (-1: start with events disabled), "
3533 "or ranges of time to enable events e.g. '-D 10-20,30-40'",
3538 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
3542 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
3549 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
3550 "use per-thread mmaps"),
3551 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
3553 " use '-I?' to list register names", parse_intr_regs),
3554 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
3556 " use '--user-regs=?' to list register names", parse_user_regs),
3557 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
3564 OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
3566 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
3570 OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
3572 OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
3575 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
3578 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
3581 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
3583 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
3587 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
3588 "Record build-id of all DSOs regardless of hits"),
3589 OPT_BOOLEAN_SET(0, "buildid-mmap", &record.buildid_mmap, &record.buildid_mmap_set,
3590 "Record build-id in mmap events and skip build-id processing."),
3591 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
3593 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
3595 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
3599 OPT_CALLBACK_SET(0, "switch-output-event", &switch_output_parse_events_option_args,
3603 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
3605 OPT_BOOLEAN(0, "dry-run", &dry_run,
3612 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
3613 … "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
3616 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default, "n",
3617 …"Compress records using specified level (default: 1 - fastest compression, 22 - greatest compressi…
3620 OPT_CALLBACK(0, "max-size", &record.output_max_size,
3622 OPT_UINTEGER(0, "num-thread-synthesize",
3626 OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
3630 OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
3631 …"Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable…
3633 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
3634 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
3637 "Fine-tune event synthesis: default=all", parse_record_synth_option),
3645 OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
3646 OPT_STRING(0, "setup-filter", &record.filter_action, "pin|unpin",
3648 OPT_CALLBACK(0, "off-cpu-thresh", &record.opts, "ms",
3649 … "Dump off-cpu samples if off-cpu time exceeds this threshold (in milliseconds). (Default: 500ms)",
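/*
 * Illustrative invocations of the options above (not from the original
 * source):
 *
 *   perf record -a -g -- sleep 10               # system-wide, call graphs
 *   perf record -C 0-3 -z ./workload            # CPUs 0-3, compressed records
 *   perf record --off-cpu -- ./workload         # sample off-cpu time too
 *   perf record --control=fifo:ctl.fifo,ack.fifo ...  # runtime enable/disable
 */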
/* record__mmap_cpu_mask_init() */
	struct perf_cpu cpu;

	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, cpus) {
		/* Return ENODEV if input cpu is greater than max cpu */
		if ((unsigned long)cpu.cpu > mask->nbits)
			return -ENODEV;
		__set_bit(cpu.cpu, mask->bits);
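/*
 * Effect of the loop above, e.g. for cpus = {0, 2, 5}: bits 0, 2 and 5 of
 * mask->bits are set, and any CPU whose number exceeds mask->nbits makes
 * the whole init fail with -ENODEV.
 */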
/* record__mmap_cpu_mask_init_spec() */
		return -ENOMEM;
	bitmap_zero(mask->bits, mask->nbits);
		return -ENODEV;
/* record__free_thread_masks() */
	if (rec->thread_masks)
			record__thread_mask_free(&rec->thread_masks[t]);
	zfree(&rec->thread_masks);
/* record__alloc_thread_masks() */
	rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
	if (!rec->thread_masks) {
		return -ENOMEM;
		ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
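/*
 * If one of the per-thread allocations fails partway, the (elided) error
 * path presumably unwinds through record__free_thread_masks() above, so a
 * half-built array is never left behind.
 */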
/* record__init_thread_cpu_masks() */
	ret = record__alloc_thread_masks(rec, nr_cpus, cpu__max_cpu().cpu);
	rec->nr_threads = nr_cpus;
	pr_debug("nr_threads: %d\n", rec->nr_threads);

	for (t = 0; t < rec->nr_threads; t++) {
		__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
		__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
		mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
		mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
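/*
 * This is the --threads=cpu layout: one reader thread per CPU in the map,
 * where thread t both consumes CPU t's mmap buffer (maps bits) and is
 * pinned to that CPU (affinity bits); e.g. 8 CPUs gives 8 threads.
 */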
/* record__init_thread_masks_spec() */
	ret = record__mmap_cpu_mask_alloc(&cpus_mask, cpu__max_cpu().cpu);
		pr_err("Failed to init cpu mask\n");
	ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu().cpu);
	ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
			ret = -EINVAL;
			ret = -EINVAL;
			ret = -EINVAL;
			ret = -EINVAL;
		thread_masks = realloc(rec->thread_masks, (t + 1) * sizeof(struct thread_mask));
			ret = -ENOMEM;
		rec->thread_masks = thread_masks;
		rec->thread_masks[t] = thread_mask;
			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
		ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
	rec->nr_threads = t;
	pr_debug("nr_threads: %d\n", rec->nr_threads);
	if (!rec->nr_threads)
		ret = -EINVAL;
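/*
 * Roughly: each spec entry contributes one thread_mask, pairing a "maps"
 * CPU set (which mmap buffers the thread reads) with an "affinity" CPU set
 * (where that thread may be scheduled); rec->thread_masks is grown by
 * realloc() per entry, and specs naming unavailable CPUs, or a spec list
 * that yields no threads at all, fail with -EINVAL.
 */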
3874 pr_err("Failed to allocate CPU topology\n"); in record__init_thread_core_masks()
3875 return -ENOMEM; in record__init_thread_core_masks()
3878 ret = record__init_thread_masks_spec(rec, cpus, topo->core_cpus_list, in record__init_thread_core_masks()
3879 topo->core_cpus_list, topo->core_cpus_lists); in record__init_thread_core_masks()
3892 pr_err("Failed to allocate CPU topology\n"); in record__init_thread_package_masks()
3893 return -ENOMEM; in record__init_thread_package_masks()
3896 ret = record__init_thread_masks_spec(rec, cpus, topo->package_cpus_list, in record__init_thread_package_masks()
3897 topo->package_cpus_list, topo->package_cpus_lists); in record__init_thread_package_masks()
/* record__init_thread_numa_masks() */
		return -ENOMEM;
	spec = zalloc(topo->nr * sizeof(char *));
		ret = -ENOMEM;
	for (s = 0; s < topo->nr; s++)
		spec[s] = topo->nodes[s].cpus;
	ret = record__init_thread_masks_spec(rec, cpus, spec, spec, topo->nr);
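/*
 * The core/package/numa initializers are thin wrappers: they feed the
 * topology's per-core, per-package or per-node CPU list strings into
 * record__init_thread_masks_spec(), giving one reader thread per core,
 * package or NUMA node, with maps and affinity set to the same CPU list.
 */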
/* record__init_thread_user_masks() */
	for (t = 0, user_spec = (char *)rec->opts.threads_user_spec; ; t++, user_spec = NULL) {
			ret = -ENOMEM;
			ret = -ENOMEM;
			ret = -EINVAL;
			ret = -ENOMEM;
			ret = -ENOMEM;
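/*
 * For --threads=<user spec> the string is tokenized here; the spec is
 * expected (per perf-record(1)) to be colon-separated
 * <maps cpus>/<affinity cpus> pairs, e.g.
 *
 *   perf record --threads=0-3/0:4-7/4 ...
 *
 * i.e. two threads: one reading CPUs 0-3 pinned to CPU 0, one reading
 * CPUs 4-7 pinned to CPU 4.
 */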
/* record__init_thread_default_masks() */
	ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu().cpu);
	if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus))
		return -ENODEV;
	rec->nr_threads = 1;
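/*
 * Default (no --threads) mode: a single mask whose maps bits span every
 * CPU in the evlist map, i.e. one reader thread servicing all buffers.
 */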
/* record__init_thread_masks() */
	struct perf_cpu_map *cpus = rec->evlist->core.all_cpus;

	if (evlist__per_thread(rec->evlist)) {
		pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n");
		return -EINVAL;

	switch (rec->opts.threads_spec) {
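/*
 * The switch dispatches rec->opts.threads_spec ("cpu", "core", "package",
 * "numa" or "user") to the matching record__init_thread_*_masks() helper
 * above, with the single-threaded default masks used when no spec was
 * given.
 */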
/* cmd_record() */
	set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true);
	rec->opts.affinity = PERF_AFFINITY_SYS;

	rec->evlist = evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	/* Make system wide (-a) the default target. */
	if (!argc && target__none(&rec->opts.target))
		rec->opts.target.system_wide = true;

	if (nr_cgroups && !rec->opts.target.system_wide) {
		    "cgroup monitoring only available in system-wide mode");
	 * can't work for system-wide mode, but exact semantics
		pr_err("Failed: latency profiling is not supported with system-wide collection.\n");
		err = -EINVAL;

	if (!rec->buildid_mmap) {
	} else if (rec->buildid_mmap_set) {
		/*
		 * Explicitly passing --buildid-mmap disables buildid processing
		 */
		rec->no_buildid = true;
	if (rec->buildid_mmap && !perf_can_record_build_id()) {
			   "Disable this warning with --no-buildid-mmap\n");
		rec->buildid_mmap = false;
	if (rec->buildid_mmap) {
		rec->opts.build_id = true;
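/*
 * Net effect of the --buildid-mmap handling: when usable, build IDs ride
 * inside the synthesized mmap events at record time (opts.build_id), so
 * the separate post-processing pass over perf.data is skipped
 * (rec->no_buildid above); when perf_can_record_build_id() fails, the
 * option is disabled with a warning.
 */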
	if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
		err = -EINVAL;

	if (rec->opts.kcore)
		rec->opts.text_poke = true;
	if (rec->opts.kcore || record__threads_enabled(rec))
		rec->data.is_dir = true;

	if (rec->opts.affinity != PERF_AFFINITY_SYS) {
		pr_err("--affinity option is mutually exclusive to parallel streaming mode.\n");
		pr_err("Asynchronous streaming mode (--aio) is mutually exclusive to parallel streaming mode.\n");

	if (rec->opts.comp_level != 0) {
		rec->no_buildid = true;

	if (rec->opts.record_switch_events &&
		parse_options_usage(record_usage, record_options, "switch-events", 0);
		err = -EINVAL;
		parse_options_usage(record_usage, record_options, "switch-output", 0);
		err = -EINVAL;

	if (rec->switch_output.time) {
		alarm(rec->switch_output.time);

	if (rec->switch_output.num_files) {
		rec->switch_output.filenames = calloc(rec->switch_output.num_files,
		if (!rec->switch_output.filenames) {
			err = -EINVAL;

	if (rec->timestamp_filename && record__threads_enabled(rec)) {
		rec->timestamp_filename = false;
		pr_warning("WARNING: --timestamp-filename option is not available in parallel streaming mode.\n");

	if (rec->filter_action) {
		if (!strcmp(rec->filter_action, "pin"))
		else if (!strcmp(rec->filter_action, "unpin"))
			pr_warning("Unknown BPF filter action: %s\n", rec->filter_action);
			err = -EINVAL;

	/* For backward compatibility, -d implies --mem-info */
	if (rec->opts.sample_address)
		rec->opts.sample_data_src = true;
		err = -ENOMEM;

	if (rec->no_buildid_cache || rec->no_buildid) {
	} else if (rec->switch_output.enabled) {
		/*
		 * In 'perf record --switch-output', disable buildid
		 * perf record --switch-output --no-no-buildid \
		 *	       --no-no-buildid-cache
		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
		 */
		if (rec->no_buildid_set && !rec->no_buildid)
		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
		rec->no_buildid = true;
		rec->no_buildid_cache = true;

	if (rec->evlist->core.nr_entries == 0) {
		err = parse_event(rec->evlist, "cycles:P");

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);

	if (rec->uid_str) {
		uid_t uid = parse_uid(rec->uid_str);
			ui__error("Invalid User: %s", rec->uid_str);
			err = -EINVAL;
		err = parse_uid_filter(rec->evlist, uid);
		rec->opts.target.system_wide = true;
	/* Enable ignoring missing threads when the -p option is given. */
	rec->opts.ignore_missing_thread = rec->opts.target.pid;

	evlist__warn_user_requested_cpus(rec->evlist, rec->opts.target.cpu_list);
	arch__add_leaf_frame_record_opts(&rec->opts);

	err = -ENOMEM;
	if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) {
		if (rec->opts.target.pid != NULL) {
			pr_err("Couldn't create thread/CPU maps: %s\n",

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);

	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (rec->opts.text_poke) {
		err = record__config_text_poke(rec->evlist);
	if (rec->off_cpu) {

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;

	if (rec->opts.nr_cblocks > nr_cblocks_max)
		rec->opts.nr_cblocks = nr_cblocks_max;
	pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
	pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
	pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);

	if (rec->opts.comp_level > comp_level_max)
		rec->opts.comp_level = comp_level_max;
	pr_debug("comp level: %d\n", rec->opts.comp_level);

	record__free_thread_masks(rec, rec->nr_threads);
	rec->nr_threads = 0;
	auxtrace_record__free(rec->itr);
	evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
	evlist__delete(rec->evlist);
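/*
 * Teardown order on the way out of cmd_record(): thread masks first, then
 * the auxtrace record state, any --control descriptors, and finally the
 * evlist itself.
 */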