Lines matching defs:rec (uses of struct record *rec in tools/perf/builtin-record.c)
113 struct record *rec;
213 static int record__threads_enabled(struct record *rec)
215 return rec->opts.threads_spec;
218 static bool switch_output_signal(struct record *rec)
220 return rec->switch_output.signal &&
224 static bool switch_output_size(struct record *rec)
226 return rec->switch_output.size &&
228 (rec->bytes_written >= rec->switch_output.size);
231 static bool switch_output_time(struct record *rec)
233 return rec->switch_output.time &&
237 static u64 record__bytes_written(struct record *rec)
239 return rec->bytes_written + rec->thread_bytes_written;
242 static bool record__output_max_size_exceeded(struct record *rec)
244 return rec->output_max_size &&
245 (record__bytes_written(rec) >= rec->output_max_size);
248 static int record__write(struct record *rec, struct mmap *map __maybe_unused,
251 struct perf_data_file *file = &rec->session->data->file;
263 rec->thread_bytes_written += size;
265 rec->bytes_written += size;
268 if (record__output_max_size_exceeded(rec) && !done) {
271 record__bytes_written(rec) >> 10);
275 if (switch_output_size(rec))
281 static int record__aio_enabled(struct record *rec);
282 static int record__comp_enabled(struct record *rec);
394 struct record *rec;
417 if (record__comp_enabled(aio->rec)) {
418 ssize_t compressed = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
448 static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
451 int trace_fd = rec->session->data->file.fd;
452 struct record_aio aio = { .rec = rec, .size = 0 };
465 rec->samples++;
469 rec->bytes_written += aio.size;
470 if (switch_output_size(rec))
495 static void record__aio_mmap_read_sync(struct record *rec)
498 struct evlist *evlist = rec->evlist;
501 if (!record__aio_enabled(rec))
535 static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
550 static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
555 static int record__aio_enabled(struct record *rec)
557 return rec->opts.nr_cblocks > 0;
616 static int record__comp_enabled(struct record *rec)
618 return rec->opts.comp_level > 0;
626 struct record *rec = container_of(tool, struct record, tool);
627 return record__write(rec, NULL, event, event->header.size);
647 struct record *rec = to;
649 if (record__comp_enabled(rec)) {
650 ssize_t compressed = zstd_compress(rec->session, map, map->data,
661 return record__write(rec, map, bf, size);
722 struct record *rec = container_of(tool, struct record, tool);
723 struct perf_data *data = &rec->data;
735 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
746 record__write(rec, map, event, event->header.size);
747 record__write(rec, map, data1, len1);
749 record__write(rec, map, data2, len2);
750 record__write(rec, map, &pad, padding);
755 static int record__auxtrace_mmap_read(struct record *rec,
760 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
766 rec->samples++;
771 static int record__auxtrace_mmap_read_snapshot(struct record *rec,
776 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
778 rec->opts.auxtrace_snapshot_size);
783 rec->samples++;
788 static int record__auxtrace_read_snapshot_all(struct record *rec)
793 for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
794 struct mmap *map = &rec->evlist->mmap[i];
799 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
808 static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
811 if (record__auxtrace_read_snapshot_all(rec) < 0) {
814 if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
821 static int record__auxtrace_snapshot_exit(struct record *rec)
827 auxtrace_record__snapshot_start(rec->itr))
830 record__read_auxtrace_snapshot(rec, true);
837 static int record__auxtrace_init(struct record *rec)
841 if ((rec->opts.auxtrace_snapshot_opts || rec->opts.auxtrace_sample_opts)
842 && record__threads_enabled(rec)) {
847 if (!rec->itr) {
848 rec->itr = auxtrace_record__init(rec->evlist, &err);
853 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
854 rec->opts.auxtrace_snapshot_opts);
858 err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
859 rec->opts.auxtrace_sample_opts);
863 err = auxtrace_parse_aux_action(rec->evlist);
867 return auxtrace_parse_filters(rec->evlist);
873 int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
880 void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
892 int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
897 static int record__auxtrace_init(struct record *rec __maybe_unused)
926 static int record__config_off_cpu(struct record *rec)
928 return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts);
931 static bool record__tracking_system_wide(struct record *rec)
933 struct evlist *evlist = rec->evlist;
950 static int record__config_tracking_events(struct record *rec)
952 struct record_opts *opts = &rec->opts;
953 struct evlist *evlist = rec->evlist;
969 if (!!opts->target.cpu_list && record__tracking_system_wide(rec))
1151 static void record__free_thread_data(struct record *rec)
1154 struct record_thread *thread_data = rec->thread_data;
1159 for (t = 0; t < rec->nr_threads; t++) {
1166 zfree(&rec->thread_data);
1169 static int record__map_thread_evlist_pollfd_indexes(struct record *rec,
1173 size_t x = rec->index_map_cnt;
1175 if (realloc_array_as_needed(rec->index_map, rec->index_map_sz, x, NULL))
1177 rec->index_map[x].evlist_pollfd_index = evlist_pollfd_index;
1178 rec->index_map[x].thread_pollfd_index = thread_pollfd_index;
1179 rec->index_map_cnt += 1;
1183 static int record__update_evlist_pollfd_from_thread(struct record *rec,
1192 for (i = 0; i < rec->index_map_cnt; i++) {
1193 int e_pos = rec->index_map[i].evlist_pollfd_index;
1194 int t_pos = rec->index_map[i].thread_pollfd_index;
1207 static int record__dup_non_perf_events(struct record *rec,
1224 ret = record__map_thread_evlist_pollfd_indexes(rec, i, ret);
1233 static int record__alloc_thread_data(struct record *rec, struct evlist *evlist)
1238 rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data)));
1239 if (!rec->thread_data) {
1243 thread_data = rec->thread_data;
1245 for (t = 0; t < rec->nr_threads; t++)
1248 for (t = 0; t < rec->nr_threads; t++) {
1249 thread_data[t].rec = rec;
1250 thread_data[t].mask = &rec->thread_masks[t];
1281 ret = record__dup_non_perf_events(rec, evlist, &thread_data[t]);
1292 record__free_thread_data(rec);
1297 static int record__mmap_evlist(struct record *rec,
1301 struct record_opts *opts = &rec->opts;
1335 ret = record__alloc_thread_data(rec, evlist);
1339 if (record__threads_enabled(rec)) {
1340 ret = perf_data__create_dir(&rec->data, evlist->core.nr_mmaps);
1347 evlist->mmap[i].file = &rec->data.dir.files[i];
1349 evlist->overwrite_mmap[i].file = &rec->data.dir.files[i];
1356 static int record__mmap(struct record *rec)
1358 return record__mmap_evlist(rec, rec->evlist);
1361 static int record__open(struct record *rec)
1365 struct evlist *evlist = rec->evlist;
1366 struct perf_session *session = rec->session;
1367 struct record_opts *opts = &rec->opts;
1412 rc = record__mmap(rec);
1422 static void set_timestamp_boundary(struct record *rec, u64 sample_time)
1424 if (rec->evlist->first_sample_time == 0)
1425 rec->evlist->first_sample_time = sample_time;
1428 rec->evlist->last_sample_time = sample_time;
1437 struct record *rec = container_of(tool, struct record, tool);
1439 set_timestamp_boundary(rec, sample->time);
1441 if (rec->buildid_all)
1444 rec->samples++;
1448 static int process_buildids(struct record *rec)
1450 struct perf_session *session = rec->session;
1452 if (perf_data__size(&rec->data) == 0)
1472 if (rec->buildid_all && !rec->timestamp_boundary)
1473 rec->tool.sample = process_event_sample_stub;
1517 static void record__adjust_affinity(struct record *rec, struct mmap *map)
1519 if (rec->opts.affinity != PERF_AFFINITY_SYS &&
1576 static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
1579 u64 bytes_written = rec->bytes_written;
1584 int trace_fd = rec->data.file.fd;
1599 if (record__aio_enabled(rec))
1607 record__adjust_affinity(rec, map);
1612 if (!record__aio_enabled(rec)) {
1613 if (perf_mmap__push(map, rec, record__pushfn) < 0) {
1620 if (record__aio_push(rec, map, &off) < 0) {
1632 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
1633 !rec->opts.auxtrace_sample_mode &&
1634 record__auxtrace_mmap_read(rec, map) != 0) {
1640 if (record__aio_enabled(rec))
1651 if (!record__threads_enabled(rec) && bytes_written != rec->bytes_written)
1652 rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
1660 static int record__mmap_read_all(struct record *rec, bool synch)
1664 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
1668 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
1703 if (record__mmap_read_all(thread->rec, false) < 0 || terminate)
1732 record__mmap_read_all(thread->rec, true);
1742 static void record__init_features(struct record *rec)
1744 struct perf_session *session = rec->session;
1750 if (rec->no_buildid)
1753 if (!have_tracepoints(&rec->evlist->core.entries))
1756 if (!rec->opts.branch_stack)
1759 if (!rec->opts.full_auxtrace)
1762 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
1765 if (!rec->opts.use_clockid)
1768 if (!record__threads_enabled(rec))
1771 if (!record__comp_enabled(rec))
1778 record__finish_output(struct record *rec)
1781 struct perf_data *data = &rec->data;
1786 data->file.size = rec->bytes_written;
1790 rec->session->header.data_size += rec->bytes_written;
1792 if (record__threads_enabled(rec)) {
1797 if (!rec->no_buildid) {
1798 process_buildids(rec);
1800 if (rec->buildid_all)
1801 perf_session__dsos_hit_all(rec->session);
1803 perf_session__write_header(rec->session, rec->evlist, fd, true);
1808 static int record__synthesize_workload(struct record *rec, bool tail)
1812 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
1814 if (rec->opts.tail_synthesize != tail)
1817 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
1821 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
1823 &rec->session->machines.host,
1825 rec->opts.sample_address);
1830 static int write_finished_init(struct record *rec, bool tail)
1832 if (rec->opts.tail_synthesize != tail)
1835 return record__write(rec, NULL, &finished_init_event, sizeof(finished_init_event));
1838 static int record__synthesize(struct record *rec, bool tail);
1841 record__switch_output(struct record *rec, bool at_exit)
1843 struct perf_data *data = &rec->data;
1850 record__aio_mmap_read_sync(rec);
1852 write_finished_init(rec, true);
1854 record__synthesize(rec, true);
1855 if (target__none(&rec->opts.target))
1856 record__synthesize_workload(rec, true);
1858 rec->samples = 0;
1859 record__finish_output(rec);
1867 rec->session->header.data_offset,
1870 rec->bytes_written = 0;
1871 rec->session->header.data_size = 0;
1879 if (rec->switch_output.num_files) {
1880 int n = rec->switch_output.cur_file + 1;
1882 if (n >= rec->switch_output.num_files)
1884 rec->switch_output.cur_file = n;
1885 if (rec->switch_output.filenames[n]) {
1886 remove(rec->switch_output.filenames[n]);
1887 zfree(&rec->switch_output.filenames[n]);
1889 rec->switch_output.filenames[n] = new_filename;
1896 record__synthesize(rec, false);
1907 if (target__none(&rec->opts.target))
1908 record__synthesize_workload(rec, false);
1909 write_finished_init(rec, false);
1914 static void __record__save_lost_samples(struct record *rec, struct evsel *evsel,
1933 record__write(rec, NULL, lost, lost->header.size);
1936 static void record__read_lost_samples(struct record *rec)
1938 struct perf_session *session = rec->session;
1970 __record__save_lost_samples(rec, evsel, &lost.lost,
1980 __record__save_lost_samples(rec, evsel, &lost.lost, 0, 0, lost_count,
2016 static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
2018 const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist);
2024 static int record__synthesize(struct record *rec, bool tail)
2026 struct perf_session *session = rec->session;
2028 struct perf_data *data = &rec->data;
2029 struct record_opts *opts = &rec->opts;
2030 struct perf_tool *tool = &rec->tool;
2034 if (rec->opts.tail_synthesize != tail)
2043 rec->bytes_written += err;
2046 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
2058 if (rec->opts.full_auxtrace) {
2059 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
2065 if (!evlist__exclude_kernel(rec->evlist)) {
2084 err = perf_event__synthesize_extra_attr(&rec->tool,
2085 rec->evlist,
2091 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
2099 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus,
2113 if (rec->opts.synth & PERF_SYNTH_CGROUP) {
2122 if (rec->opts.nr_threads_synthesize > 1) {
2128 if (rec->opts.synth & PERF_SYNTH_TASK) {
2129 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
2132 rec->evlist->core.threads,
2134 rec->opts.nr_threads_synthesize);
2137 if (rec->opts.nr_threads_synthesize > 1) {
2148 struct record *rec = data;
2149 pthread_kill(rec->thread_id, SIGUSR2);
2153 static int record__setup_sb_evlist(struct record *rec)
2155 struct record_opts *opts = &rec->opts;
2157 if (rec->sb_evlist != NULL) {
2163 evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
2164 rec->thread_id = pthread_self();
2168 if (rec->sb_evlist == NULL) {
2169 rec->sb_evlist = evlist__new();
2171 if (rec->sb_evlist == NULL) {
2177 if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
2183 if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
2191 static int record__init_clock(struct record *rec)
2193 struct perf_session *session = rec->session;
2198 if (!rec->opts.use_clockid)
2201 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
2202 session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;
2204 session->header.env.clock.clockid = rec->opts.clockid;
2211 if (clock_gettime(rec->opts.clockid, &ref_clockid)) {
2228 static void hit_auxtrace_snapshot_trigger(struct record *rec)
2233 if (auxtrace_record__snapshot_start(rec->itr))
2256 static int record__start_threads(struct record *rec)
2258 int t, tt, err, ret = 0, nr_threads = rec->nr_threads;
2259 struct record_thread *thread_data = rec->thread_data;
2266 if (!record__threads_enabled(rec))
2296 pr_debug2("threads[%d]: sent %s\n", rec->thread_data[t].tid,
2300 thread->tid, rec->thread_data[t].tid);
2319 static int record__stop_threads(struct record *rec)
2322 struct record_thread *thread_data = rec->thread_data;
2324 for (t = 1; t < rec->nr_threads; t++)
2327 for (t = 0; t < rec->nr_threads; t++) {
2328 rec->samples += thread_data[t].samples;
2329 if (!record__threads_enabled(rec))
2331 rec->session->bytes_transferred += thread_data[t].bytes_transferred;
2332 rec->session->bytes_compressed += thread_data[t].bytes_compressed;
2345 static unsigned long record__waking(struct record *rec)
2349 struct record_thread *thread_data = rec->thread_data;
2351 for (t = 0; t < rec->nr_threads; t++)
2357 static int __cmd_record(struct record *rec, int argc, const char **argv)
2362 struct perf_tool *tool = &rec->tool;
2363 struct record_opts *opts = &rec->opts;
2364 struct perf_data *data = &rec->data;
2377 if (rec->opts.record_cgroup) {
2384 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
2386 if (rec->opts.auxtrace_snapshot_mode)
2388 if (rec->switch_output.enabled)
2404 tool->namespace_events = rec->opts.record_namespaces;
2405 tool->cgroup_events = rec->opts.record_cgroup;
2412 if (record__threads_enabled(rec)) {
2413 if (perf_data__is_pipe(&rec->data)) {
2417 if (rec->opts.full_auxtrace) {
2424 rec->session = session;
2426 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
2437 err = evlist__add_wakeup_eventfd(rec->evlist, done_fd);
2446 session->header.env.comp_level = rec->opts.comp_level;
2448 if (rec->opts.kcore &&
2454 if (record__init_clock(rec))
2457 record__init_features(rec);
2460 err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe,
2475 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
2476 rec->opts.sample_id = true;
2478 if (rec->timestamp_filename && perf_data__is_pipe(data)) {
2479 rec->timestamp_filename = false;
2483 evlist__uniquify_name(rec->evlist);
2485 evlist__config(rec->evlist, opts, &callchain_param);
2489 if (record__open(rec) != 0) {
2497 if (rec->opts.kcore) {
2509 if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) {
2511 rec->tool.ordered_events = false;
2514 if (evlist__nr_groups(rec->evlist) == 0)
2522 err = perf_session__write_header(session, rec->evlist, fd, false);
2528 if (!rec->no_buildid
2535 err = record__setup_sb_evlist(rec);
2539 err = record__synthesize(rec, false);
2543 if (rec->realtime_prio) {
2546 param.sched_priority = rec->realtime_prio;
2554 if (record__start_threads(rec))
2563 evlist__enable(rec->evlist);
2586 rec->evlist->workload.pid,
2606 rec->evlist->workload.pid,
2611 evlist__start_workload(rec->evlist);
2618 evlist__enable(rec->evlist);
2623 err = event_enable_timer__start(rec->evlist->eet);
2639 err = write_finished_init(rec, false);
2647 * rec->evlist->bkw_mmap_state is possible to be
2649 * hits != rec->samples in previous round.
2655 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
2657 if (record__mmap_read_all(rec, false) < 0) {
2667 record__read_auxtrace_snapshot(rec, false);
2685 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
2694 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
2698 record__waking(rec));
2700 fd = record__switch_output(rec, false);
2709 if (rec->switch_output.time)
2710 alarm(rec->switch_output.time);
2729 err = record__update_evlist_pollfd_from_thread(rec, rec->evlist, thread);
2734 if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) {
2737 hit_auxtrace_snapshot_trigger(rec);
2738 evlist__ctlfd_ack(rec->evlist);
2754 err = event_enable_timer__process(rec->evlist->eet);
2769 evlist__disable(rec->evlist);
2778 record__auxtrace_snapshot_exit(rec);
2784 evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels);
2794 record__waking(rec));
2796 write_finished_init(rec, true);
2798 if (target__none(&rec->opts.target))
2799 record__synthesize_workload(rec, true);
2802 record__stop_threads(rec);
2803 record__mmap_read_all(rec, true);
2805 record__free_thread_data(rec);
2806 evlist__finalize_ctlfd(rec->evlist);
2807 record__aio_mmap_read_sync(rec);
2809 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
2810 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
2818 kill(rec->evlist->workload.pid, SIGTERM);
2831 if (rec->off_cpu)
2832 rec->bytes_written += off_cpu_write(rec->session);
2834 record__read_lost_samples(rec);
2835 record__synthesize(rec, true);
2837 rec->samples = 0;
2840 if (!rec->timestamp_filename) {
2841 record__finish_output(rec);
2843 fd = record__switch_output(rec, true);
2855 const char *postfix = rec->timestamp_filename ?
2858 if (rec->samples && !rec->opts.full_auxtrace)
2860 " (%" PRIu64 " samples)", rec->samples);
2869 rec->session->bytes_transferred / 1024.0 / 1024.0,
2886 evlist__stop_sb_thread(rec->sb_evlist);
2952 struct record *rec = cb;
2956 rec->no_buildid_cache = false;
2958 rec->no_buildid_cache = true;
2960 rec->no_buildid = true;
2962 rec->buildid_mmap = true;
2973 rec->opts.nr_cblocks = strtol(value, NULL, 0);
2974 if (!rec->opts.nr_cblocks)
2975 rec->opts.nr_cblocks = nr_cblocks_default;
2979 rec->debuginfod.urls = strdup(value);
2980 if (!rec->debuginfod.urls)
2982 rec->debuginfod.set = true;
2990 struct record *rec = (struct record *)opt->value;
2992 return evlist__parse_event_enable_time(rec->evlist, &rec->opts, str, unset);
3165 static void switch_output_size_warn(struct record *rec)
3167 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
3168 struct switch_output *s = &rec->switch_output;
3182 static int switch_output_setup(struct record *rec)
3184 struct switch_output *s = &rec->switch_output;
3206 if (rec->switch_output_event_set) {
3207 if (record__threads_enabled(rec)) {
3217 if (record__threads_enabled(rec)) {
3247 rec->timestamp_filename = true;
3250 if (s->size && !rec->opts.no_buffering)
3251 switch_output_size_warn(rec);
3293 struct record *rec = container_of(tool, struct record, tool);
3295 set_timestamp_boundary(rec, sample->time);
3607 static void record__free_thread_masks(struct record *rec, int nr_threads)
3611 if (rec->thread_masks)
3613 record__thread_mask_free(&rec->thread_masks[t]);
3615 zfree(&rec->thread_masks);
3618 static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
3622 rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
3623 if (!rec->thread_masks) {
3629 ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
3639 record__free_thread_masks(rec, nr_threads);
3644 static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map *cpus)
3648 ret = record__alloc_thread_masks(rec, nr_cpus, cpu__max_cpu().cpu);
3652 rec->nr_threads = nr_cpus;
3653 pr_debug("nr_threads: %d\n", rec->nr_threads);
3655 for (t = 0; t < rec->nr_threads; t++) {
3656 __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
3657 __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
3660 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
3662 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
3669 static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_map *cpus,
3747 thread_masks = realloc(rec->thread_masks, (t + 1) * sizeof(struct thread_mask));
3753 rec->thread_masks = thread_masks;
3754 rec->thread_masks[t] = thread_mask;
3757 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
3759 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
3768 rec->nr_threads = t;
3769 pr_debug("nr_threads: %d\n", rec->nr_threads);
3770 if (!rec->nr_threads)
3783 static int record__init_thread_core_masks(struct record *rec, struct perf_cpu_map *cpus)
3794 ret = record__init_thread_masks_spec(rec, cpus, topo->core_cpus_list,
3801 static int record__init_thread_package_masks(struct record *rec, struct perf_cpu_map *cpus)
3812 ret = record__init_thread_masks_spec(rec, cpus, topo->package_cpus_list,
3819 static int record__init_thread_numa_masks(struct record *rec, struct perf_cpu_map *cpus)
3841 ret = record__init_thread_masks_spec(rec, cpus, spec, spec, topo->nr);
3851 static int record__init_thread_user_masks(struct record *rec, struct perf_cpu_map *cpus)
3858 for (t = 0, user_spec = (char *)rec->opts.threads_user_spec; ; t++, user_spec = NULL) {
3904 ret = record__init_thread_masks_spec(rec, cpus, (const char **)maps_spec,
3921 static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
3925 ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu().cpu);
3929 if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus))
3932 rec->nr_threads = 1;
3937 static int record__init_thread_masks(struct record *rec)
3940 struct perf_cpu_map *cpus = rec->evlist->core.all_cpus;
3942 if (!record__threads_enabled(rec))
3943 return record__init_thread_default_masks(rec, cpus);
3945 if (evlist__per_thread(rec->evlist)) {
3950 switch (rec->opts.threads_spec) {
3952 ret = record__init_thread_cpu_masks(rec, cpus);
3955 ret = record__init_thread_core_masks(rec, cpus);
3958 ret = record__init_thread_package_masks(rec, cpus);
3961 ret = record__init_thread_numa_masks(rec, cpus);
3964 ret = record__init_thread_user_masks(rec, cpus);
3976 struct record *rec = &record;
3989 rec->opts.affinity = PERF_AFFINITY_SYS;
3991 rec->evlist = evlist__new();
3992 if (rec->evlist == NULL)
3995 err = perf_config(perf_record_config, rec);
4011 if (!argc && target__none(&rec->opts.target))
4012 rec->opts.target.system_wide = true;
4014 if (nr_cgroups && !rec->opts.target.system_wide) {
4020 if (rec->buildid_mmap) {
4030 rec->opts.build_id = true;
4032 rec->no_buildid = true;
4035 if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
4041 if (rec->opts.kcore)
4042 rec->opts.text_poke = true;
4044 if (rec->opts.kcore || record__threads_enabled(rec))
4045 rec->data.is_dir = true;
4047 if (record__threads_enabled(rec)) {
4048 if (rec->opts.affinity != PERF_AFFINITY_SYS) {
4052 if (record__aio_enabled(rec)) {
4058 if (rec->opts.comp_level != 0) {
4060 rec->no_buildid = true;
4063 if (rec->opts.record_switch_events &&
4071 if (switch_output_setup(rec)) {
4077 if (rec->switch_output.time) {
4079 alarm(rec->switch_output.time);
4082 if (rec->switch_output.num_files) {
4083 rec->switch_output.filenames = calloc(rec->switch_output.num_files,
4085 if (!rec->switch_output.filenames) {
4091 if (rec->timestamp_filename && record__threads_enabled(rec)) {
4092 rec->timestamp_filename = false;
4096 if (rec->filter_action) {
4097 if (!strcmp(rec->filter_action, "pin"))
4099 else if (!strcmp(rec->filter_action, "unpin"))
4102 pr_warning("Unknown BPF filter action: %s\n", rec->filter_action);
4116 err = record__auxtrace_init(rec);
4125 if (rec->no_buildid_cache || rec->no_buildid) {
4127 } else if (rec->switch_output.enabled) {
4139 * if ((rec->no_buildid || !rec->no_buildid_set) &&
4140 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
4145 if (rec->no_buildid_set && !rec->no_buildid)
4147 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
4150 rec->no_buildid = true;
4151 rec->no_buildid_cache = true;
4159 if (rec->evlist->core.nr_entries == 0) {
4160 err = parse_event(rec->evlist, "cycles:P");
4165 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
4166 rec->opts.no_inherit = true;
4168 err = target__validate(&rec->opts.target);
4170 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
4174 err = target__parse_uid(&rec->opts.target);
4178 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
4186 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
4188 evlist__warn_user_requested_cpus(rec->evlist, rec->opts.target.cpu_list);
4191 arch__add_leaf_frame_record_opts(&rec->opts);
4194 if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) {
4195 if (rec->opts.target.pid != NULL) {
4204 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
4213 if (rec->opts.full_auxtrace)
4214 rec->buildid_all = true;
4216 if (rec->opts.text_poke) {
4217 err = record__config_text_poke(rec->evlist);
4224 if (rec->off_cpu) {
4225 err = record__config_off_cpu(rec);
4232 if (record_opts__config(&rec->opts)) {
4237 err = record__config_tracking_events(rec);
4243 err = record__init_thread_masks(rec);
4249 if (rec->opts.nr_cblocks > nr_cblocks_max)
4250 rec->opts.nr_cblocks = nr_cblocks_max;
4251 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
4253 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
4254 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
4256 if (rec->opts.comp_level > comp_level_max)
4257 rec->opts.comp_level = comp_level_max;
4258 pr_debug("comp level: %d\n", rec->opts.comp_level);
4262 record__free_thread_masks(rec, rec->nr_threads);
4263 rec->nr_threads = 0;
4265 auxtrace_record__free(rec->itr);
4267 evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
4268 evlist__delete(rec->evlist);
4274 struct record *rec = &record;
4276 hit_auxtrace_snapshot_trigger(rec);
4278 if (switch_output_signal(rec))
4284 struct record *rec = &record;
4286 if (switch_output_time(rec))
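The matches above all trace struct record *rec through perf record's output path. As a reading aid, here is a minimal, self-contained C sketch of the size-based rotation and output-cap checks suggested by the fragments around source lines 218-275. The field names (switch_output, bytes_written, thread_bytes_written, output_max_size) and the per-function logic mirror the fragments; the concrete integer types, the demo main(), and the omission of the additional trigger-state operand implied by the trailing "&&" at lines 220, 226 and 233 are assumptions made for illustration, not the verbatim perf source.

/*
 * Sketch of the rotation and size-cap checks suggested by the matches
 * around source lines 218-275. Names mirror the fragments; this is an
 * illustration, not the perf source (the real predicates also combine
 * these conditions with a trigger-state check elided in the fragments).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct switch_output {
	bool     signal;   /* rotate output on SIGUSR2 (line 220) */
	uint64_t size;     /* rotate once this many bytes are written (line 226) */
	uint64_t time;     /* rotate every N seconds via alarm() (lines 2709-2710) */
};

struct record {
	uint64_t bytes_written;        /* written by the main thread */
	uint64_t thread_bytes_written; /* written by worker threads */
	uint64_t output_max_size;      /* hard cap; 0 means unlimited */
	struct switch_output switch_output;
};

/* Total output size, as in record__bytes_written() (line 239). */
static uint64_t record__bytes_written(struct record *rec)
{
	return rec->bytes_written + rec->thread_bytes_written;
}

/* Size-based rotation trigger, as in switch_output_size() (lines 226-228). */
static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       (rec->bytes_written >= rec->switch_output.size);
}

/* Hard output cap, as in record__output_max_size_exceeded() (lines 244-245). */
static bool record__output_max_size_exceeded(struct record *rec)
{
	return rec->output_max_size &&
	       (record__bytes_written(rec) >= rec->output_max_size);
}

/* Hypothetical demo values, only to show how the predicates evaluate. */
int main(void)
{
	struct record rec = {
		.bytes_written = 900,
		.thread_bytes_written = 200,
		.output_max_size = 1000,
		.switch_output = { .size = 512 },
	};

	printf("rotate output now: %d\n", switch_output_size(&rec));               /* 1 */
	printf("size cap exceeded: %d\n", record__output_max_size_exceeded(&rec)); /* 1 */
	return 0;
}

Note the asymmetry visible in the fragments: switch_output_size() compares only the main thread's bytes_written, while record__output_max_size_exceeded() uses the combined record__bytes_written() total that also counts thread_bytes_written.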