xref: /linux/tools/perf/builtin-inject.c (revision 4e277d0d83a5b5aadbe033af3ce7bffbcc51a6fd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * builtin-inject.c
4  *
5  * Builtin inject command: Examine the live mode (stdin) event stream
6  * and repipe it to stdout while optionally injecting additional
7  * events into it.
8  */
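/*
 * Typical usage is as a stage in a pipeline, e.g. (illustrative only, see
 * tools/perf/Documentation/perf-inject.txt for the full option list):
 *
 *   perf record -o - -- <workload> | perf inject -b | perf report -i -
 *
 * where -b injects build-id events so the report can still resolve the
 * right binaries later on.
 */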
9 #include "builtin.h"
10 
11 #include "util/color.h"
12 #include "util/dso.h"
13 #include "util/vdso.h"
14 #include "util/evlist.h"
15 #include "util/evsel.h"
16 #include "util/map.h"
17 #include "util/session.h"
18 #include "util/tool.h"
19 #include "util/debug.h"
20 #include "util/build-id.h"
21 #include "util/data.h"
22 #include "util/auxtrace.h"
23 #include "util/jit.h"
24 #include "util/symbol.h"
25 #include "util/synthetic-events.h"
26 #include "util/thread.h"
27 #include "util/namespaces.h"
28 
29 #include <linux/err.h>
30 #include <subcmd/parse-options.h>
31 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
32 
33 #include <linux/list.h>
34 #include <linux/string.h>
35 #include <errno.h>
36 #include <signal.h>
37 
38 struct perf_inject {
39 	struct perf_tool	tool;
40 	struct perf_session	*session;
41 	bool			build_ids;
42 	bool			build_id_all;
43 	bool			sched_stat;
44 	bool			have_auxtrace;
45 	bool			strip;
46 	bool			jit_mode;
47 	bool			in_place_update;
48 	bool			in_place_update_dry_run;
49 	const char		*input_name;
50 	struct perf_data	output;
51 	u64			bytes_written;
52 	u64			aux_id;
53 	struct list_head	samples;
54 	struct itrace_synth_opts itrace_synth_opts;
55 	char			event_copy[PERF_SAMPLE_MAX_SIZE];
56 };
57 
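/*
 * A saved copy of a sched_switch sample, kept on perf_inject->samples and
 * keyed by tid so that a later sched_stat_* sample for the same task can
 * be re-emitted at the switch location (see perf_inject__sched_stat()).
 */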
58 struct event_entry {
59 	struct list_head node;
60 	u32		 tid;
61 	union perf_event event[];
62 };
63 
64 static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
65 				struct machine *machine, u8 cpumode, u32 flags);
66 
67 static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
68 {
69 	ssize_t size;
70 
71 	size = perf_data__write(&inject->output, buf, sz);
72 	if (size < 0)
73 		return -errno;
74 
75 	inject->bytes_written += size;
76 	return 0;
77 }
78 
79 static int perf_event__repipe_synth(struct perf_tool *tool,
80 				    union perf_event *event)
81 {
82 	struct perf_inject *inject = container_of(tool, struct perf_inject,
83 						  tool);
84 
85 	return output_bytes(inject, event, event->header.size);
86 }
87 
88 static int perf_event__repipe_oe_synth(struct perf_tool *tool,
89 				       union perf_event *event,
90 				       struct ordered_events *oe __maybe_unused)
91 {
92 	return perf_event__repipe_synth(tool, event);
93 }
94 
95 #ifdef HAVE_JITDUMP
96 static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
97 			       union perf_event *event __maybe_unused,
98 			       struct ordered_events *oe __maybe_unused)
99 {
100 	return 0;
101 }
102 #endif
103 
104 static int perf_event__repipe_op2_synth(struct perf_session *session,
105 					union perf_event *event)
106 {
107 	return perf_event__repipe_synth(session->tool, event);
108 }
109 
110 static int perf_event__repipe_op4_synth(struct perf_session *session,
111 					union perf_event *event,
112 					u64 data __maybe_unused)
113 {
114 	return perf_event__repipe_synth(session->tool, event);
115 }
116 
117 static int perf_event__repipe_attr(struct perf_tool *tool,
118 				   union perf_event *event,
119 				   struct evlist **pevlist)
120 {
121 	struct perf_inject *inject = container_of(tool, struct perf_inject,
122 						  tool);
123 	int ret;
124 
125 	ret = perf_event__process_attr(tool, event, pevlist);
126 	if (ret)
127 		return ret;
128 
129 	if (!inject->output.is_pipe)
130 		return 0;
131 
132 	return perf_event__repipe_synth(tool, event);
133 }
134 
135 static int perf_event__repipe_event_update(struct perf_tool *tool,
136 					   union perf_event *event,
137 					   struct evlist **pevlist __maybe_unused)
138 {
139 	return perf_event__repipe_synth(tool, event);
140 }
141 
142 #ifdef HAVE_AUXTRACE_SUPPORT
143 
144 static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
145 {
146 	char buf[4096];
147 	ssize_t ssz;
148 	int ret;
149 
150 	while (size > 0) {
151 		ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
152 		if (ssz < 0)
153 			return -errno;
154 		ret = output_bytes(inject, buf, ssz);
155 		if (ret)
156 			return ret;
157 		size -= ssz;
158 	}
159 
160 	return 0;
161 }
162 
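/*
 * Repipe an AUXTRACE event together with its trace data.  For file output
 * the new offset is recorded in the auxtrace index first.  If the input is
 * a pipe, or is not mapped in a single mmap, the data must be copied from
 * the input fd; otherwise event and data are contiguous in memory and can
 * be written out in one go.  Returns the amount of trace data consumed.
 */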
163 static s64 perf_event__repipe_auxtrace(struct perf_session *session,
164 				       union perf_event *event)
165 {
166 	struct perf_tool *tool = session->tool;
167 	struct perf_inject *inject = container_of(tool, struct perf_inject,
168 						  tool);
169 	int ret;
170 
171 	inject->have_auxtrace = true;
172 
173 	if (!inject->output.is_pipe) {
174 		off_t offset;
175 
176 		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
177 		if (offset == -1)
178 			return -errno;
179 		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
180 						     event, offset);
181 		if (ret < 0)
182 			return ret;
183 	}
184 
185 	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
186 		ret = output_bytes(inject, event, event->header.size);
187 		if (ret < 0)
188 			return ret;
189 		ret = copy_bytes(inject, perf_data__fd(session->data),
190 				 event->auxtrace.size);
191 	} else {
192 		ret = output_bytes(inject, event,
193 				   event->header.size + event->auxtrace.size);
194 	}
195 	if (ret < 0)
196 		return ret;
197 
198 	return event->auxtrace.size;
199 }
200 
201 #else
202 
203 static s64
204 perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
205 			    union perf_event *event __maybe_unused)
206 {
207 	pr_err("AUX area tracing not supported\n");
208 	return -EINVAL;
209 }
210 
211 #endif
212 
213 static int perf_event__repipe(struct perf_tool *tool,
214 			      union perf_event *event,
215 			      struct perf_sample *sample __maybe_unused,
216 			      struct machine *machine __maybe_unused)
217 {
218 	return perf_event__repipe_synth(tool, event);
219 }
220 
221 static int perf_event__drop(struct perf_tool *tool __maybe_unused,
222 			    union perf_event *event __maybe_unused,
223 			    struct perf_sample *sample __maybe_unused,
224 			    struct machine *machine __maybe_unused)
225 {
226 	return 0;
227 }
228 
229 static int perf_event__drop_aux(struct perf_tool *tool,
230 				union perf_event *event __maybe_unused,
231 				struct perf_sample *sample,
232 				struct machine *machine __maybe_unused)
233 {
234 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
235 
236 	if (!inject->aux_id)
237 		inject->aux_id = sample->id;
238 
239 	return 0;
240 }
241 
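/*
 * Remove the PERF_SAMPLE_AUX data from a sample before repiping it: copy
 * the event into inject->event_copy with the aux_sample payload cut out,
 * shrink the header size accordingly and zero the recorded aux_sample size
 * (the u64 immediately preceding the payload).  If the sizes look
 * inconsistent, the original event is returned untouched.
 */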
242 static union perf_event *
243 perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
244 				 union perf_event *event,
245 				 struct perf_sample *sample)
246 {
247 	size_t sz1 = sample->aux_sample.data - (void *)event;
248 	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
249 	union perf_event *ev = (union perf_event *)inject->event_copy;
250 
251 	if (sz1 > event->header.size || sz2 > event->header.size ||
252 	    sz1 + sz2 > event->header.size ||
253 	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
254 		return event;
255 
256 	memcpy(ev, event, sz1);
257 	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
258 	ev->header.size = sz1 + sz2;
259 	((u64 *)((void *)ev + sz1))[-1] = 0;
260 
261 	return ev;
262 }
263 
264 typedef int (*inject_handler)(struct perf_tool *tool,
265 			      union perf_event *event,
266 			      struct perf_sample *sample,
267 			      struct evsel *evsel,
268 			      struct machine *machine);
269 
270 static int perf_event__repipe_sample(struct perf_tool *tool,
271 				     union perf_event *event,
272 				     struct perf_sample *sample,
273 				     struct evsel *evsel,
274 				     struct machine *machine)
275 {
276 	struct perf_inject *inject = container_of(tool, struct perf_inject,
277 						  tool);
278 
279 	if (evsel && evsel->handler) {
280 		inject_handler f = evsel->handler;
281 		return f(tool, event, sample, evsel, machine);
282 	}
283 
284 	build_id__mark_dso_hit(tool, event, sample, evsel, machine);
285 
286 	if (inject->itrace_synth_opts.set && sample->aux_sample.size)
287 		event = perf_inject__cut_auxtrace_sample(inject, event, sample);
288 
289 	return perf_event__repipe_synth(tool, event);
290 }
291 
292 static int perf_event__repipe_mmap(struct perf_tool *tool,
293 				   union perf_event *event,
294 				   struct perf_sample *sample,
295 				   struct machine *machine)
296 {
297 	int err;
298 
299 	err = perf_event__process_mmap(tool, event, sample, machine);
300 	perf_event__repipe(tool, event, sample, machine);
301 
302 	return err;
303 }
304 
305 #ifdef HAVE_JITDUMP
306 static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
307 				       union perf_event *event,
308 				       struct perf_sample *sample,
309 				       struct machine *machine)
310 {
311 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
312 	u64 n = 0;
313 	int ret;
314 
315 	/*
316 	 * if jit marker, then inject jit mmaps and generate ELF images
317 	 */
318 	ret = jit_process(inject->session, &inject->output, machine,
319 			  event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
320 	if (ret < 0)
321 		return ret;
322 	if (ret) {
323 		inject->bytes_written += n;
324 		return 0;
325 	}
326 	return perf_event__repipe_mmap(tool, event, sample, machine);
327 }
328 #endif
329 
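/*
 * Find or create the dso for a mmap'ed file, attaching the mapping
 * thread's namespace info so that build-ids can later be read from inside
 * the right mount namespace.  vdso maps are special-cased because they
 * always live on the host, never in a container.
 */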
330 static struct dso *findnew_dso(int pid, int tid, const char *filename,
331 			       struct dso_id *id, struct machine *machine)
332 {
333 	struct thread *thread;
334 	struct nsinfo *nsi = NULL;
335 	struct nsinfo *nnsi;
336 	struct dso *dso;
337 	bool vdso;
338 
339 	thread = machine__findnew_thread(machine, pid, tid);
340 	if (thread == NULL) {
341 		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
342 		return NULL;
343 	}
344 
345 	vdso = is_vdso_map(filename);
346 	nsi = nsinfo__get(thread->nsinfo);
347 
348 	if (vdso) {
349 		/* The vdso maps are always on the host and not the
350 		 * container.  Ensure that we don't use setns to look
351 		 * them up.
352 		 */
353 		nnsi = nsinfo__copy(nsi);
354 		if (nnsi) {
355 			nsinfo__put(nsi);
356 			nnsi->need_setns = false;
357 			nsi = nnsi;
358 		}
359 		dso = machine__findnew_vdso(machine, thread);
360 	} else {
361 		dso = machine__findnew_dso_id(machine, filename, id);
362 	}
363 
364 	if (dso)
365 		dso->nsinfo = nsi;
366 	else
367 		nsinfo__put(nsi);
368 
369 	thread__put(thread);
370 	return dso;
371 }
372 
373 static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
374 					   union perf_event *event,
375 					   struct perf_sample *sample,
376 					   struct machine *machine)
377 {
378 	struct dso *dso;
379 
380 	dso = findnew_dso(event->mmap.pid, event->mmap.tid,
381 			  event->mmap.filename, NULL, machine);
382 
383 	if (dso && !dso->hit) {
384 		dso->hit = 1;
385 		dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
386 		dso__put(dso);
387 	}
388 
389 	return perf_event__repipe(tool, event, sample, machine);
390 }
391 
392 static int perf_event__repipe_mmap2(struct perf_tool *tool,
393 				   union perf_event *event,
394 				   struct perf_sample *sample,
395 				   struct machine *machine)
396 {
397 	int err;
398 
399 	err = perf_event__process_mmap2(tool, event, sample, machine);
400 	perf_event__repipe(tool, event, sample, machine);
401 
402 	return err;
403 }
404 
405 #ifdef HAVE_JITDUMP
406 static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
407 					union perf_event *event,
408 					struct perf_sample *sample,
409 					struct machine *machine)
410 {
411 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
412 	u64 n = 0;
413 	int ret;
414 
415 	/*
416 	 * if jit marker, then inject jit mmaps and generate ELF images
417 	 */
418 	ret = jit_process(inject->session, &inject->output, machine,
419 			  event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
420 	if (ret < 0)
421 		return ret;
422 	if (ret) {
423 		inject->bytes_written += n;
424 		return 0;
425 	}
426 	return perf_event__repipe_mmap2(tool, event, sample, machine);
427 }
428 #endif
429 
430 static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
431 					    union perf_event *event,
432 					    struct perf_sample *sample,
433 					    struct machine *machine)
434 {
435 	struct dso_id dso_id = {
436 		.maj = event->mmap2.maj,
437 		.min = event->mmap2.min,
438 		.ino = event->mmap2.ino,
439 		.ino_generation = event->mmap2.ino_generation,
440 	};
441 	struct dso *dso;
442 
443 	dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
444 			  event->mmap2.filename, &dso_id, machine);
445 
446 	if (dso && !dso->hit) {
447 		dso->hit = 1;
448 		dso__inject_build_id(dso, tool, machine, sample->cpumode,
449 				     event->mmap2.flags);
450 		dso__put(dso);
451 	}
452 
453 	perf_event__repipe(tool, event, sample, machine);
454 
455 	return 0;
456 }
457 
458 static int perf_event__repipe_fork(struct perf_tool *tool,
459 				   union perf_event *event,
460 				   struct perf_sample *sample,
461 				   struct machine *machine)
462 {
463 	int err;
464 
465 	err = perf_event__process_fork(tool, event, sample, machine);
466 	perf_event__repipe(tool, event, sample, machine);
467 
468 	return err;
469 }
470 
471 static int perf_event__repipe_comm(struct perf_tool *tool,
472 				   union perf_event *event,
473 				   struct perf_sample *sample,
474 				   struct machine *machine)
475 {
476 	int err;
477 
478 	err = perf_event__process_comm(tool, event, sample, machine);
479 	perf_event__repipe(tool, event, sample, machine);
480 
481 	return err;
482 }
483 
484 static int perf_event__repipe_namespaces(struct perf_tool *tool,
485 					 union perf_event *event,
486 					 struct perf_sample *sample,
487 					 struct machine *machine)
488 {
489 	int err = perf_event__process_namespaces(tool, event, sample, machine);
490 
491 	perf_event__repipe(tool, event, sample, machine);
492 
493 	return err;
494 }
495 
496 static int perf_event__repipe_exit(struct perf_tool *tool,
497 				   union perf_event *event,
498 				   struct perf_sample *sample,
499 				   struct machine *machine)
500 {
501 	int err;
502 
503 	err = perf_event__process_exit(tool, event, sample, machine);
504 	perf_event__repipe(tool, event, sample, machine);
505 
506 	return err;
507 }
508 
509 static int perf_event__repipe_tracing_data(struct perf_session *session,
510 					   union perf_event *event)
511 {
512 	int err;
513 
514 	perf_event__repipe_synth(session->tool, event);
515 	err = perf_event__process_tracing_data(session, event);
516 
517 	return err;
518 }
519 
520 static int dso__read_build_id(struct dso *dso)
521 {
522 	struct nscookie nsc;
523 
524 	if (dso->has_build_id)
525 		return 0;
526 
527 	nsinfo__mountns_enter(dso->nsinfo, &nsc);
528 	if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
529 		dso->has_build_id = true;
530 	nsinfo__mountns_exit(&nsc);
531 
532 	return dso->has_build_id ? 0 : -1;
533 }
534 
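/*
 * Read the build-id of a dso and synthesize a build-id event for it into
 * the output stream.  Anonymous, "no dso" and hugetlb mappings are skipped
 * since they have no ELF file to read a build-id from.
 */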
535 static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
536 				struct machine *machine, u8 cpumode, u32 flags)
537 {
538 	int err;
539 
540 	if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
541 		return 0;
542 	if (is_no_dso_memory(dso->long_name))
543 		return 0;
544 
545 	if (dso__read_build_id(dso) < 0) {
546 		pr_debug("no build_id found for %s\n", dso->long_name);
547 		return -1;
548 	}
549 
550 	err = perf_event__synthesize_build_id(tool, dso, cpumode,
551 					      perf_event__repipe, machine);
552 	if (err) {
553 		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
554 		return -1;
555 	}
556 
557 	return 0;
558 }
559 
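/*
 * -b/--build-ids handling: resolve the map that each sample ip hits, and
 * the first time a dso is hit, inject a build-id event for it before
 * repiping the sample itself.
 */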
560 int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
561 			       struct perf_sample *sample,
562 			       struct evsel *evsel __maybe_unused,
563 			       struct machine *machine)
564 {
565 	struct addr_location al;
566 	struct thread *thread;
567 
568 	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
569 	if (thread == NULL) {
570 		pr_err("problem processing %d event, skipping it.\n",
571 		       event->header.type);
572 		goto repipe;
573 	}
574 
575 	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
576 		if (!al.map->dso->hit) {
577 			al.map->dso->hit = 1;
578 			dso__inject_build_id(al.map->dso, tool, machine,
579 					     sample->cpumode, al.map->flags);
580 		}
581 	}
582 
583 	thread__put(thread);
584 repipe:
585 	perf_event__repipe(tool, event, sample, machine);
586 	return 0;
587 }
588 
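/*
 * -s/--sched-stat handling: every sched_switch sample is saved per tid
 * (replacing any previous one, and dropped when the task exits).  When a
 * sched_stat_* sample arrives, the saved switch sample for that task is
 * re-synthesized with the stat sample's time and period and repiped, so
 * the cost is attributed to the place where the task went to sleep.
 */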
589 static int perf_inject__sched_process_exit(struct perf_tool *tool,
590 					   union perf_event *event __maybe_unused,
591 					   struct perf_sample *sample,
592 					   struct evsel *evsel __maybe_unused,
593 					   struct machine *machine __maybe_unused)
594 {
595 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
596 	struct event_entry *ent;
597 
598 	list_for_each_entry(ent, &inject->samples, node) {
599 		if (sample->tid == ent->tid) {
600 			list_del_init(&ent->node);
601 			free(ent);
602 			break;
603 		}
604 	}
605 
606 	return 0;
607 }
608 
609 static int perf_inject__sched_switch(struct perf_tool *tool,
610 				     union perf_event *event,
611 				     struct perf_sample *sample,
612 				     struct evsel *evsel,
613 				     struct machine *machine)
614 {
615 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
616 	struct event_entry *ent;
617 
618 	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);
619 
620 	ent = malloc(event->header.size + sizeof(struct event_entry));
621 	if (ent == NULL) {
622 		color_fprintf(stderr, PERF_COLOR_RED,
623 			     "Not enough memory to process sched switch event!\n");
624 		return -1;
625 	}
626 
627 	ent->tid = sample->tid;
628 	memcpy(&ent->event, event, event->header.size);
629 	list_add(&ent->node, &inject->samples);
630 	return 0;
631 }
632 
633 static int perf_inject__sched_stat(struct perf_tool *tool,
634 				   union perf_event *event __maybe_unused,
635 				   struct perf_sample *sample,
636 				   struct evsel *evsel,
637 				   struct machine *machine)
638 {
639 	struct event_entry *ent;
640 	union perf_event *event_sw;
641 	struct perf_sample sample_sw;
642 	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
643 	u32 pid = evsel__intval(evsel, sample, "pid");
644 
645 	list_for_each_entry(ent, &inject->samples, node) {
646 		if (pid == ent->tid)
647 			goto found;
648 	}
649 
650 	return 0;
651 found:
652 	event_sw = &ent->event[0];
653 	evsel__parse_sample(evsel, event_sw, &sample_sw);
654 
655 	sample_sw.period = sample->period;
656 	sample_sw.time	 = sample->time;
657 	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
658 				      evsel->core.attr.read_format, &sample_sw);
659 	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
660 	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
661 }
662 
663 static void sig_handler(int sig __maybe_unused)
664 {
665 	session_done = 1;
666 }
667 
668 static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
669 {
670 	struct perf_event_attr *attr = &evsel->core.attr;
671 	const char *name = evsel__name(evsel);
672 
673 	if (!(attr->sample_type & sample_type)) {
674 		pr_err("Samples for %s event do not have %s attribute set.\n",
675 			name, sample_msg);
676 		return -EINVAL;
677 	}
678 
679 	return 0;
680 }
681 
682 static int drop_sample(struct perf_tool *tool __maybe_unused,
683 		       union perf_event *event __maybe_unused,
684 		       struct perf_sample *sample __maybe_unused,
685 		       struct evsel *evsel __maybe_unused,
686 		       struct machine *machine __maybe_unused)
687 {
688 	return 0;
689 }
690 
691 static void strip_init(struct perf_inject *inject)
692 {
693 	struct evlist *evlist = inject->session->evlist;
694 	struct evsel *evsel;
695 
696 	inject->tool.context_switch = perf_event__drop;
697 
698 	evlist__for_each_entry(evlist, evsel)
699 		evsel->handler = drop_sample;
700 }
701 
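/*
 * --vm-time-correlation[=opts] parsing: enables in-place updating of the
 * input file and passes the remaining option string to the AUX area
 * decoder; a leading "dry-run" makes the update a simulation only.
 */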
702 static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
703 {
704 	struct perf_inject *inject = opt->value;
705 	const char *args;
706 	char *dry_run;
707 
708 	if (unset)
709 		return 0;
710 
711 	inject->itrace_synth_opts.set = true;
712 	inject->itrace_synth_opts.vm_time_correlation = true;
713 	inject->in_place_update = true;
714 
715 	if (!str)
716 		return 0;
717 
718 	dry_run = skip_spaces(str);
719 	if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
720 		inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
721 		inject->in_place_update_dry_run = true;
722 		args = dry_run + strlen("dry-run");
723 	} else {
724 		args = str;
725 	}
726 
727 	inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);
728 
729 	return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
730 }
731 
732 static int __cmd_inject(struct perf_inject *inject)
733 {
734 	int ret = -EINVAL;
735 	struct perf_session *session = inject->session;
736 	struct perf_data *data_out = &inject->output;
737 	int fd = inject->in_place_update ? -1 : perf_data__fd(data_out);
738 	u64 output_data_offset;
739 
740 	signal(SIGINT, sig_handler);
741 
742 	if (inject->build_ids || inject->sched_stat ||
743 	    inject->itrace_synth_opts.set || inject->build_id_all) {
744 		inject->tool.mmap	  = perf_event__repipe_mmap;
745 		inject->tool.mmap2	  = perf_event__repipe_mmap2;
746 		inject->tool.fork	  = perf_event__repipe_fork;
747 		inject->tool.tracing_data = perf_event__repipe_tracing_data;
748 	}
749 
750 	output_data_offset = session->header.data_offset;
751 
752 	if (inject->build_id_all) {
753 		inject->tool.mmap	  = perf_event__repipe_buildid_mmap;
754 		inject->tool.mmap2	  = perf_event__repipe_buildid_mmap2;
755 	} else if (inject->build_ids) {
756 		inject->tool.sample = perf_event__inject_buildid;
757 	} else if (inject->sched_stat) {
758 		struct evsel *evsel;
759 
760 		evlist__for_each_entry(session->evlist, evsel) {
761 			const char *name = evsel__name(evsel);
762 
763 			if (!strcmp(name, "sched:sched_switch")) {
764 				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
765 					return -EINVAL;
766 
767 				evsel->handler = perf_inject__sched_switch;
768 			} else if (!strcmp(name, "sched:sched_process_exit"))
769 				evsel->handler = perf_inject__sched_process_exit;
770 			else if (!strncmp(name, "sched:sched_stat_", 17))
771 				evsel->handler = perf_inject__sched_stat;
772 		}
773 	} else if (inject->itrace_synth_opts.vm_time_correlation) {
774 		session->itrace_synth_opts = &inject->itrace_synth_opts;
775 		memset(&inject->tool, 0, sizeof(inject->tool));
776 		inject->tool.id_index	    = perf_event__process_id_index;
777 		inject->tool.auxtrace_info  = perf_event__process_auxtrace_info;
778 		inject->tool.auxtrace	    = perf_event__process_auxtrace;
779 		inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
780 		inject->tool.ordered_events = true;
781 		inject->tool.ordering_requires_timestamps = true;
782 	} else if (inject->itrace_synth_opts.set) {
783 		session->itrace_synth_opts = &inject->itrace_synth_opts;
784 		inject->itrace_synth_opts.inject = true;
785 		inject->tool.comm	    = perf_event__repipe_comm;
786 		inject->tool.namespaces	    = perf_event__repipe_namespaces;
787 		inject->tool.exit	    = perf_event__repipe_exit;
788 		inject->tool.id_index	    = perf_event__process_id_index;
789 		inject->tool.auxtrace_info  = perf_event__process_auxtrace_info;
790 		inject->tool.auxtrace	    = perf_event__process_auxtrace;
791 		inject->tool.aux	    = perf_event__drop_aux;
792 		inject->tool.itrace_start   = perf_event__drop_aux;
793 		inject->tool.ordered_events = true;
794 		inject->tool.ordering_requires_timestamps = true;
795 		/* Allow space in the header for new attributes */
796 		output_data_offset = 4096;
797 		if (inject->strip)
798 			strip_init(inject);
799 	}
800 
801 	if (!inject->itrace_synth_opts.set)
802 		auxtrace_index__free(&session->auxtrace_index);
803 
804 	if (!data_out->is_pipe && !inject->in_place_update)
805 		lseek(fd, output_data_offset, SEEK_SET);
806 
807 	ret = perf_session__process_events(session);
808 	if (ret)
809 		return ret;
810 
811 	if (!data_out->is_pipe && !inject->in_place_update) {
812 		if (inject->build_ids)
813 			perf_header__set_feat(&session->header,
814 					      HEADER_BUILD_ID);
815 		/*
816 		 * Keep all buildids when there is unprocessed AUX data because
817 		 * it is not known which ones the AUX trace hits.
818 		 */
819 		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
820 		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
821 			dsos__hit_all(session);
822 		/*
823 		 * The AUX areas have been removed and replaced with
824 		 * synthesized hardware events, so clear the feature flag.
825 		 */
826 		if (inject->itrace_synth_opts.set) {
827 			perf_header__clear_feat(&session->header,
828 						HEADER_AUXTRACE);
829 			if (inject->itrace_synth_opts.last_branch ||
830 			    inject->itrace_synth_opts.add_last_branch)
831 				perf_header__set_feat(&session->header,
832 						      HEADER_BRANCH_STACK);
833 		}
834 		session->header.data_offset = output_data_offset;
835 		session->header.data_size = inject->bytes_written;
836 		perf_session__write_header(session, session->evlist, fd, true);
837 	}
838 
839 	return ret;
840 }
841 
842 int cmd_inject(int argc, const char **argv)
843 {
844 	struct perf_inject inject = {
845 		.tool = {
846 			.sample		= perf_event__repipe_sample,
847 			.read		= perf_event__repipe_sample,
848 			.mmap		= perf_event__repipe,
849 			.mmap2		= perf_event__repipe,
850 			.comm		= perf_event__repipe,
851 			.namespaces	= perf_event__repipe,
852 			.cgroup		= perf_event__repipe,
853 			.fork		= perf_event__repipe,
854 			.exit		= perf_event__repipe,
855 			.lost		= perf_event__repipe,
856 			.lost_samples	= perf_event__repipe,
857 			.aux		= perf_event__repipe,
858 			.itrace_start	= perf_event__repipe,
859 			.context_switch	= perf_event__repipe,
860 			.throttle	= perf_event__repipe,
861 			.unthrottle	= perf_event__repipe,
862 			.ksymbol	= perf_event__repipe,
863 			.bpf		= perf_event__repipe,
864 			.text_poke	= perf_event__repipe,
865 			.attr		= perf_event__repipe_attr,
866 			.event_update	= perf_event__repipe_event_update,
867 			.tracing_data	= perf_event__repipe_op2_synth,
868 			.finished_round	= perf_event__repipe_oe_synth,
869 			.build_id	= perf_event__repipe_op2_synth,
870 			.id_index	= perf_event__repipe_op2_synth,
871 			.auxtrace_info	= perf_event__repipe_op2_synth,
872 			.auxtrace_error	= perf_event__repipe_op2_synth,
873 			.time_conv	= perf_event__repipe_op2_synth,
874 			.thread_map	= perf_event__repipe_op2_synth,
875 			.cpu_map	= perf_event__repipe_op2_synth,
876 			.stat_config	= perf_event__repipe_op2_synth,
877 			.stat		= perf_event__repipe_op2_synth,
878 			.stat_round	= perf_event__repipe_op2_synth,
879 			.feature	= perf_event__repipe_op2_synth,
880 			.compressed	= perf_event__repipe_op4_synth,
881 			.auxtrace	= perf_event__repipe_auxtrace,
882 		},
883 		.input_name  = "-",
884 		.samples = LIST_HEAD_INIT(inject.samples),
885 		.output = {
886 			.path = "-",
887 			.mode = PERF_DATA_MODE_WRITE,
888 			.use_stdio = true,
889 		},
890 	};
891 	struct perf_data data = {
892 		.mode = PERF_DATA_MODE_READ,
893 		.use_stdio = true,
894 	};
895 	int ret;
896 
897 	struct option options[] = {
898 		OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
899 			    "Inject build-ids into the output stream"),
900 		OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
901 			    "Inject build-ids of all DSOs into the output stream"),
902 		OPT_STRING('i', "input", &inject.input_name, "file",
903 			   "input file name"),
904 		OPT_STRING('o', "output", &inject.output.path, "file",
905 			   "output file name"),
906 		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
907 			    "Merge sched-stat and sched-switch for getting events "
908 			    "where and how long tasks slept"),
909 #ifdef HAVE_JITDUMP
910 		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
911 #endif
912 		OPT_INCR('v', "verbose", &verbose,
913 			 "be more verbose (show build ids, etc)"),
914 		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
915 			   "kallsyms pathname"),
916 		OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
917 		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
918 				    NULL, "opts", "Instruction Tracing options\n"
919 				    ITRACE_HELP,
920 				    itrace_parse_synth_opts),
921 		OPT_BOOLEAN(0, "strip", &inject.strip,
922 			    "strip non-synthesized events (use with --itrace)"),
923 		OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
924 				    "correlate time between VM guests and the host",
925 				    parse_vm_time_correlation),
926 		OPT_END()
927 	};
928 	const char * const inject_usage[] = {
929 		"perf inject [<options>]",
930 		NULL
931 	};
932 #ifndef HAVE_JITDUMP
933 	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
934 #endif
935 	argc = parse_options(argc, argv, options, inject_usage, 0);
936 
937 	/*
938 	 * Any (unrecognized) arguments left?
939 	 */
940 	if (argc)
941 		usage_with_options(inject_usage, options);
942 
943 	if (inject.strip && !inject.itrace_synth_opts.set) {
944 		pr_err("--strip option requires --itrace option\n");
945 		return -1;
946 	}
947 
948 	if (inject.in_place_update) {
949 		if (!strcmp(inject.input_name, "-")) {
950 			pr_err("Input file name required for in-place updating\n");
951 			return -1;
952 		}
953 		if (strcmp(inject.output.path, "-")) {
954 			pr_err("Output file name must not be specified for in-place updating\n");
955 			return -1;
956 		}
957 		if (!data.force && !inject.in_place_update_dry_run) {
958 			pr_err("The input file would be updated in place, "
959 				"the --force option is required.\n");
960 			return -1;
961 		}
962 		if (!inject.in_place_update_dry_run)
963 			data.in_place_update = true;
964 	} else if (perf_data__open(&inject.output)) {
965 		perror("failed to create output file");
966 		return -1;
967 	}
968 
969 	data.path = inject.input_name;
970 	inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool);
971 	if (IS_ERR(inject.session))
972 		return PTR_ERR(inject.session);
973 
974 	if (zstd_init(&(inject.session->zstd_data), 0) < 0)
975 		pr_warning("Decompression initialization failed.\n");
976 
977 	if (inject.build_ids && !inject.build_id_all) {
978 		/*
979 		 * Process events in time order so that mmap records, especially
980 		 * those for jitted code, are seen before the samples that hit
981 		 * them.  We cannot generate the buildid hit list and inject the
982 		 * jit mmaps at the same time for now.
983 		 */
984 		inject.tool.ordered_events = true;
985 		inject.tool.ordering_requires_timestamps = true;
986 	}
987 
988 	if (inject.sched_stat)
989 		inject.tool.ordered_events = true;
991 
992 #ifdef HAVE_JITDUMP
993 	if (inject.jit_mode) {
994 		inject.tool.mmap2	   = perf_event__jit_repipe_mmap2;
995 		inject.tool.mmap	   = perf_event__jit_repipe_mmap;
996 		inject.tool.ordered_events = true;
997 		inject.tool.ordering_requires_timestamps = true;
998 		/*
999 		 * JIT MMAP injection injects all MMAP events in one go, so it
1000 		 * does not obey finished_round semantics.
1001 		 */
1002 		inject.tool.finished_round = perf_event__drop_oe;
1003 	}
1004 #endif
1005 	ret = symbol__init(&inject.session->header.env);
1006 	if (ret < 0)
1007 		goto out_delete;
1008 
1009 	ret = __cmd_inject(&inject);
1010 
1011 out_delete:
1012 	zstd_fini(&(inject.session->zstd_data));
1013 	perf_session__delete(inject.session);
1014 	free(inject.itrace_synth_opts.vm_tm_corr_args);
1015 	return ret;
1016 }
1017