// SPDX-License-Identifier: GPL-2.0
/*
 * Fprobe-based tracing events
 * Copyright (C) 2022 Google LLC.
 */
#define pr_fmt(fmt)	"trace_fprobe: " fmt
#include <asm/ptrace.h>

#include <linux/fprobe.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/security.h>
#include <linux/tracepoint.h>
#include <linux/uaccess.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_kernel.h"
#include "trace_probe_tmpl.h"

#define FPROBE_EVENT_SYSTEM "fprobes"
#define TRACEPOINT_EVENT_SYSTEM "tracepoints"
#define RETHOOK_MAXACTIVE_MAX 4096

static int trace_fprobe_create(const char *raw_command);
static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_fprobe_release(struct dyn_event *ev);
static bool trace_fprobe_is_busy(struct dyn_event *ev);
static bool trace_fprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_fprobe_ops = {
	.create = trace_fprobe_create,
	.show = trace_fprobe_show,
	.is_busy = trace_fprobe_is_busy,
	.free = trace_fprobe_release,
	.match = trace_fprobe_match,
};

/*
 * Fprobe event core functions
 */
struct trace_fprobe {
	struct dyn_event	devent;
	struct fprobe		fp;
	const char		*symbol;
	struct tracepoint	*tpoint;
	struct module		*mod;
	struct trace_probe	tp;
};

static bool is_trace_fprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_fprobe_ops;
}

static struct trace_fprobe *to_trace_fprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_fprobe, devent);
}

/**
 * for_each_trace_fprobe - iterate over the trace_fprobe list
 * @pos:	the struct trace_fprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_fprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_fprobe(dpos) && (pos = to_trace_fprobe(dpos)))

static bool trace_fprobe_is_return(struct trace_fprobe *tf)
{
	return tf->fp.exit_handler != NULL;
}

static bool trace_fprobe_is_tracepoint(struct trace_fprobe *tf)
{
	return tf->tpoint != NULL;
}

static const char *trace_fprobe_symbol(struct trace_fprobe *tf)
{
	return tf->symbol ? tf->symbol : "unknown";
}

static bool trace_fprobe_is_busy(struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);

	return trace_probe_is_enabled(&tf->tp);
}

static bool trace_fprobe_match_command_head(struct trace_fprobe *tf,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];

	if (!argc)
		return true;

	snprintf(buf, sizeof(buf), "%s", trace_fprobe_symbol(tf));
	if (strcmp(buf, argv[0]))
		return false;
	argc--; argv++;

	return trace_probe_match_command_args(&tf->tp, argc, argv);
}

static bool trace_fprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);

	if (event[0] != '\0' && strcmp(trace_probe_name(&tf->tp), event))
		return false;

	if (system && strcmp(trace_probe_group_name(&tf->tp), system))
		return false;

	return trace_fprobe_match_command_head(tf, argc, argv);
}

static bool trace_fprobe_is_registered(struct trace_fprobe *tf)
{
	return fprobe_is_registered(&tf->fp);
}

/*
 * Note that we don't verify the fetch_insn code, since it does not come
 * from user space.
 */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
		   void *dest, void *base)
{
	struct pt_regs *regs = rec;
	unsigned long val;
	int ret;

retry:
	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_STACK:
		val = regs_get_kernel_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = kernel_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	case FETCH_OP_ARG:
		val = regs_get_kernel_argument(regs, code->param);
		break;
	case FETCH_OP_EDATA:
		val = *(unsigned long *)((unsigned long)edata + code->offset);
		break;
#endif
	case FETCH_NOP_SYMBOL:	/* Ignore a placeholder */
		code++;
		goto retry;
	default:
		ret = process_common_fetch_insn(code, &val);
		if (ret < 0)
			return ret;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

/* function entry handler */
static nokprobe_inline void
__fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		    struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct fentry_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	if (WARN_ON_ONCE(call != trace_file->event_call))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	dsize = __get_data_size(&tf->tp, regs, NULL);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tf->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = regs;
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->ip = entry_ip;
	store_trace_args(&entry[1], &tf->tp, regs, NULL, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

static void
fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		  struct pt_regs *regs)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tf->tp)
		__fentry_trace_func(tf, entry_ip, regs, link->file);
}
NOKPROBE_SYMBOL(fentry_trace_func);

/* function entry handler for exit events: stores the function entry data */
static int trace_fprobe_entry_handler(struct fprobe *fp, unsigned long entry_ip,
				unsigned long ret_ip, struct pt_regs *regs,
				void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);

	if (tf->tp.entry_arg)
		store_trace_entry_data(entry_data, &tf->tp, regs);

	return 0;
}
NOKPROBE_SYMBOL(trace_fprobe_entry_handler)

static nokprobe_inline void
__fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		   unsigned long ret_ip, struct pt_regs *regs,
		   void *entry_data, struct trace_event_file *trace_file)
{
	struct fexit_trace_entry_head *entry;
	struct trace_event_buffer fbuffer;
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	int dsize;

	if (WARN_ON_ONCE(call != trace_file->event_call))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	dsize = __get_data_size(&tf->tp, regs, entry_data);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tf->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = regs;
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->func = entry_ip;
	entry->ret_ip = ret_ip;
	store_trace_args(&entry[1], &tf->tp, regs, entry_data, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

static void
fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		 unsigned long ret_ip, struct pt_regs *regs, void *entry_data)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tf->tp)
		__fexit_trace_func(tf, entry_ip, ret_ip, regs, entry_data, link->file);
}
NOKPROBE_SYMBOL(fexit_trace_func);

#ifdef CONFIG_PERF_EVENTS

static int fentry_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
			    struct pt_regs *regs)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct fentry_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tf->tp, regs, NULL);
	__size = sizeof(*entry) + tf->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return 0;

	entry->ip = entry_ip;
	memset(&entry[1], 0, dsize);
	store_trace_args(&entry[1], &tf->tp, regs, NULL, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(fentry_perf_func);

static void
fexit_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
		unsigned long ret_ip, struct pt_regs *regs,
		void *entry_data)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct fexit_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tf->tp, regs, entry_data);
	__size = sizeof(*entry) + tf->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = entry_ip;
	entry->ret_ip = ret_ip;
	store_trace_args(&entry[1], &tf->tp, regs, entry_data, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(fexit_perf_func);
#endif	/* CONFIG_PERF_EVENTS */

static int fentry_dispatcher(struct fprobe *fp, unsigned long entry_ip,
			     unsigned long ret_ip, struct pt_regs *regs,
			     void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
	int ret = 0;

	if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
		fentry_trace_func(tf, entry_ip, regs);
#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
		ret = fentry_perf_func(tf, entry_ip, regs);
#endif
	return ret;
}
NOKPROBE_SYMBOL(fentry_dispatcher);

static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip,
			     unsigned long ret_ip, struct pt_regs *regs,
			     void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);

	if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
		fexit_trace_func(tf, entry_ip, ret_ip, regs, entry_data);
#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
		fexit_perf_func(tf, entry_ip, ret_ip, regs, entry_data);
#endif
}
NOKPROBE_SYMBOL(fexit_dispatcher);

static void free_trace_fprobe(struct trace_fprobe *tf)
{
	if (tf) {
		trace_probe_cleanup(&tf->tp);
		kfree(tf->symbol);
		kfree(tf);
	}
}

/*
 * Allocate new trace_probe and initialize it (including fprobe).
 */
static struct trace_fprobe *alloc_trace_fprobe(const char *group,
					       const char *event,
					       const char *symbol,
					       struct tracepoint *tpoint,
					       int maxactive,
					       int nargs, bool is_return)
{
	struct trace_fprobe *tf;
	int ret = -ENOMEM;

	tf = kzalloc(struct_size(tf, tp.args, nargs), GFP_KERNEL);
	if (!tf)
		return ERR_PTR(ret);

	tf->symbol = kstrdup(symbol, GFP_KERNEL);
	if (!tf->symbol)
		goto error;

	if (is_return)
		tf->fp.exit_handler = fexit_dispatcher;
	else
		tf->fp.entry_handler = fentry_dispatcher;

	tf->tpoint = tpoint;
	tf->fp.nr_maxactive = maxactive;

	ret = trace_probe_init(&tf->tp, event, group, false, nargs);
	if (ret < 0)
		goto error;

	dyn_event_init(&tf->devent, &trace_fprobe_ops);
	return tf;
error:
	free_trace_fprobe(tf);
	return ERR_PTR(ret);
}

static struct trace_fprobe *find_trace_fprobe(const char *event,
					      const char *group)
{
	struct dyn_event *pos;
	struct trace_fprobe *tf;

	for_each_trace_fprobe(tf, pos)
		if (strcmp(trace_probe_name(&tf->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tf->tp), group) == 0)
			return tf;
	return NULL;
}

static inline int __enable_trace_fprobe(struct trace_fprobe *tf)
{
	if (trace_fprobe_is_registered(tf))
		enable_fprobe(&tf->fp);

	return 0;
}

static void __disable_trace_fprobe(struct trace_probe *tp)
{
	struct trace_fprobe *tf;

	list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
		if (!trace_fprobe_is_registered(tf))
			continue;
		disable_fprobe(&tf->fp);
	}
}

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler.
 */
static int enable_trace_fprobe(struct trace_event_call *call,
			       struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_fprobe *tf;
	bool enabled;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This also changes "enabled" state */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);

	if (!enabled) {
		list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
			/* TODO: check the fprobe is gone */
			__enable_trace_fprobe(tf);
		}
	}

	return 0;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler.
 */
static int disable_trace_fprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	if (!trace_probe_is_enabled(tp))
		__disable_trace_fprobe(tp);

 out:
	if (file)
		/*
		 * Synchronization is done in the function below. For a perf
		 * event, file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to synchronize the
		 * event, so we don't need to care about it here.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}

/* Event entry printers */
static enum print_line_t
print_fentry_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct fentry_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct fentry_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (trace_probe_print_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_fexit_event(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct fexit_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct fexit_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (trace_probe_print_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

static int fentry_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct fentry_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static int fexit_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct fexit_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static struct trace_event_functions fentry_funcs = {
	.trace		= print_fentry_event
};

static struct trace_event_functions fexit_funcs = {
	.trace		= print_fexit_event
};

static struct trace_event_fields fentry_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = fentry_event_define_fields },
	{}
};

static struct trace_event_fields fexit_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = fexit_event_define_fields },
	{}
};

static int fprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data);

static inline void init_trace_event_call(struct trace_fprobe *tf)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);

	if (trace_fprobe_is_return(tf)) {
		call->event.funcs = &fexit_funcs;
		call->class->fields_array = fexit_fields_array;
	} else {
		call->event.funcs = &fentry_funcs;
		call->class->fields_array = fentry_fields_array;
	}

	call->flags = TRACE_EVENT_FL_FPROBE;
	call->class->reg = fprobe_register;
}

static int register_fprobe_event(struct trace_fprobe *tf)
{
	init_trace_event_call(tf);

	return trace_probe_register_event_call(&tf->tp);
}

static int unregister_fprobe_event(struct trace_fprobe *tf)
{
	return trace_probe_unregister_event_call(&tf->tp);
}

/* Internal register function - just handle fprobe and flags */
static int __register_trace_fprobe(struct trace_fprobe *tf)
{
	int i, ret;

	/* Should we need a new LOCKDOWN flag for fprobe? */
	ret = security_locked_down(LOCKDOWN_KPROBES);
	if (ret)
		return ret;

	if (trace_fprobe_is_registered(tf))
		return -EINVAL;

	for (i = 0; i < tf->tp.nr_args; i++) {
		ret = traceprobe_update_arg(&tf->tp.args[i]);
		if (ret)
			return ret;
	}

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tf->tp))
		tf->fp.flags &= ~FPROBE_FL_DISABLED;
	else
		tf->fp.flags |= FPROBE_FL_DISABLED;

	if (trace_fprobe_is_tracepoint(tf)) {
		struct tracepoint *tpoint = tf->tpoint;
		unsigned long ip = (unsigned long)tpoint->probestub;
		/*
		 * Here we take two steps to enable fprobe on a tracepoint:
		 * first, register the __probestub_##TP function on the
		 * tracepoint, then put an fprobe on that stub function.
		 */
		ret = tracepoint_probe_register_prio_may_exist(tpoint,
					tpoint->probestub, NULL, 0);
		if (ret < 0)
			return ret;
		return register_fprobe_ips(&tf->fp, &ip, 1);
	}

	/* TODO: handle filter, nofilter or symbol list */
	return register_fprobe(&tf->fp, tf->symbol, NULL);
}

/* Internal unregister function - just handle fprobe and flags */
static void __unregister_trace_fprobe(struct trace_fprobe *tf)
{
	if (trace_fprobe_is_registered(tf)) {
		unregister_fprobe(&tf->fp);
		memset(&tf->fp, 0, sizeof(tf->fp));
		if (trace_fprobe_is_tracepoint(tf)) {
			tracepoint_probe_unregister(tf->tpoint,
					tf->tpoint->probestub, NULL);
			tf->tpoint = NULL;
			tf->mod = NULL;
		}
	}
}

/* TODO: make this trace_*probe common function */
/* Unregister a trace_probe and probe_event */
static int unregister_trace_fprobe(struct trace_fprobe *tf)
{
	/* If other probes are on the event, just unregister fprobe */
	if (trace_probe_has_sibling(&tf->tp))
		goto unreg;

	/* An enabled event cannot be unregistered */
	if (trace_probe_is_enabled(&tf->tp))
		return -EBUSY;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tf->tp)))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_fprobe_event(tf))
		return -EBUSY;

unreg:
	__unregister_trace_fprobe(tf);
	dyn_event_remove(&tf->devent);
	trace_probe_unlink(&tf->tp);

	return 0;
}

static bool trace_fprobe_has_same_fprobe(struct trace_fprobe *orig,
					 struct trace_fprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	int i;

	list_for_each_entry(orig, &tpe->probes, tp.list) {
		if (strcmp(trace_fprobe_symbol(orig),
			   trace_fprobe_symbol(comp)))
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are the same. Let's compare
		 * the comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_fprobe(struct trace_fprobe *tf, struct trace_fprobe *to)
{
	int ret;

	if (trace_fprobe_is_return(tf) != trace_fprobe_is_return(to) ||
	    trace_fprobe_is_tracepoint(tf) != trace_fprobe_is_tracepoint(to)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, DIFF_PROBE_TYPE);
		return -EEXIST;
	}
	ret = trace_probe_compare_arg_type(&tf->tp, &to->tp);
	if (ret) {
		/* Note that arguments start at index 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_fprobe_has_same_fprobe(to, tf)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tf->tp, &to->tp);
	if (ret)
		return ret;

	ret = __register_trace_fprobe(tf);
	if (ret)
		trace_probe_unlink(&tf->tp);
	else
		dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));

	return ret;
}

/* Register a trace_probe and probe_event */
static int register_trace_fprobe(struct trace_fprobe *tf)
{
	struct trace_fprobe *old_tf;
	int ret;

	mutex_lock(&event_mutex);

	old_tf = find_trace_fprobe(trace_probe_name(&tf->tp),
				   trace_probe_group_name(&tf->tp));
	if (old_tf) {
		ret = append_trace_fprobe(tf, old_tf);
		goto end;
	}

	/* Register new event */
	ret = register_fprobe_event(tf);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register fprobe */
	ret = __register_trace_fprobe(tf);
	if (ret < 0)
		unregister_fprobe_event(tf);
	else
		dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));

end:
	mutex_unlock(&event_mutex);
	return ret;
}

#ifdef CONFIG_MODULES
static int __tracepoint_probe_module_cb(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct tp_module *tp_mod = data;
	struct trace_fprobe *tf;
	struct dyn_event *pos;

	if (val != MODULE_STATE_GOING)
		return NOTIFY_DONE;

	mutex_lock(&event_mutex);
	for_each_trace_fprobe(tf, pos) {
		if (tp_mod->mod == tf->mod) {
			tracepoint_probe_unregister(tf->tpoint,
					tf->tpoint->probestub, NULL);
			tf->tpoint = NULL;
			tf->mod = NULL;
		}
	}
	mutex_unlock(&event_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = __tracepoint_probe_module_cb,
};
#endif /* CONFIG_MODULES */

struct __find_tracepoint_cb_data {
	const char *tp_name;
	struct tracepoint *tpoint;
};

static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
{
	struct __find_tracepoint_cb_data *data = priv;

	if (!data->tpoint && !strcmp(data->tp_name, tp->name))
		data->tpoint = tp;
}

static struct tracepoint *find_tracepoint(const char *tp_name)
{
	struct __find_tracepoint_cb_data data = {
		.tp_name = tp_name,
	};

	for_each_kernel_tracepoint(__find_tracepoint_cb, &data);

	return data.tpoint;
}

static int parse_symbol_and_return(int argc, const char *argv[],
				   char **symbol, bool *is_return,
				   bool is_tracepoint)
{
	char *tmp = strchr(argv[1], '%');
	int i;

	if (tmp) {
		int len = tmp - argv[1];

		if (!is_tracepoint && !strcmp(tmp, "%return")) {
			*is_return = true;
		} else {
			trace_probe_log_err(len, BAD_ADDR_SUFFIX);
			return -EINVAL;
		}
		*symbol = kmemdup_nul(argv[1], len, GFP_KERNEL);
	} else
		*symbol = kstrdup(argv[1], GFP_KERNEL);
	if (!*symbol)
		return -ENOMEM;

	if (*is_return)
		return 0;

	/* If there is $retval, this should be a return fprobe. */
	for (i = 2; i < argc; i++) {
		tmp = strstr(argv[i], "$retval");
		if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') {
			if (is_tracepoint) {
				trace_probe_log_set_index(i);
				trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
				return -EINVAL;
			}
			*is_return = true;
			break;
		}
	}
	return 0;
}
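
/*
 * For example (illustrative symbol name, not from this file): "vfs_read"
 * leaves *is_return false, while "vfs_read%return" strips the suffix and
 * sets *is_return. A "$retval" fetch argument also promotes the probe to
 * a return probe, except on a tracepoint probe, where it is rejected.
 */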

static int __trace_fprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *  - Add fentry probe:
	 *      f[:[GRP/][EVENT]] [MOD:]KSYM [FETCHARGS]
	 *  - Add fexit probe:
	 *      f[N][:[GRP/][EVENT]] [MOD:]KSYM%return [FETCHARGS]
	 *  - Add tracepoint probe:
	 *      t[:[GRP/][EVENT]] TRACEPOINT [FETCHARGS]
	 *
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth entry of stack (N:0-)
	 *  $argN	: fetch Nth argument (N:1-)
	 *  $comm       : fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
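	/*
	 * Illustrative commands (assuming tracefs is mounted; the symbol,
	 * tracepoint, and event names below are examples only):
	 *   echo 'f:myentry vfs_read count=$arg3' >> dynamic_events
	 *   echo 'f:myexit vfs_read%return ret=$retval' >> dynamic_events
	 *   echo 't:mytp sched_switch' >> dynamic_events
	 */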
	struct trace_fprobe *tf = NULL;
	int i, len, new_argc = 0, ret = 0;
	bool is_return = false;
	char *symbol = NULL;
	const char *event = NULL, *group = FPROBE_EVENT_SYSTEM;
	const char **new_argv = NULL;
	int maxactive = 0;
	char buf[MAX_EVENT_NAME_LEN];
	char gbuf[MAX_EVENT_NAME_LEN];
	char sbuf[KSYM_NAME_LEN];
	char abuf[MAX_BTF_ARGS_LEN];
	char *dbuf = NULL;
	bool is_tracepoint = false;
	struct tracepoint *tpoint = NULL;
	struct traceprobe_parse_context ctx = {
		.flags = TPARG_FL_KERNEL | TPARG_FL_FPROBE,
	};

	if ((argv[0][0] != 'f' && argv[0][0] != 't') || argc < 2)
		return -ECANCELED;

	if (argv[0][0] == 't') {
		is_tracepoint = true;
		group = TRACEPOINT_EVENT_SYSTEM;
	}

	trace_probe_log_init("trace_fprobe", argc, argv);

	event = strchr(&argv[0][1], ':');
	if (event)
		event++;

	if (isdigit(argv[0][1])) {
		if (event)
			len = event - &argv[0][1] - 1;
		else
			len = strlen(&argv[0][1]);
		if (len > MAX_EVENT_NAME_LEN - 1) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		memcpy(buf, &argv[0][1], len);
		buf[len] = '\0';
		ret = kstrtouint(buf, 0, &maxactive);
		if (ret || !maxactive) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		/*
		 * fprobe rethook instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > RETHOOK_MAXACTIVE_MAX) {
			trace_probe_log_err(1, MAXACT_TOO_BIG);
			goto parse_error;
		}
	}

	trace_probe_log_set_index(1);

	/* a symbol (or tracepoint) must be specified */
	ret = parse_symbol_and_return(argc, argv, &symbol, &is_return, is_tracepoint);
	if (ret < 0)
		goto parse_error;

	if (!is_return && maxactive) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(1, BAD_MAXACT_TYPE);
		goto parse_error;
	}

	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, gbuf,
						  event - argv[0]);
		if (ret)
			goto parse_error;
	}

	if (!event) {
		/* Make a new event name */
		if (is_tracepoint)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%s%s",
				 isdigit(*symbol) ? "_" : "", symbol);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%s__%s", symbol,
				 is_return ? "exit" : "entry");
		sanitize_event_name(buf);
		event = buf;
	}

	if (is_return)
		ctx.flags |= TPARG_FL_RETURN;
	else
		ctx.flags |= TPARG_FL_FENTRY;

	if (is_tracepoint) {
		ctx.flags |= TPARG_FL_TPOINT;
		tpoint = find_tracepoint(symbol);
		if (!tpoint) {
			trace_probe_log_set_index(1);
			trace_probe_log_err(0, NO_TRACEPOINT);
			goto parse_error;
		}
		ctx.funcname = kallsyms_lookup(
				(unsigned long)tpoint->probestub,
				NULL, NULL, NULL, sbuf);
	} else
		ctx.funcname = symbol;

	argc -= 2; argv += 2;
	new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
					       abuf, MAX_BTF_ARGS_LEN, &ctx);
	if (IS_ERR(new_argv)) {
		ret = PTR_ERR(new_argv);
		new_argv = NULL;
		goto out;
	}
	if (new_argv) {
		argc = new_argc;
		argv = new_argv;
	}

	ret = traceprobe_expand_dentry_args(argc, argv, &dbuf);
	if (ret)
		goto out;

	/* setup a probe */
	tf = alloc_trace_fprobe(group, event, symbol, tpoint, maxactive,
				argc, is_return);
	if (IS_ERR(tf)) {
		ret = PTR_ERR(tf);
		/* This must return -ENOMEM, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto out;	/* We know tf is not allocated */
	}

	if (is_tracepoint)
		tf->mod = __module_text_address(
				(unsigned long)tf->tpoint->probestub);

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		trace_probe_log_set_index(i + 2);
		ctx.offset = 0;
		ret = traceprobe_parse_probe_arg(&tf->tp, i, argv[i], &ctx);
		if (ret)
			goto error;	/* This can be -ENOMEM */
	}

	if (is_return && tf->tp.entry_arg) {
		tf->fp.entry_handler = trace_fprobe_entry_handler;
		tf->fp.entry_data_size = traceprobe_get_entry_data_size(&tf->tp);
	}

	ret = traceprobe_set_print_fmt(&tf->tp,
			is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL);
	if (ret < 0)
		goto error;

	ret = register_trace_fprobe(tf);
	if (ret) {
		trace_probe_log_set_index(1);
		if (ret == -EILSEQ)
			trace_probe_log_err(0, BAD_INSN_BNDRY);
		else if (ret == -ENOENT)
			trace_probe_log_err(0, BAD_PROBE_ADDR);
		else if (ret != -ENOMEM && ret != -EEXIST)
			trace_probe_log_err(0, FAIL_REG_PROBE);
		goto error;
	}

out:
	traceprobe_finish_parse(&ctx);
	trace_probe_log_clear();
	kfree(new_argv);
	kfree(symbol);
	kfree(dbuf);
	return ret;

parse_error:
	ret = -EINVAL;
error:
	free_trace_fprobe(tf);
	goto out;
}

static int trace_fprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_fprobe_create);
}

static int trace_fprobe_release(struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);
	int ret = unregister_trace_fprobe(tf);

	if (!ret)
		free_trace_fprobe(tf);
	return ret;
}

static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);
	int i;

	if (trace_fprobe_is_tracepoint(tf))
		seq_putc(m, 't');
	else
		seq_putc(m, 'f');
	if (trace_fprobe_is_return(tf) && tf->fp.nr_maxactive)
		seq_printf(m, "%d", tf->fp.nr_maxactive);
	seq_printf(m, ":%s/%s", trace_probe_group_name(&tf->tp),
				trace_probe_name(&tf->tp));

	seq_printf(m, " %s%s", trace_fprobe_symbol(tf),
			       trace_fprobe_is_return(tf) ? "%return" : "");

	for (i = 0; i < tf->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tf->tp.args[i].name, tf->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}
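
/*
 * Illustratively, a probe created with "f:myexit vfs_read%return ret=$retval"
 * (example names) would be shown here as
 * "f:fprobes/myexit vfs_read%return ret=$retval", since an unspecified group
 * defaults to FPROBE_EVENT_SYSTEM.
 */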

/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 */
static int fprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_fprobe(event, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_fprobe(event, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_fprobe(event, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_fprobe(event, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

/*
 * Register dynevent at core_initcall. This allows the kernel to set up
 * fprobe events in postcore_initcall without tracefs.
 */
static __init int init_fprobe_trace_early(void)
{
	int ret;

	ret = dyn_event_register(&trace_fprobe_ops);
	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = register_tracepoint_module_notifier(&tracepoint_module_nb);
	if (ret)
		return ret;
#endif

	return 0;
}
core_initcall(init_fprobe_trace_early);