xref: /linux/kernel/trace/trace_fprobe.c (revision 26edad06d5c34038c5d15ee082c80a62dcbd74bc)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Fprobe-based tracing events
4  * Copyright (C) 2022 Google LLC.
5  */
6 #define pr_fmt(fmt)	"trace_fprobe: " fmt
7 #include <asm/ptrace.h>
8 
9 #include <linux/fprobe.h>
10 #include <linux/module.h>
11 #include <linux/rculist.h>
12 #include <linux/security.h>
13 #include <linux/tracepoint.h>
14 #include <linux/uaccess.h>
15 
16 #include "trace_dynevent.h"
17 #include "trace_probe.h"
18 #include "trace_probe_kernel.h"
19 #include "trace_probe_tmpl.h"
20 
21 #define FPROBE_EVENT_SYSTEM "fprobes"
22 #define TRACEPOINT_EVENT_SYSTEM "tracepoints"
23 #define RETHOOK_MAXACTIVE_MAX 4096
24 #define TRACEPOINT_STUB ERR_PTR(-ENOENT)
25 
26 static int trace_fprobe_create(const char *raw_command);
27 static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev);
28 static int trace_fprobe_release(struct dyn_event *ev);
29 static bool trace_fprobe_is_busy(struct dyn_event *ev);
30 static bool trace_fprobe_match(const char *system, const char *event,
31 			int argc, const char **argv, struct dyn_event *ev);
32 
/* Dynamic-event operations backing the "f" command in dynamic_events. */
static struct dyn_event_operations trace_fprobe_ops = {
	.create = trace_fprobe_create,
	.show = trace_fprobe_show,
	.is_busy = trace_fprobe_is_busy,
	.free = trace_fprobe_release,
	.match = trace_fprobe_match,
};
40 
/*
 * Fprobe event core functions
 */
struct trace_fprobe {
	struct dyn_event	devent;		/* dynamic-event list linkage */
	struct fprobe		fp;		/* underlying fprobe (entry/exit handlers) */
	const char		*symbol;	/* probed symbol or tracepoint name (owned) */
	struct tracepoint	*tpoint;	/* tracepoint, NULL for function probes */
	struct module		*mod;		/* module that owns the tracepoint, if any */
	struct trace_probe	tp;		/* common probe state (flexible args array) */
};
52 
is_trace_fprobe(struct dyn_event * ev)53 static bool is_trace_fprobe(struct dyn_event *ev)
54 {
55 	return ev->ops == &trace_fprobe_ops;
56 }
57 
/* Convert an embedded dyn_event back into its enclosing trace_fprobe. */
static struct trace_fprobe *to_trace_fprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_fprobe, devent);
}
62 
/**
 * for_each_trace_fprobe - iterate over the trace_fprobe list
 * @pos:	the struct trace_fprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 *
 * Walks the global dyn_event list, skipping non-fprobe events.
 */
#define for_each_trace_fprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_fprobe(dpos) && (pos = to_trace_fprobe(dpos)))
trace_fprobe_is_return(struct trace_fprobe * tf)72 static bool trace_fprobe_is_return(struct trace_fprobe *tf)
73 {
74 	return tf->fp.exit_handler != NULL;
75 }
76 
trace_fprobe_is_tracepoint(struct trace_fprobe * tf)77 static bool trace_fprobe_is_tracepoint(struct trace_fprobe *tf)
78 {
79 	return tf->tpoint != NULL;
80 }
81 
trace_fprobe_symbol(struct trace_fprobe * tf)82 static const char *trace_fprobe_symbol(struct trace_fprobe *tf)
83 {
84 	return tf->symbol ? tf->symbol : "unknown";
85 }
86 
trace_fprobe_is_busy(struct dyn_event * ev)87 static bool trace_fprobe_is_busy(struct dyn_event *ev)
88 {
89 	struct trace_fprobe *tf = to_trace_fprobe(ev);
90 
91 	return trace_probe_is_enabled(&tf->tp);
92 }
93 
trace_fprobe_match_command_head(struct trace_fprobe * tf,int argc,const char ** argv)94 static bool trace_fprobe_match_command_head(struct trace_fprobe *tf,
95 					    int argc, const char **argv)
96 {
97 	char buf[MAX_ARGSTR_LEN + 1];
98 
99 	if (!argc)
100 		return true;
101 
102 	snprintf(buf, sizeof(buf), "%s", trace_fprobe_symbol(tf));
103 	if (strcmp(buf, argv[0]))
104 		return false;
105 	argc--; argv++;
106 
107 	return trace_probe_match_command_args(&tf->tp, argc, argv);
108 }
109 
trace_fprobe_match(const char * system,const char * event,int argc,const char ** argv,struct dyn_event * ev)110 static bool trace_fprobe_match(const char *system, const char *event,
111 			int argc, const char **argv, struct dyn_event *ev)
112 {
113 	struct trace_fprobe *tf = to_trace_fprobe(ev);
114 
115 	if (event[0] != '\0' && strcmp(trace_probe_name(&tf->tp), event))
116 		return false;
117 
118 	if (system && strcmp(trace_probe_group_name(&tf->tp), system))
119 		return false;
120 
121 	return trace_fprobe_match_command_head(tf, argc, argv);
122 }
123 
/* True while the underlying fprobe is registered with the fprobe core. */
static bool trace_fprobe_is_registered(struct trace_fprobe *tf)
{
	return fprobe_is_registered(&tf->fp);
}
128 
/*
 * Note that we don't verify the fetch_insn code, since it does not come
 * from user space.
 */
/*
 * Fetch one probe-argument value: resolve the first opcode against the
 * ftrace_regs context (@rec), then hand off to process_fetch_insn_bottom()
 * for dereference/store processing into the buffer at @dest/@base.
 * Returns a length/0 on success or a negative error code.
 */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
		   void *dest, void *base)
{
	struct ftrace_regs *fregs = rec;
	unsigned long val;
	int ret;

retry:
	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_STACK:
		val = ftrace_regs_get_kernel_stack_nth(fregs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = ftrace_regs_get_stack_pointer(fregs);
		break;
	case FETCH_OP_RETVAL:
		val = ftrace_regs_get_return_value(fregs);
		break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	case FETCH_OP_ARG:
		val = ftrace_regs_get_argument(fregs, code->param);
		break;
	case FETCH_OP_EDATA:
		/* Entry data saved earlier by store_fprobe_entry_data(). */
		val = *(unsigned long *)((unsigned long)edata + code->offset);
		break;
#endif
	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
		code++;
		goto retry;
	default:
		ret = process_common_fetch_insn(code, &val);
		if (ret < 0)
			return ret;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
174 
/* function entry handler */
/*
 * Record one function-entry event (ip + fetched args) into the ring buffer
 * of a single trace_event_file instance.
 */
static nokprobe_inline void
__fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		    struct ftrace_regs *fregs,
		    struct trace_event_file *trace_file)
{
	struct fentry_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	if (WARN_ON_ONCE(call != trace_file->event_call))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	/* Dynamic part of the record depends on the actual argument values. */
	dsize = __get_data_size(&tf->tp, fregs, NULL);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tf->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = ftrace_get_regs(fregs);
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->ip = entry_ip;
	/* Arguments are stored immediately after the fixed header. */
	store_trace_args(&entry[1], &tf->tp, fregs, NULL, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}
206 
/* Emit the entry event to every trace instance linked to this probe. */
static void
fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		  struct ftrace_regs *fregs)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tf->tp)
		__fentry_trace_func(tf, entry_ip, fregs, link->file);
}
NOKPROBE_SYMBOL(fentry_trace_func);
217 
218 static nokprobe_inline
store_fprobe_entry_data(void * edata,struct trace_probe * tp,struct ftrace_regs * fregs)219 void store_fprobe_entry_data(void *edata, struct trace_probe *tp, struct ftrace_regs *fregs)
220 {
221 	struct probe_entry_arg *earg = tp->entry_arg;
222 	unsigned long val = 0;
223 	int i;
224 
225 	if (!earg)
226 		return;
227 
228 	for (i = 0; i < earg->size; i++) {
229 		struct fetch_insn *code = &earg->code[i];
230 
231 		switch (code->op) {
232 		case FETCH_OP_ARG:
233 			val = ftrace_regs_get_argument(fregs, code->param);
234 			break;
235 		case FETCH_OP_ST_EDATA:
236 			*(unsigned long *)((unsigned long)edata + code->offset) = val;
237 			break;
238 		case FETCH_OP_END:
239 			goto end;
240 		default:
241 			break;
242 		}
243 	}
244 end:
245 	return;
246 }
247 
/* function exit handler */
/*
 * Entry-side hook used in support of exit probes: snapshot function
 * arguments into @entry_data at entry so the exit event can print them.
 */
static int trace_fprobe_entry_handler(struct fprobe *fp, unsigned long entry_ip,
				unsigned long ret_ip, struct ftrace_regs *fregs,
				void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);

	if (tf->tp.entry_arg)
		store_fprobe_entry_data(entry_data, &tf->tp, fregs);

	return 0;
}
NOKPROBE_SYMBOL(trace_fprobe_entry_handler)
261 
/*
 * Record one function-exit event (func, ret_ip + fetched args) into the
 * ring buffer of a single trace_event_file instance. @entry_data carries
 * arguments captured at entry, if any.
 */
static nokprobe_inline void
__fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		   unsigned long ret_ip, struct ftrace_regs *fregs,
		   void *entry_data, struct trace_event_file *trace_file)
{
	struct fexit_trace_entry_head *entry;
	struct trace_event_buffer fbuffer;
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	int dsize;

	if (WARN_ON_ONCE(call != trace_file->event_call))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	/* Dynamic part of the record depends on the actual argument values. */
	dsize = __get_data_size(&tf->tp, fregs, entry_data);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tf->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = ftrace_get_regs(fregs);
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->func = entry_ip;
	entry->ret_ip = ret_ip;
	store_trace_args(&entry[1], &tf->tp, fregs, entry_data, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}
293 
/* Emit the exit event to every trace instance linked to this probe. */
static void
fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		 unsigned long ret_ip, struct ftrace_regs *fregs, void *entry_data)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tf->tp)
		__fexit_trace_func(tf, entry_ip, ret_ip, fregs, entry_data, link->file);
}
NOKPROBE_SYMBOL(fexit_trace_func);
304 
305 #ifdef CONFIG_PERF_EVENTS
306 
/* Record a function-entry sample into the perf buffer. Always returns 0. */
static int fentry_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
			    struct ftrace_regs *fregs)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct fentry_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	struct pt_regs *regs;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tf->tp, fregs, NULL);
	__size = sizeof(*entry) + tf->tp.size + dsize;
	/* Round up to u64, excluding the u32 size header perf adds. */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, &regs, &rctx);
	if (!entry)
		return 0;

	regs = ftrace_fill_perf_regs(fregs, regs);

	entry->ip = entry_ip;
	/* Zero the dynamic area before storing the arguments into it. */
	memset(&entry[1], 0, dsize);
	store_trace_args(&entry[1], &tf->tp, fregs, NULL, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(fentry_perf_func);
340 
/* Record a function-exit sample into the perf buffer. */
static void
fexit_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
		unsigned long ret_ip, struct ftrace_regs *fregs,
		void *entry_data)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct fexit_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	struct pt_regs *regs;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tf->tp, fregs, entry_data);
	__size = sizeof(*entry) + tf->tp.size + dsize;
	/* Round up to u64, excluding the u32 size header perf adds. */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, &regs, &rctx);
	if (!entry)
		return;

	regs = ftrace_fill_perf_regs(fregs, regs);

	entry->func = entry_ip;
	entry->ret_ip = ret_ip;
	store_trace_args(&entry[1], &tf->tp, fregs, entry_data, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(fexit_perf_func);
375 #endif	/* CONFIG_PERF_EVENTS */
376 
/* fprobe entry handler: fan out to the ftrace and/or perf back ends. */
static int fentry_dispatcher(struct fprobe *fp, unsigned long entry_ip,
			     unsigned long ret_ip, struct ftrace_regs *fregs,
			     void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
	int ret = 0;

	if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
		fentry_trace_func(tf, entry_ip, fregs);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
		ret = fentry_perf_func(tf, entry_ip, fregs);
#endif
	return ret;
}
NOKPROBE_SYMBOL(fentry_dispatcher);
394 
/* fprobe exit handler: fan out to the ftrace and/or perf back ends. */
static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip,
			     unsigned long ret_ip, struct ftrace_regs *fregs,
			     void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);

	if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
		fexit_trace_func(tf, entry_ip, ret_ip, fregs, entry_data);
#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
		fexit_perf_func(tf, entry_ip, ret_ip, fregs, entry_data);
#endif
}
NOKPROBE_SYMBOL(fexit_dispatcher);
409 
free_trace_fprobe(struct trace_fprobe * tf)410 static void free_trace_fprobe(struct trace_fprobe *tf)
411 {
412 	if (tf) {
413 		trace_probe_cleanup(&tf->tp);
414 		kfree(tf->symbol);
415 		kfree(tf);
416 	}
417 }
418 
/* Since alloc_trace_fprobe() can return error, check the pointer is ERR too. */
/* Enables scoped cleanup via "__free(free_trace_fprobe)" declarations. */
DEFINE_FREE(free_trace_fprobe, struct trace_fprobe *, if (!IS_ERR_OR_NULL(_T)) free_trace_fprobe(_T))
421 
/*
 * Allocate new trace_probe and initialize it (including fprobe).
 * Returns the new probe or an ERR_PTR(); never NULL.
 */
static struct trace_fprobe *alloc_trace_fprobe(const char *group,
					       const char *event,
					       const char *symbol,
					       struct tracepoint *tpoint,
					       struct module *mod,
					       int nargs, bool is_return)
{
	/* __free() releases tf automatically on every error-return path. */
	struct trace_fprobe *tf __free(free_trace_fprobe) = NULL;
	int ret = -ENOMEM;

	tf = kzalloc(struct_size(tf, tp.args, nargs), GFP_KERNEL);
	if (!tf)
		return ERR_PTR(ret);

	tf->symbol = kstrdup(symbol, GFP_KERNEL);
	if (!tf->symbol)
		return ERR_PTR(-ENOMEM);

	/* Exactly one handler is set; it determines entry vs. exit probe. */
	if (is_return)
		tf->fp.exit_handler = fexit_dispatcher;
	else
		tf->fp.entry_handler = fentry_dispatcher;

	tf->tpoint = tpoint;
	tf->mod = mod;

	ret = trace_probe_init(&tf->tp, event, group, false, nargs);
	if (ret < 0)
		return ERR_PTR(ret);

	dyn_event_init(&tf->devent, &trace_fprobe_ops);
	return_ptr(tf);	/* hand ownership to the caller */
}
458 
find_trace_fprobe(const char * event,const char * group)459 static struct trace_fprobe *find_trace_fprobe(const char *event,
460 					      const char *group)
461 {
462 	struct dyn_event *pos;
463 	struct trace_fprobe *tf;
464 
465 	for_each_trace_fprobe(tf, pos)
466 		if (strcmp(trace_probe_name(&tf->tp), event) == 0 &&
467 		    strcmp(trace_probe_group_name(&tf->tp), group) == 0)
468 			return tf;
469 	return NULL;
470 }
471 
__enable_trace_fprobe(struct trace_fprobe * tf)472 static inline int __enable_trace_fprobe(struct trace_fprobe *tf)
473 {
474 	if (trace_fprobe_is_registered(tf))
475 		enable_fprobe(&tf->fp);
476 
477 	return 0;
478 }
479 
__disable_trace_fprobe(struct trace_probe * tp)480 static void __disable_trace_fprobe(struct trace_probe *tp)
481 {
482 	struct trace_fprobe *tf;
483 
484 	list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
485 		if (!trace_fprobe_is_registered(tf))
486 			continue;
487 		disable_fprobe(&tf->fp);
488 	}
489 }
490 
/*
 * Enable trace_probe
 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 */
static int enable_trace_fprobe(struct trace_event_call *call,
			       struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_fprobe *tf;
	bool enabled;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	/* Snapshot before the add_file/set_flag below changes the state. */
	enabled = trace_probe_is_enabled(tp);

	/* This also changes "enabled" state */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);

	/* Only flip the fprobes on the disabled -> enabled transition. */
	if (!enabled) {
		list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
			/* TODO: check the fprobe is gone */
			__enable_trace_fprobe(tf);
		}
	}

	return 0;
}
525 
/*
 * Disable trace_probe
 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
 */
static int disable_trace_fprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		/* Other files still use this event; just drop this link. */
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	/* Turn the fprobes off only when no user (trace or perf) remains. */
	if (!trace_probe_is_enabled(tp))
		__disable_trace_fprobe(tp);

 out:
	if (file)
		/*
		 * Synchronization is done in below function. For perf event,
		 * file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to ensure synchronize
		 * event. We don't need to care about it.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}
563 
/* Event entry printers */
/* Print one entry event as: "NAME: (ip) arg1=... arg2=...". */
static enum print_line_t
print_fentry_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct fentry_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct fentry_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	/* Arguments were stored right after the fixed header. */
	if (trace_probe_print_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}
594 
/* Print one exit event as: "NAME: (ret_ip <- func) arg1=...". */
static enum print_line_t
print_fexit_event(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct fexit_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct fexit_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	/* The probed function itself is printed without the +offset. */
	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (trace_probe_print_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
630 
/* Define the entry event's fields: the fixed "ip" plus the probe args. */
static int fentry_event_define_fields(struct trace_event_call *event_call)
{
	int ret;	/* set and checked inside the DEFINE_FIELD() macro */
	struct fentry_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}
645 
/* Define the exit event's fields: "func" and "ret_ip" plus the probe args. */
static int fexit_event_define_fields(struct trace_event_call *event_call)
{
	int ret;	/* set and checked inside the DEFINE_FIELD() macro */
	struct fexit_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}
661 
/* Output-formatting callbacks for entry and exit events. */
static struct trace_event_functions fentry_funcs = {
	.trace		= print_fentry_event
};

static struct trace_event_functions fexit_funcs = {
	.trace		= print_fexit_event
};

/* Field layouts are generated lazily via the define_fields callbacks. */
static struct trace_event_fields fentry_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = fentry_event_define_fields },
	{}
};

static struct trace_event_fields fexit_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = fexit_event_define_fields },
	{}
};
681 
682 static int fprobe_register(struct trace_event_call *event,
683 			   enum trace_reg type, void *data);
684 
/* Wire the event call's printer, field table, flags and reg callback. */
static inline void init_trace_event_call(struct trace_fprobe *tf)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);

	/* Entry and exit events print and lay out their records differently. */
	if (trace_fprobe_is_return(tf)) {
		call->event.funcs = &fexit_funcs;
		call->class->fields_array = fexit_fields_array;
	} else {
		call->event.funcs = &fentry_funcs;
		call->class->fields_array = fentry_fields_array;
	}

	call->flags = TRACE_EVENT_FL_FPROBE;
	call->class->reg = fprobe_register;
}
700 
register_fprobe_event(struct trace_fprobe * tf)701 static int register_fprobe_event(struct trace_fprobe *tf)
702 {
703 	init_trace_event_call(tf);
704 
705 	return trace_probe_register_event_call(&tf->tp);
706 }
707 
/* Unregister the event call; fails (non-zero) while the event is in use. */
static int unregister_fprobe_event(struct trace_fprobe *tf)
{
	return trace_probe_unregister_event_call(&tf->tp);
}
712 
/*
 * Attach an fprobe to a tracepoint via its __probestub_##TP stub function.
 * NOTE(review): "regsiter" is a long-standing typo for "register" in this
 * symbol name; renaming would need all call sites updated together.
 */
static int __regsiter_tracepoint_fprobe(struct trace_fprobe *tf)
{
	struct tracepoint *tpoint = tf->tpoint;
	unsigned long ip = (unsigned long)tpoint->probestub;
	int ret;

	/*
	 * Here, we do 2 steps to enable fprobe on a tracepoint.
	 * At first, put __probestub_##TP function on the tracepoint
	 * and put a fprobe on the stub function.
	 */
	ret = tracepoint_probe_register_prio_may_exist(tpoint,
				tpoint->probestub, NULL, 0);
	if (ret < 0)
		return ret;
	return register_fprobe_ips(&tf->fp, &ip, 1);
}
730 
/* Internal register function - just handle fprobe and flags */
static int __register_trace_fprobe(struct trace_fprobe *tf)
{
	int i, ret;

	/* Should we need new LOCKDOWN flag for fprobe? */
	ret = security_locked_down(LOCKDOWN_KPROBES);
	if (ret)
		return ret;

	if (trace_fprobe_is_registered(tf))
		return -EINVAL;

	/* Finalize (resolve) each fetch argument before arming the probe. */
	for (i = 0; i < tf->tp.nr_args; i++) {
		ret = traceprobe_update_arg(&tf->tp.args[i]);
		if (ret)
			return ret;
	}

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tf->tp))
		tf->fp.flags &= ~FPROBE_FL_DISABLED;
	else
		tf->fp.flags |= FPROBE_FL_DISABLED;

	if (trace_fprobe_is_tracepoint(tf)) {

		/* This tracepoint is not loaded yet */
		if (tf->tpoint == TRACEPOINT_STUB)
			return 0;

		return __regsiter_tracepoint_fprobe(tf);
	}

	/* TODO: handle filter, nofilter or symbol list */
	return register_fprobe(&tf->fp, tf->symbol, NULL);
}
768 
/* Internal unregister function - just handle fprobe and flags */
static void __unregister_trace_fprobe(struct trace_fprobe *tf)
{
	if (trace_fprobe_is_registered(tf)) {
		unregister_fprobe(&tf->fp);
		/* Reset fprobe state so the probe could be re-registered. */
		memset(&tf->fp, 0, sizeof(tf->fp));
		if (trace_fprobe_is_tracepoint(tf)) {
			/* Also detach the probestub from the tracepoint. */
			tracepoint_probe_unregister(tf->tpoint,
					tf->tpoint->probestub, NULL);
			tf->tpoint = NULL;
			tf->mod = NULL;
		}
	}
}
783 
/* TODO: make this trace_*probe common function */
/* Unregister a trace_probe and probe_event */
static int unregister_trace_fprobe(struct trace_fprobe *tf)
{
	/* If other probes are on the event, just unregister fprobe */
	if (trace_probe_has_sibling(&tf->tp))
		goto unreg;

	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tf->tp))
		return -EBUSY;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tf->tp)))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_fprobe_event(tf))
		return -EBUSY;

unreg:
	__unregister_trace_fprobe(tf);
	dyn_event_remove(&tf->devent);
	trace_probe_unlink(&tf->tp);

	return 0;
}
811 
/*
 * Check whether @comp duplicates a probe already attached to @orig's event:
 * same symbol and identical per-argument command ('comm') strings.
 * Note: @orig is reused as the list cursor below.
 */
static bool trace_fprobe_has_same_fprobe(struct trace_fprobe *orig,
					 struct trace_fprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	int i;

	list_for_each_entry(orig, &tpe->probes, tp.list) {
		if (strcmp(trace_fprobe_symbol(orig),
			   trace_fprobe_symbol(comp)))
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		/* All arguments matched, so this is the same probe. */
		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}
839 
/*
 * Append @tf to the existing event @to (same event/group name). Fails with
 * -EEXIST if probe types, argument types differ, or @tf is an exact dup.
 */
static int append_trace_fprobe(struct trace_fprobe *tf, struct trace_fprobe *to)
{
	int ret;

	if (trace_fprobe_is_return(tf) != trace_fprobe_is_return(to) ||
	    trace_fprobe_is_tracepoint(tf) != trace_fprobe_is_tracepoint(to)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, DIFF_PROBE_TYPE);
		return -EEXIST;
	}
	ret = trace_probe_compare_arg_type(&tf->tp, &to->tp);
	if (ret) {
		/* Note that argument starts index = 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_fprobe_has_same_fprobe(to, tf)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tf->tp, &to->tp);
	if (ret)
		return ret;

	ret = __register_trace_fprobe(tf);
	if (ret)
		/* Undo the append so the shared event stays consistent. */
		trace_probe_unlink(&tf->tp);
	else
		dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));

	return ret;
}
876 
/* Register a trace_probe and probe_event */
static int register_trace_fprobe(struct trace_fprobe *tf)
{
	struct trace_fprobe *old_tf;
	int ret;

	/* Serializes against other dynamic-event updates; auto-unlocked. */
	guard(mutex)(&event_mutex);

	/* An event with the same name/group already exists: append to it. */
	old_tf = find_trace_fprobe(trace_probe_name(&tf->tp),
				   trace_probe_group_name(&tf->tp));
	if (old_tf)
		return append_trace_fprobe(tf, old_tf);

	/* Register new event */
	ret = register_fprobe_event(tf);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		return ret;
	}

	/* Register fprobe */
	ret = __register_trace_fprobe(tf);
	if (ret < 0)
		unregister_fprobe_event(tf);
	else
		dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));

	return ret;
}
910 
/* Shared state for the tracepoint-search callbacks below. */
struct __find_tracepoint_cb_data {
	const char *tp_name;		/* name to look for */
	struct tracepoint *tpoint;	/* result: first matching tracepoint */
	struct module *mod;		/* in/out: owning module, see callbacks */
};
916 
/*
 * Module-tracepoint iterator callback: record the first name match and pin
 * the owning module so it cannot unload during probe setup. If the module
 * reference cannot be taken, the match is discarded.
 */
static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mod, void *priv)
{
	struct __find_tracepoint_cb_data *data = priv;

	if (!data->tpoint && !strcmp(data->tp_name, tp->name)) {
		data->tpoint = tp;
		/* data->mod preset (find_tracepoint_in_module) skips the get. */
		if (!data->mod) {
			data->mod = mod;
			if (!try_module_get(data->mod)) {
				data->tpoint = NULL;
				data->mod = NULL;
			}
		}
	}
}
932 
__find_tracepoint_cb(struct tracepoint * tp,void * priv)933 static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
934 {
935 	struct __find_tracepoint_cb_data *data = priv;
936 
937 	if (!data->tpoint && !strcmp(data->tp_name, tp->name))
938 		data->tpoint = tp;
939 }
940 
/*
 * Find a tracepoint from kernel and module. If the tracepoint is in a module,
 * this increments the module refcount to prevent unloading until the
 * trace_fprobe is registered to the list. After registering the trace_fprobe
 * on the trace_fprobe list, the module refcount is decremented because
 * tracepoint_probe_module_cb will handle it.
 *
 * Note: *tp_mod is written only when the kernel-wide search misses and the
 * module search runs; callers must pre-initialize it.
 */
static struct tracepoint *find_tracepoint(const char *tp_name,
					  struct module **tp_mod)
{
	struct __find_tracepoint_cb_data data = {
		.tp_name = tp_name,
		.mod = NULL,
	};

	for_each_kernel_tracepoint(__find_tracepoint_cb, &data);

	/* Fall back to module tracepoints only if the kernel has none. */
	if (!data.tpoint && IS_ENABLED(CONFIG_MODULES)) {
		for_each_module_tracepoint(__find_tracepoint_module_cb, &data);
		*tp_mod = data.mod;
	}

	return data.tpoint;
}
965 
966 #ifdef CONFIG_MODULES
/* Re-enable every fprobe sharing @tf's event (after late registration). */
static void reenable_trace_fprobe(struct trace_fprobe *tf)
{
	struct trace_probe *tp = &tf->tp;

	/* Note: @tf is reused as the list cursor here. */
	list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
		__enable_trace_fprobe(tf);
	}
}
975 
/*
 * Search one specific module for a tracepoint named @tp_name. Presetting
 * data.mod makes the callback skip taking a module reference.
 */
static struct tracepoint *find_tracepoint_in_module(struct module *mod,
						    const char *tp_name)
{
	struct __find_tracepoint_cb_data data = {
		.tp_name = tp_name,
		.mod = mod,
	};

	for_each_tracepoint_in_module(mod, __find_tracepoint_module_cb, &data);
	return data.tpoint;
}
987 
/*
 * Module notifier: bind or unbind tracepoint fprobe events when the module
 * providing their tracepoint comes or goes.  The trace_fprobe list is walked
 * under event_mutex so it cannot change beneath us.
 */
static int __tracepoint_probe_module_cb(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct tp_module *tp_mod = data;
	struct tracepoint *tpoint;
	struct trace_fprobe *tf;
	struct dyn_event *pos;

	/* Only load/unload transitions are interesting. */
	if (val != MODULE_STATE_GOING && val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	mutex_lock(&event_mutex);
	for_each_trace_fprobe(tf, pos) {
		if (val == MODULE_STATE_COMING && tf->tpoint == TRACEPOINT_STUB) {
			/*
			 * Event was created before its tracepoint existed
			 * (stubbed); check whether this module provides it.
			 */
			tpoint = find_tracepoint_in_module(tp_mod->mod, tf->symbol);
			if (tpoint) {
				tf->tpoint = tpoint;
				tf->mod = tp_mod->mod;
				/* NOTE(review): callee name is misspelled ("regsiter") at its definition. */
				if (!WARN_ON_ONCE(__regsiter_tracepoint_fprobe(tf)) &&
				    trace_probe_is_enabled(&tf->tp))
					reenable_trace_fprobe(tf);
			}
		} else if (val == MODULE_STATE_GOING && tp_mod->mod == tf->mod) {
			/* Module is unloading: detach from its tracepoint. */
			tracepoint_probe_unregister(tf->tpoint,
					tf->tpoint->probestub, NULL);
			tf->tpoint = NULL;
			tf->mod = NULL;
		}
	}
	mutex_unlock(&event_mutex);

	return NOTIFY_DONE;
}
1021 
/* Module coming/going notifier; registered in init_fprobe_trace_early(). */
static struct notifier_block tracepoint_module_nb = {
	.notifier_call = __tracepoint_probe_module_cb,
};
1025 #endif /* CONFIG_MODULES */
1026 
/*
 * Parse argv[1] as a symbol (or tracepoint name) and detect whether this is
 * an exit (return) probe.  On success, *symbol points to a newly allocated
 * copy of the name (caller frees) and *is_return is updated.  A "%return"
 * suffix is only valid for function probes, and a "$retval" fetch argument
 * implicitly turns a function probe into a return probe.
 */
static int parse_symbol_and_return(int argc, const char *argv[],
				   char **symbol, bool *is_return,
				   bool is_tracepoint)
{
	char *suffix = strchr(argv[1], '%');
	int i;

	if (suffix) {
		int len = suffix - argv[1];

		/* "%return" is the only valid suffix, and never on tracepoints. */
		if (is_tracepoint || strcmp(suffix, "%return") != 0) {
			trace_probe_log_err(len, BAD_ADDR_SUFFIX);
			return -EINVAL;
		}
		*is_return = true;
		*symbol = kmemdup_nul(argv[1], len, GFP_KERNEL);
	} else {
		*symbol = kstrdup(argv[1], GFP_KERNEL);
	}
	if (!*symbol)
		return -ENOMEM;

	if (*is_return)
		return 0;

	if (is_tracepoint) {
		char *p = *symbol;

		/* Tracepoint names may only contain [A-Za-z0-9_]. */
		while (*p && (isalnum(*p) || *p == '_'))
			p++;
		if (*p) {
			/* found an invalid character */
			trace_probe_log_err(p - *symbol, BAD_TP_NAME);
			kfree(*symbol);
			*symbol = NULL;
			return -EINVAL;
		}
	}

	/* If there is $retval, this should be a return fprobe. */
	for (i = 2; i < argc; i++) {
		char *p = strstr(argv[i], "$retval");

		if (!p || isalnum(p[7]) || p[7] == '_')
			continue;
		if (is_tracepoint) {
			trace_probe_log_set_index(i);
			trace_probe_log_err(p - argv[i], RETVAL_ON_PROBE);
			kfree(*symbol);
			*symbol = NULL;
			return -EINVAL;
		}
		*is_return = true;
		break;
	}
	return 0;
}
1082 
DEFINE_FREE(module_put,struct module *,if (_T)module_put (_T))1083 DEFINE_FREE(module_put, struct module *, if (_T) module_put(_T))
1084 
/*
 * Parse the textual event definition and create/register a new fprobe or
 * tracepoint event.  Returns 0 on success; on failure the error position
 * has already been recorded via trace_probe_log_*().  Cleanup on the error
 * paths is handled by the __free() annotations on the locals below.
 */
static int trace_fprobe_create_internal(int argc, const char *argv[],
					struct traceprobe_parse_context *ctx)
{
	/*
	 * Argument syntax:
	 *  - Add fentry probe:
	 *      f[:[GRP/][EVENT]] [MOD:]KSYM [FETCHARGS]
	 *  - Add fexit probe:
	 *      f[N][:[GRP/][EVENT]] [MOD:]KSYM%return [FETCHARGS]
	 *  - Add tracepoint probe:
	 *      t[:[GRP/][EVENT]] TRACEPOINT [FETCHARGS]
	 *
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth entry of stack (N:0-)
	 *  $argN	: fetch Nth argument (N:1-)
	 *  $comm       : fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
	struct trace_fprobe *tf __free(free_trace_fprobe) = NULL;
	int i, new_argc = 0, ret = 0;
	bool is_return = false;
	char *symbol __free(kfree) = NULL;
	const char *event = NULL, *group = FPROBE_EVENT_SYSTEM;
	const char **new_argv __free(kfree) = NULL;
	char buf[MAX_EVENT_NAME_LEN];
	char gbuf[MAX_EVENT_NAME_LEN];
	char sbuf[KSYM_NAME_LEN];
	char abuf[MAX_BTF_ARGS_LEN];
	char *dbuf __free(kfree) = NULL;
	bool is_tracepoint = false;
	struct module *tp_mod __free(module_put) = NULL;
	struct tracepoint *tpoint = NULL;

	if ((argv[0][0] != 'f' && argv[0][0] != 't') || argc < 2)
		return -ECANCELED;

	if (argv[0][0] == 't') {
		is_tracepoint = true;
		group = TRACEPOINT_EVENT_SYSTEM;
	}

	/* Anything after the leading 'f'/'t' must start with ':' (GRP/EVENT). */
	if (argv[0][1] != '\0') {
		if (argv[0][1] != ':') {
			trace_probe_log_set_index(0);
			trace_probe_log_err(1, BAD_MAXACT);
			return -EINVAL;
		}
		event = &argv[0][2];
	}

	trace_probe_log_set_index(1);

	/* a symbol(or tracepoint) must be specified */
	ret = parse_symbol_and_return(argc, argv, &symbol, &is_return, is_tracepoint);
	if (ret < 0)
		return -EINVAL;

	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, gbuf,
						  event - argv[0]);
		if (ret)
			return -EINVAL;
	}

	if (!event) {
		/* Make a new event name */
		if (is_tracepoint)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%s%s",
				 isdigit(*symbol) ? "_" : "", symbol);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%s__%s", symbol,
				 is_return ? "exit" : "entry");
		sanitize_event_name(buf);
		event = buf;
	}

	if (is_return)
		ctx->flags |= TPARG_FL_RETURN;
	else
		ctx->flags |= TPARG_FL_FENTRY;

	if (is_tracepoint) {
		ctx->flags |= TPARG_FL_TPOINT;
		tpoint = find_tracepoint(symbol, &tp_mod);
		if (tpoint) {
			/* Resolve the probestub's symbol name for arg parsing. */
			ctx->funcname = kallsyms_lookup(
				(unsigned long)tpoint->probestub,
				NULL, NULL, NULL, sbuf);
		} else if (IS_ENABLED(CONFIG_MODULES)) {
				/* This *may* be loaded afterwards */
				tpoint = TRACEPOINT_STUB;
				ctx->funcname = symbol;
		} else {
			trace_probe_log_set_index(1);
			trace_probe_log_err(0, NO_TRACEPOINT);
			return -EINVAL;
		}
	} else
		ctx->funcname = symbol;

	/* Skip "f|t[:EVENT]" and the symbol; the remainder are fetch args. */
	argc -= 2; argv += 2;
	new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
					       abuf, MAX_BTF_ARGS_LEN, ctx);
	if (IS_ERR(new_argv))
		return PTR_ERR(new_argv);
	if (new_argv) {
		argc = new_argc;
		argv = new_argv;
	}
	if (argc > MAX_TRACE_ARGS)
		return -E2BIG;

	ret = traceprobe_expand_dentry_args(argc, argv, &dbuf);
	if (ret)
		return ret;

	/* setup a probe */
	tf = alloc_trace_fprobe(group, event, symbol, tpoint, tp_mod,
				argc, is_return);
	if (IS_ERR(tf)) {
		ret = PTR_ERR(tf);
		/* This must return -ENOMEM, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		return ret;
	}

	/* parse arguments */
	for (i = 0; i < argc; i++) {
		trace_probe_log_set_index(i + 2);
		ctx->offset = 0;
		ret = traceprobe_parse_probe_arg(&tf->tp, i, argv[i], ctx);
		if (ret)
			return ret;	/* This can be -ENOMEM */
	}

	if (is_return && tf->tp.entry_arg) {
		tf->fp.entry_handler = trace_fprobe_entry_handler;
		tf->fp.entry_data_size = traceprobe_get_entry_data_size(&tf->tp);
		if (ALIGN(tf->fp.entry_data_size, sizeof(long)) > MAX_FPROBE_DATA_SIZE) {
			trace_probe_log_set_index(2);
			trace_probe_log_err(0, TOO_MANY_EARGS);
			return -E2BIG;
		}
	}

	ret = traceprobe_set_print_fmt(&tf->tp,
			is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL);
	if (ret < 0)
		return ret;

	ret = register_trace_fprobe(tf);
	if (ret) {
		trace_probe_log_set_index(1);
		if (ret == -EILSEQ)
			trace_probe_log_err(0, BAD_INSN_BNDRY);
		else if (ret == -ENOENT)
			trace_probe_log_err(0, BAD_PROBE_ADDR);
		else if (ret != -ENOMEM && ret != -EEXIST)
			trace_probe_log_err(0, FAIL_REG_PROBE);
		return -EINVAL;
	}

	/* 'tf' is successfully registered. To avoid freeing, assign NULL. */
	tf = NULL;

	return 0;
}
1262 
trace_fprobe_create_cb(int argc,const char * argv[])1263 static int trace_fprobe_create_cb(int argc, const char *argv[])
1264 {
1265 	struct traceprobe_parse_context ctx = {
1266 		.flags = TPARG_FL_KERNEL | TPARG_FL_FPROBE,
1267 	};
1268 	int ret;
1269 
1270 	trace_probe_log_init("trace_fprobe", argc, argv);
1271 	ret = trace_fprobe_create_internal(argc, argv, &ctx);
1272 	traceprobe_finish_parse(&ctx);
1273 	trace_probe_log_clear();
1274 	return ret;
1275 }
1276 
/* dyn_event_operations::create handler: tokenize and dispatch to the parser. */
static int trace_fprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, trace_fprobe_create_cb);
}
1281 
/* dyn_event free handler: unregister the event, then free it if that worked. */
static int trace_fprobe_release(struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);
	int err = unregister_trace_fprobe(tf);

	if (err)
		return err;

	free_trace_fprobe(tf);
	return 0;
}
1291 
trace_fprobe_show(struct seq_file * m,struct dyn_event * ev)1292 static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev)
1293 {
1294 	struct trace_fprobe *tf = to_trace_fprobe(ev);
1295 	int i;
1296 
1297 	if (trace_fprobe_is_tracepoint(tf))
1298 		seq_putc(m, 't');
1299 	else
1300 		seq_putc(m, 'f');
1301 	seq_printf(m, ":%s/%s", trace_probe_group_name(&tf->tp),
1302 				trace_probe_name(&tf->tp));
1303 
1304 	seq_printf(m, " %s%s", trace_fprobe_symbol(tf),
1305 			       trace_fprobe_is_return(tf) ? "%return" : "");
1306 
1307 	for (i = 0; i < tf->tp.nr_args; i++)
1308 		seq_printf(m, " %s=%s", tf->tp.args[i].name, tf->tp.args[i].comm);
1309 	seq_putc(m, '\n');
1310 
1311 	return 0;
1312 }
1313 
1314 /*
1315  * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1316  */
fprobe_register(struct trace_event_call * event,enum trace_reg type,void * data)1317 static int fprobe_register(struct trace_event_call *event,
1318 			   enum trace_reg type, void *data)
1319 {
1320 	struct trace_event_file *file = data;
1321 
1322 	switch (type) {
1323 	case TRACE_REG_REGISTER:
1324 		return enable_trace_fprobe(event, file);
1325 	case TRACE_REG_UNREGISTER:
1326 		return disable_trace_fprobe(event, file);
1327 
1328 #ifdef CONFIG_PERF_EVENTS
1329 	case TRACE_REG_PERF_REGISTER:
1330 		return enable_trace_fprobe(event, NULL);
1331 	case TRACE_REG_PERF_UNREGISTER:
1332 		return disable_trace_fprobe(event, NULL);
1333 	case TRACE_REG_PERF_OPEN:
1334 	case TRACE_REG_PERF_CLOSE:
1335 	case TRACE_REG_PERF_ADD:
1336 	case TRACE_REG_PERF_DEL:
1337 		return 0;
1338 #endif
1339 	}
1340 	return 0;
1341 }
1342 
1343 /*
1344  * Register dynevent at core_initcall. This allows kernel to setup fprobe
1345  * events in postcore_initcall without tracefs.
1346  */
init_fprobe_trace_early(void)1347 static __init int init_fprobe_trace_early(void)
1348 {
1349 	int ret;
1350 
1351 	ret = dyn_event_register(&trace_fprobe_ops);
1352 	if (ret)
1353 		return ret;
1354 
1355 #ifdef CONFIG_MODULES
1356 	ret = register_tracepoint_module_notifier(&tracepoint_module_nb);
1357 	if (ret)
1358 		return ret;
1359 #endif
1360 
1361 	return 0;
1362 }
1363 core_initcall(init_fprobe_trace_early);
1364