xref: /linux/kernel/trace/trace_fprobe.c (revision 762abbc0d09f7ae123c82d315eb1a961c1a2cf7b)
// SPDX-License-Identifier: GPL-2.0
/*
 * Fprobe-based tracing events
 * Copyright (C) 2022 Google LLC.
 */
#define pr_fmt(fmt)	"trace_fprobe: " fmt
#include <asm/ptrace.h>

#include <linux/fprobe.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/security.h>
#include <linux/tracepoint.h>
#include <linux/uaccess.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_kernel.h"
#include "trace_probe_tmpl.h"

#define FPROBE_EVENT_SYSTEM "fprobes"
#define TRACEPOINT_EVENT_SYSTEM "tracepoints"
#define RETHOOK_MAXACTIVE_MAX 4096
/* Placeholder for a tracepoint in a module that is not loaded yet */
#define TRACEPOINT_STUB ERR_PTR(-ENOENT)

static int trace_fprobe_create(const char *raw_command);
static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_fprobe_release(struct dyn_event *ev);
static bool trace_fprobe_is_busy(struct dyn_event *ev);
static bool trace_fprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_fprobe_ops = {
	.create = trace_fprobe_create,
	.show = trace_fprobe_show,
	.is_busy = trace_fprobe_is_busy,
	.free = trace_fprobe_release,
	.match = trace_fprobe_match,
};

/*
 * Fprobe event core functions
 */
struct trace_fprobe {
	struct dyn_event	devent;
	struct fprobe		fp;
	const char		*symbol;	/* probed function symbol or tracepoint name */
	struct tracepoint	*tpoint;	/* target tracepoint, NULL for function probes */
	struct module		*mod;		/* module that owns the tracepoint, if any */
	struct trace_probe	tp;
};

static bool is_trace_fprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_fprobe_ops;
}

static struct trace_fprobe *to_trace_fprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_fprobe, devent);
}

/**
 * for_each_trace_fprobe - iterate over the trace_fprobe list
 * @pos:	the struct trace_fprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_fprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_fprobe(dpos) && (pos = to_trace_fprobe(dpos)))
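/*
 * Usage sketch (illustrative; mirrors find_trace_fprobe() below):
 *
 *	struct dyn_event *pos;
 *	struct trace_fprobe *tf;
 *
 *	for_each_trace_fprobe(tf, pos)
 *		pr_info("fprobe event on %s\n", trace_fprobe_symbol(tf));
 *
 * The dyn_event list is protected by event_mutex, so hold it while iterating.
 */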

static bool trace_fprobe_is_return(struct trace_fprobe *tf)
{
	return tf->fp.exit_handler != NULL;
}

static bool trace_fprobe_is_tracepoint(struct trace_fprobe *tf)
{
	return tf->tpoint != NULL;
}

static const char *trace_fprobe_symbol(struct trace_fprobe *tf)
{
	return tf->symbol ? tf->symbol : "unknown";
}

static bool trace_fprobe_is_busy(struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);

	return trace_probe_is_enabled(&tf->tp);
}

static bool trace_fprobe_match_command_head(struct trace_fprobe *tf,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];

	if (!argc)
		return true;

	snprintf(buf, sizeof(buf), "%s", trace_fprobe_symbol(tf));
	if (strcmp(buf, argv[0]))
		return false;
	argc--; argv++;

	return trace_probe_match_command_args(&tf->tp, argc, argv);
}

static bool trace_fprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);

	if (event[0] != '\0' && strcmp(trace_probe_name(&tf->tp), event))
		return false;

	if (system && strcmp(trace_probe_group_name(&tf->tp), system))
		return false;

	return trace_fprobe_match_command_head(tf, argc, argv);
}

static bool trace_fprobe_is_registered(struct trace_fprobe *tf)
{
	return fprobe_is_registered(&tf->fp);
}

/*
 * Note that we don't verify the fetch_insn code, since it does not come
 * from user space.
 */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
		   void *dest, void *base)
{
	struct pt_regs *regs = rec;
	unsigned long val;
	int ret;

retry:
	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_STACK:
		val = regs_get_kernel_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = kernel_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	case FETCH_OP_ARG:
		val = regs_get_kernel_argument(regs, code->param);
		break;
	case FETCH_OP_EDATA:
		val = *(unsigned long *)((unsigned long)edata + code->offset);
		break;
#endif
	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
		code++;
		goto retry;
	default:
		ret = process_common_fetch_insn(code, &val);
		if (ret < 0)
			return ret;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn);
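
/*
 * For reference, a fetch program is a small fetch_insn array built by
 * traceprobe_parse_probe_arg(). As a rough sketch (op names from
 * trace_probe.h; the exact sequence depends on the argument spec), an
 * argument like "$arg1:u64" compiles to approximately:
 *
 *	{ .op = FETCH_OP_ARG, .param = 0 },	// stage 1: read 1st argument
 *	{ .op = FETCH_OP_ST_RAW, .size = 8 },	// stage 2: store 8 raw bytes
 *	{ .op = FETCH_OP_END },
 *
 * process_fetch_insn() above runs stage 1 and hands the fetched value to
 * process_fetch_insn_bottom() for the storing stages.
 */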

/* function entry handler */
static nokprobe_inline void
__fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		    struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct fentry_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	if (WARN_ON_ONCE(call != trace_file->event_call))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	dsize = __get_data_size(&tf->tp, regs, NULL);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tf->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = regs;
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->ip = entry_ip;
	store_trace_args(&entry[1], &tf->tp, regs, NULL, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

static void
fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		  struct pt_regs *regs)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tf->tp)
		__fentry_trace_func(tf, entry_ip, regs, link->file);
}
NOKPROBE_SYMBOL(fentry_trace_func);

/* function entry handler for function exit events (captures entry data) */
static int trace_fprobe_entry_handler(struct fprobe *fp, unsigned long entry_ip,
				unsigned long ret_ip, struct ftrace_regs *fregs,
				void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (regs && tf->tp.entry_arg)
		store_trace_entry_data(entry_data, &tf->tp, regs);

	return 0;
}
NOKPROBE_SYMBOL(trace_fprobe_entry_handler);
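
/*
 * Entry-data flow for a return probe that fetches entry arguments, e.g.
 * (illustrative) "f:myexit vfs_read%return count=$arg3": at function entry,
 * trace_fprobe_entry_handler() above copies the argument into entry_data;
 * at function exit, process_fetch_insn() reads it back via FETCH_OP_EDATA
 * and __fexit_trace_func() below records it in the trace buffer.
 */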

static nokprobe_inline void
__fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		   unsigned long ret_ip, struct pt_regs *regs,
		   void *entry_data, struct trace_event_file *trace_file)
{
	struct fexit_trace_entry_head *entry;
	struct trace_event_buffer fbuffer;
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	int dsize;

	if (WARN_ON_ONCE(call != trace_file->event_call))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	dsize = __get_data_size(&tf->tp, regs, entry_data);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tf->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = regs;
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->func = entry_ip;
	entry->ret_ip = ret_ip;
	store_trace_args(&entry[1], &tf->tp, regs, entry_data, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

static void
fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		 unsigned long ret_ip, struct pt_regs *regs, void *entry_data)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tf->tp)
		__fexit_trace_func(tf, entry_ip, ret_ip, regs, entry_data, link->file);
}
NOKPROBE_SYMBOL(fexit_trace_func);

#ifdef CONFIG_PERF_EVENTS

static int fentry_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
			    struct pt_regs *regs)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct fentry_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tf->tp, regs, NULL);
	__size = sizeof(*entry) + tf->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
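	/*
	 * Worked example of the rounding above (the u32 is the size header
	 * perf prepends to a raw sample, and the whole sample must stay
	 * u64-aligned): if __size were 21, then 21 + 4 = 25 rounds up to 32,
	 * so size becomes 32 - 4 = 28.
	 */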

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return 0;

	entry->ip = entry_ip;
	memset(&entry[1], 0, dsize);
	store_trace_args(&entry[1], &tf->tp, regs, NULL, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(fentry_perf_func);

static void
fexit_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
		unsigned long ret_ip, struct pt_regs *regs,
		void *entry_data)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct fexit_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tf->tp, regs, entry_data);
	__size = sizeof(*entry) + tf->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = entry_ip;
	entry->ret_ip = ret_ip;
	store_trace_args(&entry[1], &tf->tp, regs, entry_data, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(fexit_perf_func);
#endif	/* CONFIG_PERF_EVENTS */

static int fentry_dispatcher(struct fprobe *fp, unsigned long entry_ip,
			     unsigned long ret_ip, struct ftrace_regs *fregs,
			     void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
	struct pt_regs *regs = ftrace_get_regs(fregs);
	int ret = 0;

	if (!regs)
		return 0;

	if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
		fentry_trace_func(tf, entry_ip, regs);
#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
		ret = fentry_perf_func(tf, entry_ip, regs);
#endif
	return ret;
}
NOKPROBE_SYMBOL(fentry_dispatcher);

static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip,
			     unsigned long ret_ip, struct ftrace_regs *fregs,
			     void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (!regs)
		return;

	if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
		fexit_trace_func(tf, entry_ip, ret_ip, regs, entry_data);
#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
		fexit_perf_func(tf, entry_ip, ret_ip, regs, entry_data);
#endif
}
NOKPROBE_SYMBOL(fexit_dispatcher);

static void free_trace_fprobe(struct trace_fprobe *tf)
{
	if (tf) {
		trace_probe_cleanup(&tf->tp);
		kfree(tf->symbol);
		kfree(tf);
	}
}

/*
 * Allocate new trace_probe and initialize it (including fprobe).
 */
static struct trace_fprobe *alloc_trace_fprobe(const char *group,
					       const char *event,
					       const char *symbol,
					       struct tracepoint *tpoint,
					       struct module *mod,
					       int maxactive,
					       int nargs, bool is_return)
{
	struct trace_fprobe *tf;
	int ret = -ENOMEM;

	tf = kzalloc(struct_size(tf, tp.args, nargs), GFP_KERNEL);
	if (!tf)
		return ERR_PTR(ret);

	tf->symbol = kstrdup(symbol, GFP_KERNEL);
	if (!tf->symbol)
		goto error;

	if (is_return)
		tf->fp.exit_handler = fexit_dispatcher;
	else
		tf->fp.entry_handler = fentry_dispatcher;

	tf->tpoint = tpoint;
	tf->mod = mod;
	tf->fp.nr_maxactive = maxactive;

	ret = trace_probe_init(&tf->tp, event, group, false, nargs);
	if (ret < 0)
		goto error;

	dyn_event_init(&tf->devent, &trace_fprobe_ops);
	return tf;
error:
	free_trace_fprobe(tf);
	return ERR_PTR(ret);
}

static struct trace_fprobe *find_trace_fprobe(const char *event,
					      const char *group)
{
	struct dyn_event *pos;
	struct trace_fprobe *tf;

	for_each_trace_fprobe(tf, pos)
		if (strcmp(trace_probe_name(&tf->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tf->tp), group) == 0)
			return tf;
	return NULL;
}

static inline int __enable_trace_fprobe(struct trace_fprobe *tf)
{
	if (trace_fprobe_is_registered(tf))
		enable_fprobe(&tf->fp);

	return 0;
}

static void __disable_trace_fprobe(struct trace_probe *tp)
{
	struct trace_fprobe *tf;

	list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
		if (!trace_fprobe_is_registered(tf))
			continue;
		disable_fprobe(&tf->fp);
	}
}

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable
 * the "trace" handler.
 */
static int enable_trace_fprobe(struct trace_event_call *call,
			       struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_fprobe *tf;
	bool enabled;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This also changes "enabled" state */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else {
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	if (!enabled) {
		list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
			/* TODO: check the fprobe is gone */
			__enable_trace_fprobe(tf);
		}
	}

	return 0;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable
 * the "trace" handler.
 */
static int disable_trace_fprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else {
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
	}

	if (!trace_probe_is_enabled(tp))
		__disable_trace_fprobe(tp);

 out:
	if (file)
		/*
		 * Synchronization is done in the function below. For the perf
		 * event case, file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to synchronize the
		 * event, so we don't need to care about it here.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}

/* Event entry printers */
static enum print_line_t
print_fentry_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct fentry_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct fentry_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (trace_probe_print_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_fexit_event(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct fexit_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct fexit_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (trace_probe_print_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

static int fentry_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct fentry_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static int fexit_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct fexit_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static struct trace_event_functions fentry_funcs = {
	.trace		= print_fentry_event
};

static struct trace_event_functions fexit_funcs = {
	.trace		= print_fexit_event
};

static struct trace_event_fields fentry_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = fentry_event_define_fields },
	{}
};

static struct trace_event_fields fexit_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = fexit_event_define_fields },
	{}
};

static int fprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data);

static inline void init_trace_event_call(struct trace_fprobe *tf)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);

	if (trace_fprobe_is_return(tf)) {
		call->event.funcs = &fexit_funcs;
		call->class->fields_array = fexit_fields_array;
	} else {
		call->event.funcs = &fentry_funcs;
		call->class->fields_array = fentry_fields_array;
	}

	call->flags = TRACE_EVENT_FL_FPROBE;
	call->class->reg = fprobe_register;
}

static int register_fprobe_event(struct trace_fprobe *tf)
{
	init_trace_event_call(tf);

	return trace_probe_register_event_call(&tf->tp);
}

static int unregister_fprobe_event(struct trace_fprobe *tf)
{
	return trace_probe_unregister_event_call(&tf->tp);
}

static int __register_tracepoint_fprobe(struct trace_fprobe *tf)
{
	struct tracepoint *tpoint = tf->tpoint;
	unsigned long ip = (unsigned long)tpoint->probestub;
	int ret;

	/*
	 * Here, we do 2 steps to enable fprobe on a tracepoint.
	 * First, put the __probestub_##TP function on the tracepoint,
	 * then put an fprobe on that stub function.
	 */
	ret = tracepoint_probe_register_prio_may_exist(tpoint,
				tpoint->probestub, NULL, 0);
	if (ret < 0)
		return ret;
	return register_fprobe_ips(&tf->fp, &ip, 1);
}

/* Internal register function - just handle fprobe and flags */
static int __register_trace_fprobe(struct trace_fprobe *tf)
{
	int i, ret;

	/* Should we need a new LOCKDOWN flag for fprobe? */
	ret = security_locked_down(LOCKDOWN_KPROBES);
	if (ret)
		return ret;

	if (trace_fprobe_is_registered(tf))
		return -EINVAL;

	for (i = 0; i < tf->tp.nr_args; i++) {
		ret = traceprobe_update_arg(&tf->tp.args[i]);
		if (ret)
			return ret;
	}

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tf->tp))
		tf->fp.flags &= ~FPROBE_FL_DISABLED;
	else
		tf->fp.flags |= FPROBE_FL_DISABLED;

	if (trace_fprobe_is_tracepoint(tf)) {
		/* This tracepoint is not loaded yet */
		if (tf->tpoint == TRACEPOINT_STUB)
			return 0;

		return __register_tracepoint_fprobe(tf);
	}

	/* TODO: handle filter, nofilter or symbol list */
	return register_fprobe(&tf->fp, tf->symbol, NULL);
}

/* Internal unregister function - just handle fprobe and flags */
static void __unregister_trace_fprobe(struct trace_fprobe *tf)
{
	if (trace_fprobe_is_registered(tf)) {
		unregister_fprobe(&tf->fp);
		memset(&tf->fp, 0, sizeof(tf->fp));
		if (trace_fprobe_is_tracepoint(tf)) {
			tracepoint_probe_unregister(tf->tpoint,
					tf->tpoint->probestub, NULL);
			tf->tpoint = NULL;
			tf->mod = NULL;
		}
	}
}

/* TODO: make this trace_*probe common function */
/* Unregister a trace_probe and probe_event */
static int unregister_trace_fprobe(struct trace_fprobe *tf)
{
	/* If other probes are on the event, just unregister fprobe */
	if (trace_probe_has_sibling(&tf->tp))
		goto unreg;

	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tf->tp))
		return -EBUSY;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tf->tp)))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_fprobe_event(tf))
		return -EBUSY;

unreg:
	__unregister_trace_fprobe(tf);
	dyn_event_remove(&tf->devent);
	trace_probe_unlink(&tf->tp);

	return 0;
}

static bool trace_fprobe_has_same_fprobe(struct trace_fprobe *orig,
					 struct trace_fprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	int i;

	list_for_each_entry(orig, &tpe->probes, tp.list) {
		if (strcmp(trace_fprobe_symbol(orig),
			   trace_fprobe_symbol(comp)))
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are the same. Let's compare
		 * comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_fprobe(struct trace_fprobe *tf, struct trace_fprobe *to)
{
	int ret;

	if (trace_fprobe_is_return(tf) != trace_fprobe_is_return(to) ||
	    trace_fprobe_is_tracepoint(tf) != trace_fprobe_is_tracepoint(to)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, DIFF_PROBE_TYPE);
		return -EEXIST;
	}
	ret = trace_probe_compare_arg_type(&tf->tp, &to->tp);
	if (ret) {
		/* Note that the argument index starts at 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_fprobe_has_same_fprobe(to, tf)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tf->tp, &to->tp);
	if (ret)
		return ret;

	ret = __register_trace_fprobe(tf);
	if (ret)
		trace_probe_unlink(&tf->tp);
	else
		dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));

	return ret;
}

/* Register a trace_probe and probe_event */
static int register_trace_fprobe(struct trace_fprobe *tf)
{
	struct trace_fprobe *old_tf;
	int ret;

	mutex_lock(&event_mutex);

	old_tf = find_trace_fprobe(trace_probe_name(&tf->tp),
				   trace_probe_group_name(&tf->tp));
	if (old_tf) {
		ret = append_trace_fprobe(tf, old_tf);
		goto end;
	}

	/* Register new event */
	ret = register_fprobe_event(tf);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else {
			pr_warn("Failed to register probe event(%d)\n", ret);
		}
		goto end;
	}

	/* Register fprobe */
	ret = __register_trace_fprobe(tf);
	if (ret < 0)
		unregister_fprobe_event(tf);
	else
		dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));

end:
	mutex_unlock(&event_mutex);
	return ret;
}

struct __find_tracepoint_cb_data {
	const char *tp_name;
	struct tracepoint *tpoint;
	struct module *mod;
};

static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mod, void *priv)
{
	struct __find_tracepoint_cb_data *data = priv;

	if (!data->tpoint && !strcmp(data->tp_name, tp->name)) {
		data->tpoint = tp;
		if (!data->mod) {
			data->mod = mod;
			if (!try_module_get(data->mod)) {
				data->tpoint = NULL;
				data->mod = NULL;
			}
		}
	}
}

static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
{
	struct __find_tracepoint_cb_data *data = priv;

	if (!data->tpoint && !strcmp(data->tp_name, tp->name))
		data->tpoint = tp;
}

/*
 * Find a tracepoint in the kernel or a module. If the tracepoint is in a
 * module, this increments the module refcount to prevent unloading until the
 * trace_fprobe is registered to the list. After registering the trace_fprobe
 * on the trace_fprobe list, the module refcount is decremented, because
 * __tracepoint_probe_module_cb will handle it from then on.
 */
static struct tracepoint *find_tracepoint(const char *tp_name,
					  struct module **tp_mod)
{
	struct __find_tracepoint_cb_data data = {
		.tp_name = tp_name,
		.mod = NULL,
	};

	for_each_kernel_tracepoint(__find_tracepoint_cb, &data);

	if (!data.tpoint && IS_ENABLED(CONFIG_MODULES)) {
		for_each_module_tracepoint(__find_tracepoint_module_cb, &data);
		*tp_mod = data.mod;
	}

	return data.tpoint;
}

#ifdef CONFIG_MODULES
static void reenable_trace_fprobe(struct trace_fprobe *tf)
{
	struct trace_probe *tp = &tf->tp;

	list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
		__enable_trace_fprobe(tf);
	}
}

static struct tracepoint *find_tracepoint_in_module(struct module *mod,
						    const char *tp_name)
{
	struct __find_tracepoint_cb_data data = {
		.tp_name = tp_name,
		.mod = mod,
	};

	for_each_tracepoint_in_module(mod, __find_tracepoint_module_cb, &data);
	return data.tpoint;
}

static int __tracepoint_probe_module_cb(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct tp_module *tp_mod = data;
	struct tracepoint *tpoint;
	struct trace_fprobe *tf;
	struct dyn_event *pos;

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	mutex_lock(&event_mutex);
	for_each_trace_fprobe(tf, pos) {
		if (val == MODULE_STATE_COMING && tf->tpoint == TRACEPOINT_STUB) {
			tpoint = find_tracepoint_in_module(tp_mod->mod, tf->symbol);
			if (tpoint) {
				tf->tpoint = tpoint;
				tf->mod = tp_mod->mod;
				if (!WARN_ON_ONCE(__register_tracepoint_fprobe(tf)) &&
				    trace_probe_is_enabled(&tf->tp))
					reenable_trace_fprobe(tf);
			}
		} else if (val == MODULE_STATE_GOING && tp_mod->mod == tf->mod) {
			tracepoint_probe_unregister(tf->tpoint,
					tf->tpoint->probestub, NULL);
			tf->tpoint = NULL;
			tf->mod = NULL;
		}
	}
	mutex_unlock(&event_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = __tracepoint_probe_module_cb,
};
#endif /* CONFIG_MODULES */

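/*
 * Split the probed symbol from an optional "%return" suffix. For example
 * (illustrative), "vfs_read%return" yields *symbol = "vfs_read" and
 * *is_return = true; a plain "vfs_read" keeps *is_return false unless one
 * of the remaining arguments references $retval.
 */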
static int parse_symbol_and_return(int argc, const char *argv[],
				   char **symbol, bool *is_return,
				   bool is_tracepoint)
{
	char *tmp = strchr(argv[1], '%');
	int i;

	if (tmp) {
		int len = tmp - argv[1];

		if (!is_tracepoint && !strcmp(tmp, "%return")) {
			*is_return = true;
		} else {
			trace_probe_log_err(len, BAD_ADDR_SUFFIX);
			return -EINVAL;
		}
		*symbol = kmemdup_nul(argv[1], len, GFP_KERNEL);
	} else {
		*symbol = kstrdup(argv[1], GFP_KERNEL);
	}
	if (!*symbol)
		return -ENOMEM;

	if (*is_return)
		return 0;

	/* If there is $retval, this should be a return fprobe. */
	for (i = 2; i < argc; i++) {
		tmp = strstr(argv[i], "$retval");
		if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') {
			if (is_tracepoint) {
				trace_probe_log_set_index(i);
				trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
				return -EINVAL;
			}
			*is_return = true;
			break;
		}
	}
	return 0;
}

static int __trace_fprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *  - Add fentry probe:
	 *      f[:[GRP/][EVENT]] [MOD:]KSYM [FETCHARGS]
	 *  - Add fexit probe:
	 *      f[N][:[GRP/][EVENT]] [MOD:]KSYM%return [FETCHARGS]
	 *  - Add tracepoint probe:
	 *      t[:[GRP/][EVENT]] TRACEPOINT [FETCHARGS]
	 *
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth entry of stack (N:0-)
	 *  $argN	: fetch Nth argument (N:1-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
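	/*
	 * For example (illustrative commands via tracefs):
	 *
	 *   echo 'f:myentry vfs_read count=$arg3' >> dynamic_events
	 *   echo 'f:myexit vfs_read%return ret=$retval' >> dynamic_events
	 *   echo 't:mytp sched_switch' >> dynamic_events
	 *
	 * The first adds an fentry event fetching the third argument, the
	 * second an fexit event fetching the return value, and the third a
	 * tracepoint event.
	 */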
	struct trace_fprobe *tf = NULL;
	int i, len, new_argc = 0, ret = 0;
	bool is_return = false;
	char *symbol = NULL;
	const char *event = NULL, *group = FPROBE_EVENT_SYSTEM;
	const char **new_argv = NULL;
	int maxactive = 0;
	char buf[MAX_EVENT_NAME_LEN];
	char gbuf[MAX_EVENT_NAME_LEN];
	char sbuf[KSYM_NAME_LEN];
	char abuf[MAX_BTF_ARGS_LEN];
	char *dbuf = NULL;
	bool is_tracepoint = false;
	struct module *tp_mod = NULL;
	struct tracepoint *tpoint = NULL;
	struct traceprobe_parse_context ctx = {
		.flags = TPARG_FL_KERNEL | TPARG_FL_FPROBE,
	};

	if ((argv[0][0] != 'f' && argv[0][0] != 't') || argc < 2)
		return -ECANCELED;

	if (argv[0][0] == 't') {
		is_tracepoint = true;
		group = TRACEPOINT_EVENT_SYSTEM;
	}

	trace_probe_log_init("trace_fprobe", argc, argv);

	event = strchr(&argv[0][1], ':');
	if (event)
		event++;

	if (isdigit(argv[0][1])) {
		if (event)
			len = event - &argv[0][1] - 1;
		else
			len = strlen(&argv[0][1]);
		if (len > MAX_EVENT_NAME_LEN - 1) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		memcpy(buf, &argv[0][1], len);
		buf[len] = '\0';
		ret = kstrtouint(buf, 0, &maxactive);
		if (ret || !maxactive) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		/*
		 * fprobe rethook instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > RETHOOK_MAXACTIVE_MAX) {
			trace_probe_log_err(1, MAXACT_TOO_BIG);
			goto parse_error;
		}
	}

	trace_probe_log_set_index(1);

	/* a symbol (or tracepoint) must be specified */
	ret = parse_symbol_and_return(argc, argv, &symbol, &is_return, is_tracepoint);
	if (ret < 0)
		goto parse_error;

	if (!is_return && maxactive) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(1, BAD_MAXACT_TYPE);
		goto parse_error;
	}

	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, gbuf,
						  event - argv[0]);
		if (ret)
			goto parse_error;
	}

	if (!event) {
		/* Make a new event name */
		if (is_tracepoint)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%s%s",
				 isdigit(*symbol) ? "_" : "", symbol);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%s__%s", symbol,
				 is_return ? "exit" : "entry");
		sanitize_event_name(buf);
		event = buf;
	}

	if (is_return)
		ctx.flags |= TPARG_FL_RETURN;
	else
		ctx.flags |= TPARG_FL_FENTRY;

	if (is_tracepoint) {
		ctx.flags |= TPARG_FL_TPOINT;
		tpoint = find_tracepoint(symbol, &tp_mod);
		if (tpoint) {
			ctx.funcname = kallsyms_lookup(
				(unsigned long)tpoint->probestub,
				NULL, NULL, NULL, sbuf);
		} else if (IS_ENABLED(CONFIG_MODULES)) {
			/* This *may* be loaded afterwards */
			tpoint = TRACEPOINT_STUB;
			ctx.funcname = symbol;
		} else {
			trace_probe_log_set_index(1);
			trace_probe_log_err(0, NO_TRACEPOINT);
			goto parse_error;
		}
	} else {
		ctx.funcname = symbol;
	}

	argc -= 2; argv += 2;
	new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
					       abuf, MAX_BTF_ARGS_LEN, &ctx);
	if (IS_ERR(new_argv)) {
		ret = PTR_ERR(new_argv);
		new_argv = NULL;
		goto out;
	}
	if (new_argv) {
		argc = new_argc;
		argv = new_argv;
	}
	if (argc > MAX_TRACE_ARGS) {
		ret = -E2BIG;
		goto out;
	}

	ret = traceprobe_expand_dentry_args(argc, argv, &dbuf);
	if (ret)
		goto out;

	/* setup a probe */
	tf = alloc_trace_fprobe(group, event, symbol, tpoint, tp_mod,
				maxactive, argc, is_return);
	if (IS_ERR(tf)) {
		ret = PTR_ERR(tf);
		/* This must return -ENOMEM, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto out;	/* We know tf is not allocated */
	}

	/* parse arguments */
	for (i = 0; i < argc; i++) {
		trace_probe_log_set_index(i + 2);
		ctx.offset = 0;
		ret = traceprobe_parse_probe_arg(&tf->tp, i, argv[i], &ctx);
		if (ret)
			goto error;	/* This can be -ENOMEM */
	}

	if (is_return && tf->tp.entry_arg) {
		tf->fp.entry_handler = trace_fprobe_entry_handler;
		tf->fp.entry_data_size = traceprobe_get_entry_data_size(&tf->tp);
	}

	ret = traceprobe_set_print_fmt(&tf->tp,
			is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL);
	if (ret < 0)
		goto error;

	ret = register_trace_fprobe(tf);
	if (ret) {
		trace_probe_log_set_index(1);
		if (ret == -EILSEQ)
			trace_probe_log_err(0, BAD_INSN_BNDRY);
		else if (ret == -ENOENT)
			trace_probe_log_err(0, BAD_PROBE_ADDR);
		else if (ret != -ENOMEM && ret != -EEXIST)
			trace_probe_log_err(0, FAIL_REG_PROBE);
		goto error;
	}

out:
	if (tp_mod)
		module_put(tp_mod);
	traceprobe_finish_parse(&ctx);
	trace_probe_log_clear();
	kfree(new_argv);
	kfree(symbol);
	kfree(dbuf);
	return ret;

parse_error:
	ret = -EINVAL;
error:
	free_trace_fprobe(tf);
	goto out;
}

static int trace_fprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_fprobe_create);
}

static int trace_fprobe_release(struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);
	int ret = unregister_trace_fprobe(tf);

	if (!ret)
		free_trace_fprobe(tf);
	return ret;
}

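/*
 * Show the definition of a trace_fprobe in the same syntax it was created
 * with, e.g. (illustrative) "f:fprobes/myexit vfs_read%return ret=$retval".
 */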
static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);
	int i;

	if (trace_fprobe_is_tracepoint(tf))
		seq_putc(m, 't');
	else
		seq_putc(m, 'f');
	if (trace_fprobe_is_return(tf) && tf->fp.nr_maxactive)
		seq_printf(m, "%d", tf->fp.nr_maxactive);
	seq_printf(m, ":%s/%s", trace_probe_group_name(&tf->tp),
				trace_probe_name(&tf->tp));

	seq_printf(m, " %s%s", trace_fprobe_symbol(tf),
			       trace_fprobe_is_return(tf) ? "%return" : "");

	for (i = 0; i < tf->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tf->tp.args[i].name, tf->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

/*
 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 */
static int fprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_fprobe(event, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_fprobe(event, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_fprobe(event, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_fprobe(event, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

/*
 * Register dynevent at core_initcall. This allows the kernel to set up
 * fprobe events in postcore_initcall without tracefs.
 */
static __init int init_fprobe_trace_early(void)
{
	int ret;

	ret = dyn_event_register(&trace_fprobe_ops);
	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = register_tracepoint_module_notifier(&tracepoint_module_nb);
	if (ret)
		return ret;
#endif

	return 0;
}
core_initcall(init_fprobe_trace_early);
1355