xref: /linux/kernel/trace/trace_kprobe.c (revision 6eb2fb3170549737207974c2c6ad34bcc2f3025e)
1 /*
2  * Kprobes-based tracing events
3  *
4  * Created by Masami Hiramatsu <mhiramat@redhat.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18  */
19 
20 #include <linux/module.h>
21 #include <linux/uaccess.h>
22 
23 #include "trace_probe.h"
24 
25 #define KPROBE_EVENT_SYSTEM "kprobes"
26 
27 /*
28  * Kprobe event core functions
29  */
30 struct trace_probe {
31 	struct list_head	list;
32 	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
33 	unsigned long 		nhit;
34 	unsigned int		flags;	/* For TP_FLAG_* */
35 	const char		*symbol;	/* symbol name */
36 	struct ftrace_event_class	class;
37 	struct ftrace_event_call	call;
38 	struct ftrace_event_file	**files;
39 	ssize_t			size;		/* trace entry size */
40 	unsigned int		nr_args;
41 	struct probe_arg	args[];
42 };
43 
44 #define SIZEOF_TRACE_PROBE(n)			\
45 	(offsetof(struct trace_probe, args) +	\
46 	(sizeof(struct probe_arg) * (n)))
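/*
 * Editorial sketch (not part of the original source): args[] is a C99
 * flexible array member, so the allocation size for a probe with n
 * arguments is the offset of args[] plus n probe_arg slots:
 *
 *	tp = kzalloc(SIZEOF_TRACE_PROBE(2), GFP_KERNEL);
 *	// == kzalloc(offsetof(struct trace_probe, args)
 *	//	      + 2 * sizeof(struct probe_arg), GFP_KERNEL)
 */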
47 
48 
49 static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
50 {
51 	return tp->rp.handler != NULL;
52 }
53 
54 static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
55 {
56 	return tp->symbol ? tp->symbol : "unknown";
57 }
58 
59 static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
60 {
61 	return tp->rp.kp.offset;
62 }
63 
64 static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
65 {
66 	return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
67 }
68 
69 static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
70 {
71 	return !!(tp->flags & TP_FLAG_REGISTERED);
72 }
73 
74 static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
75 {
76 	return !!(kprobe_gone(&tp->rp.kp));
77 }
78 
79 static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
80 						struct module *mod)
81 {
82 	int len = strlen(mod->name);
83 	const char *name = trace_probe_symbol(tp);
84 	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
85 }
86 
87 static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
88 {
89 	return !!strchr(trace_probe_symbol(tp), ':');
90 }
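/*
 * Example for illustration (hypothetical probe): a module-qualified
 * symbol is written "MOD:SYM", e.g. "ext4:ext4_sync_file", so a ':'
 * in the symbol name marks a module probe, and
 * trace_probe_within_module() matches the "MOD" prefix and the ':'
 * against mod->name.
 */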
91 
92 static int register_probe_event(struct trace_probe *tp);
93 static void unregister_probe_event(struct trace_probe *tp);
94 
95 static DEFINE_MUTEX(probe_lock);
96 static LIST_HEAD(probe_list);
97 
98 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
99 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
100 				struct pt_regs *regs);
101 
102 /*
103  * Allocate a new trace_probe and initialize it (including its kprobe).
104  */
105 static struct trace_probe *alloc_trace_probe(const char *group,
106 					     const char *event,
107 					     void *addr,
108 					     const char *symbol,
109 					     unsigned long offs,
110 					     int nargs, bool is_return)
111 {
112 	struct trace_probe *tp;
113 	int ret = -ENOMEM;
114 
115 	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
116 	if (!tp)
117 		return ERR_PTR(ret);
118 
119 	if (symbol) {
120 		tp->symbol = kstrdup(symbol, GFP_KERNEL);
121 		if (!tp->symbol)
122 			goto error;
123 		tp->rp.kp.symbol_name = tp->symbol;
124 		tp->rp.kp.offset = offs;
125 	} else
126 		tp->rp.kp.addr = addr;
127 
128 	if (is_return)
129 		tp->rp.handler = kretprobe_dispatcher;
130 	else
131 		tp->rp.kp.pre_handler = kprobe_dispatcher;
132 
133 	if (!event || !is_good_name(event)) {
134 		ret = -EINVAL;
135 		goto error;
136 	}
137 
138 	tp->call.class = &tp->class;
139 	tp->call.name = kstrdup(event, GFP_KERNEL);
140 	if (!tp->call.name)
141 		goto error;
142 
143 	if (!group || !is_good_name(group)) {
144 		ret = -EINVAL;
145 		goto error;
146 	}
147 
148 	tp->class.system = kstrdup(group, GFP_KERNEL);
149 	if (!tp->class.system)
150 		goto error;
151 
152 	INIT_LIST_HEAD(&tp->list);
153 	return tp;
154 error:
155 	kfree(tp->call.name);
156 	kfree(tp->symbol);
157 	kfree(tp);
158 	return ERR_PTR(ret);
159 }
160 
161 static void free_trace_probe(struct trace_probe *tp)
162 {
163 	int i;
164 
165 	for (i = 0; i < tp->nr_args; i++)
166 		traceprobe_free_probe_arg(&tp->args[i]);
167 
168 	kfree(tp->call.class->system);
169 	kfree(tp->call.name);
170 	kfree(tp->symbol);
171 	kfree(tp);
172 }
173 
174 static struct trace_probe *find_trace_probe(const char *event,
175 					    const char *group)
176 {
177 	struct trace_probe *tp;
178 
179 	list_for_each_entry(tp, &probe_list, list)
180 		if (strcmp(tp->call.name, event) == 0 &&
181 		    strcmp(tp->call.class->system, group) == 0)
182 			return tp;
183 	return NULL;
184 }
185 
186 static int trace_probe_nr_files(struct trace_probe *tp)
187 {
188 	struct ftrace_event_file **file = tp->files;
189 	int ret = 0;
190 
191 	if (file)
192 		while (*(file++))
193 			ret++;
194 
195 	return ret;
196 }
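/*
 * Illustrative sketch: tp->files is kept as a NULL-terminated array,
 * so the count is found by walking to the terminator.  A hypothetical
 * probe enabled for two trace instances would hold:
 *
 *	tp->files == { file0, file1, NULL }	// nr_files == 2
 */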
197 
198 static DEFINE_MUTEX(probe_enable_lock);
199 
200 /*
201  * Enable trace_probe.
202  * If the file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
203  */
204 static int
205 enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
206 {
207 	int ret = 0;
208 
209 	mutex_lock(&probe_enable_lock);
210 
211 	if (file) {
212 		struct ftrace_event_file **new, **old = tp->files;
213 		int n = trace_probe_nr_files(tp);
214 
215 		/* One slot for the new file and one for the NULL terminator */
216 		new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
217 			      GFP_KERNEL);
218 		if (!new) {
219 			ret = -ENOMEM;
220 			goto out_unlock;
221 		}
222 		memcpy(new, old, n * sizeof(struct ftrace_event_file *));
223 		new[n] = file;
224 		/* The last slot stays NULL as the terminator */
225 
226 		rcu_assign_pointer(tp->files, new);
227 		tp->flags |= TP_FLAG_TRACE;
228 
229 		if (old) {
230 			/* Make sure the probe is done with old files */
231 			synchronize_sched();
232 			kfree(old);
233 		}
234 	} else
235 		tp->flags |= TP_FLAG_PROFILE;
236 
237 	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
238 	    !trace_probe_has_gone(tp)) {
239 		if (trace_probe_is_return(tp))
240 			ret = enable_kretprobe(&tp->rp);
241 		else
242 			ret = enable_kprobe(&tp->rp.kp);
243 	}
244 
245  out_unlock:
246 	mutex_unlock(&probe_enable_lock);
247 
248 	return ret;
249 }
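/*
 * Editorial note on the update pattern above: tp->files is modified
 * copy-on-write because the probe handlers walk the array without
 * taking probe_enable_lock.  The writer builds and publishes a new
 * array, then frees the old one only after synchronize_sched()
 * guarantees no handler still references it:
 *
 *	new = kzalloc((n + 2) * sizeof(*new), GFP_KERNEL);
 *	memcpy(new, old, n * sizeof(*new));
 *	new[n] = file;				// new[n + 1] stays NULL
 *	rcu_assign_pointer(tp->files, new);	// publish the new array
 *	synchronize_sched();			// wait out in-flight readers
 *	kfree(old);
 */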
250 
251 static int
252 trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
253 {
254 	int i;
255 
256 	if (tp->files) {
257 		for (i = 0; tp->files[i]; i++)
258 			if (tp->files[i] == file)
259 				return i;
260 	}
261 
262 	return -1;
263 }
264 
265 /*
266  * Disable trace_probe.
267  * If the file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
268  */
269 static int
270 disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
271 {
272 	int ret = 0;
273 
274 	mutex_lock(&probe_enable_lock);
275 
276 	if (file) {
277 		struct ftrace_event_file **new, **old = tp->files;
278 		int n = trace_probe_nr_files(tp);
279 		int i, j;
280 
281 		if (n == 0 || trace_probe_file_index(tp, file) < 0) {
282 			ret = -EINVAL;
283 			goto out_unlock;
284 		}
285 
286 		if (n == 1) {	/* Remove the last file */
287 			tp->flags &= ~TP_FLAG_TRACE;
288 			new = NULL;
289 		} else {
290 			new = kzalloc(n * sizeof(struct ftrace_event_file *),
291 				      GFP_KERNEL);
292 			if (!new) {
293 				ret = -ENOMEM;
294 				goto out_unlock;
295 			}
296 
297 			/* This copy-and-filter loop also copies the NULL terminator */
298 			for (i = 0, j = 0; j < n && i < n + 1; i++)
299 				if (old[i] != file)
300 					new[j++] = old[i];
301 		}
302 
303 		rcu_assign_pointer(tp->files, new);
304 
305 		/* Make sure the probe is done with old files */
306 		synchronize_sched();
307 		kfree(old);
308 	} else
309 		tp->flags &= ~TP_FLAG_PROFILE;
310 
311 	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
312 		if (trace_probe_is_return(tp))
313 			disable_kretprobe(&tp->rp);
314 		else
315 			disable_kprobe(&tp->rp.kp);
316 	}
317 
318  out_unlock:
319 	mutex_unlock(&probe_enable_lock);
320 
321 	return ret;
322 }
323 
324 /* Internal register function - just handle k*probes and flags */
325 static int __register_trace_probe(struct trace_probe *tp)
326 {
327 	int i, ret;
328 
329 	if (trace_probe_is_registered(tp))
330 		return -EINVAL;
331 
332 	for (i = 0; i < tp->nr_args; i++)
333 		traceprobe_update_arg(&tp->args[i]);
334 
335 	/* Set/clear disabled flag according to tp->flags */
336 	if (trace_probe_is_enabled(tp))
337 		tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
338 	else
339 		tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
340 
341 	if (trace_probe_is_return(tp))
342 		ret = register_kretprobe(&tp->rp);
343 	else
344 		ret = register_kprobe(&tp->rp.kp);
345 
346 	if (ret == 0)
347 		tp->flags |= TP_FLAG_REGISTERED;
348 	else {
349 		pr_warning("Could not insert probe at %s+%lu: %d\n",
350 			   trace_probe_symbol(tp), trace_probe_offset(tp), ret);
351 		if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
352 			pr_warning("This probe might be able to register after "
353 				   "the target module is loaded. Continue.\n");
354 			ret = 0;
355 		} else if (ret == -EILSEQ) {
356 			pr_warning("Probing address (0x%p) is not at an "
357 				   "instruction boundary.\n",
358 				   tp->rp.kp.addr);
359 			ret = -EINVAL;
360 		}
361 	}
362 
363 	return ret;
364 }
365 
366 /* Internal unregister function - just handle k*probes and flags */
367 static void __unregister_trace_probe(struct trace_probe *tp)
368 {
369 	if (trace_probe_is_registered(tp)) {
370 		if (trace_probe_is_return(tp))
371 			unregister_kretprobe(&tp->rp);
372 		else
373 			unregister_kprobe(&tp->rp.kp);
374 		tp->flags &= ~TP_FLAG_REGISTERED;
375 		/* Cleanup kprobe for reuse */
376 		if (tp->rp.kp.symbol_name)
377 			tp->rp.kp.addr = NULL;
378 	}
379 }
380 
381 /* Unregister a trace_probe and probe_event: must be called with probe_lock held */
382 static int unregister_trace_probe(struct trace_probe *tp)
383 {
384 	/* An enabled event cannot be unregistered */
385 	if (trace_probe_is_enabled(tp))
386 		return -EBUSY;
387 
388 	__unregister_trace_probe(tp);
389 	list_del(&tp->list);
390 	unregister_probe_event(tp);
391 
392 	return 0;
393 }
394 
395 /* Register a trace_probe and probe_event */
396 static int register_trace_probe(struct trace_probe *tp)
397 {
398 	struct trace_probe *old_tp;
399 	int ret;
400 
401 	mutex_lock(&probe_lock);
402 
403 	/* Delete the old event if one with the same name exists */
404 	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
405 	if (old_tp) {
406 		ret = unregister_trace_probe(old_tp);
407 		if (ret < 0)
408 			goto end;
409 		free_trace_probe(old_tp);
410 	}
411 
412 	/* Register new event */
413 	ret = register_probe_event(tp);
414 	if (ret) {
415 		pr_warning("Failed to register probe event(%d)\n", ret);
416 		goto end;
417 	}
418 
419 	/* Register k*probe */
420 	ret = __register_trace_probe(tp);
421 	if (ret < 0)
422 		unregister_probe_event(tp);
423 	else
424 		list_add_tail(&tp->list, &probe_list);
425 
426 end:
427 	mutex_unlock(&probe_lock);
428 	return ret;
429 }
430 
431 /* Module notifier callback: re-register probes that live on the coming module */
432 static int trace_probe_module_callback(struct notifier_block *nb,
433 				       unsigned long val, void *data)
434 {
435 	struct module *mod = data;
436 	struct trace_probe *tp;
437 	int ret;
438 
439 	if (val != MODULE_STATE_COMING)
440 		return NOTIFY_DONE;
441 
442 	/* Update probes on the coming module */
443 	mutex_lock(&probe_lock);
444 	list_for_each_entry(tp, &probe_list, list) {
445 		if (trace_probe_within_module(tp, mod)) {
446 			/* No need to check busy; this probe went away with the module. */
447 			__unregister_trace_probe(tp);
448 			ret = __register_trace_probe(tp);
449 			if (ret)
450 				pr_warning("Failed to re-register probe %s on "
451 					   "%s: %d\n",
452 					   tp->call.name, mod->name, ret);
453 		}
454 	}
455 	mutex_unlock(&probe_lock);
456 
457 	return NOTIFY_DONE;
458 }
459 
460 static struct notifier_block trace_probe_module_nb = {
461 	.notifier_call = trace_probe_module_callback,
462 	.priority = 1	/* Invoked after kprobe module callback */
463 };
464 
465 static int create_trace_probe(int argc, char **argv)
466 {
467 	/*
468 	 * Argument syntax:
469 	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
470 	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
471 	 * Fetch args:
472 	 *  $retval	: fetch return value
473 	 *  $stack	: fetch stack address
474 	 *  $stackN	: fetch the Nth entry of the stack (N >= 0)
475 	 *  @ADDR	: fetch memory at ADDR (ADDR must be in kernel space)
476 	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
477 	 *  %REG	: fetch register REG
478 	 * Dereferencing memory fetch:
479 	 *  +|-offs(ARG) : fetch memory at the address ARG +|- offs.
480 	 * Alias name of args:
481 	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
482 	 * Type of args:
483 	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
484 	 */
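	/*
	 * Hypothetical usage of the syntax above through the
	 * kprobe_events file (paths assume debugfs mounted at
	 * /sys/kernel/debug; register names are arch-specific):
	 *
	 *   echo 'p:myprobe do_sys_open dfd=%ax filename=+0(%si):string' \
	 *	> /sys/kernel/debug/tracing/kprobe_events
	 *   echo 'r:myretprobe do_sys_open $retval' \
	 *	>> /sys/kernel/debug/tracing/kprobe_events
	 *   echo '-:myprobe' >> /sys/kernel/debug/tracing/kprobe_events
	 */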
485 	struct trace_probe *tp;
486 	int i, ret = 0;
487 	bool is_return = false, is_delete = false;
488 	char *symbol = NULL, *event = NULL, *group = NULL;
489 	char *arg;
490 	unsigned long offset = 0;
491 	void *addr = NULL;
492 	char buf[MAX_EVENT_NAME_LEN];
493 
494 	/* argc must be >= 1 */
495 	if (argv[0][0] == 'p')
496 		is_return = false;
497 	else if (argv[0][0] == 'r')
498 		is_return = true;
499 	else if (argv[0][0] == '-')
500 		is_delete = true;
501 	else {
502 		pr_info("Probe definition must start with 'p', 'r' or"
503 			" '-'.\n");
504 		return -EINVAL;
505 	}
506 
507 	if (argv[0][1] == ':') {
508 		event = &argv[0][2];
509 		if (strchr(event, '/')) {
510 			group = event;
511 			event = strchr(group, '/') + 1;
512 			event[-1] = '\0';
513 			if (strlen(group) == 0) {
514 				pr_info("Group name is not specified\n");
515 				return -EINVAL;
516 			}
517 		}
518 		if (strlen(event) == 0) {
519 			pr_info("Event name is not specified\n");
520 			return -EINVAL;
521 		}
522 	}
523 	if (!group)
524 		group = KPROBE_EVENT_SYSTEM;
525 
526 	if (is_delete) {
527 		if (!event) {
528 			pr_info("Delete command needs an event name.\n");
529 			return -EINVAL;
530 		}
531 		mutex_lock(&probe_lock);
532 		tp = find_trace_probe(event, group);
533 		if (!tp) {
534 			mutex_unlock(&probe_lock);
535 			pr_info("Event %s/%s doesn't exist.\n", group, event);
536 			return -ENOENT;
537 		}
538 		/* delete an event */
539 		ret = unregister_trace_probe(tp);
540 		if (ret == 0)
541 			free_trace_probe(tp);
542 		mutex_unlock(&probe_lock);
543 		return ret;
544 	}
545 
546 	if (argc < 2) {
547 		pr_info("Probe point is not specified.\n");
548 		return -EINVAL;
549 	}
550 	if (isdigit(argv[1][0])) {
551 		if (is_return) {
552 			pr_info("Return probe point must be a symbol.\n");
553 			return -EINVAL;
554 		}
555 		/* an address specified */
556 		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
557 		if (ret) {
558 			pr_info("Failed to parse address.\n");
559 			return ret;
560 		}
561 	} else {
562 		/* a symbol specified */
563 		symbol = argv[1];
564 		/* TODO: support .init module functions */
565 		ret = traceprobe_split_symbol_offset(symbol, &offset);
566 		if (ret) {
567 			pr_info("Failed to parse symbol.\n");
568 			return ret;
569 		}
570 		if (offset && is_return) {
571 			pr_info("Return probe must be used without offset.\n");
572 			return -EINVAL;
573 		}
574 	}
575 	argc -= 2; argv += 2;
576 
577 	/* setup a probe */
578 	if (!event) {
579 		/* Make a new event name */
580 		if (symbol)
581 			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
582 				 is_return ? 'r' : 'p', symbol, offset);
583 		else
584 			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
585 				 is_return ? 'r' : 'p', addr);
586 		event = buf;
587 	}
588 	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
589 			       is_return);
590 	if (IS_ERR(tp)) {
591 		pr_info("Failed to allocate trace_probe (%d)\n",
592 			(int)PTR_ERR(tp));
593 		return PTR_ERR(tp);
594 	}
595 
596 	/* parse arguments */
597 	ret = 0;
598 	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
599 		/* Increment count first so args are freed on the error path */
600 		tp->nr_args++;
601 
602 		/* Parse argument name */
603 		arg = strchr(argv[i], '=');
604 		if (arg) {
605 			*arg++ = '\0';
606 			tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
607 		} else {
608 			arg = argv[i];
609 			/* If argument name is omitted, set "argN" */
610 			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
611 			tp->args[i].name = kstrdup(buf, GFP_KERNEL);
612 		}
613 
614 		if (!tp->args[i].name) {
615 			pr_info("Failed to allocate argument[%d] name.\n", i);
616 			ret = -ENOMEM;
617 			goto error;
618 		}
619 
620 		if (!is_good_name(tp->args[i].name)) {
621 			pr_info("Invalid argument[%d] name: %s\n",
622 				i, tp->args[i].name);
623 			ret = -EINVAL;
624 			goto error;
625 		}
626 
627 		if (traceprobe_conflict_field_name(tp->args[i].name,
628 							tp->args, i)) {
629 			pr_info("Argument[%d] name '%s' conflicts with "
630 				"another field.\n", i, argv[i]);
631 			ret = -EINVAL;
632 			goto error;
633 		}
634 
635 		/* Parse fetch argument */
636 		ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
637 						is_return, true);
638 		if (ret) {
639 			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
640 			goto error;
641 		}
642 	}
643 
644 	ret = register_trace_probe(tp);
645 	if (ret)
646 		goto error;
647 	return 0;
648 
649 error:
650 	free_trace_probe(tp);
651 	return ret;
652 }
653 
654 static int release_all_trace_probes(void)
655 {
656 	struct trace_probe *tp;
657 	int ret = 0;
658 
659 	mutex_lock(&probe_lock);
660 	/* Ensure no probe is in use. */
661 	list_for_each_entry(tp, &probe_list, list)
662 		if (trace_probe_is_enabled(tp)) {
663 			ret = -EBUSY;
664 			goto end;
665 		}
666 	/* TODO: Use batch unregistration */
667 	while (!list_empty(&probe_list)) {
668 		tp = list_entry(probe_list.next, struct trace_probe, list);
669 		unregister_trace_probe(tp);
670 		free_trace_probe(tp);
671 	}
672 
673 end:
674 	mutex_unlock(&probe_lock);
675 
676 	return ret;
677 }
678 
679 /* Probes listing interfaces */
680 static void *probes_seq_start(struct seq_file *m, loff_t *pos)
681 {
682 	mutex_lock(&probe_lock);
683 	return seq_list_start(&probe_list, *pos);
684 }
685 
686 static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
687 {
688 	return seq_list_next(v, &probe_list, pos);
689 }
690 
691 static void probes_seq_stop(struct seq_file *m, void *v)
692 {
693 	mutex_unlock(&probe_lock);
694 }
695 
696 static int probes_seq_show(struct seq_file *m, void *v)
697 {
698 	struct trace_probe *tp = v;
699 	int i;
700 
701 	seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
702 	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
703 
704 	if (!tp->symbol)
705 		seq_printf(m, " 0x%p", tp->rp.kp.addr);
706 	else if (tp->rp.kp.offset)
707 		seq_printf(m, " %s+%u", trace_probe_symbol(tp),
708 			   tp->rp.kp.offset);
709 	else
710 		seq_printf(m, " %s", trace_probe_symbol(tp));
711 
712 	for (i = 0; i < tp->nr_args; i++)
713 		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
714 	seq_printf(m, "\n");
715 
716 	return 0;
717 }
718 
719 static const struct seq_operations probes_seq_op = {
720 	.start  = probes_seq_start,
721 	.next   = probes_seq_next,
722 	.stop   = probes_seq_stop,
723 	.show   = probes_seq_show
724 };
725 
726 static int probes_open(struct inode *inode, struct file *file)
727 {
728 	int ret;
729 
730 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
731 		ret = release_all_trace_probes();
732 		if (ret < 0)
733 			return ret;
734 	}
735 
736 	return seq_open(file, &probes_seq_op);
737 }
738 
739 static ssize_t probes_write(struct file *file, const char __user *buffer,
740 			    size_t count, loff_t *ppos)
741 {
742 	return traceprobe_probes_write(file, buffer, count, ppos,
743 			create_trace_probe);
744 }
745 
746 static const struct file_operations kprobe_events_ops = {
747 	.owner          = THIS_MODULE,
748 	.open           = probes_open,
749 	.read           = seq_read,
750 	.llseek         = seq_lseek,
751 	.release        = seq_release,
752 	.write		= probes_write,
753 };
754 
755 /* Probes profiling interfaces */
756 static int probes_profile_seq_show(struct seq_file *m, void *v)
757 {
758 	struct trace_probe *tp = v;
759 
760 	seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
761 		   tp->rp.kp.nmissed);
762 
763 	return 0;
764 }
765 
766 static const struct seq_operations profile_seq_op = {
767 	.start  = probes_seq_start,
768 	.next   = probes_seq_next,
769 	.stop   = probes_seq_stop,
770 	.show   = probes_profile_seq_show
771 };
772 
773 static int profile_open(struct inode *inode, struct file *file)
774 {
775 	return seq_open(file, &profile_seq_op);
776 }
777 
778 static const struct file_operations kprobe_profile_ops = {
779 	.owner          = THIS_MODULE,
780 	.open           = profile_open,
781 	.read           = seq_read,
782 	.llseek         = seq_lseek,
783 	.release        = seq_release,
784 };
785 
786 /* Sum up the total data length for dynamic arrays (strings) */
787 static __kprobes int __get_data_size(struct trace_probe *tp,
788 				     struct pt_regs *regs)
789 {
790 	int i, ret = 0;
791 	u32 len;
792 
793 	for (i = 0; i < tp->nr_args; i++)
794 		if (unlikely(tp->args[i].fetch_size.fn)) {
795 			call_fetch(&tp->args[i].fetch_size, regs, &len);
796 			ret += len;
797 		}
798 
799 	return ret;
800 }
801 
802 /* Store the value of each argument */
803 static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
804 				       struct pt_regs *regs,
805 				       u8 *data, int maxlen)
806 {
807 	int i;
808 	u32 end = tp->size;
809 	u32 *dl;	/* Data (relative) location */
810 
811 	for (i = 0; i < tp->nr_args; i++) {
812 		if (unlikely(tp->args[i].fetch_size.fn)) {
813 			/*
814 			 * First, store the relative location and the
815 			 * maximum data length in *dl.
816 			 */
817 			dl = (u32 *)(data + tp->args[i].offset);
818 			*dl = make_data_rloc(maxlen, end - tp->args[i].offset);
819 			/* Then try to fetch string or dynamic array data */
820 			call_fetch(&tp->args[i].fetch, regs, dl);
821 			/* Reduce maximum length */
822 			end += get_rloc_len(*dl);
823 			maxlen -= get_rloc_len(*dl);
824 			/* Trick here, convert data_rloc to data_loc */
825 			*dl = convert_rloc_to_loc(*dl,
826 				 ent_size + tp->args[i].offset);
827 		} else
828 			/* Just fetching data normally */
829 			call_fetch(&tp->args[i].fetch, regs,
830 				   data + tp->args[i].offset);
831 	}
832 }
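/*
 * Editorial sketch of the rloc encoding used above (the authoritative
 * make_data_rloc()/get_rloc_len() macros live in trace_probe.h): the
 * 32-bit data location is assumed to pack a length in the upper 16
 * bits and an offset in the lower 16 bits, roughly:
 *
 *	dl  = (len << 16) | (offs & 0xffff);	// make_data_rloc()
 *	len = dl >> 16;				// get_rloc_len()
 *
 * store_trace_args() first records the remaining space, lets the
 * fetch function fill in the real length, then rewrites the offset to
 * be relative to the whole entry (data_rloc -> data_loc).
 */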
833 
834 /* Kprobe handler */
835 static __kprobes void
836 __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
837 		    struct ftrace_event_file *ftrace_file)
838 {
839 	struct kprobe_trace_entry_head *entry;
840 	struct ring_buffer_event *event;
841 	struct ring_buffer *buffer;
842 	int size, dsize, pc;
843 	unsigned long irq_flags;
844 	struct ftrace_event_call *call = &tp->call;
845 
846 	WARN_ON(call != ftrace_file->event_call);
847 
848 	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
849 		return;
850 
851 	local_save_flags(irq_flags);
852 	pc = preempt_count();
853 
854 	dsize = __get_data_size(tp, regs);
855 	size = sizeof(*entry) + tp->size + dsize;
856 
857 	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
858 						call->event.type,
859 						size, irq_flags, pc);
860 	if (!event)
861 		return;
862 
863 	entry = ring_buffer_event_data(event);
864 	entry->ip = (unsigned long)tp->rp.kp.addr;
865 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
866 
867 	if (!filter_current_check_discard(buffer, call, entry, event))
868 		trace_buffer_unlock_commit_regs(buffer, event,
869 						irq_flags, pc, regs);
870 }
871 
872 static __kprobes void
873 kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
874 {
875 	struct ftrace_event_file **file = tp->files;
876 
877 	/* Note: preempt is already disabled around the kprobe handler */
878 	while (*file) {
879 		__kprobe_trace_func(tp, regs, *file);
880 		file++;
881 	}
882 }
883 
884 /* Kretprobe handler */
885 static __kprobes void
886 __kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
887 		       struct pt_regs *regs,
888 		       struct ftrace_event_file *ftrace_file)
889 {
890 	struct kretprobe_trace_entry_head *entry;
891 	struct ring_buffer_event *event;
892 	struct ring_buffer *buffer;
893 	int size, pc, dsize;
894 	unsigned long irq_flags;
895 	struct ftrace_event_call *call = &tp->call;
896 
897 	WARN_ON(call != ftrace_file->event_call);
898 
899 	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
900 		return;
901 
902 	local_save_flags(irq_flags);
903 	pc = preempt_count();
904 
905 	dsize = __get_data_size(tp, regs);
906 	size = sizeof(*entry) + tp->size + dsize;
907 
908 	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
909 						call->event.type,
910 						size, irq_flags, pc);
911 	if (!event)
912 		return;
913 
914 	entry = ring_buffer_event_data(event);
915 	entry->func = (unsigned long)tp->rp.kp.addr;
916 	entry->ret_ip = (unsigned long)ri->ret_addr;
917 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
918 
919 	if (!filter_current_check_discard(buffer, call, entry, event))
920 		trace_buffer_unlock_commit_regs(buffer, event,
921 						irq_flags, pc, regs);
922 }
923 
924 static __kprobes void
925 kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
926 		     struct pt_regs *regs)
927 {
928 	struct ftrace_event_file **file = tp->files;
929 
930 	/* Note: preempt is already disabled around the kprobe handler */
931 	while (*file) {
932 		__kretprobe_trace_func(tp, ri, regs, *file);
933 		file++;
934 	}
935 }
936 
937 /* Event entry printers */
938 enum print_line_t
939 print_kprobe_event(struct trace_iterator *iter, int flags,
940 		   struct trace_event *event)
941 {
942 	struct kprobe_trace_entry_head *field;
943 	struct trace_seq *s = &iter->seq;
944 	struct trace_probe *tp;
945 	u8 *data;
946 	int i;
947 
948 	field = (struct kprobe_trace_entry_head *)iter->ent;
949 	tp = container_of(event, struct trace_probe, call.event);
950 
951 	if (!trace_seq_printf(s, "%s: (", tp->call.name))
952 		goto partial;
953 
954 	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
955 		goto partial;
956 
957 	if (!trace_seq_puts(s, ")"))
958 		goto partial;
959 
960 	data = (u8 *)&field[1];
961 	for (i = 0; i < tp->nr_args; i++)
962 		if (!tp->args[i].type->print(s, tp->args[i].name,
963 					     data + tp->args[i].offset, field))
964 			goto partial;
965 
966 	if (!trace_seq_puts(s, "\n"))
967 		goto partial;
968 
969 	return TRACE_TYPE_HANDLED;
970 partial:
971 	return TRACE_TYPE_PARTIAL_LINE;
972 }
973 
974 enum print_line_t
975 print_kretprobe_event(struct trace_iterator *iter, int flags,
976 		      struct trace_event *event)
977 {
978 	struct kretprobe_trace_entry_head *field;
979 	struct trace_seq *s = &iter->seq;
980 	struct trace_probe *tp;
981 	u8 *data;
982 	int i;
983 
984 	field = (struct kretprobe_trace_entry_head *)iter->ent;
985 	tp = container_of(event, struct trace_probe, call.event);
986 
987 	if (!trace_seq_printf(s, "%s: (", tp->call.name))
988 		goto partial;
989 
990 	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
991 		goto partial;
992 
993 	if (!trace_seq_puts(s, " <- "))
994 		goto partial;
995 
996 	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
997 		goto partial;
998 
999 	if (!trace_seq_puts(s, ")"))
1000 		goto partial;
1001 
1002 	data = (u8 *)&field[1];
1003 	for (i = 0; i < tp->nr_args; i++)
1004 		if (!tp->args[i].type->print(s, tp->args[i].name,
1005 					     data + tp->args[i].offset, field))
1006 			goto partial;
1007 
1008 	if (!trace_seq_puts(s, "\n"))
1009 		goto partial;
1010 
1011 	return TRACE_TYPE_HANDLED;
1012 partial:
1013 	return TRACE_TYPE_PARTIAL_LINE;
1014 }
1015 
1016 
1017 static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
1018 {
1019 	int ret, i;
1020 	struct kprobe_trace_entry_head field;
1021 	struct trace_probe *tp = (struct trace_probe *)event_call->data;
1022 
1023 	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1024 	/* Set argument names as fields */
1025 	for (i = 0; i < tp->nr_args; i++) {
1026 		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
1027 					 tp->args[i].name,
1028 					 sizeof(field) + tp->args[i].offset,
1029 					 tp->args[i].type->size,
1030 					 tp->args[i].type->is_signed,
1031 					 FILTER_OTHER);
1032 		if (ret)
1033 			return ret;
1034 	}
1035 	return 0;
1036 }
1037 
1038 static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
1039 {
1040 	int ret, i;
1041 	struct kretprobe_trace_entry_head field;
1042 	struct trace_probe *tp = (struct trace_probe *)event_call->data;
1043 
1044 	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1045 	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1046 	/* Set argument names as fields */
1047 	for (i = 0; i < tp->nr_args; i++) {
1048 		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
1049 					 tp->args[i].name,
1050 					 sizeof(field) + tp->args[i].offset,
1051 					 tp->args[i].type->size,
1052 					 tp->args[i].type->is_signed,
1053 					 FILTER_OTHER);
1054 		if (ret)
1055 			return ret;
1056 	}
1057 	return 0;
1058 }
1059 
1060 static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
1061 {
1062 	int i;
1063 	int pos = 0;
1064 
1065 	const char *fmt, *arg;
1066 
1067 	if (!trace_probe_is_return(tp)) {
1068 		fmt = "(%lx)";
1069 		arg = "REC->" FIELD_STRING_IP;
1070 	} else {
1071 		fmt = "(%lx <- %lx)";
1072 		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
1073 	}
1074 
1075 	/* When len=0, we just calculate the needed length */
1076 #define LEN_OR_ZERO (len ? len - pos : 0)
1077 
1078 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
1079 
1080 	for (i = 0; i < tp->nr_args; i++) {
1081 		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
1082 				tp->args[i].name, tp->args[i].type->fmt);
1083 	}
1084 
1085 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
1086 
1087 	for (i = 0; i < tp->nr_args; i++) {
1088 		if (strcmp(tp->args[i].type->name, "string") == 0)
1089 			pos += snprintf(buf + pos, LEN_OR_ZERO,
1090 					", __get_str(%s)",
1091 					tp->args[i].name);
1092 		else
1093 			pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
1094 					tp->args[i].name);
1095 	}
1096 
1097 #undef LEN_OR_ZERO
1098 
1099 	/* return the length of print_fmt */
1100 	return pos;
1101 }
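/*
 * Example for illustration (hypothetical event): for a kprobe defined
 * as "p:myprobe do_fork pid=%ax", __set_print_fmt() would build
 * approximately:
 *
 *	"\"(%lx) pid=%lx\", REC->__probe_ip, REC->pid"
 *
 * For a kretprobe the prefix becomes "(%lx <- %lx)", printed from the
 * REC->__probe_func and REC->__probe_ret_ip fields.
 */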
1102 
1103 static int set_print_fmt(struct trace_probe *tp)
1104 {
1105 	int len;
1106 	char *print_fmt;
1107 
1108 	/* First: called with 0 length to calculate the needed length */
1109 	len = __set_print_fmt(tp, NULL, 0);
1110 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
1111 	if (!print_fmt)
1112 		return -ENOMEM;
1113 
1114 	/* Second: actually write the @print_fmt */
1115 	__set_print_fmt(tp, print_fmt, len + 1);
1116 	tp->call.print_fmt = print_fmt;
1117 
1118 	return 0;
1119 }
1120 
1121 #ifdef CONFIG_PERF_EVENTS
1122 
1123 /* Kprobe profile handler */
1124 static __kprobes void
1125 kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
1126 {
1127 	struct ftrace_event_call *call = &tp->call;
1128 	struct kprobe_trace_entry_head *entry;
1129 	struct hlist_head *head;
1130 	int size, __size, dsize;
1131 	int rctx;
1132 
1133 	dsize = __get_data_size(tp, regs);
1134 	__size = sizeof(*entry) + tp->size + dsize;
1135 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1136 	size -= sizeof(u32);
1137 	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1138 		     "profile buffer not large enough"))
1139 		return;
1140 
1141 	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1142 	if (!entry)
1143 		return;
1144 
1145 	entry->ip = (unsigned long)tp->rp.kp.addr;
1146 	memset(&entry[1], 0, dsize);
1147 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
1148 
1149 	head = this_cpu_ptr(call->perf_events);
1150 	perf_trace_buf_submit(entry, size, rctx,
1151 					entry->ip, 1, regs, head, NULL);
1152 }
1153 
1154 /* Kretprobe profile handler */
1155 static __kprobes void
1156 kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
1157 		    struct pt_regs *regs)
1158 {
1159 	struct ftrace_event_call *call = &tp->call;
1160 	struct kretprobe_trace_entry_head *entry;
1161 	struct hlist_head *head;
1162 	int size, __size, dsize;
1163 	int rctx;
1164 
1165 	dsize = __get_data_size(tp, regs);
1166 	__size = sizeof(*entry) + tp->size + dsize;
1167 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1168 	size -= sizeof(u32);
1169 	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1170 		     "profile buffer not large enough"))
1171 		return;
1172 
1173 	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1174 	if (!entry)
1175 		return;
1176 
1177 	entry->func = (unsigned long)tp->rp.kp.addr;
1178 	entry->ret_ip = (unsigned long)ri->ret_addr;
1179 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
1180 
1181 	head = this_cpu_ptr(call->perf_events);
1182 	perf_trace_buf_submit(entry, size, rctx,
1183 					entry->ret_ip, 1, regs, head, NULL);
1184 }
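/*
 * Editorial note on the sizing in the two handlers above: the perf
 * trace buffer carries a u32 size header, so the payload is rounded
 * such that header plus payload stays u64-aligned:
 *
 *	size = ALIGN(__size + sizeof(u32), sizeof(u64)) - sizeof(u32);
 */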
1185 #endif	/* CONFIG_PERF_EVENTS */
1186 
1187 static __kprobes
1188 int kprobe_register(struct ftrace_event_call *event,
1189 		    enum trace_reg type, void *data)
1190 {
1191 	struct trace_probe *tp = (struct trace_probe *)event->data;
1192 	struct ftrace_event_file *file = data;
1193 
1194 	switch (type) {
1195 	case TRACE_REG_REGISTER:
1196 		return enable_trace_probe(tp, file);
1197 	case TRACE_REG_UNREGISTER:
1198 		return disable_trace_probe(tp, file);
1199 
1200 #ifdef CONFIG_PERF_EVENTS
1201 	case TRACE_REG_PERF_REGISTER:
1202 		return enable_trace_probe(tp, NULL);
1203 	case TRACE_REG_PERF_UNREGISTER:
1204 		return disable_trace_probe(tp, NULL);
1205 	case TRACE_REG_PERF_OPEN:
1206 	case TRACE_REG_PERF_CLOSE:
1207 	case TRACE_REG_PERF_ADD:
1208 	case TRACE_REG_PERF_DEL:
1209 		return 0;
1210 #endif
1211 	}
1212 	return 0;
1213 }
1214 
1215 static __kprobes
1216 int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1217 {
1218 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1219 
1220 	tp->nhit++;
1221 
1222 	if (tp->flags & TP_FLAG_TRACE)
1223 		kprobe_trace_func(tp, regs);
1224 #ifdef CONFIG_PERF_EVENTS
1225 	if (tp->flags & TP_FLAG_PROFILE)
1226 		kprobe_perf_func(tp, regs);
1227 #endif
1228 	return 0;	/* We don't tweak the kernel, so just return 0 */
1229 }
1230 
1231 static __kprobes
1232 int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1233 {
1234 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1235 
1236 	tp->nhit++;
1237 
1238 	if (tp->flags & TP_FLAG_TRACE)
1239 		kretprobe_trace_func(tp, ri, regs);
1240 #ifdef CONFIG_PERF_EVENTS
1241 	if (tp->flags & TP_FLAG_PROFILE)
1242 		kretprobe_perf_func(tp, ri, regs);
1243 #endif
1244 	return 0;	/* We don't tweak the kernel, so just return 0 */
1245 }
1246 
1247 static struct trace_event_functions kretprobe_funcs = {
1248 	.trace		= print_kretprobe_event
1249 };
1250 
1251 static struct trace_event_functions kprobe_funcs = {
1252 	.trace		= print_kprobe_event
1253 };
1254 
1255 static int register_probe_event(struct trace_probe *tp)
1256 {
1257 	struct ftrace_event_call *call = &tp->call;
1258 	int ret;
1259 
1260 	/* Initialize ftrace_event_call */
1261 	INIT_LIST_HEAD(&call->class->fields);
1262 	if (trace_probe_is_return(tp)) {
1263 		call->event.funcs = &kretprobe_funcs;
1264 		call->class->define_fields = kretprobe_event_define_fields;
1265 	} else {
1266 		call->event.funcs = &kprobe_funcs;
1267 		call->class->define_fields = kprobe_event_define_fields;
1268 	}
1269 	if (set_print_fmt(tp) < 0)
1270 		return -ENOMEM;
1271 	ret = register_ftrace_event(&call->event);
1272 	if (!ret) {
1273 		kfree(call->print_fmt);
1274 		return -ENODEV;
1275 	}
1276 	call->flags = 0;
1277 	call->class->reg = kprobe_register;
1278 	call->data = tp;
1279 	ret = trace_add_event_call(call);
1280 	if (ret) {
1281 		pr_info("Failed to register kprobe event: %s\n", call->name);
1282 		kfree(call->print_fmt);
1283 		unregister_ftrace_event(&call->event);
1284 	}
1285 	return ret;
1286 }
1287 
1288 static void unregister_probe_event(struct trace_probe *tp)
1289 {
1290 	/* tp->event is unregistered in trace_remove_event_call() */
1291 	trace_remove_event_call(&tp->call);
1292 	kfree(tp->call.print_fmt);
1293 }
1294 
1295 /* Make a debugfs interface for controlling probe points */
1296 static __init int init_kprobe_trace(void)
1297 {
1298 	struct dentry *d_tracer;
1299 	struct dentry *entry;
1300 
1301 	if (register_module_notifier(&trace_probe_module_nb))
1302 		return -EINVAL;
1303 
1304 	d_tracer = tracing_init_dentry();
1305 	if (!d_tracer)
1306 		return 0;
1307 
1308 	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
1309 				    NULL, &kprobe_events_ops);
1310 
1311 	/* Event list interface */
1312 	if (!entry)
1313 		pr_warning("Could not create debugfs "
1314 			   "'kprobe_events' entry\n");
1315 
1316 	/* Profile interface */
1317 	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
1318 				    NULL, &kprobe_profile_ops);
1319 
1320 	if (!entry)
1321 		pr_warning("Could not create debugfs "
1322 			   "'kprobe_profile' entry\n");
1323 	return 0;
1324 }
1325 fs_initcall(init_kprobe_trace);
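/*
 * Editorial usage sketch for the two files created above (paths
 * assume debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/tracing/kprobe_events	// list probe definitions
 *	cat /sys/kernel/debug/tracing/kprobe_profile	// per-event hit and miss counts
 *	: > /sys/kernel/debug/tracing/kprobe_events	// an O_TRUNC open clears all probes
 */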
1326 
1327 
1328 #ifdef CONFIG_FTRACE_STARTUP_TEST
1329 
1330 /*
1331  * The "__used" keeps gcc from removing the function symbol
1332  * from the kallsyms table.
1333  */
1334 static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
1335 					       int a4, int a5, int a6)
1336 {
1337 	return a1 + a2 + a3 + a4 + a5 + a6;
1338 }
1339 
1340 static struct ftrace_event_file *
1341 find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
1342 {
1343 	struct ftrace_event_file *file;
1344 
1345 	list_for_each_entry(file, &tr->events, list)
1346 		if (file->event_call == &tp->call)
1347 			return file;
1348 
1349 	return NULL;
1350 }
1351 
1352 static __init int kprobe_trace_self_tests_init(void)
1353 {
1354 	int ret, warn = 0;
1355 	int (*target)(int, int, int, int, int, int);
1356 	struct trace_probe *tp;
1357 	struct ftrace_event_file *file;
1358 
1359 	target = kprobe_trace_selftest_target;
1360 
1361 	pr_info("Testing kprobe tracing: ");
1362 
1363 	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
1364 				  "$stack $stack0 +0($stack)",
1365 				  create_trace_probe);
1366 	if (WARN_ON_ONCE(ret)) {
1367 		pr_warn("error on probing function entry.\n");
1368 		warn++;
1369 	} else {
1370 		/* Enable trace point */
1371 		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
1372 		if (WARN_ON_ONCE(tp == NULL)) {
1373 			pr_warn("error on getting new probe.\n");
1374 			warn++;
1375 		} else {
1376 			file = find_trace_probe_file(tp, top_trace_array());
1377 			if (WARN_ON_ONCE(file == NULL)) {
1378 				pr_warn("error on getting probe file.\n");
1379 				warn++;
1380 			} else
1381 				enable_trace_probe(tp, file);
1382 		}
1383 	}
1384 
1385 	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
1386 				  "$retval", create_trace_probe);
1387 	if (WARN_ON_ONCE(ret)) {
1388 		pr_warn("error on probing function return.\n");
1389 		warn++;
1390 	} else {
1391 		/* Enable trace point */
1392 		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
1393 		if (WARN_ON_ONCE(tp == NULL)) {
1394 			pr_warn("error on getting 2nd new probe.\n");
1395 			warn++;
1396 		} else {
1397 			file = find_trace_probe_file(tp, top_trace_array());
1398 			if (WARN_ON_ONCE(file == NULL)) {
1399 				pr_warn("error on getting probe file.\n");
1400 				warn++;
1401 			} else
1402 				enable_trace_probe(tp, file);
1403 		}
1404 	}
1405 
1406 	if (warn)
1407 		goto end;
1408 
1409 	ret = target(1, 2, 3, 4, 5, 6);
1410 
1411 	/* Disable trace points before removing them */
1412 	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
1413 	if (WARN_ON_ONCE(tp == NULL)) {
1414 		pr_warn("error on getting test probe.\n");
1415 		warn++;
1416 	} else {
1417 		file = find_trace_probe_file(tp, top_trace_array());
1418 		if (WARN_ON_ONCE(file == NULL)) {
1419 			pr_warn("error on getting probe file.\n");
1420 			warn++;
1421 		} else
1422 			disable_trace_probe(tp, file);
1423 	}
1424 
1425 	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
1426 	if (WARN_ON_ONCE(tp == NULL)) {
1427 		pr_warn("error on getting 2nd test probe.\n");
1428 		warn++;
1429 	} else {
1430 		file = find_trace_probe_file(tp, top_trace_array());
1431 		if (WARN_ON_ONCE(file == NULL)) {
1432 			pr_warn("error on getting probe file.\n");
1433 			warn++;
1434 		} else
1435 			disable_trace_probe(tp, file);
1436 	}
1437 
1438 	ret = traceprobe_command("-:testprobe", create_trace_probe);
1439 	if (WARN_ON_ONCE(ret)) {
1440 		pr_warn("error on deleting a probe.\n");
1441 		warn++;
1442 	}
1443 
1444 	ret = traceprobe_command("-:testprobe2", create_trace_probe);
1445 	if (WARN_ON_ONCE(ret)) {
1446 		pr_warn("error on deleting a probe.\n");
1447 		warn++;
1448 	}
1449 
1450 end:
1451 	release_all_trace_probes();
1452 	if (warn)
1453 		pr_cont("NG: Some tests failed. Please check them.\n");
1454 	else
1455 		pr_cont("OK\n");
1456 	return 0;
1457 }
1458 
1459 late_initcall(kprobe_trace_self_tests_init);
1460 
1461 #endif
1462