xref: /linux/kernel/trace/trace_kprobe.c (revision 544521d6217fb7846b746ada9d70f308f078aa7e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Kprobes-based tracing events
4  *
5  * Created by Masami Hiramatsu <mhiramat@redhat.com>
6  *
7  */
8 #define pr_fmt(fmt)	"trace_kprobe: " fmt
9 
10 #include <linux/bpf-cgroup.h>
11 #include <linux/cleanup.h>
12 #include <linux/security.h>
13 #include <linux/module.h>
14 #include <linux/uaccess.h>
15 #include <linux/rculist.h>
16 #include <linux/error-injection.h>
17 
18 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
19 
20 #include "trace_dynevent.h"
21 #include "trace_kprobe_selftest.h"
22 #include "trace_probe.h"
23 #include "trace_probe_tmpl.h"
24 #include "trace_probe_kernel.h"
25 
26 #define KPROBE_EVENT_SYSTEM "kprobes"
27 #define KRETPROBE_MAXACTIVE_MAX 4096
28 
29 /* Kprobe early definition from command line */
30 static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
31 
32 static int __init set_kprobe_boot_events(char *str)
33 {
34 	strscpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
35 	disable_tracing_selftest("running kprobe events");
36 
37 	return 1;
38 }
39 __setup("kprobe_event=", set_kprobe_boot_events);
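
/*
 * A usage sketch for the boot parameter above: per
 * Documentation/admin-guide/kernel-parameters.txt, the syntax matches the
 * kprobe_events tracefs interface, except that probe arguments are
 * comma-delimited and multiple definitions are separated by semicolons,
 * e.g. (symbol and fetchargs chosen for illustration):
 *
 *   kprobe_event=p,vfs_read,$arg1,$arg2
 */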
40 
41 static int trace_kprobe_create(const char *raw_command);
42 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
43 static int trace_kprobe_release(struct dyn_event *ev);
44 static bool trace_kprobe_is_busy(struct dyn_event *ev);
45 static bool trace_kprobe_match(const char *system, const char *event,
46 			int argc, const char **argv, struct dyn_event *ev);
47 
48 static struct dyn_event_operations trace_kprobe_ops = {
49 	.create = trace_kprobe_create,
50 	.show = trace_kprobe_show,
51 	.is_busy = trace_kprobe_is_busy,
52 	.free = trace_kprobe_release,
53 	.match = trace_kprobe_match,
54 };
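
/*
 * These operations plug trace_kprobe into the generic dynamic_events
 * interface; a write such as (event name illustrative)
 *
 *   echo 'p:myprobe vfs_read' >> /sys/kernel/tracing/dynamic_events
 *
 * is routed to .create (trace_kprobe_create), and reading the file
 * goes through .show.
 */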
55 
56 /*
57  * Kprobe event core functions
58  */
59 struct trace_kprobe {
60 	struct dyn_event	devent;
61 	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
62 	unsigned long __percpu *nhit;
63 	const char		*symbol;	/* symbol name */
64 	struct trace_probe	tp;
65 };
66 
67 static bool is_trace_kprobe(struct dyn_event *ev)
68 {
69 	return ev->ops == &trace_kprobe_ops;
70 }
71 
72 static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
73 {
74 	return container_of(ev, struct trace_kprobe, devent);
75 }
76 
77 /**
78  * for_each_trace_kprobe - iterate over the trace_kprobe list
79  * @pos:	the struct trace_kprobe * for each entry
80  * @dpos:	the struct dyn_event * to use as a loop cursor
81  */
82 #define for_each_trace_kprobe(pos, dpos)	\
83 	for_each_dyn_event(dpos)		\
84 		if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
85 
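/*
 * A minimal iteration sketch (mirroring find_trace_kprobe() below):
 *
 *	struct dyn_event *pos;
 *	struct trace_kprobe *tk;
 *
 *	for_each_trace_kprobe(tk, pos)
 *		if (trace_probe_is_enabled(&tk->tp))
 *			do_something(tk);	// hypothetical helper
 */
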
86 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
87 {
88 	return tk->rp.handler != NULL;
89 }
90 
91 static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
92 {
93 	return tk->symbol ? tk->symbol : "unknown";
94 }
95 
96 static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
97 {
98 	return tk->rp.kp.offset;
99 }
100 
101 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
102 {
103 	return kprobe_gone(&tk->rp.kp);
104 }
105 
106 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
107 						 struct module *mod)
108 {
109 	int len = strlen(module_name(mod));
110 	const char *name = trace_kprobe_symbol(tk);
111 
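	/*
	 * e.g. a probe on "ext4:ext4_readdir" (an illustrative module:symbol
	 * pair) lies within module "ext4".
	 */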
112 	return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
113 }
114 
115 #ifdef CONFIG_MODULES
116 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
117 {
118 	char *p;
119 	bool ret;
120 
121 	if (!tk->symbol)
122 		return false;
123 	p = strchr(tk->symbol, ':');
124 	if (!p)
125 		return true;
126 	*p = '\0';
127 	rcu_read_lock_sched();
128 	ret = !!find_module(tk->symbol);
129 	rcu_read_unlock_sched();
130 	*p = ':';
131 
132 	return ret;
133 }
134 #else
135 static inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
136 {
137 	return false;
138 }
139 #endif
140 
141 static bool trace_kprobe_is_busy(struct dyn_event *ev)
142 {
143 	struct trace_kprobe *tk = to_trace_kprobe(ev);
144 
145 	return trace_probe_is_enabled(&tk->tp);
146 }
147 
148 static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
149 					    int argc, const char **argv)
150 {
151 	char buf[MAX_ARGSTR_LEN + 1];
152 
153 	if (!argc)
154 		return true;
155 
156 	if (!tk->symbol)
157 		snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
158 	else if (tk->rp.kp.offset)
159 		snprintf(buf, sizeof(buf), "%s+%u",
160 			 trace_kprobe_symbol(tk), tk->rp.kp.offset);
161 	else
162 		snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
163 	if (strcmp(buf, argv[0]))
164 		return false;
165 	argc--; argv++;
166 
167 	return trace_probe_match_command_args(&tk->tp, argc, argv);
168 }
169 
170 static bool trace_kprobe_match(const char *system, const char *event,
171 			int argc, const char **argv, struct dyn_event *ev)
172 {
173 	struct trace_kprobe *tk = to_trace_kprobe(ev);
174 
175 	return (event[0] == '\0' ||
176 		strcmp(trace_probe_name(&tk->tp), event) == 0) &&
177 	    (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
178 	    trace_kprobe_match_command_head(tk, argc, argv);
179 }
180 
181 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
182 {
183 	unsigned long nhit = 0;
184 	int cpu;
185 
186 	for_each_possible_cpu(cpu)
187 		nhit += *per_cpu_ptr(tk->nhit, cpu);
188 
189 	return nhit;
190 }
191 
192 static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
193 {
194 	return !(list_empty(&tk->rp.kp.list) &&
195 		 hlist_unhashed(&tk->rp.kp.hlist));
196 }
197 
198 /* Return 0 if it fails to find the symbol address */
199 static nokprobe_inline
200 unsigned long trace_kprobe_address(struct trace_kprobe *tk)
201 {
202 	unsigned long addr;
203 
204 	if (tk->symbol) {
205 		addr = (unsigned long)
206 			kallsyms_lookup_name(trace_kprobe_symbol(tk));
207 		if (addr)
208 			addr += tk->rp.kp.offset;
209 	} else {
210 		addr = (unsigned long)tk->rp.kp.addr;
211 	}
212 	return addr;
213 }
214 
215 static nokprobe_inline struct trace_kprobe *
216 trace_kprobe_primary_from_call(struct trace_event_call *call)
217 {
218 	struct trace_probe *tp;
219 
220 	tp = trace_probe_primary_from_call(call);
221 	if (WARN_ON_ONCE(!tp))
222 		return NULL;
223 
224 	return container_of(tp, struct trace_kprobe, tp);
225 }
226 
227 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
228 {
229 	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
230 
231 	return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
232 			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
233 			tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
234 }
235 
236 bool trace_kprobe_error_injectable(struct trace_event_call *call)
237 {
238 	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
239 
240 	return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
241 	       false;
242 }
243 
244 static int register_kprobe_event(struct trace_kprobe *tk);
245 static int unregister_kprobe_event(struct trace_kprobe *tk);
246 
247 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
248 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
249 				struct pt_regs *regs);
250 
251 static void free_trace_kprobe(struct trace_kprobe *tk)
252 {
253 	if (tk) {
254 		trace_probe_cleanup(&tk->tp);
255 		kfree(tk->symbol);
256 		free_percpu(tk->nhit);
257 		kfree(tk);
258 	}
259 }
260 
261 DEFINE_FREE(free_trace_kprobe, struct trace_kprobe *,
262 	if (!IS_ERR_OR_NULL(_T)) free_trace_kprobe(_T))
263 
264 /*
265  * Allocate new trace_probe and initialize it (including kprobes).
266  */
267 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
268 					     const char *event,
269 					     void *addr,
270 					     const char *symbol,
271 					     unsigned long offs,
272 					     int maxactive,
273 					     int nargs, bool is_return)
274 {
275 	struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
276 	int ret = -ENOMEM;
277 
278 	tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
279 	if (!tk)
280 		return ERR_PTR(ret);
281 
282 	tk->nhit = alloc_percpu(unsigned long);
283 	if (!tk->nhit)
284 		return ERR_PTR(ret);
285 
286 	if (symbol) {
287 		tk->symbol = kstrdup(symbol, GFP_KERNEL);
288 		if (!tk->symbol)
289 			return ERR_PTR(ret);
290 		tk->rp.kp.symbol_name = tk->symbol;
291 		tk->rp.kp.offset = offs;
292 	} else
293 		tk->rp.kp.addr = addr;
294 
295 	if (is_return)
296 		tk->rp.handler = kretprobe_dispatcher;
297 	else
298 		tk->rp.kp.pre_handler = kprobe_dispatcher;
299 
300 	tk->rp.maxactive = maxactive;
301 	INIT_HLIST_NODE(&tk->rp.kp.hlist);
302 	INIT_LIST_HEAD(&tk->rp.kp.list);
303 
304 	ret = trace_probe_init(&tk->tp, event, group, false, nargs);
305 	if (ret < 0)
306 		return ERR_PTR(ret);
307 
308 	dyn_event_init(&tk->devent, &trace_kprobe_ops);
309 	return_ptr(tk);
310 }
311 
312 static struct trace_kprobe *find_trace_kprobe(const char *event,
313 					      const char *group)
314 {
315 	struct dyn_event *pos;
316 	struct trace_kprobe *tk;
317 
318 	for_each_trace_kprobe(tk, pos)
319 		if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
320 		    strcmp(trace_probe_group_name(&tk->tp), group) == 0)
321 			return tk;
322 	return NULL;
323 }
324 
325 static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
326 {
327 	int ret = 0;
328 
329 	if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
330 		if (trace_kprobe_is_return(tk))
331 			ret = enable_kretprobe(&tk->rp);
332 		else
333 			ret = enable_kprobe(&tk->rp.kp);
334 	}
335 
336 	return ret;
337 }
338 
339 static void __disable_trace_kprobe(struct trace_probe *tp)
340 {
341 	struct trace_kprobe *tk;
342 
343 	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
344 		if (!trace_kprobe_is_registered(tk))
345 			continue;
346 		if (trace_kprobe_is_return(tk))
347 			disable_kretprobe(&tk->rp);
348 		else
349 			disable_kprobe(&tk->rp.kp);
350 	}
351 }
352 
353 /*
354  * Enable a trace_probe.
355  * If the file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
356  */
357 static int enable_trace_kprobe(struct trace_event_call *call,
358 				struct trace_event_file *file)
359 {
360 	struct trace_probe *tp;
361 	struct trace_kprobe *tk;
362 	bool enabled;
363 	int ret = 0;
364 
365 	tp = trace_probe_primary_from_call(call);
366 	if (WARN_ON_ONCE(!tp))
367 		return -ENODEV;
368 	enabled = trace_probe_is_enabled(tp);
369 
370 	/* This also changes "enabled" state */
371 	if (file) {
372 		ret = trace_probe_add_file(tp, file);
373 		if (ret)
374 			return ret;
375 	} else
376 		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
377 
378 	if (enabled)
379 		return 0;
380 
381 	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
382 		if (trace_kprobe_has_gone(tk))
383 			continue;
384 		ret = __enable_trace_kprobe(tk);
385 		if (ret)
386 			break;
387 		enabled = true;
388 	}
389 
390 	if (ret) {
391 		/* Failed to enable one of them. Roll back all */
392 		if (enabled)
393 			__disable_trace_kprobe(tp);
394 		if (file)
395 			trace_probe_remove_file(tp, file);
396 		else
397 			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
398 	}
399 
400 	return ret;
401 }
402 
403 /*
404  * Disable a trace_probe.
405  * If the file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
406  */
407 static int disable_trace_kprobe(struct trace_event_call *call,
408 				struct trace_event_file *file)
409 {
410 	struct trace_probe *tp;
411 
412 	tp = trace_probe_primary_from_call(call);
413 	if (WARN_ON_ONCE(!tp))
414 		return -ENODEV;
415 
416 	if (file) {
417 		if (!trace_probe_get_file_link(tp, file))
418 			return -ENOENT;
419 		if (!trace_probe_has_single_file(tp))
420 			goto out;
421 		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
422 	} else
423 		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
424 
425 	if (!trace_probe_is_enabled(tp))
426 		__disable_trace_kprobe(tp);
427 
428  out:
429 	if (file)
430 		/*
431 		 * Synchronization is done in the function below. For a perf
432 		 * event, file == NULL and perf_trace_event_unreg() calls
433 		 * tracepoint_synchronize_unregister() to synchronize the
434 		 * event, so we don't need to care about it here.
435 		 */
436 		trace_probe_remove_file(tp, file);
437 
438 	return 0;
439 }
440 
441 #if defined(CONFIG_DYNAMIC_FTRACE) && \
442 	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
443 static bool __within_notrace_func(unsigned long addr)
444 {
445 	unsigned long offset, size;
446 
447 	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
448 		return false;
449 
450 	/* Get the entry address of the target function */
451 	addr -= offset;
452 
453 	/*
454 	 * Since ftrace_location_range() does inclusive range check, we need
455 	 * to subtract 1 byte from the end address.
456 	 */
457 	return !ftrace_location_range(addr, addr + size - 1);
458 }
459 
460 static bool within_notrace_func(struct trace_kprobe *tk)
461 {
462 	unsigned long addr = trace_kprobe_address(tk);
463 	char symname[KSYM_NAME_LEN], *p;
464 
465 	if (!__within_notrace_func(addr))
466 		return false;
467 
468 	/* Check if the address is on a suffixed-symbol */
469 	/* Check if the address is on a suffixed symbol (e.g. "func.isra.0") */
470 		p = strchr(symname, '.');
471 		if (!p)
472 			return true;
473 		*p = '\0';
474 		addr = (unsigned long)kprobe_lookup_name(symname, 0);
475 		if (addr)
476 			return __within_notrace_func(addr);
477 	}
478 
479 	return true;
480 }
481 #else
482 #define within_notrace_func(tk)	(false)
483 #endif
484 
485 /* Internal register function - just handle k*probes and flags */
486 static int __register_trace_kprobe(struct trace_kprobe *tk)
487 {
488 	int i, ret;
489 
490 	ret = security_locked_down(LOCKDOWN_KPROBES);
491 	if (ret)
492 		return ret;
493 
494 	if (trace_kprobe_is_registered(tk))
495 		return -EINVAL;
496 
497 	if (within_notrace_func(tk)) {
498 		pr_warn("Could not probe notrace function %ps\n",
499 			(void *)trace_kprobe_address(tk));
500 		return -EINVAL;
501 	}
502 
503 	for (i = 0; i < tk->tp.nr_args; i++) {
504 		ret = traceprobe_update_arg(&tk->tp.args[i]);
505 		if (ret)
506 			return ret;
507 	}
508 
509 	/* Set/clear disabled flag according to tp->flag */
510 	if (trace_probe_is_enabled(&tk->tp))
511 		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
512 	else
513 		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
514 
515 	if (trace_kprobe_is_return(tk))
516 		ret = register_kretprobe(&tk->rp);
517 	else
518 		ret = register_kprobe(&tk->rp.kp);
519 
520 	return ret;
521 }
522 
523 /* Internal unregister function - just handle k*probes and flags */
524 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
525 {
526 	if (trace_kprobe_is_registered(tk)) {
527 		if (trace_kprobe_is_return(tk))
528 			unregister_kretprobe(&tk->rp);
529 		else
530 			unregister_kprobe(&tk->rp.kp);
531 		/* Cleanup kprobe for reuse and mark it unregistered */
532 		INIT_HLIST_NODE(&tk->rp.kp.hlist);
533 		INIT_LIST_HEAD(&tk->rp.kp.list);
534 		if (tk->rp.kp.symbol_name)
535 			tk->rp.kp.addr = NULL;
536 	}
537 }
538 
539 /* Unregister a trace_probe and probe_event */
540 static int unregister_trace_kprobe(struct trace_kprobe *tk)
541 {
542 	/* If other probes are on the event, just unregister kprobe */
543 	if (trace_probe_has_sibling(&tk->tp))
544 		goto unreg;
545 
546 	/* Enabled event can not be unregistered */
547 	if (trace_probe_is_enabled(&tk->tp))
548 		return -EBUSY;
549 
550 	/* If there's a reference to the dynamic event */
551 	if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
552 		return -EBUSY;
553 
554 	/* Will fail if probe is being used by ftrace or perf */
555 	if (unregister_kprobe_event(tk))
556 		return -EBUSY;
557 
558 unreg:
559 	__unregister_trace_kprobe(tk);
560 	dyn_event_remove(&tk->devent);
561 	trace_probe_unlink(&tk->tp);
562 
563 	return 0;
564 }
565 
566 static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
567 					 struct trace_kprobe *comp)
568 {
569 	struct trace_probe_event *tpe = orig->tp.event;
570 	int i;
571 
572 	list_for_each_entry(orig, &tpe->probes, tp.list) {
573 		if (strcmp(trace_kprobe_symbol(orig),
574 			   trace_kprobe_symbol(comp)) ||
575 		    trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
576 			continue;
577 
578 		/*
579 		 * trace_probe_compare_arg_type() ensured that nr_args and
580 		 * each argument name and type are same. Let's compare comm.
581 		 */
582 		for (i = 0; i < orig->tp.nr_args; i++) {
583 			if (strcmp(orig->tp.args[i].comm,
584 				   comp->tp.args[i].comm))
585 				break;
586 		}
587 
588 		if (i == orig->tp.nr_args)
589 			return true;
590 	}
591 
592 	return false;
593 }
594 
595 static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
596 {
597 	int ret;
598 
599 	ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
600 	if (ret) {
601 		/* Note that arguments start at index 2 */
602 		trace_probe_log_set_index(ret + 1);
603 		trace_probe_log_err(0, DIFF_ARG_TYPE);
604 		return -EEXIST;
605 	}
606 	if (trace_kprobe_has_same_kprobe(to, tk)) {
607 		trace_probe_log_set_index(0);
608 		trace_probe_log_err(0, SAME_PROBE);
609 		return -EEXIST;
610 	}
611 
612 	/* Append to existing event */
613 	ret = trace_probe_append(&tk->tp, &to->tp);
614 	if (ret)
615 		return ret;
616 
617 	/* Register k*probe */
618 	ret = __register_trace_kprobe(tk);
619 	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
620 		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
621 		ret = 0;
622 	}
623 
624 	if (ret)
625 		trace_probe_unlink(&tk->tp);
626 	else
627 		dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
628 
629 	return ret;
630 }
631 
632 /* Register a trace_probe and probe_event */
633 static int register_trace_kprobe(struct trace_kprobe *tk)
634 {
635 	struct trace_kprobe *old_tk;
636 	int ret;
637 
638 	guard(mutex)(&event_mutex);
639 
640 	old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
641 				   trace_probe_group_name(&tk->tp));
642 	if (old_tk) {
643 		if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
644 			trace_probe_log_set_index(0);
645 			trace_probe_log_err(0, DIFF_PROBE_TYPE);
646 			return -EEXIST;
647 		}
648 		return append_trace_kprobe(tk, old_tk);
649 	}
650 
651 	/* Register new event */
652 	ret = register_kprobe_event(tk);
653 	if (ret) {
654 		if (ret == -EEXIST) {
655 			trace_probe_log_set_index(0);
656 			trace_probe_log_err(0, EVENT_EXIST);
657 		} else
658 			pr_warn("Failed to register probe event(%d)\n", ret);
659 		return ret;
660 	}
661 
662 	/* Register k*probe */
663 	ret = __register_trace_kprobe(tk);
664 	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
665 		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
666 		ret = 0;
667 	}
668 
669 	if (ret < 0)
670 		unregister_kprobe_event(tk);
671 	else
672 		dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
673 
674 	return ret;
675 }
676 
677 #ifdef CONFIG_MODULES
678 static int validate_module_probe_symbol(const char *modname, const char *symbol);
679 
680 static int register_module_trace_kprobe(struct module *mod, struct trace_kprobe *tk)
681 {
682 	const char *p;
683 	int ret = 0;
684 
685 	p = strchr(trace_kprobe_symbol(tk), ':');
686 	if (p)
687 		ret = validate_module_probe_symbol(module_name(mod), p + 1);
688 	if (!ret)
689 		ret = __register_trace_kprobe(tk);
690 	return ret;
691 }
692 
693 /* Module notifier call back, checking event on the module */
694 static int trace_kprobe_module_callback(struct notifier_block *nb,
695 				       unsigned long val, void *data)
696 {
697 	struct module *mod = data;
698 	struct dyn_event *pos;
699 	struct trace_kprobe *tk;
700 	int ret;
701 
702 	if (val != MODULE_STATE_COMING)
703 		return NOTIFY_DONE;
704 
705 	/* Update probes on coming module */
706 	guard(mutex)(&event_mutex);
707 	for_each_trace_kprobe(tk, pos) {
708 		if (trace_kprobe_within_module(tk, mod)) {
709 			/* Don't need to check busy - this should have gone. */
710 			__unregister_trace_kprobe(tk);
711 			ret = register_module_trace_kprobe(mod, tk);
712 			if (ret)
713 				pr_warn("Failed to re-register probe %s on %s: %d\n",
714 					trace_probe_name(&tk->tp),
715 					module_name(mod), ret);
716 		}
717 	}
718 
719 	return NOTIFY_DONE;
720 }
721 
722 static struct notifier_block trace_kprobe_module_nb = {
723 	.notifier_call = trace_kprobe_module_callback,
724 	.priority = 2	/* Invoked after kprobe and jump_label module callback */
725 };
726 static int trace_kprobe_register_module_notifier(void)
727 {
728 	return register_module_notifier(&trace_kprobe_module_nb);
729 }
730 #else
731 static int trace_kprobe_register_module_notifier(void)
732 {
733 	return 0;
734 }
735 #endif /* CONFIG_MODULES */
736 
737 static int count_symbols(void *data, unsigned long unused)
738 {
739 	unsigned int *count = data;
740 
741 	(*count)++;
742 
743 	return 0;
744 }
745 
746 struct sym_count_ctx {
747 	unsigned int count;
748 	const char *name;
749 };
750 
751 static int count_mod_symbols(void *data, const char *name, unsigned long unused)
752 {
753 	struct sym_count_ctx *ctx = data;
754 
755 	if (strcmp(name, ctx->name) == 0)
756 		ctx->count++;
757 
758 	return 0;
759 }
760 
761 static unsigned int number_of_same_symbols(const char *mod, const char *func_name)
762 {
763 	struct sym_count_ctx ctx = { .count = 0, .name = func_name };
764 
765 	if (!mod)
766 		kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count);
767 
768 	module_kallsyms_on_each_symbol(mod, count_mod_symbols, &ctx);
769 
770 	return ctx.count;
771 }
772 
773 static int validate_module_probe_symbol(const char *modname, const char *symbol)
774 {
775 	unsigned int count = number_of_same_symbols(modname, symbol);
776 
777 	if (count > 1) {
778 		/*
779 		 * Users should use ADDR to remove the ambiguity of
780 		 * using KSYM only.
781 		 */
782 		return -EADDRNOTAVAIL;
783 	} else if (count == 0) {
784 		/*
785 		 * We can return -ENOENT here, earlier than when registering
786 		 * the kprobe.
787 		 */
788 		return -ENOENT;
789 	}
790 	return 0;
791 }
792 
793 #ifdef CONFIG_MODULES
794 /* Return NULL if the module is not loaded or under unloading. */
795 static struct module *try_module_get_by_name(const char *name)
796 {
797 	struct module *mod;
798 
799 	rcu_read_lock_sched();
800 	mod = find_module(name);
801 	if (mod && !try_module_get(mod))
802 		mod = NULL;
803 	rcu_read_unlock_sched();
804 
805 	return mod;
806 }
807 #else
808 #define try_module_get_by_name(name)	(NULL)
809 #endif
810 
811 static int validate_probe_symbol(char *symbol)
812 {
813 	struct module *mod = NULL;
814 	char *modname = NULL, *p;
815 	int ret = 0;
816 
817 	p = strchr(symbol, ':');
818 	if (p) {
819 		modname = symbol;
820 		symbol = p + 1;
821 		*p = '\0';
822 		mod = try_module_get_by_name(modname);
823 		if (!mod)
824 			goto out;
825 	}
826 
827 	ret = validate_module_probe_symbol(modname, symbol);
828 out:
829 	if (p)
830 		*p = ':';
831 	if (mod)
832 		module_put(mod);
833 	return ret;
834 }
835 
836 static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
837 				      struct pt_regs *regs);
838 
839 static int trace_kprobe_create_internal(int argc, const char *argv[],
840 					struct traceprobe_parse_context *ctx)
841 {
842 	/*
843 	 * Argument syntax:
844 	 *  - Add kprobe:
845 	 *      p[:[GRP/][EVENT]] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
846 	 *  - Add kretprobe:
847 	 *      r[MAXACTIVE][:[GRP/][EVENT]] [MOD:]KSYM[+0] [FETCHARGS]
848 	 *    Or
849 	 *      p[:[GRP/][EVENT]] [MOD:]KSYM[+0]%return [FETCHARGS]
850 	 *
851 	 * Fetch args:
852 	 *  $retval	: fetch return value
853 	 *  $stack	: fetch stack address
854 	 *  $stackN	: fetch Nth of stack (N:0-)
855 	 *  $comm       : fetch current task comm
856 	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
857 	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
858 	 *  %REG	: fetch register REG
859 	 * Dereferencing memory fetch:
860 	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
861 	 * Alias name of args:
862 	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
863 	 * Type of args:
864 	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
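	 *
	 * Example definitions (probe/event names are illustrative and the
	 * register fetchargs are x86-specific; see
	 * Documentation/trace/kprobetrace.rst):
	 *   p:myprobe do_sys_open dfd=%ax filename=%dx
	 *   r:myretprobe do_sys_open $retval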
865 	 */
866 	struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
867 	int i, len, new_argc = 0, ret = 0;
868 	bool is_return = false;
869 	char *symbol __free(kfree) = NULL;
870 	char *tmp = NULL;
871 	const char **new_argv __free(kfree) = NULL;
872 	const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
873 	enum probe_print_type ptype;
874 	int maxactive = 0;
875 	long offset = 0;
876 	void *addr = NULL;
877 	char buf[MAX_EVENT_NAME_LEN];
878 	char gbuf[MAX_EVENT_NAME_LEN];
879 	char abuf[MAX_BTF_ARGS_LEN];
880 	char *dbuf __free(kfree) = NULL;
881 
882 	switch (argv[0][0]) {
883 	case 'r':
884 		is_return = true;
885 		break;
886 	case 'p':
887 		break;
888 	default:
889 		return -ECANCELED;
890 	}
891 	if (argc < 2)
892 		return -ECANCELED;
893 
894 	event = strchr(&argv[0][1], ':');
895 	if (event)
896 		event++;
897 
898 	if (isdigit(argv[0][1])) {
899 		if (!is_return) {
900 			trace_probe_log_err(1, BAD_MAXACT_TYPE);
901 			return -EINVAL;
902 		}
903 		if (event)
904 			len = event - &argv[0][1] - 1;
905 		else
906 			len = strlen(&argv[0][1]);
907 		if (len > MAX_EVENT_NAME_LEN - 1) {
908 			trace_probe_log_err(1, BAD_MAXACT);
909 			return -EINVAL;
910 		}
911 		memcpy(buf, &argv[0][1], len);
912 		buf[len] = '\0';
913 		ret = kstrtouint(buf, 0, &maxactive);
914 		if (ret || !maxactive) {
915 			trace_probe_log_err(1, BAD_MAXACT);
916 			return -EINVAL;
917 		}
918 		/* kretprobe instances are iterated over via a list. The
919 		 * maximum should stay reasonable.
920 		 */
921 		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
922 			trace_probe_log_err(1, MAXACT_TOO_BIG);
923 			return -EINVAL;
924 		}
925 	}
926 
927 	/* Try to parse an address. If that fails, try to read the
928 	 * input as a symbol. */
929 	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
930 		trace_probe_log_set_index(1);
931 		/* Check whether uprobe event specified */
932 		if (strchr(argv[1], '/') && strchr(argv[1], ':'))
933 			return -ECANCELED;
934 
935 		/* a symbol specified */
936 		symbol = kstrdup(argv[1], GFP_KERNEL);
937 		if (!symbol)
938 			return -ENOMEM;
939 
940 		tmp = strchr(symbol, '%');
941 		if (tmp) {
942 			if (!strcmp(tmp, "%return")) {
943 				*tmp = '\0';
944 				is_return = true;
945 			} else {
946 				trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
947 				return -EINVAL;
948 			}
949 		}
950 
951 		/* TODO: support .init module functions */
952 		ret = traceprobe_split_symbol_offset(symbol, &offset);
953 		if (ret || offset < 0 || offset > UINT_MAX) {
954 			trace_probe_log_err(0, BAD_PROBE_ADDR);
955 			return -EINVAL;
956 		}
957 		ret = validate_probe_symbol(symbol);
958 		if (ret) {
959 			if (ret == -EADDRNOTAVAIL)
960 				trace_probe_log_err(0, NON_UNIQ_SYMBOL);
961 			else
962 				trace_probe_log_err(0, BAD_PROBE_ADDR);
963 			return -EINVAL;
964 		}
965 		if (is_return)
966 			ctx->flags |= TPARG_FL_RETURN;
967 		ret = kprobe_on_func_entry(NULL, symbol, offset);
968 		if (ret == 0 && !is_return)
969 			ctx->flags |= TPARG_FL_FENTRY;
970 		/* Defer the ENOENT case until register kprobe */
971 		if (ret == -EINVAL && is_return) {
972 			trace_probe_log_err(0, BAD_RETPROBE);
973 			return -EINVAL;
974 		}
975 	}
976 
977 	trace_probe_log_set_index(0);
978 	if (event) {
979 		ret = traceprobe_parse_event_name(&event, &group, gbuf,
980 						  event - argv[0]);
981 		if (ret)
982 			return ret;
983 	}
984 
985 	if (!event) {
986 		/* Make a new event name */
987 		if (symbol)
988 			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
989 				 is_return ? 'r' : 'p', symbol, offset);
990 		else
991 			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
992 				 is_return ? 'r' : 'p', addr);
993 		sanitize_event_name(buf);
994 		event = buf;
995 	}
996 
997 	argc -= 2; argv += 2;
998 	ctx->funcname = symbol;
999 	new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
1000 					       abuf, MAX_BTF_ARGS_LEN, ctx);
1001 	if (IS_ERR(new_argv)) {
1002 		ret = PTR_ERR(new_argv);
1003 		new_argv = NULL;
1004 		return ret;
1005 	}
1006 	if (new_argv) {
1007 		argc = new_argc;
1008 		argv = new_argv;
1009 	}
1010 	if (argc > MAX_TRACE_ARGS)
1011 		return -E2BIG;
1012 
1013 	ret = traceprobe_expand_dentry_args(argc, argv, &dbuf);
1014 	if (ret)
1015 		return ret;
1016 
1017 	/* setup a probe */
1018 	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
1019 				argc, is_return);
1020 	if (IS_ERR(tk)) {
1021 		ret = PTR_ERR(tk);
1022 		/* This must return -ENOMEM, else there is a bug */
1023 		WARN_ON_ONCE(ret != -ENOMEM);
1024 		return ret;	/* We know tk is not allocated */
1025 	}
1026 
1027 	/* parse arguments */
1028 	for (i = 0; i < argc; i++) {
1029 		trace_probe_log_set_index(i + 2);
1030 		ctx->offset = 0;
1031 		ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], ctx);
1032 		if (ret)
1033 			return ret;	/* This can be -ENOMEM */
1034 	}
1035 	/* entry handler for kretprobe */
1036 	if (is_return && tk->tp.entry_arg) {
1037 		tk->rp.entry_handler = trace_kprobe_entry_handler;
1038 		tk->rp.data_size = traceprobe_get_entry_data_size(&tk->tp);
1039 	}
1040 
1041 	ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1042 	ret = traceprobe_set_print_fmt(&tk->tp, ptype);
1043 	if (ret < 0)
1044 		return ret;
1045 
1046 	ret = register_trace_kprobe(tk);
1047 	if (ret) {
1048 		trace_probe_log_set_index(1);
1049 		if (ret == -EILSEQ)
1050 			trace_probe_log_err(0, BAD_INSN_BNDRY);
1051 		else if (ret == -ENOENT)
1052 			trace_probe_log_err(0, BAD_PROBE_ADDR);
1053 		else if (ret != -ENOMEM && ret != -EEXIST)
1054 			trace_probe_log_err(0, FAIL_REG_PROBE);
1055 		return ret;
1056 	}
1057 	/*
1058 	 * Here, 'tk' has been registered to the list successfully,
1059 	 * so we don't need to free it.
1060 	 */
1061 	tk = NULL;
1062 
1063 	return 0;
1064 }
1065 
1066 static int trace_kprobe_create_cb(int argc, const char *argv[])
1067 {
1068 	struct traceprobe_parse_context ctx = { .flags = TPARG_FL_KERNEL };
1069 	int ret;
1070 
1071 	trace_probe_log_init("trace_kprobe", argc, argv);
1072 
1073 	ret = trace_kprobe_create_internal(argc, argv, &ctx);
1074 
1075 	traceprobe_finish_parse(&ctx);
1076 	trace_probe_log_clear();
1077 	return ret;
1078 }
1079 
1080 static int trace_kprobe_create(const char *raw_command)
1081 {
1082 	return trace_probe_create(raw_command, trace_kprobe_create_cb);
1083 }
1084 
1085 static int create_or_delete_trace_kprobe(const char *raw_command)
1086 {
1087 	int ret;
1088 
1089 	if (raw_command[0] == '-')
1090 		return dyn_event_release(raw_command, &trace_kprobe_ops);
1091 
1092 	ret = trace_kprobe_create(raw_command);
1093 	return ret == -ECANCELED ? -EINVAL : ret;
1094 }
1095 
1096 static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
1097 {
1098 	return create_or_delete_trace_kprobe(cmd->seq.buffer);
1099 }
1100 
1101 /**
1102  * kprobe_event_cmd_init - Initialize a kprobe event command object
1103  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1104  * @buf: A pointer to the buffer used to build the command
1105  * @maxlen: The length of the buffer passed in @buf
1106  *
1107  * Initialize a kprobe event command object.  Use this before
1108  * calling any of the other kprobe_event functions.
1109  */
1110 void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1111 {
1112 	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
1113 			  trace_kprobe_run_command);
1114 }
1115 EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
1116 
1117 /**
1118  * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
1119  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1120  * @kretprobe: Is this a return probe?
1121  * @name: The name of the kprobe event
1122  * @loc: The location of the kprobe event
1123  * @...: Variable number of arg pairs, one pair for each field
1124  *
1125  * NOTE: Users normally won't want to call this function directly, but
1126  * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
1127  * adds a NULL to the end of the arg list.  If this function is used
1128  * directly, make sure the last arg in the variable arg list is NULL.
1129  *
1130  * Generate a kprobe event command to be executed by
1131  * kprobe_event_gen_cmd_end().  This function can be used to generate the
1132  * complete command or only the first part of it; in the latter case,
1133  * kprobe_event_add_fields() can be used to add more fields following this.
1134  *
1135  * Unlike synth_event_gen_cmd_start(), @loc must be specified here. This
1136  * returns -EINVAL if @loc == NULL.
1137  *
1138  * Return: 0 if successful, error otherwise.
1139  */
1140 int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
1141 				 const char *name, const char *loc, ...)
1142 {
1143 	char buf[MAX_EVENT_NAME_LEN];
1144 	struct dynevent_arg arg;
1145 	va_list args;
1146 	int ret;
1147 
1148 	if (cmd->type != DYNEVENT_TYPE_KPROBE)
1149 		return -EINVAL;
1150 
1151 	if (!loc)
1152 		return -EINVAL;
1153 
1154 	if (kretprobe)
1155 		snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
1156 	else
1157 		snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
1158 
1159 	ret = dynevent_str_add(cmd, buf);
1160 	if (ret)
1161 		return ret;
1162 
1163 	dynevent_arg_init(&arg, 0);
1164 	arg.str = loc;
1165 	ret = dynevent_arg_add(cmd, &arg, NULL);
1166 	if (ret)
1167 		return ret;
1168 
1169 	va_start(args, loc);
1170 	for (;;) {
1171 		const char *field;
1172 
1173 		field = va_arg(args, const char *);
1174 		if (!field)
1175 			break;
1176 
1177 		if (++cmd->n_fields > MAX_TRACE_ARGS) {
1178 			ret = -EINVAL;
1179 			break;
1180 		}
1181 
1182 		arg.str = field;
1183 		ret = dynevent_arg_add(cmd, &arg, NULL);
1184 		if (ret)
1185 			break;
1186 	}
1187 	va_end(args);
1188 
1189 	return ret;
1190 }
1191 EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
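
/*
 * A minimal in-kernel usage sketch for this command API (modeled on
 * kernel/trace/kprobe_event_gen_test.c; the event name, symbol and
 * x86 register fetchargs below are illustrative):
 *
 *	char buf[MAX_DYNEVENT_CMD_LEN];
 *	struct dynevent_cmd cmd;
 *	int ret;
 *
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	ret = kprobe_event_gen_cmd_start(&cmd, "myprobe", "do_sys_open",
 *					 "dfd=%ax", "filename=%dx");
 *	if (!ret)
 *		ret = kprobe_event_add_fields(&cmd, "flags=%cx");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 */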
1192 
1193 /**
1194  * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
1195  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1196  * @...: Variable number of arg pairs, one pair for each field
1197  *
1198  * NOTE: Users normally won't want to call this function directly, but
1199  * rather use the kprobe_event_add_fields() wrapper, which
1200  * automatically adds a NULL to the end of the arg list.  If this
1201  * function is used directly, make sure the last arg in the variable
1202  * arg list is NULL.
1203  *
1204  * Add probe fields to an existing kprobe command using a variable
1205  * list of args.  Fields are added in the same order they're listed.
1206  *
1207  * Return: 0 if successful, error otherwise.
1208  */
1209 int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
1210 {
1211 	struct dynevent_arg arg;
1212 	va_list args;
1213 	int ret = 0;
1214 
1215 	if (cmd->type != DYNEVENT_TYPE_KPROBE)
1216 		return -EINVAL;
1217 
1218 	dynevent_arg_init(&arg, 0);
1219 
1220 	va_start(args, cmd);
1221 	for (;;) {
1222 		const char *field;
1223 
1224 		field = va_arg(args, const char *);
1225 		if (!field)
1226 			break;
1227 
1228 		if (++cmd->n_fields > MAX_TRACE_ARGS) {
1229 			ret = -EINVAL;
1230 			break;
1231 		}
1232 
1233 		arg.str = field;
1234 		ret = dynevent_arg_add(cmd, &arg, NULL);
1235 		if (ret)
1236 			break;
1237 	}
1238 	va_end(args);
1239 
1240 	return ret;
1241 }
1242 EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
1243 
1244 /**
1245  * kprobe_event_delete - Delete a kprobe event
1246  * @name: The name of the kprobe event to delete
1247  *
1248  * Delete a kprobe event with the given @name from kernel code rather
1249  * than directly from the command line.
1250  *
1251  * Return: 0 if successful, error otherwise.
1252  */
1253 int kprobe_event_delete(const char *name)
1254 {
1255 	char buf[MAX_EVENT_NAME_LEN];
1256 
1257 	snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
1258 
1259 	return create_or_delete_trace_kprobe(buf);
1260 }
1261 EXPORT_SYMBOL_GPL(kprobe_event_delete);
1262 
1263 static int trace_kprobe_release(struct dyn_event *ev)
1264 {
1265 	struct trace_kprobe *tk = to_trace_kprobe(ev);
1266 	int ret = unregister_trace_kprobe(tk);
1267 
1268 	if (!ret)
1269 		free_trace_kprobe(tk);
1270 	return ret;
1271 }
1272 
1273 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
1274 {
1275 	struct trace_kprobe *tk = to_trace_kprobe(ev);
1276 	int i;
1277 
1278 	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
1279 	if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
1280 		seq_printf(m, "%d", tk->rp.maxactive);
1281 	seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
1282 				trace_probe_name(&tk->tp));
1283 
1284 	if (!tk->symbol)
1285 		seq_printf(m, " 0x%p", tk->rp.kp.addr);
1286 	else if (tk->rp.kp.offset)
1287 		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
1288 			   tk->rp.kp.offset);
1289 	else
1290 		seq_printf(m, " %s", trace_kprobe_symbol(tk));
1291 
1292 	for (i = 0; i < tk->tp.nr_args; i++)
1293 		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
1294 	seq_putc(m, '\n');
1295 
1296 	return 0;
1297 }
1298 
1299 static int probes_seq_show(struct seq_file *m, void *v)
1300 {
1301 	struct dyn_event *ev = v;
1302 
1303 	if (!is_trace_kprobe(ev))
1304 		return 0;
1305 
1306 	return trace_kprobe_show(m, ev);
1307 }
1308 
1309 static const struct seq_operations probes_seq_op = {
1310 	.start  = dyn_event_seq_start,
1311 	.next   = dyn_event_seq_next,
1312 	.stop   = dyn_event_seq_stop,
1313 	.show   = probes_seq_show
1314 };
1315 
1316 static int probes_open(struct inode *inode, struct file *file)
1317 {
1318 	int ret;
1319 
1320 	ret = security_locked_down(LOCKDOWN_TRACEFS);
1321 	if (ret)
1322 		return ret;
1323 
1324 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1325 		ret = dyn_events_release_all(&trace_kprobe_ops);
1326 		if (ret < 0)
1327 			return ret;
1328 	}
1329 
1330 	return seq_open(file, &probes_seq_op);
1331 }
1332 
1333 static ssize_t probes_write(struct file *file, const char __user *buffer,
1334 			    size_t count, loff_t *ppos)
1335 {
1336 	return trace_parse_run_command(file, buffer, count, ppos,
1337 				       create_or_delete_trace_kprobe);
1338 }
1339 
1340 static const struct file_operations kprobe_events_ops = {
1341 	.owner          = THIS_MODULE,
1342 	.open           = probes_open,
1343 	.read           = seq_read,
1344 	.llseek         = seq_lseek,
1345 	.release        = seq_release,
1346 	.write		= probes_write,
1347 };
1348 
1349 static unsigned long trace_kprobe_missed(struct trace_kprobe *tk)
1350 {
1351 	return trace_kprobe_is_return(tk) ?
1352 		tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
1353 }
1354 
1355 /* Probes profiling interfaces */
1356 static int probes_profile_seq_show(struct seq_file *m, void *v)
1357 {
1358 	struct dyn_event *ev = v;
1359 	struct trace_kprobe *tk;
1360 	unsigned long nmissed;
1361 
1362 	if (!is_trace_kprobe(ev))
1363 		return 0;
1364 
1365 	tk = to_trace_kprobe(ev);
1366 	nmissed = trace_kprobe_missed(tk);
1367 	seq_printf(m, "  %-44s %15lu %15lu\n",
1368 		   trace_probe_name(&tk->tp),
1369 		   trace_kprobe_nhit(tk),
1370 		   nmissed);
1371 
1372 	return 0;
1373 }
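
/*
 * The resulting kprobe_profile line looks like (event name and counts
 * illustrative):
 *
 *   myprobe                                          117               0
 *
 * i.e. event name, hit count, then missed count.
 */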
1374 
1375 static const struct seq_operations profile_seq_op = {
1376 	.start  = dyn_event_seq_start,
1377 	.next   = dyn_event_seq_next,
1378 	.stop   = dyn_event_seq_stop,
1379 	.show   = probes_profile_seq_show
1380 };
1381 
1382 static int profile_open(struct inode *inode, struct file *file)
1383 {
1384 	int ret;
1385 
1386 	ret = security_locked_down(LOCKDOWN_TRACEFS);
1387 	if (ret)
1388 		return ret;
1389 
1390 	return seq_open(file, &profile_seq_op);
1391 }
1392 
1393 static const struct file_operations kprobe_profile_ops = {
1394 	.owner          = THIS_MODULE,
1395 	.open           = profile_open,
1396 	.read           = seq_read,
1397 	.llseek         = seq_lseek,
1398 	.release        = seq_release,
1399 };
1400 
1401 /* Note that we don't verify it, since the code does not come from user space */
1402 static int
1403 process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
1404 		   void *dest, void *base)
1405 {
1406 	struct pt_regs *regs = rec;
1407 	unsigned long val;
1408 	int ret;
1409 
1410 retry:
1411 	/* 1st stage: get value from context */
1412 	switch (code->op) {
1413 	case FETCH_OP_REG:
1414 		val = regs_get_register(regs, code->param);
1415 		break;
1416 	case FETCH_OP_STACK:
1417 		val = regs_get_kernel_stack_nth(regs, code->param);
1418 		break;
1419 	case FETCH_OP_STACKP:
1420 		val = kernel_stack_pointer(regs);
1421 		break;
1422 	case FETCH_OP_RETVAL:
1423 		val = regs_return_value(regs);
1424 		break;
1425 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1426 	case FETCH_OP_ARG:
1427 		val = regs_get_kernel_argument(regs, code->param);
1428 		break;
1429 	case FETCH_OP_EDATA:
1430 		val = *(unsigned long *)((unsigned long)edata + code->offset);
1431 		break;
1432 #endif
1433 	case FETCH_NOP_SYMBOL:	/* Ignore a placeholder */
1434 		code++;
1435 		goto retry;
1436 	default:
1437 		ret = process_common_fetch_insn(code, &val);
1438 		if (ret < 0)
1439 			return ret;
1440 	}
1441 	code++;
1442 
1443 	return process_fetch_insn_bottom(code, val, dest, base);
1444 }
1445 NOKPROBE_SYMBOL(process_fetch_insn)
1446 
1447 /* Kprobe handler */
1448 static nokprobe_inline void
1449 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1450 		    struct trace_event_file *trace_file)
1451 {
1452 	struct kprobe_trace_entry_head *entry;
1453 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1454 	struct trace_event_buffer fbuffer;
1455 	int dsize;
1456 
1457 	WARN_ON(call != trace_file->event_call);
1458 
1459 	if (trace_trigger_soft_disabled(trace_file))
1460 		return;
1461 
1462 	dsize = __get_data_size(&tk->tp, regs, NULL);
1463 
1464 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1465 					   sizeof(*entry) + tk->tp.size + dsize);
1466 	if (!entry)
1467 		return;
1468 
1469 	fbuffer.regs = regs;
1470 	entry->ip = (unsigned long)tk->rp.kp.addr;
1471 	store_trace_args(&entry[1], &tk->tp, regs, NULL, sizeof(*entry), dsize);
1472 
1473 	trace_event_buffer_commit(&fbuffer);
1474 }
1475 
1476 static void
1477 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1478 {
1479 	struct event_file_link *link;
1480 
1481 	trace_probe_for_each_link_rcu(link, &tk->tp)
1482 		__kprobe_trace_func(tk, regs, link->file);
1483 }
1484 NOKPROBE_SYMBOL(kprobe_trace_func);
1485 
1486 /* Kretprobe handler */
1487 
1488 static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
1489 				      struct pt_regs *regs)
1490 {
1491 	struct kretprobe *rp = get_kretprobe(ri);
1492 	struct trace_kprobe *tk;
1493 
1494 	/*
1495 	 * There is a small chance that get_kretprobe(ri) returns NULL when
1496 	 * the kretprobe is unregistered on another CPU between the kretprobe's
1497 	 * trampoline_handler and this function.
1498 	 */
1499 	if (unlikely(!rp))
1500 		return -ENOENT;
1501 
1502 	tk = container_of(rp, struct trace_kprobe, rp);
1503 
1504 	/* store argument values into ri->data as entry data */
1505 	if (tk->tp.entry_arg)
1506 		store_trace_entry_data(ri->data, &tk->tp, regs);
1507 
1508 	return 0;
1509 }
1510 
1511 
1512 static nokprobe_inline void
1513 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1514 		       struct pt_regs *regs,
1515 		       struct trace_event_file *trace_file)
1516 {
1517 	struct kretprobe_trace_entry_head *entry;
1518 	struct trace_event_buffer fbuffer;
1519 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1520 	int dsize;
1521 
1522 	WARN_ON(call != trace_file->event_call);
1523 
1524 	if (trace_trigger_soft_disabled(trace_file))
1525 		return;
1526 
1527 	dsize = __get_data_size(&tk->tp, regs, ri->data);
1528 
1529 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1530 					   sizeof(*entry) + tk->tp.size + dsize);
1531 	if (!entry)
1532 		return;
1533 
1534 	fbuffer.regs = regs;
1535 	entry->func = (unsigned long)tk->rp.kp.addr;
1536 	entry->ret_ip = get_kretprobe_retaddr(ri);
1537 	store_trace_args(&entry[1], &tk->tp, regs, ri->data, sizeof(*entry), dsize);
1538 
1539 	trace_event_buffer_commit(&fbuffer);
1540 }
1541 
1542 static void
1543 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1544 		     struct pt_regs *regs)
1545 {
1546 	struct event_file_link *link;
1547 
1548 	trace_probe_for_each_link_rcu(link, &tk->tp)
1549 		__kretprobe_trace_func(tk, ri, regs, link->file);
1550 }
1551 NOKPROBE_SYMBOL(kretprobe_trace_func);
1552 
1553 /* Event entry printers */
1554 static enum print_line_t
1555 print_kprobe_event(struct trace_iterator *iter, int flags,
1556 		   struct trace_event *event)
1557 {
1558 	struct kprobe_trace_entry_head *field;
1559 	struct trace_seq *s = &iter->seq;
1560 	struct trace_probe *tp;
1561 
1562 	field = (struct kprobe_trace_entry_head *)iter->ent;
1563 	tp = trace_probe_primary_from_call(
1564 		container_of(event, struct trace_event_call, event));
1565 	if (WARN_ON_ONCE(!tp))
1566 		goto out;
1567 
1568 	trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1569 
1570 	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1571 		goto out;
1572 
1573 	trace_seq_putc(s, ')');
1574 
1575 	if (trace_probe_print_args(s, tp->args, tp->nr_args,
1576 			     (u8 *)&field[1], field) < 0)
1577 		goto out;
1578 
1579 	trace_seq_putc(s, '\n');
1580  out:
1581 	return trace_handle_return(s);
1582 }
1583 
1584 static enum print_line_t
1585 print_kretprobe_event(struct trace_iterator *iter, int flags,
1586 		      struct trace_event *event)
1587 {
1588 	struct kretprobe_trace_entry_head *field;
1589 	struct trace_seq *s = &iter->seq;
1590 	struct trace_probe *tp;
1591 
1592 	field = (struct kretprobe_trace_entry_head *)iter->ent;
1593 	tp = trace_probe_primary_from_call(
1594 		container_of(event, struct trace_event_call, event));
1595 	if (WARN_ON_ONCE(!tp))
1596 		goto out;
1597 
1598 	trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1599 
1600 	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1601 		goto out;
1602 
1603 	trace_seq_puts(s, " <- ");
1604 
1605 	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1606 		goto out;
1607 
1608 	trace_seq_putc(s, ')');
1609 
1610 	if (trace_probe_print_args(s, tp->args, tp->nr_args,
1611 			     (u8 *)&field[1], field) < 0)
1612 		goto out;
1613 
1614 	trace_seq_putc(s, '\n');
1615 
1616  out:
1617 	return trace_handle_return(s);
1618 }
1619 
1620 
1621 static int kprobe_event_define_fields(struct trace_event_call *event_call)
1622 {
1623 	int ret;
1624 	struct kprobe_trace_entry_head field;
1625 	struct trace_probe *tp;
1626 
1627 	tp = trace_probe_primary_from_call(event_call);
1628 	if (WARN_ON_ONCE(!tp))
1629 		return -ENOENT;
1630 
1631 	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1632 
1633 	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1634 }
1635 
1636 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1637 {
1638 	int ret;
1639 	struct kretprobe_trace_entry_head field;
1640 	struct trace_probe *tp;
1641 
1642 	tp = trace_probe_primary_from_call(event_call);
1643 	if (WARN_ON_ONCE(!tp))
1644 		return -ENOENT;
1645 
1646 	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1647 	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1648 
1649 	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1650 }
1651 
1652 #ifdef CONFIG_PERF_EVENTS
1653 
1654 /* Kprobe profile handler */
1655 static int
1656 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1657 {
1658 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1659 	struct kprobe_trace_entry_head *entry;
1660 	struct hlist_head *head;
1661 	int size, __size, dsize;
1662 	int rctx;
1663 
1664 	if (bpf_prog_array_valid(call)) {
1665 		unsigned long orig_ip = instruction_pointer(regs);
1666 		int ret;
1667 
1668 		ret = trace_call_bpf(call, regs);
1669 
1670 		/*
1671 		 * We need to check and see if we modified the pc of the
1672 		 * pt_regs, and if so return 1 so that we don't do the
1673 		 * single stepping.
1674 		 */
1675 		if (orig_ip != instruction_pointer(regs))
1676 			return 1;
1677 		if (!ret)
1678 			return 0;
1679 	}
1680 
1681 	head = this_cpu_ptr(call->perf_events);
1682 	if (hlist_empty(head))
1683 		return 0;
1684 
1685 	dsize = __get_data_size(&tk->tp, regs, NULL);
1686 	__size = sizeof(*entry) + tk->tp.size + dsize;
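	/*
	 * Perf prepends a u32 size header to raw sample data; round the
	 * payload size so that header plus payload stays u64-aligned (a
	 * common perf buffer convention, noted here for clarity).
	 */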
1687 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1688 	size -= sizeof(u32);
1689 
1690 	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1691 	if (!entry)
1692 		return 0;
1693 
1694 	entry->ip = (unsigned long)tk->rp.kp.addr;
1695 	memset(&entry[1], 0, dsize);
1696 	store_trace_args(&entry[1], &tk->tp, regs, NULL, sizeof(*entry), dsize);
1697 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1698 			      head, NULL);
1699 	return 0;
1700 }
1701 NOKPROBE_SYMBOL(kprobe_perf_func);
1702 
1703 /* Kretprobe profile handler */
1704 static void
1705 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1706 		    struct pt_regs *regs)
1707 {
1708 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1709 	struct kretprobe_trace_entry_head *entry;
1710 	struct hlist_head *head;
1711 	int size, __size, dsize;
1712 	int rctx;
1713 
1714 	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1715 		return;
1716 
1717 	head = this_cpu_ptr(call->perf_events);
1718 	if (hlist_empty(head))
1719 		return;
1720 
1721 	dsize = __get_data_size(&tk->tp, regs, ri->data);
1722 	__size = sizeof(*entry) + tk->tp.size + dsize;
1723 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1724 	size -= sizeof(u32);
1725 
1726 	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1727 	if (!entry)
1728 		return;
1729 
1730 	entry->func = (unsigned long)tk->rp.kp.addr;
1731 	entry->ret_ip = get_kretprobe_retaddr(ri);
1732 	store_trace_args(&entry[1], &tk->tp, regs, ri->data, sizeof(*entry), dsize);
1733 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1734 			      head, NULL);
1735 }
1736 NOKPROBE_SYMBOL(kretprobe_perf_func);
1737 
1738 int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1739 			const char **symbol, u64 *probe_offset,
1740 			u64 *probe_addr, unsigned long *missed,
1741 			bool perf_type_tracepoint)
1742 {
1743 	const char *pevent = trace_event_name(event->tp_event);
1744 	const char *group = event->tp_event->class->system;
1745 	struct trace_kprobe *tk;
1746 
1747 	if (perf_type_tracepoint)
1748 		tk = find_trace_kprobe(pevent, group);
1749 	else
1750 		tk = trace_kprobe_primary_from_call(event->tp_event);
1751 	if (!tk)
1752 		return -EINVAL;
1753 
1754 	*fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1755 					      : BPF_FD_TYPE_KPROBE;
1756 	*probe_offset = tk->rp.kp.offset;
1757 	*probe_addr = kallsyms_show_value(current_cred()) ?
1758 		      (unsigned long)tk->rp.kp.addr : 0;
1759 	*symbol = tk->symbol;
1760 	if (missed)
1761 		*missed = trace_kprobe_missed(tk);
1762 	return 0;
1763 }
1764 #endif	/* CONFIG_PERF_EVENTS */
1765 
1766 /*
1767  * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1768  *
1769  * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
1770  * lockless, but we can't race with this __init function.
1771  */
1772 static int kprobe_register(struct trace_event_call *event,
1773 			   enum trace_reg type, void *data)
1774 {
1775 	struct trace_event_file *file = data;
1776 
1777 	switch (type) {
1778 	case TRACE_REG_REGISTER:
1779 		return enable_trace_kprobe(event, file);
1780 	case TRACE_REG_UNREGISTER:
1781 		return disable_trace_kprobe(event, file);
1782 
1783 #ifdef CONFIG_PERF_EVENTS
1784 	case TRACE_REG_PERF_REGISTER:
1785 		return enable_trace_kprobe(event, NULL);
1786 	case TRACE_REG_PERF_UNREGISTER:
1787 		return disable_trace_kprobe(event, NULL);
1788 	case TRACE_REG_PERF_OPEN:
1789 	case TRACE_REG_PERF_CLOSE:
1790 	case TRACE_REG_PERF_ADD:
1791 	case TRACE_REG_PERF_DEL:
1792 		return 0;
1793 #endif
1794 	}
1795 	return 0;
1796 }
1797 
1798 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1799 {
1800 	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1801 	int ret = 0;
1802 
1803 	raw_cpu_inc(*tk->nhit);
1804 
1805 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1806 		kprobe_trace_func(tk, regs);
1807 #ifdef CONFIG_PERF_EVENTS
1808 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1809 		ret = kprobe_perf_func(tk, regs);
1810 #endif
1811 	return ret;
1812 }
1813 NOKPROBE_SYMBOL(kprobe_dispatcher);
1814 
1815 static int
1816 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1817 {
1818 	struct kretprobe *rp = get_kretprobe(ri);
1819 	struct trace_kprobe *tk;
1820 
1821 	/*
1822 	 * There is a small chance that get_kretprobe(ri) returns NULL when
1823 	 * the kretprobe is unregistered on another CPU between kretprobe's
1824 	 * trampoline_handler and this function.
1825 	 */
1826 	if (unlikely(!rp))
1827 		return 0;
1828 
1829 	tk = container_of(rp, struct trace_kprobe, rp);
1830 	raw_cpu_inc(*tk->nhit);
1831 
1832 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1833 		kretprobe_trace_func(tk, ri, regs);
1834 #ifdef CONFIG_PERF_EVENTS
1835 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1836 		kretprobe_perf_func(tk, ri, regs);
1837 #endif
1838 	return 0;	/* We don't modify kernel execution, so just return 0 */
1839 }
1840 NOKPROBE_SYMBOL(kretprobe_dispatcher);
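/*
 * NOKPROBE_SYMBOL() places the dispatchers above in the kprobe blacklist
 * so they cannot themselves be probed; a probe inside the kprobe handling
 * path would recurse.
 */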
1841 
1842 static struct trace_event_functions kretprobe_funcs = {
1843 	.trace		= print_kretprobe_event
1844 };
1845 
1846 static struct trace_event_functions kprobe_funcs = {
1847 	.trace		= print_kprobe_event
1848 };
1849 
1850 static struct trace_event_fields kretprobe_fields_array[] = {
1851 	{ .type = TRACE_FUNCTION_TYPE,
1852 	  .define_fields = kretprobe_event_define_fields },
1853 	{}
1854 };
1855 
1856 static struct trace_event_fields kprobe_fields_array[] = {
1857 	{ .type = TRACE_FUNCTION_TYPE,
1858 	  .define_fields = kprobe_event_define_fields },
1859 	{}
1860 };
1861 
1862 static inline void init_trace_event_call(struct trace_kprobe *tk)
1863 {
1864 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1865 
1866 	if (trace_kprobe_is_return(tk)) {
1867 		call->event.funcs = &kretprobe_funcs;
1868 		call->class->fields_array = kretprobe_fields_array;
1869 	} else {
1870 		call->event.funcs = &kprobe_funcs;
1871 		call->class->fields_array = kprobe_fields_array;
1872 	}
1873 
1874 	call->flags = TRACE_EVENT_FL_KPROBE;
1875 	call->class->reg = kprobe_register;
1876 }
1877 
1878 static int register_kprobe_event(struct trace_kprobe *tk)
1879 {
1880 	init_trace_event_call(tk);
1881 
1882 	return trace_probe_register_event_call(&tk->tp);
1883 }
1884 
1885 static int unregister_kprobe_event(struct trace_kprobe *tk)
1886 {
1887 	return trace_probe_unregister_event_call(&tk->tp);
1888 }
1889 
1890 #ifdef CONFIG_PERF_EVENTS
1891 
1892 /* create a trace_kprobe, but don't add it to global lists */
1893 struct trace_event_call *
1894 create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1895 			  bool is_return)
1896 {
1897 	enum probe_print_type ptype;
1898 	struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
1899 	int ret;
1900 	char *event;
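	/*
	 * Cleanup idiom (linux/cleanup.h): the __free(free_trace_kprobe)
	 * annotation above frees tk automatically on every early return;
	 * no_free_ptr() at the end transfers ownership to the caller and
	 * disarms that cleanup.
	 */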
1901 
1902 	if (func) {
1903 		ret = validate_probe_symbol(func);
1904 		if (ret)
1905 			return ERR_PTR(ret);
1906 	}
1907 
1908 	/*
1909 	 * local trace_kprobes are not added to dyn_event, so they are never
1910 	 * found by find_trace_kprobe(). Therefore, duplicate event names are
1911 	 * not a concern here.
1912 	 */
1913 	event = func ? func : "DUMMY_EVENT";
1914 
1915 	tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1916 				offs, 0 /* maxactive */, 0 /* nargs */,
1917 				is_return);
1918 
1919 	if (IS_ERR(tk)) {
1920 		pr_info("Failed to allocate trace_probe (%d)\n",
1921 			(int)PTR_ERR(tk));
1922 		return ERR_CAST(tk);
1923 	}
1924 
1925 	init_trace_event_call(tk);
1926 
1927 	ptype = trace_kprobe_is_return(tk) ?
1928 		PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1929 	if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0)
1930 		return ERR_PTR(-ENOMEM);
1931 
1932 	ret = __register_trace_kprobe(tk);
1933 	if (ret < 0)
1934 		return ERR_PTR(ret);
1935 
1936 	return trace_probe_event_call(&(no_free_ptr(tk)->tp));
1937 }
1938 
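/*
 * A usage sketch (assuming the perf_kprobe PMU path in
 * kernel/trace/trace_event_perf.c): perf_event_open() on the "kprobe" PMU
 * creates its event via create_local_trace_kprobe() above and tears it
 * down via destroy_local_trace_kprobe() below, never touching the tracefs
 * interface.
 */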
1939 void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1940 {
1941 	struct trace_kprobe *tk;
1942 
1943 	tk = trace_kprobe_primary_from_call(event_call);
1944 	if (unlikely(!tk))
1945 		return;
1946 
1947 	if (trace_probe_is_enabled(&tk->tp)) {
1948 		WARN_ON(1);
1949 		return;
1950 	}
1951 
1952 	__unregister_trace_kprobe(tk);
1953 
1954 	free_trace_kprobe(tk);
1955 }
1956 #endif /* CONFIG_PERF_EVENTS */
1957 
1958 static __init void enable_boot_kprobe_events(void)
1959 {
1960 	struct trace_array *tr = top_trace_array();
1961 	struct trace_event_file *file;
1962 	struct trace_kprobe *tk;
1963 	struct dyn_event *pos;
1964 
1965 	guard(mutex)(&event_mutex);
1966 	for_each_trace_kprobe(tk, pos) {
1967 		list_for_each_entry(file, &tr->events, list)
1968 			if (file->event_call == trace_probe_event_call(&tk->tp))
1969 				trace_event_enable_disable(file, 1, 0);
1970 	}
1971 }
1972 
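/*
 * Boot-time definitions arrive via the kprobe_event= parameter. A sketch
 * of the accepted form (see Documentation/admin-guide/kernel-parameters.txt):
 * spaces are written as commas and definitions are separated by semicolons,
 * e.g.
 *
 *   kprobe_event=p,vfs_read,$arg1;r:myretprobe,vfs_read,$retval
 *
 * setup_boot_kprobe_events() turns the commas back into spaces and feeds
 * each ';'-separated definition to create_or_delete_trace_kprobe().
 */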
1973 static __init void setup_boot_kprobe_events(void)
1974 {
1975 	char *p, *cmd = kprobe_boot_events_buf;
1976 	int ret;
1977 
1978 	strreplace(kprobe_boot_events_buf, ',', ' ');
1979 
1980 	while (cmd && *cmd != '\0') {
1981 		p = strchr(cmd, ';');
1982 		if (p)
1983 			*p++ = '\0';
1984 
1985 		ret = create_or_delete_trace_kprobe(cmd);
1986 		if (ret)
1987 			pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1988 
1989 		cmd = p;
1990 	}
1991 
1992 	enable_boot_kprobe_events();
1993 }
1994 
1995 /*
1996  * Register dynevent at core_initcall. This allows kernel to setup kprobe
1997  * events in postcore_initcall without tracefs.
1998  */
1999 static __init int init_kprobe_trace_early(void)
2000 {
2001 	int ret;
2002 
2003 	ret = dyn_event_register(&trace_kprobe_ops);
2004 	if (ret)
2005 		return ret;
2006 
2007 	if (trace_kprobe_register_module_notifier())
2008 		return -EINVAL;
2009 
2010 	return 0;
2011 }
2012 core_initcall(init_kprobe_trace_early);
2013 
2014 /* Make a tracefs interface for controlling probe points */
2015 static __init int init_kprobe_trace(void)
2016 {
2017 	int ret;
2018 
2019 	ret = tracing_init_dentry();
2020 	if (ret)
2021 		return 0;
2022 
2023 	/* Event list interface */
2024 	trace_create_file("kprobe_events", TRACE_MODE_WRITE,
2025 			  NULL, NULL, &kprobe_events_ops);
2026 
2027 	/* Profile interface */
2028 	trace_create_file("kprobe_profile", TRACE_MODE_READ,
2029 			  NULL, NULL, &kprobe_profile_ops);
2030 
2031 	setup_boot_kprobe_events();
2032 
2033 	return 0;
2034 }
2035 fs_initcall(init_kprobe_trace);
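/*
 * Usage sketch for the tracefs files created above (assuming tracefs is
 * mounted at /sys/kernel/tracing, per Documentation/trace/kprobetrace.rst):
 *
 *   # add a probe and a return probe on do_sys_openat2()
 *   echo 'p:myopen do_sys_openat2 dfd=$arg1' >> /sys/kernel/tracing/kprobe_events
 *   echo 'r:myopen_ret do_sys_openat2 $retval' >> /sys/kernel/tracing/kprobe_events
 *   # show per-probe hit and miss counts
 *   cat /sys/kernel/tracing/kprobe_profile
 *   # clear all kprobe events
 *   echo > /sys/kernel/tracing/kprobe_events
 */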
2036 
2037 
2038 #ifdef CONFIG_FTRACE_STARTUP_TEST
2039 static __init struct trace_event_file *
2040 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
2041 {
2042 	struct trace_event_file *file;
2043 
2044 	list_for_each_entry(file, &tr->events, list)
2045 		if (file->event_call == trace_probe_event_call(&tk->tp))
2046 			return file;
2047 
2048 	return NULL;
2049 }
2050 
2051 /*
2052  * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
2053  * stage, so we can do this locklessly.
2054  */
2055 static __init int kprobe_trace_self_tests_init(void)
2056 {
2057 	int ret, warn = 0;
2058 	int (*target)(int, int, int, int, int, int);
2059 	struct trace_kprobe *tk;
2060 	struct trace_event_file *file;
2061 
2062 	if (tracing_is_disabled())
2063 		return -ENODEV;
2064 
2065 	if (tracing_selftest_disabled)
2066 		return 0;
2067 
2068 	target = kprobe_trace_selftest_target;
2069 
2070 	pr_info("Testing kprobe tracing: ");
2071 
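	/*
	 * Fetch-arg syntax used in the definitions below (per
	 * Documentation/trace/kprobetrace.rst): $stack fetches the stack
	 * address, $stackN the Nth entry of the stack, and +0($stack)
	 * dereferences the word at the top of the stack.
	 */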
2072 	ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
2073 	if (WARN_ONCE(ret, "error on probing function entry.")) {
2074 		warn++;
2075 	} else {
2076 		/* Enable trace point */
2077 		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
2078 		if (WARN_ONCE(tk == NULL, "error on getting new probe.")) {
2079 			warn++;
2080 		} else {
2081 			file = find_trace_probe_file(tk, top_trace_array());
2082 			if (WARN_ONCE(file == NULL, "error on getting probe file.")) {
2083 				warn++;
2084 			} else
2085 				enable_trace_kprobe(
2086 					trace_probe_event_call(&tk->tp), file);
2087 		}
2088 	}
2089 
2090 	ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
2091 	if (WARN_ONCE(ret, "error on probing function return.")) {
2092 		warn++;
2093 	} else {
2094 		/* Enable trace point */
2095 		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2096 		if (WARN_ONCE(tk == NULL, "error on getting 2nd new probe.")) {
2097 			warn++;
2098 		} else {
2099 			file = find_trace_probe_file(tk, top_trace_array());
2100 			if (WARN_ONCE(file == NULL, "error on getting probe file.")) {
2101 				warn++;
2102 			} else
2103 				enable_trace_kprobe(
2104 					trace_probe_event_call(&tk->tp), file);
2105 		}
2106 	}
2107 
2108 	if (warn)
2109 		goto end;
2110 
2111 	ret = target(1, 2, 3, 4, 5, 6);
2112 
2113 	/*
2114 	 * No error is expected: the selftest target returns the sum of its
2115 	 * six arguments (1+2+3+4+5+6 == 21). Checking the result also keeps
2116 	 * the optimizer from eliding the otherwise side-effect-free call.
2117 	 */
2118 	if (ret != 21)
2119 		warn++;
2120 
2121 	/* Disable trace points before removing it */
2122 	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
2123 	if (WARN_ONCE(tk == NULL, "error on getting test probe.")) {
2124 		warn++;
2125 	} else {
2126 		if (WARN_ONCE(trace_kprobe_nhit(tk) != 1,
2127 				 "incorrect number of testprobe hits."))
2128 			warn++;
2129 
2130 		file = find_trace_probe_file(tk, top_trace_array());
2131 		if (WARN_ONCE(file == NULL, "error on getting probe file.")) {
2132 			warn++;
2133 		} else
2134 			disable_trace_kprobe(
2135 				trace_probe_event_call(&tk->tp), file);
2136 	}
2137 
2138 	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2139 	if (WARN_ONCE(tk == NULL, "error on getting 2nd test probe.")) {
2140 		warn++;
2141 	} else {
2142 		if (WARN_ONCE(trace_kprobe_nhit(tk) != 1,
2143 				 "incorrect number of testprobe2 hits."))
2144 			warn++;
2145 
2146 		file = find_trace_probe_file(tk, top_trace_array());
2147 		if (WARN_ONCE(file == NULL, "error on getting probe file.")) {
2148 			warn++;
2149 		} else
2150 			disable_trace_kprobe(
2151 				trace_probe_event_call(&tk->tp), file);
2152 	}
2153 
2154 	ret = create_or_delete_trace_kprobe("-:testprobe");
2155 	if (WARN_ONCE(ret, "error on deleting a probe."))
2156 		warn++;
2157 
2158 	ret = create_or_delete_trace_kprobe("-:testprobe2");
2159 	if (WARN_ONCE(ret, "error on deleting a probe."))
2160 		warn++;
2161 
2162 
2163 end:
2164 	/*
2165 	 * Wait for the optimizer work to finish. Otherwise it might fiddle
2166 	 * with probes in already freed __init text.
2167 	 */
2168 	wait_for_kprobe_optimizer();
2169 	if (warn)
2170 		pr_cont("NG: Some tests failed. Please check them.\n");
2171 	else
2172 		pr_cont("OK\n");
2173 	return 0;
2174 }
2175 
2176 late_initcall(kprobe_trace_self_tests_init);
2177 
2178 #endif	/* CONFIG_FTRACE_STARTUP_TEST */
2179