xref: /linux/kernel/trace/trace_uprobe.c (revision 544521d6217fb7846b746ada9d70f308f078aa7e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * uprobes-based tracing events
4  *
5  * Copyright (C) IBM Corporation, 2010-2012
6  * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7  */
8 #define pr_fmt(fmt)	"trace_uprobe: " fmt
9 
10 #include <linux/bpf-cgroup.h>
11 #include <linux/security.h>
12 #include <linux/ctype.h>
13 #include <linux/module.h>
14 #include <linux/uaccess.h>
15 #include <linux/uprobes.h>
16 #include <linux/namei.h>
17 #include <linux/string.h>
18 #include <linux/rculist.h>
19 #include <linux/filter.h>
20 #include <linux/percpu.h>
21 
22 #include "trace_dynevent.h"
23 #include "trace_probe.h"
24 #include "trace_probe_tmpl.h"
25 
26 #define UPROBE_EVENT_SYSTEM	"uprobes"
27 
28 struct uprobe_trace_entry_head {
29 	struct trace_entry	ent;
30 	unsigned long		vaddr[];
31 };
32 
33 #define SIZEOF_TRACE_ENTRY(is_return)			\
34 	(sizeof(struct uprobe_trace_entry_head) +	\
35 	 sizeof(unsigned long) * (is_return ? 2 : 1))
36 
37 #define DATAOF_TRACE_ENTRY(entry, is_return)		\
38 	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
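/*
 * Layout sketch (illustrative, derived from the macros above): an entry
 * probe record is [trace_entry][vaddr[0] = probed IP][argument data],
 * while a return probe record is [trace_entry][vaddr[0] = function]
 * [vaddr[1] = return address][argument data]. DATAOF_TRACE_ENTRY() simply
 * skips the header plus the one or two vaddr slots to reach the argument
 * data.
 */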
39 
40 static int trace_uprobe_create(const char *raw_command);
41 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
42 static int trace_uprobe_release(struct dyn_event *ev);
43 static bool trace_uprobe_is_busy(struct dyn_event *ev);
44 static bool trace_uprobe_match(const char *system, const char *event,
45 			int argc, const char **argv, struct dyn_event *ev);
46 
47 static struct dyn_event_operations trace_uprobe_ops = {
48 	.create = trace_uprobe_create,
49 	.show = trace_uprobe_show,
50 	.is_busy = trace_uprobe_is_busy,
51 	.free = trace_uprobe_release,
52 	.match = trace_uprobe_match,
53 };
54 
55 /*
56  * uprobe event core functions
57  */
58 struct trace_uprobe {
59 	struct dyn_event		devent;
60 	struct uprobe_consumer		consumer;
61 	struct path			path;
62 	char				*filename;
63 	struct uprobe			*uprobe;
64 	unsigned long			offset;
65 	unsigned long			ref_ctr_offset;
66 	unsigned long __percpu		*nhits;
67 	struct trace_probe		tp;
68 };
69 
70 static bool is_trace_uprobe(struct dyn_event *ev)
71 {
72 	return ev->ops == &trace_uprobe_ops;
73 }
74 
75 static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
76 {
77 	return container_of(ev, struct trace_uprobe, devent);
78 }
79 
80 /**
81  * for_each_trace_uprobe - iterate over the trace_uprobe list
82  * @pos:	the struct trace_uprobe * for each entry
83  * @dpos:	the struct dyn_event * to use as a loop cursor
84  */
85 #define for_each_trace_uprobe(pos, dpos)	\
86 	for_each_dyn_event(dpos)		\
87 		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
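/*
 * Typical walk (illustrative; mirrors find_probe_event() below), normally
 * done under event_mutex like other dyn_event list traversals:
 *
 *	struct dyn_event *pos;
 *	struct trace_uprobe *tu;
 *
 *	for_each_trace_uprobe(tu, pos)
 *		pr_info("%s\n", trace_probe_name(&tu->tp));
 */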
88 
89 static int register_uprobe_event(struct trace_uprobe *tu);
90 static int unregister_uprobe_event(struct trace_uprobe *tu);
91 
92 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
93 			     __u64 *data);
94 static int uretprobe_dispatcher(struct uprobe_consumer *con,
95 				unsigned long func, struct pt_regs *regs,
96 				__u64 *data);
97 
98 #ifdef CONFIG_STACK_GROWSUP
99 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
100 {
101 	return addr - (n * sizeof(long));
102 }
103 #else
104 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
105 {
106 	return addr + (n * sizeof(long));
107 }
108 #endif
109 
110 static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
111 {
112 	unsigned long ret;
113 	unsigned long addr = user_stack_pointer(regs);
114 
115 	addr = adjust_stack_addr(addr, n);
116 
117 	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
118 		return 0;
119 
120 	return ret;
121 }
122 
123 /*
124  * Uprobes-specific fetch functions
125  */
126 static nokprobe_inline int
127 probe_mem_read(void *dest, void *src, size_t size)
128 {
129 	void __user *vaddr = (void __force __user *)src;
130 
131 	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
132 }
133 
134 static nokprobe_inline int
135 probe_mem_read_user(void *dest, void *src, size_t size)
136 {
137 	return probe_mem_read(dest, src, size);
138 }
139 
140 /*
141  * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
142  * length and relative data location.
143  */
144 static nokprobe_inline int
145 fetch_store_string(unsigned long addr, void *dest, void *base)
146 {
147 	long ret;
148 	u32 loc = *(u32 *)dest;
149 	int maxlen  = get_loc_len(loc);
150 	u8 *dst = get_loc_data(dest, base);
151 	void __user *src = (void __force __user *) addr;
152 
153 	if (unlikely(!maxlen))
154 		return -ENOMEM;
155 
156 	if (addr == FETCH_TOKEN_COMM)
157 		ret = strscpy(dst, current->comm, maxlen);
158 	else
159 		ret = strncpy_from_user(dst, src, maxlen);
160 	if (ret >= 0) {
161 		if (ret == maxlen)
162 			dst[ret - 1] = '\0';
163 		else
164 			/*
165 			 * Include the terminating null byte. In this case it
166 			 * was copied by strncpy_from_user but not accounted
167 			 * for in ret.
168 			 */
169 			ret++;
170 		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
171 	} else
172 		*(u32 *)dest = make_data_loc(0, (void *)dst - base);
173 
174 	return ret;
175 }
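/*
 * A sketch of the data_loc convention assumed above (make_data_loc() and
 * friends are defined in trace_probe.h, not here): the u32 at *dest packs
 * the string length in the upper 16 bits and the offset of the string data,
 * relative to 'base', in the lower 16 bits. For example, a 6-byte "hello\0"
 * stored 24 bytes past 'base' would be recorded roughly as (6 << 16) | 24.
 */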
176 
177 static nokprobe_inline int
178 fetch_store_string_user(unsigned long addr, void *dest, void *base)
179 {
180 	return fetch_store_string(addr, dest, base);
181 }
182 
183 /* Return the length of the string, including the terminating null byte */
184 static nokprobe_inline int
185 fetch_store_strlen(unsigned long addr)
186 {
187 	int len;
188 	void __user *vaddr = (void __force __user *) addr;
189 
190 	if (addr == FETCH_TOKEN_COMM)
191 		len = strlen(current->comm) + 1;
192 	else
193 		len = strnlen_user(vaddr, MAX_STRING_SIZE);
194 
195 	return (len > MAX_STRING_SIZE) ? 0 : len;
196 }
197 
198 static nokprobe_inline int
199 fetch_store_strlen_user(unsigned long addr)
200 {
201 	return fetch_store_strlen(addr);
202 }
203 
204 static unsigned long translate_user_vaddr(unsigned long file_offset)
205 {
206 	unsigned long base_addr;
207 	struct uprobe_dispatch_data *udd;
208 
209 	udd = (void *) current->utask->vaddr;
210 
211 	base_addr = udd->bp_addr - udd->tu->offset;
212 	return base_addr + file_offset;
213 }
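/*
 * Illustration: this backs the "@+OFFSET" form of a fetch argument, which
 * reads memory at a file offset rather than an absolute address. The load
 * base of the probed mapping is recovered from the breakpoint address that
 * the dispatchers below stash in current->utask->vaddr (bp_addr - tu->offset),
 * and the user-supplied file offset is rebased onto it.
 */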
214 
215 /* Note that we don't verify it, since the code does not come from user space */
216 static int
217 process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
218 		   void *dest, void *base)
219 {
220 	struct pt_regs *regs = rec;
221 	unsigned long val;
222 	int ret;
223 
224 	/* 1st stage: get value from context */
225 	switch (code->op) {
226 	case FETCH_OP_REG:
227 		val = regs_get_register(regs, code->param);
228 		break;
229 	case FETCH_OP_STACK:
230 		val = get_user_stack_nth(regs, code->param);
231 		break;
232 	case FETCH_OP_STACKP:
233 		val = user_stack_pointer(regs);
234 		break;
235 	case FETCH_OP_RETVAL:
236 		val = regs_return_value(regs);
237 		break;
238 	case FETCH_OP_COMM:
239 		val = FETCH_TOKEN_COMM;
240 		break;
241 	case FETCH_OP_FOFFS:
242 		val = translate_user_vaddr(code->immediate);
243 		break;
244 	default:
245 		ret = process_common_fetch_insn(code, &val);
246 		if (ret < 0)
247 			return ret;
248 	}
249 	code++;
250 
251 	return process_fetch_insn_bottom(code, val, dest, base);
252 }
253 NOKPROBE_SYMBOL(process_fetch_insn)
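/*
 * Rough illustration of the two-stage fetch above: a command argument such
 * as "%ax" is compiled (by traceprobe_parse_probe_arg()) into a FETCH_OP_REG
 * instruction handled here, "$stack0" into FETCH_OP_STACK, and "$retval"
 * into FETCH_OP_RETVAL; dereferences and string copies are then resolved by
 * process_fetch_insn_bottom() in trace_probe_tmpl.h.
 */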
254 
255 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
256 {
257 	rwlock_init(&filter->rwlock);
258 	filter->nr_systemwide = 0;
259 	INIT_LIST_HEAD(&filter->perf_events);
260 }
261 
262 static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
263 {
264 	return !filter->nr_systemwide && list_empty(&filter->perf_events);
265 }
266 
267 static inline bool is_ret_probe(struct trace_uprobe *tu)
268 {
269 	return tu->consumer.ret_handler != NULL;
270 }
271 
272 static bool trace_uprobe_is_busy(struct dyn_event *ev)
273 {
274 	struct trace_uprobe *tu = to_trace_uprobe(ev);
275 
276 	return trace_probe_is_enabled(&tu->tp);
277 }
278 
279 static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
280 					    int argc, const char **argv)
281 {
282 	char buf[MAX_ARGSTR_LEN + 1];
283 	int len;
284 
285 	if (!argc)
286 		return true;
287 
288 	len = strlen(tu->filename);
289 	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
290 		return false;
291 
292 	if (tu->ref_ctr_offset == 0)
293 		snprintf(buf, sizeof(buf), "0x%0*lx",
294 				(int)(sizeof(void *) * 2), tu->offset);
295 	else
296 		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
297 				(int)(sizeof(void *) * 2), tu->offset,
298 				tu->ref_ctr_offset);
299 	if (strcmp(buf, &argv[0][len + 1]))
300 		return false;
301 
302 	argc--; argv++;
303 
304 	return trace_probe_match_command_args(&tu->tp, argc, argv);
305 }
306 
307 static bool trace_uprobe_match(const char *system, const char *event,
308 			int argc, const char **argv, struct dyn_event *ev)
309 {
310 	struct trace_uprobe *tu = to_trace_uprobe(ev);
311 
312 	return (event[0] == '\0' ||
313 		strcmp(trace_probe_name(&tu->tp), event) == 0) &&
314 	   (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
315 	   trace_uprobe_match_command_head(tu, argc, argv);
316 }
317 
318 static nokprobe_inline struct trace_uprobe *
319 trace_uprobe_primary_from_call(struct trace_event_call *call)
320 {
321 	struct trace_probe *tp;
322 
323 	tp = trace_probe_primary_from_call(call);
324 	if (WARN_ON_ONCE(!tp))
325 		return NULL;
326 
327 	return container_of(tp, struct trace_uprobe, tp);
328 }
329 
330 /*
331  * Allocate new trace_uprobe and initialize it (including uprobes).
332  */
333 static struct trace_uprobe *
334 alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
335 {
336 	struct trace_uprobe *tu;
337 	int ret;
338 
339 	tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
340 	if (!tu)
341 		return ERR_PTR(-ENOMEM);
342 
343 	tu->nhits = alloc_percpu(unsigned long);
344 	if (!tu->nhits) {
345 		ret = -ENOMEM;
346 		goto error;
347 	}
348 
349 	ret = trace_probe_init(&tu->tp, event, group, true, nargs);
350 	if (ret < 0)
351 		goto error;
352 
353 	dyn_event_init(&tu->devent, &trace_uprobe_ops);
354 	tu->consumer.handler = uprobe_dispatcher;
355 	if (is_ret)
356 		tu->consumer.ret_handler = uretprobe_dispatcher;
357 	init_trace_uprobe_filter(tu->tp.event->filter);
358 	return tu;
359 
360 error:
361 	free_percpu(tu->nhits);
362 	kfree(tu);
363 
364 	return ERR_PTR(ret);
365 }
366 
367 static void free_trace_uprobe(struct trace_uprobe *tu)
368 {
369 	if (!tu)
370 		return;
371 
372 	path_put(&tu->path);
373 	trace_probe_cleanup(&tu->tp);
374 	kfree(tu->filename);
375 	free_percpu(tu->nhits);
376 	kfree(tu);
377 }
378 
379 static struct trace_uprobe *find_probe_event(const char *event, const char *group)
380 {
381 	struct dyn_event *pos;
382 	struct trace_uprobe *tu;
383 
384 	for_each_trace_uprobe(tu, pos)
385 		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
386 		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
387 			return tu;
388 
389 	return NULL;
390 }
391 
392 /* Unregister a trace_uprobe and probe_event */
393 static int unregister_trace_uprobe(struct trace_uprobe *tu)
394 {
395 	int ret;
396 
397 	if (trace_probe_has_sibling(&tu->tp))
398 		goto unreg;
399 
400 	/* If there's a reference to the dynamic event */
401 	if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
402 		return -EBUSY;
403 
404 	ret = unregister_uprobe_event(tu);
405 	if (ret)
406 		return ret;
407 
408 unreg:
409 	dyn_event_remove(&tu->devent);
410 	trace_probe_unlink(&tu->tp);
411 	free_trace_uprobe(tu);
412 	return 0;
413 }
414 
415 static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
416 					 struct trace_uprobe *comp)
417 {
418 	struct trace_probe_event *tpe = orig->tp.event;
419 	struct inode *comp_inode = d_real_inode(comp->path.dentry);
420 	int i;
421 
422 	list_for_each_entry(orig, &tpe->probes, tp.list) {
423 		if (comp_inode != d_real_inode(orig->path.dentry) ||
424 		    comp->offset != orig->offset)
425 			continue;
426 
427 		/*
428 		 * trace_probe_compare_arg_type() ensured that nr_args and
429 		 * each argument name and type are the same. Let's compare comm.
430 		 */
431 		for (i = 0; i < orig->tp.nr_args; i++) {
432 			if (strcmp(orig->tp.args[i].comm,
433 				   comp->tp.args[i].comm))
434 				break;
435 		}
436 
437 		if (i == orig->tp.nr_args)
438 			return true;
439 	}
440 
441 	return false;
442 }
443 
444 static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
445 {
446 	int ret;
447 
448 	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
449 	if (ret) {
450 		/* Note that arguments start at index 2 */
451 		trace_probe_log_set_index(ret + 1);
452 		trace_probe_log_err(0, DIFF_ARG_TYPE);
453 		return -EEXIST;
454 	}
455 	if (trace_uprobe_has_same_uprobe(to, tu)) {
456 		trace_probe_log_set_index(0);
457 		trace_probe_log_err(0, SAME_PROBE);
458 		return -EEXIST;
459 	}
460 
461 	/* Append to existing event */
462 	ret = trace_probe_append(&tu->tp, &to->tp);
463 	if (!ret)
464 		dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
465 
466 	return ret;
467 }
468 
469 /*
470  * A uprobe with multiple reference counters is not allowed, i.e.
471  * if the inode and offset match, the reference counter offset
472  * *must* match as well. There is one exception: if the user is
473  * replacing an old trace_uprobe with a new one (same group/event),
474  * then we allow the same uprobe with a new reference counter as
475  * long as the new one does not conflict with any other existing
476  * ones.
477  */
478 static int validate_ref_ctr_offset(struct trace_uprobe *new)
479 {
480 	struct dyn_event *pos;
481 	struct trace_uprobe *tmp;
482 	struct inode *new_inode = d_real_inode(new->path.dentry);
483 
484 	for_each_trace_uprobe(tmp, pos) {
485 		if (new_inode == d_real_inode(tmp->path.dentry) &&
486 		    new->offset == tmp->offset &&
487 		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
488 			pr_warn("Reference counter offset mismatch.");
489 			return -EINVAL;
490 		}
491 	}
492 	return 0;
493 }
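/*
 * For example (path and offsets are made up): after
 * "p /usr/lib/libfoo.so:0x100(0x200)" has been added, a second probe on the
 * same inode and offset that specifies a different counter, e.g. "(0x300)",
 * is rejected here with -EINVAL.
 */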
494 
495 /* Register a trace_uprobe and probe_event */
496 static int register_trace_uprobe(struct trace_uprobe *tu)
497 {
498 	struct trace_uprobe *old_tu;
499 	int ret;
500 
501 	guard(mutex)(&event_mutex);
502 
503 	ret = validate_ref_ctr_offset(tu);
504 	if (ret)
505 		return ret;
506 
507 	/* register as an event */
508 	old_tu = find_probe_event(trace_probe_name(&tu->tp),
509 				  trace_probe_group_name(&tu->tp));
510 	if (old_tu) {
511 		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
512 			trace_probe_log_set_index(0);
513 			trace_probe_log_err(0, DIFF_PROBE_TYPE);
514 			return -EEXIST;
515 		}
516 		return append_trace_uprobe(tu, old_tu);
517 	}
518 
519 	ret = register_uprobe_event(tu);
520 	if (ret) {
521 		if (ret == -EEXIST) {
522 			trace_probe_log_set_index(0);
523 			trace_probe_log_err(0, EVENT_EXIST);
524 		} else
525 			pr_warn("Failed to register probe event(%d)\n", ret);
526 		return ret;
527 	}
528 
529 	dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
530 
531 	return ret;
532 }
533 
534 /*
535  * Argument syntax:
536  *  - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS]
537  */
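/*
 * Illustrative commands (hypothetical path, offsets and fetch args), written
 * to the uprobe_events file under tracefs:
 *
 *   p:my_open /bin/bash:0x4245c0 %ax $stack0
 *   r:my_open_ret /bin/bash:0x4245c0 $retval
 *
 * The first adds an entry probe with two fetch arguments, the second a
 * return probe capturing the return value.
 */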
538 static int __trace_uprobe_create(int argc, const char **argv)
539 {
540 	struct trace_uprobe *tu;
541 	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
542 	char *arg, *filename, *rctr, *rctr_end, *tmp;
543 	char buf[MAX_EVENT_NAME_LEN];
544 	char gbuf[MAX_EVENT_NAME_LEN];
545 	enum probe_print_type ptype;
546 	struct path path;
547 	unsigned long offset, ref_ctr_offset;
548 	bool is_return = false;
549 	int i, ret;
550 
551 	ref_ctr_offset = 0;
552 
553 	switch (argv[0][0]) {
554 	case 'r':
555 		is_return = true;
556 		break;
557 	case 'p':
558 		break;
559 	default:
560 		return -ECANCELED;
561 	}
562 
563 	if (argc < 2)
564 		return -ECANCELED;
565 	if (argc - 2 > MAX_TRACE_ARGS)
566 		return -E2BIG;
567 
568 	if (argv[0][1] == ':')
569 		event = &argv[0][2];
570 
571 	if (!strchr(argv[1], '/'))
572 		return -ECANCELED;
573 
574 	filename = kstrdup(argv[1], GFP_KERNEL);
575 	if (!filename)
576 		return -ENOMEM;
577 
578 	/* Find the last occurrence, in case the path contains ':' too. */
579 	arg = strrchr(filename, ':');
580 	if (!arg || !isdigit(arg[1])) {
581 		kfree(filename);
582 		return -ECANCELED;
583 	}
584 
585 	trace_probe_log_init("trace_uprobe", argc, argv);
586 	trace_probe_log_set_index(1);	/* filename is the 2nd argument */
587 
588 	*arg++ = '\0';
589 	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
590 	if (ret) {
591 		trace_probe_log_err(0, FILE_NOT_FOUND);
592 		kfree(filename);
593 		trace_probe_log_clear();
594 		return ret;
595 	}
596 	if (!d_is_reg(path.dentry)) {
597 		trace_probe_log_err(0, NO_REGULAR_FILE);
598 		ret = -EINVAL;
599 		goto fail_address_parse;
600 	}
601 
602 	/* Parse reference counter offset if specified. */
603 	rctr = strchr(arg, '(');
604 	if (rctr) {
605 		rctr_end = strchr(rctr, ')');
606 		if (!rctr_end) {
607 			ret = -EINVAL;
608 			rctr_end = rctr + strlen(rctr);
609 			trace_probe_log_err(rctr_end - filename,
610 					    REFCNT_OPEN_BRACE);
611 			goto fail_address_parse;
612 		} else if (rctr_end[1] != '\0') {
613 			ret = -EINVAL;
614 			trace_probe_log_err(rctr_end + 1 - filename,
615 					    BAD_REFCNT_SUFFIX);
616 			goto fail_address_parse;
617 		}
618 
619 		*rctr++ = '\0';
620 		*rctr_end = '\0';
621 		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
622 		if (ret) {
623 			trace_probe_log_err(rctr - filename, BAD_REFCNT);
624 			goto fail_address_parse;
625 		}
626 	}
627 
628 	/* Check if there is a %return suffix */
629 	tmp = strchr(arg, '%');
630 	if (tmp) {
631 		if (!strcmp(tmp, "%return")) {
632 			*tmp = '\0';
633 			is_return = true;
634 		} else {
635 			trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
636 			ret = -EINVAL;
637 			goto fail_address_parse;
638 		}
639 	}
640 
641 	/* Parse uprobe offset. */
642 	ret = kstrtoul(arg, 0, &offset);
643 	if (ret) {
644 		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
645 		goto fail_address_parse;
646 	}
647 
648 	/* setup a probe */
649 	trace_probe_log_set_index(0);
650 	if (event) {
651 		ret = traceprobe_parse_event_name(&event, &group, gbuf,
652 						  event - argv[0]);
653 		if (ret)
654 			goto fail_address_parse;
655 	}
656 
657 	if (!event) {
658 		char *tail;
659 		char *ptr;
660 
661 		tail = kstrdup(kbasename(filename), GFP_KERNEL);
662 		if (!tail) {
663 			ret = -ENOMEM;
664 			goto fail_address_parse;
665 		}
666 
667 		ptr = strpbrk(tail, ".-_");
668 		if (ptr)
669 			*ptr = '\0';
670 
671 		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
672 		event = buf;
673 		kfree(tail);
674 	}
675 
676 	argc -= 2;
677 	argv += 2;
678 
679 	tu = alloc_trace_uprobe(group, event, argc, is_return);
680 	if (IS_ERR(tu)) {
681 		ret = PTR_ERR(tu);
682 		/* This must return -ENOMEM otherwise there is a bug */
683 		WARN_ON_ONCE(ret != -ENOMEM);
684 		goto fail_address_parse;
685 	}
686 	tu->offset = offset;
687 	tu->ref_ctr_offset = ref_ctr_offset;
688 	tu->path = path;
689 	tu->filename = filename;
690 
691 	/* parse arguments */
692 	for (i = 0; i < argc; i++) {
693 		struct traceprobe_parse_context ctx = {
694 			.flags = (is_return ? TPARG_FL_RETURN : 0) | TPARG_FL_USER,
695 		};
696 
697 		trace_probe_log_set_index(i + 2);
698 		ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], &ctx);
699 		traceprobe_finish_parse(&ctx);
700 		if (ret)
701 			goto error;
702 	}
703 
704 	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
705 	ret = traceprobe_set_print_fmt(&tu->tp, ptype);
706 	if (ret < 0)
707 		goto error;
708 
709 	ret = register_trace_uprobe(tu);
710 	if (!ret)
711 		goto out;
712 
713 error:
714 	free_trace_uprobe(tu);
715 out:
716 	trace_probe_log_clear();
717 	return ret;
718 
719 fail_address_parse:
720 	trace_probe_log_clear();
721 	path_put(&path);
722 	kfree(filename);
723 
724 	return ret;
725 }
726 
727 int trace_uprobe_create(const char *raw_command)
728 {
729 	return trace_probe_create(raw_command, __trace_uprobe_create);
730 }
731 
732 static int create_or_delete_trace_uprobe(const char *raw_command)
733 {
734 	int ret;
735 
736 	if (raw_command[0] == '-')
737 		return dyn_event_release(raw_command, &trace_uprobe_ops);
738 
739 	ret = trace_uprobe_create(raw_command);
740 	return ret == -ECANCELED ? -EINVAL : ret;
741 }
742 
743 static int trace_uprobe_release(struct dyn_event *ev)
744 {
745 	struct trace_uprobe *tu = to_trace_uprobe(ev);
746 
747 	return unregister_trace_uprobe(tu);
748 }
749 
750 /* Probes listing interfaces */
751 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
752 {
753 	struct trace_uprobe *tu = to_trace_uprobe(ev);
754 	char c = is_ret_probe(tu) ? 'r' : 'p';
755 	int i;
756 
757 	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
758 			trace_probe_name(&tu->tp), tu->filename,
759 			(int)(sizeof(void *) * 2), tu->offset);
760 
761 	if (tu->ref_ctr_offset)
762 		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
763 
764 	for (i = 0; i < tu->tp.nr_args; i++)
765 		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
766 
767 	seq_putc(m, '\n');
768 	return 0;
769 }
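/*
 * Example of the line produced above (values are illustrative):
 *
 *   p:uprobes/my_open /bin/bash:0x00000000004245c0 arg1=%ax
 *
 * i.e. probe type, group/event, filename:offset (with an optional
 * "(ref_ctr_offset)" suffix), then "name=expr" for each argument.
 */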
770 
771 static int probes_seq_show(struct seq_file *m, void *v)
772 {
773 	struct dyn_event *ev = v;
774 
775 	if (!is_trace_uprobe(ev))
776 		return 0;
777 
778 	return trace_uprobe_show(m, ev);
779 }
780 
781 static const struct seq_operations probes_seq_op = {
782 	.start  = dyn_event_seq_start,
783 	.next   = dyn_event_seq_next,
784 	.stop   = dyn_event_seq_stop,
785 	.show   = probes_seq_show
786 };
787 
788 static int probes_open(struct inode *inode, struct file *file)
789 {
790 	int ret;
791 
792 	ret = security_locked_down(LOCKDOWN_TRACEFS);
793 	if (ret)
794 		return ret;
795 
796 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
797 		ret = dyn_events_release_all(&trace_uprobe_ops);
798 		if (ret)
799 			return ret;
800 	}
801 
802 	return seq_open(file, &probes_seq_op);
803 }
804 
805 static ssize_t probes_write(struct file *file, const char __user *buffer,
806 			    size_t count, loff_t *ppos)
807 {
808 	return trace_parse_run_command(file, buffer, count, ppos,
809 					create_or_delete_trace_uprobe);
810 }
811 
812 static const struct file_operations uprobe_events_ops = {
813 	.owner		= THIS_MODULE,
814 	.open		= probes_open,
815 	.read		= seq_read,
816 	.llseek		= seq_lseek,
817 	.release	= seq_release,
818 	.write		= probes_write,
819 };
820 
821 /* Probes profiling interfaces */
822 static int probes_profile_seq_show(struct seq_file *m, void *v)
823 {
824 	struct dyn_event *ev = v;
825 	struct trace_uprobe *tu;
826 	unsigned long nhits;
827 	int cpu;
828 
829 	if (!is_trace_uprobe(ev))
830 		return 0;
831 
832 	tu = to_trace_uprobe(ev);
833 
834 	nhits = 0;
835 	for_each_possible_cpu(cpu) {
836 		nhits += per_cpu(*tu->nhits, cpu);
837 	}
838 
839 	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
840 		   trace_probe_name(&tu->tp), nhits);
841 	return 0;
842 }
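/*
 * Example uprobe_profile line (illustrative): filename, event name padded
 * to 44 columns, then the hit count summed over all possible CPUs:
 *
 *   /bin/bash my_open                                              27
 */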
843 
844 static const struct seq_operations profile_seq_op = {
845 	.start  = dyn_event_seq_start,
846 	.next   = dyn_event_seq_next,
847 	.stop   = dyn_event_seq_stop,
848 	.show	= probes_profile_seq_show
849 };
850 
851 static int profile_open(struct inode *inode, struct file *file)
852 {
853 	int ret;
854 
855 	ret = security_locked_down(LOCKDOWN_TRACEFS);
856 	if (ret)
857 		return ret;
858 
859 	return seq_open(file, &profile_seq_op);
860 }
861 
862 static const struct file_operations uprobe_profile_ops = {
863 	.owner		= THIS_MODULE,
864 	.open		= profile_open,
865 	.read		= seq_read,
866 	.llseek		= seq_lseek,
867 	.release	= seq_release,
868 };
869 
870 struct uprobe_cpu_buffer {
871 	struct mutex mutex;
872 	void *buf;
873 	int dsize;
874 };
875 static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
876 static int uprobe_buffer_refcnt;
877 #define MAX_UCB_BUFFER_SIZE PAGE_SIZE
878 
879 static int uprobe_buffer_init(void)
880 {
881 	int cpu, err_cpu;
882 
883 	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
884 	if (uprobe_cpu_buffer == NULL)
885 		return -ENOMEM;
886 
887 	for_each_possible_cpu(cpu) {
888 		struct page *p = alloc_pages_node(cpu_to_node(cpu),
889 						  GFP_KERNEL, 0);
890 		if (p == NULL) {
891 			err_cpu = cpu;
892 			goto err;
893 		}
894 		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
895 		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
896 	}
897 
898 	return 0;
899 
900 err:
901 	for_each_possible_cpu(cpu) {
902 		if (cpu == err_cpu)
903 			break;
904 		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
905 	}
906 
907 	free_percpu(uprobe_cpu_buffer);
908 	return -ENOMEM;
909 }
910 
911 static int uprobe_buffer_enable(void)
912 {
913 	int ret = 0;
914 
915 	BUG_ON(!mutex_is_locked(&event_mutex));
916 
917 	if (uprobe_buffer_refcnt++ == 0) {
918 		ret = uprobe_buffer_init();
919 		if (ret < 0)
920 			uprobe_buffer_refcnt--;
921 	}
922 
923 	return ret;
924 }
925 
926 static void uprobe_buffer_disable(void)
927 {
928 	int cpu;
929 
930 	BUG_ON(!mutex_is_locked(&event_mutex));
931 
932 	if (--uprobe_buffer_refcnt == 0) {
933 		for_each_possible_cpu(cpu)
934 			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
935 							     cpu)->buf);
936 
937 		free_percpu(uprobe_cpu_buffer);
938 		uprobe_cpu_buffer = NULL;
939 	}
940 }
941 
942 static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
943 {
944 	struct uprobe_cpu_buffer *ucb;
945 	int cpu;
946 
947 	cpu = raw_smp_processor_id();
948 	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
949 
950 	/*
951 	 * Use per-cpu buffers for fastest access, but we might migrate
952 	 * so the mutex makes sure we have sole access to it.
953 	 */
954 	mutex_lock(&ucb->mutex);
955 
956 	return ucb;
957 }
958 
959 static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
960 {
961 	if (!ucb)
962 		return;
963 	mutex_unlock(&ucb->mutex);
964 }
965 
966 static struct uprobe_cpu_buffer *prepare_uprobe_buffer(struct trace_uprobe *tu,
967 						       struct pt_regs *regs,
968 						       struct uprobe_cpu_buffer **ucbp)
969 {
970 	struct uprobe_cpu_buffer *ucb;
971 	int dsize, esize;
972 
973 	if (*ucbp)
974 		return *ucbp;
975 
976 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
977 	dsize = __get_data_size(&tu->tp, regs, NULL);
978 
979 	ucb = uprobe_buffer_get();
980 	ucb->dsize = tu->tp.size + dsize;
981 
982 	if (WARN_ON_ONCE(ucb->dsize > MAX_UCB_BUFFER_SIZE)) {
983 		ucb->dsize = MAX_UCB_BUFFER_SIZE;
984 		dsize = MAX_UCB_BUFFER_SIZE - tu->tp.size;
985 	}
986 
987 	store_trace_args(ucb->buf, &tu->tp, regs, NULL, esize, dsize);
988 
989 	*ucbp = ucb;
990 	return ucb;
991 }
992 
993 static void __uprobe_trace_func(struct trace_uprobe *tu,
994 				unsigned long func, struct pt_regs *regs,
995 				struct uprobe_cpu_buffer *ucb,
996 				struct trace_event_file *trace_file)
997 {
998 	struct uprobe_trace_entry_head *entry;
999 	struct trace_event_buffer fbuffer;
1000 	void *data;
1001 	int size, esize;
1002 	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1003 
1004 	WARN_ON(call != trace_file->event_call);
1005 
1006 	if (trace_trigger_soft_disabled(trace_file))
1007 		return;
1008 
1009 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1010 	size = esize + ucb->dsize;
1011 	entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
1012 	if (!entry)
1013 		return;
1014 
1015 	if (is_ret_probe(tu)) {
1016 		entry->vaddr[0] = func;
1017 		entry->vaddr[1] = instruction_pointer(regs);
1018 		data = DATAOF_TRACE_ENTRY(entry, true);
1019 	} else {
1020 		entry->vaddr[0] = instruction_pointer(regs);
1021 		data = DATAOF_TRACE_ENTRY(entry, false);
1022 	}
1023 
1024 	memcpy(data, ucb->buf, ucb->dsize);
1025 
1026 	trace_event_buffer_commit(&fbuffer);
1027 }
1028 
1029 /* uprobe handler */
1030 static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
1031 			     struct uprobe_cpu_buffer **ucbp)
1032 {
1033 	struct event_file_link *link;
1034 	struct uprobe_cpu_buffer *ucb;
1035 
1036 	if (is_ret_probe(tu))
1037 		return 0;
1038 
1039 	ucb = prepare_uprobe_buffer(tu, regs, ucbp);
1040 
1041 	rcu_read_lock();
1042 	trace_probe_for_each_link_rcu(link, &tu->tp)
1043 		__uprobe_trace_func(tu, 0, regs, ucb, link->file);
1044 	rcu_read_unlock();
1045 
1046 	return 0;
1047 }
1048 
1049 static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
1050 				 struct pt_regs *regs,
1051 				 struct uprobe_cpu_buffer **ucbp)
1052 {
1053 	struct event_file_link *link;
1054 	struct uprobe_cpu_buffer *ucb;
1055 
1056 	ucb = prepare_uprobe_buffer(tu, regs, ucbp);
1057 
1058 	rcu_read_lock();
1059 	trace_probe_for_each_link_rcu(link, &tu->tp)
1060 		__uprobe_trace_func(tu, func, regs, ucb, link->file);
1061 	rcu_read_unlock();
1062 }
1063 
1064 /* Event entry printers */
1065 static enum print_line_t
1066 print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
1067 {
1068 	struct uprobe_trace_entry_head *entry;
1069 	struct trace_seq *s = &iter->seq;
1070 	struct trace_uprobe *tu;
1071 	u8 *data;
1072 
1073 	entry = (struct uprobe_trace_entry_head *)iter->ent;
1074 	tu = trace_uprobe_primary_from_call(
1075 		container_of(event, struct trace_event_call, event));
1076 	if (unlikely(!tu))
1077 		goto out;
1078 
1079 	if (is_ret_probe(tu)) {
1080 		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
1081 				 trace_probe_name(&tu->tp),
1082 				 entry->vaddr[1], entry->vaddr[0]);
1083 		data = DATAOF_TRACE_ENTRY(entry, true);
1084 	} else {
1085 		trace_seq_printf(s, "%s: (0x%lx)",
1086 				 trace_probe_name(&tu->tp),
1087 				 entry->vaddr[0]);
1088 		data = DATAOF_TRACE_ENTRY(entry, false);
1089 	}
1090 
1091 	if (trace_probe_print_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1092 		goto out;
1093 
1094 	trace_seq_putc(s, '\n');
1095 
1096  out:
1097 	return trace_handle_return(s);
1098 }
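/*
 * Example rendering in the trace output (addresses are illustrative):
 *
 *   my_open: (0x4245c0) arg1=1
 *   my_open_ret: (0x55d3f0 <- 0x4245c0) arg1=0
 *
 * Return probes print "(return address <- probed function)", entry probes
 * print only the probed instruction pointer, each followed by the decoded
 * arguments.
 */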
1099 
1100 typedef bool (*filter_func_t)(struct uprobe_consumer *self, struct mm_struct *mm);
1101 
1102 static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
1103 {
1104 	struct inode *inode = d_real_inode(tu->path.dentry);
1105 	struct uprobe *uprobe;
1106 
1107 	tu->consumer.filter = filter;
1108 	uprobe = uprobe_register(inode, tu->offset, tu->ref_ctr_offset, &tu->consumer);
1109 	if (IS_ERR(uprobe))
1110 		return PTR_ERR(uprobe);
1111 
1112 	tu->uprobe = uprobe;
1113 	return 0;
1114 }
1115 
1116 static void __probe_event_disable(struct trace_probe *tp)
1117 {
1118 	struct trace_uprobe *tu;
1119 	bool sync = false;
1120 
1121 	tu = container_of(tp, struct trace_uprobe, tp);
1122 	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1123 
1124 	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1125 		if (!tu->uprobe)
1126 			continue;
1127 
1128 		uprobe_unregister_nosync(tu->uprobe, &tu->consumer);
1129 		sync = true;
1130 		tu->uprobe = NULL;
1131 	}
1132 	if (sync)
1133 		uprobe_unregister_sync();
1134 }
1135 
1136 static int probe_event_enable(struct trace_event_call *call,
1137 			struct trace_event_file *file, filter_func_t filter)
1138 {
1139 	struct trace_probe *tp;
1140 	struct trace_uprobe *tu;
1141 	bool enabled;
1142 	int ret;
1143 
1144 	tp = trace_probe_primary_from_call(call);
1145 	if (WARN_ON_ONCE(!tp))
1146 		return -ENODEV;
1147 	enabled = trace_probe_is_enabled(tp);
1148 
1149 	/* This may also change "enabled" state */
1150 	if (file) {
1151 		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
1152 			return -EINTR;
1153 
1154 		ret = trace_probe_add_file(tp, file);
1155 		if (ret < 0)
1156 			return ret;
1157 	} else {
1158 		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
1159 			return -EINTR;
1160 
1161 		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
1162 	}
1163 
1164 	tu = container_of(tp, struct trace_uprobe, tp);
1165 	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1166 
1167 	if (enabled)
1168 		return 0;
1169 
1170 	ret = uprobe_buffer_enable();
1171 	if (ret)
1172 		goto err_flags;
1173 
1174 	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1175 		ret = trace_uprobe_enable(tu, filter);
1176 		if (ret) {
1177 			__probe_event_disable(tp);
1178 			goto err_buffer;
1179 		}
1180 	}
1181 
1182 	return 0;
1183 
1184  err_buffer:
1185 	uprobe_buffer_disable();
1186 
1187  err_flags:
1188 	if (file)
1189 		trace_probe_remove_file(tp, file);
1190 	else
1191 		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1192 
1193 	return ret;
1194 }
1195 
1196 static void probe_event_disable(struct trace_event_call *call,
1197 				struct trace_event_file *file)
1198 {
1199 	struct trace_probe *tp;
1200 
1201 	tp = trace_probe_primary_from_call(call);
1202 	if (WARN_ON_ONCE(!tp))
1203 		return;
1204 
1205 	if (!trace_probe_is_enabled(tp))
1206 		return;
1207 
1208 	if (file) {
1209 		if (trace_probe_remove_file(tp, file) < 0)
1210 			return;
1211 
1212 		if (trace_probe_is_enabled(tp))
1213 			return;
1214 	} else
1215 		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1216 
1217 	__probe_event_disable(tp);
1218 	uprobe_buffer_disable();
1219 }
1220 
1221 static int uprobe_event_define_fields(struct trace_event_call *event_call)
1222 {
1223 	int ret, size;
1224 	struct uprobe_trace_entry_head field;
1225 	struct trace_uprobe *tu;
1226 
1227 	tu = trace_uprobe_primary_from_call(event_call);
1228 	if (unlikely(!tu))
1229 		return -ENODEV;
1230 
1231 	if (is_ret_probe(tu)) {
1232 		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1233 		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1234 		size = SIZEOF_TRACE_ENTRY(true);
1235 	} else {
1236 		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1237 		size = SIZEOF_TRACE_ENTRY(false);
1238 	}
1239 
1240 	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1241 }
1242 
1243 #ifdef CONFIG_PERF_EVENTS
1244 static bool
1245 __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1246 {
1247 	struct perf_event *event;
1248 
1249 	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1250 		if (event->hw.target->mm == mm)
1251 			return true;
1252 	}
1253 
1254 	return false;
1255 }
1256 
1257 static inline bool
1258 trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
1259 			  struct perf_event *event)
1260 {
1261 	return __uprobe_perf_filter(filter, event->hw.target->mm);
1262 }
1263 
1264 static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
1265 				       struct perf_event *event)
1266 {
1267 	bool done;
1268 
1269 	write_lock(&filter->rwlock);
1270 	if (event->hw.target) {
1271 		list_del(&event->hw.tp_list);
1272 		done = filter->nr_systemwide ||
1273 			(event->hw.target->flags & PF_EXITING) ||
1274 			trace_uprobe_filter_event(filter, event);
1275 	} else {
1276 		filter->nr_systemwide--;
1277 		done = filter->nr_systemwide;
1278 	}
1279 	write_unlock(&filter->rwlock);
1280 
1281 	return done;
1282 }
1283 
1284 /* This returns true if the filter always covers target mm */
1285 static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
1286 				    struct perf_event *event)
1287 {
1288 	bool done;
1289 
1290 	write_lock(&filter->rwlock);
1291 	if (event->hw.target) {
1292 		/*
1293 		 * event->parent != NULL means copy_process(), we can avoid
1294 		 * uprobe_apply(). current->mm must be probed and we can rely
1295 		 * on dup_mmap() which preserves the already installed bp's.
1296 		 *
1297 		 * attr.enable_on_exec means that exec/mmap will install the
1298 		 * breakpoints we need.
1299 		 */
1300 		done = filter->nr_systemwide ||
1301 			event->parent || event->attr.enable_on_exec ||
1302 			trace_uprobe_filter_event(filter, event);
1303 		list_add(&event->hw.tp_list, &filter->perf_events);
1304 	} else {
1305 		done = filter->nr_systemwide;
1306 		filter->nr_systemwide++;
1307 	}
1308 	write_unlock(&filter->rwlock);
1309 
1310 	return done;
1311 }
1312 
1313 static int uprobe_perf_close(struct trace_event_call *call,
1314 			     struct perf_event *event)
1315 {
1316 	struct trace_probe *tp;
1317 	struct trace_uprobe *tu;
1318 	int ret = 0;
1319 
1320 	tp = trace_probe_primary_from_call(call);
1321 	if (WARN_ON_ONCE(!tp))
1322 		return -ENODEV;
1323 
1324 	tu = container_of(tp, struct trace_uprobe, tp);
1325 	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
1326 		return 0;
1327 
1328 	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1329 		ret = uprobe_apply(tu->uprobe, &tu->consumer, false);
1330 		if (ret)
1331 			break;
1332 	}
1333 
1334 	return ret;
1335 }
1336 
1337 static int uprobe_perf_open(struct trace_event_call *call,
1338 			    struct perf_event *event)
1339 {
1340 	struct trace_probe *tp;
1341 	struct trace_uprobe *tu;
1342 	int err = 0;
1343 
1344 	tp = trace_probe_primary_from_call(call);
1345 	if (WARN_ON_ONCE(!tp))
1346 		return -ENODEV;
1347 
1348 	tu = container_of(tp, struct trace_uprobe, tp);
1349 	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
1350 		return 0;
1351 
1352 	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1353 		err = uprobe_apply(tu->uprobe, &tu->consumer, true);
1354 		if (err) {
1355 			uprobe_perf_close(call, event);
1356 			break;
1357 		}
1358 	}
1359 
1360 	return err;
1361 }
1362 
1363 static bool uprobe_perf_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
1364 {
1365 	struct trace_uprobe_filter *filter;
1366 	struct trace_uprobe *tu;
1367 	int ret;
1368 
1369 	tu = container_of(uc, struct trace_uprobe, consumer);
1370 	filter = tu->tp.event->filter;
1371 
1372 	/*
1373 	 * speculative short-circuiting check to avoid unnecessarily taking
1374 	 * filter->rwlock below, if the uprobe has a system-wide consumer
1375 	 */
1376 	if (READ_ONCE(filter->nr_systemwide))
1377 		return true;
1378 
1379 	read_lock(&filter->rwlock);
1380 	ret = __uprobe_perf_filter(filter, mm);
1381 	read_unlock(&filter->rwlock);
1382 
1383 	return ret;
1384 }
1385 
1386 static void __uprobe_perf_func(struct trace_uprobe *tu,
1387 			       unsigned long func, struct pt_regs *regs,
1388 			       struct uprobe_cpu_buffer **ucbp)
1389 {
1390 	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1391 	struct uprobe_trace_entry_head *entry;
1392 	struct uprobe_cpu_buffer *ucb;
1393 	struct hlist_head *head;
1394 	void *data;
1395 	int size, esize;
1396 	int rctx;
1397 
1398 #ifdef CONFIG_BPF_EVENTS
1399 	if (bpf_prog_array_valid(call)) {
1400 		const struct bpf_prog_array *array;
1401 		u32 ret;
1402 
1403 		rcu_read_lock_trace();
1404 		array = rcu_dereference_check(call->prog_array, rcu_read_lock_trace_held());
1405 		ret = bpf_prog_run_array_uprobe(array, regs, bpf_prog_run);
1406 		rcu_read_unlock_trace();
1407 		if (!ret)
1408 			return;
1409 	}
1410 #endif /* CONFIG_BPF_EVENTS */
1411 
1412 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1413 
1414 	ucb = prepare_uprobe_buffer(tu, regs, ucbp);
1415 	size = esize + ucb->dsize;
1416 	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1417 	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1418 		return;
1419 
1420 	preempt_disable();
1421 	head = this_cpu_ptr(call->perf_events);
1422 	if (hlist_empty(head))
1423 		goto out;
1424 
1425 	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1426 	if (!entry)
1427 		goto out;
1428 
1429 	if (is_ret_probe(tu)) {
1430 		entry->vaddr[0] = func;
1431 		entry->vaddr[1] = instruction_pointer(regs);
1432 		data = DATAOF_TRACE_ENTRY(entry, true);
1433 	} else {
1434 		entry->vaddr[0] = instruction_pointer(regs);
1435 		data = DATAOF_TRACE_ENTRY(entry, false);
1436 	}
1437 
1438 	memcpy(data, ucb->buf, ucb->dsize);
1439 
1440 	if (size - esize > ucb->dsize)
1441 		memset(data + ucb->dsize, 0, size - esize - ucb->dsize);
1442 
1443 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1444 			      head, NULL);
1445  out:
1446 	preempt_enable();
1447 }
1448 
1449 /* uprobe profile handler */
1450 static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1451 			    struct uprobe_cpu_buffer **ucbp)
1452 {
1453 	if (!uprobe_perf_filter(&tu->consumer, current->mm))
1454 		return UPROBE_HANDLER_REMOVE;
1455 
1456 	if (!is_ret_probe(tu))
1457 		__uprobe_perf_func(tu, 0, regs, ucbp);
1458 	return 0;
1459 }
1460 
1461 static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1462 				struct pt_regs *regs,
1463 				struct uprobe_cpu_buffer **ucbp)
1464 {
1465 	__uprobe_perf_func(tu, func, regs, ucbp);
1466 }
1467 
1468 int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1469 			const char **filename, u64 *probe_offset,
1470 			u64 *probe_addr, bool perf_type_tracepoint)
1471 {
1472 	const char *pevent = trace_event_name(event->tp_event);
1473 	const char *group = event->tp_event->class->system;
1474 	struct trace_uprobe *tu;
1475 
1476 	if (perf_type_tracepoint)
1477 		tu = find_probe_event(pevent, group);
1478 	else
1479 		tu = trace_uprobe_primary_from_call(event->tp_event);
1480 	if (!tu)
1481 		return -EINVAL;
1482 
1483 	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1484 				    : BPF_FD_TYPE_UPROBE;
1485 	*filename = tu->filename;
1486 	*probe_offset = tu->offset;
1487 	*probe_addr = 0;
1488 	return 0;
1489 }
1490 #endif	/* CONFIG_PERF_EVENTS */
1491 
1492 static int
1493 trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1494 		      void *data)
1495 {
1496 	struct trace_event_file *file = data;
1497 
1498 	switch (type) {
1499 	case TRACE_REG_REGISTER:
1500 		return probe_event_enable(event, file, NULL);
1501 
1502 	case TRACE_REG_UNREGISTER:
1503 		probe_event_disable(event, file);
1504 		return 0;
1505 
1506 #ifdef CONFIG_PERF_EVENTS
1507 	case TRACE_REG_PERF_REGISTER:
1508 		return probe_event_enable(event, NULL, uprobe_perf_filter);
1509 
1510 	case TRACE_REG_PERF_UNREGISTER:
1511 		probe_event_disable(event, NULL);
1512 		return 0;
1513 
1514 	case TRACE_REG_PERF_OPEN:
1515 		return uprobe_perf_open(event, data);
1516 
1517 	case TRACE_REG_PERF_CLOSE:
1518 		return uprobe_perf_close(event, data);
1519 
1520 #endif
1521 	default:
1522 		return 0;
1523 	}
1524 }
1525 
1526 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
1527 			     __u64 *data)
1528 {
1529 	struct trace_uprobe *tu;
1530 	struct uprobe_dispatch_data udd;
1531 	struct uprobe_cpu_buffer *ucb = NULL;
1532 	int ret = 0;
1533 
1534 	tu = container_of(con, struct trace_uprobe, consumer);
1535 
1536 	this_cpu_inc(*tu->nhits);
1537 
1538 	udd.tu = tu;
1539 	udd.bp_addr = instruction_pointer(regs);
1540 
1541 	current->utask->vaddr = (unsigned long) &udd;
1542 
1543 	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1544 		return 0;
1545 
1546 	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1547 		ret |= uprobe_trace_func(tu, regs, &ucb);
1548 
1549 #ifdef CONFIG_PERF_EVENTS
1550 	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1551 		ret |= uprobe_perf_func(tu, regs, &ucb);
1552 #endif
1553 	uprobe_buffer_put(ucb);
1554 	return ret;
1555 }
1556 
1557 static int uretprobe_dispatcher(struct uprobe_consumer *con,
1558 				unsigned long func, struct pt_regs *regs,
1559 				__u64 *data)
1560 {
1561 	struct trace_uprobe *tu;
1562 	struct uprobe_dispatch_data udd;
1563 	struct uprobe_cpu_buffer *ucb = NULL;
1564 
1565 	tu = container_of(con, struct trace_uprobe, consumer);
1566 
1567 	udd.tu = tu;
1568 	udd.bp_addr = func;
1569 
1570 	current->utask->vaddr = (unsigned long) &udd;
1571 
1572 	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1573 		return 0;
1574 
1575 	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1576 		uretprobe_trace_func(tu, func, regs, &ucb);
1577 
1578 #ifdef CONFIG_PERF_EVENTS
1579 	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1580 		uretprobe_perf_func(tu, func, regs, &ucb);
1581 #endif
1582 	uprobe_buffer_put(ucb);
1583 	return 0;
1584 }
1585 
1586 static struct trace_event_functions uprobe_funcs = {
1587 	.trace		= print_uprobe_event
1588 };
1589 
1590 static struct trace_event_fields uprobe_fields_array[] = {
1591 	{ .type = TRACE_FUNCTION_TYPE,
1592 	  .define_fields = uprobe_event_define_fields },
1593 	{}
1594 };
1595 
1596 static inline void init_trace_event_call(struct trace_uprobe *tu)
1597 {
1598 	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1599 	call->event.funcs = &uprobe_funcs;
1600 	call->class->fields_array = uprobe_fields_array;
1601 
1602 	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1603 	call->class->reg = trace_uprobe_register;
1604 }
1605 
1606 static int register_uprobe_event(struct trace_uprobe *tu)
1607 {
1608 	init_trace_event_call(tu);
1609 
1610 	return trace_probe_register_event_call(&tu->tp);
1611 }
1612 
1613 static int unregister_uprobe_event(struct trace_uprobe *tu)
1614 {
1615 	return trace_probe_unregister_event_call(&tu->tp);
1616 }
1617 
1618 #ifdef CONFIG_PERF_EVENTS
1619 struct trace_event_call *
1620 create_local_trace_uprobe(char *name, unsigned long offs,
1621 			  unsigned long ref_ctr_offset, bool is_return)
1622 {
1623 	enum probe_print_type ptype;
1624 	struct trace_uprobe *tu;
1625 	struct path path;
1626 	int ret;
1627 
1628 	ret = kern_path(name, LOOKUP_FOLLOW, &path);
1629 	if (ret)
1630 		return ERR_PTR(ret);
1631 
1632 	if (!d_is_reg(path.dentry)) {
1633 		path_put(&path);
1634 		return ERR_PTR(-EINVAL);
1635 	}
1636 
1637 	/*
1638 	 * Local trace_uprobes are not added to dyn_event, so they are never
1639 	 * found by find_probe_event(). Therefore, there is no concern about
1640 	 * a duplicated name "DUMMY_EVENT" here.
1641 	 */
1642 	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1643 				is_return);
1644 
1645 	if (IS_ERR(tu)) {
1646 		pr_info("Failed to allocate trace_uprobe.(%d)\n",
1647 			(int)PTR_ERR(tu));
1648 		path_put(&path);
1649 		return ERR_CAST(tu);
1650 	}
1651 
1652 	tu->offset = offs;
1653 	tu->path = path;
1654 	tu->ref_ctr_offset = ref_ctr_offset;
1655 	tu->filename = kstrdup(name, GFP_KERNEL);
1656 	if (!tu->filename) {
1657 		ret = -ENOMEM;
1658 		goto error;
1659 	}
1660 
1661 	init_trace_event_call(tu);
1662 
1663 	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1664 	if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
1665 		ret = -ENOMEM;
1666 		goto error;
1667 	}
1668 
1669 	return trace_probe_event_call(&tu->tp);
1670 error:
1671 	free_trace_uprobe(tu);
1672 	return ERR_PTR(ret);
1673 }
1674 
1675 void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1676 {
1677 	struct trace_uprobe *tu;
1678 
1679 	tu = trace_uprobe_primary_from_call(event_call);
1680 
1681 	free_trace_uprobe(tu);
1682 }
1683 #endif /* CONFIG_PERF_EVENTS */
1684 
1685 /* Make a trace interface for controlling probe points */
1686 static __init int init_uprobe_trace(void)
1687 {
1688 	int ret;
1689 
1690 	ret = dyn_event_register(&trace_uprobe_ops);
1691 	if (ret)
1692 		return ret;
1693 
1694 	ret = tracing_init_dentry();
1695 	if (ret)
1696 		return 0;
1697 
1698 	trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL,
1699 				    NULL, &uprobe_events_ops);
1700 	/* Profile interface */
1701 	trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL,
1702 				    NULL, &uprobe_profile_ops);
1703 	return 0;
1704 }
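/*
 * The two files created above live at the top of the tracefs mount,
 * typically /sys/kernel/tracing/uprobe_events (probe definitions, writable)
 * and /sys/kernel/tracing/uprobe_profile (per-probe hit counts, read-only).
 */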
1705 
1706 fs_initcall(init_uprobe_trace);
1707