xref: /linux/kernel/trace/trace_uprobe.c (revision b7dbc2e813e00d61e66fc0267599441493774b93)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * uprobes-based tracing events
4  *
5  * Copyright (C) IBM Corporation, 2010-2012
6  * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7  */
8 #define pr_fmt(fmt)	"trace_uprobe: " fmt
9 
10 #include <linux/bpf-cgroup.h>
11 #include <linux/cleanup.h>
12 #include <linux/ctype.h>
13 #include <linux/filter.h>
14 #include <linux/module.h>
15 #include <linux/namei.h>
16 #include <linux/percpu.h>
17 #include <linux/rculist.h>
18 #include <linux/security.h>
19 #include <linux/string.h>
20 #include <linux/uaccess.h>
21 #include <linux/uprobes.h>
22 
23 #include "trace.h"
24 #include "trace_dynevent.h"
25 #include "trace_probe.h"
26 #include "trace_probe_tmpl.h"
27 
28 #define UPROBE_EVENT_SYSTEM	"uprobes"
29 
30 struct uprobe_trace_entry_head {
31 	struct trace_entry	ent;
32 	unsigned long		vaddr[];
33 };
34 
35 #define SIZEOF_TRACE_ENTRY(is_return)			\
36 	(sizeof(struct uprobe_trace_entry_head) +	\
37 	 sizeof(unsigned long) * (is_return ? 2 : 1))
38 
39 #define DATAOF_TRACE_ENTRY(entry, is_return)		\
40 	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
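/*
 * Layout sketch: for a return probe the entry carries two addresses
 * (vaddr[0] = probed function address, vaddr[1] = address the function
 * returned to), while an entry probe carries only the instruction pointer
 * in vaddr[0]. Fetched argument data follows at DATAOF_TRACE_ENTRY().
 */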
41 
42 static int trace_uprobe_create(const char *raw_command);
43 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
44 static int trace_uprobe_release(struct dyn_event *ev);
45 static bool trace_uprobe_is_busy(struct dyn_event *ev);
46 static bool trace_uprobe_match(const char *system, const char *event,
47 			int argc, const char **argv, struct dyn_event *ev);
48 
49 static struct dyn_event_operations trace_uprobe_ops = {
50 	.create = trace_uprobe_create,
51 	.show = trace_uprobe_show,
52 	.is_busy = trace_uprobe_is_busy,
53 	.free = trace_uprobe_release,
54 	.match = trace_uprobe_match,
55 };
56 
57 /*
58  * uprobe event core functions
59  */
60 struct trace_uprobe {
61 	struct dyn_event		devent;
62 	struct uprobe_consumer		consumer;
63 	struct path			path;
64 	char				*filename;
65 	struct uprobe			*uprobe;
66 	unsigned long			offset;
67 	unsigned long			ref_ctr_offset;
68 	unsigned long __percpu		*nhits;
69 	struct trace_probe		tp;
70 };
71 
72 static bool is_trace_uprobe(struct dyn_event *ev)
73 {
74 	return ev->ops == &trace_uprobe_ops;
75 }
76 
77 static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
78 {
79 	return container_of(ev, struct trace_uprobe, devent);
80 }
81 
82 /**
83  * for_each_trace_uprobe - iterate over the trace_uprobe list
84  * @pos:	the struct trace_uprobe * for each entry
85  * @dpos:	the struct dyn_event * to use as a loop cursor
86  */
87 #define for_each_trace_uprobe(pos, dpos)	\
88 	for_each_dyn_event(dpos)		\
89 		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
90 
91 static int register_uprobe_event(struct trace_uprobe *tu);
92 static int unregister_uprobe_event(struct trace_uprobe *tu);
93 
94 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
95 			     __u64 *data);
96 static int uretprobe_dispatcher(struct uprobe_consumer *con,
97 				unsigned long func, struct pt_regs *regs,
98 				__u64 *data);
99 
100 #ifdef CONFIG_STACK_GROWSUP
101 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
102 {
103 	return addr - (n * sizeof(long));
104 }
105 #else
106 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
107 {
108 	return addr + (n * sizeof(long));
109 }
110 #endif
111 
112 static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
113 {
114 	unsigned long ret;
115 	unsigned long addr = user_stack_pointer(regs);
116 
117 	addr = adjust_stack_addr(addr, n);
118 
119 	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
120 		return 0;
121 
122 	return ret;
123 }
124 
125 /*
126  * Uprobes-specific fetch functions
127  */
128 static nokprobe_inline int
129 probe_mem_read(void *dest, void *src, size_t size)
130 {
131 	void __user *vaddr = (void __force __user *)src;
132 
133 	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
134 }
135 
136 static nokprobe_inline int
137 probe_mem_read_user(void *dest, void *src, size_t size)
138 {
139 	return probe_mem_read(dest, src, size);
140 }
141 
142 /*
143  * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
144  * length and relative data location.
145  */
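/*
 * (Sketch of the encoding, as used by get_loc_len()/get_loc_data() below:
 * the u32 is a trace_probe "data_loc" word packing the maximum length in
 * its upper 16 bits and the offset from the entry base in its lower 16
 * bits; see make_data_loc() in trace_probe.h.)
 */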
146 static nokprobe_inline int
147 fetch_store_string(unsigned long addr, void *dest, void *base)
148 {
149 	long ret;
150 	u32 loc = *(u32 *)dest;
151 	int maxlen  = get_loc_len(loc);
152 	u8 *dst = get_loc_data(dest, base);
153 	void __user *src = (void __force __user *) addr;
154 
155 	if (unlikely(!maxlen))
156 		return -ENOMEM;
157 
158 	if (addr == FETCH_TOKEN_COMM)
159 		ret = strscpy(dst, current->comm, maxlen);
160 	else
161 		ret = strncpy_from_user(dst, src, maxlen);
162 	if (ret >= 0) {
163 		if (ret == maxlen)
164 			dst[ret - 1] = '\0';
165 		else
166 			/*
167 			 * Include the terminating null byte. In this case it
168 			 * was copied by strncpy_from_user but not accounted
169 			 * for in ret.
170 			 */
171 			ret++;
172 		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
173 	} else
174 		*(u32 *)dest = make_data_loc(0, (void *)dst - base);
175 
176 	return ret;
177 }
178 
179 static nokprobe_inline int
180 fetch_store_string_user(unsigned long addr, void *dest, void *base)
181 {
182 	return fetch_store_string(addr, dest, base);
183 }
184 
185 /* Return the length of the string, including the terminating null byte */
186 static nokprobe_inline int
187 fetch_store_strlen(unsigned long addr)
188 {
189 	int len;
190 	void __user *vaddr = (void __force __user *) addr;
191 
192 	if (addr == FETCH_TOKEN_COMM)
193 		len = strlen(current->comm) + 1;
194 	else
195 		len = strnlen_user(vaddr, MAX_STRING_SIZE);
196 
197 	return (len > MAX_STRING_SIZE) ? 0 : len;
198 }
199 
200 static nokprobe_inline int
201 fetch_store_strlen_user(unsigned long addr)
202 {
203 	return fetch_store_strlen(addr);
204 }
205 
206 static unsigned long translate_user_vaddr(unsigned long file_offset)
207 {
208 	unsigned long base_addr;
209 	struct uprobe_dispatch_data *udd;
210 
211 	udd = (void *) current->utask->vaddr;
212 
213 	base_addr = udd->bp_addr - udd->tu->offset;
214 	return base_addr + file_offset;
215 }
216 
217 /* Note that we don't verify it, since the code does not come from user space */
218 static int
219 process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
220 		   void *dest, void *base)
221 {
222 	struct pt_regs *regs = rec;
223 	unsigned long val;
224 	int ret;
225 
226 	/* 1st stage: get value from context */
227 	switch (code->op) {
228 	case FETCH_OP_REG:
229 		val = regs_get_register(regs, code->param);
230 		break;
231 	case FETCH_OP_STACK:
232 		val = get_user_stack_nth(regs, code->param);
233 		break;
234 	case FETCH_OP_STACKP:
235 		val = user_stack_pointer(regs);
236 		break;
237 	case FETCH_OP_RETVAL:
238 		val = regs_return_value(regs);
239 		break;
240 	case FETCH_OP_COMM:
241 		val = FETCH_TOKEN_COMM;
242 		break;
243 	case FETCH_OP_FOFFS:
244 		val = translate_user_vaddr(code->immediate);
245 		break;
246 	default:
247 		ret = process_common_fetch_insn(code, &val);
248 		if (ret < 0)
249 			return ret;
250 	}
251 	code++;
252 
253 	return process_fetch_insn_bottom(code, val, dest, base);
254 }
255 NOKPROBE_SYMBOL(process_fetch_insn)
256 
257 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
258 {
259 	rwlock_init(&filter->rwlock);
260 	filter->nr_systemwide = 0;
261 	INIT_LIST_HEAD(&filter->perf_events);
262 }
263 
264 static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
265 {
266 	return !filter->nr_systemwide && list_empty(&filter->perf_events);
267 }
268 
269 static inline bool is_ret_probe(struct trace_uprobe *tu)
270 {
271 	return tu->consumer.ret_handler != NULL;
272 }
273 
274 static bool trace_uprobe_is_busy(struct dyn_event *ev)
275 {
276 	struct trace_uprobe *tu = to_trace_uprobe(ev);
277 
278 	return trace_probe_is_enabled(&tu->tp);
279 }
280 
281 static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
282 					    int argc, const char **argv)
283 {
284 	char buf[MAX_ARGSTR_LEN + 1];
285 	int len;
286 
287 	if (!argc)
288 		return true;
289 
290 	len = strlen(tu->filename);
291 	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
292 		return false;
293 
294 	if (tu->ref_ctr_offset == 0)
295 		snprintf(buf, sizeof(buf), "0x%0*lx",
296 				(int)(sizeof(void *) * 2), tu->offset);
297 	else
298 		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
299 				(int)(sizeof(void *) * 2), tu->offset,
300 				tu->ref_ctr_offset);
301 	if (strcmp(buf, &argv[0][len + 1]))
302 		return false;
303 
304 	argc--; argv++;
305 
306 	return trace_probe_match_command_args(&tu->tp, argc, argv);
307 }
308 
309 static bool trace_uprobe_match(const char *system, const char *event,
310 			int argc, const char **argv, struct dyn_event *ev)
311 {
312 	struct trace_uprobe *tu = to_trace_uprobe(ev);
313 
314 	return (event[0] == '\0' ||
315 		strcmp(trace_probe_name(&tu->tp), event) == 0) &&
316 	   (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
317 	   trace_uprobe_match_command_head(tu, argc, argv);
318 }
319 
320 static nokprobe_inline struct trace_uprobe *
321 trace_uprobe_primary_from_call(struct trace_event_call *call)
322 {
323 	struct trace_probe *tp;
324 
325 	tp = trace_probe_primary_from_call(call);
326 	if (WARN_ON_ONCE(!tp))
327 		return NULL;
328 
329 	return container_of(tp, struct trace_uprobe, tp);
330 }
331 
332 /*
333  * Allocate new trace_uprobe and initialize it (including uprobes).
334  */
335 static struct trace_uprobe *
336 alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
337 {
338 	struct trace_uprobe *tu;
339 	int ret;
340 
341 	tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
342 	if (!tu)
343 		return ERR_PTR(-ENOMEM);
344 
345 	tu->nhits = alloc_percpu(unsigned long);
346 	if (!tu->nhits) {
347 		ret = -ENOMEM;
348 		goto error;
349 	}
350 
351 	ret = trace_probe_init(&tu->tp, event, group, true, nargs);
352 	if (ret < 0)
353 		goto error;
354 
355 	dyn_event_init(&tu->devent, &trace_uprobe_ops);
356 	tu->consumer.handler = uprobe_dispatcher;
357 	if (is_ret)
358 		tu->consumer.ret_handler = uretprobe_dispatcher;
359 	init_trace_uprobe_filter(tu->tp.event->filter);
360 	return tu;
361 
362 error:
363 	free_percpu(tu->nhits);
364 	kfree(tu);
365 
366 	return ERR_PTR(ret);
367 }
368 
369 static void free_trace_uprobe(struct trace_uprobe *tu)
370 {
371 	if (!tu)
372 		return;
373 
374 	path_put(&tu->path);
375 	trace_probe_cleanup(&tu->tp);
376 	kfree(tu->filename);
377 	free_percpu(tu->nhits);
378 	kfree(tu);
379 }
380 
381 static struct trace_uprobe *find_probe_event(const char *event, const char *group)
382 {
383 	struct dyn_event *pos;
384 	struct trace_uprobe *tu;
385 
386 	for_each_trace_uprobe(tu, pos)
387 		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
388 		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
389 			return tu;
390 
391 	return NULL;
392 }
393 
394 /* Unregister a trace_uprobe and probe_event */
395 static int unregister_trace_uprobe(struct trace_uprobe *tu)
396 {
397 	int ret;
398 
399 	if (trace_probe_has_sibling(&tu->tp))
400 		goto unreg;
401 
402 	/* If there's a reference to the dynamic event */
403 	if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
404 		return -EBUSY;
405 
406 	ret = unregister_uprobe_event(tu);
407 	if (ret)
408 		return ret;
409 
410 unreg:
411 	dyn_event_remove(&tu->devent);
412 	trace_probe_unlink(&tu->tp);
413 	free_trace_uprobe(tu);
414 	return 0;
415 }
416 
417 static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
418 					 struct trace_uprobe *comp)
419 {
420 	struct trace_probe_event *tpe = orig->tp.event;
421 	struct inode *comp_inode = d_real_inode(comp->path.dentry);
422 	int i;
423 
424 	list_for_each_entry(orig, &tpe->probes, tp.list) {
425 		if (comp_inode != d_real_inode(orig->path.dentry) ||
426 		    comp->offset != orig->offset)
427 			continue;
428 
429 		/*
430 		 * trace_probe_compare_arg_type() ensured that nr_args and
431 		 * each argument name and type are the same. Let's compare comm.
432 		 */
433 		for (i = 0; i < orig->tp.nr_args; i++) {
434 			if (strcmp(orig->tp.args[i].comm,
435 				   comp->tp.args[i].comm))
436 				break;
437 		}
438 
439 		if (i == orig->tp.nr_args)
440 			return true;
441 	}
442 
443 	return false;
444 }
445 
446 static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
447 {
448 	int ret;
449 
450 	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
451 	if (ret) {
452 		/* Note that argument starts index = 2 */
453 		trace_probe_log_set_index(ret + 1);
454 		trace_probe_log_err(0, DIFF_ARG_TYPE);
455 		return -EEXIST;
456 	}
457 	if (trace_uprobe_has_same_uprobe(to, tu)) {
458 		trace_probe_log_set_index(0);
459 		trace_probe_log_err(0, SAME_PROBE);
460 		return -EEXIST;
461 	}
462 
463 	/* Append to existing event */
464 	ret = trace_probe_append(&tu->tp, &to->tp);
465 	if (!ret)
466 		dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
467 
468 	return ret;
469 }
470 
471 /*
472  * A uprobe with multiple reference counters is not allowed, i.e.
473  * if the inode and offset match, the reference counter offset
474  * *must* match as well. There is one exception: if the user is
475  * replacing an old trace_uprobe with a new one (same group/event),
476  * we allow the same uprobe with a new reference counter, as long
477  * as the new one does not conflict with any other existing
478  * ones.
479  */
480 static int validate_ref_ctr_offset(struct trace_uprobe *new)
481 {
482 	struct dyn_event *pos;
483 	struct trace_uprobe *tmp;
484 	struct inode *new_inode = d_real_inode(new->path.dentry);
485 
486 	for_each_trace_uprobe(tmp, pos) {
487 		if (new_inode == d_real_inode(tmp->path.dentry) &&
488 		    new->offset == tmp->offset &&
489 		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
490 			pr_warn("Reference counter offset mismatch.");
491 			return -EINVAL;
492 		}
493 	}
494 	return 0;
495 }
496 
497 /* Register a trace_uprobe and probe_event */
498 static int register_trace_uprobe(struct trace_uprobe *tu)
499 {
500 	struct trace_uprobe *old_tu;
501 	int ret;
502 
503 	guard(mutex)(&event_mutex);
504 
505 	ret = validate_ref_ctr_offset(tu);
506 	if (ret)
507 		return ret;
508 
509 	/* register as an event */
510 	old_tu = find_probe_event(trace_probe_name(&tu->tp),
511 				  trace_probe_group_name(&tu->tp));
512 	if (old_tu) {
513 		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
514 			trace_probe_log_set_index(0);
515 			trace_probe_log_err(0, DIFF_PROBE_TYPE);
516 			return -EEXIST;
517 		}
518 		return append_trace_uprobe(tu, old_tu);
519 	}
520 
521 	ret = register_uprobe_event(tu);
522 	if (ret) {
523 		if (ret == -EEXIST) {
524 			trace_probe_log_set_index(0);
525 			trace_probe_log_err(0, EVENT_EXIST);
526 		} else
527 			pr_warn("Failed to register probe event(%d)\n", ret);
528 		return ret;
529 	}
530 
531 	dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
532 
533 	return ret;
534 }
535 
536 /*
537  * Argument syntax:
538  *  - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS]
539  */
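/*
 * Illustrative examples (the path and offsets here are hypothetical,
 * chosen only to show the syntax):
 *
 *   echo 'p:bash_readline /bin/bash:0x4245c0 %ax' >> uprobe_events
 *   echo 'r:bash_readline_ret /bin/bash:0x4245c0(0x10036f8) $retval' >> uprobe_events
 *
 * The first adds an entry probe recording the %ax register; the second
 * adds a return probe at the same location with a reference counter
 * (semaphore) offset, recording the return value.
 */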
540 static int __trace_uprobe_create(int argc, const char **argv)
541 {
542 	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
543 	char *arg, *filename, *rctr, *rctr_end, *tmp;
544 	unsigned long offset, ref_ctr_offset;
545 	char *gbuf __free(kfree) = NULL;
546 	char *buf __free(kfree) = NULL;
547 	enum probe_print_type ptype;
548 	struct trace_uprobe *tu;
549 	bool is_return = false;
550 	struct path path;
551 	int i, ret;
552 
553 	ref_ctr_offset = 0;
554 
555 	switch (argv[0][0]) {
556 	case 'r':
557 		is_return = true;
558 		break;
559 	case 'p':
560 		break;
561 	default:
562 		return -ECANCELED;
563 	}
564 
565 	if (argc < 2)
566 		return -ECANCELED;
567 
568 	trace_probe_log_init("trace_uprobe", argc, argv);
569 
570 	if (argc - 2 > MAX_TRACE_ARGS) {
571 		trace_probe_log_set_index(2);
572 		trace_probe_log_err(0, TOO_MANY_ARGS);
573 		return -E2BIG;
574 	}
575 
576 	if (argv[0][1] == ':')
577 		event = &argv[0][2];
578 
579 	if (!strchr(argv[1], '/'))
580 		return -ECANCELED;
581 
582 	filename = kstrdup(argv[1], GFP_KERNEL);
583 	if (!filename)
584 		return -ENOMEM;
585 
586 	/* Find the last occurrence, in case the path contains ':' too. */
587 	arg = strrchr(filename, ':');
588 	if (!arg || !isdigit(arg[1])) {
589 		kfree(filename);
590 		return -ECANCELED;
591 	}
592 
593 	trace_probe_log_set_index(1);	/* filename is the 2nd argument */
594 
595 	*arg++ = '\0';
596 	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
597 	if (ret) {
598 		trace_probe_log_err(0, FILE_NOT_FOUND);
599 		kfree(filename);
600 		trace_probe_log_clear();
601 		return ret;
602 	}
603 	if (!d_is_reg(path.dentry)) {
604 		trace_probe_log_err(0, NO_REGULAR_FILE);
605 		ret = -EINVAL;
606 		goto fail_address_parse;
607 	}
608 
609 	/* Parse reference counter offset if specified. */
610 	rctr = strchr(arg, '(');
611 	if (rctr) {
612 		rctr_end = strchr(rctr, ')');
613 		if (!rctr_end) {
614 			ret = -EINVAL;
615 			rctr_end = rctr + strlen(rctr);
616 			trace_probe_log_err(rctr_end - filename,
617 					    REFCNT_OPEN_BRACE);
618 			goto fail_address_parse;
619 		} else if (rctr_end[1] != '\0') {
620 			ret = -EINVAL;
621 			trace_probe_log_err(rctr_end + 1 - filename,
622 					    BAD_REFCNT_SUFFIX);
623 			goto fail_address_parse;
624 		}
625 
626 		*rctr++ = '\0';
627 		*rctr_end = '\0';
628 		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
629 		if (ret) {
630 			trace_probe_log_err(rctr - filename, BAD_REFCNT);
631 			goto fail_address_parse;
632 		}
633 	}
634 
635 	/* Check if there is %return suffix */
636 	tmp = strchr(arg, '%');
637 	if (tmp) {
638 		if (!strcmp(tmp, "%return")) {
639 			*tmp = '\0';
640 			is_return = true;
641 		} else {
642 			trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
643 			ret = -EINVAL;
644 			goto fail_address_parse;
645 		}
646 	}
647 
648 	/* Parse uprobe offset. */
649 	ret = kstrtoul(arg, 0, &offset);
650 	if (ret) {
651 		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
652 		goto fail_address_parse;
653 	}
654 
655 	/* setup a probe */
656 	trace_probe_log_set_index(0);
657 	if (event) {
658 		gbuf = kmalloc(MAX_EVENT_NAME_LEN, GFP_KERNEL);
659 		if (!gbuf)
660 			goto fail_mem;
661 
662 		ret = traceprobe_parse_event_name(&event, &group, gbuf,
663 						  event - argv[0]);
664 		if (ret)
665 			goto fail_address_parse;
666 	}
667 
668 	if (!event) {
669 		char *tail;
670 		char *ptr;
671 
672 		tail = kstrdup(kbasename(filename), GFP_KERNEL);
673 		if (!tail)
674 			goto fail_mem;
675 
676 		ptr = strpbrk(tail, ".-_");
677 		if (ptr)
678 			*ptr = '\0';
679 
680 		buf = kmalloc(MAX_EVENT_NAME_LEN, GFP_KERNEL);
681 		if (!buf)
682 			goto fail_mem;
683 		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
684 		event = buf;
685 		kfree(tail);
686 	}
687 
688 	argc -= 2;
689 	argv += 2;
690 
691 	tu = alloc_trace_uprobe(group, event, argc, is_return);
692 	if (IS_ERR(tu)) {
693 		ret = PTR_ERR(tu);
694 		/* This must return -ENOMEM otherwise there is a bug */
695 		WARN_ON_ONCE(ret != -ENOMEM);
696 		goto fail_address_parse;
697 	}
698 	tu->offset = offset;
699 	tu->ref_ctr_offset = ref_ctr_offset;
700 	tu->path = path;
701 	tu->filename = filename;
702 
703 	/* parse arguments */
704 	for (i = 0; i < argc; i++) {
705 		struct traceprobe_parse_context *ctx __free(traceprobe_parse_context)
706 			= kzalloc(sizeof(*ctx), GFP_KERNEL);
707 
708 		if (!ctx) {
709 			ret = -ENOMEM;
710 			goto error;
711 		}
712 		ctx->flags = (is_return ? TPARG_FL_RETURN : 0) | TPARG_FL_USER;
713 		trace_probe_log_set_index(i + 2);
714 		ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], ctx);
715 		if (ret)
716 			goto error;
717 	}
718 
719 	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
720 	ret = traceprobe_set_print_fmt(&tu->tp, ptype);
721 	if (ret < 0)
722 		goto error;
723 
724 	ret = register_trace_uprobe(tu);
725 	if (!ret)
726 		goto out;
727 
728 error:
729 	free_trace_uprobe(tu);
730 out:
731 	trace_probe_log_clear();
732 	return ret;
733 
734 fail_mem:
735 	ret = -ENOMEM;
736 
737 fail_address_parse:
738 	trace_probe_log_clear();
739 	path_put(&path);
740 	kfree(filename);
741 
742 	return ret;
743 }
744 
745 int trace_uprobe_create(const char *raw_command)
746 {
747 	return trace_probe_create(raw_command, __trace_uprobe_create);
748 }
749 
750 static int create_or_delete_trace_uprobe(const char *raw_command)
751 {
752 	int ret;
753 
754 	if (raw_command[0] == '-')
755 		return dyn_event_release(raw_command, &trace_uprobe_ops);
756 
757 	ret = dyn_event_create(raw_command, &trace_uprobe_ops);
758 	return ret == -ECANCELED ? -EINVAL : ret;
759 }
760 
761 static int trace_uprobe_release(struct dyn_event *ev)
762 {
763 	struct trace_uprobe *tu = to_trace_uprobe(ev);
764 
765 	return unregister_trace_uprobe(tu);
766 }
767 
768 /* Probes listing interfaces */
769 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
770 {
771 	struct trace_uprobe *tu = to_trace_uprobe(ev);
772 	char c = is_ret_probe(tu) ? 'r' : 'p';
773 	int i;
774 
775 	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
776 			trace_probe_name(&tu->tp), tu->filename,
777 			(int)(sizeof(void *) * 2), tu->offset);
778 
779 	if (tu->ref_ctr_offset)
780 		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
781 
782 	for (i = 0; i < tu->tp.nr_args; i++)
783 		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
784 
785 	seq_putc(m, '\n');
786 	return 0;
787 }
788 
789 static int probes_seq_show(struct seq_file *m, void *v)
790 {
791 	struct dyn_event *ev = v;
792 
793 	if (!is_trace_uprobe(ev))
794 		return 0;
795 
796 	return trace_uprobe_show(m, ev);
797 }
798 
799 static const struct seq_operations probes_seq_op = {
800 	.start  = dyn_event_seq_start,
801 	.next   = dyn_event_seq_next,
802 	.stop   = dyn_event_seq_stop,
803 	.show   = probes_seq_show
804 };
805 
806 static int probes_open(struct inode *inode, struct file *file)
807 {
808 	int ret;
809 
810 	ret = security_locked_down(LOCKDOWN_TRACEFS);
811 	if (ret)
812 		return ret;
813 
814 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
815 		ret = dyn_events_release_all(&trace_uprobe_ops);
816 		if (ret)
817 			return ret;
818 	}
819 
820 	return seq_open(file, &probes_seq_op);
821 }
822 
823 static ssize_t probes_write(struct file *file, const char __user *buffer,
824 			    size_t count, loff_t *ppos)
825 {
826 	return trace_parse_run_command(file, buffer, count, ppos,
827 					create_or_delete_trace_uprobe);
828 }
829 
830 static const struct file_operations uprobe_events_ops = {
831 	.owner		= THIS_MODULE,
832 	.open		= probes_open,
833 	.read		= seq_read,
834 	.llseek		= seq_lseek,
835 	.release	= seq_release,
836 	.write		= probes_write,
837 };
838 
839 /* Probes profiling interfaces */
840 static int probes_profile_seq_show(struct seq_file *m, void *v)
841 {
842 	struct dyn_event *ev = v;
843 	struct trace_uprobe *tu;
844 	unsigned long nhits;
845 	int cpu;
846 
847 	if (!is_trace_uprobe(ev))
848 		return 0;
849 
850 	tu = to_trace_uprobe(ev);
851 
852 	nhits = 0;
853 	for_each_possible_cpu(cpu) {
854 		nhits += per_cpu(*tu->nhits, cpu);
855 	}
856 
857 	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
858 		   trace_probe_name(&tu->tp), nhits);
859 	return 0;
860 }
861 
862 static const struct seq_operations profile_seq_op = {
863 	.start  = dyn_event_seq_start,
864 	.next   = dyn_event_seq_next,
865 	.stop   = dyn_event_seq_stop,
866 	.show	= probes_profile_seq_show
867 };
868 
869 static int profile_open(struct inode *inode, struct file *file)
870 {
871 	int ret;
872 
873 	ret = security_locked_down(LOCKDOWN_TRACEFS);
874 	if (ret)
875 		return ret;
876 
877 	return seq_open(file, &profile_seq_op);
878 }
879 
880 static const struct file_operations uprobe_profile_ops = {
881 	.owner		= THIS_MODULE,
882 	.open		= profile_open,
883 	.read		= seq_read,
884 	.llseek		= seq_lseek,
885 	.release	= seq_release,
886 };
887 
888 struct uprobe_cpu_buffer {
889 	struct mutex mutex;
890 	void *buf;
891 	int dsize;
892 };
893 static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
894 static int uprobe_buffer_refcnt;
895 #define MAX_UCB_BUFFER_SIZE PAGE_SIZE
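/*
 * Design note (summary of the code below): each possible CPU gets a
 * one-page scratch buffer used to assemble probe arguments. The handlers
 * run in the probed task's context and may sleep or migrate, so each
 * buffer is guarded by a mutex rather than by disabling preemption;
 * uprobe_buffer_refcnt counts enabled users so the buffers are allocated
 * on the first enable and freed on the last disable.
 */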
896 
897 static int uprobe_buffer_init(void)
898 {
899 	int cpu, err_cpu;
900 
901 	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
902 	if (uprobe_cpu_buffer == NULL)
903 		return -ENOMEM;
904 
905 	for_each_possible_cpu(cpu) {
906 		struct page *p = alloc_pages_node(cpu_to_node(cpu),
907 						  GFP_KERNEL, 0);
908 		if (p == NULL) {
909 			err_cpu = cpu;
910 			goto err;
911 		}
912 		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
913 		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
914 	}
915 
916 	return 0;
917 
918 err:
919 	for_each_possible_cpu(cpu) {
920 		if (cpu == err_cpu)
921 			break;
922 		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
923 	}
924 
925 	free_percpu(uprobe_cpu_buffer);
926 	return -ENOMEM;
927 }
928 
929 static int uprobe_buffer_enable(void)
930 {
931 	int ret = 0;
932 
933 	BUG_ON(!mutex_is_locked(&event_mutex));
934 
935 	if (uprobe_buffer_refcnt++ == 0) {
936 		ret = uprobe_buffer_init();
937 		if (ret < 0)
938 			uprobe_buffer_refcnt--;
939 	}
940 
941 	return ret;
942 }
943 
944 static void uprobe_buffer_disable(void)
945 {
946 	int cpu;
947 
948 	BUG_ON(!mutex_is_locked(&event_mutex));
949 
950 	if (--uprobe_buffer_refcnt == 0) {
951 		for_each_possible_cpu(cpu)
952 			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
953 							     cpu)->buf);
954 
955 		free_percpu(uprobe_cpu_buffer);
956 		uprobe_cpu_buffer = NULL;
957 	}
958 }
959 
960 static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
961 {
962 	struct uprobe_cpu_buffer *ucb;
963 	int cpu;
964 
965 	cpu = raw_smp_processor_id();
966 	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
967 
968 	/*
969 	 * Use per-CPU buffers for fastest access, but we might migrate,
970 	 * so the mutex makes sure we have sole access to the buffer.
971 	 */
972 	mutex_lock(&ucb->mutex);
973 
974 	return ucb;
975 }
976 
977 static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
978 {
979 	if (!ucb)
980 		return;
981 	mutex_unlock(&ucb->mutex);
982 }
983 
984 static struct uprobe_cpu_buffer *prepare_uprobe_buffer(struct trace_uprobe *tu,
985 						       struct pt_regs *regs,
986 						       struct uprobe_cpu_buffer **ucbp)
987 {
988 	struct uprobe_cpu_buffer *ucb;
989 	int dsize, esize;
990 
991 	if (*ucbp)
992 		return *ucbp;
993 
994 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
995 	dsize = __get_data_size(&tu->tp, regs, NULL);
996 
997 	ucb = uprobe_buffer_get();
998 	ucb->dsize = tu->tp.size + dsize;
999 
1000 	if (WARN_ON_ONCE(ucb->dsize > MAX_UCB_BUFFER_SIZE)) {
1001 		ucb->dsize = MAX_UCB_BUFFER_SIZE;
1002 		dsize = MAX_UCB_BUFFER_SIZE - tu->tp.size;
1003 	}
1004 
1005 	store_trace_args(ucb->buf, &tu->tp, regs, NULL, esize, dsize);
1006 
1007 	*ucbp = ucb;
1008 	return ucb;
1009 }
1010 
1011 static void __uprobe_trace_func(struct trace_uprobe *tu,
1012 				unsigned long func, struct pt_regs *regs,
1013 				struct uprobe_cpu_buffer *ucb,
1014 				struct trace_event_file *trace_file)
1015 {
1016 	struct uprobe_trace_entry_head *entry;
1017 	struct trace_event_buffer fbuffer;
1018 	void *data;
1019 	int size, esize;
1020 	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1021 
1022 	WARN_ON(call != trace_file->event_call);
1023 
1024 	if (trace_trigger_soft_disabled(trace_file))
1025 		return;
1026 
1027 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1028 	size = esize + ucb->dsize;
1029 	entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
1030 	if (!entry)
1031 		return;
1032 
1033 	if (is_ret_probe(tu)) {
1034 		entry->vaddr[0] = func;
1035 		entry->vaddr[1] = instruction_pointer(regs);
1036 		data = DATAOF_TRACE_ENTRY(entry, true);
1037 	} else {
1038 		entry->vaddr[0] = instruction_pointer(regs);
1039 		data = DATAOF_TRACE_ENTRY(entry, false);
1040 	}
1041 
1042 	memcpy(data, ucb->buf, ucb->dsize);
1043 
1044 	trace_event_buffer_commit(&fbuffer);
1045 }
1046 
1047 /* uprobe handler */
1048 static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
1049 			     struct uprobe_cpu_buffer **ucbp)
1050 {
1051 	struct event_file_link *link;
1052 	struct uprobe_cpu_buffer *ucb;
1053 
1054 	if (is_ret_probe(tu))
1055 		return 0;
1056 
1057 	ucb = prepare_uprobe_buffer(tu, regs, ucbp);
1058 
1059 	rcu_read_lock();
1060 	trace_probe_for_each_link_rcu(link, &tu->tp)
1061 		__uprobe_trace_func(tu, 0, regs, ucb, link->file);
1062 	rcu_read_unlock();
1063 
1064 	return 0;
1065 }
1066 
1067 static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
1068 				 struct pt_regs *regs,
1069 				 struct uprobe_cpu_buffer **ucbp)
1070 {
1071 	struct event_file_link *link;
1072 	struct uprobe_cpu_buffer *ucb;
1073 
1074 	ucb = prepare_uprobe_buffer(tu, regs, ucbp);
1075 
1076 	rcu_read_lock();
1077 	trace_probe_for_each_link_rcu(link, &tu->tp)
1078 		__uprobe_trace_func(tu, func, regs, ucb, link->file);
1079 	rcu_read_unlock();
1080 }
1081 
1082 /* Event entry printers */
1083 static enum print_line_t
1084 print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
1085 {
1086 	struct uprobe_trace_entry_head *entry;
1087 	struct trace_seq *s = &iter->seq;
1088 	struct trace_uprobe *tu;
1089 	u8 *data;
1090 
1091 	entry = (struct uprobe_trace_entry_head *)iter->ent;
1092 	tu = trace_uprobe_primary_from_call(
1093 		container_of(event, struct trace_event_call, event));
1094 	if (unlikely(!tu))
1095 		goto out;
1096 
1097 	if (is_ret_probe(tu)) {
1098 		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
1099 				 trace_probe_name(&tu->tp),
1100 				 entry->vaddr[1], entry->vaddr[0]);
1101 		data = DATAOF_TRACE_ENTRY(entry, true);
1102 	} else {
1103 		trace_seq_printf(s, "%s: (0x%lx)",
1104 				 trace_probe_name(&tu->tp),
1105 				 entry->vaddr[0]);
1106 		data = DATAOF_TRACE_ENTRY(entry, false);
1107 	}
1108 
1109 	if (trace_probe_print_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1110 		goto out;
1111 
1112 	trace_seq_putc(s, '\n');
1113 
1114  out:
1115 	return trace_handle_return(s);
1116 }
1117 
1118 typedef bool (*filter_func_t)(struct uprobe_consumer *self, struct mm_struct *mm);
1119 
1120 static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
1121 {
1122 	struct inode *inode = d_real_inode(tu->path.dentry);
1123 	struct uprobe *uprobe;
1124 
1125 	tu->consumer.filter = filter;
1126 	uprobe = uprobe_register(inode, tu->offset, tu->ref_ctr_offset, &tu->consumer);
1127 	if (IS_ERR(uprobe))
1128 		return PTR_ERR(uprobe);
1129 
1130 	tu->uprobe = uprobe;
1131 	return 0;
1132 }
1133 
1134 static void __probe_event_disable(struct trace_probe *tp)
1135 {
1136 	struct trace_uprobe *tu;
1137 	bool sync = false;
1138 
1139 	tu = container_of(tp, struct trace_uprobe, tp);
1140 	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1141 
1142 	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1143 		if (!tu->uprobe)
1144 			continue;
1145 
1146 		uprobe_unregister_nosync(tu->uprobe, &tu->consumer);
1147 		sync = true;
1148 		tu->uprobe = NULL;
1149 	}
1150 	if (sync)
1151 		uprobe_unregister_sync();
1152 }
1153 
1154 static int probe_event_enable(struct trace_event_call *call,
1155 			struct trace_event_file *file, filter_func_t filter)
1156 {
1157 	struct trace_probe *tp;
1158 	struct trace_uprobe *tu;
1159 	bool enabled;
1160 	int ret;
1161 
1162 	tp = trace_probe_primary_from_call(call);
1163 	if (WARN_ON_ONCE(!tp))
1164 		return -ENODEV;
1165 	enabled = trace_probe_is_enabled(tp);
1166 
1167 	/* This may also change "enabled" state */
1168 	if (file) {
1169 		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
1170 			return -EINTR;
1171 
1172 		ret = trace_probe_add_file(tp, file);
1173 		if (ret < 0)
1174 			return ret;
1175 	} else {
1176 		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
1177 			return -EINTR;
1178 
1179 		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
1180 	}
1181 
1182 	tu = container_of(tp, struct trace_uprobe, tp);
1183 	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1184 
1185 	if (enabled)
1186 		return 0;
1187 
1188 	ret = uprobe_buffer_enable();
1189 	if (ret)
1190 		goto err_flags;
1191 
1192 	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1193 		ret = trace_uprobe_enable(tu, filter);
1194 		if (ret) {
1195 			__probe_event_disable(tp);
1196 			goto err_buffer;
1197 		}
1198 	}
1199 
1200 	return 0;
1201 
1202  err_buffer:
1203 	uprobe_buffer_disable();
1204 
1205  err_flags:
1206 	if (file)
1207 		trace_probe_remove_file(tp, file);
1208 	else
1209 		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1210 
1211 	return ret;
1212 }
1213 
1214 static void probe_event_disable(struct trace_event_call *call,
1215 				struct trace_event_file *file)
1216 {
1217 	struct trace_probe *tp;
1218 
1219 	tp = trace_probe_primary_from_call(call);
1220 	if (WARN_ON_ONCE(!tp))
1221 		return;
1222 
1223 	if (!trace_probe_is_enabled(tp))
1224 		return;
1225 
1226 	if (file) {
1227 		if (trace_probe_remove_file(tp, file) < 0)
1228 			return;
1229 
1230 		if (trace_probe_is_enabled(tp))
1231 			return;
1232 	} else
1233 		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1234 
1235 	__probe_event_disable(tp);
1236 	uprobe_buffer_disable();
1237 }
1238 
1239 static int uprobe_event_define_fields(struct trace_event_call *event_call)
1240 {
1241 	int ret, size;
1242 	struct uprobe_trace_entry_head field;
1243 	struct trace_uprobe *tu;
1244 
1245 	tu = trace_uprobe_primary_from_call(event_call);
1246 	if (unlikely(!tu))
1247 		return -ENODEV;
1248 
1249 	if (is_ret_probe(tu)) {
1250 		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1251 		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1252 		size = SIZEOF_TRACE_ENTRY(true);
1253 	} else {
1254 		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1255 		size = SIZEOF_TRACE_ENTRY(false);
1256 	}
1257 
1258 	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1259 }
1260 
1261 #ifdef CONFIG_PERF_EVENTS
1262 static bool
1263 __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1264 {
1265 	struct perf_event *event;
1266 
1267 	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1268 		if (event->hw.target->mm == mm)
1269 			return true;
1270 	}
1271 
1272 	return false;
1273 }
1274 
1275 static inline bool
1276 trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
1277 			  struct perf_event *event)
1278 {
1279 	return __uprobe_perf_filter(filter, event->hw.target->mm);
1280 }
1281 
1282 static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
1283 				       struct perf_event *event)
1284 {
1285 	bool done;
1286 
1287 	write_lock(&filter->rwlock);
1288 	if (event->hw.target) {
1289 		list_del(&event->hw.tp_list);
1290 		done = filter->nr_systemwide ||
1291 			(event->hw.target->flags & PF_EXITING) ||
1292 			trace_uprobe_filter_event(filter, event);
1293 	} else {
1294 		filter->nr_systemwide--;
1295 		done = filter->nr_systemwide;
1296 	}
1297 	write_unlock(&filter->rwlock);
1298 
1299 	return done;
1300 }
1301 
1302 /* This returns true if the filter always covers target mm */
1303 static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
1304 				    struct perf_event *event)
1305 {
1306 	bool done;
1307 
1308 	write_lock(&filter->rwlock);
1309 	if (event->hw.target) {
1310 		/*
1311 		 * event->parent != NULL means copy_process(), we can avoid
1312 		 * uprobe_apply(). current->mm must be probed and we can rely
1313 		 * on dup_mmap() which preserves the already installed bp's.
1314 		 *
1315 		 * attr.enable_on_exec means that exec/mmap will install the
1316 		 * breakpoints we need.
1317 		 */
1318 		done = filter->nr_systemwide ||
1319 			event->parent || event->attr.enable_on_exec ||
1320 			trace_uprobe_filter_event(filter, event);
1321 		list_add(&event->hw.tp_list, &filter->perf_events);
1322 	} else {
1323 		done = filter->nr_systemwide;
1324 		filter->nr_systemwide++;
1325 	}
1326 	write_unlock(&filter->rwlock);
1327 
1328 	return done;
1329 }
1330 
1331 static int uprobe_perf_close(struct trace_event_call *call,
1332 			     struct perf_event *event)
1333 {
1334 	struct trace_probe *tp;
1335 	struct trace_uprobe *tu;
1336 	int ret = 0;
1337 
1338 	tp = trace_probe_primary_from_call(call);
1339 	if (WARN_ON_ONCE(!tp))
1340 		return -ENODEV;
1341 
1342 	tu = container_of(tp, struct trace_uprobe, tp);
1343 	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
1344 		return 0;
1345 
1346 	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1347 		ret = uprobe_apply(tu->uprobe, &tu->consumer, false);
1348 		if (ret)
1349 			break;
1350 	}
1351 
1352 	return ret;
1353 }
1354 
1355 static int uprobe_perf_open(struct trace_event_call *call,
1356 			    struct perf_event *event)
1357 {
1358 	struct trace_probe *tp;
1359 	struct trace_uprobe *tu;
1360 	int err = 0;
1361 
1362 	tp = trace_probe_primary_from_call(call);
1363 	if (WARN_ON_ONCE(!tp))
1364 		return -ENODEV;
1365 
1366 	tu = container_of(tp, struct trace_uprobe, tp);
1367 	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
1368 		return 0;
1369 
1370 	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1371 		err = uprobe_apply(tu->uprobe, &tu->consumer, true);
1372 		if (err) {
1373 			uprobe_perf_close(call, event);
1374 			break;
1375 		}
1376 	}
1377 
1378 	return err;
1379 }
1380 
1381 static bool uprobe_perf_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
1382 {
1383 	struct trace_uprobe_filter *filter;
1384 	struct trace_uprobe *tu;
1385 	int ret;
1386 
1387 	tu = container_of(uc, struct trace_uprobe, consumer);
1388 	filter = tu->tp.event->filter;
1389 
1390 	/*
1391 	 * speculative short-circuiting check to avoid unnecessarily taking
1392 	 * filter->rwlock below, if the uprobe has a system-wide consumer
1393 	 */
1394 	if (READ_ONCE(filter->nr_systemwide))
1395 		return true;
1396 
1397 	read_lock(&filter->rwlock);
1398 	ret = __uprobe_perf_filter(filter, mm);
1399 	read_unlock(&filter->rwlock);
1400 
1401 	return ret;
1402 }
1403 
1404 static void __uprobe_perf_func(struct trace_uprobe *tu,
1405 			       unsigned long func, struct pt_regs *regs,
1406 			       struct uprobe_cpu_buffer **ucbp)
1407 {
1408 	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1409 	struct uprobe_trace_entry_head *entry;
1410 	struct uprobe_cpu_buffer *ucb;
1411 	struct hlist_head *head;
1412 	void *data;
1413 	int size, esize;
1414 	int rctx;
1415 
1416 #ifdef CONFIG_BPF_EVENTS
1417 	if (bpf_prog_array_valid(call)) {
1418 		const struct bpf_prog_array *array;
1419 		u32 ret;
1420 
1421 		rcu_read_lock_trace();
1422 		array = rcu_dereference_check(call->prog_array, rcu_read_lock_trace_held());
1423 		ret = bpf_prog_run_array_uprobe(array, regs, bpf_prog_run);
1424 		rcu_read_unlock_trace();
1425 		if (!ret)
1426 			return;
1427 	}
1428 #endif /* CONFIG_BPF_EVENTS */
1429 
1430 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1431 
1432 	ucb = prepare_uprobe_buffer(tu, regs, ucbp);
1433 	size = esize + ucb->dsize;
1434 	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1435 	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1436 		return;
1437 
1438 	preempt_disable();
1439 	head = this_cpu_ptr(call->perf_events);
1440 	if (hlist_empty(head))
1441 		goto out;
1442 
1443 	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1444 	if (!entry)
1445 		goto out;
1446 
1447 	if (is_ret_probe(tu)) {
1448 		entry->vaddr[0] = func;
1449 		entry->vaddr[1] = instruction_pointer(regs);
1450 		data = DATAOF_TRACE_ENTRY(entry, true);
1451 	} else {
1452 		entry->vaddr[0] = instruction_pointer(regs);
1453 		data = DATAOF_TRACE_ENTRY(entry, false);
1454 	}
1455 
1456 	memcpy(data, ucb->buf, ucb->dsize);
1457 
1458 	if (size - esize > ucb->dsize)
1459 		memset(data + ucb->dsize, 0, size - esize - ucb->dsize);
1460 
1461 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1462 			      head, NULL);
1463  out:
1464 	preempt_enable();
1465 }
1466 
1467 /* uprobe profile handler */
1468 static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1469 			    struct uprobe_cpu_buffer **ucbp)
1470 {
1471 	if (!uprobe_perf_filter(&tu->consumer, current->mm))
1472 		return UPROBE_HANDLER_REMOVE;
1473 
1474 	if (!is_ret_probe(tu))
1475 		__uprobe_perf_func(tu, 0, regs, ucbp);
1476 	return 0;
1477 }
1478 
1479 static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1480 				struct pt_regs *regs,
1481 				struct uprobe_cpu_buffer **ucbp)
1482 {
1483 	__uprobe_perf_func(tu, func, regs, ucbp);
1484 }
1485 
1486 int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1487 			const char **filename, u64 *probe_offset,
1488 			u64 *probe_addr, bool perf_type_tracepoint)
1489 {
1490 	const char *pevent = trace_event_name(event->tp_event);
1491 	const char *group = event->tp_event->class->system;
1492 	struct trace_uprobe *tu;
1493 
1494 	if (perf_type_tracepoint)
1495 		tu = find_probe_event(pevent, group);
1496 	else
1497 		tu = trace_uprobe_primary_from_call(event->tp_event);
1498 	if (!tu)
1499 		return -EINVAL;
1500 
1501 	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1502 				    : BPF_FD_TYPE_UPROBE;
1503 	*filename = tu->filename;
1504 	*probe_offset = tu->offset;
1505 	*probe_addr = tu->ref_ctr_offset;
1506 	return 0;
1507 }
1508 #endif	/* CONFIG_PERF_EVENTS */
1509 
1510 static int
1511 trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1512 		      void *data)
1513 {
1514 	struct trace_event_file *file = data;
1515 
1516 	switch (type) {
1517 	case TRACE_REG_REGISTER:
1518 		return probe_event_enable(event, file, NULL);
1519 
1520 	case TRACE_REG_UNREGISTER:
1521 		probe_event_disable(event, file);
1522 		return 0;
1523 
1524 #ifdef CONFIG_PERF_EVENTS
1525 	case TRACE_REG_PERF_REGISTER:
1526 		return probe_event_enable(event, NULL, uprobe_perf_filter);
1527 
1528 	case TRACE_REG_PERF_UNREGISTER:
1529 		probe_event_disable(event, NULL);
1530 		return 0;
1531 
1532 	case TRACE_REG_PERF_OPEN:
1533 		return uprobe_perf_open(event, data);
1534 
1535 	case TRACE_REG_PERF_CLOSE:
1536 		return uprobe_perf_close(event, data);
1537 
1538 #endif
1539 	default:
1540 		return 0;
1541 	}
1542 }
1543 
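/*
 * Flow sketch for the dispatchers below: bump the per-CPU hit counter
 * (entry probes only), stash a struct uprobe_dispatch_data in
 * current->utask->vaddr so FETCH_OP_FOFFS arguments can translate file
 * offsets into virtual addresses (see translate_user_vaddr()), then hand
 * the event to the ftrace and/or perf paths, which share one lazily
 * prepared per-CPU argument buffer via prepare_uprobe_buffer().
 */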
1544 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
1545 			     __u64 *data)
1546 {
1547 	struct trace_uprobe *tu;
1548 	struct uprobe_dispatch_data udd;
1549 	struct uprobe_cpu_buffer *ucb = NULL;
1550 	int ret = 0;
1551 
1552 	tu = container_of(con, struct trace_uprobe, consumer);
1553 
1554 	this_cpu_inc(*tu->nhits);
1555 
1556 	udd.tu = tu;
1557 	udd.bp_addr = instruction_pointer(regs);
1558 
1559 	current->utask->vaddr = (unsigned long) &udd;
1560 
1561 	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1562 		return 0;
1563 
1564 	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1565 		ret |= uprobe_trace_func(tu, regs, &ucb);
1566 
1567 #ifdef CONFIG_PERF_EVENTS
1568 	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1569 		ret |= uprobe_perf_func(tu, regs, &ucb);
1570 #endif
1571 	uprobe_buffer_put(ucb);
1572 	return ret;
1573 }
1574 
1575 static int uretprobe_dispatcher(struct uprobe_consumer *con,
1576 				unsigned long func, struct pt_regs *regs,
1577 				__u64 *data)
1578 {
1579 	struct trace_uprobe *tu;
1580 	struct uprobe_dispatch_data udd;
1581 	struct uprobe_cpu_buffer *ucb = NULL;
1582 
1583 	tu = container_of(con, struct trace_uprobe, consumer);
1584 
1585 	udd.tu = tu;
1586 	udd.bp_addr = func;
1587 
1588 	current->utask->vaddr = (unsigned long) &udd;
1589 
1590 	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1591 		return 0;
1592 
1593 	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1594 		uretprobe_trace_func(tu, func, regs, &ucb);
1595 
1596 #ifdef CONFIG_PERF_EVENTS
1597 	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1598 		uretprobe_perf_func(tu, func, regs, &ucb);
1599 #endif
1600 	uprobe_buffer_put(ucb);
1601 	return 0;
1602 }
1603 
1604 static struct trace_event_functions uprobe_funcs = {
1605 	.trace		= print_uprobe_event
1606 };
1607 
1608 static struct trace_event_fields uprobe_fields_array[] = {
1609 	{ .type = TRACE_FUNCTION_TYPE,
1610 	  .define_fields = uprobe_event_define_fields },
1611 	{}
1612 };
1613 
1614 static inline void init_trace_event_call(struct trace_uprobe *tu)
1615 {
1616 	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1617 	call->event.funcs = &uprobe_funcs;
1618 	call->class->fields_array = uprobe_fields_array;
1619 
1620 	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1621 	call->class->reg = trace_uprobe_register;
1622 }
1623 
1624 static int register_uprobe_event(struct trace_uprobe *tu)
1625 {
1626 	init_trace_event_call(tu);
1627 
1628 	return trace_probe_register_event_call(&tu->tp);
1629 }
1630 
1631 static int unregister_uprobe_event(struct trace_uprobe *tu)
1632 {
1633 	return trace_probe_unregister_event_call(&tu->tp);
1634 }
1635 
1636 #ifdef CONFIG_PERF_EVENTS
1637 struct trace_event_call *
1638 create_local_trace_uprobe(char *name, unsigned long offs,
1639 			  unsigned long ref_ctr_offset, bool is_return)
1640 {
1641 	enum probe_print_type ptype;
1642 	struct trace_uprobe *tu;
1643 	struct path path;
1644 	int ret;
1645 
1646 	ret = kern_path(name, LOOKUP_FOLLOW, &path);
1647 	if (ret)
1648 		return ERR_PTR(ret);
1649 
1650 	if (!d_is_reg(path.dentry)) {
1651 		path_put(&path);
1652 		return ERR_PTR(-EINVAL);
1653 	}
1654 
1655 	/*
1656 	 * Local trace_uprobes are not added to dyn_event, so they are never
1657 	 * searched in find_probe_event(). Therefore, there is no concern of
1658 	 * a duplicated name "DUMMY_EVENT" here.
1659 	 */
1660 	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1661 				is_return);
1662 
1663 	if (IS_ERR(tu)) {
1664 		pr_info("Failed to allocate trace_uprobe.(%d)\n",
1665 			(int)PTR_ERR(tu));
1666 		path_put(&path);
1667 		return ERR_CAST(tu);
1668 	}
1669 
1670 	tu->offset = offs;
1671 	tu->path = path;
1672 	tu->ref_ctr_offset = ref_ctr_offset;
1673 	tu->filename = kstrdup(name, GFP_KERNEL);
1674 	if (!tu->filename) {
1675 		ret = -ENOMEM;
1676 		goto error;
1677 	}
1678 
1679 	init_trace_event_call(tu);
1680 
1681 	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1682 	if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
1683 		ret = -ENOMEM;
1684 		goto error;
1685 	}
1686 
1687 	return trace_probe_event_call(&tu->tp);
1688 error:
1689 	free_trace_uprobe(tu);
1690 	return ERR_PTR(ret);
1691 }
1692 
1693 void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1694 {
1695 	struct trace_uprobe *tu;
1696 
1697 	tu = trace_uprobe_primary_from_call(event_call);
1698 
1699 	free_trace_uprobe(tu);
1700 }
1701 #endif /* CONFIG_PERF_EVENTS */
1702 
1703 /* Make a trace interface for controlling probe points */
1704 static __init int init_uprobe_trace(void)
1705 {
1706 	int ret;
1707 
1708 	ret = dyn_event_register(&trace_uprobe_ops);
1709 	if (ret)
1710 		return ret;
1711 
1712 	ret = tracing_init_dentry();
1713 	if (ret)
1714 		return 0;
1715 
1716 	trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL,
1717 				    NULL, &uprobe_events_ops);
1718 	/* Profile interface */
1719 	trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL,
1720 				    NULL, &uprobe_profile_ops);
1721 	return 0;
1722 }
1723 
1724 fs_initcall(init_uprobe_trace);
1725