xref: /linux/kernel/trace/trace_uprobe.c (revision 0d456bad36d42d16022be045c8a53ddbb59ee478)
/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

/*
 * uprobe event core functions
 */
struct trace_uprobe;
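/*
 * Glue between the uprobes layer and a trace_uprobe: ->cons is the
 * uprobe_consumer registered via uprobe_register() and ->tu points back
 * to the owning event so the breakpoint handler can find it.
 */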
struct uprobe_trace_consumer {
	struct uprobe_consumer		cons;
	struct trace_uprobe		*tu;
};

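/*
 * Per-event state for one uprobe-based trace event: the probed inode and
 * offset, the dynamically created ftrace event, and the fetch arguments
 * appended in the flexible args[] array.
 */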
struct trace_uprobe {
	struct list_head		list;
	struct ftrace_event_class	class;
	struct ftrace_event_call	call;
	struct uprobe_trace_consumer	*consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	unsigned int			flags;	/* For TP_FLAG_* */
	ssize_t				size;	/* trace entry size */
	unsigned int			nr_args;
	struct probe_arg		args[];
};

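/* Size of a trace_uprobe with @n probe_arg entries in its args[] array. */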
#define SIZEOF_TRACE_UPROBE(n)			\
	(offsetof(struct trace_uprobe, args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static void unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);

/*
 * Allocate a new trace_uprobe and initialize it; the uprobe itself is
 * registered later, when the event is enabled (see probe_event_enable()).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->call.class = &tu->class;
	tu->call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->call.name)
		goto error;

	tu->class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	return tu;

error:
	kfree(tu->call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->nr_args; i++)
		traceprobe_free_probe_arg(&tu->args[i]);

	iput(tu->inode);
	kfree(tu->call.class->system);
	kfree(tu->call.name);
	kfree(tu->filename);
	kfree(tu);
}

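/* Look up an event by name and group; the caller must hold uprobe_lock. */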
static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(tu->call.name, event) == 0 &&
		    strcmp(tu->call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and its probe_event; must be called with uprobe_lock held */
static void unregister_trace_uprobe(struct trace_uprobe *tu)
{
	list_del(&tu->list);
	unregister_uprobe_event(tu);
	free_trace_uprobe(tu);
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(tu->call.name, tu->call.class->system);
	if (old_tu)
		/* delete old event */
		unregister_trace_uprobe(old_tu);

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warning("Failed to register probe event (%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 *
 * Only a numeric OFFSET into PATH is accepted here; symbol names must be
 * resolved to file offsets by the caller before writing the command.
 */
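/*
 * For example (the binary path and offset below are illustrative only):
 *
 *   echo 'p:myprobe /bin/bash:0x4245c0 %ip %ax' > uprobe_events
 *   echo '-:myprobe' >> uprobe_events
 *
 * where "uprobe_events" is the control file created in init_uprobe_trace().
 */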
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must start with 'p' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return 0;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		pr_info("Probe point must have a filename.\n");
		return -EINVAL;
	}
	arg = strchr(argv[1], ':');
	if (!arg)
		goto fail_address_parse;

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(path.dentry->d_inode);
	/* we hold our own inode reference now; drop the path reference */
	path_put(&path);

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe (%d).\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Increment count for freeing args in error case */
		tu->nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			tu->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			tu->args[i].name = kstrdup(buf, GFP_KERNEL);
		}

		if (!tu->args[i].name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(tu->args[i].name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, tu->args[i].name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(tu->args[i].name, tu->args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->size, &tu->args[i], false, false);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	if (inode)
		iput(inode);

	pr_info("Failed to parse address.\n");

	return ret;
}

static void cleanup_all_probes(void)
{
	struct trace_uprobe *tu;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		unregister_trace_uprobe(tu);
	}
	mutex_unlock(&uprobe_lock);
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	int i;

	seq_printf(m, "p:%s/%s", tu->call.class->system, tu->call.name);
	seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);

	for (i = 0; i < tu->nr_args; i++)
		seq_printf(m, " %s=%s", tu->args[i].name, tu->args[i].comm);

	seq_printf(m, "\n");
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

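/* Opening "uprobe_events" for writing with O_TRUNC removes all existing probes. */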
static int probes_open(struct inode *inode, struct file *file)
{
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
		cleanup_all_probes();

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, "  %s %-44s %15lu\n", tu->filename, tu->call.name, tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* uprobe handler */
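/*
 * Record a probe hit in the ftrace ring buffer: reserve an event, fill in
 * the probed instruction pointer and the fetched argument values, and commit
 * the event unless the event filter discards it.
 */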
static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	u8 *data;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tu->call;

	tu->nhit++;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = sizeof(*entry) + tu->size;

	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
						  size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = uprobe_get_swbp_addr(task_pt_regs(current));
	data = (u8 *)&entry[1];
	for (i = 0; i < tu->nr_args; i++)
		call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	field = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, call.event);

	if (!trace_seq_printf(s, "%s: (", tu->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	data = (u8 *)&field[1];
	for (i = 0; i < tu->nr_args; i++) {
		if (!tu->args[i].type->print(s, tu->args[i].name,
					     data + tu->args[i].offset, field))
			goto partial;
	}

	if (trace_seq_puts(s, "\n"))
		return TRACE_TYPE_HANDLED;

partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

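/*
 * Attach the probe: allocate a consumer, register it with the uprobes layer
 * via uprobe_register(), and record which user (ftrace or perf) enabled it
 * in tu->flags.
 */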
static int probe_event_enable(struct trace_uprobe *tu, int flag)
{
	struct uprobe_trace_consumer *utc;
	int ret = 0;

	if (!tu->inode || tu->consumer)
		return -EINTR;

	utc = kzalloc(sizeof(struct uprobe_trace_consumer), GFP_KERNEL);
	if (!utc)
		return -ENOMEM;

	utc->cons.handler = uprobe_dispatcher;
	utc->cons.filter = NULL;
	ret = uprobe_register(tu->inode, tu->offset, &utc->cons);
	if (ret) {
		kfree(utc);
		return ret;
	}

	tu->flags |= flag;
	utc->tu = tu;
	tu->consumer = utc;

	return 0;
}

static void probe_event_disable(struct trace_uprobe *tu, int flag)
{
	if (!tu->inode || !tu->consumer)
		return;

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer->cons);
	tu->flags &= ~flag;
	kfree(tu->consumer);
	tu->consumer = NULL;
}

static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = (struct trace_uprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tu->nr_args; i++) {
		ret = trace_define_field(event_call, tu->args[i].type->fmttype,
					 tu->args[i].name,
					 sizeof(field) + tu->args[i].offset,
					 tu->args[i].type->size,
					 tu->args[i].type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}

#define LEN_OR_ZERO		(len ? len - pos : 0)
static int __set_print_fmt(struct trace_uprobe *tu, char *buf, int len)
{
	const char *fmt, *arg;
	int i;
	int pos = 0;

	fmt = "(%lx)";
	arg = "REC->" FIELD_STRING_IP;

	/* When len=0, we just calculate the needed length */

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tu->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tu->args[i].name, tu->args[i].type->fmt);
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tu->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
				tu->args[i].name);
	}

	return pos;	/* return the length of print_fmt */
}
#undef LEN_OR_ZERO

static int set_print_fmt(struct trace_uprobe *tu)
{
	char *print_fmt;
	int len;

	/* First: called with 0 length to calculate the needed length */
	len = __set_print_fmt(tu, NULL, 0);
	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_print_fmt(tu, print_fmt, len + 1);
	tu->call.print_fmt = print_fmt;

	return 0;
}

#ifdef CONFIG_PERF_EVENTS
/* uprobe profile handler */
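/*
 * Perf counterpart of uprobe_trace_func(): copy the instruction pointer and
 * the fetched argument values into a perf trace buffer and submit it to the
 * perf events attached to this call.
 */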
static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tu->call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	u8 *data;
	int size, __size, i;
	int rctx;

	__size = sizeof(*entry) + tu->size;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		goto out;

	entry->ip = uprobe_get_swbp_addr(task_pt_regs(current));
	data = (u8 *)&entry[1];
	for (i = 0; i < tu->nr_args; i++)
		call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL);

 out:
	preempt_enable();
}
#endif	/* CONFIG_PERF_EVENTS */

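/*
 * Enable or disable the probe on behalf of the ftrace ("trace") and perf
 * ("profile") interfaces; hooked up as call->class->reg in
 * register_uprobe_event().
 */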
static
int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data)
{
	struct trace_uprobe *tu = (struct trace_uprobe *)event->data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, TP_FLAG_TRACE);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, TP_FLAG_TRACE);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, TP_FLAG_PROFILE);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, TP_FLAG_PROFILE);
		return 0;
#endif
	default:
		return 0;
	}
	return 0;
}

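/*
 * Breakpoint handler: called by the uprobes core through the registered
 * uprobe_consumer whenever the probed instruction is hit.
 */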
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct uprobe_trace_consumer *utc;
	struct trace_uprobe *tu;

	utc = container_of(con, struct uprobe_trace_consumer, cons);
	tu = utc->tu;
	if (!tu || tu->consumer != utc)
		return 0;

	if (tu->flags & TP_FLAG_TRACE)
		uprobe_trace_func(tu, regs);

#ifdef CONFIG_PERF_EVENTS
	if (tu->flags & TP_FLAG_PROFILE)
		uprobe_perf_func(tu, regs);
#endif
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct ftrace_event_call *call = &tu->call;
	int ret;

	/* Initialize ftrace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	if (set_print_fmt(tu) < 0)
		return -ENOMEM;

	ret = register_ftrace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = 0;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n", call->name);
		kfree(call->print_fmt);
		unregister_ftrace_event(&call->event);
	}

	return ret;
}

static void unregister_uprobe_event(struct trace_uprobe *tu)
{
	/* tu->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tu->call);
	kfree(tu->call.print_fmt);
	tu->call.print_fmt = NULL;
}

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);