xref: /linux/kernel/trace/trace_event_perf.c (revision f4be073db878d0e79f74bc36f1642847781791a0)
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

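/*
 * Per-CPU scratch buffers used to build the raw trace data handed to
 * perf, one buffer per recursion context (task, softirq, hardirq, NMI).
 * They are allocated by the first and freed by the last user, see
 * total_ref_count below.
 */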
static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

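/*
 * Permission check for attaching @p_event to @tp_event.  Function
 * tracing and raw tracepoint sample data are restricted to privileged
 * users; returns 0 if allowed, a negative errno otherwise.
 */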
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * The parent event was already checked and allowed at creation
	 * time, so allow its children without checking again.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check the current process (owner) permissions here,
	 * because the code below is only reached via the perf_event_open
	 * syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		/*
		 * We don't allow user space callchains for the function
		 * trace event, due to issues with page faults while
		 * tracing the page fault handler and the overall
		 * trickiness of the whole thing.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

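/*
 * Take a perf reference on @tp_event.  The first perf event attached
 * to this trace event allocates the per-CPU hlist of attached events
 * and registers the event class; the very first trace event in use
 * also allocates the shared perf_trace_buf buffers.
 */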
static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

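	/*
	 * First trace event used by perf anywhere: allocate the shared
	 * per-CPU raw data buffers.
	 */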
	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

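	/*
	 * Error path: free whatever was allocated above and drop the
	 * refcount taken at entry.
	 */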
fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

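/*
 * Drop the perf reference on @p_event's trace event.  The last user
 * unregisters the event class, waits for in-flight callbacks to finish
 * and frees the per-CPU lists; once no trace event is in use at all,
 * the shared buffers are freed as well.  The module reference taken in
 * perf_trace_init() is always dropped.
 */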
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

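/*
 * Per-event open/close hooks: give the event class a chance to do
 * per-perf-event setup and teardown (the function trace event, for
 * example, registers and unregisters its ftrace_ops here).
 */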
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

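/*
 * Full setup of a new perf event on a trace event: permission check,
 * class registration, then the per-event open hook.  Registration is
 * rolled back if the open hook fails.
 */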
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

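/*
 * Called from the perf core when a tracepoint perf event is created:
 * attr.config carries the id of the trace event to attach to.  Look up
 * the matching event, pin its module and wire the event up.
 */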
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

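/* Counterpart of perf_trace_init(), called when the perf event is torn down. */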
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

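/*
 * pmu->add() callback: the event is being scheduled in on this CPU.
 * Link it into the per-CPU list walked by the tracepoint handlers and
 * notify the event class.  Unless PERF_EF_START is set, the event
 * starts in the stopped state.
 */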
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

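/*
 * pmu->del() callback: the event is being scheduled out, unlink it
 * from the per-CPU list and notify the event class.
 */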
void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

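/*
 * Reserve @size bytes in the per-CPU raw buffer of the current
 * recursion context and initialize the common trace entry fields.
 * Returns the buffer, with the recursion context stored in *rctxp for
 * the matching perf_trace_buf_submit(), or NULL if the request is too
 * large or recursion is detected.
 */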
void *perf_trace_buf_prepare(int size, unsigned short type,
			     struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
			"perf buffer not large enough"))
		return NULL;

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* Zero the dead bytes from the alignment to avoid leaking stack to user. */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
NOKPROBE_SYMBOL(perf_trace_buf_prepare);

#ifdef CONFIG_FUNCTION_TRACER
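/*
 * ftrace callback for the perf "function" trace event: build a
 * struct ftrace_entry for this call site and submit it to the perf
 * events attached on this CPU.
 */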
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}

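/*
 * Hook the per-event ftrace_ops into the function tracer.  The
 * FTRACE_OPS_FL_CONTROL flag lets the ops be enabled and disabled
 * per CPU by the sched-in/sched-out helpers below.
 */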
static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_CONTROL;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}

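/*
 * reg() callback used by the function trace event: map the generic
 * trace_reg operations onto the ftrace_ops helpers above.
 */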
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */