xref: /linux/kernel/trace/trace_event_perf.c (revision 9cb627d5f38830ca19aa0dca52d1d3a633018bf7)
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
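
/*
 * One scratch buffer per recursion context (task, softirq, hardirq, NMI),
 * indexed by the rctx handed out by perf_swevent_get_recursion_context().
 */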
static char *perf_trace_buf[4];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;
/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	struct hlist_head *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char *buf;
		int i;

		for (i = 0; i < 4; i++) {
			buf = (char *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
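
/*
 * Entry point for the perf core when a PERF_TYPE_TRACEPOINT event is
 * created: attr.config carries the ftrace event id (the value exported
 * in debugfs under events/<subsys>/<event>/id).  On success the owning
 * module is pinned; the reference is dropped in perf_trace_destroy(),
 * or right away if the per-event setup fails.
 */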
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
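
/*
 * perf_trace_enable()/perf_trace_disable() run when the event is
 * scheduled in/out on a CPU: they add or remove the event on the
 * per-cpu hlist that perf_tp_event() walks on the tracepoint fast path.
 */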
int perf_trace_enable(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head *list;

	list = tp_event->perf_events;
	if (WARN_ON_ONCE(!list))
		return -EINVAL;

	list = this_cpu_ptr(list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return 0;
}

void perf_trace_disable(struct perf_event *p_event)
{
	hlist_del_rcu(&p_event->hlist_entry);
}

void perf_trace_destroy(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	mutex_lock(&event_mutex);
	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
	mutex_unlock(&event_mutex);
}
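
/*
 * Illustrative sketch only (not part of this file, and not the real perf
 * core wiring): how a caller is expected to pair the entry points above.
 * Only perf_trace_init(), perf_trace_enable(), perf_trace_disable() and
 * perf_trace_destroy() are real; the surrounding function and its name
 * are hypothetical.
 */
#if 0	/* example, never built */
static int example_tracepoint_event_lifecycle(struct perf_event *p_event)
{
	int ret;

	/* Creation: resolve attr.config and register the perf probe. */
	ret = perf_trace_init(p_event);
	if (ret)
		return ret;

	/* Schedule-in: hook the event onto this CPU's dispatch hlist. */
	ret = perf_trace_enable(p_event);
	if (ret) {
		perf_trace_destroy(p_event);
		return ret;
	}

	/* Schedule-out, then teardown, in the reverse order. */
	perf_trace_disable(p_event);
	perf_trace_destroy(p_event);

	return 0;
}
#endif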
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* Zero the dead bytes added for alignment so we don't leak stack data to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
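
/*
 * Illustrative sketch only (not part of this file): roughly how a
 * kprobe-style perf handler of this era uses perf_trace_buf_prepare()
 * together with the perf_trace_buf_submit() helper.  The struct, the
 * handler name and its parameters are made up for the example, and the
 * exact perf_trace_buf_submit() argument list is an assumption based on
 * contemporary in-tree callers, not a reference.
 */
#if 0	/* example, never built */
struct example_trace_entry {
	struct trace_entry	ent;	/* header filled by perf_trace_buf_prepare() */
	unsigned long		ip;
	unsigned long		value;
};

static void example_perf_handler(struct ftrace_event_call *tp_event,
				 unsigned long ip, unsigned long value,
				 struct pt_regs *regs)
{
	struct example_trace_entry *entry;
	struct hlist_head *head;
	int size, rctx;

	/* Record size must stay u64 aligned and below PERF_MAX_TRACE_SIZE. */
	size = ALIGN(sizeof(*entry), sizeof(u64));

	entry = perf_trace_buf_prepare(size, tp_event->event.type, regs, &rctx);
	if (!entry)
		return;		/* no free recursion context */

	entry->ip = ip;
	entry->value = value;

	/* Hand the record to every perf event hooked on this CPU's list. */
	head = this_cpu_ptr(tp_event->perf_events);
	perf_trace_buf_submit(entry, size, rctx, ip, 1, regs, head);
}
#endif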