/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

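/*
 * Perf probes generated for tracepoints in modules sample the caller's
 * registers through perf_fetch_caller_regs(), which ends up in the arch
 * helper below, hence the export.
 */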
EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);

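/*
 * One scratch buffer per recursion context tracked by
 * perf_swevent_get_recursion_context(): task, softirq, hardirq and NMI.
 */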
static char *perf_trace_buf[4];

/*
 * Force it to be aligned to unsigned long to avoid surprises from
 * misaligned accesses
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

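/*
 * Hook one perf event up to a trace event: the first perf event attached
 * to a given trace event registers the probe and allocates the per-cpu
 * hlist of events; the first user system-wide also allocates the four
 * scratch buffers above.
 */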
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	struct hlist_head *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char *buf;
		int i;

		for (i = 0; i < 4; i++) {
			buf = (char *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	if (tp_event->class->reg)
		ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
	else
		ret = tracepoint_probe_register(tp_event->name,
						tp_event->class->perf_probe,
						tp_event);

	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

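/*
 * This path is reached when a perf event is opened from userspace with
 * attr.type set to PERF_TYPE_TRACEPOINT. A minimal caller sketch, with
 * pid/cpu left to the reader and the event id read from debugfs
 * (e.g. tracing/events/<sys>/<name>/id):
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_TRACEPOINT,
 *		.size   = sizeof(attr),
 *		.config = id,
 *	};
 *	fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
 *
 * perf_trace_init() then resolves attr.config against the registered
 * ftrace events.
 */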
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->perf_probe &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

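/*
 * Runs on the CPU the event gets scheduled in on, with preemption
 * disabled, which is what makes the raw smp_processor_id() below safe;
 * the probe side walks the resulting per-cpu hlist under RCU protection,
 * matching the hlist_add_head_rcu()/hlist_del_rcu() pairing here.
 */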
int perf_trace_enable(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head *list;

	list = tp_event->perf_events;
	if (WARN_ON_ONCE(!list))
		return -EINVAL;

	list = per_cpu_ptr(list, smp_processor_id());
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return 0;
}

void perf_trace_disable(struct perf_event *p_event)
{
	hlist_del_rcu(&p_event->hlist_entry);
}

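/*
 * Mirror of perf_trace_event_init(): the last perf event attached to the
 * trace event unregisters the probe and frees the per-cpu hlist, and the
 * last user system-wide releases the scratch buffers.
 */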
void perf_trace_destroy(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		return;

	if (tp_event->class->reg)
		tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
	else
		tracepoint_probe_unregister(tp_event->name,
					    tp_event->class->perf_probe,
					    tp_event);

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
}

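/*
 * Typical use by a generated perf probe, as a sketch: sizes and field
 * assignment depend on the event, and the submit-side helper is assumed
 * to be perf_trace_buf_submit() as declared in ftrace_event.h:
 *
 *	int rctx;
 *	struct ftrace_raw_<event> *entry;
 *	struct hlist_head *head = this_cpu_ptr(event_call->perf_events);
 *
 *	entry = perf_trace_buf_prepare(size, event_call->event.type,
 *				       regs, &rctx);
 *	if (!entry)
 *		return;
 *	// fill in the event-specific entry fields here
 *	perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head);
 *
 * prepare/submit pairs must stay on the same CPU and in the same
 * recursion context, since the buffer is claimed per context.
 */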
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = per_cpu_ptr(perf_trace_buf[*rctxp], smp_processor_id());

	/* zero the dead bytes from the alignment padding so we don't leak stack to userspace */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);