xref: /linux/kernel/trace/trace_event_perf.c (revision 088e88be5a380cc4e81963a9a02815da465d144f)
// SPDX-License-Identifier: GPL-2.0
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
#include "trace_probe.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

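/*
 * Check whether the task opening the perf event is allowed to attach it
 * to this trace event; returns 0 if permitted, a negative errno if not.
 */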
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * The parent event has already been checked and allowed, so
	 * allow children without checking again.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for the function trace
		 * event, due to issues with page faults while tracing the page
		 * fault handler and the overall trickiness of that path.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

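/*
 * Take a perf reference on the trace event.  The first perf event attached
 * to a given trace event allocates its per-cpu hlist of attached events;
 * the first trace event used by perf at all also allocates the global
 * per-context scratch buffers (perf_trace_buf).
 */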
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

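/*
 * Drop the perf reference taken by perf_trace_event_reg() and, once the
 * last user is gone, free the per-cpu hlist and the global buffers.
 */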
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

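/* Forward perf open/close of the event to the trace event's ->reg() hook. */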
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

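/*
 * Common initialization for a perf event on a trace event: permission
 * check, registration, then the per-event open callback, unwinding the
 * registration if the open fails.
 */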
static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

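/*
 * Called by the perf core for PERF_TYPE_TRACEPOINT events: find the trace
 * event whose id matches attr.config, pin its module and initialize the
 * perf event on it.
 */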
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

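/* Counterpart of perf_trace_init(): close and unregister the event. */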
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

#ifdef CONFIG_KPROBE_EVENTS
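/*
 * Create a local (perf-only) kprobe trace event from either a symbol name
 * copied in from attr.kprobe_func or a raw address in attr.kprobe_addr,
 * then attach the perf event to it.
 */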
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
		if (!func)
			return -ENOMEM;
		ret = strncpy_from_user(
			func, u64_to_user_ptr(p_event->attr.kprobe_func),
			KSYM_NAME_LEN);
		if (ret == KSYM_NAME_LEN)
			ret = -E2BIG;
		if (ret < 0)
			goto out;

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */

#ifdef CONFIG_UPROBE_EVENTS
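/*
 * Create a local (perf-only) uprobe trace event from the user-supplied
 * attr.uprobe_path and attr.probe_offset, then attach the perf event to it.
 */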
int perf_uprobe_init(struct perf_event *p_event,
		     unsigned long ref_ctr_offset, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;

	path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
			    PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		return (ret == -EINVAL) ? -E2BIG : ret;
	}
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
					     ref_ctr_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * A local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable().
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */

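/*
 * pmu ->add()/->del() callbacks for trace events: unless the trace event
 * implements its own ADD/DEL action, hook the event into (or unhook it
 * from) this CPU's hlist of active perf events.
 */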
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false, no custom action was performed
	 * and we need to take the default action of enqueueing our event on
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false, no custom action was performed
	 * and we need to take the default action of dequeueing our event from
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}

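/*
 * Grab this CPU's scratch buffer for the current recursion context to
 * build a trace record of @size bytes, and optionally the pt_regs to use.
 * Must be paired with perf_trace_buf_submit() (or a manual
 * perf_swevent_put_recursion_context()) to release the context.
 */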
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead alignment bytes so we don't leak stack data to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

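/*
 * Fill in the common trace_entry header of a record built with
 * perf_trace_buf_alloc(): event type, irq flags and preempt count.
 */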
void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;
	int pc = preempt_count();
	unsigned long flags;

	local_save_flags(flags);
	tracing_generic_entry_update(entry, type, flags, pc);
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
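/*
 * ftrace callback for the perf function-trace event.  It runs for every
 * traced function on the CPU the event is bound to, builds an ftrace_entry
 * in the per-cpu perf buffer and submits it to the attached event.
 */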
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;

	if ((unsigned long)ops->private != smp_processor_id())
		return;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags   = FTRACE_OPS_FL_RCU;
	ops->func    = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

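/*
 * ->reg() implementation for the ftrace:function trace event: OPEN/CLOSE
 * register and unregister the per-event ftrace_ops, while ADD/DEL bind the
 * ops to the current CPU (or to no CPU) via ops->private and return 1 to
 * tell perf_trace_add()/perf_trace_del() that no hlist handling is needed.
 */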
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */