// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>
#include <linux/uprobes.h>

#include "internal.h"

struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[];
};

int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

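/*
 * Size of one callchain entry: the fixed header plus one __u64 slot per
 * stack frame and per context marker (PERF_CONTEXT_KERNEL and friends),
 * each part capped by its own sysctl above. With the default limits
 * (PERF_MAX_STACK_DEPTH == 127, PERF_MAX_CONTEXTS_PER_STACK == 8) that is
 * sizeof(struct perf_callchain_entry) + 135 * sizeof(__u64) per entry.
 */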
static inline size_t perf_callchain_entry__sizeof(void)
{
	return (sizeof(struct perf_callchain_entry) +
		sizeof(__u64) * (sysctl_perf_event_max_stack +
				 sysctl_perf_event_max_contexts_per_stack));
}

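/*
 * Per-CPU recursion guards, one slot per recursion context level. The
 * index handed out by get_recursion_context() doubles as the index of the
 * per-CPU callchain entry to use, so each context level gets its own
 * scratch entry and nested samples within the same level are dropped.
 */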
static DEFINE_PER_CPU(u8, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;


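/*
 * Fallback stubs: architectures that support callchain sampling override
 * these with their own perf_callchain_kernel()/perf_callchain_user()
 * implementations; everyone else gets empty callchains.
 */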
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				struct pt_regs *regs)
{
}

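/*
 * RCU callback: by the time this runs a grace period has elapsed since
 * callchain_cpus_entries was cleared, so readers that picked up the old
 * pointer are done with it and the buffers can be freed safely.
 */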
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

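/*
 * Unpublish the buffers first, then defer the actual freeing until after
 * an RCU grace period so that any get_callchain_entry() caller that
 * already dereferenced the old pointer can finish with it.
 */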
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

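/*
 * Allocate one container with nr_cpu_ids pointer slots plus, for every
 * possible CPU, a node-local buffer large enough for PERF_NR_CONTEXTS
 * callchain entries (one per recursion context level).
 */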
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

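/*
 * Take a reference on the global callchain buffers, allocating them on
 * first use. Returns -EOVERFLOW (rather than -ENOMEM) when the event asks
 * for a deeper stack than sysctl_perf_event_max_stack allows, so that
 * userspace can tell the two failures apart.
 */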
int get_callchain_buffers(int event_max_stack)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	/*
	 * If the event requests a deeper stack than the global cap
	 * allows, return a different error to help userspace figure
	 * this out.
	 *
	 * Also do the check here so that &callchain_mutex is held.
	 */
	if (event_max_stack > sysctl_perf_event_max_stack) {
		err = -EOVERFLOW;
		goto exit;
	}

	if (count == 1)
		err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

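/*
 * Drop a reference; the last user tears the buffers down via
 * release_callchain_buffers().
 */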
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

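/*
 * Grab this CPU's scratch entry for the current recursion context.
 * Returns NULL (without holding a recursion context) if we are recursing
 * within the same context or the buffers are not allocated; otherwise the
 * caller must pair this with put_callchain_entry(*rctx).
 */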
struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries) {
		put_recursion_context(this_cpu_ptr(callchain_recursion), *rctx);
		return NULL;
	}

	cpu = smp_processor_id();

	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
}

void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

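/*
 * Rough example of the fixup below (addresses are purely illustrative):
 * with two pending uretprobes, a captured user stack of
 *
 *	[ leaf, tramp, caller, tramp, main ]
 *
 * becomes
 *
 *	[ leaf, orig_ret0, caller, orig_ret1, main ]
 *
 * where orig_retN are the return addresses saved, in order, in
 * utask->return_instances.
 */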
static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entry,
					       int start_entry_idx)
{
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask = current->utask;
	struct return_instance *ri;
	__u64 *cur_ip, *last_ip, tramp_addr;

	if (likely(!utask || !utask->return_instances))
		return;

	cur_ip = &entry->ip[start_entry_idx];
	last_ip = &entry->ip[entry->nr - 1];
	ri = utask->return_instances;
	tramp_addr = uprobe_get_trampoline_vaddr();

	/*
	 * If there are pending uretprobes for the current thread, they are
	 * recorded in a list inside utask->return_instances; each such
	 * pending uretprobe replaces the traced user function's return
	 * address on the stack, so when the stack trace is captured we see
	 * one or more uretprobe trampoline addresses instead of the actual
	 * return addresses, which is unhelpful and misleading to users.
	 * So go over the pending list of uretprobes and replace each
	 * encountered trampoline address with the actual return address.
	 */
	while (ri && cur_ip <= last_ip) {
		if (*cur_ip == tramp_addr) {
			*cur_ip = ri->orig_ret_vaddr;
			ri = ri->next;
		}
		cur_ip++;
	}
#endif
}

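/*
 * Capture a callchain for @regs into a per-CPU scratch entry and return
 * it, or NULL if no entry is available (recursion in this context, or
 * buffers not allocated).
 *
 * @init_nr:   initial entry count to start from
 * @kernel:    include the kernel part of the stack
 * @user:      include the user part of the stack
 * @max_stack: per-event cap on the number of captured frames
 * @crosstask: sampling a task other than current, so skip the user unwind
 * @add_mark:  store PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER marker entries
 */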
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	struct perf_callchain_entry_ctx ctx;
	int rctx, start_entry_idx;

	entry = get_callchain_entry(&rctx);
	if (!entry)
		return NULL;

	ctx.entry     = entry;
	ctx.max_stack = max_stack;
	ctx.nr	      = entry->nr = init_nr;
	ctx.contexts       = 0;
	ctx.contexts_maxed = false;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(&ctx, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);

			start_entry_idx = entry->nr;
			perf_callchain_user(&ctx, regs);
			fixup_uretprobe_trampoline_entries(entry, start_entry_idx);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}

/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
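/*
 * The per-CPU buffers are sized via perf_callchain_entry__sizeof() when
 * they are allocated, so the limits may only change while no
 * callchain-using events exist; otherwise writes fail with -EBUSY.
 */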
int perf_event_max_stack_handler(const struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}