xref: /linux/arch/sh/kernel/perf_callchain.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb)
1 /*
2  * Performance event callchain support - SuperH architecture code
3  *
4  * Copyright (C) 2009  Paul Mundt
5  *
6  * This file is subject to the terms and conditions of the GNU General Public
7  * License.  See the file "COPYING" in the main directory of this archive
8  * for more details.
9  */
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/perf_event.h>
13 #include <linux/percpu.h>
14 #include <asm/unwinder.h>
15 #include <asm/ptrace.h>
16 
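/*
 * Push one instruction pointer onto the callchain entry; addresses past
 * PERF_MAX_STACK_DEPTH are silently dropped.
 */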
static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

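/*
 * stacktrace_ops callbacks handed to the unwinder (see unwind_stack()
 * below). Warnings and stack-name notifications carry no useful
 * information for sampling, so they are ignored; only addresses the
 * unwinder reports as reliable end up in the callchain.
 */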
static void callchain_warning(void *data, char *msg)
{
}

static void
callchain_warning_symbol(void *data, char *msg, unsigned long symbol)
{
}

static int callchain_stack(void *data, char *name)
{
	return 0;
}

static void callchain_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops callchain_ops = {
	.warning	= callchain_warning,
	.warning_symbol	= callchain_warning_symbol,
	.stack		= callchain_stack,
	.address	= callchain_address,
};

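/*
 * Capture the kernel-side chain: mark the context, record the sampled
 * PC, then let the unwinder walk the rest of the kernel stack through
 * callchain_ops.
 */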
static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->pc);

	unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
}

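/*
 * Build the callchain for one sample. Nothing is recorded without regs,
 * and user-mode samples are only considered for a task that is currently
 * running; user stack unwinding itself is not implemented, so only
 * kernel-mode chains are captured.
 */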
static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (is_user && current->state != TASK_RUNNING)
		return;

	/*
	 * Only the kernel side is implemented for now.
	 */
	if (!is_user)
		perf_callchain_kernel(regs, entry);
}

/*
 * No need for separate IRQ and NMI entries.
 */
static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);

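/*
 * Fill in the per-CPU scratch entry for the current sample and hand it
 * back to the generic perf code.
 */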
struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);

	entry->nr = 0;

	perf_do_callchain(regs, entry);

	return entry;
}
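
/*
 * Usage sketch (illustrative, not part of this file): it is the generic
 * perf core that calls perf_callchain() when a sample requests
 * PERF_SAMPLE_CALLCHAIN, roughly along these lines, with data being the
 * perf_sample_data for the event:
 *
 *	data->callchain = perf_callchain(regs);
 *	if (data->callchain)
 *		size += data->callchain->nr * sizeof(u64);
 *
 * Because the entry lives in per-CPU data, it is only valid until the
 * next sample taken on the same CPU.
 */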