/*
 * arm64 callchain support
 *
 * Copyright (C) 2015 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>

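/*
 * An AAPCS64 frame record: x29 (the frame pointer) points at a pair of
 * saved values, {previous fp, lr}, laid down by each function prologue,
 * so following ->fp steps from one record to its caller's.
 */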
struct frame_tail {
	struct frame_tail	__user *fp;
	unsigned long		lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;

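	/*
	 * Callchains may be collected from NMI/interrupt context, so the
	 * read from user memory must not sleep: disable page faults so
	 * the copy fails fast if the stack page is not resident.
	 */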
	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
compat_user_backtrace(struct compat_frame_tail __user *tail,
		      struct perf_callchain_entry *entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct compat_frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

void perf_callchain_user(struct perf_callchain_entry *entry,
			 struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

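	/* The interrupted PC is always the first entry in the chain. */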
	perf_callchain_store(entry, regs->pc);

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];

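		/*
		 * Only follow frame pointers that are 16-byte aligned,
		 * matching the AAPCS64 stack alignment requirement;
		 * anything else indicates a corrupt or absent frame chain.
		 */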
		while (entry->nr < PERF_MAX_STACK_DEPTH &&
		       tail && !((unsigned long)tail & 0xf))
			tail = user_backtrace(tail, entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;

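		/*
		 * AArch32 frame records need only be word-aligned, hence
		 * the weaker 4-byte alignment check here.
		 */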
		while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
			tail && !((unsigned long)tail & 0x3))
			tail = compat_user_backtrace(tail, entry);
#endif
	}
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int callchain_trace(struct stackframe *frame, void *data)
{
	struct perf_callchain_entry *entry = data;

	perf_callchain_store(entry, frame->pc);
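	/* A zero return tells walk_stackframe() to keep unwinding. */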
	return 0;
}

void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	struct stackframe frame;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

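	/* Seed the unwinder with the FP, SP and PC at the point of the sample. */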
	frame.fp = regs->regs[29];
	frame.sp = regs->sp;
	frame.pc = regs->pc;

	walk_stackframe(&frame, callchain_trace, entry);
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

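	/* Classify the sample as guest/host and user/kernel for the record header. */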
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}
197