xref: /linux/arch/arc/kernel/stacktrace.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*
2  *	stacktrace.c : stacktracing APIs needed by rest of kernel
3  *			(wrappers over ARC dwarf based unwinder)
4  *
5  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  *  vineetg: aug 2009
12  *  -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
13  *   for displaying task's kernel mode call stack in /proc/<pid>/stack
14  *  -Iterator based approach to have single copy of unwinding core and APIs
15  *   needing unwinding, implement the logic in iterator regarding:
16  *      = which frame onwards to start capture
17  *      = which frame to stop capturing (wchan)
18  *      = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
19  *
20  *  vineetg: March 2009
21  *  -Implemented correct versions of thread_saved_pc() and get_wchan()
22  *
23  *  rajeshwarr: 2008
24  *  -Initial implementation
25  */
26 
27 #include <linux/ptrace.h>
28 #include <linux/export.h>
29 #include <linux/stacktrace.h>
30 #include <linux/kallsyms.h>
31 #include <asm/arcregs.h>
32 #include <asm/unwind.h>
33 #include <asm/switch_to.h>
34 
35 /*-------------------------------------------------------------------------
36  *              Unwinder Iterator
37  *-------------------------------------------------------------------------
38  */
39 
40 #ifdef CONFIG_ARC_DW2_UNWIND
41 
/*
 * Prime @frame_info for one of three unwinding scenarios, selected by
 * which of @tsk / @regs are NULL:
 *   tsk == NULL && regs == NULL : unwind the current call chain
 *   tsk != NULL && regs == NULL : unwind a sleeping task
 *   regs != NULL                : unwind from a saved intr/exception frame
 * In every case call_frame is cleared so the unwinder does not assume a
 * standard call-frame layout at the seed point.
 */
static void seed_unwind_frame_info(struct task_struct *tsk,
				   struct pt_regs *regs,
				   struct unwind_frame_info *frame_info)
{
	/*
	 * synchronous unwinding (e.g. dump_stack)
	 *  - uses current values of SP and friends
	 */
	if (tsk == NULL && regs == NULL) {
		unsigned long fp, sp, blink, ret;
		frame_info->task = current;

		/* Snapshot the live FP (r27), SP (r28), BLINK (r31) and
		 * PCL (r63) right here, so the walk starts at this very
		 * function.
		 */
		__asm__ __volatile__(
			"mov %0,r27\n\t"
			"mov %1,r28\n\t"
			"mov %2,r31\n\t"
			"mov %3,r63\n\t"
			: "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
		);

		frame_info->regs.r27 = fp;
		frame_info->regs.r28 = sp;
		frame_info->regs.r31 = blink;
		frame_info->regs.r63 = ret;
		frame_info->call_frame = 0;
	} else if (regs == NULL) {
		/*
		 * Asynchronous unwinding of sleeping task
		 *  - Gets SP etc from task's pt_regs (saved bottom of kernel
		 *    mode stack of task)
		 */

		frame_info->task = tsk;

		frame_info->regs.r27 = TSK_K_FP(tsk);
		frame_info->regs.r28 = TSK_K_ESP(tsk);
		frame_info->regs.r31 = TSK_K_BLINK(tsk);
		/* Sleeping tasks stopped inside __switch_to, so seed the PC
		 * at its entry point and unwind from there.
		 */
		frame_info->regs.r63 = (unsigned int)__switch_to;

		/* In the prologue of __switch_to, first FP is saved on stack
		 * and then SP is copied to FP. Dwarf assumes cfa as FP based
		 * but we didn't save FP. The value retrieved above is FP's
		 * state in previous frame.
		 * As a work around for this, we unwind from __switch_to start
		 * (hence r27 is zapped to 0 below, overriding TSK_K_FP above)
		 * and adjust SP accordingly. The other limitation is that
		 * dwarf rules are not generated for inline assembly code,
		 * such as the __switch_to macro.
		 */
		frame_info->regs.r27 = 0;
		/* 60 = presumably the size of the register save area pushed
		 * by __switch_to's prologue — TODO confirm against the
		 * __switch_to implementation.
		 */
		frame_info->regs.r28 += 60;
		frame_info->call_frame = 0;

	} else {
		/*
		 * Asynchronous unwinding of intr/exception
		 *  - Just uses the pt_regs passed
		 */
		frame_info->task = tsk;

		frame_info->regs.r27 = regs->fp;
		frame_info->regs.r28 = regs->sp;
		frame_info->regs.r31 = regs->blink;
		frame_info->regs.r63 = regs->ret;
		frame_info->call_frame = 0;
	}
}
108 
109 #endif
110 
/*
 * Core unwinder iterator: seed a frame from @tsk/@regs (see
 * seed_unwind_frame_info for the three modes), then walk up the stack
 * handing each PC to @consumer_fn until the consumer returns -1, the PC
 * leaves kernel text, or arc_unwind() reports an error.
 * Returns the last PC seen.
 */
notrace noinline unsigned int
arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
		int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
	int ret = 0;
	unsigned int address;
	struct unwind_frame_info frame_info;

	seed_unwind_frame_info(tsk, regs, &frame_info);

	while (1) {
		address = UNW_PC(&frame_info);

		/* A NULL or non-kernel-text PC means we have walked off the
		 * end of a sane call chain.
		 */
		if (!address || !__kernel_text_address(address))
			break;

		/* Consumer returning -1 asks the iterator to stop early */
		if (consumer_fn(address, arg) == -1)
			break;

		ret = arc_unwind(&frame_info);
		if (ret)
			break;

		/* The unwound frame's BLINK (return addr, r31) becomes the
		 * PC (r63) for the next iteration.
		 */
		frame_info.regs.r63 = frame_info.regs.r31;
	}

	return address;		/* return the last address it saw */
#else
	/* On ARC, only Dwarf based unwinder works. fp based backtracing is
	 * not possible (-fno-omit-frame-pointer) because of the way function
	 * prologue is setup (callee regs saved and then fp set and not other
	 * way around
	 */
	pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
	return 0;

#endif
}
150 
151 /*-------------------------------------------------------------------------
152  * callbacks called by unwinder iterator to implement kernel APIs
153  *
154  * The callback can return -1 to force the iterator to stop, which by default
155  * keeps going till the bottom-most frame.
156  *-------------------------------------------------------------------------
157  */
158 
/* Call-back which plugs into unwinding core to dump the stack in
 * case of panic/OOPs/BUG etc
 */
static int __print_sym(unsigned int address, void *unused)
{
	/* print "  <symbol+offset/size>" for this kernel text address */
	__print_symbol("  %s\n", address);
	return 0;	/* 0 => keep unwinding */
}
167 
168 #ifdef CONFIG_STACKTRACE
169 
170 /* Call-back which plugs into unwinding core to capture the
171  * traces needed by kernel on /proc/<pid>/stack
172  */
173 static int __collect_all(unsigned int address, void *arg)
174 {
175 	struct stack_trace *trace = arg;
176 
177 	if (trace->skip > 0)
178 		trace->skip--;
179 	else
180 		trace->entries[trace->nr_entries++] = address;
181 
182 	if (trace->nr_entries >= trace->max_entries)
183 		return -1;
184 
185 	return 0;
186 }
187 
188 static int __collect_all_but_sched(unsigned int address, void *arg)
189 {
190 	struct stack_trace *trace = arg;
191 
192 	if (in_sched_functions(address))
193 		return 0;
194 
195 	if (trace->skip > 0)
196 		trace->skip--;
197 	else
198 		trace->entries[trace->nr_entries++] = address;
199 
200 	if (trace->nr_entries >= trace->max_entries)
201 		return -1;
202 
203 	return 0;
204 }
205 
206 #endif
207 
/* Call-back for get_wchan(): returning -1 stops the iterator, so the
 * walk halts on the first frame outside scheduler code and
 * arc_unwind_core() hands that PC back as the wait channel.
 */
static int __get_first_nonsched(unsigned int address, void *unused)
{
	return in_sched_functions(address) ? 0 : -1;
}
215 
216 /*-------------------------------------------------------------------------
217  *              APIs expected by various kernel sub-systems
218  *-------------------------------------------------------------------------
219  */
220 
/*
 * Dump the kernel-mode call stack of @tsk (or of the current context when
 * both @tsk and @regs are NULL) to the console, one symbol per line.
 */
noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
{
	pr_info("\nStack Trace:\n");
	arc_unwind_core(tsk, regs, __print_sym, NULL);
}
EXPORT_SYMBOL(show_stacktrace);
227 
/* Expected by sched Code */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	/* @sp is ignored: the unwinder seeds itself from @tsk (or current) */
	show_stacktrace(tsk, NULL);
}
233 
/* Another API expected by scheduler, shows up in "ps" as Wait Channel.
 * Of course just returning schedule() would be pointless so unwind until
 * the function is not in scheduler code.
 */
unsigned int get_wchan(struct task_struct *tsk)
{
	/* Returns the first PC outside scheduler code (see
	 * __get_first_nonsched); @tsk is assumed to be sleeping so the walk
	 * starts from __switch_to.
	 */
	return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
}
242 
243 #ifdef CONFIG_STACKTRACE
244 
/*
 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
 * A typical use is when /proc/<pid>/stack is queried by userland
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	/* Assumes @tsk is sleeping so unwinds from __switch_to;
	 * scheduler-internal frames are filtered out of @trace.
	 */
	arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}
254 
/* Capture the current context's call chain into @trace (CONFIG_STACKTRACE) */
void save_stack_trace(struct stack_trace *trace)
{
	/* Pass NULL for task so it unwinds the current call frame */
	arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
261 #endif
262