xref: /linux/arch/sparc/kernel/ftrace.c (revision 0d456bad36d42d16022be045c8a53ddbb59ee478)
1 #include <linux/spinlock.h>
2 #include <linux/hardirq.h>
3 #include <linux/ftrace.h>
4 #include <linux/percpu.h>
5 #include <linux/init.h>
6 #include <linux/list.h>
7 #include <trace/syscall.h>
8 
9 #include <asm/ftrace.h>
10 
11 #ifdef CONFIG_DYNAMIC_FTRACE
12 static const u32 ftrace_nop = 0x01000000;
13 
14 static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
15 {
16 	u32 call;
17 	s32 off;
18 
19 	off = ((s32)addr - (s32)ip);
20 	call = 0x40000000 | ((u32)off >> 2);
21 
22 	return call;
23 }
24 
/*
 * Atomically patch the instruction word at @ip from @old to @new.
 *
 * Uses the SPARC compare-and-swap instruction so the store only happens
 * if the site still holds @old, then flushes the I-cache line at @ip.
 * The %[new] input is tied to the "replaced" output ("0" constraint), so
 * after the cas "replaced" holds whatever was in memory beforehand.
 *
 * A faulting access is recovered via the __ex_table/.fixup pair: the
 * fixup stub jumps back past the cas with faulted = 1.
 *
 * Returns 0 on success, 1 if the access faulted, and 2 if the word at
 * @ip matched neither @old nor @new (i.e. someone else changed it).
 */
static int ftrace_modify_code(unsigned long ip, u32 old, u32 new)
{
	u32 replaced;
	int faulted;

	__asm__ __volatile__(
	"1:	cas	[%[ip]], %[old], %[new]\n"
	"	flush	%[ip]\n"
	"	mov	0, %[faulted]\n"
	"2:\n"
	"	.section .fixup,#alloc,#execinstr\n"
	"	.align	4\n"
	"3:	sethi	%%hi(2b), %[faulted]\n"
	"	jmpl	%[faulted] + %%lo(2b), %%g0\n"
	"	 mov	1, %[faulted]\n"
	"	.previous\n"
	"	.section __ex_table,\"a\"\n"
	"	.align	4\n"
	"	.word	1b, 3b\n"
	"	.previous\n"
	: "=r" (replaced), [faulted] "=r" (faulted)
	: [new] "0" (new), [old] "r" (old), [ip] "r" (ip)
	: "memory");

	/* cas left the prior contents in "replaced"; if it was neither the
	 * expected old insn nor our new one, the swap did not take effect. */
	if (replaced != old && replaced != new)
		faulted = 2;

	return faulted;
}
54 
55 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
56 {
57 	unsigned long ip = rec->ip;
58 	u32 old, new;
59 
60 	old = ftrace_call_replace(ip, addr);
61 	new = ftrace_nop;
62 	return ftrace_modify_code(ip, old, new);
63 }
64 
65 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
66 {
67 	unsigned long ip = rec->ip;
68 	u32 old, new;
69 
70 	old = ftrace_nop;
71 	new = ftrace_call_replace(ip, addr);
72 	return ftrace_modify_code(ip, old, new);
73 }
74 
75 int ftrace_update_ftrace_func(ftrace_func_t func)
76 {
77 	unsigned long ip = (unsigned long)(&ftrace_call);
78 	u32 old, new;
79 
80 	old = *(u32 *) &ftrace_call;
81 	new = ftrace_call_replace(ip, (unsigned long)func);
82 	return ftrace_modify_code(ip, old, new);
83 }
84 
85 int __init ftrace_dyn_arch_init(void *data)
86 {
87 	unsigned long *p = data;
88 
89 	*p = 0;
90 
91 	return 0;
92 }
93 #endif
94 
95 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
96 
97 #ifdef CONFIG_DYNAMIC_FTRACE
98 extern void ftrace_graph_call(void);
99 
100 int ftrace_enable_ftrace_graph_caller(void)
101 {
102 	unsigned long ip = (unsigned long)(&ftrace_graph_call);
103 	u32 old, new;
104 
105 	old = *(u32 *) &ftrace_graph_call;
106 	new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
107 	return ftrace_modify_code(ip, old, new);
108 }
109 
110 int ftrace_disable_ftrace_graph_caller(void)
111 {
112 	unsigned long ip = (unsigned long)(&ftrace_graph_call);
113 	u32 old, new;
114 
115 	old = *(u32 *) &ftrace_graph_call;
116 	new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
117 
118 	return ftrace_modify_code(ip, old, new);
119 }
120 
#endif /* CONFIG_DYNAMIC_FTRACE */
122 
123 /*
124  * Hook the return address and push it in the stack of return addrs
125  * in current thread info.
126  */
127 unsigned long prepare_ftrace_return(unsigned long parent,
128 				    unsigned long self_addr,
129 				    unsigned long frame_pointer)
130 {
131 	unsigned long return_hooker = (unsigned long) &return_to_handler;
132 	struct ftrace_graph_ent trace;
133 
134 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
135 		return parent + 8UL;
136 
137 	if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
138 				     frame_pointer) == -EBUSY)
139 		return parent + 8UL;
140 
141 	trace.func = self_addr;
142 
143 	/* Only trace if the calling function expects to */
144 	if (!ftrace_graph_entry(&trace)) {
145 		current->curr_ret_stack--;
146 		return parent + 8UL;
147 	}
148 
149 	return return_hooker;
150 }
151 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
152