xref: /linux/arch/parisc/kernel/ftrace.c (revision 07578f16ef38bb8061bf7e3132e685ed4e3f5c10)
// SPDX-License-Identifier: GPL-2.0
/*
 * Code for tracing calls in Linux kernel.
 * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
 *
 * based on code for x86 which is:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * future possible enhancements:
 *	- add CONFIG_STACK_TRACER
 */

#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/jump_label.h>

#include <asm/assembly.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
#include <asm/patch.h>

#define __hot __section(".text.hot")

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
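/*
 * Gates the function-graph path in ftrace_function_trampoline();
 * flipped by ftrace_enable/disable_ftrace_graph_caller() below.
 */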
static DEFINE_STATIC_KEY_FALSE(ftrace_graph_enable);

/*
 * Hook the return address and push it onto the return address
 * stack of the current task.
 */
static void __hot prepare_ftrace_return(unsigned long *parent,
					unsigned long self_addr)
{
	unsigned long old;
	extern int parisc_return_to_handler;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr, 0, NULL))
		/* activate parisc_return_to_handler() as return point */
		*parent = (unsigned long) &parisc_return_to_handler;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ftrace_func_t ftrace_func;

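/*
 * Common C handler entered from the assembly ftrace stub for every patched
 * function:
 *   parent     - return address (%rp) of the traced function
 *   self_addr  - address of the traced function
 *   org_sp_gr3 - the traced function's original stack pointer, used below
 *                to locate the stack slot holding the saved %rp
 */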
void notrace __hot ftrace_function_trampoline(unsigned long parent,
				unsigned long self_addr,
				unsigned long org_sp_gr3,
				struct ftrace_regs *fregs)
{
	extern struct ftrace_ops *function_trace_op;

	ftrace_func(self_addr, parent, function_trace_op, fregs);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (static_branch_unlikely(&ftrace_graph_enable)) {
		unsigned long *parent_rp;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}

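/*
 * Called by the ftrace core when the function graph tracer is started or
 * stopped: flip the static branch tested in ftrace_function_trampoline().
 */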
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int ftrace_enable_ftrace_graph_caller(void)
{
	static_key_enable(&ftrace_graph_enable.key);
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	static_key_disable(&ftrace_graph_enable.key);
	return 0;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

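/*
 * The ftrace core installs its current dispatch function here; it is what
 * ftrace_function_trampoline() ends up calling for every traced function.
 */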
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

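/*
 * Retargeting a call site (e.g. between the regular and the regs-saving
 * caller) needs no patching here, presumably because both cases go through
 * the same assembly entry point; hence this is a no-op.
 */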
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	return 0;
}

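/*
 * The compiler pads each traced function with FTRACE_PATCHABLE_FUNCTION_SIZE
 * NOPs (via -fpatchable-function-entry); move the recorded address from the
 * first of those NOPs to the last one, which is the instruction that is
 * live-patched in ftrace_make_call()/ftrace_make_nop() below.
 */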
unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr + (FTRACE_PATCHABLE_FUNCTION_SIZE - 1) * 4;
}

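/*
 * Enable tracing of one function: fill its NOP pad with a tiny trampoline
 * that saves %r1, loads the branch target from a literal placed in the pad
 * and branches to it; the word at rec->ip becomes a backward branch into
 * that trampoline.  On 64-bit a differently laid-out copy is used when
 * rec->ip is not 8-byte aligned, keeping the 64-bit target literal aligned.
 * The pad is only patched if it still contains nothing but NOPs.
 */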
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	u32 *tramp;
	int size, ret, i;
	void *ip;

#ifdef CONFIG_64BIT
	unsigned long addr2 =
		(unsigned long)dereference_function_descriptor((void *)addr);

	u32 ftrace_trampoline[] = {
		0x73c10208, /* std,ma r1,100(sp) */
		0x0c2110c1, /* ldd -10(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		addr2 >> 32,
		addr2 & 0xffffffff,
		0xe83f1fd7, /* b,l,n .-14,r1 */
	};

	u32 ftrace_trampoline_unaligned[] = {
		addr2 >> 32,
		addr2 & 0xffffffff,
		0x37de0200, /* ldo 100(sp),sp */
		0x73c13e01, /* std r1,-100(sp) */
		0x34213ff9, /* ldo -4(r1),r1 */
		0x50213fc1, /* ldd -20(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		0xe83f1fcf, /* b,l,n .-20,r1 */
	};

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);
#else
	u32 ftrace_trampoline[] = {
		(u32)addr,
		0x6fc10080, /* stw,ma r1,40(sp) */
		0x48213fd1, /* ldw -18(r1),r1 */
		0xe820c002, /* bv,n r0(r1) */
		0xe83f1fdf, /* b,l,n .-c,r1 */
	};
#endif

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);

	size = sizeof(ftrace_trampoline);
	tramp = ftrace_trampoline;

#ifdef CONFIG_64BIT
	if (rec->ip & 0x4) {
		size = sizeof(ftrace_trampoline_unaligned);
		tramp = ftrace_trampoline_unaligned;
	}
#endif

	ip = (void *)(rec->ip + 4 - size);

	ret = copy_from_kernel_nofault(insn, ip, size);
	if (ret)
		return ret;

	for (i = 0; i < size / 4; i++) {
		if (insn[i] != INSN_NOP)
			return -EINVAL;
	}

	__patch_text_multiple(ip, tramp, size);
	return 0;
}

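/*
 * Disable tracing of one function: knock out the live branch at rec->ip
 * first, then overwrite the remaining trampoline words in front of it
 * with NOPs.
 */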
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	int i;

	for (i = 0; i < ARRAY_SIZE(insn); i++)
		insn[i] = INSN_NOP;

	__patch_text((void *)rec->ip, INSN_NOP);
	__patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
			      insn, sizeof(insn) - 4);
	return 0;
}
#endif

#ifdef CONFIG_KPROBES_ON_FTRACE
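/*
 * ftrace callback used for kprobes placed on a function's ftrace site:
 * emulates the probe hit without a trap by pointing the saved instruction
 * address queue (iaoq) at the probed instruction for the pre-handler and
 * just past it for the post-handler.
 */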
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	preempt_disable_notrace();
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	regs->iaoq[0] = ip;
	regs->iaoq[1] = ip + 4;

	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		regs->iaoq[0] = ip + 4;
		regs->iaoq[1] = ip + 8;

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	preempt_enable_notrace();
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

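/*
 * Probes on an ftrace site never single-step a copied instruction, so no
 * instruction slot needs to be allocated.
 */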
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif