// SPDX-License-Identifier: GPL-2.0+

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/vmalloc.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/bug.h>
#include <asm/patch.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);

static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	u32 insn = __BUG_INSN_32;
	unsigned long offset = GET_INSN_LENGTH(p->opcode);

	p->ainsn.api.restore = (unsigned long)p->addr + offset;

	patch_text(p->ainsn.api.insn, &p->opcode, 1);
	patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset),
		   &insn, 1);
}
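
/*
 * A sketch (not literal code) of the out-of-line slot that the function
 * above prepares: the probed instruction is copied into the slot,
 * followed by a 32-bit ebreak, so that execution traps back into the
 * kernel right after the copied instruction has been single-stepped:
 *
 *	slot + 0:      copy of the probed insn (2 or 4 bytes)
 *	slot + offset: __BUG_INSN_32 (ebreak)
 *
 * kprobe_single_step_handler() recognises the trap at slot + offset,
 * and post_kprobe_handler() then rewrites regs->epc to
 * p->ainsn.api.restore, i.e. the instruction after the probe point.
 */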

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	p->ainsn.api.restore = 0;
}

static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode,
					(unsigned long)p->addr, regs);

	post_kprobe_handler(p, kcb, regs);
}

static bool __kprobes arch_check_kprobe(struct kprobe *p)
{
	unsigned long tmp  = (unsigned long)p->addr - p->offset;
	unsigned long addr = (unsigned long)p->addr;

	while (tmp <= addr) {
		if (tmp == addr)
			return true;

		tmp += GET_INSN_LENGTH(*(u16 *)tmp);
	}

	return false;
}
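
/*
 * A sketch of the boundary walk above: with the C extension, the length
 * of a RISC-V instruction is encoded in the low bits of its first
 * 16-bit parcel, which is what GET_INSN_LENGTH() decodes:
 *
 *	(parcel & 0x3) == 0x3  ->  4-byte instruction
 *	otherwise              ->  2-byte compressed instruction
 *
 * Walking forward from the symbol base (p->addr - p->offset) in these
 * steps confirms that p->addr is a genuine instruction boundary and not
 * the middle of a 4-byte instruction.
 */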

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	u16 *insn = (u16 *)p->addr;

	if ((unsigned long)insn & 0x1)
		return -EILSEQ;

	if (!arch_check_kprobe(p))
		return -EILSEQ;

	/* copy the instruction */
	p->opcode = (kprobe_opcode_t)(*insn++);
	if (GET_INSN_LENGTH(p->opcode) == 4)
		p->opcode |= (kprobe_opcode_t)(*insn) << 16;

	/* decode the instruction */
	switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* insn uses a slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}
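
/*
 * A minimal usage sketch (hypothetical module code, not part of this
 * file) of the path that ends up in arch_prepare_kprobe(): the generic
 * register_kprobe() resolves the symbol, validates the address, and
 * then calls the arch hook above.
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		pr_info("hit %lx\n", instruction_pointer(regs));
 *		return 0;	// 0: go on to single-step the insn
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name = "dput",		// any probeable symbol
 *		.pre_handler = my_pre,
 *	};
 *
 *	// module init: register_kprobe(&my_kp);
 *	// module exit: unregister_kprobe(&my_kp);
 */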

#ifdef CONFIG_MMU
void *alloc_insn_page(void)
{
	return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL, PAGE_KERNEL_READ_EXEC,
				    VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
#endif
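
/*
 * Note on alloc_insn_page(): instruction slots must be executable for
 * out-of-line single-stepping, hence PAGE_KERNEL_READ_EXEC, and
 * VM_FLUSH_RESET_PERMS tells vfree() to reset the page permissions and
 * flush stale TLB entries when a slot page is eventually freed.
 */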

/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	u32 insn = (p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32 ?
		   __BUG_INSN_32 : __BUG_INSN_16;

	patch_text(p->addr, &insn, 1);
}

/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * Interrupts need to be disabled before single-step mode is set up and
 * must not be reenabled until after single-step mode ends. Without
 * disabling interrupts on the local CPU, an interrupt could arrive
 * between the exception return and the start of the out-of-line single
 * step, which would result in wrongly single-stepping into the
 * interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_status = regs->status;
	regs->status &= ~SR_SPIE;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	regs->status = kcb->saved_status;
}
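
/*
 * A sketch of the SPIE handling above: regs->status holds the sstatus
 * image that sret will consume, and on sret the hardware copies SPIE
 * into SIE. Clearing SPIE in the saved image therefore returns to the
 * slot with interrupts disabled:
 *
 *	regs->status &= ~SR_SPIE;	// after sret: SIE <- SPIE (= 0)
 *
 * The original value is written back once the single-step trap is
 * taken, via kprobes_restore_local_irqflag().
 */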

static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);

		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}
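
/*
 * The two exits from setup_singlestep(), sketched:
 *
 *	slot path:     epc is pointed at the slot and we return to it
 *	               with interrupts masked; the trailing ebreak in
 *	               the slot brings us back via
 *	               kprobe_single_step_handler()
 *	simulate path: arch_simulate_insn() emulates the instruction in
 *	               software and completes immediately through
 *	               post_kprobe_handler()
 */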

static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}

static void __kprobes
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	/* restore the return address if this was a non-branching insn */
	if (cur->ainsn.api.restore != 0)
		regs->epc = cur->ainsn.api.restore;

	/* restore the previously saved kprobe state and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}

	/* call the post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/*
		 * The post_handler may itself hit a breakpoint and
		 * single step again; that recursion is handled by
		 * reenter_kprobe() above.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the ip back to the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->epc = (unsigned long) cur->addr;
		BUG_ON(!instruction_pointer(regs));

		if (kcb->kprobe_status == KPROBE_REENTER) {
			restore_previous_kprobe(kcb);
		} else {
			kprobes_restore_local_irqflag(kcb, regs);
			reset_current_kprobe();
		}

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * If the user-specified fault handler returned zero,
		 * try to fix up the fault.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}

bool __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return true;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it will
			 * have modified the execution path, so there is
			 * no need to single step. Just reset the current
			 * kprobe and exit.
			 *
			 * The pre_handler can itself hit a breakpoint
			 * and single step through it before returning.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
		}
		return true;
	}

	/*
	 * The breakpoint instruction was removed right after we hit it.
	 * Another cpu has removed either a probepoint or a debugger
	 * breakpoint at this address. In either case, no further handling
	 * of this trap is appropriate. Return to the original instruction
	 * and continue.
	 */
	return false;
}

bool __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long addr = instruction_pointer(regs);
	struct kprobe *cur = kprobe_running();

	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
	    ((unsigned long)&cur->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) == addr)) {
		kprobes_restore_local_irqflag(kcb, regs);
		post_kprobe_handler(cur, kcb, regs);
		return true;
	}
	/* not ours, kprobes should ignore it */
	return false;
}
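
/*
 * End-to-end flow for a single probe hit on this CPU, sketched:
 *
 *	ebreak at p->addr
 *	  -> kprobe_breakpoint_handler()
 *	  -> pre_handler(), setup_singlestep()       (epc = slot)
 *	  -> copied insn runs, slot ebreak traps
 *	  -> kprobe_single_step_handler()
 *	  -> post_kprobe_handler()                   (epc = restore addr)
 */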

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					 (unsigned long)__irqentry_text_end);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}