1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Kernel Probes (KProbes)
4  *
5  * Copyright (C) IBM Corporation, 2002, 2004
6  *
7  * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
8  *		Probes initial implementation ( includes contributions from
9  *		Rusty Russell).
10  * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
11  *		interface to access function arguments.
12  * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
13  *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
14  * 2005-Mar	Roland McGrath <roland@redhat.com>
15  *		Fixed to handle %rip-relative addressing mode correctly.
16  * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
17  *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
18  *		<prasanna@in.ibm.com> added function-return probes.
19  * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
20  *		Added function return probes functionality
21  * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
22  *		kprobe-booster and kretprobe-booster for i386.
23  * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
24  *		and kretprobe-booster for x86-64
25  * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
26  *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
27  *		unified x86 kprobes code.
28  */
29 #include <linux/kprobes.h>
30 #include <linux/ptrace.h>
31 #include <linux/string.h>
32 #include <linux/slab.h>
33 #include <linux/hardirq.h>
34 #include <linux/preempt.h>
35 #include <linux/sched/debug.h>
36 #include <linux/extable.h>
37 #include <linux/kdebug.h>
38 #include <linux/kallsyms.h>
39 #include <linux/ftrace.h>
40 #include <linux/frame.h>
41 #include <linux/kasan.h>
42 #include <linux/moduleloader.h>
43 #include <linux/vmalloc.h>
44 #include <linux/pgtable.h>
45 
46 #include <asm/text-patching.h>
47 #include <asm/cacheflush.h>
48 #include <asm/desc.h>
49 #include <linux/uaccess.h>
50 #include <asm/alternative.h>
51 #include <asm/insn.h>
52 #include <asm/debugreg.h>
53 #include <asm/set_memory.h>
54 
55 #include "common.h"
56 
57 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
58 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
59 
60 #define stack_addr(regs) ((unsigned long *)regs->sp)
61 
62 #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
63 	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
64 	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
65 	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
66 	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
67 	 << (row % 32))
68 	/*
69 	 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
70 	 * Groups, and some special opcodes cannot be boosted.
71 	 * This is non-const and volatile to keep gcc from statically
72 	 * optimizing it out, as variable_test_bit makes gcc think only
73 	 * *(unsigned long*) is used.
74 	 */
75 static volatile u32 twobyte_is_boostable[256 / 32] = {
76 	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
77 	/*      ----------------------------------------------          */
78 	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
79 	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
80 	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
81 	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
82 	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
83 	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
84 	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
85 	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
86 	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
87 	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
88 	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
89 	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
90 	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
91 	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
92 	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
93 	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
94 	/*      -----------------------------------------------         */
95 	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
96 };
97 #undef W
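
/*
 * Worked example (not part of the original source): can_boost() tests
 * bit 0x93 of this bitmap for the two-byte opcode 0x0f 0x93 (setae).
 * That is bit (0x93 & 0x1f) == 19 of the 32-bit word at index
 * (0x93 >> 5) == 4, which is W(0x80, ...) | W(0x90, ...): row 0x80
 * fills bits 0-15 and row 0x90 fills bits 16-31 (0x90 % 32 == 16).
 * Row 0x90 (the setcc group) is all ones, so bit 19 is set and the
 * instruction is considered boostable.
 */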
98 
99 struct kretprobe_blackpoint kretprobe_blacklist[] = {
100 	{"__switch_to", }, /* This function switches only the current task,
101 			      but doesn't switch the kernel stack. */
102 	{NULL, NULL}	/* Terminator */
103 };
104 
105 const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
106 
107 static nokprobe_inline void
108 __synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
109 {
110 	struct __arch_relative_insn {
111 		u8 op;
112 		s32 raddr;
113 	} __packed *insn;
114 
115 	insn = (struct __arch_relative_insn *)dest;
116 	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
117 	insn->op = op;
118 }
119 
120 /* Insert a jump instruction at @dest, which jumps to @to as if it were placed at @from. */
121 void synthesize_reljump(void *dest, void *from, void *to)
122 {
123 	__synthesize_relative_insn(dest, from, to, JMP32_INSN_OPCODE);
124 }
125 NOKPROBE_SYMBOL(synthesize_reljump);
126 
127 /* Insert a call instruction at @dest, which calls @to as if it were placed at @from. */
128 void synthesize_relcall(void *dest, void *from, void *to)
129 {
130 	__synthesize_relative_insn(dest, from, to, CALL_INSN_OPCODE);
131 }
132 NOKPROBE_SYMBOL(synthesize_relcall);
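
/*
 * A minimal usage sketch (not part of the original file; hypothetical
 * addresses): the synthesized instruction is written to a scratch
 * buffer, but its displacement is computed as if it executed at @from.
 */
static void __maybe_unused example_synthesize_reljump(void)
{
	u8 buf[5];

	synthesize_reljump(buf, (void *)0x1000UL, (void *)0x1015UL);
	/* buf: e9 10 00 00 00 -- raddr = 0x1015 - (0x1000 + 5) = 0x10 */
}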
133 
134 /*
135  * Skip the prefixes of the instruction.
136  */
137 static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
138 {
139 	insn_attr_t attr;
140 
141 	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
142 	while (inat_is_legacy_prefix(attr)) {
143 		insn++;
144 		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
145 	}
146 #ifdef CONFIG_X86_64
147 	if (inat_is_rex_prefix(attr))
148 		insn++;
149 #endif
150 	return insn;
151 }
152 NOKPROBE_SYMBOL(skip_prefixes);
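
/*
 * For illustration (not in the original source): given the byte
 * sequence 66 48 89 05 ..., skip_prefixes() steps over the 0x66
 * operand-size prefix and then, on x86_64, over the 0x48 REX.W
 * prefix, returning a pointer to the 0x89 opcode byte.
 */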
153 
154 /*
155  * Returns non-zero if INSN is boostable.
156  * RIP-relative instructions are adjusted at copying time in 64-bit mode.
157  */
158 int can_boost(struct insn *insn, void *addr)
159 {
160 	kprobe_opcode_t opcode;
161 
162 	if (search_exception_tables((unsigned long)addr))
163 		return 0;	/* Page fault may occur on this address. */
164 
165 	/* 2nd-byte opcode */
166 	if (insn->opcode.nbytes == 2)
167 		return test_bit(insn->opcode.bytes[1],
168 				(unsigned long *)twobyte_is_boostable);
169 
170 	if (insn->opcode.nbytes != 1)
171 		return 0;
172 
173 	/* Can't boost Address-size override prefix */
174 	if (unlikely(inat_is_address_size_prefix(insn->attr)))
175 		return 0;
176 
177 	opcode = insn->opcode.bytes[0];
178 
179 	switch (opcode & 0xf0) {
180 	case 0x60:
181 		/* can't boost "bound" */
182 		return (opcode != 0x62);
183 	case 0x70:
184 		return 0; /* can't boost conditional jump */
185 	case 0x90:
186 		return opcode != 0x9a;	/* can't boost call far */
187 	case 0xc0:
188 		/* can't boost software interrupts */
189 		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
190 	case 0xd0:
191 		/* can boost AA* and XLAT */
192 		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
193 	case 0xe0:
194 		/* can boost in/out and absolute jmps */
195 		return ((opcode & 0x04) || opcode == 0xea);
196 	case 0xf0:
197 		/* clear and set flags are boostable */
198 		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
199 	default:
200 		/* CS override prefix and call are not boostable */
201 		return (opcode != 0x2e && opcode != 0x9a);
202 	}
203 }
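
/*
 * Worked example for the 0xc0 case above (not in the original source):
 * 0xc3 (ret) satisfies 0xc1 < 0xc3 < 0xcc and is boostable, while
 * 0xcc (int3), 0xcd (int imm8) and 0xce (into) all fail both tests
 * and cannot be boosted.
 */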
204 
205 static unsigned long
206 __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
207 {
208 	struct kprobe *kp;
209 	unsigned long faddr;
210 
211 	kp = get_kprobe((void *)addr);
212 	faddr = ftrace_location(addr);
213 	/*
214 	 * Addresses inside the ftrace location are refused by
215 	 * arch_check_ftrace_location(). Something went terribly wrong
216 	 * if such an address is checked here.
217 	 */
218 	if (WARN_ON(faddr && faddr != addr))
219 		return 0UL;
220 	/*
221 	 * Use the current code if it is not modified by a kprobe
222 	 * and cannot be modified by ftrace.
223 	 */
224 	if (!kp && !faddr)
225 		return addr;
226 
227 	/*
228 	 * Basically, kp->ainsn.insn holds the original instruction.
229 	 * However, a RIP-relative instruction cannot be single-stepped
230 	 * at a different place, so __copy_instruction() tweaks its
231 	 * displacement. In that case, we can't recover the original
232 	 * instruction from kp->ainsn.insn.
233 	 *
234 	 * On the other hand, in the case of a normal kprobe, kp->opcode
235 	 * has a copy of the first byte of the probed instruction, which
236 	 * is overwritten by int3. Since the instruction at kp->addr is not
237 	 * modified by kprobes except for that first byte, we can recover
238 	 * the original instruction from it and kp->opcode.
239 	 *
240 	 * In case of Kprobes using ftrace, we do not have a copy of
241 	 * the original instruction. In fact, the ftrace location might
242 	 * be modified at any time and could even be in an inconsistent state.
243 	 * Fortunately, we know that the original code is the ideal 5-byte
244 	 * long NOP.
245 	 */
246 	if (copy_from_kernel_nofault(buf, (void *)addr,
247 		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
248 		return 0UL;
249 
250 	if (faddr)
251 		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
252 	else
253 		buf[0] = kp->opcode;
254 	return (unsigned long)buf;
255 }
256 
257 /*
258  * Recover the probed instruction at addr for further analysis.
259  * The caller must hold kprobe_mutex, or disable preemption, to prevent
260  * the kprobes it references from being released.
261  * Returns zero if the instruction cannot be recovered (or access failed).
262  */
263 unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
264 {
265 	unsigned long __addr;
266 
267 	__addr = __recover_optprobed_insn(buf, addr);
268 	if (__addr != addr)
269 		return __addr;
270 
271 	return __recover_probed_insn(buf, addr);
272 }
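
/*
 * A minimal usage sketch (not part of the original file): decode the
 * instruction at @addr, seeing through any kprobe modifications, and
 * report whether it could be boosted.
 */
static int __maybe_unused example_decode_and_check(void *addr)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered;

	recovered = recover_probed_instruction(buf, (unsigned long)addr);
	if (!recovered)
		return -EINVAL;

	kernel_insn_init(&insn, (void *)recovered, MAX_INSN_SIZE);
	insn_get_length(&insn);	/* decodes prefixes and opcode too */

	return can_boost(&insn, addr);	/* 1 if boostable, else 0 */
}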
273 
274 /* Check if paddr is at an instruction boundary */
275 static int can_probe(unsigned long paddr)
276 {
277 	unsigned long addr, __addr, offset = 0;
278 	struct insn insn;
279 	kprobe_opcode_t buf[MAX_INSN_SIZE];
280 
281 	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
282 		return 0;
283 
284 	/* Decode instructions */
285 	addr = paddr - offset;
286 	while (addr < paddr) {
287 		/*
288 		 * Check if the instruction has been modified by another
289 		 * kprobe, in which case we replace the breakpoint by the
290 		 * original instruction in our buffer.
291 		 * Also, jump optimization will change the breakpoint to a
292 		 * relative jump. Since a relative jump is itself a normal
293 		 * instruction, we just decode through it if there is no kprobe.
294 		 */
295 		__addr = recover_probed_instruction(buf, addr);
296 		if (!__addr)
297 			return 0;
298 		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
299 		insn_get_length(&insn);
300 
301 		/*
302 		 * Another debugging subsystem might insert this breakpoint.
303 		 * In that case, we can't recover it.
304 		 */
305 		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
306 			return 0;
307 		addr += insn.length;
308 	}
309 
310 	return (addr == paddr);
311 }
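
/*
 * For illustration (not in the original source): if a function begins
 * with a 5-byte instruction followed by a 3-byte instruction, probing
 * func+3 is rejected; decoding from the function start visits offsets
 * 0, 5, 8, ... and never lands exactly on func+3, so can_probe()
 * returns 0.
 */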
312 
313 /*
314  * Returns non-zero if opcode modifies the interrupt flag.
315  */
316 static int is_IF_modifier(kprobe_opcode_t *insn)
317 {
318 	/* Skip prefixes */
319 	insn = skip_prefixes(insn);
320 
321 	switch (*insn) {
322 	case 0xfa:		/* cli */
323 	case 0xfb:		/* sti */
324 	case 0xcf:		/* iret/iretd */
325 	case 0x9d:		/* popf/popfd */
326 		return 1;
327 	}
328 
329 	return 0;
330 }
331 
332 /*
333  * Copy an instruction, recovering it if it has been modified by kprobes,
334  * and adjust the displacement if the instruction uses the %rip-relative
335  * addressing mode. Note that since @real will be the final place of the
336  * copied instruction, the displacement must be adjusted by @real, not @dest.
337  * This returns the length of the copied instruction, or 0 on error.
338  */
339 int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
340 {
341 	kprobe_opcode_t buf[MAX_INSN_SIZE];
342 	unsigned long recovered_insn =
343 		recover_probed_instruction(buf, (unsigned long)src);
344 
345 	if (!recovered_insn || !insn)
346 		return 0;
347 
348 	/* This may read kernel text directly if the given address was not recovered */
349 	if (copy_from_kernel_nofault(dest, (void *)recovered_insn,
350 			MAX_INSN_SIZE))
351 		return 0;
352 
353 	kernel_insn_init(insn, dest, MAX_INSN_SIZE);
354 	insn_get_length(insn);
355 
356 	/* We cannot probe an instruction that carries an emulate prefix */
357 	if (insn_has_emulate_prefix(insn))
358 		return 0;
359 
360 	/* Another subsystem has put a breakpoint there; we failed to recover it */
361 	if (insn->opcode.bytes[0] == INT3_INSN_OPCODE)
362 		return 0;
363 
364 	/* We should not single-step exception-masking instructions */
365 	if (insn_masking_exception(insn))
366 		return 0;
367 
368 #ifdef CONFIG_X86_64
369 	/* Only x86_64 has RIP relative instructions */
370 	if (insn_rip_relative(insn)) {
371 		s64 newdisp;
372 		u8 *disp;
373 		/*
374 		 * The copied instruction uses the %rip-relative addressing
375 		 * mode.  Adjust the displacement for the difference between
376 		 * the original location of this instruction and the location
377 		 * of the copy that will actually be run.  The tricky bit here
378 		 * is making sure that the sign extension happens correctly in
379 		 * this calculation, since we need a signed 32-bit result to
380 		 * be sign-extended to 64 bits when it's added to the %rip
381 		 * value and yield the same 64-bit result that the sign-
382 		 * extension of the original signed 32-bit displacement would
383 		 * have given.
384 		 */
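		/*
		 * Worked example (hypothetical addresses, not part of the
		 * original source): with src == 0xffffffff81000000,
		 * displacement == 0x100 and real == 0xffffffffa0000000,
		 * newdisp == 0x100 - 0x1f000000 == -0x1effff00, which still
		 * fits in an s32, so the copy stays encodable.
		 */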
385 		newdisp = (u8 *) src + (s64) insn->displacement.value
386 			  - (u8 *) real;
387 		if ((s64) (s32) newdisp != newdisp) {
388 			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
389 			return 0;
390 		}
391 		disp = (u8 *) dest + insn_offset_displacement(insn);
392 		*(s32 *) disp = (s32) newdisp;
393 	}
394 #endif
395 	return insn->length;
396 }
397 
398 /* Prepare a reljump right after the copied instruction, to boost it */
399 static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
400 			  struct insn *insn)
401 {
402 	int len = insn->length;
403 
404 	if (can_boost(insn, p->addr) &&
405 	    MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
406 		/*
407 		 * This instruction can be executed directly if it
408 		 * jumps back to the correct address.
409 		 */
410 		synthesize_reljump(buf + len, p->ainsn.insn + len,
411 				   p->addr + insn->length);
412 		len += JMP32_INSN_SIZE;
413 		p->ainsn.boostable = true;
414 	} else {
415 		p->ainsn.boostable = false;
416 	}
417 
418 	return len;
419 }
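
/*
 * For illustration (not in the original source): after boosting a
 * probe on a 3-byte instruction, the slot looks like
 *
 *	p->ainsn.insn + 0:  <3-byte copy of the instruction>
 *	p->ainsn.insn + 3:  e9 <rel32>	; jmp back to p->addr + 3
 *
 * so the copy can run to completion and rejoin the original
 * instruction stream without a single-step trap.
 */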
420 
421 /* Make the page read-only when allocating it */
422 void *alloc_insn_page(void)
423 {
424 	void *page;
425 
426 	page = module_alloc(PAGE_SIZE);
427 	if (!page)
428 		return NULL;
429 
430 	set_vm_flush_reset_perms(page);
431 	/*
432 	 * First make the page read-only, and only then make it executable to
433 	 * prevent it from being W+X in between.
434 	 */
435 	set_memory_ro((unsigned long)page, 1);
436 
437 	/*
438 	 * TODO: Once additional kernel code protection mechanisms are set, ensure
439 	 * that the page was not maliciously altered and it is still zeroed.
440 	 */
441 	set_memory_x((unsigned long)page, 1);
442 
443 	return page;
444 }
445 
446 /* The page is recovered to RW mode (via VM_FLUSH_RESET_PERMS) on release */
447 void free_insn_page(void *page)
448 {
449 	module_memfree(page);
450 }
451 
452 static int arch_copy_kprobe(struct kprobe *p)
453 {
454 	struct insn insn;
455 	kprobe_opcode_t buf[MAX_INSN_SIZE];
456 	int len;
457 
458 	/* Copy the instruction, recovering it if another optprobe has modified it. */
459 	len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
460 	if (!len)
461 		return -EINVAL;
462 
463 	/*
464 	 * __copy_instruction can modify the displacement of the instruction,
465 	 * but it doesn't affect the boostable check.
466 	 */
467 	len = prepare_boost(buf, p, &insn);
468 
469 	/* Check whether the instruction modifies Interrupt Flag or not */
470 	p->ainsn.if_modifier = is_IF_modifier(buf);
471 
472 	/* Also, displacement change doesn't affect the first byte */
473 	p->opcode = buf[0];
474 
475 	/* OK, write back the instruction(s) into ROX insn buffer */
476 	text_poke(p->ainsn.insn, buf, len);
477 
478 	return 0;
479 }
480 
481 int arch_prepare_kprobe(struct kprobe *p)
482 {
483 	int ret;
484 
485 	if (alternatives_text_reserved(p->addr, p->addr))
486 		return -EINVAL;
487 
488 	if (!can_probe((unsigned long)p->addr))
489 		return -EILSEQ;
490 	/* insn: must be on special executable page on x86. */
491 	p->ainsn.insn = get_insn_slot();
492 	if (!p->ainsn.insn)
493 		return -ENOMEM;
494 
495 	ret = arch_copy_kprobe(p);
496 	if (ret) {
497 		free_insn_slot(p->ainsn.insn, 0);
498 		p->ainsn.insn = NULL;
499 	}
500 
501 	return ret;
502 }
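
/*
 * A minimal registration sketch (not part of this file; such code
 * normally lives in a separate module).  register_kprobe() ends up
 * calling arch_prepare_kprobe() above; the symbol name below is a
 * hypothetical example target.
 */
static int __maybe_unused example_pre_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	pr_info("kprobe hit at %pS\n", p->addr);
	return 0;	/* 0: proceed with single-stepping */
}

static int __maybe_unused example_register(void)
{
	static struct kprobe kp = {
		.symbol_name = "do_sys_open",	/* hypothetical target */
		.pre_handler = example_pre_handler,
	};

	return register_kprobe(&kp);
}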
503 
504 void arch_arm_kprobe(struct kprobe *p)
505 {
506 	text_poke(p->addr, ((unsigned char []){INT3_INSN_OPCODE}), 1);
507 	text_poke_sync();
508 }
509 
510 void arch_disarm_kprobe(struct kprobe *p)
511 {
512 	text_poke(p->addr, &p->opcode, 1);
513 	text_poke_sync();
514 }
515 
516 void arch_remove_kprobe(struct kprobe *p)
517 {
518 	if (p->ainsn.insn) {
519 		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
520 		p->ainsn.insn = NULL;
521 	}
522 }
523 
524 static nokprobe_inline void
525 save_previous_kprobe(struct kprobe_ctlblk *kcb)
526 {
527 	kcb->prev_kprobe.kp = kprobe_running();
528 	kcb->prev_kprobe.status = kcb->kprobe_status;
529 	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
530 	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
531 }
532 
533 static nokprobe_inline void
534 restore_previous_kprobe(struct kprobe_ctlblk *kcb)
535 {
536 	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
537 	kcb->kprobe_status = kcb->prev_kprobe.status;
538 	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
539 	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
540 }
541 
542 static nokprobe_inline void
543 set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
544 		   struct kprobe_ctlblk *kcb)
545 {
546 	__this_cpu_write(current_kprobe, p);
547 	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
548 		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
549 	if (p->ainsn.if_modifier)
550 		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
551 }
552 
553 static nokprobe_inline void clear_btf(void)
554 {
555 	if (test_thread_flag(TIF_BLOCKSTEP)) {
556 		unsigned long debugctl = get_debugctlmsr();
557 
558 		debugctl &= ~DEBUGCTLMSR_BTF;
559 		update_debugctlmsr(debugctl);
560 	}
561 }
562 
563 static nokprobe_inline void restore_btf(void)
564 {
565 	if (test_thread_flag(TIF_BLOCKSTEP)) {
566 		unsigned long debugctl = get_debugctlmsr();
567 
568 		debugctl |= DEBUGCTLMSR_BTF;
569 		update_debugctlmsr(debugctl);
570 	}
571 }
572 
573 void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
574 {
575 	unsigned long *sara = stack_addr(regs);
576 
577 	ri->ret_addr = (kprobe_opcode_t *) *sara;
578 	ri->fp = sara;
579 
580 	/* Replace the return addr with trampoline addr */
581 	*sara = (unsigned long) &kretprobe_trampoline;
582 }
583 NOKPROBE_SYMBOL(arch_prepare_kretprobe);
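
/*
 * A minimal kretprobe sketch (not part of this file): the trampoline
 * installed above fires when the probed function returns, and the
 * handler sees the function's return value.  The symbol name is a
 * hypothetical example target.
 */
static int __maybe_unused example_ret_handler(struct kretprobe_instance *ri,
					      struct pt_regs *regs)
{
	pr_info("probed function returned %lx\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe __maybe_unused example_rp = {
	.kp.symbol_name	= "_do_fork",	/* hypothetical target */
	.handler	= example_ret_handler,
	.maxactive	= 20,		/* max concurrent instances */
};
/* registered with register_kretprobe(&example_rp) */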
584 
585 static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
586 			     struct kprobe_ctlblk *kcb, int reenter)
587 {
588 	if (setup_detour_execution(p, regs, reenter))
589 		return;
590 
591 #if !defined(CONFIG_PREEMPTION)
592 	if (p->ainsn.boostable && !p->post_handler) {
593 		/* Boost up -- we can execute copied instructions directly */
594 		if (!reenter)
595 			reset_current_kprobe();
596 		/*
597 		 * Reentering boosted probe doesn't reset current_kprobe,
598 		 * nor set current_kprobe, because it doesn't use single
599 		 * stepping.
600 		 */
601 		regs->ip = (unsigned long)p->ainsn.insn;
602 		return;
603 	}
604 #endif
605 	if (reenter) {
606 		save_previous_kprobe(kcb);
607 		set_current_kprobe(p, regs, kcb);
608 		kcb->kprobe_status = KPROBE_REENTER;
609 	} else
610 		kcb->kprobe_status = KPROBE_HIT_SS;
611 	/* Prepare real single stepping */
612 	clear_btf();
613 	regs->flags |= X86_EFLAGS_TF;
614 	regs->flags &= ~X86_EFLAGS_IF;
615 	/* single step inline if the instruction is an int3 */
616 	if (p->opcode == INT3_INSN_OPCODE)
617 		regs->ip = (unsigned long)p->addr;
618 	else
619 		regs->ip = (unsigned long)p->ainsn.insn;
620 }
621 NOKPROBE_SYMBOL(setup_singlestep);
622 
623 /*
624  * We have reentered the kprobe_handler(), since another probe was hit while
625  * within the handler. We save the original kprobes variables and just single
626  * step on the instruction of the new probe without calling any user handlers.
627  */
628 static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
629 			  struct kprobe_ctlblk *kcb)
630 {
631 	switch (kcb->kprobe_status) {
632 	case KPROBE_HIT_SSDONE:
633 	case KPROBE_HIT_ACTIVE:
634 	case KPROBE_HIT_SS:
635 		kprobes_inc_nmissed_count(p);
636 		setup_singlestep(p, regs, kcb, 1);
637 		break;
638 	case KPROBE_REENTER:
639 		/* A probe has been hit in the codepath leading up to, or just
640 		 * after, single-stepping of a probed instruction. This entire
641 		 * codepath should strictly reside in the .kprobes.text section.
642 		 * Raise a BUG, or we'll continue in an endless reentering loop
643 		 * and eventually hit a stack overflow.
644 		 */
645 		pr_err("Unrecoverable kprobe detected.\n");
646 		dump_kprobe(p);
647 		BUG();
648 	default:
649 		/* impossible cases */
650 		WARN_ON(1);
651 		return 0;
652 	}
653 
654 	return 1;
655 }
656 NOKPROBE_SYMBOL(reenter_kprobe);
657 
658 /*
659  * Interrupts are disabled on entry as trap3 is an interrupt gate and they
660  * remain disabled throughout this function.
661  */
662 int kprobe_int3_handler(struct pt_regs *regs)
663 {
664 	kprobe_opcode_t *addr;
665 	struct kprobe *p;
666 	struct kprobe_ctlblk *kcb;
667 
668 	if (user_mode(regs))
669 		return 0;
670 
671 	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
672 	/*
673 	 * We don't want to be preempted for the entire duration of kprobe
674 	 * processing. Since the int3 and debug traps disable irqs, and we
675 	 * clear IF while single-stepping, we cannot be preempted here.
676 	 */
677 
678 	kcb = get_kprobe_ctlblk();
679 	p = get_kprobe(addr);
680 
681 	if (p) {
682 		if (kprobe_running()) {
683 			if (reenter_kprobe(p, regs, kcb))
684 				return 1;
685 		} else {
686 			set_current_kprobe(p, regs, kcb);
687 			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
688 
689 			/*
690 			 * If we have no pre-handler or it returned 0, we
691 			 * continue with normal processing.  If we have a
692 			 * pre-handler and it returned non-zero, that means the
693 			 * user handler set up the registers to exit to another
694 			 * instruction, so we must skip the single stepping.
695 			 */
696 			if (!p->pre_handler || !p->pre_handler(p, regs))
697 				setup_singlestep(p, regs, kcb, 0);
698 			else
699 				reset_current_kprobe();
700 			return 1;
701 		}
702 	} else if (*addr != INT3_INSN_OPCODE) {
703 		/*
704 		 * The breakpoint instruction was removed right
705 		 * after we hit it.  Another cpu has removed
706 		 * either a probepoint or a debugger breakpoint
707 		 * at this address.  In either case, no further
708 		 * handling of this interrupt is appropriate.
709 		 * Back up over the (now missing) int3 and run
710 		 * the original instruction.
711 		 */
712 		regs->ip = (unsigned long)addr;
713 		return 1;
714 	} /* else: not a kprobe fault; let the kernel handle it */
715 
716 	return 0;
717 }
718 NOKPROBE_SYMBOL(kprobe_int3_handler);
719 
720 /*
721  * When a retprobed function returns, this code saves registers and
722  * calls trampoline_handler(), which in turn calls the kretprobe's handler.
723  */
724 asm(
725 	".text\n"
726 	".global kretprobe_trampoline\n"
727 	".type kretprobe_trampoline, @function\n"
728 	"kretprobe_trampoline:\n"
729 	/* We don't bother saving the ss register */
730 #ifdef CONFIG_X86_64
731 	"	pushq %rsp\n"
732 	"	pushfq\n"
733 	SAVE_REGS_STRING
734 	"	movq %rsp, %rdi\n"
735 	"	call trampoline_handler\n"
736 	/* Replace saved sp with true return address. */
737 	"	movq %rax, 19*8(%rsp)\n"
738 	RESTORE_REGS_STRING
739 	"	popfq\n"
740 #else
741 	"	pushl %esp\n"
742 	"	pushfl\n"
743 	SAVE_REGS_STRING
744 	"	movl %esp, %eax\n"
745 	"	call trampoline_handler\n"
746 	/* Replace saved sp with true return address. */
747 	"	movl %eax, 15*4(%esp)\n"
748 	RESTORE_REGS_STRING
749 	"	popfl\n"
750 #endif
751 	"	ret\n"
752 	".size kretprobe_trampoline, .-kretprobe_trampoline\n"
753 );
754 NOKPROBE_SYMBOL(kretprobe_trampoline);
755 STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
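
/*
 * For illustration (not in the original source), the 64-bit stack
 * layout while trampoline_handler() runs matches struct pt_regs:
 * the "pushq %rsp" above lands in the pt_regs->sp slot at 19*8(%rsp)
 * and "pushfq" in the flags slot at 18*8(%rsp).  trampoline_handler()
 * returns the real return address in %rax, which is written over the
 * saved-sp slot; after RESTORE_REGS_STRING and "popfq" that slot is
 * the top of the stack, so the final "ret" jumps to the real return
 * address.
 */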
756 
757 /*
758  * Called from kretprobe_trampoline
759  */
760 __used __visible void *trampoline_handler(struct pt_regs *regs)
761 {
762 	struct kretprobe_instance *ri = NULL;
763 	struct hlist_head *head, empty_rp;
764 	struct hlist_node *tmp;
765 	unsigned long flags, orig_ret_address = 0;
766 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
767 	kprobe_opcode_t *correct_ret_addr = NULL;
768 	void *frame_pointer;
769 	bool skipped = false;
770 
771 	/*
772 	 * Set a dummy kprobe to avoid kretprobe recursion.
773 	 * Since a kretprobe handler never runs inside a kprobe handler,
774 	 * no kprobe can be running at this point.
775 	 */
776 	kprobe_busy_begin();
777 
778 	INIT_HLIST_HEAD(&empty_rp);
779 	kretprobe_hash_lock(current, &head, &flags);
780 	/* fixup registers */
781 	regs->cs = __KERNEL_CS;
782 #ifdef CONFIG_X86_32
783 	regs->cs |= get_kernel_rpl();
784 	regs->gs = 0;
785 #endif
786 	/* We use pt_regs->sp as the return address holder. */
787 	frame_pointer = &regs->sp;
788 	regs->ip = trampoline_address;
789 	regs->orig_ax = ~0UL;
790 
791 	/*
792 	 * It is possible to have multiple instances associated with a given
793 	 * task either because multiple functions in the call path have
794 	 * return probes installed on them, and/or more than one
795 	 * return probe was registered for a target function.
796 	 *
797 	 * We can handle this because:
798 	 *     - instances are always pushed into the head of the list
799 	 *     - when multiple return probes are registered for the same
800 	 *	 function, the (chronologically) first instance's ret_addr
801 	 *	 will be the real return address, and all the rest will
802 	 *	 point to kretprobe_trampoline.
803 	 */
804 	hlist_for_each_entry(ri, head, hlist) {
805 		if (ri->task != current)
806 			/* another task is sharing our hash bucket */
807 			continue;
808 		/*
809 		 * Return probes must be pushed onto this hash list in the
810 		 * correct order (same as the return order) so that they can
811 		 * be popped correctly. However, if we find one pushed in the
812 		 * wrong order, we have found a function that should not have
813 		 * been probed, because the out-of-order entry was pushed
814 		 * while another kretprobe was itself being processed.
815 		 */
816 		if (ri->fp != frame_pointer) {
817 			if (!skipped)
818 				pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
819 			skipped = true;
820 			continue;
821 		}
822 
823 		orig_ret_address = (unsigned long)ri->ret_addr;
824 		if (skipped)
825 			pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
826 				ri->rp->kp.addr);
827 
828 		if (orig_ret_address != trampoline_address)
829 			/*
830 			 * This is the real return address. Any other
831 			 * instances associated with this task are for
832 			 * other calls deeper on the call stack
833 			 */
834 			break;
835 	}
836 
837 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
838 
839 	correct_ret_addr = ri->ret_addr;
840 	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
841 		if (ri->task != current)
842 			/* another task is sharing our hash bucket */
843 			continue;
844 		if (ri->fp != frame_pointer)
845 			continue;
846 
847 		orig_ret_address = (unsigned long)ri->ret_addr;
848 		if (ri->rp && ri->rp->handler) {
849 			__this_cpu_write(current_kprobe, &ri->rp->kp);
850 			ri->ret_addr = correct_ret_addr;
851 			ri->rp->handler(ri, regs);
852 			__this_cpu_write(current_kprobe, &kprobe_busy);
853 		}
854 
855 		recycle_rp_inst(ri, &empty_rp);
856 
857 		if (orig_ret_address != trampoline_address)
858 			/*
859 			 * This is the real return address. Any other
860 			 * instances associated with this task are for
861 			 * other calls deeper on the call stack
862 			 */
863 			break;
864 	}
865 
866 	kretprobe_hash_unlock(current, &flags);
867 
868 	kprobe_busy_end();
869 
870 	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
871 		hlist_del(&ri->hlist);
872 		kfree(ri);
873 	}
874 	return (void *)orig_ret_address;
875 }
876 NOKPROBE_SYMBOL(trampoline_handler);
877 
878 /*
879  * Called after single-stepping.  p->addr is the address of the
880  * instruction whose first byte has been replaced by the "int 3"
881  * instruction.  To avoid the SMP problems that can occur when we
882  * temporarily put back the original opcode to single-step, we
883  * single-stepped a copy of the instruction.  The address of this
884  * copy is p->ainsn.insn.
885  *
886  * This function prepares to return from the post-single-step
887  * interrupt.  We have to fix up the stack as follows:
888  *
889  * 0) Except in the case of absolute or indirect jump or call instructions,
890  * the new ip is relative to the copied instruction.  We need to make
891  * it relative to the original instruction.
892  *
893  * 1) If the single-stepped instruction was pushfl, then the TF and IF
894  * flags are set in the just-pushed flags, and may need to be cleared.
895  *
896  * 2) If the single-stepped instruction was a call, the return address
897  * that is atop the stack is the address following the copied instruction.
898  * We need to make it the address following the original instruction.
899  *
900  * If this is the first time we've single-stepped the instruction at
901  * this probepoint, and the instruction is boostable, boost it: add a
902  * jump instruction after the copied instruction, that jumps to the next
903  * instruction after the probepoint.
904  */
905 static void resume_execution(struct kprobe *p, struct pt_regs *regs,
906 			     struct kprobe_ctlblk *kcb)
907 {
908 	unsigned long *tos = stack_addr(regs);
909 	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
910 	unsigned long orig_ip = (unsigned long)p->addr;
911 	kprobe_opcode_t *insn = p->ainsn.insn;
912 
913 	/* Skip prefixes */
914 	insn = skip_prefixes(insn);
915 
916 	regs->flags &= ~X86_EFLAGS_TF;
917 	switch (*insn) {
918 	case 0x9c:	/* pushfl */
919 		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
920 		*tos |= kcb->kprobe_old_flags;
921 		break;
922 	case 0xc2:	/* iret/ret/lret */
923 	case 0xc3:
924 	case 0xca:
925 	case 0xcb:
926 	case 0xcf:
927 	case 0xea:	/* jmp absolute -- ip is correct */
928 		/* ip is already adjusted, no more changes required */
929 		p->ainsn.boostable = true;
930 		goto no_change;
931 	case 0xe8:	/* call relative - Fix return addr */
932 		*tos = orig_ip + (*tos - copy_ip);
933 		break;
934 #ifdef CONFIG_X86_32
935 	case 0x9a:	/* call far absolute -- fix return addr; ip is correct */
936 		*tos = orig_ip + (*tos - copy_ip);
937 		goto no_change;
938 #endif
939 	case 0xff:
940 		if ((insn[1] & 0x30) == 0x10) {
941 			/*
942 			 * call absolute, indirect
943 			 * Fix return addr; ip is correct.
944 			 * But this is not boostable
945 			 */
946 			*tos = orig_ip + (*tos - copy_ip);
947 			goto no_change;
948 		} else if (((insn[1] & 0x31) == 0x20) ||
949 			   ((insn[1] & 0x31) == 0x21)) {
950 			/*
951 			 * jmp near and far, absolute indirect
952 			 * ip is correct. And this is boostable
953 			 */
954 			p->ainsn.boostable = true;
955 			goto no_change;
956 		}
957 	default:
958 		break;
959 	}
960 
961 	regs->ip += orig_ip - copy_ip;
962 
963 no_change:
964 	restore_btf();
965 }
966 NOKPROBE_SYMBOL(resume_execution);
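
/*
 * Worked example (hypothetical addresses, not in the original source):
 * a 5-byte "call rel32" probed at orig_ip is single-stepped from the
 * slot at copy_ip.  The CPU pushed copy_ip + 5 as the return address,
 * so "*tos = orig_ip + (*tos - copy_ip)" rewrites it to orig_ip + 5,
 * and "regs->ip += orig_ip - copy_ip" shifts the post-step ip (the
 * call target, still computed relative to copy_ip) back into the
 * original instruction stream.
 */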
967 
968 /*
969  * Interrupts are disabled on entry as trap1 is an interrupt gate and they
970  * remain disabled throughout this function.
971  */
972 int kprobe_debug_handler(struct pt_regs *regs)
973 {
974 	struct kprobe *cur = kprobe_running();
975 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
976 
977 	if (!cur)
978 		return 0;
979 
980 	resume_execution(cur, regs, kcb);
981 	regs->flags |= kcb->kprobe_saved_flags;
982 
983 	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
984 		kcb->kprobe_status = KPROBE_HIT_SSDONE;
985 		cur->post_handler(cur, regs, 0);
986 	}
987 
988 	/* Restore the original saved kprobes variables and continue. */
989 	if (kcb->kprobe_status == KPROBE_REENTER) {
990 		restore_previous_kprobe(kcb);
991 		goto out;
992 	}
993 	reset_current_kprobe();
994 out:
995 	/*
996 	 * If somebody else is single-stepping across a probe point, flags
997 	 * will have TF set; in that case, continue the remaining processing
998 	 * of do_debug, as if this were not a probe hit.
999 	 */
1000 	if (regs->flags & X86_EFLAGS_TF)
1001 		return 0;
1002 
1003 	return 1;
1004 }
1005 NOKPROBE_SYMBOL(kprobe_debug_handler);
1006 
1007 int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
1008 {
1009 	struct kprobe *cur = kprobe_running();
1010 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
1011 
1012 	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
1013 		/* This must happen on single-stepping */
1014 		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
1015 			kcb->kprobe_status != KPROBE_REENTER);
1016 		/*
1017 		 * We are here because the instruction being single
1018 		 * stepped caused a page fault. We reset the current
1019 		 * kprobe and the ip points back to the probe address
1020 		 * and allow the page fault handler to continue as a
1021 		 * normal page fault.
1022 		 */
1023 		regs->ip = (unsigned long)cur->addr;
1024 		/*
1025 		 * The trap flag (TF) has been set here because this fault
1026 		 * happened while the instruction was being single-stepped.
1027 		 * So clear it by resetting the current kprobe:
1028 		 */
1029 		regs->flags &= ~X86_EFLAGS_TF;
1030 
1031 		/*
1032 		 * If the TF flag was set before the kprobe hit,
1033 		 * don't touch it:
1034 		 */
1035 		regs->flags |= kcb->kprobe_old_flags;
1036 
1037 		if (kcb->kprobe_status == KPROBE_REENTER)
1038 			restore_previous_kprobe(kcb);
1039 		else
1040 			reset_current_kprobe();
1041 	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
1042 		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
1043 		/*
1044 		 * We increment the nmissed count for accounting;
1045 		 * the npre/npostfault counts could also be used to account
1046 		 * for these specific fault cases.
1047 		 */
1048 		kprobes_inc_nmissed_count(cur);
1049 
1050 		/*
1051 		 * handler caused the page fault; this could happen
1052 		 * if the handler tries to access user space via
1053 		 * copy_from_user(), get_user(), etc. Let the
1054 		 * copy_from_user(), get_user() etc. Let the
1055 		 * user-specified handler try to fix it first.
1056 		 */
1057 		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
1058 			return 1;
1059 	}
1060 
1061 	return 0;
1062 }
1063 NOKPROBE_SYMBOL(kprobe_fault_handler);
1064 
1065 int __init arch_populate_kprobe_blacklist(void)
1066 {
1067 	return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
1068 					 (unsigned long)__entry_text_end);
1069 }
1070 
1071 int __init arch_init_kprobes(void)
1072 {
1073 	return 0;
1074 }
1075 
1076 int arch_trampoline_kprobe(struct kprobe *p)
1077 {
1078 	return 0;
1079 }
1080