xref: /linux/arch/x86/kernel/kprobes/core.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/objtool.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <linux/set_memory.h>
#include <linux/cfi.h>
#include <linux/execmem.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/ibt.h>

#include "common.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
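	/*
	 * Each W() row packs the boostability bits for 16 opcodes, shifted
	 * by (row % 32) so that two consecutive rows share one 32-bit word.
	 * The alternating '|' and ',' separators in the table below merge
	 * the row pairs, packing all 256 opcodes into eight words.
	 */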
	/*
	 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
	 * Groups, and some special opcodes cannot be boosted.
	 * This is non-const and volatile to keep gcc from statically
	 * optimizing it out, as variable_test_bit makes gcc think only
	 * *(unsigned long*) is used.
	 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      ----------------------------------------------          */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches the current task but
			      does not switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static nokprobe_inline void
__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)dest;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}
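
/*
 * The synthesized instruction is the 5-byte rel32 form: one opcode byte
 * followed by a signed 32-bit displacement, which the CPU measures from
 * the end of the instruction -- hence the '+ 5' above.
 */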

/*
 * Write at 'dest' a jump instruction which, when placed at address 'from',
 * jumps to address 'to'.
 */
void synthesize_reljump(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, JMP32_INSN_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/*
 * Write at 'dest' a call instruction which, when placed at address 'from',
 * calls address 'to'.
 */
void synthesize_relcall(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, CALL_INSN_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);
/*
 * Returns true if INSN is boostable.
 * RIP-relative instructions are adjusted at copy time in 64-bit mode.
 */
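/*
 * A "boosted" instruction is one that can be run straight out of the copy
 * buffer, with a relative jump back to the original code appended after it
 * (see prepare_singlestep()); this avoids the second int3 trap.
 */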
bool can_boost(struct insn *insn, void *addr)
{
	kprobe_opcode_t opcode;
	insn_byte_t prefix;
	int i;

	if (search_exception_tables((unsigned long)addr))
		return false;	/* Page fault may occur on this address. */

	/* 2nd-byte opcode */
	if (insn->opcode.nbytes == 2)
		return test_bit(insn->opcode.bytes[1],
				(unsigned long *)twobyte_is_boostable);

	if (insn->opcode.nbytes != 1)
		return false;

	for_each_insn_prefix(insn, i, prefix) {
		insn_attr_t attr;

		attr = inat_get_opcode_attribute(prefix);
		/* Can't boost an address-size override or CS segment override prefix */
		if (prefix == 0x2e || inat_is_address_size_prefix(attr))
			return false;
	}

	opcode = insn->opcode.bytes[0];

	switch (opcode) {
	case 0x62:		/* bound */
	case 0x70 ... 0x7f:	/* Conditional jumps */
	case 0x9a:		/* Call far */
	case 0xcc ... 0xce:	/* software exceptions */
	case 0xd6:		/* (UD) */
	case 0xd8 ... 0xdf:	/* ESC */
	case 0xe0 ... 0xe3:	/* LOOP*, JCXZ */
	case 0xe8 ... 0xe9:	/* near Call, JMP */
	case 0xeb:		/* Short JMP */
	case 0xf0 ... 0xf4:	/* LOCK/REP, HLT */
		/* ... are not boostable */
		return false;
	case 0xc0 ... 0xc1:	/* Grp2 */
	case 0xd0 ... 0xd3:	/* Grp2 */
		/*
		 * AMD uses nnn == 110 as SHL/SAL, but Intel makes it reserved.
		 */
		return X86_MODRM_REG(insn->modrm.bytes[0]) != 0b110;
	case 0xf6 ... 0xf7:	/* Grp3 */
		/* AMD uses nnn == 001 as TEST, but Intel makes it reserved. */
		return X86_MODRM_REG(insn->modrm.bytes[0]) != 0b001;
	case 0xfe:		/* Grp4 */
		/* Only INC and DEC are boostable */
		return X86_MODRM_REG(insn->modrm.bytes[0]) == 0b000 ||
		       X86_MODRM_REG(insn->modrm.bytes[0]) == 0b001;
	case 0xff:		/* Grp5 */
		/* Only INC, DEC, and indirect JMP are boostable */
		return X86_MODRM_REG(insn->modrm.bytes[0]) == 0b000 ||
		       X86_MODRM_REG(insn->modrm.bytes[0]) == 0b001 ||
		       X86_MODRM_REG(insn->modrm.bytes[0]) == 0b100;
	default:
		return true;
	}
}

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	bool faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr) == addr;
	/*
	 * Use the current code if it is not modified by Kprobe
	 * and it cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;
	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped at a
	 * different place, so __copy_instruction() tweaks the displacement of
	 * that instruction. In that case, we can't recover the instruction
	 * from kp->ainsn.insn.
	 *
	 * On the other hand, in the case of a normal kprobe, kp->opcode has a
	 * copy of the first byte of the probed instruction, which was
	 * overwritten by int3. Since the instruction at kp->addr is not
	 * modified by kprobes except for its first byte, we can recover the
	 * original instruction from it and kp->opcode.
	 *
	 * In the case of kprobes using ftrace, we do not have a copy of
	 * the original instruction. In fact, the ftrace location might
	 * be modified at any time and could even be in an inconsistent state.
	 * Fortunately, we know that the original code there is the ideal
	 * 5-byte-long NOP.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (faddr)
		memcpy(buf, x86_nops[5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}

/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must hold kprobe_mutex or disable preemption to prevent the
 * referenced kprobes from being released.
 * Returns zero if the instruction cannot be recovered (or the access failed).
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}

/* Check if insn is INT or UD */
static inline bool is_exception_insn(struct insn *insn)
{
	/* UD uses 0f escape */
	if (insn->opcode.bytes[0] == 0x0f) {
		/* UD0 / UD1 / UD2 */
		return insn->opcode.bytes[1] == 0xff ||
		       insn->opcode.bytes[1] == 0xb9 ||
		       insn->opcode.bytes[1] == 0x0b;
	}

	/* INT3 / INT n / INTO / INT1 */
	return insn->opcode.bytes[0] == 0xcc ||
	       insn->opcode.bytes[0] == 0xcd ||
	       insn->opcode.bytes[0] == 0xce ||
	       insn->opcode.bytes[0] == 0xf1;
}

/*
 * Check if paddr is at an instruction boundary and that instruction can
 * be probed
 */
static bool can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return false;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint with the
		 * original instruction in our buffer.
		 * Also, jump optimization replaces the breakpoint with a
		 * relative jump; since a relative jump itself is a normal
		 * instruction, we simply continue if there is no kprobe.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return false;

		if (insn_decode_kernel(&insn, (void *)__addr) < 0)
			return false;

#ifdef CONFIG_KGDB
		/*
		 * If there is a dynamically installed kgdb sw breakpoint,
		 * this function should not be probed.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
		    kgdb_has_hit_break(addr))
			return false;
#endif
		addr += insn.length;
	}

	/* Check if paddr is at an instruction boundary */
	if (addr != paddr)
		return false;

	__addr = recover_probed_instruction(buf, addr);
	if (!__addr)
		return false;

	if (insn_decode_kernel(&insn, (void *)__addr) < 0)
		return false;

	/* INT and UD are special and should not be kprobed */
	if (is_exception_insn(&insn))
		return false;
	if (IS_ENABLED(CONFIG_CFI_CLANG)) {
		/*
		 * The compiler generates the following instruction sequence
		 * for indirect call checks, and cfi.c decodes it:
		 *
		 *   movl    -<id>, %r10d       ; 6 bytes
		 *   addl    -4(%reg), %r10d    ; 4 bytes
		 *   je      .Ltmp1             ; 2 bytes
		 *   ud2                        ; <- regs->ip
		 *   .Ltmp1:
		 *
		 * The movl and addl also encode the expected type, so they
		 * must not be touched either.
		 */
		if (insn.opcode.value == 0xBA)
			offset = 12;
		else if (insn.opcode.value == 0x3)
			offset = 6;
		else
			goto out;

		/* This movl/addl is used for decoding CFI. */
		if (is_cfi_trap(addr + offset))
			return false;
	}

out:
	return true;
}

/* If x86 supports IBT, the ENDBR instruction at the function entry must be skipped. */
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
					 bool *on_func_entry)
{
	u32 insn;

	/*
	 * Since 'addr' is not guaranteed to be safe to access, use
	 * copy_from_kernel_nofault() to read the instruction:
	 */
	if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(u32)))
		return NULL;

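	/*
	 * ENDBR is a 4-byte instruction, so a probe requested at the very
	 * function entry (offset 0) is moved past it to offset 4, which
	 * still counts as being on the function entry.
	 */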
	if (is_endbr(insn)) {
		*on_func_entry = !offset || offset == 4;
		if (*on_func_entry)
			offset = 4;

	} else {
		*on_func_entry = !offset;
	}

	return (kprobe_opcode_t *)(addr + offset);
}

/*
 * Copy an instruction, recovering it if it has been modified by kprobes,
 * and adjust the displacement if the instruction uses the %rip-relative
 * addressing mode. Note that since @real will be the final location of the
 * copied instruction, the displacement must be adjusted against @real, not
 * @dest. Returns the length of the copied instruction, or 0 on error.
 */
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src);
	int ret;

	if (!recovered_insn || !insn)
		return 0;

	/* This can access kernel text if the given address was not recovered */
	if (copy_from_kernel_nofault(dest, (void *)recovered_insn,
			MAX_INSN_SIZE))
		return 0;

	ret = insn_decode_kernel(insn, dest);
	if (ret < 0)
		return 0;

	/* We cannot probe an instruction that has a forced-emulation prefix */
	if (insn_has_emulate_prefix(insn))
		return 0;

	/* Another subsystem has put a breakpoint there; we failed to recover it */
	if (insn->opcode.bytes[0] == INT3_INSN_OPCODE)
		return 0;

	/* We must not single-step on exception-masking instructions */
	if (insn_masking_exception(insn))
		return 0;

#ifdef CONFIG_X86_64
	/* Only x86_64 has RIP-relative instructions */
	if (insn_rip_relative(insn)) {
		s64 newdisp;
		u8 *disp;
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn->displacement.value
			  - (u8 *) real;
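		/*
		 * Note: real + newdisp == src + disp.  Since the copy has
		 * the same length as the original instruction, the
		 * %rip-relative target (next ip + displacement) resolves to
		 * the same absolute address.
		 */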
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn->length;
}

/* Prepare a reljump or int3 right after the instruction */
static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p,
			      struct insn *insn)
{
	int len = insn->length;

	if (!IS_ENABLED(CONFIG_PREEMPTION) &&
	    !p->post_handler && can_boost(insn, p->addr) &&
	    MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
		/*
		 * The copied instruction can be executed directly if it
		 * jumps back to the correct address afterwards.
		 */
		synthesize_reljump(buf + len, p->ainsn.insn + len,
				   p->addr + insn->length);
		len += JMP32_INSN_SIZE;
		p->ainsn.boostable = 1;
	} else {
		/* Otherwise, put an int3 to trap the single-step */
		if (MAX_INSN_SIZE - len < INT3_INSN_SIZE)
			return -ENOSPC;

		buf[len] = INT3_INSN_OPCODE;
		len += INT3_INSN_SIZE;
	}
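	/*
	 * The resulting slot layout is therefore (sketch):
	 *   boosted:     [ copied insn ][ jmp back to p->addr + insn->length ]
	 *   non-boosted: [ copied insn ][ int3 ]
	 */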

	return len;
}

/* Make the page read-only and executable when allocating it */
void *alloc_insn_page(void)
{
	void *page;

	page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
	if (!page)
		return NULL;

	/*
	 * TODO: Once additional kernel code protection mechanisms are set, ensure
	 * that the page was not maliciously altered and it is still zeroed.
	 */
	set_memory_rox((unsigned long)page, 1);

	return page;
}

/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */

static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
{
	switch (p->ainsn.opcode) {
	case 0xfa:	/* cli */
		regs->flags &= ~(X86_EFLAGS_IF);
		break;
	case 0xfb:	/* sti */
		regs->flags |= X86_EFLAGS_IF;
		break;
	case 0x9c:	/* pushf */
		int3_emulate_push(regs, regs->flags);
		break;
	case 0x9d:	/* popf */
		regs->flags = int3_emulate_pop(regs);
		break;
	}
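	/*
	 * regs->ip currently points just past the int3 at p->addr; move it
	 * to the end of the probed instruction instead.
	 */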
	regs->ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
}
NOKPROBE_SYMBOL(kprobe_emulate_ifmodifiers);

static void kprobe_emulate_ret(struct kprobe *p, struct pt_regs *regs)
{
	int3_emulate_ret(regs);
}
NOKPROBE_SYMBOL(kprobe_emulate_ret);

static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long func = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	func += p->ainsn.rel32;
	int3_emulate_call(regs, func);
}
NOKPROBE_SYMBOL(kprobe_emulate_call);

static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	ip += p->ainsn.rel32;
	int3_emulate_jmp(regs, ip);
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp);

static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	int3_emulate_jcc(regs, p->ainsn.jcc.type, ip, p->ainsn.rel32);
}
NOKPROBE_SYMBOL(kprobe_emulate_jcc);

static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
	bool match;

	if (p->ainsn.loop.type != 3) {	/* LOOP* */
		if (p->ainsn.loop.asize == 32)
			match = ((*(u32 *)&regs->cx)--) != 0;
#ifdef CONFIG_X86_64
		else if (p->ainsn.loop.asize == 64)
			match = ((*(u64 *)&regs->cx)--) != 0;
#endif
		else
			match = ((*(u16 *)&regs->cx)--) != 0;
	} else {			/* JCXZ */
		if (p->ainsn.loop.asize == 32)
			match = *(u32 *)(&regs->cx) == 0;
#ifdef CONFIG_X86_64
		else if (p->ainsn.loop.asize == 64)
			match = *(u64 *)(&regs->cx) == 0;
#endif
		else
			match = *(u16 *)(&regs->cx) == 0;
	}

	if (p->ainsn.loop.type == 0)	/* LOOPNE */
		match = match && !(regs->flags & X86_EFLAGS_ZF);
	else if (p->ainsn.loop.type == 1)	/* LOOPE */
		match = match && (regs->flags & X86_EFLAGS_ZF);

	if (match)
		ip += p->ainsn.rel32;
	int3_emulate_jmp(regs, ip);
}
NOKPROBE_SYMBOL(kprobe_emulate_loop);

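/*
 * Indexed by the hardware register encoding (ModRM.rm, plus 8 with REX.B
 * for r8-r15), so the entry order must match rAX=0, rCX=1, ..., r15=15.
 */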
static const int addrmode_regoffs[] = {
	offsetof(struct pt_regs, ax),
	offsetof(struct pt_regs, cx),
	offsetof(struct pt_regs, dx),
	offsetof(struct pt_regs, bx),
	offsetof(struct pt_regs, sp),
	offsetof(struct pt_regs, bp),
	offsetof(struct pt_regs, si),
	offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
	offsetof(struct pt_regs, r8),
	offsetof(struct pt_regs, r9),
	offsetof(struct pt_regs, r10),
	offsetof(struct pt_regs, r11),
	offsetof(struct pt_regs, r12),
	offsetof(struct pt_regs, r13),
	offsetof(struct pt_regs, r14),
	offsetof(struct pt_regs, r15),
#endif
};

static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + p->ainsn.size);
	int3_emulate_jmp(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);

static void kprobe_emulate_jmp_indirect(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

	int3_emulate_jmp(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp_indirect);

static int prepare_emulation(struct kprobe *p, struct insn *insn)
{
	insn_byte_t opcode = insn->opcode.bytes[0];

	switch (opcode) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0x9c:		/* pushfl */
	case 0x9d:		/* popf/popfd */
		/*
		 * The IF modifiers must be emulated, because actually
		 * executing them could enable interrupts in the middle of
		 * int3 single-stepping.
		 */
		p->ainsn.emulate_op = kprobe_emulate_ifmodifiers;
		p->ainsn.opcode = opcode;
		break;
	case 0xc2:	/* ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
		p->ainsn.emulate_op = kprobe_emulate_ret;
		break;
	case 0x9a:	/* far call absolute -- segment is not supported */
	case 0xea:	/* far jmp absolute -- segment is not supported */
	case 0xcc:	/* int3 */
	case 0xcf:	/* iret -- in-kernel IRET is not supported */
		return -EOPNOTSUPP;
	case 0xe8:	/* near call relative */
		p->ainsn.emulate_op = kprobe_emulate_call;
		if (insn->immediate.nbytes == 2)
			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
		else
			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
	case 0xeb:	/* short jump relative */
	case 0xe9:	/* near jump relative */
		p->ainsn.emulate_op = kprobe_emulate_jmp;
		if (insn->immediate.nbytes == 1)
			p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
		else if (insn->immediate.nbytes == 2)
			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
		else
			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
	case 0x70 ... 0x7f:
		/* 1-byte conditional jump */
		p->ainsn.emulate_op = kprobe_emulate_jcc;
		p->ainsn.jcc.type = opcode & 0xf;
		p->ainsn.rel32 = insn->immediate.value;
		break;
	case 0x0f:
		opcode = insn->opcode.bytes[1];
		if ((opcode & 0xf0) == 0x80) {
			/* 2-byte conditional jump */
			p->ainsn.emulate_op = kprobe_emulate_jcc;
			p->ainsn.jcc.type = opcode & 0xf;
			if (insn->immediate.nbytes == 2)
				p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
			else
				p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		} else if (opcode == 0x01 &&
			   X86_MODRM_REG(insn->modrm.bytes[0]) == 0 &&
			   X86_MODRM_MOD(insn->modrm.bytes[0]) == 3) {
			/* VM extensions - not supported */
			return -EOPNOTSUPP;
		}
		break;
	case 0xe0:	/* Loop NZ */
	case 0xe1:	/* Loop */
	case 0xe2:	/* Loop */
	case 0xe3:	/* J*CXZ */
		p->ainsn.emulate_op = kprobe_emulate_loop;
		p->ainsn.loop.type = opcode & 0x3;
		p->ainsn.loop.asize = insn->addr_bytes * 8;
		p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
		break;
	case 0xff:
		/*
		 * Since 0xff is an extended group opcode, the instruction
		 * is determined by the ModRM byte.
		 */
		opcode = insn->modrm.bytes[0];
		switch (X86_MODRM_REG(opcode)) {
		case 0b010:	/* FF /2, call near, absolute indirect */
			p->ainsn.emulate_op = kprobe_emulate_call_indirect;
			break;
		case 0b100:	/* FF /4, jmp near, absolute indirect */
			p->ainsn.emulate_op = kprobe_emulate_jmp_indirect;
			break;
		case 0b011:	/* FF /3, call far, absolute indirect */
		case 0b101:	/* FF /5, jmp far, absolute indirect */
			return -EOPNOTSUPP;
		}

		if (!p->ainsn.emulate_op)
			break;

		if (insn->addr_bytes != sizeof(unsigned long))
			return -EOPNOTSUPP;	/* Don't support a different address size */
		if (X86_MODRM_MOD(opcode) != 3)
			return -EOPNOTSUPP;	/* TODO: support memory addressing */

		p->ainsn.indirect.reg = X86_MODRM_RM(opcode);
#ifdef CONFIG_X86_64
		if (X86_REX_B(insn->rex_prefix.value))
			p->ainsn.indirect.reg += 8;
#endif
		break;
	default:
		break;
	}
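	/*
	 * If an emulate_op was chosen above, setup_singlestep() can invoke
	 * it and complete via kprobe_post_process(), instead of
	 * single-stepping the copied instruction.
	 */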
	p->ainsn.size = insn->length;

	return 0;
}

static int arch_copy_kprobe(struct kprobe *p)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int ret, len;

	/* Copy the instruction, recovering it if another kprobe/optprobe modified it. */
	len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
	if (!len)
		return -EINVAL;

	/* Analyze the opcode and set up the emulation functions */
	ret = prepare_emulation(p, &insn);
	if (ret < 0)
		return ret;

	/* Add an int3 for the single-step or a booster jmp */
	len = prepare_singlestep(buf, p, &insn);
	if (len < 0)
		return len;

	/* The displacement adjustment never touches the first byte */
	p->opcode = buf[0];

	p->ainsn.tp_len = len;
	perf_event_text_poke(p->ainsn.insn, NULL, 0, buf, len);

	/* OK, write back the instruction(s) into the ROX insn buffer */
	text_poke(p->ainsn.insn, buf, len);

	return 0;
}

int arch_prepare_kprobe(struct kprobe *p)
{
	int ret;

	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;

	memset(&p->ainsn, 0, sizeof(p->ainsn));

	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	ret = arch_copy_kprobe(p);
	if (ret) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}

	return ret;
}

void arch_arm_kprobe(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	text_poke(p->addr, &int3, 1);
	text_poke_sync();
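	/* Tell perf that the saved first byte (p->opcode) was replaced by int3. */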
	perf_event_text_poke(p->addr, &p->opcode, 1, &int3, 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	perf_event_text_poke(p->addr, &int3, 1, &p->opcode, 1);
	text_poke(p->addr, &p->opcode, 1);
	text_poke_sync();
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		/* Record the perf event before freeing the slot */
		perf_event_text_poke(p->ainsn.insn, p->ainsn.insn,
				     p->ainsn.tp_len, NULL, 0);
		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
		p->ainsn.insn = NULL;
	}
}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & X86_EFLAGS_IF);
}

static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		/* This will restore both kcb and current_kprobe */
		restore_previous_kprobe(kcb);
	} else {
		/*
		 * Always update the kcb status because
		 * reset_current_kprobe() doesn't update kcb.
		 */
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		if (cur->post_handler)
			cur->post_handler(cur, regs, 0);
		reset_current_kprobe();
	}
}
NOKPROBE_SYMBOL(kprobe_post_process);

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPTION)
	if (p->ainsn.boostable) {
		/* Boost up -- we can execute the copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe neither resets nor sets
		 * current_kprobe, because it doesn't use single-stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;

	if (p->ainsn.emulate_op) {
		p->ainsn.emulate_op(p, regs);
		kprobe_post_process(p, regs, kcb);
		return;
	}

	/* Disable interrupts and point the ip register at the trampoline */
	regs->flags &= ~X86_EFLAGS_IF;
	regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn. We also don't use the trap flag; instead
 * we place another "int3" right after the copied instruction.
 * Unlike trap-flag single-stepping, "int3" single-stepping cannot
 * handle instructions that change the ip register, e.g. jmp, call,
 * and conditional jmp, nor instructions that change the IF flag,
 * because interrupts must stay disabled around the single-step.
 * Such instructions are emulated in software; the others are
 * single-stepped using "int3".
 *
 * When the second "int3" is handled, regs->ip and regs->flags need to
 * be adjusted so that we can resume execution of the correct code.
 */
static void resume_singlestep(struct kprobe *p, struct pt_regs *regs,
			      struct kprobe_ctlblk *kcb)
{
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;

	/* Restore the saved interrupt flag and the ip register */
	regs->flags |= kcb->kprobe_saved_flags;
	/*
	 * regs->ip points just past the int3 that was executed in the copy
	 * buffer; translate it back into the original text and step back
	 * over the int3.
	 */
	regs->ip += (orig_ip - copy_ip) - INT3_INSN_SIZE;
}
NOKPROBE_SYMBOL(resume_singlestep);

/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in .kprobes.text section.
		 * Raise a BUG or we'll continue in an endless reentering loop
		 * and eventually a stack overflow.
		 */
		pr_err("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);

static nokprobe_inline int kprobe_is_ss(struct kprobe_ctlblk *kcb)
{
	return (kcb->kprobe_status == KPROBE_HIT_SS ||
		kcb->kprobe_status == KPROBE_REENTER);
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

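	/*
	 * #BP reports the ip *after* the one-byte int3, so step back by one
	 * byte to get the breakpoint address itself for the kprobe lookup.
	 */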
	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire duration of kprobe
	 * processing. Since the int3 and debug traps disable irqs and we
	 * clear IF while single-stepping, this cannot be preempted.
	 */

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, that means
			 * the user handler set up the registers to exit to
			 * another instruction, and we must skip the single
			 * stepping.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
			return 1;
		}
	} else if (kprobe_is_ss(kcb)) {
		p = kprobe_running();
		if ((unsigned long)p->ainsn.insn < regs->ip &&
		    (unsigned long)p->ainsn.insn + MAX_INSN_SIZE > regs->ip) {
			/* Most probably this is the second int3 of the single-step */
			resume_singlestep(p, regs, kcb);
			kprobe_post_process(p, regs, kcb);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This can only happen while single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault. We reset the current
		 * kprobe and point the ip back to the probe address,
		 * then allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;

		/*
		 * If the IF flag was set before the kprobe hit,
		 * don't touch it:
		 */
		regs->flags |= kcb->kprobe_old_flags;

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

int __init arch_populate_kprobe_blacklist(void)
{
	return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					 (unsigned long)__entry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}