xref: /linux/arch/x86/kernel/kprobes/core.c (revision eb01fe7abbe2d0b38824d2a93fdb4cc3eaf2ccc1)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/objtool.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <linux/set_memory.h>
#include <linux/cfi.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/ibt.h>

#include "common.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
	 * Groups, and some special opcodes cannot be boosted.
	 * This is non-const and volatile to keep gcc from statically
	 * optimizing it out, as variable_test_bit makes gcc think only
	 * *(unsigned long*) is used.
	 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W
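
/*
 * Worked example of the table layout above (editorial illustration; the
 * opcode 0x47 is picked arbitrarily): test_bit() treats the array as one
 * flat bitmap, so the two-byte opcode 0x0f 0x47 (cmova) is looked up as
 * bit 0x47.  Each W() row packs 16 bits, and two rows share one u32 via
 * the "<< (row % 32)" shift, so bit 0x47 lands in element 0x47 / 32 = 2
 * (the 0x40|0x50 pair) at bit position 0x47 % 32 = 7, i.e. column 7 of
 * the 0x40 row, which is 1: cmova is boostable.
 */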

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function only switches the current task,
			      but doesn't switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static nokprobe_inline void
__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)dest;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}
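
/*
 * For illustration (hypothetical addresses): synthesizing a reljump at
 * dest == from == 0xffffffff81000000 that targets to == 0xffffffff81000100
 * stores raddr = 0x100 - 5 = 0xfb, since the CPU computes the jump target
 * relative to the end of the 5-byte instruction.  @dest and @from differ
 * when the instruction is built in a temporary buffer that will later be
 * copied to @from.
 */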

/*
 * Write a jump instruction into 'dest'; its displacement is computed as
 * if it were executed at 'from', and it jumps to address 'to'.
 */
void synthesize_reljump(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, JMP32_INSN_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/*
 * Write a call instruction into 'dest'; its displacement is computed as
 * if it were executed at 'from', and it calls address 'to'.
 */
void synthesize_relcall(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, CALL_INSN_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);

/*
 * Returns true if INSN is boostable.
 * RIP-relative instructions are adjusted at copy time in 64-bit mode.
 */
bool can_boost(struct insn *insn, void *addr)
{
	kprobe_opcode_t opcode;
	insn_byte_t prefix;
	int i;

	if (search_exception_tables((unsigned long)addr))
		return false;	/* Page fault may occur on this address. */

	/* 2nd-byte opcode */
	if (insn->opcode.nbytes == 2)
		return test_bit(insn->opcode.bytes[1],
				(unsigned long *)twobyte_is_boostable);

	if (insn->opcode.nbytes != 1)
		return false;

	for_each_insn_prefix(insn, i, prefix) {
		insn_attr_t attr;

		attr = inat_get_opcode_attribute(prefix);
		/* Can't boost an address-size override or a CS segment override prefix */
		if (prefix == 0x2e || inat_is_address_size_prefix(attr))
			return false;
	}

	opcode = insn->opcode.bytes[0];

	switch (opcode) {
	case 0x62:		/* bound */
	case 0x70 ... 0x7f:	/* Conditional jumps */
	case 0x9a:		/* Call far */
	case 0xcc ... 0xce:	/* software exceptions */
	case 0xd6:		/* (UD) */
	case 0xd8 ... 0xdf:	/* ESC */
	case 0xe0 ... 0xe3:	/* LOOP*, JCXZ */
	case 0xe8 ... 0xe9:	/* near Call, JMP */
	case 0xeb:		/* Short JMP */
	case 0xf0 ... 0xf4:	/* LOCK/REP, HLT */
		/* ... are not boostable */
		return false;
	case 0xc0 ... 0xc1:	/* Grp2 */
	case 0xd0 ... 0xd3:	/* Grp2 */
		/*
		 * AMD uses nnn == 110 as SHL/SAL, but Intel makes it reserved.
		 */
		return X86_MODRM_REG(insn->modrm.bytes[0]) != 0b110;
	case 0xf6 ... 0xf7:	/* Grp3 */
		/* AMD uses nnn == 001 as TEST, but Intel makes it reserved. */
		return X86_MODRM_REG(insn->modrm.bytes[0]) != 0b001;
	case 0xfe:		/* Grp4 */
		/* Only INC and DEC are boostable */
		return X86_MODRM_REG(insn->modrm.bytes[0]) == 0b000 ||
		       X86_MODRM_REG(insn->modrm.bytes[0]) == 0b001;
	case 0xff:		/* Grp5 */
		/* Only INC, DEC, and indirect JMP are boostable */
		return X86_MODRM_REG(insn->modrm.bytes[0]) == 0b000 ||
		       X86_MODRM_REG(insn->modrm.bytes[0]) == 0b001 ||
		       X86_MODRM_REG(insn->modrm.bytes[0]) == 0b100;
	default:
		return true;
	}
}

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	bool faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr) == addr;
	/*
	 * Use the current code if it has not been modified by a kprobe
	 * and it cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped at a
	 * different place, so __copy_instruction() tweaks the displacement of
	 * that instruction.  In that case, we can't recover the instruction
	 * from kp->ainsn.insn.
	 *
	 * On the other hand, in the case of a normal kprobe, kp->opcode has a
	 * copy of the first byte of the probed instruction, which was
	 * overwritten by int3.  Since the instruction at kp->addr is not
	 * modified by kprobes except for that first byte, we can recover the
	 * original instruction from it and kp->opcode.
	 *
	 * In the case of kprobes using ftrace, we do not have a copy of
	 * the original instruction.  In fact, the ftrace location might
	 * be modified at any time and could even be in an inconsistent state.
	 * Fortunately, we know that the original code is the ideal 5-byte
	 * long NOP.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (faddr)
		memcpy(buf, x86_nops[5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}

/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must hold kprobe_mutex, or disable preemption, to prevent
 * the kprobes it references from being released.
 * Returns zero if the instruction cannot be recovered (or the access failed).
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}

/* Check if insn is INT or UD */
static inline bool is_exception_insn(struct insn *insn)
{
	/* UD uses 0f escape */
	if (insn->opcode.bytes[0] == 0x0f) {
		/* UD0 / UD1 / UD2 */
		return insn->opcode.bytes[1] == 0xff ||
		       insn->opcode.bytes[1] == 0xb9 ||
		       insn->opcode.bytes[1] == 0x0b;
	}

	/* INT3 / INT n / INTO / INT1 */
	return insn->opcode.bytes[0] == 0xcc ||
	       insn->opcode.bytes[0] == 0xcd ||
	       insn->opcode.bytes[0] == 0xce ||
	       insn->opcode.bytes[0] == 0xf1;
}

/*
 * Check if paddr is at an instruction boundary and that the instruction
 * can be probed.
 */
static bool can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return false;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint with the
		 * original instruction in our buffer.
		 * Also, jump optimization will change the breakpoint to a
		 * relative jump.  Since a relative jump is itself a normal
		 * instruction, we just pass it through if there is no kprobe.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return false;

		if (insn_decode_kernel(&insn, (void *)__addr) < 0)
			return false;

#ifdef CONFIG_KGDB
		/*
		 * If there is a dynamically installed kgdb sw breakpoint,
		 * this function should not be probed.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
		    kgdb_has_hit_break(addr))
			return false;
#endif
		addr += insn.length;
	}

	/* Check if paddr is at an instruction boundary */
	if (addr != paddr)
		return false;

	__addr = recover_probed_instruction(buf, addr);
	if (!__addr)
		return false;

	if (insn_decode_kernel(&insn, (void *)__addr) < 0)
		return false;

	/* INT and UD are special and should not be kprobed */
	if (is_exception_insn(&insn))
		return false;

	if (IS_ENABLED(CONFIG_CFI_CLANG)) {
		/*
		 * The compiler generates the following instruction sequence
		 * for indirect call checks, and cfi.c decodes it:
		 *
		 *   movl    -<id>, %r10d       ; 6 bytes
		 *   addl    -4(%reg), %r10d    ; 4 bytes
		 *   je      .Ltmp1             ; 2 bytes
		 *   ud2                        ; <- regs->ip
		 *   .Ltmp1:
		 *
		 * The movl and addl also encode the expected type, so they
		 * must not be touched either.
		 */
		if (insn.opcode.value == 0xBA)
			offset = 12;
		else if (insn.opcode.value == 0x3)
			offset = 6;
		else
			goto out;

		/* This movl/addl is used for decoding CFI. */
		if (is_cfi_trap(addr + offset))
			return false;
	}

out:
	return true;
}

/* If x86 supports IBT, the ENDBR at the function entry must be skipped. */
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
					 bool *on_func_entry)
{
	if (is_endbr(*(u32 *)addr)) {
		*on_func_entry = !offset || offset == 4;
		if (*on_func_entry)
			offset = 4;

	} else {
		*on_func_entry = !offset;
	}

	return (kprobe_opcode_t *)(addr + offset);
}
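
/*
 * Example (editorial, with a hypothetical function foo): with IBT enabled,
 * a function starts with a 4-byte ENDBR, so a probe requested at foo+0 is
 * placed at foo+4 and is still reported as a function-entry probe
 * (*on_func_entry == true).  A probe at foo+10 keeps its offset but is
 * not considered a function-entry probe.
 */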

/*
 * Copy an instruction, recovering it if it has been modified by kprobes,
 * and adjust the displacement if the instruction uses the %rip-relative
 * addressing mode.  Note that since @real will be the final location of
 * the copied instruction, the displacement must be adjusted with @real,
 * not @dest.
 * Returns the length of the copied instruction, or 0 on error.
 */
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src);
	int ret;

	if (!recovered_insn || !insn)
		return 0;

	/* This can access kernel text if the given address was not recovered */
	if (copy_from_kernel_nofault(dest, (void *)recovered_insn,
			MAX_INSN_SIZE))
		return 0;

	ret = insn_decode_kernel(insn, dest);
	if (ret < 0)
		return 0;

	/* We cannot probe an instruction carrying an emulate prefix */
	if (insn_has_emulate_prefix(insn))
		return 0;

	/* Another subsystem has put a breakpoint here; we failed to recover it */
	if (insn->opcode.bytes[0] == INT3_INSN_OPCODE)
		return 0;

	/* We should not single-step exception-masking instructions */
	if (insn_masking_exception(insn))
		return 0;

#ifdef CONFIG_X86_64
	/* Only x86_64 has RIP-relative instructions */
	if (insn_rip_relative(insn)) {
		s64 newdisp;
		u8 *disp;
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn->displacement.value
			  - (u8 *) real;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn->length;
}
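
/*
 * The RIP-relative fixup, worked through with hypothetical addresses: if
 * "mov 0x1000(%rip), %rax" sits at src == 0xffffffff81000000 and its copy
 * will run at real == 0xffffffff82000000, the data it references stays
 * put, so the new displacement must be src + 0x1000 - real, i.e.
 * 0x1000 - 0x1000000 (a negative value that still fits in an s32).  The
 * instruction lengths of the original and the copy are equal, so they
 * cancel out of the calculation.  If the copy were placed too far away
 * for the result to fit in 32 bits, the code above bails out instead.
 */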

/* Prepare a reljump or an int3 right after the copied instruction */
static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p,
			      struct insn *insn)
{
	int len = insn->length;

	if (!IS_ENABLED(CONFIG_PREEMPTION) &&
	    !p->post_handler && can_boost(insn, p->addr) &&
	    MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
		/*
		 * This instruction can be executed directly, provided it
		 * jumps back to the correct address.
		 */
		synthesize_reljump(buf + len, p->ainsn.insn + len,
				   p->addr + insn->length);
		len += JMP32_INSN_SIZE;
		p->ainsn.boostable = 1;
	} else {
		/* Otherwise, put an int3 to trap the single-step */
		if (MAX_INSN_SIZE - len < INT3_INSN_SIZE)
			return -ENOSPC;

		buf[len] = INT3_INSN_OPCODE;
		len += INT3_INSN_SIZE;
	}

	return len;
}
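
/*
 * Resulting single-step buffer layouts (illustrative):
 *
 *   boosted:     [ copied insn ][ jmp32 back to p->addr + insn->length ]
 *   non-boosted: [ copied insn ][ int3 ]
 *
 * The boosted form runs the copy and jumps straight back without
 * trapping; the int3 form raises a second breakpoint so that
 * resume_singlestep() can fix up regs->ip afterwards.
 */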

/* Make the page ROX (read-only, executable) when allocating it */
void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	if (!page)
		return NULL;

	/*
	 * TODO: Once additional kernel code protection mechanisms are set, ensure
	 * that the page was not maliciously altered and it is still zeroed.
	 */
	set_memory_rox((unsigned long)page, 1);

	return page;
}

/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */

static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
{
	switch (p->ainsn.opcode) {
	case 0xfa:	/* cli */
		regs->flags &= ~(X86_EFLAGS_IF);
		break;
	case 0xfb:	/* sti */
		regs->flags |= X86_EFLAGS_IF;
		break;
	case 0x9c:	/* pushf */
		int3_emulate_push(regs, regs->flags);
		break;
	case 0x9d:	/* popf */
		regs->flags = int3_emulate_pop(regs);
		break;
	}
	regs->ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
}
NOKPROBE_SYMBOL(kprobe_emulate_ifmodifiers);
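
/*
 * Note on the regs->ip fixup above (editorial): when the int3 handler
 * runs, regs->ip already points just past the 1-byte int3, i.e. to
 * p->addr + INT3_INSN_SIZE.  Stepping back by INT3_INSN_SIZE and adding
 * p->ainsn.size therefore resumes at the instruction following the
 * probed one.  E.g. (hypothetical address) probing a 1-byte "cli" at
 * 0xffffffff81000000 traps with regs->ip == 0xffffffff81000001, and
 * 0x...001 - 1 + 1 == 0x...001 is exactly the next instruction.
 */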

static void kprobe_emulate_ret(struct kprobe *p, struct pt_regs *regs)
{
	int3_emulate_ret(regs);
}
NOKPROBE_SYMBOL(kprobe_emulate_ret);

static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long func = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	func += p->ainsn.rel32;
	int3_emulate_call(regs, func);
}
NOKPROBE_SYMBOL(kprobe_emulate_call);

static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	ip += p->ainsn.rel32;
	int3_emulate_jmp(regs, ip);
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp);

static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	int3_emulate_jcc(regs, p->ainsn.jcc.type, ip, p->ainsn.rel32);
}
NOKPROBE_SYMBOL(kprobe_emulate_jcc);

static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
	bool match;

	if (p->ainsn.loop.type != 3) {	/* LOOP* */
		if (p->ainsn.loop.asize == 32)
			match = ((*(u32 *)&regs->cx)--) != 0;
#ifdef CONFIG_X86_64
		else if (p->ainsn.loop.asize == 64)
			match = ((*(u64 *)&regs->cx)--) != 0;
#endif
		else
			match = ((*(u16 *)&regs->cx)--) != 0;
	} else {			/* JCXZ */
		if (p->ainsn.loop.asize == 32)
			match = *(u32 *)(&regs->cx) == 0;
#ifdef CONFIG_X86_64
		else if (p->ainsn.loop.asize == 64)
			match = *(u64 *)(&regs->cx) == 0;
#endif
		else
			match = *(u16 *)(&regs->cx) == 0;
	}

	if (p->ainsn.loop.type == 0)	/* LOOPNE */
		match = match && !(regs->flags & X86_EFLAGS_ZF);
	else if (p->ainsn.loop.type == 1)	/* LOOPE */
		match = match && (regs->flags & X86_EFLAGS_ZF);

	if (match)
		ip += p->ainsn.rel32;
	int3_emulate_jmp(regs, ip);
}
NOKPROBE_SYMBOL(kprobe_emulate_loop);

static const int addrmode_regoffs[] = {
	offsetof(struct pt_regs, ax),
	offsetof(struct pt_regs, cx),
	offsetof(struct pt_regs, dx),
	offsetof(struct pt_regs, bx),
	offsetof(struct pt_regs, sp),
	offsetof(struct pt_regs, bp),
	offsetof(struct pt_regs, si),
	offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
	offsetof(struct pt_regs, r8),
	offsetof(struct pt_regs, r9),
	offsetof(struct pt_regs, r10),
	offsetof(struct pt_regs, r11),
	offsetof(struct pt_regs, r12),
	offsetof(struct pt_regs, r13),
	offsetof(struct pt_regs, r14),
	offsetof(struct pt_regs, r15),
#endif
};

static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + p->ainsn.size);
	int3_emulate_jmp(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);

static void kprobe_emulate_jmp_indirect(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

	int3_emulate_jmp(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp_indirect);

static int prepare_emulation(struct kprobe *p, struct insn *insn)
{
	insn_byte_t opcode = insn->opcode.bytes[0];

	switch (opcode) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0x9c:		/* pushfl */
	case 0x9d:		/* popf/popfd */
		/*
		 * IF modifiers must be emulated, since int3 single-stepping
		 * would otherwise enable interrupts.
		 */
		p->ainsn.emulate_op = kprobe_emulate_ifmodifiers;
		p->ainsn.opcode = opcode;
		break;
	case 0xc2:	/* ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
		p->ainsn.emulate_op = kprobe_emulate_ret;
		break;
	case 0x9a:	/* far call absolute -- segment is not supported */
	case 0xea:	/* far jmp absolute -- segment is not supported */
	case 0xcc:	/* int3 */
	case 0xcf:	/* iret -- in-kernel IRET is not supported */
		return -EOPNOTSUPP;
	case 0xe8:	/* near call relative */
		p->ainsn.emulate_op = kprobe_emulate_call;
		if (insn->immediate.nbytes == 2)
			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
		else
			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
	case 0xeb:	/* short jump relative */
	case 0xe9:	/* near jump relative */
		p->ainsn.emulate_op = kprobe_emulate_jmp;
		if (insn->immediate.nbytes == 1)
			p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
		else if (insn->immediate.nbytes == 2)
			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
		else
			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
	case 0x70 ... 0x7f:
		/* 1-byte conditional jump */
		p->ainsn.emulate_op = kprobe_emulate_jcc;
		p->ainsn.jcc.type = opcode & 0xf;
		p->ainsn.rel32 = insn->immediate.value;
		break;
	case 0x0f:
		opcode = insn->opcode.bytes[1];
		if ((opcode & 0xf0) == 0x80) {
			/* 2-byte conditional jump */
			p->ainsn.emulate_op = kprobe_emulate_jcc;
			p->ainsn.jcc.type = opcode & 0xf;
			if (insn->immediate.nbytes == 2)
				p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
			else
				p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		} else if (opcode == 0x01 &&
			   X86_MODRM_REG(insn->modrm.bytes[0]) == 0 &&
			   X86_MODRM_MOD(insn->modrm.bytes[0]) == 3) {
			/* VM extensions - not supported */
			return -EOPNOTSUPP;
		}
		break;
	case 0xe0:	/* Loop NZ */
	case 0xe1:	/* Loop */
	case 0xe2:	/* Loop */
	case 0xe3:	/* J*CXZ */
		p->ainsn.emulate_op = kprobe_emulate_loop;
		p->ainsn.loop.type = opcode & 0x3;
		p->ainsn.loop.asize = insn->addr_bytes * 8;
		p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
		break;
	case 0xff:
		/*
		 * Since 0xff is an opcode extension group, the actual
		 * instruction is determined by the ModR/M byte.
		 */
		opcode = insn->modrm.bytes[0];
		switch (X86_MODRM_REG(opcode)) {
		case 0b010:	/* FF /2, call near, absolute indirect */
			p->ainsn.emulate_op = kprobe_emulate_call_indirect;
			break;
		case 0b100:	/* FF /4, jmp near, absolute indirect */
			p->ainsn.emulate_op = kprobe_emulate_jmp_indirect;
			break;
		case 0b011:	/* FF /3, call far, absolute indirect */
		case 0b101:	/* FF /5, jmp far, absolute indirect */
			return -EOPNOTSUPP;
		}

		if (!p->ainsn.emulate_op)
			break;

		if (insn->addr_bytes != sizeof(unsigned long))
			return -EOPNOTSUPP;	/* Don't support different size */
		if (X86_MODRM_MOD(opcode) != 3)
			return -EOPNOTSUPP;	/* TODO: support memory addressing */

		p->ainsn.indirect.reg = X86_MODRM_RM(opcode);
#ifdef CONFIG_X86_64
		if (X86_REX_B(insn->rex_prefix.value))
			p->ainsn.indirect.reg += 8;
#endif
		break;
	default:
		break;
	}
	p->ainsn.size = insn->length;

	return 0;
}
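
/*
 * Decoding example for the 0xff group (hypothetical instruction): the
 * encoding "41 ff d2" (call *%r10) has ModR/M 0xd2, so mod == 0b11
 * (register operand), reg == 0b010 (FF /2, near indirect call) and
 * rm == 0b010, which REX.B promotes to r10.  prepare_emulation() thus
 * selects kprobe_emulate_call_indirect with indirect.reg == 10.
 */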

static int arch_copy_kprobe(struct kprobe *p)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int ret, len;

	/* Copy the instruction, recovering it if another kprobe has modified it. */
	len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
	if (!len)
		return -EINVAL;

	/* Analyze the opcode and set up the emulate functions */
	ret = prepare_emulation(p, &insn);
	if (ret < 0)
		return ret;

	/* Add an int3 for single-stepping, or a booster jmp */
	len = prepare_singlestep(buf, p, &insn);
	if (len < 0)
		return len;

	/* Also, the displacement change doesn't affect the first byte */
	p->opcode = buf[0];

	p->ainsn.tp_len = len;
	perf_event_text_poke(p->ainsn.insn, NULL, 0, buf, len);

	/* OK, write back the instruction(s) into the ROX insn buffer */
	text_poke(p->ainsn.insn, buf, len);

	return 0;
}

int arch_prepare_kprobe(struct kprobe *p)
{
	int ret;

	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;

	memset(&p->ainsn, 0, sizeof(p->ainsn));

	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	ret = arch_copy_kprobe(p);
	if (ret) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}

	return ret;
}

void arch_arm_kprobe(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	text_poke(p->addr, &int3, 1);
	text_poke_sync();
	perf_event_text_poke(p->addr, &p->opcode, 1, &int3, 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	perf_event_text_poke(p->addr, &int3, 1, &p->opcode, 1);
	text_poke(p->addr, &p->opcode, 1);
	text_poke_sync();
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		/* Record the perf event before freeing the slot */
		perf_event_text_poke(p->ainsn.insn, p->ainsn.insn,
				     p->ainsn.tp_len, NULL, 0);
		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
		p->ainsn.insn = NULL;
	}
}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & X86_EFLAGS_IF);
}

static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		/* This will restore both kcb and current_kprobe */
		restore_previous_kprobe(kcb);
	} else {
		/*
		 * Always update the kcb status because
		 * reset_current_kprobe() doesn't update kcb.
		 */
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		if (cur->post_handler)
			cur->post_handler(cur, regs, 0);
		reset_current_kprobe();
	}
}
NOKPROBE_SYMBOL(kprobe_post_process);

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPTION)
	if (p->ainsn.boostable) {
		/* Boost up -- we can execute the copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;

	if (p->ainsn.emulate_op) {
		p->ainsn.emulate_op(p, regs);
		kprobe_post_process(p, regs, kcb);
		return;
	}

	/* Disable interrupts and point the ip register at the trampoline */
	regs->flags &= ~X86_EFLAGS_IF;
	regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.  We also don't use the trap flag; instead,
 * another "int3" is placed right after the copied instruction.
 * Unlike trap single-stepping, "int3" single-stepping cannot handle
 * instructions that change the ip register, e.g. jmp, call and
 * conditional jmp, nor instructions that change the IF flag, because
 * interrupts must stay disabled around the single-step.
 * Such instructions are emulated in software; the others are
 * single-stepped using "int3".
 *
 * When the second "int3" is handled, regs->ip and regs->flags need to
 * be adjusted so that we can resume execution at the correct code.
 */
static void resume_singlestep(struct kprobe *p, struct pt_regs *regs,
			      struct kprobe_ctlblk *kcb)
{
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;

	/* Restore the saved interrupt flag and ip register */
	regs->flags |= kcb->kprobe_saved_flags;
	/* Note that regs->ip points past the executed int3, so step back */
	regs->ip += (orig_ip - copy_ip) - INT3_INSN_SIZE;
}
NOKPROBE_SYMBOL(resume_singlestep);
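
/*
 * The fixup arithmetic, spelled out with hypothetical addresses: suppose
 * a 3-byte instruction probed at orig_ip == 0xffffffff81000000 was copied
 * to copy_ip == 0xffffffff82000000.  The trailing int3 traps with
 * regs->ip == copy_ip + 3 + 1.  Adding (orig_ip - copy_ip) relocates that
 * back to orig_ip + 3 + 1, and subtracting INT3_INSN_SIZE yields
 * orig_ip + 3: the instruction right after the probed one.
 */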

/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in .kprobes.text section.
		 * Raise a BUG or we'll continue in an endless reentering loop
		 * and eventually a stack overflow.
		 */
		pr_err("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);

static nokprobe_inline int kprobe_is_ss(struct kprobe_ctlblk *kcb)
{
	return (kcb->kprobe_status == KPROBE_HIT_SS ||
		kcb->kprobe_status == KPROBE_REENTER);
}

/*
 * Interrupts are disabled on entry, as int3 uses an interrupt gate, and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

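	/*
	 * regs->ip points just past the trapping int3, so step back one
	 * byte to recover the address the breakpoint was planted at.
	 */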
	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire duration of kprobe
	 * processing. Since int3 and the debug trap disable irqs, and we
	 * clear IF while single-stepping, this must not be preemptible.
	 */

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, that means
			 * the user handler set up the registers to exit to
			 * another instruction, so we must skip the single
			 * stepping.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
			return 1;
		}
	} else if (kprobe_is_ss(kcb)) {
		p = kprobe_running();
		if ((unsigned long)p->ainsn.insn < regs->ip &&
		    (unsigned long)p->ainsn.insn + MAX_INSN_SIZE > regs->ip) {
			/* Most probably this is the second int3 of the single-step */
			resume_singlestep(p, regs, kcb);
			kprobe_post_process(p, regs, kcb);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen during single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point the ip back to the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;

		/*
		 * If the IF flag was set before the kprobe hit,
		 * don't touch it:
		 */
		regs->flags |= kcb->kprobe_old_flags;

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

int __init arch_populate_kprobe_blacklist(void)
{
	return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					 (unsigned long)__entry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}