xref: /linux/arch/x86/kernel/kprobes/opt.c (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"

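/*
 * Recover the original bytes at 'addr' if they were clobbered by an
 * optprobe's jump: a jump-optimized kprobe replaces the byte at kp->addr
 * with the JMP32 opcode and the following DISP32_SIZE bytes with the
 * displacement, so any optimized probe within JMP32_INSN_SIZE bytes
 * before 'addr' may own the bytes at 'addr'.
 */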
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < JMP32_INSN_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op is optimized or queued for unoptimizing */
			if (list_empty(&op->list) || optprobe_queued_unopt(op))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe is optimized, the original bytes have been
	 * overwritten by the jump destination address. In that case, the
	 * original bytes must be recovered from the op->optinsn.copied_insn
	 * buffer.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (addr == (unsigned long)kp->addr) {
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
	}

	return (unsigned long)buf;
}

static void synthesize_clac(kprobe_opcode_t *addr)
{
	/*
	 * Can't be static_cpu_has() due to how objtool treats this feature bit.
	 * This isn't a fast path anyway.
	 */
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return;

	/* Replace the NOP3 with CLAC */
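	/* CLAC encodes as 0f 01 ca and clears EFLAGS.AC, keeping SMAP armed. */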
	addr[0] = 0x0f;
	addr[1] = 0x01;
	addr[2] = 0xca;
}

/* Insert a 'mov' instruction which loads a pointer into eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
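	/*
	 * 0x48 0xbf is 'movabs $val, %rdi' (REX.W + MOV r64, imm64) on
	 * x86-64; 0xb8 is 'mov $val, %eax' (MOV r32, imm32) on x86-32.
	 * The immediate itself is stored below.
	 */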
#ifdef CONFIG_X86_64
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}

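/*
 * The template below builds a 'struct pt_regs' frame on the stack and calls
 * optimized_callback(op, regs). The NOP placeholders at the _clac, _val and
 * _call markers are patched at preparation time by synthesize_clac(),
 * synthesize_set_arg1() and synthesize_relcall() respectively.
 */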
asm (
			".pushsection .rodata\n"
			".global optprobe_template_entry\n"
			"optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
			"       pushq $" __stringify(__KERNEL_DS) "\n"
			/* Save 'sp - 8'; this will be fixed later. */
			"	pushq %rsp\n"
			"	pushfq\n"
			".global optprobe_template_clac\n"
			"optprobe_template_clac:\n"
			ASM_NOP3
			SAVE_REGS_STRING
			"	movq %rsp, %rsi\n"
			".global optprobe_template_val\n"
			"optprobe_template_val:\n"
			ASM_NOP5
			ASM_NOP5
			".global optprobe_template_call\n"
			"optprobe_template_call:\n"
			ASM_NOP5
			/* Copy 'regs->flags' into 'regs->ss'. */
			"	movq 18*8(%rsp), %rdx\n"
			"	movq %rdx, 20*8(%rsp)\n"
			RESTORE_REGS_STRING
			/* Skip 'regs->flags' and 'regs->sp'. */
			"	addq $16, %rsp\n"
			/* And pop the flags register from 'regs->ss'. */
			"	popfq\n"
#else /* CONFIG_X86_32 */
			"	pushl %ss\n"
			/* Save 'sp - 4'; this will be fixed later. */
			"	pushl %esp\n"
			"	pushfl\n"
			".global optprobe_template_clac\n"
			"optprobe_template_clac:\n"
			ASM_NOP3
			SAVE_REGS_STRING
			"	movl %esp, %edx\n"
			".global optprobe_template_val\n"
			"optprobe_template_val:\n"
			ASM_NOP5
			".global optprobe_template_call\n"
			"optprobe_template_call:\n"
			ASM_NOP5
			/* Copy 'regs->flags' into 'regs->ss'. */
			"	movl 14*4(%esp), %edx\n"
			"	movl %edx, 16*4(%esp)\n"
			RESTORE_REGS_STRING
			/* Skip 'regs->flags' and 'regs->sp'. */
			"	addl $8, %esp\n"
			/* And pop the flags register from 'regs->ss'. */
			"	popfl\n"
#endif
			".global optprobe_template_end\n"
			"optprobe_template_end:\n"
			".popsection\n");

#define TMPL_CLAC_IDX \
	((long)optprobe_template_clac - (long)optprobe_template_entry)
#define TMPL_MOVE_IDX \
	((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
	((long)optprobe_template_end - (long)optprobe_template_entry)

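/*
 * The TMPL_*_IDX values are byte offsets of the patch sites (and of the
 * template end) within the template copy placed at the start of each
 * optinsn slot.
 */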
/* Optimized kprobe callback function: called from the optinsn slot */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	/* This is possible if op is queued for delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
		/* Adjust the stack pointer: the template saved 'sp - sizeof(long)' */
		regs->sp += sizeof(long);
		/* Save skipped registers */
		regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);

static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
	struct insn insn;
	int len = 0, ret;

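	/*
	 * Copy whole instructions until at least JMP32_INSN_SIZE bytes are
	 * covered: the 5-byte jump must not cut an instruction in half, and
	 * each copied instruction must be one that can_boost() allows to run
	 * from the out-of-line buffer.
	 */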
	while (len < JMP32_INSN_SIZE) {
		ret = __copy_instruction(dest + len, src + len, real + len, &insn);
		if (!ret || !can_boost(&insn, src + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1) ||
	    static_call_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

/* Check whether insn is an indirect jump */
static int insn_is_indirect_jump(struct insn *insn)
{
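	/*
	 * Opcode 0xff with ModRM.reg of 4 (near) or 5 (far) is an indirect
	 * JMP through a register or memory operand; (reg & 6) == 4 matches
	 * both encodings. 0xea is the direct far JMP ptr16:32.
	 */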
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}

/* Check whether insn jumps into the specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
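	/* All accepted cases are IP-relative: target = next instruction + immediate. */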
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}

/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Look up the symbol containing addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling and registers setup.
	 */
	if (((paddr >= (unsigned long)__entry_text_start) &&
	     (paddr <  (unsigned long)__entry_text_end)))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < JMP32_INSN_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;
		int ret;

		if (search_exception_tables(addr))
			/*
			 * Since some fixup code will jump into this function,
			 * we can't optimize any kprobe in this function.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;

		ret = insn_decode_kernel(&insn, (void *)recovered_insn);
		if (ret < 0)
			return 0;
#ifdef CONFIG_KGDB
		/*
		 * If there is a dynamically installed kgdb sw breakpoint,
		 * this function should not be probed.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
		    kgdb_has_hit_break(addr))
			return 0;
#endif
		/* Recover the address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/*
		 * Check that no instruction jumps into the target, directly or
		 * indirectly.
		 *
		 * The indirect case is present to handle code with jump
		 * tables. When the kernel uses retpolines, the check should in
		 * theory additionally look for jumps to indirect thunks.
		 * However, a kernel built with retpolines or IBT has jump
		 * tables disabled, so the check can be skipped altogether.
		 */
		if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) &&
		    !IS_ENABLED(CONFIG_X86_KERNEL_IBT) &&
		    insn_is_indirect_jump(&insn))
			return 0;
		if (insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
					 DISP32_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

/* Check whether the optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

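	/*
	 * i starts at 1: the kprobe at kp.addr is this optprobe itself; only
	 * other armed kprobes inside the displaced region block optimization.
	 */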
	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disarmed(p))
			return -EEXIST;
	}

	return 0;
}

/* Check whether addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 kprobe_opcode_t *addr)
{
	return (op->kp.addr <= addr &&
		op->kp.addr + op->optinsn.size > addr);
}

/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	u8 *slot = op->optinsn.insn;
	if (slot) {
		int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;

		/* Record the perf event before freeing the slot */
		if (dirty)
			perf_event_text_poke(slot, slot, len, NULL, 0);

		free_optinsn_slot(slot, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy the instructions that the jump will replace.
 * The target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf = NULL, *slot;
	int ret, len;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	op->optinsn.insn = slot = get_optinsn_slot();
	if (!slot) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Verify that the address gap is within the +/-2GB range, because
	 * this uses a relative jump.
	 */
	rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
	if (abs(rel) > 0x7fffffff) {
		ret = -ERANGE;
		goto err;
	}

	/* Copy the arch-dependent instance from the template */
	memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
					  slot + TMPL_END_IDX);
	if (ret < 0)
		goto err;
	op->optinsn.size = ret;
	len = TMPL_END_IDX + op->optinsn.size;

	synthesize_clac(buf + TMPL_CLAC_IDX);

	/* Set the probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set the probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX,
			   slot + TMPL_CALL_IDX, optimized_callback);

	/* Set the returning jmp instruction at the tail of the out-of-line buffer */
	synthesize_reljump(buf + len, slot + len,
			   (u8 *)op->kp.addr + op->optinsn.size);
	len += JMP32_INSN_SIZE;

	/*
	 * Note: len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is also
	 * used in __arch_remove_optimized_kprobe().
	 */
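	/*
	 * Resulting slot layout:
	 *   [0, TMPL_END_IDX)                    template (pt_regs + callback)
	 *   [TMPL_END_IDX, TMPL_END_IDX + size)  copied original instructions
	 *   [TMPL_END_IDX + size, len)           JMP32 back to kp.addr + size
	 */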

	/* We have to use text_poke() for the instruction buffer because it is RO */
	perf_event_text_poke(slot, NULL, 0, buf, len);
	text_poke(slot, buf, len);

	ret = 0;
out:
	kfree(buf);
	return ret;

err:
	__arch_remove_optimized_kprobe(op, 0);
	goto out;
}

/*
 * Replace breakpoints (INT3) with relative jumps (JMP.d32).
 * The caller must hold both kprobe_mutex and text_mutex.
 *
 * The caller will have installed a regular kprobe and after that issued
 * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
 * the 4 bytes after the INT3 are unused and can now be overwritten.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buff[JMP32_INSN_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
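		/* The JMP32 displacement is relative to the byte after the 5-byte jump. */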
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + JMP32_INSN_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Back up the instructions that will be replaced by the jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
		       DISP32_SIZE);

		insn_buff[0] = JMP32_INSN_OPCODE;
		*(s32 *)(&insn_buff[1]) = rel;

		smp_text_poke_single(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);

		list_del_init(&op->list);
	}
}

/*
 * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
 *
 * After that, we can restore the 4 bytes after the INT3 to undo what
 * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
 * unused once the INT3 lands.
 */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
	u8 old[JMP32_INSN_SIZE];
	u8 *addr = op->kp.addr;

	memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
	memcpy(new + INT3_INSN_SIZE,
	       op->optinsn.copied_insn,
	       JMP32_INSN_SIZE - INT3_INSN_SIZE);

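	/*
	 * Land the INT3 first and sync all CPUs, then restore the tail bytes
	 * and sync again, so no CPU can observe a half-written instruction.
	 */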
	text_poke(addr, new, INT3_INSN_SIZE);
	smp_text_poke_sync_each_cpu();
	text_poke(addr + INT3_INSN_SIZE,
		  new + INT3_INSN_SIZE,
		  JMP32_INSN_SIZE - INT3_INSN_SIZE);
	smp_text_poke_sync_each_cpu();

	perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
}

/*
 * Recover the original instructions and breakpoints from relative jumps.
 * The caller must hold kprobe_mutex.
 */
void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run the optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through the copied instructions, skipping the template */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);