xref: /linux/arch/arm64/kernel/probes/decode-insn.c (revision 071bf69a0220253a44acb8b2a27f7a262b9a46bf)
1 /*
2  * arch/arm64/kernel/probes/decode-insn.c
3  *
4  * Copyright (C) 2013 Linaro Limited.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License for more details.
14  */
15 
16 #include <linux/kernel.h>
17 #include <linux/kprobes.h>
18 #include <linux/module.h>
19 #include <asm/kprobes.h>
20 #include <asm/insn.h>
21 #include <asm/sections.h>
22 
23 #include "decode-insn.h"
24 #include "simulate-insn.h"
25 
/*
 * Decide whether @insn can safely be single-stepped out-of-line from an
 * XOL slot. Returns false for instructions that the caller must instead
 * reject or simulate.
 */
26 static bool __kprobes aarch64_insn_is_steppable(u32 insn)
27 {
28 	/*
29 	 * Branch instructions will write a new value into the PC which is
30 	 * likely to be relative to the XOL address and therefore invalid.
31 	 * Deliberate generation of an exception during stepping is also not
32 	 * currently safe. Lastly, MSR instructions can do any number of nasty
33 	 * things we can't handle during single-stepping.
34 	 */
35 	if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS) {
36 		if (aarch64_insn_is_branch(insn) ||
37 		    aarch64_insn_is_msr_imm(insn) ||
38 		    aarch64_insn_is_msr_reg(insn) ||
39 		    aarch64_insn_is_exception(insn) ||
40 		    aarch64_insn_is_eret(insn))
41 			return false;
42 
43 		/*
44 		 * The MRS instruction may not return a correct value when
45 		 * executing in the single-stepping environment. We do make one
46 		 * exception, for reading the DAIF bits.
47 		 */
48 		if (aarch64_insn_is_mrs(insn))
49 			return aarch64_insn_extract_system_reg(insn)
50 			     != AARCH64_INSN_SPCLREG_DAIF;
51 
52 		/*
53 		 * The HINT instruction is problematic when single-stepping,
54 		 * except for the NOP case.
55 		 */
56 		if (aarch64_insn_is_hint(insn))
57 			return aarch64_insn_is_nop(insn);
58 
59 		return true;
60 	}
61 
62 	/*
63 	 * Instructions which load PC relative literals are not going to work
64 	 * when executed from an XOL slot. Instructions doing an exclusive
65 	 * load/store are not going to complete successfully when single-step
66 	 * exception handling happens in the middle of the sequence.
67 	 */
68 	if (aarch64_insn_uses_literal(insn) ||
69 	    aarch64_insn_is_exclusive(insn))
70 		return false;
71 
72 	return true;
73 }
74 
75 /* Return:
76  *   INSN_REJECTED     If instruction is one not allowed to kprobe,
77  *   INSN_GOOD         If instruction is supported and uses instruction slot,
78  *   INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
 *
 * For the INSN_GOOD_NO_SLOT case, asi->handler is set to the routine that
 * simulates the instruction's effect instead of single-stepping it.
79  */
80 static enum kprobe_insn __kprobes
81 arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
82 {
83 	/*
84 	 * Instructions reading or modifying the PC won't work from the XOL
85 	 * slot.
86 	 */
87 	if (aarch64_insn_is_steppable(insn))
88 		return INSN_GOOD;
89 
 	/* Not steppable: pick a simulation handler for the PC-relative forms
 	 * we know how to emulate. */
90 	if (aarch64_insn_is_bcond(insn)) {
91 		asi->handler = simulate_b_cond;
92 	} else if (aarch64_insn_is_cbz(insn) ||
93 	    aarch64_insn_is_cbnz(insn)) {
94 		asi->handler = simulate_cbz_cbnz;
95 	} else if (aarch64_insn_is_tbz(insn) ||
96 	    aarch64_insn_is_tbnz(insn)) {
97 		asi->handler = simulate_tbz_tbnz;
98 	} else if (aarch64_insn_is_adr_adrp(insn)) {
99 		asi->handler = simulate_adr_adrp;
100 	} else if (aarch64_insn_is_b(insn) ||
101 	    aarch64_insn_is_bl(insn)) {
102 		asi->handler = simulate_b_bl;
103 	} else if (aarch64_insn_is_br(insn) ||
104 	    aarch64_insn_is_blr(insn) ||
105 	    aarch64_insn_is_ret(insn)) {
106 		asi->handler = simulate_br_blr_ret;
107 	} else if (aarch64_insn_is_ldr_lit(insn)) {
108 		asi->handler = simulate_ldr_literal;
109 	} else if (aarch64_insn_is_ldrsw_lit(insn)) {
110 		asi->handler = simulate_ldrsw_literal;
111 	} else {
112 		/*
113 		 * Instruction cannot be stepped out-of-line and we don't
114 		 * (yet) simulate it.
115 		 */
116 		return INSN_REJECTED;
117 	}
118 
119 	return INSN_GOOD_NO_SLOT;
120 }
121 
/*
 * Scan backwards from @scan_start down to (but not including) @scan_end.
 * Returns true if a load-exclusive is found before any store-exclusive,
 * i.e. the probed address appears to lie inside an exclusive load/store
 * sequence, where single-stepping would prevent the sequence from ever
 * completing (see aarch64_insn_is_steppable()).
 */
122 static bool __kprobes
123 is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
124 {
125 	while (scan_start > scan_end) {
126 		/*
127 		 * atomic region starts from exclusive load and ends with
128 		 * exclusive store.
129 		 */
130 		if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start)))
131 			return false;
132 		else if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start)))
133 			return true;
134 		scan_start--;
135 	}
136 
 	/* Neither exclusive form seen within the window: not atomic. */
137 	return false;
138 }
139 
/*
 * Decode the instruction at @addr for kprobing. Rejects the probe when
 * the instruction itself cannot be handled, or when @addr looks like it
 * sits inside an exclusive load/store sequence — determined by scanning
 * up to MAX_ATOMIC_CONTEXT_SIZE instructions backwards, with the scan
 * window clamped so it never runs below the start of kernel or module
 * text.
 */
140 enum kprobe_insn __kprobes
141 arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
142 {
143 	enum kprobe_insn decoded;
144 	kprobe_opcode_t insn = le32_to_cpu(*addr);
145 	kprobe_opcode_t *scan_start = addr - 1;
146 	kprobe_opcode_t *scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
147 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
148 	struct module *mod;
149 #endif
150 
 	/* Clamp the backward scan to the start of the enclosing text region:
 	 * core kernel text first, then (if configured) module init/core text. */
151 	if (addr >= (kprobe_opcode_t *)_text &&
152 	    scan_end < (kprobe_opcode_t *)_text)
153 		scan_end = (kprobe_opcode_t *)_text;
154 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
155 	else {
 		/* Hold off preemption so the module (and its text) cannot be
 		 * freed between __module_address() and the layout reads. */
156 		preempt_disable();
157 		mod = __module_address((unsigned long)addr);
158 		if (mod && within_module_init((unsigned long)addr, mod) &&
159 			!within_module_init((unsigned long)scan_end, mod))
160 			scan_end = (kprobe_opcode_t *)mod->init_layout.base;
161 		else if (mod && within_module_core((unsigned long)addr, mod) &&
162 			!within_module_core((unsigned long)scan_end, mod))
163 			scan_end = (kprobe_opcode_t *)mod->core_layout.base;
164 		preempt_enable();
165 	}
166 #endif
167 	decoded = arm_probe_decode_insn(insn, asi);
168 
 	/* Reject both undecodable instructions and probes placed inside an
 	 * exclusive-monitor sequence. */
169 	if (decoded == INSN_REJECTED ||
170 			is_probed_address_atomic(scan_start, scan_end))
171 		return INSN_REJECTED;
172 
173 	return decoded;
174 }
175
175