// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/probes/decode-insn.c
 *
 * Copyright (C) 2013 Linaro Limited.
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <asm/insn.h>
#include <asm/sections.h>

#include "decode-insn.h"
#include "simulate-insn.h"

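/*
 * Decide whether @insn can safely be single-stepped out of line from the
 * XOL slot.
 */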
static bool __kprobes aarch64_insn_is_steppable(u32 insn)
{
        /*
         * Branch instructions will write a new value into the PC which is
         * likely to be relative to the XOL address and therefore invalid.
         * Deliberate generation of an exception during stepping is also not
         * currently safe. Lastly, MSR instructions can do any number of nasty
         * things we can't handle during single-stepping.
         */
        if (aarch64_insn_is_class_branch_sys(insn)) {
                if (aarch64_insn_is_branch(insn) ||
                    aarch64_insn_is_msr_imm(insn) ||
                    aarch64_insn_is_msr_reg(insn) ||
                    aarch64_insn_is_exception(insn) ||
                    aarch64_insn_is_eret(insn) ||
                    aarch64_insn_is_eret_auth(insn))
                        return false;

                /*
                 * The MRS instruction may not return a correct value when
                 * executing in the single-stepping environment. We do make one
                 * exception, for reading the DAIF bits.
                 */
                if (aarch64_insn_is_mrs(insn))
                        return aarch64_insn_extract_system_reg(insn)
                                != AARCH64_INSN_SPCLREG_DAIF;

                /*
                 * A HINT instruction is steppable only if it is on the
                 * whitelist; all other HINT instructions are blocked from
                 * single-stepping since they may raise exceptions or cause
                 * other unintended behaviour.
                 */
                if (aarch64_insn_is_hint(insn))
                        return aarch64_insn_is_steppable_hint(insn);

                return true;
        }

        /*
         * Instructions which load PC-relative literals are not going to work
         * when executed from an XOL slot. Instructions doing an exclusive
         * load/store are not going to complete successfully when single-step
         * exception handling happens in the middle of the sequence. Memory
         * copy/set instructions require that all three instructions be placed
         * consecutively in memory.
         */
        if (aarch64_insn_uses_literal(insn) ||
            aarch64_insn_is_exclusive(insn) ||
            aarch64_insn_is_mops(insn))
                return false;

        return true;
}

/* Return:
 *   INSN_REJECTED      If the instruction is not allowed to be probed,
 *   INSN_GOOD          If the instruction is supported and uses its XOL slot,
 *   INSN_GOOD_NO_SLOT  If the instruction is supported but doesn't use its slot.
 */
enum probe_insn __kprobes
arm_probe_decode_insn(u32 insn, struct arch_probe_insn *api)
{
        /*
         * While a 'nop' instruction can execute from the out-of-line slot,
         * simulating it in the breakpoint handler offers better performance.
         */
        if (aarch64_insn_is_nop(insn)) {
                api->handler = simulate_nop;
                return INSN_GOOD_NO_SLOT;
        }

        /*
         * Instructions reading or modifying the PC won't work from the XOL
         * slot.
         */
        if (aarch64_insn_is_steppable(insn))
                return INSN_GOOD;

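        /*
         * The remaining control-flow and PC-relative instructions cannot be
         * stepped from the XOL slot either, so they are simulated in the
         * break handler instead of being single-stepped.
         */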
        if (aarch64_insn_is_bcond(insn)) {
                api->handler = simulate_b_cond;
        } else if (aarch64_insn_is_cbz(insn) ||
                   aarch64_insn_is_cbnz(insn)) {
                api->handler = simulate_cbz_cbnz;
        } else if (aarch64_insn_is_tbz(insn) ||
                   aarch64_insn_is_tbnz(insn)) {
                api->handler = simulate_tbz_tbnz;
        } else if (aarch64_insn_is_adr_adrp(insn)) {
                api->handler = simulate_adr_adrp;
        } else if (aarch64_insn_is_b(insn) ||
                   aarch64_insn_is_bl(insn)) {
                api->handler = simulate_b_bl;
        } else if (aarch64_insn_is_br(insn) ||
                   aarch64_insn_is_blr(insn)) {
                api->handler = simulate_br_blr;
        } else if (aarch64_insn_is_ret(insn)) {
                api->handler = simulate_ret;
        } else {
                /*
                 * Instruction cannot be stepped out-of-line and we don't
                 * (yet) simulate it.
                 */
                return INSN_REJECTED;
        }

        return INSN_GOOD_NO_SLOT;
}

#ifdef CONFIG_KPROBES
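/*
 * Scan backwards from just before the probe point looking for an exclusive
 * load/store pair. Returns true if the probed instruction appears to sit
 * inside an LL/SC (load-exclusive/store-exclusive) sequence, which must not
 * be interrupted by a breakpoint.
 */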
static bool __kprobes
is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
{
        while (scan_start >= scan_end) {
                /*
                 * An atomic region starts with an exclusive load and ends
                 * with the matching exclusive store.
                 */
                if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start)))
                        return false;
                else if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start)))
                        return true;
                scan_start--;
        }

        return false;
}

enum probe_insn __kprobes
arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
{
        enum probe_insn decoded;
        u32 insn = le32_to_cpu(*addr);
        kprobe_opcode_t *scan_end = NULL;
        unsigned long size = 0, offset = 0;
        struct arch_probe_insn *api = &asi->api;

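        /*
         * LDR/LDRSW (literal) are PC-relative and cannot run from the XOL
         * slot, so they are simulated rather than single-stepped.
         */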
        if (aarch64_insn_is_ldr_lit(insn)) {
                api->handler = simulate_ldr_literal;
                decoded = INSN_GOOD_NO_SLOT;
        } else if (aarch64_insn_is_ldrsw_lit(insn)) {
                api->handler = simulate_ldrsw_literal;
                decoded = INSN_GOOD_NO_SLOT;
        } else {
                decoded = arm_probe_decode_insn(insn, &asi->api);
        }

        /*
         * If there's a symbol defined in front of and near enough to the
         * probe address, assume it is the entry point to this code and use
         * it to further limit how far back we search when determining
         * whether we're in an atomic sequence. If we could not find any
         * symbol, skip the atomic test altogether, as we could otherwise
         * end up searching irrelevant text/literals. KPROBES depends on
         * KALLSYMS so this last case should never happen.
         */
        if (kallsyms_lookup_size_offset((unsigned long) addr, &size, &offset)) {
                if (offset < (MAX_ATOMIC_CONTEXT_SIZE * sizeof(kprobe_opcode_t)))
                        scan_end = addr - (offset / sizeof(kprobe_opcode_t));
                else
                        scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
        }

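        /*
         * Reject the probe if it would land inside an exclusive load/store
         * sequence: taking the breakpoint exception in the middle of the
         * sequence would keep it from ever completing successfully.
         */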
        if (decoded != INSN_REJECTED && scan_end)
                if (is_probed_address_atomic(addr - 1, scan_end))
                        return INSN_REJECTED;

        return decoded;
}
#endif