// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/probes/decode-insn.c
 *
 * Copyright (C) 2013 Linaro Limited.
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <asm/insn.h>
#include <asm/sections.h>

#include "decode-insn.h"
#include "simulate-insn.h"

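/*
 * Decide whether an instruction can safely be single-stepped from an
 * out-of-line (XOL) slot, i.e. executed at an address other than its
 * original location without changing its behaviour.
 */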
static bool __kprobes aarch64_insn_is_steppable(u32 insn)
{
	/*
	 * Branch instructions will write a new value into the PC which is
	 * likely to be relative to the XOL address and therefore invalid.
	 * Deliberate generation of an exception during stepping is also not
	 * currently safe. Lastly, MSR instructions can do any number of nasty
	 * things we can't handle during single-stepping.
	 */
	if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS) {
		if (aarch64_insn_is_branch(insn) ||
		    aarch64_insn_is_msr_imm(insn) ||
		    aarch64_insn_is_msr_reg(insn) ||
		    aarch64_insn_is_exception(insn) ||
		    aarch64_insn_is_eret(insn))
			return false;

		/*
		 * The MRS instruction may not return a correct value when
		 * executing in the single-stepping environment. We do make one
		 * exception, for reading the DAIF bits.
		 */
		if (aarch64_insn_is_mrs(insn))
			return aarch64_insn_extract_system_reg(insn)
			     != AARCH64_INSN_SPCLREG_DAIF;

		/*
		 * The HINT instruction is problematic when single-stepping,
		 * except for the NOP case.
		 */
		if (aarch64_insn_is_hint(insn))
			return aarch64_insn_is_nop(insn);

		return true;
	}

	/*
	 * Instructions which load PC relative literals are not going to work
	 * when executed from an XOL slot. Instructions doing an exclusive
	 * load/store are not going to complete successfully when single-step
	 * exception handling happens in the middle of the sequence.
	 */
	if (aarch64_insn_uses_literal(insn) ||
	    aarch64_insn_is_exclusive(insn))
		return false;

	return true;
}

/* Return:
 *   INSN_REJECTED     If the instruction is not allowed to be probed,
 *   INSN_GOOD         If the instruction is supported and uses an instruction slot,
 *   INSN_GOOD_NO_SLOT If the instruction is supported but doesn't use its slot.
 */
enum probe_insn __kprobes
arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
{
	/*
	 * Instructions reading or modifying the PC won't work from the XOL
	 * slot.
	 */
	if (aarch64_insn_is_steppable(insn))
		return INSN_GOOD;

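	/*
	 * The instruction cannot be stepped out-of-line, but several
	 * PC-relative and branching forms are simple enough to emulate:
	 * record the matching simulate_*() handler and report that no
	 * XOL slot is needed.
	 */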
	if (aarch64_insn_is_bcond(insn)) {
		api->handler = simulate_b_cond;
	} else if (aarch64_insn_is_cbz(insn) ||
	    aarch64_insn_is_cbnz(insn)) {
		api->handler = simulate_cbz_cbnz;
	} else if (aarch64_insn_is_tbz(insn) ||
	    aarch64_insn_is_tbnz(insn)) {
		api->handler = simulate_tbz_tbnz;
	} else if (aarch64_insn_is_adr_adrp(insn)) {
		api->handler = simulate_adr_adrp;
	} else if (aarch64_insn_is_b(insn) ||
	    aarch64_insn_is_bl(insn)) {
		api->handler = simulate_b_bl;
	} else if (aarch64_insn_is_br(insn) ||
	    aarch64_insn_is_blr(insn) ||
	    aarch64_insn_is_ret(insn)) {
		api->handler = simulate_br_blr_ret;
	} else if (aarch64_insn_is_ldr_lit(insn)) {
		api->handler = simulate_ldr_literal;
	} else if (aarch64_insn_is_ldrsw_lit(insn)) {
		api->handler = simulate_ldrsw_literal;
	} else {
		/*
		 * Instruction cannot be stepped out-of-line and we don't
		 * (yet) simulate it.
		 */
		return INSN_REJECTED;
	}

	return INSN_GOOD_NO_SLOT;
}

#ifdef CONFIG_KPROBES
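/*
 * Scan backwards from the probe address towards scan_end: seeing an
 * exclusive load before an exclusive store means the probed address
 * lies inside a load-exclusive/store-exclusive sequence, which cannot
 * be single-stepped safely.
 */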
static bool __kprobes
is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
{
	while (scan_start >= scan_end) {
		/*
		 * An atomic region starts with an exclusive load and ends
		 * with an exclusive store.
		 */
		if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start)))
			return false;
		else if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start)))
			return true;
		scan_start--;
	}

	return false;
}

enum probe_insn __kprobes
arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
{
	enum probe_insn decoded;
	probe_opcode_t insn = le32_to_cpu(*addr);
	probe_opcode_t *scan_end = NULL;
	unsigned long size = 0, offset = 0;

	/*
	 * If there's a symbol defined in front of and near enough to
	 * the probe address, assume it is the entry point to this
	 * code and use it to further limit how far back we search
	 * when determining if we're in an atomic sequence. If we could
	 * not find any symbol, skip the atomic test altogether as we
	 * could otherwise end up searching irrelevant text/literals.
	 * KPROBES depends on KALLSYMS so this last case should never
	 * happen.
	 */
	if (kallsyms_lookup_size_offset((unsigned long) addr, &size, &offset)) {
		if (offset < (MAX_ATOMIC_CONTEXT_SIZE*sizeof(kprobe_opcode_t)))
			scan_end = addr - (offset / sizeof(kprobe_opcode_t));
		else
			scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
	}
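	/*
	 * scan_end now points either at the symbol's entry point or at
	 * most MAX_ATOMIC_CONTEXT_SIZE instructions before the probe,
	 * whichever is closer.
	 */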
	decoded = arm_probe_decode_insn(insn, &asi->api);

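	/*
	 * Even when the instruction itself could be probed, reject the
	 * probe if the address lies within an exclusive load/store
	 * sequence: such a sequence will not complete successfully once
	 * a debug exception is taken in the middle of it.
	 */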
	if (decoded != INSN_REJECTED && scan_end)
		if (is_probed_address_atomic(addr - 1, scan_end))
			return INSN_REJECTED;

	return decoded;
}
#endif