xref: /linux/arch/x86/include/asm/insn.h (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 #ifndef _ASM_X86_INSN_H
3 #define _ASM_X86_INSN_H
4 /*
5  * x86 instruction analysis
6  *
7  * Copyright (C) IBM Corporation, 2009
8  */
9 
10 #include <asm/byteorder.h>
11 /* insn_attr_t is defined in inat.h */
12 #include <asm/inat.h> /* __ignore_sync_check__ */
13 
14 #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
15 
/*
 * One decoded instruction field. On little-endian builds the byte
 * array can alias the integer value directly, so no separate
 * little-endian image is needed.
 */
struct insn_field {
	union {
		insn_value_t value;	/* field value, native endian */
		insn_byte_t bytes[4];	/* raw bytes, alias of value */
	};
	/* !0 if we've run insn_get_xxx() for this field */
	unsigned char got;
	unsigned char nbytes;	/* number of bytes this field occupies */
};
25 
/* Set field @p to value @v occupying @n bytes (bytes[] aliases value). */
static inline void insn_field_set(struct insn_field *p, insn_value_t v,
				  unsigned char n)
{
	p->value = v;
	p->nbytes = n;
}
32 
/* Store byte @v at index @n; @p->value is updated implicitly via the union. */
static inline void insn_set_byte(struct insn_field *p, unsigned char n,
				 insn_byte_t v)
{
	p->bytes[n] = v;
}
38 
39 #else
40 
/*
 * Big-endian build: bytes[] must stay in instruction (little-endian)
 * order, so it aliases a separate little-endian image ('little') and
 * the native-endian 'value' is kept in sync explicitly by the helpers.
 */
struct insn_field {
	insn_value_t value;		/* field value, native endian */
	union {
		insn_value_t little;	/* little-endian image of value */
		insn_byte_t bytes[4];	/* raw bytes, alias of little */
	};
	/* !0 if we've run insn_get_xxx() for this field */
	unsigned char got;
	unsigned char nbytes;	/* number of bytes this field occupies */
};
51 
/* Set field @p to @v (@n bytes), keeping native and LE images in sync. */
static inline void insn_field_set(struct insn_field *p, insn_value_t v,
				  unsigned char n)
{
	p->value = v;
	p->little = __cpu_to_le32(v);	/* keep bytes[] in insn byte order */
	p->nbytes = n;
}
59 
/* Store byte @v at index @n and re-derive the native-endian value. */
static inline void insn_set_byte(struct insn_field *p, unsigned char n,
				 insn_byte_t v)
{
	p->bytes[n] = v;
	p->value = __le32_to_cpu(p->little);
}
66 #endif
67 
/* A decoded x86 instruction; filled in field by field by insn_get_*(). */
struct insn {
	struct insn_field prefixes;	/*
					 * Prefixes
					 * prefixes.bytes[3]: last prefix
					 */
	struct insn_field rex_prefix;	/* REX prefix */
	union {
		struct insn_field vex_prefix;	/* VEX prefix */
		struct insn_field xop_prefix;	/* XOP prefix */
	};
	struct insn_field opcode;	/*
					 * opcode.bytes[0]: opcode1
					 * opcode.bytes[1]: opcode2
					 * opcode.bytes[2]: opcode3
					 */
	struct insn_field modrm;	/* ModRM byte */
	struct insn_field sib;		/* SIB byte */
	struct insn_field displacement;	/* memory operand displacement */
	union {
		struct insn_field immediate;
		struct insn_field moffset1;	/* for 64bit MOV */
		struct insn_field immediate1;	/* for 64bit imm or off16/32 */
	};
	union {
		struct insn_field moffset2;	/* for 64bit MOV */
		struct insn_field immediate2;	/* for 64bit imm or seg16 */
	};

	int	emulate_prefix_size;	/* emulate prefix length, 0 if none */
	insn_attr_t attr;		/* opcode attribute (see inat.h) */
	unsigned char opnd_bytes;	/* effective operand size, in bytes */
	unsigned char addr_bytes;	/* effective address size, in bytes */
	unsigned char length;		/* total instruction length, in bytes */
	unsigned char x86_64;		/* !0 when decoding as 64-bit code */

	const insn_byte_t *kaddr;	/* kernel address of insn to analyze */
	const insn_byte_t *end_kaddr;	/* kernel address of last insn in buffer */
	const insn_byte_t *next_byte;	/* next byte the decoder will consume */
};
107 
#define MAX_INSN_SIZE	15	/* architectural maximum x86 insn length */

/* ModRM byte: mod[7:6] reg[5:3] rm[2:0] */
#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
#define X86_MODRM_RM(modrm) ((modrm) & 0x07)

/* SIB byte: scale[7:6] index[5:3] base[2:0] */
#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
#define X86_SIB_BASE(sib) ((sib) & 0x07)
117 
/* REX2 payload byte bit flags (byte following the 0xd5 REX2 escape) */
#define X86_REX2_M(rex) ((rex) & 0x80)	/* REX2 M0 */
#define X86_REX2_R(rex) ((rex) & 0x40)	/* REX2 R4 */
#define X86_REX2_X(rex) ((rex) & 0x20)	/* REX2 X4 */
#define X86_REX2_B(rex) ((rex) & 0x10)	/* REX2 B4 */

/* Low nibble is shared by legacy REX and the REX2 payload byte */
#define X86_REX_W(rex) ((rex) & 8)	/* REX or REX2 W */
#define X86_REX_R(rex) ((rex) & 4)	/* REX or REX2 R3 */
#define X86_REX_X(rex) ((rex) & 2)	/* REX or REX2 X3 */
#define X86_REX_B(rex) ((rex) & 1)	/* REX or REX2 B3 */
127 
/* VEX bit flags (byte indices are within the VEX/EVEX prefix itself) */
#define X86_VEX_W(vex)	((vex) & 0x80)	/* VEX3 Byte2 */
#define X86_VEX_R(vex)	((vex) & 0x80)	/* VEX2/3 Byte1 */
#define X86_VEX_X(vex)	((vex) & 0x40)	/* VEX3 Byte1 */
#define X86_VEX_B(vex)	((vex) & 0x20)	/* VEX3 Byte1 */
#define X86_VEX_L(vex)	((vex) & 0x04)	/* VEX3 Byte2, VEX2 Byte1 */
/* VEX bit fields */
#define X86_EVEX_M(vex)	((vex) & 0x07)		/* EVEX Byte1 */
#define X86_VEX3_M(vex)	((vex) & 0x1f)		/* VEX3 Byte1 */
#define X86_VEX2_M	1			/* VEX2.M always 1 */
#define X86_VEX_V(vex)	(((vex) & 0x78) >> 3)	/* VEX3 Byte2, VEX2 Byte1 */
#define X86_VEX_P(vex)	((vex) & 0x03)		/* VEX3 Byte2, VEX2 Byte1 */
#define X86_VEX_M_MAX	0x1f			/* VEX3.M Maximum value */
/* XOP bit fields (XOP is always a 3-byte prefix) */
#define X86_XOP_R(xop)	((xop) & 0x80)	/* XOP Byte2 */
#define X86_XOP_X(xop)	((xop) & 0x40)	/* XOP Byte2 */
#define X86_XOP_B(xop)	((xop) & 0x20)	/* XOP Byte2 */
#define X86_XOP_M(xop)	((xop) & 0x1f)	/* XOP Byte2 */
#define X86_XOP_W(xop)	((xop) & 0x80)	/* XOP Byte3 */
#define X86_XOP_V(xop)	((xop) & 0x78)	/* XOP Byte3 */
#define X86_XOP_L(xop)	((xop) & 0x04)	/* XOP Byte3 */
#define X86_XOP_P(xop)	((xop) & 0x03)	/* XOP Byte3 */
#define X86_XOP_M_MIN	0x08	/* Min of XOP.M */
#define X86_XOP_M_MAX	0x1f	/* Max of XOP.M */
152 
/* Initialize @insn to decode the @buf_len byte buffer at @kaddr. */
extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
/*
 * Per-field decoders, defined in lib/insn.c. Each one decodes its
 * field (and the fields that precede it in the encoding) on demand;
 * the 'got' flag in each insn_field records completion.
 */
extern int insn_get_prefixes(struct insn *insn);
extern int insn_get_opcode(struct insn *insn);
extern int insn_get_modrm(struct insn *insn);
extern int insn_get_sib(struct insn *insn);
extern int insn_get_displacement(struct insn *insn);
extern int insn_get_immediate(struct insn *insn);
extern int insn_get_length(struct insn *insn);
161 
/* Decoding mode, passed to insn_decode(). */
enum insn_mode {
	INSN_MODE_32,	/* decode as 32-bit code */
	INSN_MODE_64,	/* decode as 64-bit code */
	/* Mode is determined by the current kernel build. */
	INSN_MODE_KERN,
	INSN_NUM_MODES,	/* number of modes; not a valid mode itself */
};
169 
/* Fully decode the instruction at @kaddr (@buf_len bytes) in mode @m. */
extern int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m);

/* Decode kernel text: maximum-size buffer, build-default mode. */
#define insn_decode_kernel(_insn, _ptr) insn_decode((_insn), (_ptr), MAX_INSN_SIZE, INSN_MODE_KERN)
173 
/* Attribute will be determined after getting ModRM (for opcode groups) */
static inline void insn_get_attribute(struct insn *insn)
{
	insn_get_modrm(insn);	/* decoding up to ModRM settles insn->attr */
}
179 
180 /* Instruction uses RIP-relative addressing */
181 extern int insn_rip_relative(struct insn *insn);
182 
/* Returns !0 if the instruction carries a REX2 prefix. */
static inline int insn_is_rex2(struct insn *insn)
{
	if (!insn->prefixes.got)
		insn_get_prefixes(insn);	/* decode prefixes on demand */
	/* Legacy REX is a single byte; REX2 is escape + payload = 2 bytes */
	return insn->rex_prefix.nbytes == 2;
}
189 
/* M0 bit of the REX2 payload byte; only meaningful if insn_is_rex2(). */
static inline insn_byte_t insn_rex2_m_bit(struct insn *insn)
{
	return X86_REX2_M(insn->rex_prefix.bytes[1]);
}
194 
/* Returns !0 if the instruction is VEX/EVEX or XOP encoded. */
static inline int insn_is_avx_or_xop(struct insn *insn)
{
	if (!insn->prefixes.got)
		insn_get_prefixes(insn);
	/* vex_prefix and xop_prefix share storage (union in struct insn) */
	return (insn->vex_prefix.value != 0);
}
201 
/* Returns !0 if the instruction is EVEX encoded (EVEX prefix is 4 bytes). */
static inline int insn_is_evex(struct insn *insn)
{
	if (!insn->prefixes.got)
		insn_get_prefixes(insn);
	return (insn->vex_prefix.nbytes == 4);
}
208 
/* If we already know this is AVX/XOP encoded */
static inline int avx_insn_is_xop(struct insn *insn)
{
	/* Look up the first prefix byte's attribute to tell XOP from VEX */
	insn_attr_t attr = inat_get_opcode_attribute(insn->vex_prefix.bytes[0]);

	return inat_is_xop_prefix(attr);
}
216 
/*
 * Returns !0 if the instruction is XOP encoded; decodes the prefixes
 * on demand via insn_is_avx_or_xop().
 */
static inline int insn_is_xop(struct insn *insn)
{
	if (insn_is_avx_or_xop(insn))
		return avx_insn_is_xop(insn);

	return 0;
}
224 
/* Returns !0 if the decoder found an emulate prefix on this instruction. */
static inline int insn_has_emulate_prefix(struct insn *insn)
{
	return !!insn->emulate_prefix_size;
}
229 
230 static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
231 {
232 	if (insn->vex_prefix.nbytes == 2)	/* 2 bytes VEX */
233 		return X86_VEX2_M;
234 	else if (insn->vex_prefix.nbytes == 3)	/* 3 bytes VEX */
235 		return X86_VEX3_M(insn->vex_prefix.bytes[1]);
236 	else					/* EVEX */
237 		return X86_EVEX_M(insn->vex_prefix.bytes[1]);
238 }
239 
240 static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
241 {
242 	if (insn->vex_prefix.nbytes == 2)	/* 2 bytes VEX */
243 		return X86_VEX_P(insn->vex_prefix.bytes[1]);
244 	else
245 		return X86_VEX_P(insn->vex_prefix.bytes[2]);
246 }
247 
/* W bit of the VEX/EVEX prefix; a 2-byte VEX has no W bit, report 0. */
static inline insn_byte_t insn_vex_w_bit(struct insn *insn)
{
	if (insn->vex_prefix.nbytes < 3)
		return 0;
	return X86_VEX_W(insn->vex_prefix.bytes[2]);
}
254 
/* Map-select (M) bits of the XOP prefix; 0 unless a full XOP prefix is present. */
static inline insn_byte_t insn_xop_map_bits(struct insn *insn)
{
	if (insn->xop_prefix.nbytes < 3)	/* XOP is 3 bytes */
		return 0;
	return X86_XOP_M(insn->xop_prefix.bytes[1]);
}
261 
262 static inline insn_byte_t insn_xop_p_bits(struct insn *insn)
263 {
264 	return X86_XOP_P(insn->vex_prefix.bytes[2]);
265 }
266 
267 /* Get the last prefix id from last prefix or VEX prefix */
268 static inline int insn_last_prefix_id(struct insn *insn)
269 {
270 	if (insn_is_avx_or_xop(insn)) {
271 		if (avx_insn_is_xop(insn))
272 			return insn_xop_p_bits(insn);
273 		return insn_vex_p_bits(insn);	/* VEX_p is a SIMD prefix id */
274 	}
275 
276 	if (insn->prefixes.bytes[3])
277 		return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
278 
279 	return 0;
280 }
281 
/*
 * Offset of each field from kaddr. Each offset is the running sum of
 * the sizes of all fields that precede it in the encoding, so it is
 * only meaningful once the corresponding insn_get_*() has run.
 */
static inline int insn_offset_rex_prefix(struct insn *insn)
{
	return insn->prefixes.nbytes;
}
static inline int insn_offset_vex_prefix(struct insn *insn)
{
	return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes;
}
static inline int insn_offset_opcode(struct insn *insn)
{
	return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes;
}
static inline int insn_offset_modrm(struct insn *insn)
{
	return insn_offset_opcode(insn) + insn->opcode.nbytes;
}
static inline int insn_offset_sib(struct insn *insn)
{
	return insn_offset_modrm(insn) + insn->modrm.nbytes;
}
static inline int insn_offset_displacement(struct insn *insn)
{
	return insn_offset_sib(insn) + insn->sib.nbytes;
}
static inline int insn_offset_immediate(struct insn *insn)
{
	return insn_offset_displacement(insn) + insn->displacement.nbytes;
}
311 
/**
 * for_each_insn_prefix() -- Iterate prefixes in the instruction
 * @insn: Pointer to struct insn.
 * @idx:  Index storage.
 * @prefix: Prefix byte.
 *
 * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix
 * and the index is stored in @idx (note that this @idx is just for a cursor,
 * do not change it.) Iteration stops at the first zero byte or after all
 * four slots have been visited.
 * Since prefixes.nbytes can be bigger than 4 if some prefixes
 * are repeated, it cannot be used for looping over the prefixes.
 */
#define for_each_insn_prefix(insn, idx, prefix)	\
	for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
326 
#define POP_SS_OPCODE 0x1f	/* POP SS */
#define MOV_SREG_OPCODE 0x8e	/* MOV to segment register */
329 
330 /*
331  * Intel SDM Vol.3A 6.8.3 states;
332  * "Any single-step trap that would be delivered following the MOV to SS
333  * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
334  * suppressed."
335  * This function returns true if @insn is MOV SS or POP SS. On these
336  * instructions, single stepping is suppressed.
337  */
338 static inline int insn_masking_exception(struct insn *insn)
339 {
340 	return insn->opcode.bytes[0] == POP_SS_OPCODE ||
341 		(insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
342 		 X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
343 }
344 
345 #endif /* _ASM_X86_INSN_H */
346