// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * x86 instruction analysis
 *
 * Copyright (C) IBM Corporation, 2002, 2004, 2009
 */

#include <linux/kernel.h>
#ifdef __KERNEL__
#include <linux/string.h>
#else
#include <string.h>
#endif
#include "../include/asm/inat.h" /* __ignore_sync_check__ */
#include "../include/asm/insn.h" /* __ignore_sync_check__ */

#include <linux/errno.h>
#include <linux/kconfig.h>

#include "../include/asm/emulate_prefix.h" /* __ignore_sync_check__ */

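/*
 * leXX_to_cpu(): read a 1-, 2- or 4-byte little-endian value taken from the
 * instruction stream and convert it to CPU byte order.  The width is picked
 * at compile time from sizeof(t).
 */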
#define leXX_to_cpu(t, r)						\
({									\
	__typeof__(t) v;						\
	switch (sizeof(t)) {						\
	case 4: v = le32_to_cpu(r); break;				\
	case 2: v = le16_to_cpu(r); break;				\
	case 1:	v = r; break;						\
	default:							\
		BUILD_BUG(); break;					\
	}								\
	v;								\
})

/* Verify that the next sizeof(t) bytes (at offset n) still fit within the instruction buffer */
#define validate_next(t, insn, n)	\
	((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)

#define __get_next(t, insn)	\
	({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })

#define __peek_nbyte_next(t, insn, n)	\
	({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); })

#define get_next(t, insn)	\
	({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })

#define peek_nbyte_next(t, insn, n)	\
	({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })

#define peek_next(t, insn)	peek_nbyte_next(t, insn, 0)
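/*
 * get_next() consumes bytes and advances insn->next_byte; the peek variants
 * only look ahead.  Both bail out to a local err_out label in the caller
 * when the access would run past insn->end_kaddr.
 */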

/**
 * insn_init() - initialize struct insn
 * @insn:	&struct insn to be initialized
 * @kaddr:	address (in kernel memory) of instruction (or copy thereof)
 * @buf_len:	length of the insn buffer at @kaddr
 * @x86_64:	!0 for 64-bit kernel or 64-bit app
 */
void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
{
	/*
	 * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
	 * even if the input buffer is long enough to hold them.
	 */
	if (buf_len > MAX_INSN_SIZE)
		buf_len = MAX_INSN_SIZE;

	memset(insn, 0, sizeof(*insn));
	insn->kaddr = kaddr;
	insn->end_kaddr = kaddr + buf_len;
	insn->next_byte = kaddr;
	insn->x86_64 = x86_64 ? 1 : 0;
	insn->opnd_bytes = 4;
	if (x86_64)
		insn->addr_bytes = 8;
	else
		insn->addr_bytes = 4;
}
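
/*
 * Note: most callers go through insn_decode() at the bottom of this file,
 * which combines insn_init() with insn_get_length().
 */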

static const insn_byte_t xen_prefix[] = { __XEN_EMULATE_PREFIX };
static const insn_byte_t kvm_prefix[] = { __KVM_EMULATE_PREFIX };

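/*
 * Xen and KVM define magic "emulate" prefixes (see asm/emulate_prefix.h):
 * marker byte sequences that may precede an instruction to request forced
 * emulation.  If one is found it is recorded and skipped so that decoding
 * starts at the real instruction.
 */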
static int __insn_get_emulate_prefix(struct insn *insn,
				     const insn_byte_t *prefix, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (peek_nbyte_next(insn_byte_t, insn, i) != prefix[i])
			goto err_out;
	}

	insn->emulate_prefix_size = len;
	insn->next_byte += len;

	return 1;

err_out:
	return 0;
}

static void insn_get_emulate_prefix(struct insn *insn)
{
	if (__insn_get_emulate_prefix(insn, xen_prefix, sizeof(xen_prefix)))
		return;

	__insn_get_emulate_prefix(insn, kvm_prefix, sizeof(kvm_prefix));
}

/**
 * insn_get_prefixes - scan x86 instruction prefix bytes
 * @insn:	&struct insn containing instruction
 *
 * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
 * to point to the (first) opcode.  No effect if @insn->prefixes.got
 * is already set.
 *
 * Returns:
 * 0:  on success
 * < 0: on error
 */
int insn_get_prefixes(struct insn *insn)
{
	struct insn_field *prefixes = &insn->prefixes;
	insn_attr_t attr;
	insn_byte_t b, lb;
	int i, nb;

	if (prefixes->got)
		return 0;

	insn_get_emulate_prefix(insn);

	nb = 0;
	lb = 0;
	b = peek_next(insn_byte_t, insn);
	attr = inat_get_opcode_attribute(b);
	while (inat_is_legacy_prefix(attr)) {
		/* Skip if same prefix */
		for (i = 0; i < nb; i++)
			if (prefixes->bytes[i] == b)
				goto found;
		if (nb == 4)
			/* Invalid instruction */
			break;
		prefixes->bytes[nb++] = b;
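		/*
		 * The size toggles below work by XOR: 8 ^ 12 = 4 and
		 * 4 ^ 12 = 8 for the 64-bit address size, 4 ^ 6 = 2 and
		 * 2 ^ 6 = 4 for the 32-bit address and operand sizes.
		 */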
		if (inat_is_address_size_prefix(attr)) {
			/* address size switches 2/4 or 4/8 */
			if (insn->x86_64)
				insn->addr_bytes ^= 12;
			else
				insn->addr_bytes ^= 6;
		} else if (inat_is_operand_size_prefix(attr)) {
			/* operand size switches 2/4 */
			insn->opnd_bytes ^= 6;
		}
found:
		prefixes->nbytes++;
		insn->next_byte++;
		lb = b;
		b = peek_next(insn_byte_t, insn);
		attr = inat_get_opcode_attribute(b);
	}
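	/*
	 * prefixes->bytes[3] is reserved for the last legacy prefix seen;
	 * insn_last_prefix_id() uses it later to select the right escape
	 * and group table variants.
	 */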
	/* Set the last prefix */
	if (lb && lb != insn->prefixes.bytes[3]) {
		if (unlikely(insn->prefixes.bytes[3])) {
			/* Swap the last prefix */
			b = insn->prefixes.bytes[3];
			for (i = 0; i < nb; i++)
				if (prefixes->bytes[i] == lb)
					insn_set_byte(prefixes, i, b);
		}
		insn_set_byte(&insn->prefixes, 3, lb);
	}

	/* Decode REX prefix */
	if (insn->x86_64) {
		b = peek_next(insn_byte_t, insn);
		attr = inat_get_opcode_attribute(b);
		if (inat_is_rex_prefix(attr)) {
			insn_field_set(&insn->rex_prefix, b, 1);
			insn->next_byte++;
			if (X86_REX_W(b))
				/* REX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		}
	}
	insn->rex_prefix.got = 1;

	/* Decode VEX prefix */
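	/*
	 * Three encodings can match here: the 2-byte VEX prefix (0xc5), the
	 * 3-byte VEX prefix (0xc4) and the 4-byte EVEX prefix (0x62); the
	 * inat attribute distinguishes them below.
	 */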
	b = peek_next(insn_byte_t, insn);
	attr = inat_get_opcode_attribute(b);
	if (inat_is_vex_prefix(attr)) {
		insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
		if (!insn->x86_64) {
			/*
			 * In 32-bit mode, if the [7:6] bits (the mod bits of
			 * ModRM) of the second byte are not 11b, this is LDS,
			 * LES or BOUND rather than a VEX prefix.
			 */
			if (X86_MODRM_MOD(b2) != 3)
				goto vex_end;
		}
		insn_set_byte(&insn->vex_prefix, 0, b);
		insn_set_byte(&insn->vex_prefix, 1, b2);
		if (inat_is_evex_prefix(attr)) {
			b2 = peek_nbyte_next(insn_byte_t, insn, 2);
			insn_set_byte(&insn->vex_prefix, 2, b2);
			b2 = peek_nbyte_next(insn_byte_t, insn, 3);
			insn_set_byte(&insn->vex_prefix, 3, b2);
			insn->vex_prefix.nbytes = 4;
			insn->next_byte += 4;
			if (insn->x86_64 && X86_VEX_W(b2))
				/* VEX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else if (inat_is_vex3_prefix(attr)) {
			b2 = peek_nbyte_next(insn_byte_t, insn, 2);
			insn_set_byte(&insn->vex_prefix, 2, b2);
			insn->vex_prefix.nbytes = 3;
			insn->next_byte += 3;
			if (insn->x86_64 && X86_VEX_W(b2))
				/* VEX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else {
			/*
			 * For VEX2, fake VEX3-like byte#2.
			 * Makes it easier to decode vex.W, vex.vvvv,
			 * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
			 */
			insn_set_byte(&insn->vex_prefix, 2, b2 & 0x7f);
			insn->vex_prefix.nbytes = 2;
			insn->next_byte += 2;
		}
	}
vex_end:
	insn->vex_prefix.got = 1;

	prefixes->got = 1;

	return 0;

err_out:
	return -ENODATA;
}

/**
 * insn_get_opcode - collect opcode(s)
 * @insn:	&struct insn containing instruction
 *
 * Populates @insn->opcode, updates @insn->next_byte to point past the
 * opcode byte(s), and sets @insn->attr (except for groups).
 * If necessary, first collects any preceding (prefix) bytes.
 * Sets @insn->opcode.value = opcode1.  No effect if @insn->opcode.got
 * is already 1.
 *
 * Returns:
 * 0:  on success
 * < 0: on error
 */
int insn_get_opcode(struct insn *insn)
{
	struct insn_field *opcode = &insn->opcode;
	int pfx_id, ret;
	insn_byte_t op;

	if (opcode->got)
		return 0;

	if (!insn->prefixes.got) {
		ret = insn_get_prefixes(insn);
		if (ret)
			return ret;
	}

	/* Get first opcode */
	op = get_next(insn_byte_t, insn);
	insn_set_byte(opcode, 0, op);
	opcode->nbytes = 1;

	/* Check if there is a VEX prefix */
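	/*
	 * For a (E)VEX-encoded instruction the escape map (0f/0f38/0f3a)
	 * comes from VEX.mmmmm and the mandatory prefix from VEX.pp, so one
	 * opcode byte plus (m, p) is enough to look up the attribute.
	 */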
	if (insn_is_avx(insn)) {
		insn_byte_t m, p;
		m = insn_vex_m_bits(insn);
		p = insn_vex_p_bits(insn);
		insn->attr = inat_get_avx_attribute(op, m, p);
		if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
		    (!inat_accept_vex(insn->attr) &&
		     !inat_is_group(insn->attr))) {
			/* This instruction is bad */
			insn->attr = 0;
			return -EINVAL;
		}
		/* VEX has only 1 byte for opcode */
		goto end;
	}

	insn->attr = inat_get_opcode_attribute(op);
	while (inat_is_escape(insn->attr)) {
		/* Get escaped opcode */
		op = get_next(insn_byte_t, insn);
		opcode->bytes[opcode->nbytes++] = op;
		pfx_id = insn_last_prefix_id(insn);
		insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
	}

	if (inat_must_vex(insn->attr)) {
		/* This instruction is bad */
		insn->attr = 0;
		return -EINVAL;
	}
end:
	opcode->got = 1;
	return 0;

err_out:
	return -ENODATA;
}

/**
 * insn_get_modrm - collect ModRM byte, if any
 * @insn:	&struct insn containing instruction
 *
 * Populates @insn->modrm and updates @insn->next_byte to point past the
 * ModRM byte, if any.  If necessary, first collects the preceding bytes
 * (prefixes and opcode(s)).  No effect if @insn->modrm.got is already 1.
 *
 * Returns:
 * 0:  on success
 * < 0: on error
 */
int insn_get_modrm(struct insn *insn)
{
	struct insn_field *modrm = &insn->modrm;
	insn_byte_t pfx_id, mod;
	int ret;

	if (modrm->got)
		return 0;

	if (!insn->opcode.got) {
		ret = insn_get_opcode(insn);
		if (ret)
			return ret;
	}

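	/*
	 * For "group" opcodes the ModRM.reg field selects the actual
	 * instruction, so the attribute is refined with the group table
	 * once the ModRM byte is known.
	 */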
	if (inat_has_modrm(insn->attr)) {
		mod = get_next(insn_byte_t, insn);
		insn_field_set(modrm, mod, 1);
		if (inat_is_group(insn->attr)) {
			pfx_id = insn_last_prefix_id(insn);
			insn->attr = inat_get_group_attribute(mod, pfx_id,
							      insn->attr);
			if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) {
				/* Bad insn */
				insn->attr = 0;
				return -EINVAL;
			}
		}
	}

	if (insn->x86_64 && inat_is_force64(insn->attr))
		insn->opnd_bytes = 8;

	modrm->got = 1;
	return 0;

err_out:
	return -ENODATA;
}


/**
 * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * ModRM byte.  No effect if @insn->x86_64 is 0.
 */
int insn_rip_relative(struct insn *insn)
{
	struct insn_field *modrm = &insn->modrm;
	int ret;

	if (!insn->x86_64)
		return 0;

	if (!modrm->got) {
		ret = insn_get_modrm(insn);
		if (ret)
			return 0;
	}
	/*
	 * For rip-relative instructions, the mod field (top 2 bits)
	 * is zero and the r/m field (bottom 3 bits) is 0x5.
	 */
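	/* e.g. 8b 05 xx xx xx xx is "mov disp32(%rip), %eax" in 64-bit mode. */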
	return (modrm->nbytes && (modrm->bytes[0] & 0xc7) == 0x5);
}

/**
 * insn_get_sib() - Get the SIB byte of instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * ModRM byte.
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_get_sib(struct insn *insn)
{
	insn_byte_t modrm;
	int ret;

	if (insn->sib.got)
		return 0;

	if (!insn->modrm.got) {
		ret = insn_get_modrm(insn);
		if (ret)
			return ret;
	}

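	/*
	 * A SIB byte is only present with 32/64-bit addressing, a memory
	 * operand (ModRM.mod != 11b) and ModRM.rm == 100b.
	 */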
	if (insn->modrm.nbytes) {
		modrm = insn->modrm.bytes[0];
		if (insn->addr_bytes != 2 &&
		    X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
			insn_field_set(&insn->sib,
				       get_next(insn_byte_t, insn), 1);
		}
	}
	insn->sib.got = 1;

	return 0;

err_out:
	return -ENODATA;
}


/**
 * insn_get_displacement() - Get the displacement of instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * SIB byte.
 * The displacement value is sign-extended.
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_get_displacement(struct insn *insn)
{
	insn_byte_t mod, rm, base;
	int ret;

	if (insn->displacement.got)
		return 0;

	if (!insn->sib.got) {
		ret = insn_get_sib(insn);
		if (ret)
			return ret;
	}

	if (insn->modrm.nbytes) {
		/*
		 * Interpreting the modrm byte:
		 * mod = 00 - no displacement fields (exceptions below)
		 * mod = 01 - 1-byte displacement field
		 * mod = 10 - displacement field is 4 bytes, or 2 bytes if
		 * 	address size = 2 (0x67 prefix in 32-bit mode)
		 * mod = 11 - no memory operand
		 *
		 * If address size = 2...
		 * mod = 00, r/m = 110 - displacement field is 2 bytes
		 *
		 * If address size != 2...
		 * mod != 11, r/m = 100 - SIB byte exists
		 * mod = 00, SIB base = 101 - displacement field is 4 bytes
		 * mod = 00, r/m = 101 - rip-relative addressing, displacement
		 * 	field is 4 bytes
		 */
		mod = X86_MODRM_MOD(insn->modrm.value);
		rm = X86_MODRM_RM(insn->modrm.value);
		base = X86_SIB_BASE(insn->sib.value);
		if (mod == 3)
			goto out;
		if (mod == 1) {
			insn_field_set(&insn->displacement,
				       get_next(signed char, insn), 1);
		} else if (insn->addr_bytes == 2) {
			if ((mod == 0 && rm == 6) || mod == 2) {
				insn_field_set(&insn->displacement,
					       get_next(short, insn), 2);
			}
		} else {
			if ((mod == 0 && rm == 5) || mod == 2 ||
			    (mod == 0 && base == 5)) {
				insn_field_set(&insn->displacement,
					       get_next(int, insn), 4);
			}
		}
	}
out:
	insn->displacement.got = 1;
	return 0;

err_out:
	return -ENODATA;
}

/* Decode moffset16/32/64. Return 0 if failed */
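/* (moffset is an absolute-address operand, e.g. in the mov moffs forms, so its width follows addr_bytes.) */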
static int __get_moffset(struct insn *insn)
{
	switch (insn->addr_bytes) {
	case 2:
		insn_field_set(&insn->moffset1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->moffset1, get_next(int, insn), 4);
		break;
	case 8:
		insn_field_set(&insn->moffset1, get_next(int, insn), 4);
		insn_field_set(&insn->moffset2, get_next(int, insn), 4);
		break;
	default:	/* addr_bytes must be modified manually */
		goto err_out;
	}
	insn->moffset1.got = insn->moffset2.got = 1;

	return 1;

err_out:
	return 0;
}

/* Decode imm v32(Iz). Return 0 if failed */
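/* (Iz is at most 32 bits even when the effective operand size is 64 bits.) */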
static int __get_immv32(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate, get_next(short, insn), 2);
		break;
	case 4:
	case 8:
		insn_field_set(&insn->immediate, get_next(int, insn), 4);
		break;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}

	return 1;

err_out:
	return 0;
}

/* Decode imm v64(Iv/Ov). Return 0 if failed */
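/* (Iv/Ov uses the full operand size; a 64-bit value is read as two 32-bit halves.) */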
static int __get_immv(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn->immediate1.nbytes = 4;
		break;
	case 8:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn_field_set(&insn->immediate2, get_next(int, insn), 4);
		break;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}
	insn->immediate1.got = insn->immediate2.got = 1;

	return 1;
err_out:
	return 0;
}

/* Decode ptr16:16/32(Ap) */
static int __get_immptr(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		break;
	case 8:
		/* ptr16:64 does not exist (no segment) */
		return 0;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}
	insn_field_set(&insn->immediate2, get_next(unsigned short, insn), 2);
	insn->immediate1.got = insn->immediate2.got = 1;

	return 1;
err_out:
	return 0;
}

/**
 * insn_get_immediate() - Get the immediate in an instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * displacement bytes.
 * Most immediates are sign-extended.  The unsigned value can be
 * computed by masking with ((1 << (nbytes * 8)) - 1).
 *
 * Returns:
 * 0:  on success
 * < 0: on error
 */
int insn_get_immediate(struct insn *insn)
{
	int ret;

	if (insn->immediate.got)
		return 0;

	if (!insn->displacement.got) {
		ret = insn_get_displacement(insn);
		if (ret)
			return ret;
	}

	if (inat_has_moffset(insn->attr)) {
		if (!__get_moffset(insn))
			goto err_out;
		goto done;
	}

	if (!inat_has_immediate(insn->attr))
		/* no immediates */
		goto done;

	switch (inat_immediate_size(insn->attr)) {
	case INAT_IMM_BYTE:
		insn_field_set(&insn->immediate, get_next(signed char, insn), 1);
		break;
	case INAT_IMM_WORD:
		insn_field_set(&insn->immediate, get_next(short, insn), 2);
		break;
	case INAT_IMM_DWORD:
		insn_field_set(&insn->immediate, get_next(int, insn), 4);
		break;
	case INAT_IMM_QWORD:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn_field_set(&insn->immediate2, get_next(int, insn), 4);
		break;
	case INAT_IMM_PTR:
		if (!__get_immptr(insn))
			goto err_out;
		break;
	case INAT_IMM_VWORD32:
		if (!__get_immv32(insn))
			goto err_out;
		break;
	case INAT_IMM_VWORD:
		if (!__get_immv(insn))
			goto err_out;
		break;
	default:
		/* Here, the insn must have an immediate, but decoding it failed */
		goto err_out;
	}
	if (inat_has_second_immediate(insn->attr)) {
		insn_field_set(&insn->immediate2, get_next(signed char, insn), 1);
	}
done:
	insn->immediate.got = 1;
	return 0;

err_out:
	return -ENODATA;
}

/**
 * insn_get_length() - Get the length of instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * immediate bytes.
 *
 * Returns:
 *  - 0 on success
 *  - < 0 on error
 */
int insn_get_length(struct insn *insn)
{
	int ret;

	if (insn->length)
		return 0;

	if (!insn->immediate.got) {
		ret = insn_get_immediate(insn);
		if (ret)
			return ret;
	}

	insn->length = (unsigned char)((unsigned long)insn->next_byte
				     - (unsigned long)insn->kaddr);

	return 0;
}

/* Ensure this instruction is decoded completely */
static inline int insn_complete(struct insn *insn)
{
	return insn->opcode.got && insn->modrm.got && insn->sib.got &&
		insn->displacement.got && insn->immediate.got;
}

/**
 * insn_decode() - Decode an x86 instruction
 * @insn:	&struct insn to be initialized
 * @kaddr:	address (in kernel memory) of instruction (or copy thereof)
 * @buf_len:	length of the insn buffer at @kaddr
 * @m:		insn mode, see enum insn_mode
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m)
{
	int ret;

#define INSN_MODE_KERN (enum insn_mode)-1 /* __ignore_sync_check__ mode is only valid in the kernel */

	if (m == INSN_MODE_KERN)
		insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64));
	else
		insn_init(insn, kaddr, buf_len, m == INSN_MODE_64);

	ret = insn_get_length(insn);
	if (ret)
		return ret;

	if (insn_complete(insn))
		return 0;

	return -EINVAL;
}

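/*
 * Typical use (sketch): decode a byte buffer copied from the target and
 * then consult the decoded fields:
 *
 *	struct insn insn;
 *
 *	if (insn_decode(&insn, kaddr, MAX_INSN_SIZE, INSN_MODE_64) < 0)
 *		return -EINVAL;
 *	use(insn.length, insn.opcode.bytes[0], insn.immediate.value);
 *
 * where kaddr and use() stand in for the caller's buffer and logic.
 */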