// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * x86 instruction analysis
 *
 * Copyright (C) IBM Corporation, 2002, 2004, 2009
 */

#include <linux/kernel.h>
#ifdef __KERNEL__
#include <linux/string.h>
#else
#include <string.h>
#endif
#include "../include/asm/inat.h" /* __ignore_sync_check__ */
#include "../include/asm/insn.h" /* __ignore_sync_check__ */
#include "../include/asm-generic/unaligned.h" /* __ignore_sync_check__ */

#include <linux/errno.h>
#include <linux/kconfig.h>

#include "../include/asm/emulate_prefix.h" /* __ignore_sync_check__ */

/* Convert a little-endian value of 1/2/4 bytes to CPU byte order. */
#define leXX_to_cpu(t, r)						\
({									\
	__typeof__(t) v;						\
	switch (sizeof(t)) {						\
	case 4: v = le32_to_cpu(r); break;				\
	case 2: v = le16_to_cpu(r); break;				\
	case 1: v = r; break;						\
	default:							\
		BUILD_BUG(); break;					\
	}								\
	v;								\
})

/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n)	\
	((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)

/* Consume the next sizeof(t) bytes, advancing next_byte. No bounds check. */
#define __get_next(t, insn)	\
	({ t r = get_unaligned((t *)(insn)->next_byte); (insn)->next_byte += sizeof(t); leXX_to_cpu(t, r); })

/* Read sizeof(t) bytes at offset n without advancing next_byte. No bounds check. */
#define __peek_nbyte_next(t, insn, n)	\
	({ t r = get_unaligned((t *)(insn)->next_byte + n); leXX_to_cpu(t, r); })

/*
 * NOTE: get_next()/peek_nbyte_next()/peek_next() jump to an "err_out"
 * label that must exist in the *calling* function when the read would
 * run past end_kaddr. Every decoder below relies on this protocol.
 */
#define get_next(t, insn)	\
	({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })

#define peek_nbyte_next(t, insn, n)	\
	({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })

#define peek_next(t, insn)	peek_nbyte_next(t, insn, 0)

/**
 * insn_init() - initialize struct insn
 * @insn:	&struct insn to be initialized
 * @kaddr:	address (in kernel memory) of instruction (or copy thereof)
 * @buf_len:	length of the insn buffer at @kaddr
 * @x86_64:	!0 for 64-bit kernel or 64-bit app
 */
void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
{
	/*
	 * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
	 * even if the input buffer is long enough to hold them.
	 */
	if (buf_len > MAX_INSN_SIZE)
		buf_len = MAX_INSN_SIZE;

	memset(insn, 0, sizeof(*insn));
	insn->kaddr = kaddr;
	insn->end_kaddr = kaddr + buf_len;
	insn->next_byte = kaddr;
	insn->x86_64 = x86_64;
	/* Default operand size is 4 bytes; prefixes/REX may change it later. */
	insn->opnd_bytes = 4;
	if (x86_64)
		insn->addr_bytes = 8;
	else
		insn->addr_bytes = 4;
}

/* Magic byte sequences hypervisors prepend to mark emulated instructions. */
static const insn_byte_t xen_prefix[] = { __XEN_EMULATE_PREFIX };
static const insn_byte_t kvm_prefix[] = { __KVM_EMULATE_PREFIX };

/*
 * Match one emulate-prefix byte sequence at the current decode position.
 * On a match, record its size and skip past it. Returns 1 on match,
 * 0 on mismatch or if the buffer is too short.
 */
static int __insn_get_emulate_prefix(struct insn *insn,
				     const insn_byte_t *prefix, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (peek_nbyte_next(insn_byte_t, insn, i) != prefix[i])
			goto err_out;
	}

	insn->emulate_prefix_size = len;
	insn->next_byte += len;

	return 1;

err_out:
	return 0;
}

/* Skip a Xen or KVM emulate prefix, if present (at most one can match). */
static void insn_get_emulate_prefix(struct insn *insn)
{
	if (__insn_get_emulate_prefix(insn, xen_prefix, sizeof(xen_prefix)))
		return;

	__insn_get_emulate_prefix(insn, kvm_prefix, sizeof(kvm_prefix));
}

/**
 * insn_get_prefixes - scan x86 instruction prefix bytes
 * @insn:	&struct insn containing instruction
 *
 * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
 * to point to the (first) opcode.  No effect if @insn->prefixes.got
 * is already set.
 *
 * Returns:
 * 0:  on success
 * < 0: on error
 */
int insn_get_prefixes(struct insn *insn)
{
	struct insn_field *prefixes = &insn->prefixes;
	insn_attr_t attr;
	insn_byte_t b, lb;
	int i, nb;

	if (prefixes->got)
		return 0;

	insn_get_emulate_prefix(insn);

	nb = 0;
	lb = 0;
	b = peek_next(insn_byte_t, insn);
	attr = inat_get_opcode_attribute(b);
	while (inat_is_legacy_prefix(attr)) {
		/* Skip if same prefix */
		for (i = 0; i < nb; i++)
			if (prefixes->bytes[i] == b)
				goto found;
		if (nb == 4)
			/* Invalid instruction */
			break;
		prefixes->bytes[nb++] = b;
		if (inat_is_address_size_prefix(attr)) {
			/* address size switches 2/4 or 4/8 */
			if (insn->x86_64)
				insn->addr_bytes ^= 12;
			else
				insn->addr_bytes ^= 6;
		} else if (inat_is_operand_size_prefix(attr)) {
			/* operand size switches 2/4 */
			insn->opnd_bytes ^= 6;
		}
found:
		prefixes->nbytes++;
		insn->next_byte++;
		lb = b;
		b = peek_next(insn_byte_t, insn);
		attr = inat_get_opcode_attribute(b);
	}
	/* Set the last prefix: bytes[3] always holds the last-seen prefix */
	if (lb && lb != insn->prefixes.bytes[3]) {
		if (unlikely(insn->prefixes.bytes[3])) {
			/* Swap the last prefix */
			b = insn->prefixes.bytes[3];
			for (i = 0; i < nb; i++)
				if (prefixes->bytes[i] == lb)
					insn_set_byte(prefixes, i, b);
		}
		insn_set_byte(&insn->prefixes, 3, lb);
	}

	/* Decode REX prefix */
	if (insn->x86_64) {
		b = peek_next(insn_byte_t, insn);
		attr = inat_get_opcode_attribute(b);
		if (inat_is_rex_prefix(attr)) {
			insn_field_set(&insn->rex_prefix, b, 1);
			insn->next_byte++;
			if (X86_REX_W(b))
				/* REX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else if (inat_is_rex2_prefix(attr)) {
			/* Two-byte REX2 (APX): byte 1 carries the payload. */
			insn_set_byte(&insn->rex_prefix, 0, b);
			b = peek_nbyte_next(insn_byte_t, insn, 1);
			insn_set_byte(&insn->rex_prefix, 1, b);
			insn->rex_prefix.nbytes = 2;
			insn->next_byte += 2;
			if (X86_REX_W(b))
				/* REX.W overrides opnd_size */
				insn->opnd_bytes = 8;
			insn->rex_prefix.got = 1;
			/* REX2 excludes a VEX/EVEX prefix - skip that decode. */
			goto vex_end;
		}
	}
	insn->rex_prefix.got = 1;

	/* Decode VEX prefix */
	b = peek_next(insn_byte_t, insn);
	attr = inat_get_opcode_attribute(b);
	if (inat_is_vex_prefix(attr)) {
		insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
		if (!insn->x86_64) {
			/*
			 * In 32-bits mode, if the [7:6] bits (mod bits of
			 * ModRM) on the second byte are not 11b, it is
			 * LDS or LES or BOUND.
			 */
			if (X86_MODRM_MOD(b2) != 3)
				goto vex_end;
		}
		insn_set_byte(&insn->vex_prefix, 0, b);
		insn_set_byte(&insn->vex_prefix, 1, b2);
		if (inat_is_evex_prefix(attr)) {
			b2 = peek_nbyte_next(insn_byte_t, insn, 2);
			insn_set_byte(&insn->vex_prefix, 2, b2);
			b2 = peek_nbyte_next(insn_byte_t, insn, 3);
			insn_set_byte(&insn->vex_prefix, 3, b2);
			insn->vex_prefix.nbytes = 4;
			insn->next_byte += 4;
			if (insn->x86_64 && X86_VEX_W(b2))
				/* VEX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else if (inat_is_vex3_prefix(attr)) {
			b2 = peek_nbyte_next(insn_byte_t, insn, 2);
			insn_set_byte(&insn->vex_prefix, 2, b2);
			insn->vex_prefix.nbytes = 3;
			insn->next_byte += 3;
			if (insn->x86_64 && X86_VEX_W(b2))
				/* VEX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else {
			/*
			 * For VEX2, fake VEX3-like byte#2.
			 * Makes it easier to decode vex.W, vex.vvvv,
			 * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
			 */
			insn_set_byte(&insn->vex_prefix, 2, b2 & 0x7f);
			insn->vex_prefix.nbytes = 2;
			insn->next_byte += 2;
		}
	}
vex_end:
	insn->vex_prefix.got = 1;

	prefixes->got = 1;

	return 0;

err_out:
	return -ENODATA;
}

/**
 * insn_get_opcode - collect opcode(s)
 * @insn:	&struct insn containing instruction
 *
 * Populates @insn->opcode, updates @insn->next_byte to point past the
 * opcode byte(s), and set @insn->attr (except for groups).
 * If necessary, first collects any preceding (prefix) bytes.
 * Sets @insn->opcode.value = opcode1.  No effect if @insn->opcode.got
 * is already 1.
 *
 * Returns:
 * 0:  on success
 * < 0: on error
 */
int insn_get_opcode(struct insn *insn)
{
	struct insn_field *opcode = &insn->opcode;
	int pfx_id, ret;
	insn_byte_t op;

	if (opcode->got)
		return 0;

	ret = insn_get_prefixes(insn);
	if (ret)
		return ret;

	/* Get first opcode */
	op = get_next(insn_byte_t, insn);
	insn_set_byte(opcode, 0, op);
	opcode->nbytes = 1;

	/* Check if there is VEX prefix or not */
	if (insn_is_avx(insn)) {
		insn_byte_t m, p;
		m = insn_vex_m_bits(insn);
		p = insn_vex_p_bits(insn);
		insn->attr = inat_get_avx_attribute(op, m, p);
		if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
		    (!inat_accept_vex(insn->attr) &&
		     !inat_is_group(insn->attr))) {
			/* This instruction is bad */
			insn->attr = 0;
			return -EINVAL;
		}
		/* VEX has only 1 byte for opcode */
		goto end;
	}

	/* Check if there is REX2 prefix or not */
	if (insn_is_rex2(insn)) {
		if (insn_rex2_m_bit(insn)) {
			/* map 1 is escape 0x0f */
			insn_attr_t esc_attr = inat_get_opcode_attribute(0x0f);

			pfx_id = insn_last_prefix_id(insn);
			insn->attr = inat_get_escape_attribute(op, pfx_id, esc_attr);
		} else {
			insn->attr = inat_get_opcode_attribute(op);
		}
		goto end;
	}

	insn->attr = inat_get_opcode_attribute(op);
	while (inat_is_escape(insn->attr)) {
		/* Get escaped opcode */
		op = get_next(insn_byte_t, insn);
		opcode->bytes[opcode->nbytes++] = op;
		pfx_id = insn_last_prefix_id(insn);
		insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
	}

	if (inat_must_vex(insn->attr)) {
		/* This instruction is bad */
		insn->attr = 0;
		return -EINVAL;
	}
end:
	opcode->got = 1;
	return 0;

err_out:
	return -ENODATA;
}

/**
 * insn_get_modrm - collect ModRM byte, if any
 * @insn:	&struct insn containing instruction
 *
 * Populates @insn->modrm and updates @insn->next_byte to point past the
 * ModRM byte, if any.  If necessary, first collects the preceding bytes
 * (prefixes and opcode(s)).  No effect if @insn->modrm.got is already 1.
 *
 * Returns:
 * 0:  on success
 * < 0: on error
 */
int insn_get_modrm(struct insn *insn)
{
	struct insn_field *modrm = &insn->modrm;
	insn_byte_t pfx_id, mod;
	int ret;

	if (modrm->got)
		return 0;

	ret = insn_get_opcode(insn);
	if (ret)
		return ret;

	if (inat_has_modrm(insn->attr)) {
		mod = get_next(insn_byte_t, insn);
		insn_field_set(modrm, mod, 1);
		if (inat_is_group(insn->attr)) {
			/* Group opcodes: reg field of ModRM selects the insn. */
			pfx_id = insn_last_prefix_id(insn);
			insn->attr = inat_get_group_attribute(mod, pfx_id,
							      insn->attr);
			if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) {
				/* Bad insn */
				insn->attr = 0;
				return -EINVAL;
			}
		}
	}

	if (insn->x86_64 && inat_is_force64(insn->attr))
		insn->opnd_bytes = 8;

	modrm->got = 1;
	return 0;

err_out:
	return -ENODATA;
}


/**
 * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * ModRM byte.  No effect if @insn->x86_64 is 0.
 */
int insn_rip_relative(struct insn *insn)
{
	struct insn_field *modrm = &insn->modrm;
	int ret;

	if (!insn->x86_64)
		return 0;

	ret = insn_get_modrm(insn);
	if (ret)
		return 0;
	/*
	 * For rip-relative instructions, the mod field (top 2 bits)
	 * is zero and the r/m field (bottom 3 bits) is 0x5.
	 */
	return (modrm->nbytes && (modrm->bytes[0] & 0xc7) == 0x5);
}

/**
 * insn_get_sib() - Get the SIB byte of instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * ModRM byte.
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_get_sib(struct insn *insn)
{
	insn_byte_t modrm;
	int ret;

	if (insn->sib.got)
		return 0;

	ret = insn_get_modrm(insn);
	if (ret)
		return ret;

	if (insn->modrm.nbytes) {
		modrm = insn->modrm.bytes[0];
		/* SIB exists only with 32/64-bit addressing, mod != 11, r/m = 100 */
		if (insn->addr_bytes != 2 &&
		    X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
			insn_field_set(&insn->sib,
				       get_next(insn_byte_t, insn), 1);
		}
	}
	insn->sib.got = 1;

	return 0;

err_out:
	return -ENODATA;
}


/**
 * insn_get_displacement() - Get the displacement of instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * SIB byte.
 * Displacement value is sign-expanded.
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_get_displacement(struct insn *insn)
{
	insn_byte_t mod, rm, base;
	int ret;

	if (insn->displacement.got)
		return 0;

	ret = insn_get_sib(insn);
	if (ret)
		return ret;

	if (insn->modrm.nbytes) {
		/*
		 * Interpreting the modrm byte:
		 * mod = 00 - no displacement fields (exceptions below)
		 * mod = 01 - 1-byte displacement field
		 * mod = 10 - displacement field is 4 bytes, or 2 bytes if
		 * 	address size = 2 (0x67 prefix in 32-bit mode)
		 * mod = 11 - no memory operand
		 *
		 * If address size = 2...
		 * mod = 00, r/m = 110 - displacement field is 2 bytes
		 *
		 * If address size != 2...
		 * mod != 11, r/m = 100 - SIB byte exists
		 * mod = 00, SIB base = 101 - displacement field is 4 bytes
		 * mod = 00, r/m = 101 - rip-relative addressing, displacement
		 * 	field is 4 bytes
		 */
		mod = X86_MODRM_MOD(insn->modrm.value);
		rm = X86_MODRM_RM(insn->modrm.value);
		base = X86_SIB_BASE(insn->sib.value);
		if (mod == 3)
			goto out;
		if (mod == 1) {
			insn_field_set(&insn->displacement,
				       get_next(signed char, insn), 1);
		} else if (insn->addr_bytes == 2) {
			if ((mod == 0 && rm == 6) || mod == 2) {
				insn_field_set(&insn->displacement,
					       get_next(short, insn), 2);
			}
		} else {
			if ((mod == 0 && rm == 5) || mod == 2 ||
			    (mod == 0 && base == 5)) {
				insn_field_set(&insn->displacement,
					       get_next(int, insn), 4);
			}
		}
	}
out:
	insn->displacement.got = 1;
	return 0;

err_out:
	return -ENODATA;
}

/* Decode moffset16/32/64. Return 0 if failed */
static int __get_moffset(struct insn *insn)
{
	switch (insn->addr_bytes) {
	case 2:
		insn_field_set(&insn->moffset1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->moffset1, get_next(int, insn), 4);
		break;
	case 8:
		/* 64-bit moffset is split across the two 32-bit fields. */
		insn_field_set(&insn->moffset1, get_next(int, insn), 4);
		insn_field_set(&insn->moffset2, get_next(int, insn), 4);
		break;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}
	insn->moffset1.got = insn->moffset2.got = 1;

	return 1;

err_out:
	return 0;
}

/* Decode imm v32(Iz). Return 0 if failed */
static int __get_immv32(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate, get_next(short, insn), 2);
		break;
	case 4:
	case 8:
		/* Iz immediates are at most 32 bits even with 64-bit operands. */
		insn_field_set(&insn->immediate, get_next(int, insn), 4);
		break;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}

	return 1;

err_out:
	return 0;
}

/* Decode imm v64(Iv/Ov), Return 0 if failed */
static int __get_immv(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn->immediate1.nbytes = 4;
		break;
	case 8:
		/* 64-bit immediate is split across the two 32-bit fields. */
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn_field_set(&insn->immediate2, get_next(int, insn), 4);
		break;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}
	insn->immediate1.got = insn->immediate2.got = 1;

	return 1;
err_out:
	return 0;
}

/* Decode ptr16:16/32(Ap) */
static int __get_immptr(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		break;
	case 8:
		/* ptr16:64 does not exist (no segment) */
		return 0;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}
	/* Trailing 16-bit segment selector. */
	insn_field_set(&insn->immediate2, get_next(unsigned short, insn), 2);
	insn->immediate1.got = insn->immediate2.got = 1;

	return 1;
err_out:
	return 0;
}

/**
 * insn_get_immediate() - Get the immediate in an instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * displacement bytes.
 * Basically, most of immediates are sign-expanded. Unsigned-value can be
 * computed by bit masking with ((1 << (nbytes * 8)) - 1)
 *
 * Returns:
 * 0:  on success
 * < 0: on error
 */
int insn_get_immediate(struct insn *insn)
{
	int ret;

	if (insn->immediate.got)
		return 0;

	ret = insn_get_displacement(insn);
	if (ret)
		return ret;

	if (inat_has_moffset(insn->attr)) {
		if (!__get_moffset(insn))
			goto err_out;
		goto done;
	}

	if (!inat_has_immediate(insn->attr))
		/* no immediates */
		goto done;

	switch (inat_immediate_size(insn->attr)) {
	case INAT_IMM_BYTE:
		insn_field_set(&insn->immediate, get_next(signed char, insn), 1);
		break;
	case INAT_IMM_WORD:
		insn_field_set(&insn->immediate, get_next(short, insn), 2);
		break;
	case INAT_IMM_DWORD:
		insn_field_set(&insn->immediate, get_next(int, insn), 4);
		break;
	case INAT_IMM_QWORD:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn_field_set(&insn->immediate2, get_next(int, insn), 4);
		break;
	case INAT_IMM_PTR:
		if (!__get_immptr(insn))
			goto err_out;
		break;
	case INAT_IMM_VWORD32:
		if (!__get_immv32(insn))
			goto err_out;
		break;
	case INAT_IMM_VWORD:
		if (!__get_immv(insn))
			goto err_out;
		break;
	default:
		/* Here, insn must have an immediate, but failed */
		goto err_out;
	}
	if (inat_has_second_immediate(insn->attr)) {
		insn_field_set(&insn->immediate2, get_next(signed char, insn), 1);
	}
done:
	insn->immediate.got = 1;
	return 0;

err_out:
	return -ENODATA;
}

/**
 * insn_get_length() - Get the length of instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * immediates bytes.
 *
 * Returns:
 *  - 0 on success
 *  - < 0 on error
 */
int insn_get_length(struct insn *insn)
{
	int ret;

	if (insn->length)
		return 0;

	ret = insn_get_immediate(insn);
	if (ret)
		return ret;

	/* Length is how far next_byte advanced from the start of the buffer. */
	insn->length = (unsigned char)((unsigned long)insn->next_byte
				     - (unsigned long)insn->kaddr);

	return 0;
}

/* Ensure this instruction is decoded completely */
static inline int insn_complete(struct insn *insn)
{
	return insn->opcode.got && insn->modrm.got && insn->sib.got &&
		insn->displacement.got && insn->immediate.got;
}

/**
 * insn_decode() - Decode an x86 instruction
 * @insn:	&struct insn to be initialized
 * @kaddr:	address (in kernel memory) of instruction (or copy thereof)
 * @buf_len:	length of the insn buffer at @kaddr
 * @m:		insn mode, see enum insn_mode
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m)
{
	int ret;

/* #define INSN_MODE_KERN	-1 __ignore_sync_check__ mode is only valid in the kernel */
#define INSN_MODE_KERN	(enum insn_mode)-1 /* __ignore_sync_check__ mode is only valid in the kernel */

	if (m == INSN_MODE_KERN)
		insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64));
	else
		insn_init(insn, kaddr, buf_len, m == INSN_MODE_64);

	ret = insn_get_length(insn);
	if (ret)
		return ret;

	if (insn_complete(insn))
		return 0;

	return -EINVAL;
}