// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * x86 instruction analysis
 *
 * Copyright (C) IBM Corporation, 2002, 2004, 2009
 */

#include <linux/kernel.h>
#ifdef __KERNEL__
#include <linux/string.h>
#else
#include <string.h>
#endif
#include <asm/inat.h> /* __ignore_sync_check__ */
#include <asm/insn.h> /* __ignore_sync_check__ */
#include <asm/unaligned.h> /* __ignore_sync_check__ */

#include <linux/errno.h>
#include <linux/kconfig.h>

#include <asm/emulate_prefix.h> /* __ignore_sync_check__ */

#define leXX_to_cpu(t, r)						\
({									\
	__typeof__(t) v;						\
	switch (sizeof(t)) {						\
	case 4: v = le32_to_cpu(r); break;				\
	case 2: v = le16_to_cpu(r); break;				\
	case 1: v = r; break;						\
	default:							\
		BUILD_BUG(); break;					\
	}								\
	v;								\
})

/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n)	\
	((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)

#define __get_next(t, insn)	\
	({ t r = get_unaligned((t *)(insn)->next_byte); (insn)->next_byte += sizeof(t); leXX_to_cpu(t, r); })

#define __peek_nbyte_next(t, insn, n)	\
	({ t r = get_unaligned((t *)(insn)->next_byte + n); leXX_to_cpu(t, r); })

#define get_next(t, insn)	\
	({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })

#define peek_nbyte_next(t, insn, n)	\
	({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })

#define peek_next(t, insn)	peek_nbyte_next(t, insn, 0)

/**
 * insn_init() - initialize struct insn
 * @insn:	&struct insn to be initialized
 * @kaddr:	address (in kernel memory) of instruction (or copy thereof)
 * @buf_len:	length of the insn buffer at @kaddr
 * @x86_64:	!0 for 64-bit kernel or 64-bit app
 */
void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
{
	/*
	 * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
	 * even if the input buffer is long enough to hold them.
	 */
	if (buf_len > MAX_INSN_SIZE)
		buf_len = MAX_INSN_SIZE;

	memset(insn, 0, sizeof(*insn));
	insn->kaddr = kaddr;
	insn->end_kaddr = kaddr + buf_len;
	insn->next_byte = kaddr;
	insn->x86_64 = x86_64;
	insn->opnd_bytes = 4;
	if (x86_64)
		insn->addr_bytes = 8;
	else
		insn->addr_bytes = 4;
}
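
/*
 * Example (illustrative sketch, not part of the decoder): initializing a
 * struct insn over a buffer copied from kernel text and decoding it
 * incrementally. The buffer @buf and its length @buf_len are hypothetical.
 *
 *	struct insn insn;
 *
 *	insn_init(&insn, buf, buf_len, IS_ENABLED(CONFIG_X86_64));
 *	insn_get_prefixes(&insn);
 *
 * Each insn_get_*() helper pulls in the earlier fields on demand, so a
 * caller may also jump straight to, e.g., insn_get_modrm().
 */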

static const insn_byte_t xen_prefix[] = { __XEN_EMULATE_PREFIX };
static const insn_byte_t kvm_prefix[] = { __KVM_EMULATE_PREFIX };

static int __insn_get_emulate_prefix(struct insn *insn,
				     const insn_byte_t *prefix, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (peek_nbyte_next(insn_byte_t, insn, i) != prefix[i])
			goto err_out;
	}

	insn->emulate_prefix_size = len;
	insn->next_byte += len;

	return 1;

err_out:
	return 0;
}

static void insn_get_emulate_prefix(struct insn *insn)
{
	if (__insn_get_emulate_prefix(insn, xen_prefix, sizeof(xen_prefix)))
		return;

	__insn_get_emulate_prefix(insn, kvm_prefix, sizeof(kvm_prefix));
}

/**
 * insn_get_prefixes - scan x86 instruction prefix bytes
 * @insn:	&struct insn containing instruction
 *
 * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
 * to point to the (first) opcode. No effect if @insn->prefixes.got
 * is already set.
 *
 * Returns:
 * 0: on success
 * < 0: on error
 */
int insn_get_prefixes(struct insn *insn)
{
	struct insn_field *prefixes = &insn->prefixes;
	insn_attr_t attr;
	insn_byte_t b, lb;
	int i, nb;

	if (prefixes->got)
		return 0;

	insn_get_emulate_prefix(insn);

	nb = 0;
	lb = 0;
	b = peek_next(insn_byte_t, insn);
	attr = inat_get_opcode_attribute(b);
	while (inat_is_legacy_prefix(attr)) {
		/* Skip if same prefix */
		for (i = 0; i < nb; i++)
			if (prefixes->bytes[i] == b)
				goto found;
		if (nb == 4)
			/* Invalid instruction */
			break;
		prefixes->bytes[nb++] = b;
		if (inat_is_address_size_prefix(attr)) {
			/* address size switches 2/4 or 4/8 */
			if (insn->x86_64)
				insn->addr_bytes ^= 12;
			else
				insn->addr_bytes ^= 6;
		} else if (inat_is_operand_size_prefix(attr)) {
			/* operand size switches 2/4 */
			insn->opnd_bytes ^= 6;
		}
found:
		prefixes->nbytes++;
		insn->next_byte++;
		lb = b;
		b = peek_next(insn_byte_t, insn);
		attr = inat_get_opcode_attribute(b);
	}
	/* Set the last prefix */
	if (lb && lb != insn->prefixes.bytes[3]) {
		if (unlikely(insn->prefixes.bytes[3])) {
			/* Swap the last prefix */
			b = insn->prefixes.bytes[3];
			for (i = 0; i < nb; i++)
				if (prefixes->bytes[i] == lb)
					insn_set_byte(prefixes, i, b);
		}
		insn_set_byte(&insn->prefixes, 3, lb);
	}

	/* Decode REX prefix */
	if (insn->x86_64) {
		b = peek_next(insn_byte_t, insn);
		attr = inat_get_opcode_attribute(b);
		if (inat_is_rex_prefix(attr)) {
			insn_field_set(&insn->rex_prefix, b, 1);
			insn->next_byte++;
			if (X86_REX_W(b))
				/* REX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		}
	}
	insn->rex_prefix.got = 1;

	/* Decode VEX prefix */
	b = peek_next(insn_byte_t, insn);
	attr = inat_get_opcode_attribute(b);
	if (inat_is_vex_prefix(attr)) {
		insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
		if (!insn->x86_64) {
			/*
			 * In 32-bit mode, if the [7:6] bits (mod bits of
			 * ModRM) on the second byte are not 11b, it is
			 * LDS or LES or BOUND.
			 */
			if (X86_MODRM_MOD(b2) != 3)
				goto vex_end;
		}
		insn_set_byte(&insn->vex_prefix, 0, b);
		insn_set_byte(&insn->vex_prefix, 1, b2);
		if (inat_is_evex_prefix(attr)) {
			b2 = peek_nbyte_next(insn_byte_t, insn, 2);
			insn_set_byte(&insn->vex_prefix, 2, b2);
			b2 = peek_nbyte_next(insn_byte_t, insn, 3);
			insn_set_byte(&insn->vex_prefix, 3, b2);
			insn->vex_prefix.nbytes = 4;
			insn->next_byte += 4;
			if (insn->x86_64 && X86_VEX_W(b2))
				/* VEX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else if (inat_is_vex3_prefix(attr)) {
			b2 = peek_nbyte_next(insn_byte_t, insn, 2);
			insn_set_byte(&insn->vex_prefix, 2, b2);
			insn->vex_prefix.nbytes = 3;
			insn->next_byte += 3;
			if (insn->x86_64 && X86_VEX_W(b2))
				/* VEX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else {
			/*
			 * For VEX2, fake VEX3-like byte#2.
			 * Makes it easier to decode vex.W, vex.vvvv,
			 * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
			 */
			insn_set_byte(&insn->vex_prefix, 2, b2 & 0x7f);
			insn->vex_prefix.nbytes = 2;
			insn->next_byte += 2;
		}
	}
vex_end:
	insn->vex_prefix.got = 1;

	prefixes->got = 1;

	return 0;

err_out:
	return -ENODATA;
}
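
/*
 * Example (illustrative sketch, not part of the decoder): after
 * insn_get_prefixes() succeeds, insn.opnd_bytes and insn.addr_bytes already
 * reflect any 0x66/0x67 legacy prefixes and REX.W/VEX.W, and
 * insn_last_prefix_id() identifies the last legacy prefix. The @insn
 * variable below is assumed to have been initialized with insn_init().
 *
 *	if (insn_get_prefixes(&insn) == 0) {
 *		int opnd_bytes = insn.opnd_bytes;
 *		int last_pfx = insn_last_prefix_id(&insn);
 *	}
 */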

/**
 * insn_get_opcode - collect opcode(s)
 * @insn:	&struct insn containing instruction
 *
 * Populates @insn->opcode, updates @insn->next_byte to point past the
 * opcode byte(s), and sets @insn->attr (except for groups).
 * If necessary, first collects any preceding (prefix) bytes.
 * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got
 * is already 1.
 *
 * Returns:
 * 0: on success
 * < 0: on error
 */
int insn_get_opcode(struct insn *insn)
{
	struct insn_field *opcode = &insn->opcode;
	int pfx_id, ret;
	insn_byte_t op;

	if (opcode->got)
		return 0;

	ret = insn_get_prefixes(insn);
	if (ret)
		return ret;

	/* Get first opcode */
	op = get_next(insn_byte_t, insn);
	insn_set_byte(opcode, 0, op);
	opcode->nbytes = 1;

	/* Check if there is VEX prefix or not */
	if (insn_is_avx(insn)) {
		insn_byte_t m, p;
		m = insn_vex_m_bits(insn);
		p = insn_vex_p_bits(insn);
		insn->attr = inat_get_avx_attribute(op, m, p);
		if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
		    (!inat_accept_vex(insn->attr) &&
		     !inat_is_group(insn->attr))) {
			/* This instruction is bad */
			insn->attr = 0;
			return -EINVAL;
		}
		/* VEX has only 1 byte for opcode */
		goto end;
	}

	insn->attr = inat_get_opcode_attribute(op);
	while (inat_is_escape(insn->attr)) {
		/* Get escaped opcode */
		op = get_next(insn_byte_t, insn);
		opcode->bytes[opcode->nbytes++] = op;
		pfx_id = insn_last_prefix_id(insn);
		insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
	}

	if (inat_must_vex(insn->attr)) {
		/* This instruction is bad */
		insn->attr = 0;
		return -EINVAL;
	}
end:
	opcode->got = 1;
	return 0;

err_out:
	return -ENODATA;
}

/**
 * insn_get_modrm - collect ModRM byte, if any
 * @insn:	&struct insn containing instruction
 *
 * Populates @insn->modrm and updates @insn->next_byte to point past the
 * ModRM byte, if any. If necessary, first collects the preceding bytes
 * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1.
 *
 * Returns:
 * 0: on success
 * < 0: on error
 */
int insn_get_modrm(struct insn *insn)
{
	struct insn_field *modrm = &insn->modrm;
	insn_byte_t pfx_id, mod;
	int ret;

	if (modrm->got)
		return 0;

	ret = insn_get_opcode(insn);
	if (ret)
		return ret;

	if (inat_has_modrm(insn->attr)) {
		mod = get_next(insn_byte_t, insn);
		insn_field_set(modrm, mod, 1);
		if (inat_is_group(insn->attr)) {
			pfx_id = insn_last_prefix_id(insn);
			insn->attr = inat_get_group_attribute(mod, pfx_id,
							      insn->attr);
			if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) {
				/* Bad insn */
				insn->attr = 0;
				return -EINVAL;
			}
		}
	}

	if (insn->x86_64 && inat_is_force64(insn->attr))
		insn->opnd_bytes = 8;

	modrm->got = 1;
	return 0;

err_out:
	return -ENODATA;
}
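
/*
 * Example (illustrative sketch, not part of the decoder): once the ModRM
 * byte has been collected, its mod/reg/rm fields can be extracted with the
 * X86_MODRM_*() helpers. The @insn variable below is assumed to have been
 * initialized with insn_init().
 *
 *	if (insn_get_modrm(&insn) == 0 && insn.modrm.nbytes) {
 *		insn_byte_t mod = X86_MODRM_MOD(insn.modrm.value);
 *		insn_byte_t reg = X86_MODRM_REG(insn.modrm.value);
 *		insn_byte_t rm = X86_MODRM_RM(insn.modrm.value);
 *	}
 */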

/**
 * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * ModRM byte. No effect if @insn->x86_64 is 0.
 */
int insn_rip_relative(struct insn *insn)
{
	struct insn_field *modrm = &insn->modrm;
	int ret;

	if (!insn->x86_64)
		return 0;

	ret = insn_get_modrm(insn);
	if (ret)
		return 0;
	/*
	 * For rip-relative instructions, the mod field (top 2 bits)
	 * is zero and the r/m field (bottom 3 bits) is 0x5.
	 */
	return (modrm->nbytes && (modrm->bytes[0] & 0xc7) == 0x5);
}

/**
 * insn_get_sib() - Get the SIB byte of instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * ModRM byte.
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_get_sib(struct insn *insn)
{
	insn_byte_t modrm;
	int ret;

	if (insn->sib.got)
		return 0;

	ret = insn_get_modrm(insn);
	if (ret)
		return ret;

	if (insn->modrm.nbytes) {
		modrm = insn->modrm.bytes[0];
		if (insn->addr_bytes != 2 &&
		    X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
			insn_field_set(&insn->sib,
				       get_next(insn_byte_t, insn), 1);
		}
	}
	insn->sib.got = 1;

	return 0;

err_out:
	return -ENODATA;
}

/**
 * insn_get_displacement() - Get the displacement of instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * SIB byte.
 * The displacement value is sign-extended.
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_get_displacement(struct insn *insn)
{
	insn_byte_t mod, rm, base;
	int ret;

	if (insn->displacement.got)
		return 0;

	ret = insn_get_sib(insn);
	if (ret)
		return ret;

	if (insn->modrm.nbytes) {
		/*
		 * Interpreting the modrm byte:
		 * mod = 00 - no displacement fields (exceptions below)
		 * mod = 01 - 1-byte displacement field
		 * mod = 10 - displacement field is 4 bytes, or 2 bytes if
		 *	address size = 2 (0x67 prefix in 32-bit mode)
		 * mod = 11 - no memory operand
		 *
		 * If address size = 2...
		 * mod = 00, r/m = 110 - displacement field is 2 bytes
		 *
		 * If address size != 2...
		 * mod != 11, r/m = 100 - SIB byte exists
		 * mod = 00, SIB base = 101 - displacement field is 4 bytes
		 * mod = 00, r/m = 101 - rip-relative addressing, displacement
		 *	field is 4 bytes
		 */
		mod = X86_MODRM_MOD(insn->modrm.value);
		rm = X86_MODRM_RM(insn->modrm.value);
		base = X86_SIB_BASE(insn->sib.value);
		if (mod == 3)
			goto out;
		if (mod == 1) {
			insn_field_set(&insn->displacement,
				       get_next(signed char, insn), 1);
		} else if (insn->addr_bytes == 2) {
			if ((mod == 0 && rm == 6) || mod == 2) {
				insn_field_set(&insn->displacement,
					       get_next(short, insn), 2);
			}
		} else {
			if ((mod == 0 && rm == 5) || mod == 2 ||
			    (mod == 0 && base == 5)) {
				insn_field_set(&insn->displacement,
					       get_next(int, insn), 4);
			}
		}
	}
out:
	insn->displacement.got = 1;
	return 0;

err_out:
	return -ENODATA;
}
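
/*
 * Example (illustrative sketch, not part of the decoder): for a RIP-relative
 * instruction the memory operand is addressed relative to the end of the
 * instruction, so the effective address follows from the decoded length and
 * the sign-extended displacement. @ip (the address the instruction executes
 * from) and @target are assumptions of this sketch.
 *
 *	if (insn_rip_relative(&insn) && insn_get_length(&insn) == 0)
 *		target = ip + insn.length + insn.displacement.value;
 */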

/* Decode moffset16/32/64. Return 0 if failed */
static int __get_moffset(struct insn *insn)
{
	switch (insn->addr_bytes) {
	case 2:
		insn_field_set(&insn->moffset1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->moffset1, get_next(int, insn), 4);
		break;
	case 8:
		insn_field_set(&insn->moffset1, get_next(int, insn), 4);
		insn_field_set(&insn->moffset2, get_next(int, insn), 4);
		break;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}
	insn->moffset1.got = insn->moffset2.got = 1;

	return 1;

err_out:
	return 0;
}

/* Decode imm v32(Iz). Return 0 if failed */
static int __get_immv32(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate, get_next(short, insn), 2);
		break;
	case 4:
	case 8:
		insn_field_set(&insn->immediate, get_next(int, insn), 4);
		break;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}

	return 1;

err_out:
	return 0;
}

/* Decode imm v64(Iv/Ov). Return 0 if failed */
static int __get_immv(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn->immediate1.nbytes = 4;
		break;
	case 8:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn_field_set(&insn->immediate2, get_next(int, insn), 4);
		break;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}
	insn->immediate1.got = insn->immediate2.got = 1;

	return 1;
err_out:
	return 0;
}

/* Decode ptr16:16/32(Ap) */
static int __get_immptr(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn_field_set(&insn->immediate1, get_next(short, insn), 2);
		break;
	case 4:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		break;
	case 8:
		/* ptr16:64 does not exist (no segment) */
		return 0;
	default:	/* opnd_bytes must be modified manually */
		goto err_out;
	}
	insn_field_set(&insn->immediate2, get_next(unsigned short, insn), 2);
	insn->immediate1.got = insn->immediate2.got = 1;

	return 1;
err_out:
	return 0;
}

/**
 * insn_get_immediate() - Get the immediate in an instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * displacement bytes.
 * Most immediates are sign-extended; the unsigned value can be computed
 * by masking with ((1 << (nbytes * 8)) - 1).
 *
 * Returns:
 * 0: on success
 * < 0: on error
 */
int insn_get_immediate(struct insn *insn)
{
	int ret;

	if (insn->immediate.got)
		return 0;

	ret = insn_get_displacement(insn);
	if (ret)
		return ret;

	if (inat_has_moffset(insn->attr)) {
		if (!__get_moffset(insn))
			goto err_out;
		goto done;
	}

	if (!inat_has_immediate(insn->attr))
		/* no immediates */
		goto done;

	switch (inat_immediate_size(insn->attr)) {
	case INAT_IMM_BYTE:
		insn_field_set(&insn->immediate, get_next(signed char, insn), 1);
		break;
	case INAT_IMM_WORD:
		insn_field_set(&insn->immediate, get_next(short, insn), 2);
		break;
	case INAT_IMM_DWORD:
		insn_field_set(&insn->immediate, get_next(int, insn), 4);
		break;
	case INAT_IMM_QWORD:
		insn_field_set(&insn->immediate1, get_next(int, insn), 4);
		insn_field_set(&insn->immediate2, get_next(int, insn), 4);
		break;
	case INAT_IMM_PTR:
		if (!__get_immptr(insn))
			goto err_out;
		break;
	case INAT_IMM_VWORD32:
		if (!__get_immv32(insn))
			goto err_out;
		break;
	case INAT_IMM_VWORD:
		if (!__get_immv(insn))
			goto err_out;
		break;
	default:
		/* Here, insn must have an immediate, but failed */
		goto err_out;
	}
	if (inat_has_second_immediate(insn->attr)) {
		insn_field_set(&insn->immediate2, get_next(signed char, insn), 1);
	}
done:
	insn->immediate.got = 1;
	return 0;

err_out:
	return -ENODATA;
}
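
/*
 * Example (illustrative sketch, not part of the decoder): recovering the
 * unsigned representation of a sign-extended immediate, as described in the
 * kernel-doc above. A 64-bit accumulator avoids shifting by the full width
 * of the type when nbytes == 4. The @insn variable is assumed to have been
 * initialized with insn_init().
 *
 *	if (insn_get_immediate(&insn) == 0 && insn.immediate.nbytes) {
 *		u64 uimm = (u64)insn.immediate.value &
 *			   ((1ULL << (insn.immediate.nbytes * 8)) - 1);
 *	}
 */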

/**
 * insn_get_length() - Get the length of instruction
 * @insn:	&struct insn containing instruction
 *
 * If necessary, first collects the instruction up to and including the
 * immediate bytes.
 *
 * Returns:
 *  - 0 on success
 *  - < 0 on error
 */
int insn_get_length(struct insn *insn)
{
	int ret;

	if (insn->length)
		return 0;

	ret = insn_get_immediate(insn);
	if (ret)
		return ret;

	insn->length = (unsigned char)((unsigned long)insn->next_byte
				     - (unsigned long)insn->kaddr);

	return 0;
}

/* Ensure this instruction is decoded completely */
static inline int insn_complete(struct insn *insn)
{
	return insn->opcode.got && insn->modrm.got && insn->sib.got &&
		insn->displacement.got && insn->immediate.got;
}

/**
 * insn_decode() - Decode an x86 instruction
 * @insn:	&struct insn to be initialized
 * @kaddr:	address (in kernel memory) of instruction (or copy thereof)
 * @buf_len:	length of the insn buffer at @kaddr
 * @m:		insn mode, see enum insn_mode
 *
 * Returns:
 * 0: if decoding succeeded
 * < 0: otherwise.
 */
int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m)
{
	int ret;

/* #define INSN_MODE_KERN -1 __ignore_sync_check__ mode is only valid in the kernel */

	if (m == INSN_MODE_KERN)
		insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64));
	else
		insn_init(insn, kaddr, buf_len, m == INSN_MODE_64);

	ret = insn_get_length(insn);
	if (ret)
		return ret;

	if (insn_complete(insn))
		return 0;

	return -EINVAL;
}
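
/*
 * Example (illustrative sketch, not part of the decoder): the usual entry
 * point is insn_decode(), which initializes @insn and decodes all fields in
 * one call. The buffer @buf and its length @buf_len below are hypothetical.
 *
 *	struct insn insn;
 *	int ret;
 *
 *	ret = insn_decode(&insn, buf, buf_len, INSN_MODE_KERN);
 *	if (ret < 0)
 *		return ret;
 *
 * On success, insn.length and the individual fields (opcode, modrm, sib,
 * displacement, immediate) are populated.
 */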