// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <stdio.h>
#include <stdlib.h>

/* Stub out the kernel's branch-prediction hint for userspace builds. */
#define unlikely(cond) (cond)
#include <asm/insn.h>
/* Pull in the kernel's x86 instruction decoder directly, as source. */
#include "../../../arch/x86/lib/inat.c"
#include "../../../arch/x86/lib/insn.c"

#define CONFIG_64BIT 1
#include <asm/nops.h>

#include <asm/orc_types.h>
#include <objtool/check.h>
#include <objtool/disas.h>
#include <objtool/elf.h>
#include <objtool/arch.h>
#include <objtool/warn.h>
#include <objtool/builtin.h>
#include <arch/elf.h>

/* Return true if @name is the x86 ftrace entry symbol. */
int arch_ftrace_match(const char *name)
{
	return !strcmp(name, "__fentry__");
}

/*
 * Classify the object by ELF machine type.
 *
 * Returns 1 for x86-64, 0 for i386, -1 (with an error message) for
 * anything else.
 */
static int is_x86_64(const struct elf *elf)
{
	switch (elf->ehdr.e_machine) {
	case EM_X86_64:
		return 1;
	case EM_386:
		return 0;
	default:
		ERROR("unexpected ELF machine type %d", elf->ehdr.e_machine);
		return -1;
	}
}

/*
 * Return true if @reg is callee-saved (preserved across calls) per the
 * x86-64 SysV ABI; all other registers, including SP/RA, report false.
 */
bool arch_callee_saved_reg(unsigned char reg)
{
	switch (reg) {
	case CFI_BP:
	case CFI_BX:
	case CFI_R12:
	case CFI_R13:
	case CFI_R14:
	case CFI_R15:
		return true;

	case CFI_AX:
	case CFI_CX:
	case CFI_DX:
	case CFI_SI:
	case CFI_DI:
	case CFI_SP:
	case CFI_R8:
	case CFI_R9:
	case CFI_R10:
	case CFI_R11:
	case CFI_RA:
	default:
		return false;
	}
}

/* Undo the effects of __pa_symbol() if necessary */
static unsigned long phys_to_virt(unsigned long pa)
{
	s64 va = pa;

	/*
	 * NOTE(review): kernel virtual addresses are negative when viewed
	 * as s64 and pass through unchanged; a positive (physical) value
	 * only has bit 31 cleared here.  Presumably this matches how
	 * __pa_symbol() relocation addends are encoded — confirm against
	 * the kernel's __START_KERNEL_map layout if this ever misbehaves.
	 */
	if (va > 0)
		va &= ~(0x80000000);

	return va;
}

/*
 * Compute the effective addend of @reloc as seen from @insn: for
 * PC-relative relocation types, rebase the addend so it is relative to
 * the end of the instruction, then undo any __pa_symbol() translation.
 */
s64 arch_insn_adjusted_addend(struct instruction *insn, struct reloc *reloc)
{
	s64 addend = reloc_addend(reloc);

	if (arch_pc_relative_reloc(reloc))
		addend += insn->offset + insn->len - reloc_offset(reloc);

	return phys_to_virt(addend);
}

/*
 * Linearly decode instructions from the start of @sec until finding the
 * one that contains byte @offset; report its start and length via
 * @insn_off/@insn_len.
 *
 * NOTE(review): the insn_decode() return value is ignored here; if a
 * decode ever failed with insn.length == 0 this loop would not
 * terminate.  Presumably callers only pass offsets inside valid text —
 * verify before reusing this helper elsewhere.
 */
static void scan_for_insn(struct section *sec, unsigned long offset,
			  unsigned long *insn_off, unsigned int *insn_len)
{
	unsigned long o = 0;
	struct insn insn;

	while (1) {

		insn_decode(&insn, sec->data->d_buf + o, sec_size(sec) - o,
			    INSN_MODE_64);

		if (o + insn.length > offset) {
			*insn_off = o;
			*insn_len = insn.length;
			return;
		}

		o += insn.length;
	}
}

/*
 * Normalize a relocation addend so it is always relative to the end of
 * the containing instruction:
 *
 *  - R_X86_64_PLT32 always applies to a 4-byte field ending the insn,
 *    so just add 4;
 *  - R_X86_64_PC32 in a text section may sit mid-instruction (e.g. in
 *    an immediate), so decode to find the instruction boundary;
 *  - everything else is returned unchanged.
 */
u64 arch_adjusted_addend(struct reloc *reloc)
{
	unsigned int type = reloc_type(reloc);
	s64 addend = reloc_addend(reloc);
	unsigned long insn_off;
	unsigned int insn_len;

	if (type == R_X86_64_PLT32)
		return addend + 4;

	if (type != R_X86_64_PC32 || !is_text_sec(reloc->sec->base))
		return addend;

	scan_for_insn(reloc->sec->base, reloc_offset(reloc),
		      &insn_off, &insn_len);

	return addend + insn_off + insn_len - reloc_offset(reloc);
}

/*
 * x86 relative jumps/calls are relative to the end of the instruction,
 * so the destination is: insn end + signed immediate.
 */
unsigned long arch_jump_destination(struct instruction *insn)
{
	return insn->offset + insn->len + insn->immediate;
}

bool arch_pc_relative_reloc(struct reloc *reloc)
{
	/*
	 * All relocation types where P (the address of the target)
	 * is included in the computation.
	 */
	switch (reloc_type(reloc)) {
	case R_X86_64_PC8:
	case R_X86_64_PC16:
	case R_X86_64_PC32:
	case R_X86_64_PC64:

	case R_X86_64_PLT32:
	case R_X86_64_GOTPC32:
	case R_X86_64_GOTPCREL:
		return true;

	default:
		break;
	}

	return false;
}

/*
 * Allocate a stack_op, append it to the insn's ops list, and execute the
 * attached block exactly once to fill it in.  Returns -1 from the
 * enclosing function on allocation failure.
 */
#define ADD_OP(op) \
	if (!(op = calloc(1, sizeof(*op)))) \
		return -1; \
	else for (*ops_list = op, ops_list = &op->next; op; op = NULL)

/*
 * Helpers to decode ModRM/SIB:
 *
 * r/m| AX  CX  DX  BX |  SP |  BP |  SI  DI |
 *    | R8  R9 R10 R11 | R12 | R13 | R14 R15 |
 * Mod+----------------+-----+-----+---------+
 * 00 |    [r/m]       |[SIB]|[IP+]|  [r/m]  |
 * 01 |  [r/m + d8]    |[S+d]|   [r/m + d8]  |
 * 10 |  [r/m + d32]   |[S+D]|   [r/m + d32] |
 * 11 |                   r/ m               |
 */

#define mod_is_mem()	(modrm_mod != 3)
#define mod_is_reg()	(modrm_mod == 3)

#define is_RIP()	((modrm_rm & 7) == CFI_BP && modrm_mod == 0)
#define have_SIB()	((modrm_rm & 7) == CFI_SP && mod_is_mem())

/*
 * Check the ModRM register. If there is a SIB byte then check with
 * the SIB base register. But if the SIB base is 5 (i.e. CFI_BP) and
 * ModRM mod is 0 then there is no base register.
 */
#define rm_is(reg)	(have_SIB() ?					\
			 sib_base == (reg) && sib_index == CFI_SP &&	\
			 (sib_base != CFI_BP || modrm_mod != 0) :	\
			 modrm_rm == (reg))

#define rm_is_mem(reg)	(mod_is_mem() && !is_RIP() && rm_is(reg))
#define rm_is_reg(reg)	(mod_is_reg() && modrm_rm == (reg))

/*
 * 0x3e is the DS segment-override prefix, which doubles as the NOTRACK
 * prefix on indirect branches (suppresses CET endbr tracking).
 */
static bool has_notrack_prefix(struct insn *insn)
{
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		if (insn->prefixes.bytes[i] == 0x3e)
			return true;
	}

	return false;
}

/*
 * Decode the instruction at @sec:@offset (at most @maxlen bytes) and
 * fill in @insn: its length, its objtool type (INSN_*), its immediate,
 * and the list of stack operations it performs.  Only instructions that
 * affect control flow or the stack frame are classified; everything
 * else is left as INSN_OTHER.
 *
 * Returns 0 on success, -1 on decode/allocation failure.
 */
int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
			    unsigned long offset, unsigned int maxlen,
			    struct instruction *insn)
{
	struct stack_op **ops_list = &insn->stack_ops;
	const struct elf *elf = file->elf;
	struct insn ins;
	int x86_64, ret;
	unsigned char op1, op2, op3, prefix,
		      rex = 0, rex_b = 0, rex_r = 0, rex_w = 0, rex_x = 0,
		      modrm = 0, modrm_mod = 0, modrm_rm = 0, modrm_reg = 0,
		      sib = 0, /* sib_scale = 0, */ sib_index = 0, sib_base = 0;
	struct stack_op *op = NULL;
	struct symbol *sym;
	u64 imm;

	x86_64 = is_x86_64(elf);
	if (x86_64 == -1)
		return -1;

	ret = insn_decode(&ins, sec->data->d_buf + offset, maxlen,
			  x86_64 ? INSN_MODE_64 : INSN_MODE_32);
	if (ret < 0) {
		ERROR("can't decode instruction at %s:0x%lx", sec->name, offset);
		return -1;
	}

	insn->len = ins.length;
	insn->type = INSN_OTHER;

	/* VEX-encoded (SIMD) instructions are ignored entirely. */
	if (ins.vex_prefix.nbytes)
		return 0;

	prefix = ins.prefixes.bytes[0];

	op1 = ins.opcode.bytes[0];
	op2 = ins.opcode.bytes[1];
	op3 = ins.opcode.bytes[2];

	/* Unpack the REX prefix bits into individual flags. */
	if (ins.rex_prefix.nbytes) {
		rex = ins.rex_prefix.bytes[0];
		rex_w = X86_REX_W(rex) >> 3;
		rex_r = X86_REX_R(rex) >> 2;
		rex_x = X86_REX_X(rex) >> 1;
		rex_b = X86_REX_B(rex);
	}

	/* REX.R/.B extend the ModRM reg/rm fields to 16 registers. */
	if (ins.modrm.nbytes) {
		modrm = ins.modrm.bytes[0];
		modrm_mod = X86_MODRM_MOD(modrm);
		modrm_reg = X86_MODRM_REG(modrm) + 8*rex_r;
		modrm_rm  = X86_MODRM_RM(modrm) + 8*rex_b;
	}

	if (ins.sib.nbytes) {
		sib = ins.sib.bytes[0];
		/* sib_scale = X86_SIB_SCALE(sib); */
		sib_index = X86_SIB_INDEX(sib) + 8*rex_x;
		sib_base  = X86_SIB_BASE(sib)  + 8*rex_b;
	}

	switch (op1) {

	case 0x1:
	case 0x29:
		if (rex_w && rm_is_reg(CFI_SP)) {

			/* add/sub reg, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_ADD;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
		}
		break;

	case 0x50 ... 0x57:

		/* push reg */
		ADD_OP(op) {
			op->src.type = OP_SRC_REG;
			op->src.reg = (op1 & 0x7) + 8*rex_b;
			op->dest.type = OP_DEST_PUSH;
		}

		break;

	case 0x58 ... 0x5f:

		/* pop reg */
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = (op1 & 0x7) + 8*rex_b;
		}

		break;

	case 0x68:
	case 0x6a:
		/* push immediate */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSH;
		}
		break;

	case 0x70 ... 0x7f:
		/* Jcc rel8 */
		insn->type = INSN_JUMP_CONDITIONAL;
		break;

	case 0x80 ... 0x83:
		/*
		 * 1000 00sw : mod OP r/m : immediate
		 *
		 * s - sign extend immediate
		 * w - imm8 / imm32
		 *
		 * OP: 000 ADD    100 AND
		 *     001 OR     101 SUB
		 *     010 ADC    110 XOR
		 *     011 SBB    111 CMP
		 */

		/* 64bit only */
		if (!rex_w)
			break;

		/* %rsp target only */
		if (!rm_is_reg(CFI_SP))
			break;

		imm = ins.immediate.value;
		if (op1 & 2) { /* sign extend */
			if (op1 & 1) { /* imm32 */
				imm <<= 32;
				imm = (s64)imm >> 32;
			} else { /* imm8 */
				imm <<= 56;
				imm = (s64)imm >> 56;
			}
		}

		switch (modrm_reg & 7) {
		case 5:
			/* SUB is modeled as ADD of the negated immediate. */
			imm = -imm;
			fallthrough;
		case 0:
			/* add/sub imm, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_ADD;
				op->src.reg = CFI_SP;
				op->src.offset = imm;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;

		case 4:
			/* and imm, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_AND;
				op->src.reg = CFI_SP;
				op->src.offset = ins.immediate.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;

		default:
			/* ERROR ? */
			break;
		}

		break;

	case 0x89:
		if (!rex_w)
			break;

		if (modrm_reg == CFI_SP) {

			if (mod_is_reg()) {
				/* mov %rsp, reg */
				ADD_OP(op) {
					op->src.type = OP_SRC_REG;
					op->src.reg = CFI_SP;
					op->dest.type = OP_DEST_REG;
					op->dest.reg = modrm_rm;
				}
				break;

			} else {
				/* skip RIP relative displacement */
				if (is_RIP())
					break;

				/* skip nontrivial SIB */
				if (have_SIB()) {
					modrm_rm = sib_base;
					if (sib_index != CFI_SP)
						break;
				}

				/* mov %rsp, disp(%reg) */
				ADD_OP(op) {
					op->src.type = OP_SRC_REG;
					op->src.reg = CFI_SP;
					op->dest.type = OP_DEST_REG_INDIRECT;
					op->dest.reg = modrm_rm;
					op->dest.offset = ins.displacement.value;
				}
				break;
			}

			/* NOTE(review): unreachable — both branches above break. */
			break;
		}

		if (rm_is_reg(CFI_SP)) {

			/* mov reg, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;
		}

		fallthrough;
	case 0x88:
		if (!rex_w)
			break;

		if (rm_is_mem(CFI_BP)) {

			/* mov reg, disp(%rbp) */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG_INDIRECT;
				op->dest.reg = CFI_BP;
				op->dest.offset = ins.displacement.value;
			}
			break;
		}

		if (rm_is_mem(CFI_SP)) {

			/* mov reg, disp(%rsp) */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG_INDIRECT;
				op->dest.reg = CFI_SP;
				op->dest.offset = ins.displacement.value;
			}
			break;
		}

		break;

	case 0x8b:
		if (!rex_w)
			break;

		if (rm_is_mem(CFI_BP)) {

			/* mov disp(%rbp), reg */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG_INDIRECT;
				op->src.reg = CFI_BP;
				op->src.offset = ins.displacement.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = modrm_reg;
			}
			break;
		}

		if (rm_is_mem(CFI_SP)) {

			/* mov disp(%rsp), reg */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG_INDIRECT;
				op->src.reg = CFI_SP;
				op->src.offset = ins.displacement.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = modrm_reg;
			}
			break;
		}

		break;

	case 0x8d:
		/* LEA with a register operand is not a valid encoding. */
		if (mod_is_reg()) {
			WARN("invalid LEA encoding at %s:0x%lx", sec->name, offset);
			break;
		}

		/* skip non 64bit ops */
		if (!rex_w)
			break;

		/* skip nontrivial SIB */
		if (have_SIB()) {
			modrm_rm = sib_base;
			if (sib_index != CFI_SP)
				break;
		}

		/* lea disp(%rip), %dst */
		if (is_RIP()) {
			insn->type = INSN_LEA_RIP;
			break;
		}

		/* lea disp(%src), %dst */
		ADD_OP(op) {
			op->src.offset = ins.displacement.value;
			if (!op->src.offset) {
				/* lea (%src), %dst */
				op->src.type = OP_SRC_REG;
			} else {
				/* lea disp(%src), %dst */
				op->src.type = OP_SRC_ADD;
			}
			op->src.reg = modrm_rm;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = modrm_reg;
		}
		break;

	case 0x8f:
		/* pop to mem */
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_MEM;
		}
		break;

	case 0x90:
		if (rex_b) /* XCHG %r8, %rax */
			break;

		if (prefix == 0xf3) /* REP NOP := PAUSE */
			break;

		insn->type = INSN_NOP;
		break;

	case 0x9c:
		/* pushf */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSHF;
		}
		break;

	case 0x9d:
		/* popf */
		ADD_OP(op) {
			op->src.type = OP_SRC_POPF;
			op->dest.type = OP_DEST_MEM;
		}
		break;

	case 0x0f:

		/* Two/three-byte opcode space. */
		if (op2 == 0x01) {

			switch (insn_last_prefix_id(&ins)) {
			case INAT_PFX_REPE:
			case INAT_PFX_REPNE:
				if (modrm == 0xca)
					/* eretu/erets */
					insn->type = INSN_SYSRET;
				break;
			default:
				if (modrm == 0xca)
					insn->type = INSN_CLAC;
				else if (modrm == 0xcb)
					insn->type = INSN_STAC;
				break;
			}
		} else if (op2 >= 0x80 && op2 <= 0x8f) {

			/* Jcc rel32 */
			insn->type = INSN_JUMP_CONDITIONAL;

		} else if (op2 == 0x05 || op2 == 0x34) {

			/* syscall, sysenter */
			insn->type = INSN_SYSCALL;

		} else if (op2 == 0x07 || op2 == 0x35) {

			/* sysret, sysexit */
			insn->type = INSN_SYSRET;

		} else if (op2 == 0x0b || op2 == 0xb9) {

			/* ud2, ud1 */
			insn->type = INSN_BUG;

		} else if (op2 == 0x1f) {

			/* 0f 1f /0 := NOPL */
			if (modrm_reg == 0)
				insn->type = INSN_NOP;

		} else if (op2 == 0x1e) {

			/* f3 0f 1e fa/fb := endbr64/endbr32 */
			if (prefix == 0xf3 && (modrm == 0xfa || modrm == 0xfb))
				insn->type = INSN_ENDBR;


		} else if (op2 == 0x38 && op3 == 0xf8) {
			if (ins.prefixes.nbytes == 1 &&
			    ins.prefixes.bytes[0] == 0xf2) {
				/* ENQCMD cannot be used in the kernel. */
				WARN("ENQCMD instruction at %s:%lx", sec->name, offset);
			}

		} else if (op2 == 0xa0 || op2 == 0xa8) {

			/* push fs/gs */
			ADD_OP(op) {
				op->src.type = OP_SRC_CONST;
				op->dest.type = OP_DEST_PUSH;
			}

		} else if (op2 == 0xa1 || op2 == 0xa9) {

			/* pop fs/gs */
			ADD_OP(op) {
				op->src.type = OP_SRC_POP;
				op->dest.type = OP_DEST_MEM;
			}
		}

		break;

	case 0xc9:
		/*
		 * leave
		 *
		 * equivalent to:
		 * mov bp, sp
		 * pop bp
		 */
		ADD_OP(op) {
			op->src.type = OP_SRC_REG;
			op->src.reg = CFI_BP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = CFI_SP;
		}
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = CFI_BP;
		}
		break;

	case 0xcc:
		/* int3 */
		insn->type = INSN_TRAP;
		break;

	case 0xe3:
		/* jecxz/jrcxz */
		insn->type = INSN_JUMP_CONDITIONAL;
		break;

	case 0xe9:
	case 0xeb:
		/* jmp rel32 / jmp rel8 */
		insn->type = INSN_JUMP_UNCONDITIONAL;
		break;

	case 0xc2:
	case 0xc3:
		/* ret imm16 / ret */
		insn->type = INSN_RETURN;
		break;

	case 0xc7: /* mov imm, r/m */
		if (!opts.noinstr)
			break;

		/*
		 * Detect "mov $imm32, disp32(%rip)" writes into pv_ops[]
		 * from .init.text and record the patched slot so the
		 * paravirt targets can be validated.
		 * Length 3+4+4 = opcode+modrm+disp32, imm32.
		 */
		if (ins.length == 3+4+4 && !strncmp(sec->name, ".init.text", 10)) {
			struct reloc *immr, *disp;
			struct symbol *func;
			int idx;

			immr = find_reloc_by_dest(elf, (void *)sec, offset+3);
			disp = find_reloc_by_dest(elf, (void *)sec, offset+7);

			if (!immr || strcmp(immr->sym->name, "pv_ops"))
				break;

			idx = (reloc_addend(immr) + 8) / sizeof(void *);

			/*
			 * NOTE(review): @disp is dereferenced without a NULL
			 * check; presumably a pv_ops store always carries a
			 * displacement reloc — verify.
			 */
			func = disp->sym;
			if (disp->sym->type == STT_SECTION)
				func = find_symbol_by_offset(disp->sym->sec, reloc_addend(disp));
			if (!func) {
				ERROR("no func for pv_ops[]");
				return -1;
			}

			objtool_pv_add(file, idx, func);
		}

		break;

	case 0xcf: /* iret */
		/*
		 * Handle sync_core(), which has an IRET to self.
		 * All other IRET are in STT_NONE entry code.
		 */
		sym = find_symbol_containing(sec, offset);
		if (sym && sym->type == STT_FUNC) {
			ADD_OP(op) {
				/* add $40, %rsp */
				op->src.type = OP_SRC_ADD;
				op->src.reg = CFI_SP;
				op->src.offset = 5*8;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;
		}

		fallthrough;

	case 0xca: /* retf */
	case 0xcb: /* retf */
		insn->type = INSN_SYSRET;
		break;

	case 0xd6: /* udb */
		insn->type = INSN_BUG;
		break;

	case 0xe0: /* loopne */
	case 0xe1: /* loope */
	case 0xe2: /* loop */
		insn->type = INSN_JUMP_CONDITIONAL;
		break;

	case 0xe8:
		insn->type = INSN_CALL;
		/*
		 * For the impact on the stack, a CALL behaves like
		 * a PUSH of an immediate value (the return address).
		 */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSH;
		}
		break;

	case 0xfc:
		insn->type = INSN_CLD;
		break;

	case 0xfd:
		insn->type = INSN_STD;
		break;

	case 0xff:
		/* Group 5: /2,/3 call, /4 jmp, /5 jmpf, /6 push */
		if (modrm_reg == 2 || modrm_reg == 3) {

			insn->type = INSN_CALL_DYNAMIC;
			if (has_notrack_prefix(&ins))
				WARN("notrack prefix found at %s:0x%lx", sec->name, offset);

		} else if (modrm_reg == 4) {

			insn->type = INSN_JUMP_DYNAMIC;
			if (has_notrack_prefix(&ins))
				WARN("notrack prefix found at %s:0x%lx", sec->name, offset);

		} else if (modrm_reg == 5) {

			/* jmpf */
			insn->type = INSN_SYSRET;

		} else if (modrm_reg == 6) {

			/* push from mem */
			ADD_OP(op) {
				op->src.type = OP_SRC_CONST;
				op->dest.type = OP_DEST_PUSH;
			}
		}

		break;

	default:
		break;
	}

	/*
	 * Stash the immediate (or, failing that, the displacement) for
	 * users like arch_jump_destination().
	 */
	if (ins.immediate.nbytes)
		insn->immediate = ins.immediate.value;
	else if (ins.displacement.nbytes)
		insn->immediate = ins.displacement.value;

	return 0;
}

/*
 * Initial CFI state at function entry: all registers undefined, CFA at
 * %rsp+8, return address stored at CFA-8 (just pushed by CALL).
 */
void arch_initial_func_cfi_state(struct cfi_init_state *state)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		state->regs[i].base = CFI_UNDEFINED;
		state->regs[i].offset = 0;
	}

	/* initial CFA (call frame address) */
	state->cfa.base = CFI_SP;
	state->cfa.offset = 8;

	/* initial RA (return address) */
	state->regs[CFI_RA].base = CFI_CFA;
	state->regs[CFI_RA].offset = -8;
}

/* Return the canonical x86 NOP byte sequence of length @len (1..5). */
const char *arch_nop_insn(int len)
{
	static const char nops[5][5] = {
		{ BYTES_NOP1 },
		{ BYTES_NOP2 },
		{ BYTES_NOP3 },
		{ BYTES_NOP4 },
		{ BYTES_NOP5 },
	};

	if (len < 1 || len > 5) {
		ERROR("invalid NOP size: %d\n", len);
		return NULL;
	}

	return nops[len-1];
}

#define BYTE_RET	0xC3

/*
 * Return a RET sequence of length @len (1..5): ret, then int3 as a
 * speculation stopper, padded with a NOP.
 */
const char *arch_ret_insn(int len)
{
	static const char ret[5][5] = {
		{ BYTE_RET },
		{ BYTE_RET, 0xcc },
		{ BYTE_RET, 0xcc, BYTES_NOP1 },
		{ BYTE_RET, 0xcc, BYTES_NOP2 },
		{ BYTE_RET, 0xcc, BYTES_NOP3 },
	};

	if (len < 1 || len > 5) {
		ERROR("invalid RET size: %d\n", len);
		return NULL;
	}

	return ret[len-1];
}

/* Translate an ORC sp_reg encoding into a CFI base register. */
int arch_decode_hint_reg(u8 sp_reg, int *base)
{
	switch (sp_reg) {
	case ORC_REG_UNDEFINED:
		*base = CFI_UNDEFINED;
		break;
	case ORC_REG_SP:
		*base = CFI_SP;
		break;
	case ORC_REG_BP:
		*base = CFI_BP;
		break;
	case ORC_REG_SP_INDIRECT:
		*base = CFI_SP_INDIRECT;
		break;
	case ORC_REG_R10:
		*base = CFI_R10;
		break;
	case ORC_REG_R13:
		*base = CFI_R13;
		break;
	case ORC_REG_DI:
		*base = CFI_DI;
		break;
	case ORC_REG_DX:
		*base = CFI_DX;
		break;
	default:
		return -1;
	}

	return 0;
}

/* Retpoline thunks, with or without the __pi_ (position-independent) prefix. */
bool arch_is_retpoline(struct symbol *sym)
{
	return !strncmp(sym->name, "__x86_indirect_", 15) ||
	       !strncmp(sym->name, "__pi___x86_indirect_", 20);
}

bool arch_is_rethunk(struct symbol *sym)
{
	return !strcmp(sym->name, "__x86_return_thunk") ||
	       !strcmp(sym->name, "__pi___x86_return_thunk");
}

/* Return thunks whose entry point is embedded inside another symbol. */
bool arch_is_embedded_insn(struct symbol *sym)
{
	return !strcmp(sym->name, "retbleed_return_thunk") ||
	       !strcmp(sym->name, "srso_alias_safe_ret") ||
	       !strcmp(sym->name, "srso_safe_ret");
}

/* Width in bytes of the field patched by @reloc. */
unsigned int arch_reloc_size(struct reloc *reloc)
{
	switch (reloc_type(reloc)) {
	case R_X86_64_32:
	case R_X86_64_32S:
	case R_X86_64_PC32:
	case R_X86_64_PLT32:
		return 4;
	default:
		return 8;
	}
}

/* True if @reloc stores an absolute address (needs runtime relocation). */
bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
{
	switch (reloc_type(reloc)) {
	case R_X86_64_32:
	case R_X86_64_32S:
	case R_X86_64_64:
		return true;
	default:
		return false;
	}
}

#ifdef DISAS

/* Set up libopcodes for AT&T-syntax i386/x86-64 disassembly. */
int arch_disas_info_init(struct disassemble_info *dinfo)
{
	return disas_info_init(dinfo, bfd_arch_i386,
			       bfd_mach_i386_i386, bfd_mach_x86_64,
			       "att");
}

#endif /* DISAS */