1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
4 */
5
6 #include <stdio.h>
7 #include <stdlib.h>
8
9 #define unlikely(cond) (cond)
10 #include <asm/insn.h>
11 #include "../../../arch/x86/lib/inat.c"
12 #include "../../../arch/x86/lib/insn.c"
13
14 #define CONFIG_64BIT 1
15 #include <asm/nops.h>
16
17 #include <asm/orc_types.h>
18 #include <objtool/check.h>
19 #include <objtool/disas.h>
20 #include <objtool/elf.h>
21 #include <objtool/arch.h>
22 #include <objtool/warn.h>
23 #include <objtool/builtin.h>
24 #include <arch/elf.h>
25
26 const char *arch_reg_name[CFI_NUM_REGS] = {
27 "rax", "rcx", "rdx", "rbx",
28 "rsp", "rbp", "rsi", "rdi",
29 "r8", "r9", "r10", "r11",
30 "r12", "r13", "r14", "r15",
31 "ra"
32 };
33
/* x86 uses __fentry__ as the ftrace entry symbol. */
int arch_ftrace_match(const char *name)
{
	return strcmp(name, "__fentry__") == 0;
}
38
is_x86_64(const struct elf * elf)39 static int is_x86_64(const struct elf *elf)
40 {
41 switch (elf->ehdr.e_machine) {
42 case EM_X86_64:
43 return 1;
44 case EM_386:
45 return 0;
46 default:
47 ERROR("unexpected ELF machine type %d", elf->ehdr.e_machine);
48 return -1;
49 }
50 }
51
arch_callee_saved_reg(unsigned char reg)52 bool arch_callee_saved_reg(unsigned char reg)
53 {
54 switch (reg) {
55 case CFI_BP:
56 case CFI_BX:
57 case CFI_R12:
58 case CFI_R13:
59 case CFI_R14:
60 case CFI_R15:
61 return true;
62
63 case CFI_AX:
64 case CFI_CX:
65 case CFI_DX:
66 case CFI_SI:
67 case CFI_DI:
68 case CFI_SP:
69 case CFI_R8:
70 case CFI_R9:
71 case CFI_R10:
72 case CFI_R11:
73 case CFI_RA:
74 default:
75 return false;
76 }
77 }
78
/* Undo the effects of __pa_symbol() if necessary */
static unsigned long phys_to_virt(unsigned long pa)
{
	s64 va = pa;

	/*
	 * NOTE(review): ~(0x80000000) has type unsigned int, so it
	 * evaluates to 0x7fffffff and the zero-extended 64-bit mask also
	 * clears bits 32-63, not only bit 31.  Harmless if every positive
	 * addend reaching here is below 4G -- confirm against callers.
	 */
	if (va > 0)
		va &= ~(0x80000000);

	return va;
}
89
arch_insn_adjusted_addend(struct instruction * insn,struct reloc * reloc)90 s64 arch_insn_adjusted_addend(struct instruction *insn, struct reloc *reloc)
91 {
92 s64 addend = reloc_addend(reloc);
93
94 if (arch_pc_relative_reloc(reloc))
95 addend += insn->offset + insn->len - reloc_offset(reloc);
96
97 return phys_to_virt(addend);
98 }
99
scan_for_insn(struct section * sec,unsigned long offset,unsigned long * insn_off,unsigned int * insn_len)100 static void scan_for_insn(struct section *sec, unsigned long offset,
101 unsigned long *insn_off, unsigned int *insn_len)
102 {
103 unsigned long o = 0;
104 struct insn insn;
105
106 while (1) {
107
108 insn_decode(&insn, sec->data->d_buf + o, sec_size(sec) - o,
109 INSN_MODE_64);
110
111 if (o + insn.length > offset) {
112 *insn_off = o;
113 *insn_len = insn.length;
114 return;
115 }
116
117 o += insn.length;
118 }
119 }
120
arch_adjusted_addend(struct reloc * reloc)121 u64 arch_adjusted_addend(struct reloc *reloc)
122 {
123 unsigned int type = reloc_type(reloc);
124 s64 addend = reloc_addend(reloc);
125 unsigned long insn_off;
126 unsigned int insn_len;
127
128 if (type == R_X86_64_PLT32)
129 return addend + 4;
130
131 if (type != R_X86_64_PC32 || !is_text_sec(reloc->sec->base))
132 return addend;
133
134 scan_for_insn(reloc->sec->base, reloc_offset(reloc),
135 &insn_off, &insn_len);
136
137 return addend + insn_off + insn_len - reloc_offset(reloc);
138 }
139
arch_jump_destination(struct instruction * insn)140 unsigned long arch_jump_destination(struct instruction *insn)
141 {
142 return insn->offset + insn->len + insn->immediate;
143 }
144
arch_pc_relative_reloc(struct reloc * reloc)145 bool arch_pc_relative_reloc(struct reloc *reloc)
146 {
147 /*
148 * All relocation types where P (the address of the target)
149 * is included in the computation.
150 */
151 switch (reloc_type(reloc)) {
152 case R_X86_64_PC8:
153 case R_X86_64_PC16:
154 case R_X86_64_PC32:
155 case R_X86_64_PC64:
156
157 case R_X86_64_PLT32:
158 case R_X86_64_GOTPC32:
159 case R_X86_64_GOTPCREL:
160 return true;
161
162 default:
163 break;
164 }
165
166 return false;
167 }
168
/*
 * Allocate a stack_op, append it to the decoder's ops list, and execute
 * the attached block exactly once to populate it.  On allocation failure
 * this returns -1 from the *enclosing* function.  The trailing for-loop
 * runs its body once (op is reset to NULL afterwards).
 */
#define ADD_OP(op) \
	if (!(op = calloc(1, sizeof(*op)))) \
		return -1; \
	else for (*ops_list = op, ops_list = &op->next; op; op = NULL)

/*
 * Helpers to decode ModRM/SIB:
 *
 * r/m| AX CX DX BX |  SP |  BP |  SI  DI |
 *    | R8 R9 R10 R11| R12 | R13 | R14 R15 |
 * Mod+--------------+-----+-----+---------+
 *  00 |    [r/m]    |[SIB]|[IP+]|  [r/m]  |
 *  01 |  [r/m + d8] |[S+d]|   [r/m + d8]  |
 *  10 |  [r/m + d32]|[S+D]|  [r/m + d32]  |
 *  11 |                r/ m               |
 */

/* These read the decoder's local modrm_* / sib_* variables. */
#define mod_is_mem()	(modrm_mod != 3)
#define mod_is_reg()	(modrm_mod == 3)

/* mod=00, r/m=101 means RIP-relative; r/m=100 with a memory mod means SIB. */
#define is_RIP()	((modrm_rm & 7) == CFI_BP && modrm_mod == 0)
#define have_SIB()	((modrm_rm & 7) == CFI_SP && mod_is_mem())

/*
 * Check the ModRM register. If there is a SIB byte then check with
 * the SIB base register. But if the SIB base is 5 (i.e. CFI_BP) and
 * ModRM mod is 0 then there is no base register.
 */
#define rm_is(reg)	(have_SIB() ? \
			 sib_base == (reg) && sib_index == CFI_SP && \
			 (sib_base != CFI_BP || modrm_mod != 0) :	\
			 modrm_rm == (reg))

#define rm_is_mem(reg)	(mod_is_mem() && !is_RIP() && rm_is(reg))
#define rm_is_reg(reg)	(mod_is_reg() && modrm_rm == (reg))
204
has_notrack_prefix(struct insn * insn)205 static bool has_notrack_prefix(struct insn *insn)
206 {
207 int i;
208
209 for (i = 0; i < insn->prefixes.nbytes; i++) {
210 if (insn->prefixes.bytes[i] == 0x3e)
211 return true;
212 }
213
214 return false;
215 }
216
/*
 * Decode one x86 instruction at @sec + @offset (at most @maxlen bytes)
 * and classify it for objtool: sets insn->type, insn->len and
 * insn->immediate, and appends any stack-frame effects (push/pop,
 * %rsp/%rbp arithmetic, register spills/fills) to insn->stack_ops.
 *
 * Returns 0 on success, -1 on a hard decode failure.
 */
int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
			    unsigned long offset, unsigned int maxlen,
			    struct instruction *insn)
{
	struct stack_op **ops_list = &insn->stack_ops;
	const struct elf *elf = file->elf;
	struct insn ins;
	int x86_64, ret;
	unsigned char op1, op2, op3, prefix,
		      rex = 0, rex_b = 0, rex_r = 0, rex_w = 0, rex_x = 0,
		      modrm = 0, modrm_mod = 0, modrm_rm = 0, modrm_reg = 0,
		      sib = 0, /* sib_scale = 0, */ sib_index = 0, sib_base = 0;
	struct stack_op *op = NULL;
	struct symbol *sym;
	u64 imm;

	x86_64 = is_x86_64(elf);
	if (x86_64 == -1)
		return -1;

	ret = insn_decode(&ins, sec->data->d_buf + offset, maxlen,
			  x86_64 ? INSN_MODE_64 : INSN_MODE_32);
	if (ret < 0) {
		ERROR("can't decode instruction at %s:0x%lx", sec->name, offset);
		return -1;
	}

	insn->len = ins.length;
	insn->type = INSN_OTHER;

	/* VEX-encoded instructions are left as INSN_OTHER with no stack ops. */
	if (ins.vex_prefix.nbytes)
		return 0;

	/* bytes[] is fixed-size storage, so reading [0] is safe with no prefix */
	prefix = ins.prefixes.bytes[0];

	op1 = ins.opcode.bytes[0];
	op2 = ins.opcode.bytes[1];
	op3 = ins.opcode.bytes[2];

	if (ins.rex_prefix.nbytes) {
		rex = ins.rex_prefix.bytes[0];
		rex_w = X86_REX_W(rex) >> 3;
		rex_r = X86_REX_R(rex) >> 2;
		rex_x = X86_REX_X(rex) >> 1;
		rex_b = X86_REX_B(rex);
	}

	if (ins.modrm.nbytes) {
		modrm = ins.modrm.bytes[0];
		modrm_mod = X86_MODRM_MOD(modrm);
		modrm_reg = X86_MODRM_REG(modrm) + 8*rex_r;
		modrm_rm = X86_MODRM_RM(modrm) + 8*rex_b;
	}

	if (ins.sib.nbytes) {
		sib = ins.sib.bytes[0];
		/* sib_scale = X86_SIB_SCALE(sib); */
		sib_index = X86_SIB_INDEX(sib) + 8*rex_x;
		sib_base = X86_SIB_BASE(sib) + 8*rex_b;
	}

	switch (op1) {

	case 0x1:
	case 0x29:
		if (rex_w && rm_is_reg(CFI_SP)) {

			/* add/sub reg, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_ADD;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
		}
		break;

	case 0x50 ... 0x57:

		/* push reg */
		ADD_OP(op) {
			op->src.type = OP_SRC_REG;
			op->src.reg = (op1 & 0x7) + 8*rex_b;
			op->dest.type = OP_DEST_PUSH;
		}

		break;

	case 0x58 ... 0x5f:

		/* pop reg */
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = (op1 & 0x7) + 8*rex_b;
		}

		break;

	case 0x68:
	case 0x6a:
		/* push immediate */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSH;
		}
		break;

	case 0x70 ... 0x7f:
		insn->type = INSN_JUMP_CONDITIONAL;
		break;

	case 0x80 ... 0x83:
		/*
		 * 1000 00sw : mod OP r/m : immediate
		 *
		 * s - sign extend immediate
		 * w - imm8 / imm32
		 *
		 * OP: 000 ADD    100 AND
		 *     001 OR     101 SUB
		 *     010 ADC    110 XOR
		 *     011 SBB    111 CMP
		 */

		/* 64bit only */
		if (!rex_w)
			break;

		/* %rsp target only */
		if (!rm_is_reg(CFI_SP))
			break;

		imm = ins.immediate.value;
		if (op1 & 2) { /* sign extend */
			if (op1 & 1) { /* imm32 */
				imm <<= 32;
				imm = (s64)imm >> 32;
			} else { /* imm8 */
				imm <<= 56;
				imm = (s64)imm >> 56;
			}
		}

		switch (modrm_reg & 7) {
		case 5:
			/* SUB is modeled as ADD of the negated immediate */
			imm = -imm;
			fallthrough;
		case 0:
			/* add/sub imm, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_ADD;
				op->src.reg = CFI_SP;
				op->src.offset = imm;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;

		case 4:
			/* and imm, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_AND;
				op->src.reg = CFI_SP;
				op->src.offset = ins.immediate.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;

		default:
			/* ERROR ? */
			break;
		}

		break;

	case 0x89:
		if (!rex_w)
			break;

		if (mod_is_reg()) {
			/* mov reg, reg */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = modrm_rm;
			}
			break;
		}

		/* skip RIP relative displacement */
		if (is_RIP())
			break;

		/* skip nontrivial SIB */
		if (have_SIB()) {
			modrm_rm = sib_base;
			if (sib_index != CFI_SP)
				break;
		}

		/* mov %rsp, disp(%reg) */
		if (modrm_reg == CFI_SP) {
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = CFI_SP;
				op->dest.type = OP_DEST_REG_INDIRECT;
				op->dest.reg = modrm_rm;
				op->dest.offset = ins.displacement.value;
			}
			break;
		}

		fallthrough;
	case 0x88:
		if (!rex_w)
			break;

		if (rm_is_mem(CFI_BP)) {

			/* mov reg, disp(%rbp) */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG_INDIRECT;
				op->dest.reg = CFI_BP;
				op->dest.offset = ins.displacement.value;
			}
			break;
		}

		if (rm_is_mem(CFI_SP)) {

			/* mov reg, disp(%rsp) */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG_INDIRECT;
				op->dest.reg = CFI_SP;
				op->dest.offset = ins.displacement.value;
			}
			break;
		}

		break;

	case 0x8b:
		if (!rex_w)
			break;

		if (rm_is_mem(CFI_BP)) {

			/* mov disp(%rbp), reg */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG_INDIRECT;
				op->src.reg = CFI_BP;
				op->src.offset = ins.displacement.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = modrm_reg;
			}
			break;
		}

		if (rm_is_mem(CFI_SP)) {

			/* mov disp(%rsp), reg */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG_INDIRECT;
				op->src.reg = CFI_SP;
				op->src.offset = ins.displacement.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = modrm_reg;
			}
			break;
		}

		break;

	case 0x8d:
		if (mod_is_reg()) {
			WARN("invalid LEA encoding at %s:0x%lx", sec->name, offset);
			break;
		}

		/* skip non 64bit ops */
		if (!rex_w)
			break;

		/* skip nontrivial SIB */
		if (have_SIB()) {
			modrm_rm = sib_base;
			if (sib_index != CFI_SP)
				break;
		}

		/* lea disp(%rip), %dst */
		if (is_RIP()) {
			insn->type = INSN_LEA_RIP;
			break;
		}

		/* lea disp(%src), %dst */
		ADD_OP(op) {
			op->src.offset = ins.displacement.value;
			if (!op->src.offset) {
				/* lea (%src), %dst */
				op->src.type = OP_SRC_REG;
			} else {
				/* lea disp(%src), %dst */
				op->src.type = OP_SRC_ADD;
			}
			op->src.reg = modrm_rm;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = modrm_reg;
		}
		break;

	case 0x8f:
		/* pop to mem */
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_MEM;
		}
		break;

	case 0x90:
		if (rex_b) /* XCHG %r8, %rax */
			break;

		if (prefix == 0xf3) /* REP NOP := PAUSE */
			break;

		insn->type = INSN_NOP;
		break;

	case 0x9c:
		/* pushf */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSHF;
		}
		break;

	case 0x9d:
		/* popf */
		ADD_OP(op) {
			op->src.type = OP_SRC_POPF;
			op->dest.type = OP_DEST_MEM;
		}
		break;

	case 0x0f:

		if (op2 == 0x01) {

			switch (insn_last_prefix_id(&ins)) {
			case INAT_PFX_REPE:
			case INAT_PFX_REPNE:
				if (modrm == 0xca)
					/* eretu/erets */
					insn->type = INSN_SYSRET;
				break;
			default:
				if (modrm == 0xca)
					insn->type = INSN_CLAC;
				else if (modrm == 0xcb)
					insn->type = INSN_STAC;
				break;
			}
		} else if (op2 >= 0x80 && op2 <= 0x8f) {

			insn->type = INSN_JUMP_CONDITIONAL;

		} else if (op2 == 0x05 || op2 == 0x34) {

			/* syscall, sysenter */
			insn->type = INSN_SYSCALL;

		} else if (op2 == 0x07 || op2 == 0x35) {

			/* sysret, sysexit */
			insn->type = INSN_SYSRET;

		} else if (op2 == 0x0b || op2 == 0xb9) {

			/* ud2, ud1 */
			insn->type = INSN_BUG;

		} else if (op2 == 0x1f) {

			/* 0f 1f /0 := NOPL */
			if (modrm_reg == 0)
				insn->type = INSN_NOP;

		} else if (op2 == 0x1e) {

			if (prefix == 0xf3 && (modrm == 0xfa || modrm == 0xfb))
				insn->type = INSN_ENDBR;


		} else if (op2 == 0x38 && op3 == 0xf8) {
			if (ins.prefixes.nbytes == 1 &&
			    ins.prefixes.bytes[0] == 0xf2) {
				/* ENQCMD cannot be used in the kernel. */
				WARN("ENQCMD instruction at %s:%lx", sec->name, offset);
			}

		} else if (op2 == 0xa0 || op2 == 0xa8) {

			/* push fs/gs */
			ADD_OP(op) {
				op->src.type = OP_SRC_CONST;
				op->dest.type = OP_DEST_PUSH;
			}

		} else if (op2 == 0xa1 || op2 == 0xa9) {

			/* pop fs/gs */
			ADD_OP(op) {
				op->src.type = OP_SRC_POP;
				op->dest.type = OP_DEST_MEM;
			}
		}

		break;

	case 0xc9:
		/*
		 * leave
		 *
		 * equivalent to:
		 * mov bp, sp
		 * pop bp
		 */
		ADD_OP(op) {
			op->src.type = OP_SRC_REG;
			op->src.reg = CFI_BP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = CFI_SP;
		}
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = CFI_BP;
		}
		break;

	case 0xcc:
		/* int3 */
		insn->type = INSN_TRAP;
		break;

	case 0xe3:
		/* jecxz/jrcxz */
		insn->type = INSN_JUMP_CONDITIONAL;
		break;

	case 0xe9:
	case 0xeb:
		insn->type = INSN_JUMP_UNCONDITIONAL;
		break;

	case 0xc2:
	case 0xc3:
		insn->type = INSN_RETURN;
		break;

	case 0xc7: /* mov imm, r/m */
		if (!opts.noinstr)
			break;

		/* 11 bytes: 3 opcode/modrm bytes plus two 32-bit fields
		 * (relocs expected at offset+3 and offset+7). */
		if (ins.length == 3+4+4 && !strncmp(sec->name, ".init.text", 10)) {
			struct reloc *immr, *disp;
			struct symbol *func;
			int idx;

			immr = find_reloc_by_dest(elf, (void *)sec, offset+3);
			disp = find_reloc_by_dest(elf, (void *)sec, offset+7);

			if (!immr || strncmp(immr->sym->name, "pv_ops", 6))
				break;

			idx = pv_ops_idx_off(immr->sym->name);
			if (idx < 0)
				break;

			idx += (reloc_addend(immr) + 8) / sizeof(void *);

			/* NOTE(review): disp is dereferenced without a NULL
			 * check -- confirm a displacement reloc always
			 * accompanies the pv_ops immediate reloc here. */
			func = disp->sym;
			if (disp->sym->type == STT_SECTION)
				func = find_symbol_by_offset(disp->sym->sec, reloc_addend(disp));
			if (!func) {
				ERROR("no func for pv_ops[]");
				return -1;
			}

			objtool_pv_add(file, idx, func);
		}

		break;

	case 0xcf: /* iret */
		/*
		 * Handle sync_core(), which has an IRET to self.
		 * All other IRET are in STT_NONE entry code.
		 */
		sym = find_symbol_containing(sec, offset);
		if (sym && sym->type == STT_FUNC) {
			ADD_OP(op) {
				/* add $40, %rsp */
				op->src.type = OP_SRC_ADD;
				op->src.reg = CFI_SP;
				op->src.offset = 5*8;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;
		}

		fallthrough;

	case 0xca: /* retf */
	case 0xcb: /* retf */
		insn->type = INSN_SYSRET;
		break;

	case 0xd6: /* udb */
		insn->type = INSN_BUG;
		break;

	case 0xe0: /* loopne */
	case 0xe1: /* loope */
	case 0xe2: /* loop */
		insn->type = INSN_JUMP_CONDITIONAL;
		break;

	case 0xe8:
		insn->type = INSN_CALL;
		/*
		 * For the impact on the stack, a CALL behaves like
		 * a PUSH of an immediate value (the return address).
		 */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSH;
		}
		break;

	case 0xfc:
		insn->type = INSN_CLD;
		break;

	case 0xfd:
		insn->type = INSN_STD;
		break;

	case 0xff:
		if (modrm_reg == 2 || modrm_reg == 3) {

			insn->type = INSN_CALL_DYNAMIC;
			if (has_notrack_prefix(&ins))
				WARN("notrack prefix found at %s:0x%lx", sec->name, offset);

		} else if (modrm_reg == 4) {

			insn->type = INSN_JUMP_DYNAMIC;
			if (has_notrack_prefix(&ins))
				WARN("notrack prefix found at %s:0x%lx", sec->name, offset);

		} else if (modrm_reg == 5) {

			/* jmpf */
			insn->type = INSN_SYSRET;

		} else if (modrm_reg == 6) {

			/* push from mem */
			ADD_OP(op) {
				op->src.type = OP_SRC_CONST;
				op->dest.type = OP_DEST_PUSH;
			}
		}

		break;

	default:
		break;
	}

	/* Stash the immediate (or displacement) for jump-target math. */
	if (ins.immediate.nbytes)
		insn->immediate = ins.immediate.value;
	else if (ins.displacement.nbytes)
		insn->immediate = ins.displacement.value;

	return 0;
}
815
arch_initial_func_cfi_state(struct cfi_init_state * state)816 void arch_initial_func_cfi_state(struct cfi_init_state *state)
817 {
818 int i;
819
820 for (i = 0; i < CFI_NUM_REGS; i++) {
821 state->regs[i].base = CFI_UNDEFINED;
822 state->regs[i].offset = 0;
823 }
824
825 /* initial CFA (call frame address) */
826 state->cfa.base = CFI_SP;
827 state->cfa.offset = 8;
828
829 /* initial RA (return address) */
830 state->regs[CFI_RA].base = CFI_CFA;
831 state->regs[CFI_RA].offset = -8;
832 }
833
arch_nop_insn(int len)834 const char *arch_nop_insn(int len)
835 {
836 static const char nops[5][5] = {
837 { BYTES_NOP1 },
838 { BYTES_NOP2 },
839 { BYTES_NOP3 },
840 { BYTES_NOP4 },
841 { BYTES_NOP5 },
842 };
843
844 if (len < 1 || len > 5) {
845 ERROR("invalid NOP size: %d\n", len);
846 return NULL;
847 }
848
849 return nops[len-1];
850 }
851
852 #define BYTE_RET 0xC3
853
arch_ret_insn(int len)854 const char *arch_ret_insn(int len)
855 {
856 static const char ret[5][5] = {
857 { BYTE_RET },
858 { BYTE_RET, 0xcc },
859 { BYTE_RET, 0xcc, BYTES_NOP1 },
860 { BYTE_RET, 0xcc, BYTES_NOP2 },
861 { BYTE_RET, 0xcc, BYTES_NOP3 },
862 };
863
864 if (len < 1 || len > 5) {
865 ERROR("invalid RET size: %d\n", len);
866 return NULL;
867 }
868
869 return ret[len-1];
870 }
871
/*
 * Translate an ORC hint register number into objtool's CFI register
 * numbering.  Returns 0 on success; -1 (leaving *base untouched) for an
 * unknown ORC register.
 */
int arch_decode_hint_reg(u8 sp_reg, int *base)
{
	int reg;

	switch (sp_reg) {
	case ORC_REG_UNDEFINED:
		reg = CFI_UNDEFINED;
		break;
	case ORC_REG_SP:
		reg = CFI_SP;
		break;
	case ORC_REG_BP:
		reg = CFI_BP;
		break;
	case ORC_REG_SP_INDIRECT:
		reg = CFI_SP_INDIRECT;
		break;
	case ORC_REG_R10:
		reg = CFI_R10;
		break;
	case ORC_REG_R13:
		reg = CFI_R13;
		break;
	case ORC_REG_DI:
		reg = CFI_DI;
		break;
	case ORC_REG_DX:
		reg = CFI_DX;
		break;
	default:
		return -1;
	}

	*base = reg;
	return 0;
}
905
arch_is_retpoline(struct symbol * sym)906 bool arch_is_retpoline(struct symbol *sym)
907 {
908 return !strncmp(sym->name, "__x86_indirect_", 15) ||
909 !strncmp(sym->name, "__pi___x86_indirect_", 20);
910 }
911
arch_is_rethunk(struct symbol * sym)912 bool arch_is_rethunk(struct symbol *sym)
913 {
914 return !strcmp(sym->name, "__x86_return_thunk") ||
915 !strcmp(sym->name, "__pi___x86_return_thunk");
916 }
917
arch_is_embedded_insn(struct symbol * sym)918 bool arch_is_embedded_insn(struct symbol *sym)
919 {
920 return !strcmp(sym->name, "retbleed_return_thunk") ||
921 !strcmp(sym->name, "srso_alias_safe_ret") ||
922 !strcmp(sym->name, "srso_safe_ret");
923 }
924
arch_reloc_size(struct reloc * reloc)925 unsigned int arch_reloc_size(struct reloc *reloc)
926 {
927 switch (reloc_type(reloc)) {
928 case R_X86_64_32:
929 case R_X86_64_32S:
930 case R_X86_64_PC32:
931 case R_X86_64_PLT32:
932 return 4;
933 default:
934 return 8;
935 }
936 }
937
arch_absolute_reloc(struct elf * elf,struct reloc * reloc)938 bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
939 {
940 switch (reloc_type(reloc)) {
941 case R_X86_64_32:
942 case R_X86_64_32S:
943 case R_X86_64_64:
944 return true;
945 default:
946 return false;
947 }
948 }
949
#ifdef DISAS

/*
 * Configure libopcodes disassembly for x86: AT&T syntax, with 32-bit and
 * 64-bit machine variants supplied for disas_info_init() to choose from.
 */
int arch_disas_info_init(struct disassemble_info *dinfo)
{
	return disas_info_init(dinfo, bfd_arch_i386,
			       bfd_mach_i386_i386, bfd_mach_x86_64,
			       "att");
}

#endif /* DISAS */
960