// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/ftrace.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/unwind.h>
#include <asm/cfi.h>

static bool all_callee_regs_used[4] = {true, true, true, true};

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

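/*
 * For illustration (an editor's note, not part of the original code): the
 * EMIT* macros pack opcode bytes into a little-endian integer, so the first
 * argument lands at the lowest address. For example, EMIT3(0x48, 0x89, 0xE5)
 * computes 0x48 + (0x89 << 8) + (0xE5 << 16) and emit_code() stores it via a
 * u32 write, producing the byte sequence 48 89 e5, i.e. 'mov rbp, rsp'.
 */
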
#ifdef CONFIG_X86_KERNEL_IBT
#define EMIT_ENDBR()		EMIT(gen_endbr(), 4)
#define EMIT_ENDBR_POISON()	EMIT(gen_endbr_poison(), 4)
#else
#define EMIT_ENDBR()
#define EMIT_ENDBR_POISON()
#endif

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

/*
 * Let us limit the positive offset to be <= 123.
 * This is to ensure eventual JIT convergence for the following patterns:
 * ...
 * pass4, final_proglen=4391:
 *   ...
 *   20e:    48 85 ff                test   rdi,rdi
 *   211:    74 7d                   je     0x290
 *   213:    48 8b 77 00             mov    rsi,QWORD PTR [rdi+0x0]
 *   ...
 *   289:    48 85 ff                test   rdi,rdi
 *   28c:    74 17                   je     0x2a5
 *   28e:    e9 7f ff ff ff          jmp    0x212
 *   293:    bf 03 00 00 00          mov    edi,0x3
 * Note that insn at 0x211 is a 2-byte cond jump insn for offset 0x7d (125)
 * and insn at 0x28e is a 5-byte jmp insn with offset -129.
 *
 * pass5, final_proglen=4392:
 *   ...
 *   20e:    48 85 ff                test   rdi,rdi
 *   211:    0f 84 80 00 00 00       je     0x297
 *   217:    48 8b 77 00             mov    rsi,QWORD PTR [rdi+0x0]
 *   ...
 *   28d:    48 85 ff                test   rdi,rdi
 *   290:    74 1a                   je     0x2ac
 *   292:    eb 84                   jmp    0x218
 *   294:    bf 03 00 00 00          mov    edi,0x3
 * Note that insn at 0x211 is a 6-byte cond jump insn now since its offset
 * becomes 0x80 based on the previous round (0x293 - 0x213 = 0x80).
 * At the same time, insn at 0x292 is a 2-byte insn since its offset is
 * -124.
 *
 * pass6 will repeat the same code as in pass4 and this will prevent
 * eventual convergence.
 *
 * To fix this issue, we need to break the je (2->6 bytes) <-> jmp
 * (5->2 bytes) cycle in the above. In the above example, a je offset
 * <= 0x7c should work.
 *
 * For other cases, je <-> je needs offset <= 0x7b to avoid
 * non-convergence. For jmp <-> je and jmp <-> jmp cases, a jmp offset
 * <= 0x7c should avoid non-convergence.
 *
 * Overall, let us limit the positive offset for an 8-bit cond/uncond jmp
 * insn to a maximum of 123 (0x7b). This way, the JIT passes can
 * eventually converge.
 */
static bool is_imm8_jmp_offset(int value)
{
	return value <= 123 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
#define X86_REG_R12 (MAX_BPF_JIT_REG + 3)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
	[X86_REG_R12] = 4, /* R12 callee saved */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(X86_REG_R12) |
			     BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(index))
		byte |= 2;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}

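/*
 * A worked example (an editor's note, not part of the original code): for
 * EMIT_mov(BPF_REG_7, BPF_REG_1), i.e. 'mov r13, rdi',
 * add_2mod(0x48, BPF_REG_7, BPF_REG_1) sets REX.B for r13 and yields the
 * prefix 0x49, while add_2reg(0xC0, BPF_REG_7, BPF_REG_1) builds the ModRM
 * byte 0xC0 + reg2hex[BPF_REG_7] + (reg2hex[BPF_REG_1] << 3) = 0xFD.
 * Together with opcode 0x89 this emits 49 89 fd, i.e. 'mov r13, rdi'.
 */
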
/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,
	[BPF_SUB] = 0x29,
	[BPF_AND] = 0x21,
	[BPF_OR] = 0x09,
	[BPF_XOR] = 0x31,
	[BPF_LSH] = 0xE0,
	[BPF_RSH] = 0xE8,
	[BPF_ARSH] = 0xF8,
};

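/*
 * Illustrative note (an editor's note, not part of the original code): for
 * BPF_ADD through BPF_XOR these are the one-byte 'op r/m, r' opcodes,
 * combined with a ModRM byte from add_2reg(); e.g. BPF_ADD on rax and rbx
 * emits 48 01 d8, i.e. 'add rax, rbx'. For the shifts (BPF_LSH/RSH/ARSH)
 * the values are not opcodes but ModRM bytes selecting /4, /5 and /7 of
 * the 0xD3 shift-by-cl opcode group.
 */
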
static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

int bpf_arch_text_invalidate(void *dst, size_t len)
{
	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */

	/*
	 * Program specific offsets of labels in the code; these rely on the
	 * JIT doing at least 2 passes, recording the position on the first
	 * pass, only to generate the correct offset on the second pass.
	 */
	int tail_call_direct_label;
	int tail_call_indirect_label;
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	(12 + ENDBR_INSN_SIZE)

static void push_r12(u8 **pprog)
{
	u8 *prog = *pprog;

	EMIT2(0x41, 0x54);   /* push r12 */
	*pprog = prog;
}

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}

static void pop_r12(u8 **pprog)
{
	u8 *prog = *pprog;

	EMIT2(0x41, 0x5C);   /* pop r12 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}

static void emit_nops(u8 **pprog, int len)
{
	u8 *prog = *pprog;
	int i, noplen;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(x86_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

/*
 * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
 * in arch/x86/kernel/alternative.c
 */

static void emit_fineibt(u8 **pprog, u32 hash)
{
	u8 *prog = *pprog;

	EMIT_ENDBR();
	EMIT3_off32(0x41, 0x81, 0xea, hash);	/* subl $hash, %r10d	*/
	EMIT2(0x74, 0x07);			/* jz.d8 +7		*/
	EMIT2(0x0f, 0x0b);			/* ud2			*/
	EMIT1(0x90);				/* nop			*/
	EMIT_ENDBR_POISON();

	*pprog = prog;
}

static void emit_kcfi(u8 **pprog, u32 hash)
{
	u8 *prog = *pprog;

	EMIT1_off32(0xb8, hash);		/* movl $hash, %eax	*/
#ifdef CONFIG_CALL_PADDING
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
#endif
	EMIT_ENDBR();

	*pprog = prog;
}

static void emit_cfi(u8 **pprog, u32 hash)
{
	u8 *prog = *pprog;

	switch (cfi_mode) {
	case CFI_FINEIBT:
		emit_fineibt(&prog, hash);
		break;

	case CFI_KCFI:
		emit_kcfi(&prog, hash);
		break;

	default:
		EMIT_ENDBR();
		break;
	}

	*pprog = prog;
}

static void emit_prologue_tail_call(u8 **pprog, bool is_subprog)
{
	u8 *prog = *pprog;

	if (!is_subprog) {
		/* cmp rax, MAX_TAIL_CALL_CNT */
		EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT);
		EMIT2(X86_JA, 6);        /* ja 6 */
		/* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT.
		 * case1: entry of main prog.
		 * case2: tail callee of main prog.
		 */
		EMIT1(0x50);             /* push rax */
		/* Make rax as tail_call_cnt_ptr. */
		EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */
		EMIT2(0xEB, 1);          /* jmp 1 */
		/* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT.
		 * case: tail callee of subprog.
		 */
		EMIT1(0x50);             /* push rax */
		/* push tail_call_cnt_ptr */
		EMIT1(0x50);             /* push rax */
	} else { /* is_subprog */
		/* rax is tail_call_cnt_ptr. */
		EMIT1(0x50);             /* push rax */
		EMIT1(0x50);             /* push rax */
	}

	*pprog = prog;
}

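/*
 * Illustrative sketch of the resulting frame (an editor's note, not part
 * of the original code), for a tail-call-reachable program with rounded
 * stack depth SD:
 *
 *   [rbp - 8 .. rbp - SD]  BPF stack
 *   [rbp - SD - 8]         tail_call_cnt (fresh entry of main prog) or a
 *                          copy of tail_call_cnt_ptr (tail callee cases)
 *   [rbp - SD - 16]        tail_call_cnt_ptr, the slot that
 *                          BPF_TAIL_CALL_CNT_PTR_STACK_OFF() below
 *                          resolves to
 */
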
/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog,
			  bool is_exception_cb)
{
	u8 *prog = *pprog;

	emit_cfi(&prog, is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash);
	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	emit_nops(&prog, X86_PATCH_SIZE);
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			/* When it's the entry of the whole tailcall context,
			 * zeroing rax means initialising tail_call_cnt.
			 */
			EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */
		else
			/* Keep the same instruction layout. */
			emit_nops(&prog, 3);     /* nop3 */
	}
	/* Exception callback receives FP as third parameter */
	if (is_exception_cb) {
		EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
		EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
		/* The main frame must have exception_boundary as true, so we
		 * first restore those callee-saved regs from stack, before
		 * reusing the stack frame.
		 */
		pop_callee_regs(&prog, all_callee_regs_used);
		pop_r12(&prog);
		/* Reset the stack frame. */
		EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
	} else {
		EMIT1(0x55);             /* push rbp */
		EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	}

	/* X86_TAIL_CALL_OFFSET is here */
	EMIT_ENDBR();

	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		emit_prologue_tail_call(&prog, is_subprog);
	*pprog = prog;
}

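/*
 * For illustration (an editor's sketch, assuming the default CFI mode,
 * i.e. plain ENDBR rather than FineIBT/kCFI; not part of the original
 * code), a tail-call-reachable main program starts with
 *
 *   (endbr64)              ; IBT landing pad, ENDBR_INSN_SIZE bytes
 *   nop5                   ; patchable hook for BPF trampoline
 *   xor rax, rax           ; tail_call_cnt = 0
 *   push rbp
 *   mov rbp, rsp
 *   (endbr64)              ; <- bpf_func + X86_TAIL_CALL_OFFSET
 *                          ;    (12 + ENDBR_INSN_SIZE), where tail
 *                          ;    calls land
 *   sub rsp, rounded_stack_depth
 *   push rax ...           ; tail-call count bookkeeping, see above
 */
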
static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

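/*
 * Illustrative note (an editor's note, not part of the original code): the
 * rel32 operand of an E8/E9 call/jmp is relative to the address of the
 * *next* instruction, hence the 'ip + X86_PATCH_SIZE' above. E.g. patching
 * a call at ip to a target at ip + 0x25 emits e8 20 00 00 00
 * (0x25 - 5 = 0x20).
 */
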
static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_rsb_call(u8 **pprog, void *func, void *ip)
{
	OPTIMIZER_HIDE_VAR(func);
	ip += x86_call_depth_emit_accounting(pprog, func, ip);
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	/*
	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
	 * with an ENDBR instruction.
	 */
	if (is_endbr(*(u32 *)ip))
		ip += ENDBR_INSN_SIZE;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
}

#define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)

static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
	u8 *prog = *pprog;

	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		EMIT_LFENCE();
		EMIT2(0xFF, 0xE0 + reg);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
		OPTIMIZER_HIDE_VAR(reg);
		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
			emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
		else
			emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
	} else {
		EMIT2(0xFF, 0xE0 + reg);	/* jmp *%\reg */
		if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
			EMIT1(0xCC);		/* int3 */
	}

	*pprog = prog;
}

static void emit_return(u8 **pprog, u8 *ip)
{
	u8 *prog = *pprog;

	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
		emit_jump(&prog, x86_return_thunk, ip);
	} else {
		EMIT1(0xC3);		/* ret */
		if (IS_ENABLED(CONFIG_MITIGATION_SLS))
			EMIT1(0xCC);	/* int3 */
	}

	*pprog = prog;
}

#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack)	(-16 - round_up(stack, 8))

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
					u8 **pprog, bool *callee_regs_used,
					u32 stack_depth, u8 *ip,
					struct jit_context *ctx)
{
	int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JBE, offset);                   /* jbe out */

	/*
	 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off);   /* mov rax, qword ptr [rbp + tcc_ptr_off] */
	EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT);   /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JE, offset);                    /* je out */

	/* Inc tail_call_cnt if the slot is populated. */
	EMIT4(0x48, 0x83, 0x00, 0x01);            /* add qword ptr [rax], 1 */

	if (bpf_prog->aux->exception_boundary) {
		pop_callee_regs(&prog, all_callee_regs_used);
		pop_r12(&prog);
	} else {
		pop_callee_regs(&prog, callee_regs_used);
		if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
			pop_r12(&prog);
	}

	/* Pop tail_call_cnt_ptr. */
	EMIT1(0x58);                              /* pop rax */
	/* Pop tail_call_cnt, if it's main prog.
	 * Pop tail_call_cnt_ptr, if it's subprog.
	 */
	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));

	/* out: */
	ctx->tail_call_indirect_label = prog - start;
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
				      struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, u8 *ip,
				      bool *callee_regs_used, u32 stack_depth,
				      struct jit_context *ctx)
{
	int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off);   /* mov rax, qword ptr [rbp + tcc_ptr_off] */
	EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT);   /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_direct_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                       /* jae out */

	poke->tailcall_bypass = ip + (prog - start);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	/* Inc tail_call_cnt if the slot is populated. */
	EMIT4(0x48, 0x83, 0x00, 0x01);                /* add qword ptr [rax], 1 */

	if (bpf_prog->aux->exception_boundary) {
		pop_callee_regs(&prog, all_callee_regs_used);
		pop_r12(&prog);
	} else {
		pop_callee_regs(&prog, callee_regs_used);
		if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
			pop_r12(&prog);
	}

	/* Pop tail_call_cnt_ptr. */
	EMIT1(0x58);                                  /* pop rax */
	/* Pop tail_call_cnt, if it's main prog.
	 * Pop tail_call_cnt_ptr, if it's subprog.
	 */
	EMIT1(0x58);                                  /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	emit_nops(&prog, X86_PATCH_SIZE);

	/* out: */
	ctx->tail_call_direct_label = prog - start;

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		if (poke->aux && poke->aux != prog->aux)
			continue;

		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

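/*
 * Illustrative examples (an editor's note, not part of the original code),
 * for dst_reg == BPF_REG_0 (rax):
 *   imm32 == 0                  -> 31 c0                   xor eax, eax
 *   imm32 == 5                  -> b8 05 00 00 00          mov eax, 0x5
 *   imm32 == -1, sign_propagate -> 48 c7 c0 ff ff ff ff    mov rax, -1
 */
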
static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo;
	u8 *prog = *pprog;

	if (is_uimm32(imm64)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else if (is_simm32(imm64)) {
		emit_mov_imm32(&prog, true, dst_reg, imm32_lo);
	} else {
		/* movabsq rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

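/*
 * Illustrative note (an editor's note, not part of the original code): only
 * when the 64-bit value fits neither the zero- nor the sign-extension of a
 * 32-bit immediate does the 10-byte movabsq get emitted, e.g.
 * imm64 == 0x100000000 for rax yields 48 b8 00 00 00 00 01 00 00 00.
 */
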
static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
			   u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* movs[b,w,l]q dst, src */
		if (num_bits == 8)
			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
			      add_2reg(0xC0, src_reg, dst_reg));
		else if (num_bits == 16)
			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
			      add_2reg(0xC0, src_reg, dst_reg));
		else if (num_bits == 32)
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
			      add_2reg(0xC0, src_reg, dst_reg));
	} else {
		/* movs[b,w]l dst, src */
		if (num_bits == 8) {
			EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
			      add_2reg(0xC0, src_reg, dst_reg));
		} else if (num_bits == 16) {
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, src_reg, dst_reg));
			EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
			      add_2reg(0xC0, src_reg, dst_reg));
		}
	}

	*pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		/* 1-byte signed displacement.
		 *
		 * If off == 0 we could skip this and save one extra byte, but
		 * special case of x86 R13 which always needs an offset is not
		 * worth the hassle
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* 4-byte signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
	*pprog = prog;
}

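/*
 * Illustrative example (an editor's note, not part of the original code):
 * addressing *(rdi + 8) with val_reg rax encodes as ModRM 0x47 followed by
 * the disp8 0x08 (mod=01), while *(rdi + 0x1000) needs ModRM 0x87 plus a
 * four-byte disp32 (mod=10).
 */
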
static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
	} else {
		EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
	}
	*pprog = prog;
}

/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
	*pprog = prog;
}

/*
 * Similar version of maybe_emit_mod() for a single register
 */
static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_1mod(0x48, reg));
	else if (is_ereg(reg))
		EMIT1(add_1mod(0x40, reg));
	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}

10761f9a1ea8SYonghong Song /* LDSX: dst_reg = *(s8*)(src_reg + off) */
10771f9a1ea8SYonghong Song static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
10781f9a1ea8SYonghong Song {
10791f9a1ea8SYonghong Song u8 *prog = *pprog;
10801f9a1ea8SYonghong Song
10811f9a1ea8SYonghong Song switch (size) {
10821f9a1ea8SYonghong Song case BPF_B:
10831f9a1ea8SYonghong Song /* Emit 'movsx rax, byte ptr [rax + off]' */
10841f9a1ea8SYonghong Song EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
10851f9a1ea8SYonghong Song break;
10861f9a1ea8SYonghong Song case BPF_H:
10871f9a1ea8SYonghong Song /* Emit 'movsx rax, word ptr [rax + off]' */
10881f9a1ea8SYonghong Song EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
10891f9a1ea8SYonghong Song break;
10901f9a1ea8SYonghong Song case BPF_W:
10911f9a1ea8SYonghong Song /* Emit 'movsx rax, dword ptr [rax+0x14]' */
10921f9a1ea8SYonghong Song EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
10931f9a1ea8SYonghong Song break;
10941f9a1ea8SYonghong Song }
10951f9a1ea8SYonghong Song emit_insn_suffix(&prog, src_reg, dst_reg, off);
10961f9a1ea8SYonghong Song *pprog = prog;
10971f9a1ea8SYonghong Song }
10981f9a1ea8SYonghong Song
10992fe99eb0SAlexei Starovoitov static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
11002fe99eb0SAlexei Starovoitov {
11012fe99eb0SAlexei Starovoitov u8 *prog = *pprog;
11022fe99eb0SAlexei Starovoitov
11032fe99eb0SAlexei Starovoitov switch (size) {
11042fe99eb0SAlexei Starovoitov case BPF_B:
11052fe99eb0SAlexei Starovoitov /* movzx rax, byte ptr [rax + r12 + off] */
11062fe99eb0SAlexei Starovoitov EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6);
11072fe99eb0SAlexei Starovoitov break;
11082fe99eb0SAlexei Starovoitov case BPF_H:
11092fe99eb0SAlexei Starovoitov /* movzx rax, word ptr [rax + r12 + off] */
11102fe99eb0SAlexei Starovoitov EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7);
11112fe99eb0SAlexei Starovoitov break;
11122fe99eb0SAlexei Starovoitov case BPF_W:
11132fe99eb0SAlexei Starovoitov /* mov eax, dword ptr [rax + r12 + off] */
11142fe99eb0SAlexei Starovoitov EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B);
11152fe99eb0SAlexei Starovoitov break;
11162fe99eb0SAlexei Starovoitov case BPF_DW:
11172fe99eb0SAlexei Starovoitov /* mov rax, qword ptr [rax + r12 + off] */
11182fe99eb0SAlexei Starovoitov EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B);
11192fe99eb0SAlexei Starovoitov break;
11202fe99eb0SAlexei Starovoitov }
11212fe99eb0SAlexei Starovoitov emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
11222fe99eb0SAlexei Starovoitov *pprog = prog;
11232fe99eb0SAlexei Starovoitov }
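/*
 * Illustrative sketch (hypothetical, not part of the kernel build):
 * with r12 as the index register, the BPF_DW load below emits
 * 4A 8B 44 27 08, i.e. 'mov rax, qword ptr [rdi + r12 + 8]'. REX 0x4A
 * carries W plus the X bit for r12, ModRM 0x44 requests a SIB byte and
 * a disp8, and SIB 0x27 packs base=rdi with index=r12.
 */
static __maybe_unused int example_ldx_index(void)
{
	u8 buf[16], *prog = buf;

	emit_ldx_index(&prog, BPF_DW, BPF_REG_0, BPF_REG_1, X86_REG_R12, 8);

	return prog - buf;
}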
11242fe99eb0SAlexei Starovoitov
11252fe99eb0SAlexei Starovoitov static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
11262fe99eb0SAlexei Starovoitov {
11272fe99eb0SAlexei Starovoitov emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
11282fe99eb0SAlexei Starovoitov }
11292fe99eb0SAlexei Starovoitov
11303b2744e6SAlexei Starovoitov /* STX: *(u8*)(dst_reg + off) = src_reg */
11313b2744e6SAlexei Starovoitov static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
11323b2744e6SAlexei Starovoitov {
11333b2744e6SAlexei Starovoitov u8 *prog = *pprog;
11343b2744e6SAlexei Starovoitov
11353b2744e6SAlexei Starovoitov switch (size) {
11363b2744e6SAlexei Starovoitov case BPF_B:
11373b2744e6SAlexei Starovoitov /* Emit 'mov byte ptr [rax + off], al' */
1138aee194b1SLuke Nelson if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
1139aee194b1SLuke Nelson /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
11403b2744e6SAlexei Starovoitov EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
11413b2744e6SAlexei Starovoitov else
11423b2744e6SAlexei Starovoitov EMIT1(0x88);
11433b2744e6SAlexei Starovoitov break;
11443b2744e6SAlexei Starovoitov case BPF_H:
11453b2744e6SAlexei Starovoitov if (is_ereg(dst_reg) || is_ereg(src_reg))
11463b2744e6SAlexei Starovoitov EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
11473b2744e6SAlexei Starovoitov else
11483b2744e6SAlexei Starovoitov EMIT2(0x66, 0x89);
11493b2744e6SAlexei Starovoitov break;
11503b2744e6SAlexei Starovoitov case BPF_W:
11513b2744e6SAlexei Starovoitov if (is_ereg(dst_reg) || is_ereg(src_reg))
11523b2744e6SAlexei Starovoitov EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
11533b2744e6SAlexei Starovoitov else
11543b2744e6SAlexei Starovoitov EMIT1(0x89);
11553b2744e6SAlexei Starovoitov break;
11563b2744e6SAlexei Starovoitov case BPF_DW:
11573b2744e6SAlexei Starovoitov EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
11583b2744e6SAlexei Starovoitov break;
11593b2744e6SAlexei Starovoitov }
116011c11d07SBrendan Jackman emit_insn_suffix(&prog, dst_reg, src_reg, off);
11613b2744e6SAlexei Starovoitov *pprog = prog;
11623b2744e6SAlexei Starovoitov }
11633b2744e6SAlexei Starovoitov
11642fe99eb0SAlexei Starovoitov /* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */
11652fe99eb0SAlexei Starovoitov static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
11662fe99eb0SAlexei Starovoitov {
11672fe99eb0SAlexei Starovoitov u8 *prog = *pprog;
11682fe99eb0SAlexei Starovoitov
11692fe99eb0SAlexei Starovoitov switch (size) {
11702fe99eb0SAlexei Starovoitov case BPF_B:
11712fe99eb0SAlexei Starovoitov /* mov byte ptr [rax + r12 + off], al */
11722fe99eb0SAlexei Starovoitov EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88);
11732fe99eb0SAlexei Starovoitov break;
11742fe99eb0SAlexei Starovoitov case BPF_H:
11752fe99eb0SAlexei Starovoitov /* mov word ptr [rax + r12 + off], ax */
11762fe99eb0SAlexei Starovoitov EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
11772fe99eb0SAlexei Starovoitov break;
11782fe99eb0SAlexei Starovoitov case BPF_W:
11792fe99eb0SAlexei Starovoitov /* mov dword ptr [rax + r12 + off], eax */
11802fe99eb0SAlexei Starovoitov EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
11812fe99eb0SAlexei Starovoitov break;
11822fe99eb0SAlexei Starovoitov case BPF_DW:
11832fe99eb0SAlexei Starovoitov /* mov qword ptr [rax + r12 + off], rax */
11842fe99eb0SAlexei Starovoitov EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89);
11852fe99eb0SAlexei Starovoitov break;
11862fe99eb0SAlexei Starovoitov }
11872fe99eb0SAlexei Starovoitov emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
11882fe99eb0SAlexei Starovoitov *pprog = prog;
11892fe99eb0SAlexei Starovoitov }
11902fe99eb0SAlexei Starovoitov
11912fe99eb0SAlexei Starovoitov static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
11922fe99eb0SAlexei Starovoitov {
11932fe99eb0SAlexei Starovoitov emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
11942fe99eb0SAlexei Starovoitov }
11952fe99eb0SAlexei Starovoitov
11962fe99eb0SAlexei Starovoitov /* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */
11972fe99eb0SAlexei Starovoitov static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm)
11982fe99eb0SAlexei Starovoitov {
11992fe99eb0SAlexei Starovoitov u8 *prog = *pprog;
12002fe99eb0SAlexei Starovoitov
12012fe99eb0SAlexei Starovoitov switch (size) {
12022fe99eb0SAlexei Starovoitov case BPF_B:
12032fe99eb0SAlexei Starovoitov /* mov byte ptr [rax + r12 + off], imm8 */
12042fe99eb0SAlexei Starovoitov EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6);
12052fe99eb0SAlexei Starovoitov break;
12062fe99eb0SAlexei Starovoitov case BPF_H:
12072fe99eb0SAlexei Starovoitov /* mov word ptr [rax + r12 + off], imm16 */
12082fe99eb0SAlexei Starovoitov EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
12092fe99eb0SAlexei Starovoitov break;
12102fe99eb0SAlexei Starovoitov case BPF_W:
12112fe99eb0SAlexei Starovoitov /* mov dword ptr [rax + r12 + off], imm32 */
12122fe99eb0SAlexei Starovoitov EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
12132fe99eb0SAlexei Starovoitov break;
12142fe99eb0SAlexei Starovoitov case BPF_DW:
12152fe99eb0SAlexei Starovoitov /* mov qword ptr [rax + r12 + off], imm32 */
12162fe99eb0SAlexei Starovoitov EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7);
12172fe99eb0SAlexei Starovoitov break;
12182fe99eb0SAlexei Starovoitov }
12192fe99eb0SAlexei Starovoitov emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off);
12202fe99eb0SAlexei Starovoitov EMIT(imm, bpf_size_to_x86_bytes(size));
12212fe99eb0SAlexei Starovoitov *pprog = prog;
12222fe99eb0SAlexei Starovoitov }
12232fe99eb0SAlexei Starovoitov
12242fe99eb0SAlexei Starovoitov static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
12252fe99eb0SAlexei Starovoitov {
12262fe99eb0SAlexei Starovoitov emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
12272fe99eb0SAlexei Starovoitov }
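/*
 * Illustrative sketch (hypothetical, not part of the kernel build):
 * a BPF_W store of an immediate through the r12 arena base. The call
 * below emits 42 C7 44 27 10 2A 00 00 00, i.e.
 * 'mov dword ptr [rdi + r12 + 16], 42': REX.X for r12, opcode C7 /0,
 * then SIB, disp8 and the 4-byte immediate.
 */
static __maybe_unused int example_st_r12(void)
{
	u8 buf[16], *prog = buf;

	emit_st_r12(&prog, BPF_W, BPF_REG_1, 16, 42);

	return prog - buf;
}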
12282fe99eb0SAlexei Starovoitov
122991c960b0SBrendan Jackman static int emit_atomic(u8 **pprog, u8 atomic_op,
123091c960b0SBrendan Jackman u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
123191c960b0SBrendan Jackman {
123291c960b0SBrendan Jackman u8 *prog = *pprog;
123391c960b0SBrendan Jackman
123491c960b0SBrendan Jackman EMIT1(0xF0); /* lock prefix */
123591c960b0SBrendan Jackman
123691c960b0SBrendan Jackman maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
123791c960b0SBrendan Jackman
123891c960b0SBrendan Jackman /* emit opcode */
123991c960b0SBrendan Jackman switch (atomic_op) {
124091c960b0SBrendan Jackman case BPF_ADD:
1241981f94c3SBrendan Jackman case BPF_AND:
1242981f94c3SBrendan Jackman case BPF_OR:
1243981f94c3SBrendan Jackman case BPF_XOR:
124491c960b0SBrendan Jackman /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
124591c960b0SBrendan Jackman EMIT1(simple_alu_opcodes[atomic_op]);
124691c960b0SBrendan Jackman break;
12475ca419f2SBrendan Jackman case BPF_ADD | BPF_FETCH:
12485ca419f2SBrendan Jackman /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
12495ca419f2SBrendan Jackman EMIT2(0x0F, 0xC1);
12505ca419f2SBrendan Jackman break;
12515ffa2550SBrendan Jackman case BPF_XCHG:
12525ffa2550SBrendan Jackman /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
12535ffa2550SBrendan Jackman EMIT1(0x87);
12545ffa2550SBrendan Jackman break;
12555ffa2550SBrendan Jackman case BPF_CMPXCHG:
12565ffa2550SBrendan Jackman /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
12575ffa2550SBrendan Jackman EMIT2(0x0F, 0xB1);
12585ffa2550SBrendan Jackman break;
125991c960b0SBrendan Jackman default:
126091c960b0SBrendan Jackman pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
126191c960b0SBrendan Jackman return -EFAULT;
126291c960b0SBrendan Jackman }
126391c960b0SBrendan Jackman
126491c960b0SBrendan Jackman emit_insn_suffix(&prog, dst_reg, src_reg, off);
126591c960b0SBrendan Jackman
126691c960b0SBrendan Jackman *pprog = prog;
126791c960b0SBrendan Jackman return 0;
126891c960b0SBrendan Jackman }
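/*
 * Illustrative sketch (hypothetical, not part of the kernel build):
 * BPF_ADD | BPF_FETCH on a BPF_DW operand maps to lock xadd, so the
 * call below emits F0 48 0F C1 47 00, i.e.
 * 'lock xadd qword ptr [rdi + 0], rax'.
 */
static __maybe_unused int example_atomic_fetch_add(void)
{
	u8 buf[16], *prog = buf;

	if (emit_atomic(&prog, BPF_ADD | BPF_FETCH,
			BPF_REG_1, BPF_REG_0, 0, BPF_DW))
		return -EFAULT;

	return prog - buf;
}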
126991c960b0SBrendan Jackman
1270d503a04fSAlexei Starovoitov static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
1271d503a04fSAlexei Starovoitov u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1272d503a04fSAlexei Starovoitov {
1273d503a04fSAlexei Starovoitov u8 *prog = *pprog;
1274d503a04fSAlexei Starovoitov
1275d503a04fSAlexei Starovoitov EMIT1(0xF0); /* lock prefix */
1276d503a04fSAlexei Starovoitov switch (size) {
1277d503a04fSAlexei Starovoitov case BPF_W:
1278d503a04fSAlexei Starovoitov EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg));
1279d503a04fSAlexei Starovoitov break;
1280d503a04fSAlexei Starovoitov case BPF_DW:
1281d503a04fSAlexei Starovoitov EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
1282d503a04fSAlexei Starovoitov break;
1283d503a04fSAlexei Starovoitov default:
1284d503a04fSAlexei Starovoitov pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n");
1285d503a04fSAlexei Starovoitov return -EFAULT;
1286d503a04fSAlexei Starovoitov }
1287d503a04fSAlexei Starovoitov
1288d503a04fSAlexei Starovoitov /* emit opcode */
1289d503a04fSAlexei Starovoitov switch (atomic_op) {
1290d503a04fSAlexei Starovoitov case BPF_ADD:
1291d503a04fSAlexei Starovoitov case BPF_AND:
1292d503a04fSAlexei Starovoitov case BPF_OR:
1293d503a04fSAlexei Starovoitov case BPF_XOR:
1294d503a04fSAlexei Starovoitov /* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */
1295d503a04fSAlexei Starovoitov EMIT1(simple_alu_opcodes[atomic_op]);
1296d503a04fSAlexei Starovoitov break;
1297d503a04fSAlexei Starovoitov case BPF_ADD | BPF_FETCH:
1298d503a04fSAlexei Starovoitov /* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */
1299d503a04fSAlexei Starovoitov EMIT2(0x0F, 0xC1);
1300d503a04fSAlexei Starovoitov break;
1301d503a04fSAlexei Starovoitov case BPF_XCHG:
1302d503a04fSAlexei Starovoitov /* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */
1303d503a04fSAlexei Starovoitov EMIT1(0x87);
1304d503a04fSAlexei Starovoitov break;
1305d503a04fSAlexei Starovoitov case BPF_CMPXCHG:
1306d503a04fSAlexei Starovoitov /* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */
1307d503a04fSAlexei Starovoitov EMIT2(0x0F, 0xB1);
1308d503a04fSAlexei Starovoitov break;
1309d503a04fSAlexei Starovoitov default:
1310d503a04fSAlexei Starovoitov pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1311d503a04fSAlexei Starovoitov return -EFAULT;
1312d503a04fSAlexei Starovoitov }
1313d503a04fSAlexei Starovoitov emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1314d503a04fSAlexei Starovoitov *pprog = prog;
1315d503a04fSAlexei Starovoitov return 0;
1316d503a04fSAlexei Starovoitov }
1317d503a04fSAlexei Starovoitov
13182fe99eb0SAlexei Starovoitov #define DONT_CLEAR 1
13192fe99eb0SAlexei Starovoitov
132046d28947SThomas Gleixner bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
13213dec541bSAlexei Starovoitov {
13223dec541bSAlexei Starovoitov u32 reg = x->fixup >> 8;
13233dec541bSAlexei Starovoitov
13243dec541bSAlexei Starovoitov /* jump over faulting load and clear dest register */
13252fe99eb0SAlexei Starovoitov if (reg != DONT_CLEAR)
13263dec541bSAlexei Starovoitov *(unsigned long *)((void *)regs + reg) = 0;
13273dec541bSAlexei Starovoitov regs->ip += x->fixup & 0xff;
13283dec541bSAlexei Starovoitov return true;
13293dec541bSAlexei Starovoitov }
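/*
 * For reference, a sketch of the fixup word decoded above: the JIT
 * packs it as (insn_len | (pt_regs_offset << 8)). For a faulting
 * 4-byte 'mov rbx, qword ptr [rax + 0x14]' the JIT stores
 * 4 | (offsetof(struct pt_regs, bx) << 8), so the handler zeroes
 * regs->bx and then advances regs->ip past the load. A DONT_CLEAR
 * value in the upper bits (used for the arena accesses further down)
 * skips the register clearing.
 */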
13303dec541bSAlexei Starovoitov
1331ebf7d1f5SMaciej Fijalkowski static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
1332f663a03cSLeon Hwang bool *regs_used)
1333ebf7d1f5SMaciej Fijalkowski {
1334ebf7d1f5SMaciej Fijalkowski int i;
1335ebf7d1f5SMaciej Fijalkowski
1336ebf7d1f5SMaciej Fijalkowski for (i = 1; i <= insn_cnt; i++, insn++) {
1337ebf7d1f5SMaciej Fijalkowski if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
1338ebf7d1f5SMaciej Fijalkowski regs_used[0] = true;
1339ebf7d1f5SMaciej Fijalkowski if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
1340ebf7d1f5SMaciej Fijalkowski regs_used[1] = true;
1341ebf7d1f5SMaciej Fijalkowski if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
1342ebf7d1f5SMaciej Fijalkowski regs_used[2] = true;
1343ebf7d1f5SMaciej Fijalkowski if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
1344ebf7d1f5SMaciej Fijalkowski regs_used[3] = true;
1345ebf7d1f5SMaciej Fijalkowski }
1346ebf7d1f5SMaciej Fijalkowski }
1347ebf7d1f5SMaciej Fijalkowski
134877d8f5d4SJie Meng /* emit the 3-byte VEX prefix
134977d8f5d4SJie Meng *
135077d8f5d4SJie Meng * r: same as rex.r, extra bit for ModRM reg field
135177d8f5d4SJie Meng * x: same as rex.x, extra bit for SIB index field
135277d8f5d4SJie Meng * b: same as rex.b, extra bit for ModRM r/m, or SIB base
135377d8f5d4SJie Meng * m: opcode map select, encoding escape bytes e.g. 0x0f38
135477d8f5d4SJie Meng * w: same as rex.w (32 bit or 64 bit) or opcode specific
135577d8f5d4SJie Meng * src_reg2: additional source reg (encoded as BPF reg)
135677d8f5d4SJie Meng * l: vector length (128 bit or 256 bit) or reserved
135777d8f5d4SJie Meng * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
135877d8f5d4SJie Meng */
135977d8f5d4SJie Meng static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
136077d8f5d4SJie Meng bool w, u8 src_reg2, bool l, u8 pp)
136177d8f5d4SJie Meng {
136277d8f5d4SJie Meng u8 *prog = *pprog;
136377d8f5d4SJie Meng const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
136477d8f5d4SJie Meng u8 b1, b2;
136577d8f5d4SJie Meng u8 vvvv = reg2hex[src_reg2];
136677d8f5d4SJie Meng
136777d8f5d4SJie Meng /* reg2hex gives only the lower 3 bit of vvvv */
136877d8f5d4SJie Meng if (is_ereg(src_reg2))
136977d8f5d4SJie Meng vvvv |= 1 << 3;
137077d8f5d4SJie Meng
137177d8f5d4SJie Meng /*
137277d8f5d4SJie Meng * 2nd byte of 3-byte VEX prefix
137377d8f5d4SJie Meng * ~ means bit inverted encoding
137477d8f5d4SJie Meng *
137577d8f5d4SJie Meng * 7 0
137677d8f5d4SJie Meng * +---+---+---+---+---+---+---+---+
137777d8f5d4SJie Meng * |~R |~X |~B | m |
137877d8f5d4SJie Meng * +---+---+---+---+---+---+---+---+
137977d8f5d4SJie Meng */
138077d8f5d4SJie Meng b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
138177d8f5d4SJie Meng /*
138277d8f5d4SJie Meng * 3rd byte of 3-byte VEX prefix
138377d8f5d4SJie Meng *
138477d8f5d4SJie Meng * 7 0
138577d8f5d4SJie Meng * +---+---+---+---+---+---+---+---+
138677d8f5d4SJie Meng * | W | ~vvvv | L | pp |
138777d8f5d4SJie Meng * +---+---+---+---+---+---+---+---+
138877d8f5d4SJie Meng */
138977d8f5d4SJie Meng b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
139077d8f5d4SJie Meng
139177d8f5d4SJie Meng EMIT3(b0, b1, b2);
139277d8f5d4SJie Meng *pprog = prog;
139377d8f5d4SJie Meng }
139477d8f5d4SJie Meng
139577d8f5d4SJie Meng /* emit BMI2 shift instruction */
139677d8f5d4SJie Meng static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
139777d8f5d4SJie Meng {
139877d8f5d4SJie Meng u8 *prog = *pprog;
139977d8f5d4SJie Meng bool r = is_ereg(dst_reg);
140077d8f5d4SJie Meng u8 m = 2; /* escape code 0f38 */
140177d8f5d4SJie Meng
140277d8f5d4SJie Meng emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
140377d8f5d4SJie Meng EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
140477d8f5d4SJie Meng *pprog = prog;
140577d8f5d4SJie Meng }
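/*
 * Illustrative sketch (hypothetical, not part of the kernel build):
 * a 64-bit BPF_RSH by a register. With op == 3 (the 0xf2 prefix,
 * i.e. shrx) the call below emits C4 E2 CB F7 C0, i.e.
 * 'shrx rax, rax, rsi': the VEX vvvv field carries the count register
 * (rsi, from BPF_REG_2) while ModRM names rax twice.
 */
static __maybe_unused int example_shiftx(void)
{
	u8 buf[16], *prog = buf;

	emit_shiftx(&prog, BPF_REG_0, BPF_REG_2, true, 3);

	return prog - buf;
}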
140677d8f5d4SJie Meng
140793c5aeccSGary Lin #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
140893c5aeccSGary Lin
1409116e04baSLeon Hwang #define __LOAD_TCC_PTR(off) \
1410116e04baSLeon Hwang EMIT3_off32(0x48, 0x8B, 0x85, off)
1411116e04baSLeon Hwang /* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */
1412116e04baSLeon Hwang #define LOAD_TAIL_CALL_CNT_PTR(stack) \
1413116e04baSLeon Hwang __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))
14142b5dcb31SLeon Hwang
14151022a549SSong Liu static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
141693c5aeccSGary Lin int oldproglen, struct jit_context *ctx, bool jmp_padding)
1417b52f00e6SAlexei Starovoitov {
1418ebf7d1f5SMaciej Fijalkowski bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
1419b52f00e6SAlexei Starovoitov struct bpf_insn *insn = bpf_prog->insnsi;
1420ebf7d1f5SMaciej Fijalkowski bool callee_regs_used[4] = {};
1421b52f00e6SAlexei Starovoitov int insn_cnt = bpf_prog->len;
1422b52f00e6SAlexei Starovoitov bool seen_exit = false;
1423b52f00e6SAlexei Starovoitov u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
1424142fd4d2SAlexei Starovoitov u64 arena_vm_start, user_vm_start;
1425ced50fc4SJiri Olsa int i, excnt = 0;
142693c5aeccSGary Lin int ilen, proglen = 0;
1427b52f00e6SAlexei Starovoitov u8 *prog = temp;
142891c960b0SBrendan Jackman int err;
1429b52f00e6SAlexei Starovoitov
14302fe99eb0SAlexei Starovoitov arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
1431142fd4d2SAlexei Starovoitov user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);
14322fe99eb0SAlexei Starovoitov
1433f663a03cSLeon Hwang detect_reg_usage(insn, insn_cnt, callee_regs_used);
1434ebf7d1f5SMaciej Fijalkowski
143508691752SDaniel Borkmann emit_prologue(&prog, bpf_prog->aux->stack_depth,
1436ebf7d1f5SMaciej Fijalkowski bpf_prog_was_classic(bpf_prog), tail_call_reachable,
1437f18b03faSKumar Kartikeya Dwivedi bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
1438f18b03faSKumar Kartikeya Dwivedi /* Exception callback will clobber callee regs for its own use, and
1439f18b03faSKumar Kartikeya Dwivedi * restore the original callee regs from main prog's stack frame.
1440f18b03faSKumar Kartikeya Dwivedi */
1441f18b03faSKumar Kartikeya Dwivedi if (bpf_prog->aux->exception_boundary) {
1442f18b03faSKumar Kartikeya Dwivedi /* We also need to save r12, which is not mapped to any BPF
1443f18b03faSKumar Kartikeya Dwivedi * register, as we throw after entry into the kernel, which may
1444f18b03faSKumar Kartikeya Dwivedi * overwrite r12.
1445f18b03faSKumar Kartikeya Dwivedi */
1446f18b03faSKumar Kartikeya Dwivedi push_r12(&prog);
1447f18b03faSKumar Kartikeya Dwivedi push_callee_regs(&prog, all_callee_regs_used);
1448f18b03faSKumar Kartikeya Dwivedi } else {
14492fe99eb0SAlexei Starovoitov if (arena_vm_start)
14502fe99eb0SAlexei Starovoitov push_r12(&prog);
1451ebf7d1f5SMaciej Fijalkowski push_callee_regs(&prog, callee_regs_used);
1452f18b03faSKumar Kartikeya Dwivedi }
14532fe99eb0SAlexei Starovoitov if (arena_vm_start)
14542fe99eb0SAlexei Starovoitov emit_mov_imm64(&prog, X86_REG_R12,
14552fe99eb0SAlexei Starovoitov arena_vm_start >> 32, (u32) arena_vm_start);
145693c5aeccSGary Lin
145793c5aeccSGary Lin ilen = prog - temp;
14581022a549SSong Liu if (rw_image)
14591022a549SSong Liu memcpy(rw_image + proglen, temp, ilen);
146093c5aeccSGary Lin proglen += ilen;
146193c5aeccSGary Lin addrs[0] = proglen;
146293c5aeccSGary Lin prog = temp;
1463b52f00e6SAlexei Starovoitov
14647c2e988fSAlexei Starovoitov for (i = 1; i <= insn_cnt; i++, insn++) {
1465e430f34eSAlexei Starovoitov const s32 imm32 = insn->imm;
1466e430f34eSAlexei Starovoitov u32 dst_reg = insn->dst_reg;
1467e430f34eSAlexei Starovoitov u32 src_reg = insn->src_reg;
14686fe8b9c1SDaniel Borkmann u8 b2 = 0, b3 = 0;
14694c5de127SAlexei Starovoitov u8 *start_of_ldx;
147062258278SAlexei Starovoitov s64 jmp_offset;
147190156f4bSDave Marchevsky s16 insn_off;
147262258278SAlexei Starovoitov u8 jmp_cond;
147362258278SAlexei Starovoitov u8 *func;
147493c5aeccSGary Lin int nops;
147562258278SAlexei Starovoitov
147662258278SAlexei Starovoitov switch (insn->code) {
147762258278SAlexei Starovoitov /* ALU */
147862258278SAlexei Starovoitov case BPF_ALU | BPF_ADD | BPF_X:
147962258278SAlexei Starovoitov case BPF_ALU | BPF_SUB | BPF_X:
148062258278SAlexei Starovoitov case BPF_ALU | BPF_AND | BPF_X:
148162258278SAlexei Starovoitov case BPF_ALU | BPF_OR | BPF_X:
148262258278SAlexei Starovoitov case BPF_ALU | BPF_XOR | BPF_X:
148362258278SAlexei Starovoitov case BPF_ALU64 | BPF_ADD | BPF_X:
148462258278SAlexei Starovoitov case BPF_ALU64 | BPF_SUB | BPF_X:
148562258278SAlexei Starovoitov case BPF_ALU64 | BPF_AND | BPF_X:
148662258278SAlexei Starovoitov case BPF_ALU64 | BPF_OR | BPF_X:
148762258278SAlexei Starovoitov case BPF_ALU64 | BPF_XOR | BPF_X:
148874007cfcSBrendan Jackman maybe_emit_mod(&prog, dst_reg, src_reg,
148974007cfcSBrendan Jackman BPF_CLASS(insn->code) == BPF_ALU64);
1490e5f02cacSBrendan Jackman b2 = simple_alu_opcodes[BPF_OP(insn->code)];
1491e430f34eSAlexei Starovoitov EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
14920a14842fSEric Dumazet break;
149362258278SAlexei Starovoitov
149462258278SAlexei Starovoitov case BPF_ALU64 | BPF_MOV | BPF_X:
1495770546aeSPuranjay Mohan if (insn_is_cast_user(insn)) {
1496142fd4d2SAlexei Starovoitov if (dst_reg != src_reg)
1497142fd4d2SAlexei Starovoitov /* 32-bit mov */
1498142fd4d2SAlexei Starovoitov emit_mov_reg(&prog, false, dst_reg, src_reg);
1499142fd4d2SAlexei Starovoitov /* shl dst_reg, 32 */
1500142fd4d2SAlexei Starovoitov maybe_emit_1mod(&prog, dst_reg, true);
1501142fd4d2SAlexei Starovoitov EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32);
1502142fd4d2SAlexei Starovoitov
1503142fd4d2SAlexei Starovoitov /* or dst_reg, user_vm_start */
1504142fd4d2SAlexei Starovoitov maybe_emit_1mod(&prog, dst_reg, true);
1505142fd4d2SAlexei Starovoitov if (is_axreg(dst_reg))
1506142fd4d2SAlexei Starovoitov EMIT1_off32(0x0D, user_vm_start >> 32);
1507142fd4d2SAlexei Starovoitov else
1508142fd4d2SAlexei Starovoitov EMIT2_off32(0x81, add_1reg(0xC8, dst_reg), user_vm_start >> 32);
1509142fd4d2SAlexei Starovoitov
1510142fd4d2SAlexei Starovoitov /* rol dst_reg, 32 */
1511142fd4d2SAlexei Starovoitov maybe_emit_1mod(&prog, dst_reg, true);
1512142fd4d2SAlexei Starovoitov EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32);
1513142fd4d2SAlexei Starovoitov
1514142fd4d2SAlexei Starovoitov /* xor r11, r11 */
1515142fd4d2SAlexei Starovoitov EMIT3(0x4D, 0x31, 0xDB);
1516142fd4d2SAlexei Starovoitov
1517142fd4d2SAlexei Starovoitov /* test dst_reg32, dst_reg32; check if lower 32-bit are zero */
1518142fd4d2SAlexei Starovoitov maybe_emit_mod(&prog, dst_reg, dst_reg, false);
1519142fd4d2SAlexei Starovoitov EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1520142fd4d2SAlexei Starovoitov
1521142fd4d2SAlexei Starovoitov /* cmove r11, dst_reg; if so, set dst_reg to zero */
1522142fd4d2SAlexei Starovoitov /* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */
1523142fd4d2SAlexei Starovoitov maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
1524142fd4d2SAlexei Starovoitov EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
1525142fd4d2SAlexei Starovoitov break;
15267bdbf744SAndrii Nakryiko } else if (insn_is_mov_percpu_addr(insn)) {
15277bdbf744SAndrii Nakryiko /* mov <dst>, <src> (if necessary) */
15287bdbf744SAndrii Nakryiko EMIT_mov(dst_reg, src_reg);
15291e9e0b85SAndrii Nakryiko #ifdef CONFIG_SMP
15307bdbf744SAndrii Nakryiko /* add <dst>, gs:[<off>] */
15317bdbf744SAndrii Nakryiko EMIT2(0x65, add_1mod(0x48, dst_reg));
1532462e5e2aSAlexei Starovoitov EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25);
15331e9e0b85SAndrii Nakryiko EMIT((u32)(unsigned long)&this_cpu_off, 4);
15341e9e0b85SAndrii Nakryiko #endif
15357bdbf744SAndrii Nakryiko break;
1536142fd4d2SAlexei Starovoitov }
1537142fd4d2SAlexei Starovoitov fallthrough;
153862258278SAlexei Starovoitov case BPF_ALU | BPF_MOV | BPF_X:
15398100928cSYonghong Song if (insn->off == 0)
15404c38e2f3SDaniel Borkmann emit_mov_reg(&prog,
15414c38e2f3SDaniel Borkmann BPF_CLASS(insn->code) == BPF_ALU64,
15424c38e2f3SDaniel Borkmann dst_reg, src_reg);
15438100928cSYonghong Song else
15448100928cSYonghong Song emit_movsx_reg(&prog, insn->off,
15458100928cSYonghong Song BPF_CLASS(insn->code) == BPF_ALU64,
15468100928cSYonghong Song dst_reg, src_reg);
154762258278SAlexei Starovoitov break;
154862258278SAlexei Starovoitov
1549e430f34eSAlexei Starovoitov /* neg dst */
155062258278SAlexei Starovoitov case BPF_ALU | BPF_NEG:
155162258278SAlexei Starovoitov case BPF_ALU64 | BPF_NEG:
15526364d7d7SJie Meng maybe_emit_1mod(&prog, dst_reg,
15536364d7d7SJie Meng BPF_CLASS(insn->code) == BPF_ALU64);
1554e430f34eSAlexei Starovoitov EMIT2(0xF7, add_1reg(0xD8, dst_reg));
155562258278SAlexei Starovoitov break;
155662258278SAlexei Starovoitov
155762258278SAlexei Starovoitov case BPF_ALU | BPF_ADD | BPF_K:
155862258278SAlexei Starovoitov case BPF_ALU | BPF_SUB | BPF_K:
155962258278SAlexei Starovoitov case BPF_ALU | BPF_AND | BPF_K:
156062258278SAlexei Starovoitov case BPF_ALU | BPF_OR | BPF_K:
156162258278SAlexei Starovoitov case BPF_ALU | BPF_XOR | BPF_K:
156262258278SAlexei Starovoitov case BPF_ALU64 | BPF_ADD | BPF_K:
156362258278SAlexei Starovoitov case BPF_ALU64 | BPF_SUB | BPF_K:
156462258278SAlexei Starovoitov case BPF_ALU64 | BPF_AND | BPF_K:
156562258278SAlexei Starovoitov case BPF_ALU64 | BPF_OR | BPF_K:
156662258278SAlexei Starovoitov case BPF_ALU64 | BPF_XOR | BPF_K:
15676364d7d7SJie Meng maybe_emit_1mod(&prog, dst_reg,
15686364d7d7SJie Meng BPF_CLASS(insn->code) == BPF_ALU64);
156962258278SAlexei Starovoitov
1570a2c7a983SIngo Molnar /*
1571a2c7a983SIngo Molnar * b3 holds the 'normal' opcode; the b2 short form is only
1572de0a444dSDaniel Borkmann * valid when dst is eax/rax.
1573de0a444dSDaniel Borkmann */
157462258278SAlexei Starovoitov switch (BPF_OP(insn->code)) {
1575de0a444dSDaniel Borkmann case BPF_ADD:
1576de0a444dSDaniel Borkmann b3 = 0xC0;
1577de0a444dSDaniel Borkmann b2 = 0x05;
1578de0a444dSDaniel Borkmann break;
1579de0a444dSDaniel Borkmann case BPF_SUB:
1580de0a444dSDaniel Borkmann b3 = 0xE8;
1581de0a444dSDaniel Borkmann b2 = 0x2D;
1582de0a444dSDaniel Borkmann break;
1583de0a444dSDaniel Borkmann case BPF_AND:
1584de0a444dSDaniel Borkmann b3 = 0xE0;
1585de0a444dSDaniel Borkmann b2 = 0x25;
1586de0a444dSDaniel Borkmann break;
1587de0a444dSDaniel Borkmann case BPF_OR:
1588de0a444dSDaniel Borkmann b3 = 0xC8;
1589de0a444dSDaniel Borkmann b2 = 0x0D;
1590de0a444dSDaniel Borkmann break;
1591de0a444dSDaniel Borkmann case BPF_XOR:
1592de0a444dSDaniel Borkmann b3 = 0xF0;
1593de0a444dSDaniel Borkmann b2 = 0x35;
1594de0a444dSDaniel Borkmann break;
159562258278SAlexei Starovoitov }
159662258278SAlexei Starovoitov
1597e430f34eSAlexei Starovoitov if (is_imm8(imm32))
1598e430f34eSAlexei Starovoitov EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1599de0a444dSDaniel Borkmann else if (is_axreg(dst_reg))
1600de0a444dSDaniel Borkmann EMIT1_off32(b2, imm32);
160162258278SAlexei Starovoitov else
1602e430f34eSAlexei Starovoitov EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
160362258278SAlexei Starovoitov break;
160462258278SAlexei Starovoitov
160562258278SAlexei Starovoitov case BPF_ALU64 | BPF_MOV | BPF_K:
160662258278SAlexei Starovoitov case BPF_ALU | BPF_MOV | BPF_K:
16076fe8b9c1SDaniel Borkmann emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
16086fe8b9c1SDaniel Borkmann dst_reg, imm32);
160962258278SAlexei Starovoitov break;
161062258278SAlexei Starovoitov
161102ab695bSAlexei Starovoitov case BPF_LD | BPF_IMM | BPF_DW:
16126fe8b9c1SDaniel Borkmann emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
161302ab695bSAlexei Starovoitov insn++;
161402ab695bSAlexei Starovoitov i++;
161502ab695bSAlexei Starovoitov break;
161602ab695bSAlexei Starovoitov
1617e430f34eSAlexei Starovoitov /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
161862258278SAlexei Starovoitov case BPF_ALU | BPF_MOD | BPF_X:
161962258278SAlexei Starovoitov case BPF_ALU | BPF_DIV | BPF_X:
162062258278SAlexei Starovoitov case BPF_ALU | BPF_MOD | BPF_K:
162162258278SAlexei Starovoitov case BPF_ALU | BPF_DIV | BPF_K:
162262258278SAlexei Starovoitov case BPF_ALU64 | BPF_MOD | BPF_X:
162362258278SAlexei Starovoitov case BPF_ALU64 | BPF_DIV | BPF_X:
162462258278SAlexei Starovoitov case BPF_ALU64 | BPF_MOD | BPF_K:
162557a610f1SJie Meng case BPF_ALU64 | BPF_DIV | BPF_K: {
16264c38e2f3SDaniel Borkmann bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
16274c38e2f3SDaniel Borkmann
1628d806a0cfSDaniel Borkmann if (dst_reg != BPF_REG_0)
162962258278SAlexei Starovoitov EMIT1(0x50); /* push rax */
1630d806a0cfSDaniel Borkmann if (dst_reg != BPF_REG_3)
163162258278SAlexei Starovoitov EMIT1(0x52); /* push rdx */
163262258278SAlexei Starovoitov
163357a610f1SJie Meng if (BPF_SRC(insn->code) == BPF_X) {
163457a610f1SJie Meng if (src_reg == BPF_REG_0 ||
163557a610f1SJie Meng src_reg == BPF_REG_3) {
163662258278SAlexei Starovoitov /* mov r11, src_reg */
163762258278SAlexei Starovoitov EMIT_mov(AUX_REG, src_reg);
163857a610f1SJie Meng src_reg = AUX_REG;
163957a610f1SJie Meng }
164057a610f1SJie Meng } else {
164162258278SAlexei Starovoitov /* mov r11, imm32 */
164262258278SAlexei Starovoitov EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
164357a610f1SJie Meng src_reg = AUX_REG;
164457a610f1SJie Meng }
164562258278SAlexei Starovoitov
164657a610f1SJie Meng if (dst_reg != BPF_REG_0)
164762258278SAlexei Starovoitov /* mov rax, dst_reg */
164857a610f1SJie Meng emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
164962258278SAlexei Starovoitov
1650ec0e2da9SYonghong Song if (insn->off == 0) {
165162258278SAlexei Starovoitov /*
165262258278SAlexei Starovoitov * xor edx, edx
165362258278SAlexei Starovoitov * equivalent to 'xor rdx, rdx', but one byte less
165462258278SAlexei Starovoitov */
165562258278SAlexei Starovoitov EMIT2(0x31, 0xd2);
165662258278SAlexei Starovoitov
165757a610f1SJie Meng /* div src_reg */
16586364d7d7SJie Meng maybe_emit_1mod(&prog, src_reg, is64);
165957a610f1SJie Meng EMIT2(0xF7, add_1reg(0xF0, src_reg));
1660ec0e2da9SYonghong Song } else {
1661ec0e2da9SYonghong Song if (BPF_CLASS(insn->code) == BPF_ALU)
1662ec0e2da9SYonghong Song EMIT1(0x99); /* cdq */
1663ec0e2da9SYonghong Song else
1664ec0e2da9SYonghong Song EMIT2(0x48, 0x99); /* cqo */
1665ec0e2da9SYonghong Song
1666ec0e2da9SYonghong Song /* idiv src_reg */
1667ec0e2da9SYonghong Song maybe_emit_1mod(&prog, src_reg, is64);
1668ec0e2da9SYonghong Song EMIT2(0xF7, add_1reg(0xF8, src_reg));
1669ec0e2da9SYonghong Song }
167062258278SAlexei Starovoitov
167157a610f1SJie Meng if (BPF_OP(insn->code) == BPF_MOD &&
167257a610f1SJie Meng dst_reg != BPF_REG_3)
167357a610f1SJie Meng /* mov dst_reg, rdx */
167457a610f1SJie Meng emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
167557a610f1SJie Meng else if (BPF_OP(insn->code) == BPF_DIV &&
167657a610f1SJie Meng dst_reg != BPF_REG_0)
167757a610f1SJie Meng /* mov dst_reg, rax */
167857a610f1SJie Meng emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
167962258278SAlexei Starovoitov
1680d806a0cfSDaniel Borkmann if (dst_reg != BPF_REG_3)
168162258278SAlexei Starovoitov EMIT1(0x5A); /* pop rdx */
168257a610f1SJie Meng if (dst_reg != BPF_REG_0)
168362258278SAlexei Starovoitov EMIT1(0x58); /* pop rax */
168462258278SAlexei Starovoitov break;
16854c38e2f3SDaniel Borkmann }
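		/*
		 * For reference, a sketch of what the sequence above emits
		 * for 'r0 /= r1' (unsigned BPF_DIV, insn->off == 0):
		 *
		 *	push rdx		(dst != BPF_REG_3)
		 *	xor  edx, edx
		 *	div  rdi		(rdx:rax / rdi, quotient in rax)
		 *	pop  rdx
		 *
		 * Signed division (insn->off == 1) uses cdq/cqo plus idiv
		 * instead, and BPF_MOD takes the remainder from rdx rather
		 * than the quotient from rax.
		 */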
168662258278SAlexei Starovoitov
168762258278SAlexei Starovoitov case BPF_ALU | BPF_MUL | BPF_K:
168862258278SAlexei Starovoitov case BPF_ALU64 | BPF_MUL | BPF_K:
16896364d7d7SJie Meng maybe_emit_mod(&prog, dst_reg, dst_reg,
16906364d7d7SJie Meng BPF_CLASS(insn->code) == BPF_ALU64);
169162258278SAlexei Starovoitov
1692c0354077SJie Meng if (is_imm8(imm32))
1693c0354077SJie Meng /* imul dst_reg, dst_reg, imm8 */
1694c0354077SJie Meng EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1695c0354077SJie Meng imm32);
169662258278SAlexei Starovoitov else
1697c0354077SJie Meng /* imul dst_reg, dst_reg, imm32 */
1698c0354077SJie Meng EMIT2_off32(0x69,
1699c0354077SJie Meng add_2reg(0xC0, dst_reg, dst_reg),
1700c0354077SJie Meng imm32);
170162258278SAlexei Starovoitov break;
1702c0354077SJie Meng
1703c0354077SJie Meng case BPF_ALU | BPF_MUL | BPF_X:
1704c0354077SJie Meng case BPF_ALU64 | BPF_MUL | BPF_X:
17056364d7d7SJie Meng maybe_emit_mod(&prog, src_reg, dst_reg,
17066364d7d7SJie Meng BPF_CLASS(insn->code) == BPF_ALU64);
1707c0354077SJie Meng
1708c0354077SJie Meng /* imul dst_reg, src_reg */
1709c0354077SJie Meng EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1710c0354077SJie Meng break;
1711c0354077SJie Meng
1712a2c7a983SIngo Molnar /* Shifts */
171362258278SAlexei Starovoitov case BPF_ALU | BPF_LSH | BPF_K:
171462258278SAlexei Starovoitov case BPF_ALU | BPF_RSH | BPF_K:
171562258278SAlexei Starovoitov case BPF_ALU | BPF_ARSH | BPF_K:
171662258278SAlexei Starovoitov case BPF_ALU64 | BPF_LSH | BPF_K:
171762258278SAlexei Starovoitov case BPF_ALU64 | BPF_RSH | BPF_K:
171862258278SAlexei Starovoitov case BPF_ALU64 | BPF_ARSH | BPF_K:
17196364d7d7SJie Meng maybe_emit_1mod(&prog, dst_reg,
17206364d7d7SJie Meng BPF_CLASS(insn->code) == BPF_ALU64);
172162258278SAlexei Starovoitov
1722e5f02cacSBrendan Jackman b3 = simple_alu_opcodes[BPF_OP(insn->code)];
172388e69a1fSDaniel Borkmann if (imm32 == 1)
172488e69a1fSDaniel Borkmann EMIT2(0xD1, add_1reg(b3, dst_reg));
172588e69a1fSDaniel Borkmann else
1726e430f34eSAlexei Starovoitov EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
172762258278SAlexei Starovoitov break;
172862258278SAlexei Starovoitov
172972b603eeSAlexei Starovoitov case BPF_ALU | BPF_LSH | BPF_X:
173072b603eeSAlexei Starovoitov case BPF_ALU | BPF_RSH | BPF_X:
173172b603eeSAlexei Starovoitov case BPF_ALU | BPF_ARSH | BPF_X:
173272b603eeSAlexei Starovoitov case BPF_ALU64 | BPF_LSH | BPF_X:
173372b603eeSAlexei Starovoitov case BPF_ALU64 | BPF_RSH | BPF_X:
173472b603eeSAlexei Starovoitov case BPF_ALU64 | BPF_ARSH | BPF_X:
173577d8f5d4SJie Meng /* BMI2 shifts aren't better when shift count is already in rcx */
173677d8f5d4SJie Meng if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
173777d8f5d4SJie Meng /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
173877d8f5d4SJie Meng bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
173977d8f5d4SJie Meng u8 op;
174072b603eeSAlexei Starovoitov
174177d8f5d4SJie Meng switch (BPF_OP(insn->code)) {
174277d8f5d4SJie Meng case BPF_LSH:
174377d8f5d4SJie Meng op = 1; /* prefix 0x66 */
174477d8f5d4SJie Meng break;
174577d8f5d4SJie Meng case BPF_RSH:
174677d8f5d4SJie Meng op = 3; /* prefix 0xf2 */
174777d8f5d4SJie Meng break;
174877d8f5d4SJie Meng case BPF_ARSH:
174977d8f5d4SJie Meng op = 2; /* prefix 0xf3 */
175077d8f5d4SJie Meng break;
175177d8f5d4SJie Meng }
175277d8f5d4SJie Meng
175377d8f5d4SJie Meng emit_shiftx(&prog, dst_reg, src_reg, w, op);
175477d8f5d4SJie Meng
175577d8f5d4SJie Meng break;
175677d8f5d4SJie Meng }
175772b603eeSAlexei Starovoitov
175881b35e7cSJie Meng if (src_reg != BPF_REG_4) { /* common case */
1759a2c7a983SIngo Molnar /* Check for bad case when dst_reg == rcx */
176072b603eeSAlexei Starovoitov if (dst_reg == BPF_REG_4) {
176172b603eeSAlexei Starovoitov /* mov r11, dst_reg */
176272b603eeSAlexei Starovoitov EMIT_mov(AUX_REG, dst_reg);
176372b603eeSAlexei Starovoitov dst_reg = AUX_REG;
176481b35e7cSJie Meng } else {
176572b603eeSAlexei Starovoitov EMIT1(0x51); /* push rcx */
176681b35e7cSJie Meng }
176772b603eeSAlexei Starovoitov /* mov rcx, src_reg */
176872b603eeSAlexei Starovoitov EMIT_mov(BPF_REG_4, src_reg);
176972b603eeSAlexei Starovoitov }
177072b603eeSAlexei Starovoitov
177172b603eeSAlexei Starovoitov /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
17726364d7d7SJie Meng maybe_emit_1mod(&prog, dst_reg,
17736364d7d7SJie Meng BPF_CLASS(insn->code) == BPF_ALU64);
177472b603eeSAlexei Starovoitov
1775e5f02cacSBrendan Jackman b3 = simple_alu_opcodes[BPF_OP(insn->code)];
177672b603eeSAlexei Starovoitov EMIT2(0xD3, add_1reg(b3, dst_reg));
177772b603eeSAlexei Starovoitov
177881b35e7cSJie Meng if (src_reg != BPF_REG_4) {
177972b603eeSAlexei Starovoitov if (insn->dst_reg == BPF_REG_4)
178072b603eeSAlexei Starovoitov /* mov dst_reg, r11 */
178172b603eeSAlexei Starovoitov EMIT_mov(insn->dst_reg, AUX_REG);
178281b35e7cSJie Meng else
178381b35e7cSJie Meng EMIT1(0x59); /* pop rcx */
178481b35e7cSJie Meng }
178581b35e7cSJie Meng
178672b603eeSAlexei Starovoitov break;
178772b603eeSAlexei Starovoitov
178862258278SAlexei Starovoitov case BPF_ALU | BPF_END | BPF_FROM_BE:
17890845c3dbSYonghong Song case BPF_ALU64 | BPF_END | BPF_FROM_LE:
1790e430f34eSAlexei Starovoitov switch (imm32) {
179162258278SAlexei Starovoitov case 16:
1792a2c7a983SIngo Molnar /* Emit 'ror %ax, 8' to swap lower 2 bytes */
179362258278SAlexei Starovoitov EMIT1(0x66);
1794e430f34eSAlexei Starovoitov if (is_ereg(dst_reg))
179562258278SAlexei Starovoitov EMIT1(0x41);
1796e430f34eSAlexei Starovoitov EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1797343f845bSAlexei Starovoitov
1798a2c7a983SIngo Molnar /* Emit 'movzwl eax, ax' */
1799343f845bSAlexei Starovoitov if (is_ereg(dst_reg))
1800343f845bSAlexei Starovoitov EMIT3(0x45, 0x0F, 0xB7);
1801343f845bSAlexei Starovoitov else
1802343f845bSAlexei Starovoitov EMIT2(0x0F, 0xB7);
1803343f845bSAlexei Starovoitov EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
180462258278SAlexei Starovoitov break;
180562258278SAlexei Starovoitov case 32:
1806a2c7a983SIngo Molnar /* Emit 'bswap eax' to swap lower 4 bytes */
1807e430f34eSAlexei Starovoitov if (is_ereg(dst_reg))
180862258278SAlexei Starovoitov EMIT2(0x41, 0x0F);
180962258278SAlexei Starovoitov else
181062258278SAlexei Starovoitov EMIT1(0x0F);
1811e430f34eSAlexei Starovoitov EMIT1(add_1reg(0xC8, dst_reg));
181262258278SAlexei Starovoitov break;
181362258278SAlexei Starovoitov case 64:
1814a2c7a983SIngo Molnar /* Emit 'bswap rax' to swap 8 bytes */
1815e430f34eSAlexei Starovoitov EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1816e430f34eSAlexei Starovoitov add_1reg(0xC8, dst_reg));
181762258278SAlexei Starovoitov break;
181862258278SAlexei Starovoitov }
181962258278SAlexei Starovoitov break;
182062258278SAlexei Starovoitov
182162258278SAlexei Starovoitov case BPF_ALU | BPF_END | BPF_FROM_LE:
1822343f845bSAlexei Starovoitov switch (imm32) {
1823343f845bSAlexei Starovoitov case 16:
1824a2c7a983SIngo Molnar /*
1825a2c7a983SIngo Molnar * Emit 'movzwl eax, ax' to zero extend 16-bit
1826343f845bSAlexei Starovoitov * into 64 bit
1827343f845bSAlexei Starovoitov */
1828343f845bSAlexei Starovoitov if (is_ereg(dst_reg))
1829343f845bSAlexei Starovoitov EMIT3(0x45, 0x0F, 0xB7);
1830343f845bSAlexei Starovoitov else
1831343f845bSAlexei Starovoitov EMIT2(0x0F, 0xB7);
1832343f845bSAlexei Starovoitov EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1833343f845bSAlexei Starovoitov break;
1834343f845bSAlexei Starovoitov case 32:
1835a2c7a983SIngo Molnar /* Emit 'mov eax, eax' to clear upper 32-bits */
1836343f845bSAlexei Starovoitov if (is_ereg(dst_reg))
1837343f845bSAlexei Starovoitov EMIT1(0x45);
1838343f845bSAlexei Starovoitov EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1839343f845bSAlexei Starovoitov break;
1840343f845bSAlexei Starovoitov case 64:
1841343f845bSAlexei Starovoitov /* nop */
1842343f845bSAlexei Starovoitov break;
1843343f845bSAlexei Starovoitov }
184462258278SAlexei Starovoitov break;
184562258278SAlexei Starovoitov
1846f5e81d11SDaniel Borkmann /* speculation barrier */
1847f5e81d11SDaniel Borkmann case BPF_ST | BPF_NOSPEC:
184887c87ecdSPeter Zijlstra EMIT_LFENCE();
1849f5e81d11SDaniel Borkmann break;
1850f5e81d11SDaniel Borkmann
1851e430f34eSAlexei Starovoitov /* ST: *(u8*)(dst_reg + off) = imm */
185262258278SAlexei Starovoitov case BPF_ST | BPF_MEM | BPF_B:
1853e430f34eSAlexei Starovoitov if (is_ereg(dst_reg))
185462258278SAlexei Starovoitov EMIT2(0x41, 0xC6);
185562258278SAlexei Starovoitov else
185662258278SAlexei Starovoitov EMIT1(0xC6);
185762258278SAlexei Starovoitov goto st;
185862258278SAlexei Starovoitov case BPF_ST | BPF_MEM | BPF_H:
1859e430f34eSAlexei Starovoitov if (is_ereg(dst_reg))
186062258278SAlexei Starovoitov EMIT3(0x66, 0x41, 0xC7);
186162258278SAlexei Starovoitov else
186262258278SAlexei Starovoitov EMIT2(0x66, 0xC7);
186362258278SAlexei Starovoitov goto st;
186462258278SAlexei Starovoitov case BPF_ST | BPF_MEM | BPF_W:
1865e430f34eSAlexei Starovoitov if (is_ereg(dst_reg))
186662258278SAlexei Starovoitov EMIT2(0x41, 0xC7);
186762258278SAlexei Starovoitov else
186862258278SAlexei Starovoitov EMIT1(0xC7);
186962258278SAlexei Starovoitov goto st;
187062258278SAlexei Starovoitov case BPF_ST | BPF_MEM | BPF_DW:
1871e430f34eSAlexei Starovoitov EMIT2(add_1mod(0x48, dst_reg), 0xC7);
187262258278SAlexei Starovoitov
187362258278SAlexei Starovoitov st: if (is_imm8(insn->off))
1874e430f34eSAlexei Starovoitov EMIT2(add_1reg(0x40, dst_reg), insn->off);
187562258278SAlexei Starovoitov else
1876e430f34eSAlexei Starovoitov EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
187762258278SAlexei Starovoitov
1878e430f34eSAlexei Starovoitov EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
187962258278SAlexei Starovoitov break;
188062258278SAlexei Starovoitov
1881e430f34eSAlexei Starovoitov /* STX: *(u8*)(dst_reg + off) = src_reg */
188262258278SAlexei Starovoitov case BPF_STX | BPF_MEM | BPF_B:
188362258278SAlexei Starovoitov case BPF_STX | BPF_MEM | BPF_H:
188462258278SAlexei Starovoitov case BPF_STX | BPF_MEM | BPF_W:
188562258278SAlexei Starovoitov case BPF_STX | BPF_MEM | BPF_DW:
18863b2744e6SAlexei Starovoitov emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
188762258278SAlexei Starovoitov break;
188862258278SAlexei Starovoitov
18892fe99eb0SAlexei Starovoitov case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
18902fe99eb0SAlexei Starovoitov case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
18912fe99eb0SAlexei Starovoitov case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
18922fe99eb0SAlexei Starovoitov case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
18932fe99eb0SAlexei Starovoitov start_of_ldx = prog;
18942fe99eb0SAlexei Starovoitov emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm);
18952fe99eb0SAlexei Starovoitov goto populate_extable;
18962fe99eb0SAlexei Starovoitov
18972fe99eb0SAlexei Starovoitov /* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */
18982fe99eb0SAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
18992fe99eb0SAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
19002fe99eb0SAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
19012fe99eb0SAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
19022fe99eb0SAlexei Starovoitov case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
19032fe99eb0SAlexei Starovoitov case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
19042fe99eb0SAlexei Starovoitov case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
19052fe99eb0SAlexei Starovoitov case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
19062fe99eb0SAlexei Starovoitov start_of_ldx = prog;
19072fe99eb0SAlexei Starovoitov if (BPF_CLASS(insn->code) == BPF_LDX)
19082fe99eb0SAlexei Starovoitov emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
19092fe99eb0SAlexei Starovoitov else
19102fe99eb0SAlexei Starovoitov emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
19112fe99eb0SAlexei Starovoitov populate_extable:
19122fe99eb0SAlexei Starovoitov {
19132fe99eb0SAlexei Starovoitov struct exception_table_entry *ex;
19142fe99eb0SAlexei Starovoitov u8 *_insn = image + proglen + (start_of_ldx - temp);
19152fe99eb0SAlexei Starovoitov s64 delta;
19162fe99eb0SAlexei Starovoitov
19172fe99eb0SAlexei Starovoitov if (!bpf_prog->aux->extable)
19182fe99eb0SAlexei Starovoitov break;
19192fe99eb0SAlexei Starovoitov
19202fe99eb0SAlexei Starovoitov if (excnt >= bpf_prog->aux->num_exentries) {
19212fe99eb0SAlexei Starovoitov pr_err("mem32 extable bug\n");
19222fe99eb0SAlexei Starovoitov return -EFAULT;
19232fe99eb0SAlexei Starovoitov }
19242fe99eb0SAlexei Starovoitov ex = &bpf_prog->aux->extable[excnt++];
19252fe99eb0SAlexei Starovoitov
19262fe99eb0SAlexei Starovoitov delta = _insn - (u8 *)&ex->insn;
19272fe99eb0SAlexei Starovoitov /* switch ex to rw buffer for writes */
19282fe99eb0SAlexei Starovoitov ex = (void *)rw_image + ((void *)ex - (void *)image);
19292fe99eb0SAlexei Starovoitov
19302fe99eb0SAlexei Starovoitov ex->insn = delta;
19312fe99eb0SAlexei Starovoitov
19322fe99eb0SAlexei Starovoitov ex->data = EX_TYPE_BPF;
19332fe99eb0SAlexei Starovoitov
19342fe99eb0SAlexei Starovoitov ex->fixup = (prog - start_of_ldx) |
19352fe99eb0SAlexei Starovoitov ((BPF_CLASS(insn->code) == BPF_LDX ? reg2pt_regs[dst_reg] : DONT_CLEAR) << 8);
19362fe99eb0SAlexei Starovoitov }
19372fe99eb0SAlexei Starovoitov break;
19382fe99eb0SAlexei Starovoitov
1939e430f34eSAlexei Starovoitov /* LDX: dst_reg = *(u8*)(src_reg + off) */
194062258278SAlexei Starovoitov case BPF_LDX | BPF_MEM | BPF_B:
19413dec541bSAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM | BPF_B:
194262258278SAlexei Starovoitov case BPF_LDX | BPF_MEM | BPF_H:
19433dec541bSAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM | BPF_H:
194462258278SAlexei Starovoitov case BPF_LDX | BPF_MEM | BPF_W:
19453dec541bSAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM | BPF_W:
194662258278SAlexei Starovoitov case BPF_LDX | BPF_MEM | BPF_DW:
19473dec541bSAlexei Starovoitov case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
19481f9a1ea8SYonghong Song /* LDSX: dst_reg = *(s8*)(src_reg + off) */
19491f9a1ea8SYonghong Song case BPF_LDX | BPF_MEMSX | BPF_B:
19501f9a1ea8SYonghong Song case BPF_LDX | BPF_MEMSX | BPF_H:
19511f9a1ea8SYonghong Song case BPF_LDX | BPF_MEMSX | BPF_W:
19521f9a1ea8SYonghong Song case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
19531f9a1ea8SYonghong Song case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
19541f9a1ea8SYonghong Song case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
195590156f4bSDave Marchevsky insn_off = insn->off;
1956588a25e9SAlexei Starovoitov
19571f9a1ea8SYonghong Song if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
19581f9a1ea8SYonghong Song BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
1959588a25e9SAlexei Starovoitov /* Conservatively check that src_reg + insn->off is a kernel address:
1960b599d7d2SPuranjay Mohan * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
1961b599d7d2SPuranjay Mohan * and
1962b599d7d2SPuranjay Mohan * src_reg + insn->off < VSYSCALL_ADDR
1963588a25e9SAlexei Starovoitov */
1964588a25e9SAlexei Starovoitov
1965b599d7d2SPuranjay Mohan u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
196690156f4bSDave Marchevsky u8 *end_of_jmp;
196790156f4bSDave Marchevsky
1968b599d7d2SPuranjay Mohan /* movabsq r10, VSYSCALL_ADDR */
1969b599d7d2SPuranjay Mohan emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
1970b599d7d2SPuranjay Mohan (u32)(long)VSYSCALL_ADDR);
197190156f4bSDave Marchevsky
1972b599d7d2SPuranjay Mohan /* mov src_reg, r11 */
1973b599d7d2SPuranjay Mohan EMIT_mov(AUX_REG, src_reg);
197490156f4bSDave Marchevsky
197590156f4bSDave Marchevsky if (insn->off) {
1976b599d7d2SPuranjay Mohan /* add r11, insn->off */
1977b599d7d2SPuranjay Mohan maybe_emit_1mod(&prog, AUX_REG, true);
1978b599d7d2SPuranjay Mohan EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
197990156f4bSDave Marchevsky }
198090156f4bSDave Marchevsky
1981b599d7d2SPuranjay Mohan /* sub r11, r10 */
1982b599d7d2SPuranjay Mohan maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
1983b599d7d2SPuranjay Mohan EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
1984588a25e9SAlexei Starovoitov
1985b599d7d2SPuranjay Mohan /* movabsq r10, limit */
1986b599d7d2SPuranjay Mohan emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
1987b599d7d2SPuranjay Mohan (u32)(long)limit);
1988b599d7d2SPuranjay Mohan
1989b599d7d2SPuranjay Mohan /* cmp r10, r11 */
1990b599d7d2SPuranjay Mohan maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
1991b599d7d2SPuranjay Mohan EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
1992b599d7d2SPuranjay Mohan
1993b599d7d2SPuranjay Mohan /* if unsigned '>', goto load */
1994b599d7d2SPuranjay Mohan EMIT2(X86_JA, 0);
199590156f4bSDave Marchevsky end_of_jmp = prog;
1996588a25e9SAlexei Starovoitov
19974c5de127SAlexei Starovoitov /* xor dst_reg, dst_reg */
19984c5de127SAlexei Starovoitov emit_mov_imm32(&prog, false, dst_reg, 0);
19994c5de127SAlexei Starovoitov /* jmp byte_after_ldx */
20004c5de127SAlexei Starovoitov EMIT2(0xEB, 0);
20014c5de127SAlexei Starovoitov
200290156f4bSDave Marchevsky /* populate jmp_offset for JA above to jump to start_of_ldx */
20034c5de127SAlexei Starovoitov start_of_ldx = prog;
200490156f4bSDave Marchevsky end_of_jmp[-1] = start_of_ldx - end_of_jmp;
20054c5de127SAlexei Starovoitov }
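			/*
			 * For reference, a sketch of the guard emitted above
			 * (r10 = BPF_REG_AX, r11 = AUX_REG):
			 *
			 *	movabs r10, VSYSCALL_ADDR
			 *	mov    r11, src_reg
			 *	add    r11, insn->off	(only if non-zero)
			 *	sub    r11, r10
			 *	movabs r10, TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR
			 *	cmp    r10, r11
			 *	ja     load		(kernel address: do the real load)
			 *	xor    dst_reg, dst_reg
			 *	jmp    after_load	(otherwise dst_reg reads as zero)
			 * load:
			 *	... emit_ldx()/emit_ldsx() below ...
			 * after_load:
			 */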
20061f9a1ea8SYonghong Song if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
20071f9a1ea8SYonghong Song BPF_MODE(insn->code) == BPF_MEMSX)
20081f9a1ea8SYonghong Song emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
20091f9a1ea8SYonghong Song else
201090156f4bSDave Marchevsky emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
20111f9a1ea8SYonghong Song if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
20121f9a1ea8SYonghong Song BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
20133dec541bSAlexei Starovoitov struct exception_table_entry *ex;
2014328aac5eSRavi Bangoria u8 *_insn = image + proglen + (start_of_ldx - temp);
20153dec541bSAlexei Starovoitov s64 delta;
20163dec541bSAlexei Starovoitov
20174c5de127SAlexei Starovoitov /* populate jmp_offset for JMP above */
20184c5de127SAlexei Starovoitov start_of_ldx[-1] = prog - start_of_ldx;
20194c5de127SAlexei Starovoitov
20203dec541bSAlexei Starovoitov if (!bpf_prog->aux->extable)
20213dec541bSAlexei Starovoitov break;
20223dec541bSAlexei Starovoitov
20233dec541bSAlexei Starovoitov if (excnt >= bpf_prog->aux->num_exentries) {
20243dec541bSAlexei Starovoitov pr_err("ex gen bug\n");
20253dec541bSAlexei Starovoitov return -EFAULT;
20263dec541bSAlexei Starovoitov }
20273dec541bSAlexei Starovoitov ex = &bpf_prog->aux->extable[excnt++];
20283dec541bSAlexei Starovoitov
20293dec541bSAlexei Starovoitov delta = _insn - (u8 *)&ex->insn;
20303dec541bSAlexei Starovoitov if (!is_simm32(delta)) {
20313dec541bSAlexei Starovoitov pr_err("extable->insn doesn't fit into 32-bit\n");
20323dec541bSAlexei Starovoitov return -EFAULT;
20333dec541bSAlexei Starovoitov }
20341022a549SSong Liu /* switch ex to rw buffer for writes */
20351022a549SSong Liu ex = (void *)rw_image + ((void *)ex - (void *)image);
20361022a549SSong Liu
20373dec541bSAlexei Starovoitov ex->insn = delta;
20383dec541bSAlexei Starovoitov
20394b5305deSPeter Zijlstra ex->data = EX_TYPE_BPF;
20403dec541bSAlexei Starovoitov
20413dec541bSAlexei Starovoitov if (dst_reg > BPF_REG_9) {
20423dec541bSAlexei Starovoitov pr_err("verifier error\n");
20433dec541bSAlexei Starovoitov return -EFAULT;
20443dec541bSAlexei Starovoitov }
20453dec541bSAlexei Starovoitov /*
20463dec541bSAlexei Starovoitov * Compute size of x86 insn and its target dest x86 register.
20473dec541bSAlexei Starovoitov * ex_handler_bpf() will use lower 8 bits to adjust
20483dec541bSAlexei Starovoitov * pt_regs->ip to jump over this x86 instruction
20493dec541bSAlexei Starovoitov * and upper bits to figure out which pt_regs to zero out.
20503dec541bSAlexei Starovoitov * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
20513dec541bSAlexei Starovoitov * of 4 bytes will be ignored and rbx will be zero inited.
20523dec541bSAlexei Starovoitov */
2053433956e9SAlexei Starovoitov ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
20543dec541bSAlexei Starovoitov }
205562258278SAlexei Starovoitov break;
205662258278SAlexei Starovoitov
205791c960b0SBrendan Jackman case BPF_STX | BPF_ATOMIC | BPF_W:
205891c960b0SBrendan Jackman case BPF_STX | BPF_ATOMIC | BPF_DW:
2059981f94c3SBrendan Jackman if (insn->imm == (BPF_AND | BPF_FETCH) ||
2060981f94c3SBrendan Jackman insn->imm == (BPF_OR | BPF_FETCH) ||
2061981f94c3SBrendan Jackman insn->imm == (BPF_XOR | BPF_FETCH)) {
2062981f94c3SBrendan Jackman bool is64 = BPF_SIZE(insn->code) == BPF_DW;
2063b29dd96bSBrendan Jackman u32 real_src_reg = src_reg;
2064ced18582SJohan Almbladh u32 real_dst_reg = dst_reg;
2065ced18582SJohan Almbladh u8 *branch_target;
2066981f94c3SBrendan Jackman
2067981f94c3SBrendan Jackman /*
2068981f94c3SBrendan Jackman * Can't be implemented with a single x86 insn.
2069981f94c3SBrendan Jackman * Need to do a CMPXCHG loop.
2070981f94c3SBrendan Jackman */
2071981f94c3SBrendan Jackman
2072981f94c3SBrendan Jackman /* Will need RAX as a CMPXCHG operand so save R0 */
2073981f94c3SBrendan Jackman emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
2074b29dd96bSBrendan Jackman if (src_reg == BPF_REG_0)
2075b29dd96bSBrendan Jackman real_src_reg = BPF_REG_AX;
2076ced18582SJohan Almbladh if (dst_reg == BPF_REG_0)
2077ced18582SJohan Almbladh real_dst_reg = BPF_REG_AX;
2078b29dd96bSBrendan Jackman
2079981f94c3SBrendan Jackman branch_target = prog;
2080981f94c3SBrendan Jackman /* Load old value */
2081981f94c3SBrendan Jackman emit_ldx(&prog, BPF_SIZE(insn->code),
2082ced18582SJohan Almbladh BPF_REG_0, real_dst_reg, insn->off);
2083981f94c3SBrendan Jackman /*
2084981f94c3SBrendan Jackman * Perform the (commutative) operation locally,
2085981f94c3SBrendan Jackman * put the result in the AUX_REG.
2086981f94c3SBrendan Jackman */
2087981f94c3SBrendan Jackman emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
2088b29dd96bSBrendan Jackman maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
2089981f94c3SBrendan Jackman EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
2090b29dd96bSBrendan Jackman add_2reg(0xC0, AUX_REG, real_src_reg));
2091981f94c3SBrendan Jackman /* Attempt to swap in new value */
2092981f94c3SBrendan Jackman err = emit_atomic(&prog, BPF_CMPXCHG,
2093ced18582SJohan Almbladh real_dst_reg, AUX_REG,
2094ced18582SJohan Almbladh insn->off,
2095981f94c3SBrendan Jackman BPF_SIZE(insn->code));
2096981f94c3SBrendan Jackman if (WARN_ON(err))
2097981f94c3SBrendan Jackman return err;
2098981f94c3SBrendan Jackman /*
2099981f94c3SBrendan Jackman * ZF tells us whether we won the race. If it's
2100981f94c3SBrendan Jackman * cleared we need to try again.
2101981f94c3SBrendan Jackman */
2102981f94c3SBrendan Jackman EMIT2(X86_JNE, -(prog - branch_target) - 2);
2103981f94c3SBrendan Jackman /* Return the pre-modification value */
2104b29dd96bSBrendan Jackman emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
2105981f94c3SBrendan Jackman /* Restore R0 after clobbering RAX */
2106981f94c3SBrendan Jackman emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
2107981f94c3SBrendan Jackman break;
2108981f94c3SBrendan Jackman }
2109981f94c3SBrendan Jackman
211091c960b0SBrendan Jackman err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
211191c960b0SBrendan Jackman insn->off, BPF_SIZE(insn->code));
211291c960b0SBrendan Jackman if (err)
211391c960b0SBrendan Jackman return err;
211462258278SAlexei Starovoitov break;
211562258278SAlexei Starovoitov
2116d503a04fSAlexei Starovoitov case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
2117d503a04fSAlexei Starovoitov case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
2118d503a04fSAlexei Starovoitov start_of_ldx = prog;
2119d503a04fSAlexei Starovoitov err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code),
2120d503a04fSAlexei Starovoitov dst_reg, src_reg, X86_REG_R12, insn->off);
2121d503a04fSAlexei Starovoitov if (err)
2122d503a04fSAlexei Starovoitov return err;
2123d503a04fSAlexei Starovoitov goto populate_extable;
2124d503a04fSAlexei Starovoitov
212562258278SAlexei Starovoitov /* call */
2126b2e9dfe5SThomas Gleixner case BPF_JMP | BPF_CALL: {
21276a537453SJoan Bruguera Micó u8 *ip = image + addrs[i - 1];
2128b2e9dfe5SThomas Gleixner
2129e430f34eSAlexei Starovoitov func = (u8 *) __bpf_call_base + imm32;
2130ebf7d1f5SMaciej Fijalkowski if (tail_call_reachable) {
2131116e04baSLeon Hwang LOAD_TAIL_CALL_CNT_PTR(bpf_prog->aux->stack_depth);
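			/* LOAD_TAIL_CALL_CNT_PTR() above emits a 7-byte mov,
			 * so advance the call site address to match.
			 */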
21326a537453SJoan Bruguera Micó ip += 7;
2133ebf7d1f5SMaciej Fijalkowski }
21346a537453SJoan Bruguera Micó if (!imm32)
21356a537453SJoan Bruguera Micó return -EINVAL;
21366a537453SJoan Bruguera Micó ip += x86_call_depth_emit_accounting(&prog, func, ip);
21376a537453SJoan Bruguera Micó if (emit_call(&prog, func, ip))
2138b2e9dfe5SThomas Gleixner return -EINVAL;
213962258278SAlexei Starovoitov break;
2140b2e9dfe5SThomas Gleixner }
214162258278SAlexei Starovoitov
214271189fa9SAlexei Starovoitov case BPF_JMP | BPF_TAIL_CALL:
2143428d5df1SDaniel Borkmann if (imm32)
2144f18b03faSKumar Kartikeya Dwivedi emit_bpf_tail_call_direct(bpf_prog,
2145f18b03faSKumar Kartikeya Dwivedi &bpf_prog->aux->poke_tab[imm32 - 1],
2146dceba081SPeter Zijlstra &prog, image + addrs[i - 1],
2147ebf7d1f5SMaciej Fijalkowski callee_regs_used,
2148dceba081SPeter Zijlstra bpf_prog->aux->stack_depth,
2149dceba081SPeter Zijlstra ctx);
2150428d5df1SDaniel Borkmann else
2151f18b03faSKumar Kartikeya Dwivedi emit_bpf_tail_call_indirect(bpf_prog,
2152f18b03faSKumar Kartikeya Dwivedi &prog,
2153ebf7d1f5SMaciej Fijalkowski callee_regs_used,
2154dceba081SPeter Zijlstra bpf_prog->aux->stack_depth,
2155dceba081SPeter Zijlstra image + addrs[i - 1],
2156dceba081SPeter Zijlstra ctx);
2157b52f00e6SAlexei Starovoitov break;
2158b52f00e6SAlexei Starovoitov
215962258278SAlexei Starovoitov /* cond jump */
216062258278SAlexei Starovoitov case BPF_JMP | BPF_JEQ | BPF_X:
216162258278SAlexei Starovoitov case BPF_JMP | BPF_JNE | BPF_X:
216262258278SAlexei Starovoitov case BPF_JMP | BPF_JGT | BPF_X:
216352afc51eSDaniel Borkmann case BPF_JMP | BPF_JLT | BPF_X:
216462258278SAlexei Starovoitov case BPF_JMP | BPF_JGE | BPF_X:
216552afc51eSDaniel Borkmann case BPF_JMP | BPF_JLE | BPF_X:
216662258278SAlexei Starovoitov case BPF_JMP | BPF_JSGT | BPF_X:
216752afc51eSDaniel Borkmann case BPF_JMP | BPF_JSLT | BPF_X:
216862258278SAlexei Starovoitov case BPF_JMP | BPF_JSGE | BPF_X:
216952afc51eSDaniel Borkmann case BPF_JMP | BPF_JSLE | BPF_X:
21703f5d6525SJiong Wang case BPF_JMP32 | BPF_JEQ | BPF_X:
21713f5d6525SJiong Wang case BPF_JMP32 | BPF_JNE | BPF_X:
21723f5d6525SJiong Wang case BPF_JMP32 | BPF_JGT | BPF_X:
21733f5d6525SJiong Wang case BPF_JMP32 | BPF_JLT | BPF_X:
21743f5d6525SJiong Wang case BPF_JMP32 | BPF_JGE | BPF_X:
21753f5d6525SJiong Wang case BPF_JMP32 | BPF_JLE | BPF_X:
21763f5d6525SJiong Wang case BPF_JMP32 | BPF_JSGT | BPF_X:
21773f5d6525SJiong Wang case BPF_JMP32 | BPF_JSLT | BPF_X:
21783f5d6525SJiong Wang case BPF_JMP32 | BPF_JSGE | BPF_X:
21793f5d6525SJiong Wang case BPF_JMP32 | BPF_JSLE | BPF_X:
2180e430f34eSAlexei Starovoitov /* cmp dst_reg, src_reg */
218174007cfcSBrendan Jackman maybe_emit_mod(&prog, dst_reg, src_reg,
218274007cfcSBrendan Jackman BPF_CLASS(insn->code) == BPF_JMP);
21833f5d6525SJiong Wang EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
218462258278SAlexei Starovoitov goto emit_cond_jmp;
218562258278SAlexei Starovoitov
218662258278SAlexei Starovoitov case BPF_JMP | BPF_JSET | BPF_X:
21873f5d6525SJiong Wang case BPF_JMP32 | BPF_JSET | BPF_X:
2188e430f34eSAlexei Starovoitov /* test dst_reg, src_reg */
218974007cfcSBrendan Jackman maybe_emit_mod(&prog, dst_reg, src_reg,
219074007cfcSBrendan Jackman BPF_CLASS(insn->code) == BPF_JMP);
21913f5d6525SJiong Wang EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
219262258278SAlexei Starovoitov goto emit_cond_jmp;
219362258278SAlexei Starovoitov
219462258278SAlexei Starovoitov case BPF_JMP | BPF_JSET | BPF_K:
21953f5d6525SJiong Wang case BPF_JMP32 | BPF_JSET | BPF_K:
2196e430f34eSAlexei Starovoitov /* test dst_reg, imm32 */
21976364d7d7SJie Meng maybe_emit_1mod(&prog, dst_reg,
21986364d7d7SJie Meng BPF_CLASS(insn->code) == BPF_JMP);
2199e430f34eSAlexei Starovoitov EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
220062258278SAlexei Starovoitov goto emit_cond_jmp;
220162258278SAlexei Starovoitov
220262258278SAlexei Starovoitov case BPF_JMP | BPF_JEQ | BPF_K:
220362258278SAlexei Starovoitov case BPF_JMP | BPF_JNE | BPF_K:
220462258278SAlexei Starovoitov case BPF_JMP | BPF_JGT | BPF_K:
220552afc51eSDaniel Borkmann case BPF_JMP | BPF_JLT | BPF_K:
220662258278SAlexei Starovoitov case BPF_JMP | BPF_JGE | BPF_K:
220752afc51eSDaniel Borkmann case BPF_JMP | BPF_JLE | BPF_K:
220862258278SAlexei Starovoitov case BPF_JMP | BPF_JSGT | BPF_K:
220952afc51eSDaniel Borkmann case BPF_JMP | BPF_JSLT | BPF_K:
221062258278SAlexei Starovoitov case BPF_JMP | BPF_JSGE | BPF_K:
221152afc51eSDaniel Borkmann case BPF_JMP | BPF_JSLE | BPF_K:
22123f5d6525SJiong Wang case BPF_JMP32 | BPF_JEQ | BPF_K:
22133f5d6525SJiong Wang case BPF_JMP32 | BPF_JNE | BPF_K:
22143f5d6525SJiong Wang case BPF_JMP32 | BPF_JGT | BPF_K:
22153f5d6525SJiong Wang case BPF_JMP32 | BPF_JLT | BPF_K:
22163f5d6525SJiong Wang case BPF_JMP32 | BPF_JGE | BPF_K:
22173f5d6525SJiong Wang case BPF_JMP32 | BPF_JLE | BPF_K:
22183f5d6525SJiong Wang case BPF_JMP32 | BPF_JSGT | BPF_K:
22193f5d6525SJiong Wang case BPF_JMP32 | BPF_JSLT | BPF_K:
22203f5d6525SJiong Wang case BPF_JMP32 | BPF_JSGE | BPF_K:
22213f5d6525SJiong Wang case BPF_JMP32 | BPF_JSLE | BPF_K:
222238f51c07SDaniel Borkmann /* test dst_reg, dst_reg to save one extra byte */
222338f51c07SDaniel Borkmann if (imm32 == 0) {
222474007cfcSBrendan Jackman maybe_emit_mod(&prog, dst_reg, dst_reg,
222574007cfcSBrendan Jackman BPF_CLASS(insn->code) == BPF_JMP);
222638f51c07SDaniel Borkmann EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
222738f51c07SDaniel Borkmann goto emit_cond_jmp;
222838f51c07SDaniel Borkmann }
222938f51c07SDaniel Borkmann
2230e430f34eSAlexei Starovoitov /* cmp dst_reg, imm8/32 */
22316364d7d7SJie Meng maybe_emit_1mod(&prog, dst_reg,
22326364d7d7SJie Meng BPF_CLASS(insn->code) == BPF_JMP);
223362258278SAlexei Starovoitov
2234e430f34eSAlexei Starovoitov if (is_imm8(imm32))
2235e430f34eSAlexei Starovoitov EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
223662258278SAlexei Starovoitov else
2237e430f34eSAlexei Starovoitov EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
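		/* cmp via 0x83 /7 takes a sign-extended imm8, 0x81 /7 a
		 * full imm32; the short form saves 3 bytes when the
		 * immediate fits.
		 */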
223862258278SAlexei Starovoitov
2239a2c7a983SIngo Molnar emit_cond_jmp: /* Convert BPF opcode to x86 */
224062258278SAlexei Starovoitov switch (BPF_OP(insn->code)) {
224162258278SAlexei Starovoitov case BPF_JEQ:
224262258278SAlexei Starovoitov jmp_cond = X86_JE;
224362258278SAlexei Starovoitov break;
224462258278SAlexei Starovoitov case BPF_JSET:
224562258278SAlexei Starovoitov case BPF_JNE:
224662258278SAlexei Starovoitov jmp_cond = X86_JNE;
224762258278SAlexei Starovoitov break;
224862258278SAlexei Starovoitov case BPF_JGT:
224962258278SAlexei Starovoitov /* GT is unsigned '>', JA in x86 */
225062258278SAlexei Starovoitov jmp_cond = X86_JA;
225162258278SAlexei Starovoitov break;
225252afc51eSDaniel Borkmann case BPF_JLT:
225352afc51eSDaniel Borkmann /* LT is unsigned '<', JB in x86 */
225452afc51eSDaniel Borkmann jmp_cond = X86_JB;
225552afc51eSDaniel Borkmann break;
225662258278SAlexei Starovoitov case BPF_JGE:
225762258278SAlexei Starovoitov /* GE is unsigned '>=', JAE in x86 */
225862258278SAlexei Starovoitov jmp_cond = X86_JAE;
225962258278SAlexei Starovoitov break;
226052afc51eSDaniel Borkmann case BPF_JLE:
226152afc51eSDaniel Borkmann /* LE is unsigned '<=', JBE in x86 */
226252afc51eSDaniel Borkmann jmp_cond = X86_JBE;
226352afc51eSDaniel Borkmann break;
226462258278SAlexei Starovoitov case BPF_JSGT:
2265a2c7a983SIngo Molnar /* Signed '>', GT in x86 */
226662258278SAlexei Starovoitov jmp_cond = X86_JG;
226762258278SAlexei Starovoitov break;
226852afc51eSDaniel Borkmann case BPF_JSLT:
2269a2c7a983SIngo Molnar /* Signed '<', LT in x86 */
227052afc51eSDaniel Borkmann jmp_cond = X86_JL;
227152afc51eSDaniel Borkmann break;
227262258278SAlexei Starovoitov case BPF_JSGE:
2273a2c7a983SIngo Molnar /* Signed '>=', GE in x86 */
227462258278SAlexei Starovoitov jmp_cond = X86_JGE;
227562258278SAlexei Starovoitov break;
227652afc51eSDaniel Borkmann case BPF_JSLE:
2277a2c7a983SIngo Molnar /* Signed '<=', LE in x86 */
227852afc51eSDaniel Borkmann jmp_cond = X86_JLE;
227952afc51eSDaniel Borkmann break;
2280a2c7a983SIngo Molnar default: /* to silence GCC warning */
228162258278SAlexei Starovoitov return -EFAULT;
228262258278SAlexei Starovoitov }
228362258278SAlexei Starovoitov jmp_offset = addrs[i + insn->off] - addrs[i];
2284*c8831bdbSYonghong Song if (is_imm8_jmp_offset(jmp_offset)) {
228593c5aeccSGary Lin if (jmp_padding) {
228693c5aeccSGary Lin /* To keep the jmp_offset valid, the extra bytes are
2287d9f6e12fSIngo Molnar * padded before the jump insn, so we subtract the
228893c5aeccSGary Lin * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
228993c5aeccSGary Lin *
229093c5aeccSGary Lin * If the previous pass already emits an imm8
229193c5aeccSGary Lin * jmp_cond, then this BPF insn won't shrink, so
229293c5aeccSGary Lin * "nops" is 0.
229393c5aeccSGary Lin *
229493c5aeccSGary Lin * On the other hand, if the previous pass emits an
229593c5aeccSGary Lin * imm32 jmp_cond, the extra 4 bytes(*) are padded to
229693c5aeccSGary Lin * keep the image from shrinking further.
229793c5aeccSGary Lin *
229893c5aeccSGary Lin * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
229993c5aeccSGary Lin * is 2 bytes, so the size difference is 4 bytes.
230093c5aeccSGary Lin */
230193c5aeccSGary Lin nops = INSN_SZ_DIFF - 2;
230293c5aeccSGary Lin if (nops != 0 && nops != 4) {
230393c5aeccSGary Lin pr_err("unexpected jmp_cond padding: %d bytes\n",
230493c5aeccSGary Lin nops);
230593c5aeccSGary Lin return -EFAULT;
230693c5aeccSGary Lin }
2307ced50fc4SJiri Olsa emit_nops(&prog, nops);
230893c5aeccSGary Lin }
230962258278SAlexei Starovoitov EMIT2(jmp_cond, jmp_offset);
231062258278SAlexei Starovoitov } else if (is_simm32(jmp_offset)) {
231162258278SAlexei Starovoitov EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
23123b58908aSEric Dumazet } else {
231362258278SAlexei Starovoitov pr_err("cond_jmp gen bug %llx\n", jmp_offset);
231462258278SAlexei Starovoitov return -EFAULT;
23153b58908aSEric Dumazet }
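		/* EMIT2() above is the 2-byte short "Jcc rel8" form (e.g.
		 * 0x74 rel8 for JE); EMIT2_off32() is the 6-byte near form
		 * 0x0F (jmp_cond + 0x10) rel32 (e.g. 0x0F 0x84 for JE).
		 */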
231662258278SAlexei Starovoitov
23173b58908aSEric Dumazet break;
231862258278SAlexei Starovoitov
231962258278SAlexei Starovoitov case BPF_JMP | BPF_JA:
23204cd58e9aSYonghong Song case BPF_JMP32 | BPF_JA:
23214cd58e9aSYonghong Song if (BPF_CLASS(insn->code) == BPF_JMP) {
23221612a981SGianluca Borello if (insn->off == -1)
23231612a981SGianluca Borello /* -1 jmp instructions will always jump
23241612a981SGianluca Borello * backwards two bytes. Explicitly handling
23251612a981SGianluca Borello * this case avoids wasting too many passes
23261612a981SGianluca Borello * when there are long sequences of replaced
23271612a981SGianluca Borello * dead code.
23281612a981SGianluca Borello */
23291612a981SGianluca Borello jmp_offset = -2;
23301612a981SGianluca Borello else
233162258278SAlexei Starovoitov jmp_offset = addrs[i + insn->off] - addrs[i];
23324cd58e9aSYonghong Song } else {
23334cd58e9aSYonghong Song if (insn->imm == -1)
23344cd58e9aSYonghong Song jmp_offset = -2;
23354cd58e9aSYonghong Song else
23364cd58e9aSYonghong Song jmp_offset = addrs[i + insn->imm] - addrs[i];
23374cd58e9aSYonghong Song }
23381612a981SGianluca Borello
233993c5aeccSGary Lin if (!jmp_offset) {
234093c5aeccSGary Lin /*
234193c5aeccSGary Lin * If jmp_padding is enabled, the extra nops will
234293c5aeccSGary Lin * be inserted. Otherwise, optimize out nop jumps.
234393c5aeccSGary Lin */
234493c5aeccSGary Lin if (jmp_padding) {
234593c5aeccSGary Lin /* There are 3 possible cases.
234693c5aeccSGary Lin * (1) This BPF_JA is already optimized out in
234793c5aeccSGary Lin * the previous run, so there is no need
234893c5aeccSGary Lin * to pad any extra byte (0 byte).
234993c5aeccSGary Lin * (2) The previous pass emits an imm8 jmp,
235093c5aeccSGary Lin * so we pad 2 bytes to match the previous
235193c5aeccSGary Lin * insn size.
235293c5aeccSGary Lin * (3) Similarly, the previous pass emits an
235393c5aeccSGary Lin * imm32 jmp, and 5 bytes are padded.
235493c5aeccSGary Lin */
235593c5aeccSGary Lin nops = INSN_SZ_DIFF;
235693c5aeccSGary Lin if (nops != 0 && nops != 2 && nops != 5) {
235793c5aeccSGary Lin pr_err("unexpected nop jump padding: %d bytes\n",
235893c5aeccSGary Lin nops);
235993c5aeccSGary Lin return -EFAULT;
236093c5aeccSGary Lin }
2361ced50fc4SJiri Olsa emit_nops(&prog, nops);
236293c5aeccSGary Lin }
236362258278SAlexei Starovoitov break;
236493c5aeccSGary Lin }
236562258278SAlexei Starovoitov emit_jmp:
2366*c8831bdbSYonghong Song if (is_imm8_jmp_offset(jmp_offset)) {
236793c5aeccSGary Lin if (jmp_padding) {
236893c5aeccSGary Lin /* To avoid breaking jmp_offset, the extra bytes
236993c5aeccSGary Lin * are padded before the actual jmp insn, so
2370d9f6e12fSIngo Molnar * 2 bytes are subtracted from INSN_SZ_DIFF.
237193c5aeccSGary Lin *
237293c5aeccSGary Lin * If the previous pass already emits an imm8
237393c5aeccSGary Lin * jmp, there is nothing to pad (0 byte).
237493c5aeccSGary Lin *
237593c5aeccSGary Lin * If it emits an imm32 jmp (5 bytes) previously
237693c5aeccSGary Lin * and now an imm8 jmp (2 bytes), then we pad
237793c5aeccSGary Lin * (5 - 2 = 3) bytes to stop the image from
237893c5aeccSGary Lin * shrinking further.
237993c5aeccSGary Lin */
238093c5aeccSGary Lin nops = INSN_SZ_DIFF - 2;
238193c5aeccSGary Lin if (nops != 0 && nops != 3) {
238293c5aeccSGary Lin pr_err("unexpected jump padding: %d bytes\n",
238393c5aeccSGary Lin nops);
238493c5aeccSGary Lin return -EFAULT;
238593c5aeccSGary Lin }
2386ced50fc4SJiri Olsa emit_nops(&prog, nops);
238793c5aeccSGary Lin }
238862258278SAlexei Starovoitov EMIT2(0xEB, jmp_offset);
238962258278SAlexei Starovoitov } else if (is_simm32(jmp_offset)) {
239062258278SAlexei Starovoitov EMIT1_off32(0xE9, jmp_offset);
239162258278SAlexei Starovoitov } else {
239262258278SAlexei Starovoitov pr_err("jmp gen bug %llx\n", jmp_offset);
239362258278SAlexei Starovoitov return -EFAULT;
23943b58908aSEric Dumazet }
239562258278SAlexei Starovoitov break;
239662258278SAlexei Starovoitov
239762258278SAlexei Starovoitov case BPF_JMP | BPF_EXIT:
2398769e0de6SAlexei Starovoitov if (seen_exit) {
239962258278SAlexei Starovoitov jmp_offset = ctx->cleanup_addr - addrs[i];
240062258278SAlexei Starovoitov goto emit_jmp;
240162258278SAlexei Starovoitov }
2402769e0de6SAlexei Starovoitov seen_exit = true;
2403a2c7a983SIngo Molnar /* Update cleanup_addr */
240462258278SAlexei Starovoitov ctx->cleanup_addr = proglen;
2405f18b03faSKumar Kartikeya Dwivedi if (bpf_prog->aux->exception_boundary) {
2406f18b03faSKumar Kartikeya Dwivedi pop_callee_regs(&prog, all_callee_regs_used);
2407f18b03faSKumar Kartikeya Dwivedi pop_r12(&prog);
2408f18b03faSKumar Kartikeya Dwivedi } else {
2409ebf7d1f5SMaciej Fijalkowski pop_callee_regs(&prog, callee_regs_used);
24102fe99eb0SAlexei Starovoitov if (arena_vm_start)
24112fe99eb0SAlexei Starovoitov pop_r12(&prog);
2412f18b03faSKumar Kartikeya Dwivedi }
241362258278SAlexei Starovoitov EMIT1(0xC9); /* leave */
2414d77cfe59SPeter Zijlstra emit_return(&prog, image + addrs[i - 1] + (prog - temp));
24150a14842fSEric Dumazet break;
24160a14842fSEric Dumazet
24170a14842fSEric Dumazet default:
2418a2c7a983SIngo Molnar /*
2419a2c7a983SIngo Molnar * By design x86-64 JIT should support all BPF instructions.
242062258278SAlexei Starovoitov * This error will be seen if a new instruction was added
2421a2c7a983SIngo Molnar * to the interpreter, but not to the JIT, or if there is
2422a2c7a983SIngo Molnar * junk in bpf_prog.
242362258278SAlexei Starovoitov */
242462258278SAlexei Starovoitov pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
2425f3c2af7bSAlexei Starovoitov return -EINVAL;
24260a14842fSEric Dumazet }
242762258278SAlexei Starovoitov
24280a14842fSEric Dumazet ilen = prog - temp;
2429e0ee9c12SAlexei Starovoitov if (ilen > BPF_MAX_INSN_SIZE) {
24309383191dSDaniel Borkmann pr_err("bpf_jit: fatal insn size error\n");
2431e0ee9c12SAlexei Starovoitov return -EFAULT;
2432e0ee9c12SAlexei Starovoitov }
2433e0ee9c12SAlexei Starovoitov
24340a14842fSEric Dumazet if (image) {
2435e4d4d456SPiotr Krysiuk /*
2436e4d4d456SPiotr Krysiuk * When populating the image, assert that:
2437e4d4d456SPiotr Krysiuk *
2438e4d4d456SPiotr Krysiuk * i) We do not write beyond the allocated space, and
2439e4d4d456SPiotr Krysiuk * ii) addrs[i] did not change from the prior run, in order
2440e4d4d456SPiotr Krysiuk * to validate assumptions made for computing branch
2441e4d4d456SPiotr Krysiuk * displacements.
2442e4d4d456SPiotr Krysiuk */
2443e4d4d456SPiotr Krysiuk if (unlikely(proglen + ilen > oldproglen ||
2444e4d4d456SPiotr Krysiuk proglen + ilen != addrs[i])) {
24459383191dSDaniel Borkmann pr_err("bpf_jit: fatal error\n");
2446f3c2af7bSAlexei Starovoitov return -EFAULT;
24470a14842fSEric Dumazet }
24481022a549SSong Liu memcpy(rw_image + proglen, temp, ilen);
24490a14842fSEric Dumazet }
24500a14842fSEric Dumazet proglen += ilen;
24510a14842fSEric Dumazet addrs[i] = proglen;
24520a14842fSEric Dumazet prog = temp;
24530a14842fSEric Dumazet }
24543dec541bSAlexei Starovoitov
24553dec541bSAlexei Starovoitov if (image && excnt != bpf_prog->aux->num_exentries) {
24563dec541bSAlexei Starovoitov pr_err("extable is not populated\n");
24573dec541bSAlexei Starovoitov return -EFAULT;
24583dec541bSAlexei Starovoitov }
2459f3c2af7bSAlexei Starovoitov return proglen;
2460f3c2af7bSAlexei Starovoitov }
2461f3c2af7bSAlexei Starovoitov
2462473e3150SMenglong Dong static void clean_stack_garbage(const struct btf_func_model *m,
2463473e3150SMenglong Dong u8 **pprog, int nr_stack_slots,
2464fec56f58SAlexei Starovoitov int stack_size)
2465fec56f58SAlexei Starovoitov {
2466473e3150SMenglong Dong int arg_size, off;
2467473e3150SMenglong Dong u8 *prog;
2468473e3150SMenglong Dong
2469473e3150SMenglong Dong /* Generally speaking, the compiler will pass the arguments
2470473e3150SMenglong Dong * on-stack with the "push" instruction, which takes 8 bytes
2471473e3150SMenglong Dong * on the stack. In this case, there won't be garbage values
2472473e3150SMenglong Dong * when we copy the arguments from the origin stack frame to
2473473e3150SMenglong Dong * the current one in BPF_DW chunks.
2474473e3150SMenglong Dong *
2475473e3150SMenglong Dong * However, sometimes the compiler will only allocate 4 bytes on
2476473e3150SMenglong Dong * the stack for an argument. For now, this case will only
2477473e3150SMenglong Dong * happen if there is only one argument on-stack and its size
2478473e3150SMenglong Dong * is no more than 4 bytes. In this case, there will be garbage
2479473e3150SMenglong Dong * values in the upper 4 bytes of the slot where we store the
2480473e3150SMenglong Dong * argument on the current stack frame.
2481473e3150SMenglong Dong *
2482473e3150SMenglong Dong * arguments on the origin stack:
2483473e3150SMenglong Dong *
2484473e3150SMenglong Dong * stack_arg_1(4-byte) xxx(4-byte)
2485473e3150SMenglong Dong *
2486473e3150SMenglong Dong * what we copy:
2487473e3150SMenglong Dong *
2488473e3150SMenglong Dong * stack_arg_1(8-byte): stack_arg_1(origin) xxx
2489473e3150SMenglong Dong *
2490473e3150SMenglong Dong * and the xxx is the garbage value that we clean up here.
2491473e3150SMenglong Dong */
2492473e3150SMenglong Dong if (nr_stack_slots != 1)
2493473e3150SMenglong Dong return;
2494473e3150SMenglong Dong
2495473e3150SMenglong Dong /* the size of the last argument */
2496473e3150SMenglong Dong arg_size = m->arg_size[m->nr_args - 1];
2497473e3150SMenglong Dong if (arg_size <= 4) {
2498473e3150SMenglong Dong off = -(stack_size - 4);
2499473e3150SMenglong Dong prog = *pprog;
2500473e3150SMenglong Dong /* mov DWORD PTR [rbp + off], 0 */
2501473e3150SMenglong Dong if (!is_imm8(off))
2502473e3150SMenglong Dong EMIT2_off32(0xC7, 0x85, off);
2503473e3150SMenglong Dong else
2504473e3150SMenglong Dong EMIT3(0xC7, 0x45, off);
2505473e3150SMenglong Dong EMIT(0, 4);
2506473e3150SMenglong Dong *pprog = prog;
2507473e3150SMenglong Dong }
2508473e3150SMenglong Dong }
2509473e3150SMenglong Dong
2510473e3150SMenglong Dong /* get the count of the regs that are used to pass arguments */
2511473e3150SMenglong Dong static int get_nr_used_regs(const struct btf_func_model *m)
2512473e3150SMenglong Dong {
2513473e3150SMenglong Dong int i, arg_regs, nr_used_regs = 0;
2514473e3150SMenglong Dong
2515473e3150SMenglong Dong for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2516473e3150SMenglong Dong arg_regs = (m->arg_size[i] + 7) / 8;
2517473e3150SMenglong Dong if (nr_used_regs + arg_regs <= 6)
2518473e3150SMenglong Dong nr_used_regs += arg_regs;
2519473e3150SMenglong Dong
2520473e3150SMenglong Dong if (nr_used_regs >= 6)
2521473e3150SMenglong Dong break;
2522473e3150SMenglong Dong }
2523473e3150SMenglong Dong
2524473e3150SMenglong Dong return nr_used_regs;
2525473e3150SMenglong Dong }
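/* Hypothetical worked example for get_nr_used_regs() above, using the
 * foo() prototype from the save_args() comment below: the five leading
 * chars take 5 regs, the two-slot struct does not fit (5 + 2 > 6) and
 * goes on the stack, and the trailing char takes the 6th reg, so the
 * function returns 6.
 */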
2526473e3150SMenglong Dong
2527473e3150SMenglong Dong static void save_args(const struct btf_func_model *m, u8 **prog,
2528473e3150SMenglong Dong int stack_size, bool for_call_origin)
2529473e3150SMenglong Dong {
2530492e797fSMenglong Dong int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
2531473e3150SMenglong Dong int i, j;
25327f788049SPu Lehui
2533fec56f58SAlexei Starovoitov /* Store function arguments to stack.
2534fec56f58SAlexei Starovoitov * For a function that accepts two pointers the sequence will be:
2535fec56f58SAlexei Starovoitov * mov QWORD PTR [rbp-0x10],rdi
2536fec56f58SAlexei Starovoitov * mov QWORD PTR [rbp-0x8],rsi
2537fec56f58SAlexei Starovoitov */
2538473e3150SMenglong Dong for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2539473e3150SMenglong Dong arg_regs = (m->arg_size[i] + 7) / 8;
2540473e3150SMenglong Dong
2541473e3150SMenglong Dong /* According to the research of Yonghong, struct members
2542473e3150SMenglong Dong * should be either all in registers or all on the stack.
2543473e3150SMenglong Dong * Meanwhile, the compiler will pass an argument in regs
2544473e3150SMenglong Dong * if the remaining regs can hold the whole argument.
2545473e3150SMenglong Dong *
2546473e3150SMenglong Dong * The args can therefore end up out of order. For example:
2547473e3150SMenglong Dong *
2548473e3150SMenglong Dong * struct foo_struct {
2549473e3150SMenglong Dong * long a;
2550473e3150SMenglong Dong * int b;
2551473e3150SMenglong Dong * };
2552473e3150SMenglong Dong * int foo(char, char, char, char, char, struct foo_struct,
2553473e3150SMenglong Dong * char);
2554473e3150SMenglong Dong *
2555473e3150SMenglong Dong * args 1-5 and arg7 will be passed in regs, and arg6 will
2556473e3150SMenglong Dong * be passed on the stack.
2557473e3150SMenglong Dong */
2558473e3150SMenglong Dong if (nr_regs + arg_regs > 6) {
2559473e3150SMenglong Dong /* copy function arguments from origin stack frame
2560473e3150SMenglong Dong * into current stack frame.
2561473e3150SMenglong Dong *
2562473e3150SMenglong Dong * The starting address of the arguments on-stack
2563473e3150SMenglong Dong * is:
2564473e3150SMenglong Dong * rbp + 8(push rbp) +
2565473e3150SMenglong Dong * 8(return addr of origin call) +
2566473e3150SMenglong Dong * 8(return addr of the caller)
2567473e3150SMenglong Dong * which means: rbp + 24
2568473e3150SMenglong Dong */
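			/* Illustrative encoding of one 8-byte copy below,
			 * assuming stack_size == 0x20 and this is the first
			 * stack slot:
			 *   mov rax, QWORD PTR [rbp + 0x18]   // emit_ldx()
			 *   mov QWORD PTR [rbp - 0x20], rax   // emit_stx()
			 */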
2569473e3150SMenglong Dong for (j = 0; j < arg_regs; j++) {
2570473e3150SMenglong Dong emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
2571473e3150SMenglong Dong nr_stack_slots * 8 + 0x18);
2572473e3150SMenglong Dong emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
2573473e3150SMenglong Dong -stack_size);
2574473e3150SMenglong Dong
2575473e3150SMenglong Dong if (!nr_stack_slots)
2576473e3150SMenglong Dong first_off = stack_size;
2577473e3150SMenglong Dong stack_size -= 8;
2578473e3150SMenglong Dong nr_stack_slots++;
2579473e3150SMenglong Dong }
2580473e3150SMenglong Dong } else {
2581473e3150SMenglong Dong /* Only copy the on-stack arguments to the current
2582473e3150SMenglong Dong * 'stack_size' area and ignore the reg args; this is
258354aa699eSBjorn Helgaas * used to prepare the on-stack arguments for the origin call.
2584473e3150SMenglong Dong */
2585473e3150SMenglong Dong if (for_call_origin) {
2586473e3150SMenglong Dong nr_regs += arg_regs;
2587473e3150SMenglong Dong continue;
2588fec56f58SAlexei Starovoitov }
2589fec56f58SAlexei Starovoitov
2590473e3150SMenglong Dong /* copy the arguments from regs into stack */
2591473e3150SMenglong Dong for (j = 0; j < arg_regs; j++) {
2592473e3150SMenglong Dong emit_stx(prog, BPF_DW, BPF_REG_FP,
2593473e3150SMenglong Dong nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2594473e3150SMenglong Dong -stack_size);
2595473e3150SMenglong Dong stack_size -= 8;
2596473e3150SMenglong Dong nr_regs++;
2597473e3150SMenglong Dong }
2598473e3150SMenglong Dong }
2599473e3150SMenglong Dong }
2600473e3150SMenglong Dong
2601473e3150SMenglong Dong clean_stack_garbage(m, prog, nr_stack_slots, first_off);
2602473e3150SMenglong Dong }
2603473e3150SMenglong Dong
2604473e3150SMenglong Dong static void restore_regs(const struct btf_func_model *m, u8 **prog,
2605fec56f58SAlexei Starovoitov int stack_size)
2606fec56f58SAlexei Starovoitov {
2607473e3150SMenglong Dong int i, j, arg_regs, nr_regs = 0;
2608fec56f58SAlexei Starovoitov
2609fec56f58SAlexei Starovoitov /* Restore function arguments from stack.
2610fec56f58SAlexei Starovoitov * For a function that accepts two pointers the sequence will be:
2611fec56f58SAlexei Starovoitov * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
2612fec56f58SAlexei Starovoitov * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
2613473e3150SMenglong Dong *
2614473e3150SMenglong Dong * The logic here is similar to what we do in save_args()
2615fec56f58SAlexei Starovoitov */
2616473e3150SMenglong Dong for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2617473e3150SMenglong Dong arg_regs = (m->arg_size[i] + 7) / 8;
2618473e3150SMenglong Dong if (nr_regs + arg_regs <= 6) {
2619473e3150SMenglong Dong for (j = 0; j < arg_regs; j++) {
262002a6dfa8SMenglong Dong emit_ldx(prog, BPF_DW,
2621473e3150SMenglong Dong nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2622fec56f58SAlexei Starovoitov BPF_REG_FP,
2623473e3150SMenglong Dong -stack_size);
2624473e3150SMenglong Dong stack_size -= 8;
2625473e3150SMenglong Dong nr_regs++;
2626473e3150SMenglong Dong }
2627473e3150SMenglong Dong } else {
2628473e3150SMenglong Dong stack_size -= 8 * arg_regs;
2629473e3150SMenglong Dong }
2630473e3150SMenglong Dong
2631473e3150SMenglong Dong if (nr_regs >= 6)
2632473e3150SMenglong Dong break;
2633473e3150SMenglong Dong }
2634fec56f58SAlexei Starovoitov }
2635fec56f58SAlexei Starovoitov
26367e639208SKP Singh static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
2637f7e0beafSKui-Feng Lee struct bpf_tramp_link *l, int stack_size,
26383ba026fcSSong Liu int run_ctx_off, bool save_ret,
26393ba026fcSSong Liu void *image, void *rw_image)
2640fec56f58SAlexei Starovoitov {
2641fec56f58SAlexei Starovoitov u8 *prog = *pprog;
2642ca06f55bSAlexei Starovoitov u8 *jmp_insn;
2643e384c7b7SKui-Feng Lee int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
2644f7e0beafSKui-Feng Lee struct bpf_prog *p = l->link.prog;
26452fcc8241SKui-Feng Lee u64 cookie = l->cookie;
2646fec56f58SAlexei Starovoitov
26472fcc8241SKui-Feng Lee /* mov rdi, cookie */
26482fcc8241SKui-Feng Lee emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
2649e384c7b7SKui-Feng Lee
2650e384c7b7SKui-Feng Lee /* Prepare struct bpf_tramp_run_ctx.
2651e384c7b7SKui-Feng Lee *
2652e384c7b7SKui-Feng Lee * bpf_tramp_run_ctx is already preserved by
2653e384c7b7SKui-Feng Lee * arch_prepare_bpf_trampoline().
2654e384c7b7SKui-Feng Lee *
2655e384c7b7SKui-Feng Lee * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
2656e384c7b7SKui-Feng Lee */
2657e384c7b7SKui-Feng Lee emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
2658e384c7b7SKui-Feng Lee
2659ca06f55bSAlexei Starovoitov /* arg1: mov rdi, progs[i] */
2660ca06f55bSAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2661e384c7b7SKui-Feng Lee /* arg2: lea rsi, [rbp - run_ctx_off] */
2662473e3150SMenglong Dong if (!is_imm8(-run_ctx_off))
2663473e3150SMenglong Dong EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
2664473e3150SMenglong Dong else
2665e384c7b7SKui-Feng Lee EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
2666e384c7b7SKui-Feng Lee
26673ba026fcSSong Liu if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
2668fec56f58SAlexei Starovoitov return -EINVAL;
2669fec56f58SAlexei Starovoitov /* remember prog start time returned by __bpf_prog_enter */
2670fec56f58SAlexei Starovoitov emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
2671fec56f58SAlexei Starovoitov
2672ca06f55bSAlexei Starovoitov /* if (__bpf_prog_enter*(prog) == 0)
2673ca06f55bSAlexei Starovoitov * goto skip_exec_of_prog;
2674ca06f55bSAlexei Starovoitov */
2675ca06f55bSAlexei Starovoitov EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
2676ca06f55bSAlexei Starovoitov /* emit 2 nops that will be replaced with JE insn */
2677ca06f55bSAlexei Starovoitov jmp_insn = prog;
2678ca06f55bSAlexei Starovoitov emit_nops(&prog, 2);
2679ca06f55bSAlexei Starovoitov
2680fec56f58SAlexei Starovoitov /* arg1: lea rdi, [rbp - stack_size] */
2681473e3150SMenglong Dong if (!is_imm8(-stack_size))
2682473e3150SMenglong Dong EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
2683473e3150SMenglong Dong else
2684fec56f58SAlexei Starovoitov EMIT4(0x48, 0x8D, 0x7D, -stack_size);
2685fec56f58SAlexei Starovoitov /* arg2: progs[i]->insnsi for interpreter */
26867e639208SKP Singh if (!p->jited)
2687fec56f58SAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_2,
26887e639208SKP Singh (long) p->insnsi >> 32,
26897e639208SKP Singh (u32) (long) p->insnsi);
2690fec56f58SAlexei Starovoitov /* call JITed bpf program or interpreter */
26913ba026fcSSong Liu if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
2692fec56f58SAlexei Starovoitov return -EINVAL;
2693fec56f58SAlexei Starovoitov
2694356ed649SHou Tao /*
2695356ed649SHou Tao * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
2696ae240823SKP Singh * of the previous call which is then passed on the stack to
2697ae240823SKP Singh * the next BPF program.
2698356ed649SHou Tao *
2699356ed649SHou Tao * BPF_TRAMP_FENTRY trampoline may need to return the return
2700356ed649SHou Tao * value of BPF_PROG_TYPE_STRUCT_OPS prog.
2701ae240823SKP Singh */
2702356ed649SHou Tao if (save_ret)
2703ae240823SKP Singh emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2704ae240823SKP Singh
2705ca06f55bSAlexei Starovoitov /* replace 2 nops with JE insn, since jmp target is known */
2706ca06f55bSAlexei Starovoitov jmp_insn[0] = X86_JE;
2707ca06f55bSAlexei Starovoitov jmp_insn[1] = prog - jmp_insn - 2;
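	/* The rel8 displacement is counted from the end of the 2-byte
	 * JE instruction, hence the "- 2" above.
	 */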
2708ca06f55bSAlexei Starovoitov
2709fec56f58SAlexei Starovoitov /* arg1: mov rdi, progs[i] */
2710f2dd3b39SAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2711fec56f58SAlexei Starovoitov /* arg2: mov rsi, rbx <- start time in nsec */
2712fec56f58SAlexei Starovoitov emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
2713e384c7b7SKui-Feng Lee /* arg3: lea rdx, [rbp - run_ctx_off] */
2714473e3150SMenglong Dong if (!is_imm8(-run_ctx_off))
2715473e3150SMenglong Dong EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
2716473e3150SMenglong Dong else
2717e384c7b7SKui-Feng Lee EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
27183ba026fcSSong Liu if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
2719fec56f58SAlexei Starovoitov return -EINVAL;
27207e639208SKP Singh
27217e639208SKP Singh *pprog = prog;
27227e639208SKP Singh return 0;
27237e639208SKP Singh }
27247e639208SKP Singh
27257e639208SKP Singh static void emit_align(u8 **pprog, u32 align)
27267e639208SKP Singh {
27277e639208SKP Singh u8 *target, *prog = *pprog;
27287e639208SKP Singh
27297e639208SKP Singh target = PTR_ALIGN(prog, align);
27307e639208SKP Singh if (target != prog)
27317e639208SKP Singh emit_nops(&prog, target - prog);
27327e639208SKP Singh
27337e639208SKP Singh *pprog = prog;
27347e639208SKP Singh }
27357e639208SKP Singh
27367e639208SKP Singh static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
27377e639208SKP Singh {
27387e639208SKP Singh u8 *prog = *pprog;
27397e639208SKP Singh s64 offset;
27407e639208SKP Singh
27417e639208SKP Singh offset = func - (ip + 2 + 4);
27427e639208SKP Singh if (!is_simm32(offset)) {
27437e639208SKP Singh pr_err("Target %p is out of range\n", func);
27447e639208SKP Singh return -EINVAL;
27457e639208SKP Singh }
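	/* 6-byte near Jcc: 0x0F, (0x10 + jmp_cond), rel32. E.g. for
	 * X86_JNE (0x75) this emits 0x0F 0x85 rel32; rel32 is relative
	 * to the end of the instruction, hence "ip + 2 + 4" above.
	 */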
27467e639208SKP Singh EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
27477e639208SKP Singh *pprog = prog;
27487e639208SKP Singh return 0;
27497e639208SKP Singh }
27507e639208SKP Singh
27517e639208SKP Singh static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
2752f7e0beafSKui-Feng Lee struct bpf_tramp_links *tl, int stack_size,
27533ba026fcSSong Liu int run_ctx_off, bool save_ret,
27543ba026fcSSong Liu void *image, void *rw_image)
27557e639208SKP Singh {
27567e639208SKP Singh int i;
27577e639208SKP Singh u8 *prog = *pprog;
27587e639208SKP Singh
2759f7e0beafSKui-Feng Lee for (i = 0; i < tl->nr_links; i++) {
2760f7e0beafSKui-Feng Lee if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
27613ba026fcSSong Liu run_ctx_off, save_ret, image, rw_image))
27627e639208SKP Singh return -EINVAL;
2763fec56f58SAlexei Starovoitov }
2764fec56f58SAlexei Starovoitov *pprog = prog;
2765fec56f58SAlexei Starovoitov return 0;
2766fec56f58SAlexei Starovoitov }
2767fec56f58SAlexei Starovoitov
2768ae240823SKP Singh static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
2769f7e0beafSKui-Feng Lee struct bpf_tramp_links *tl, int stack_size,
27703ba026fcSSong Liu int run_ctx_off, u8 **branches,
27713ba026fcSSong Liu void *image, void *rw_image)
2772ae240823SKP Singh {
2773ae240823SKP Singh u8 *prog = *pprog;
2774ced50fc4SJiri Olsa int i;
2775ae240823SKP Singh
2776ae240823SKP Singh /* The first fmod_ret program will receive a garbage return value.
2777ae240823SKP Singh * Set this to 0 to avoid confusing the program.
2778ae240823SKP Singh */
2779ae240823SKP Singh emit_mov_imm32(&prog, false, BPF_REG_0, 0);
2780ae240823SKP Singh emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2781f7e0beafSKui-Feng Lee for (i = 0; i < tl->nr_links; i++) {
27823ba026fcSSong Liu if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
27833ba026fcSSong Liu image, rw_image))
2784ae240823SKP Singh return -EINVAL;
2785ae240823SKP Singh
278613fac1d8SAlexei Starovoitov /* mod_ret prog stored return value into [rbp - 8]. Emit:
278713fac1d8SAlexei Starovoitov * if (*(u64 *)(rbp - 8) != 0)
2788ae240823SKP Singh * goto do_fexit;
2789ae240823SKP Singh */
279013fac1d8SAlexei Starovoitov /* cmp QWORD PTR [rbp - 0x8], 0x0 */
279113fac1d8SAlexei Starovoitov EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
2792ae240823SKP Singh
2793ae240823SKP Singh /* Save the location of the branch and generate 6 nops
2794ae240823SKP Singh * (4 bytes for an offset and 2 bytes for the jump) These nops
2795ae240823SKP Singh * are replaced with a conditional jump once do_fexit (i.e. the
2796ae240823SKP Singh * start of the fexit invocation) is finalized.
2797ae240823SKP Singh */
2798ae240823SKP Singh branches[i] = prog;
2799ae240823SKP Singh emit_nops(&prog, 4 + 2);
2800ae240823SKP Singh }
2801ae240823SKP Singh
2802ae240823SKP Singh *pprog = prog;
2803ae240823SKP Singh return 0;
2804ae240823SKP Singh }
2805ae240823SKP Singh
2806116e04baSLeon Hwang /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
2807116e04baSLeon Hwang #define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack) \
2808116e04baSLeon Hwang __LOAD_TCC_PTR(-round_up(stack, 8) - 8)
2809116e04baSLeon Hwang
2810fec56f58SAlexei Starovoitov /* Example:
2811fec56f58SAlexei Starovoitov * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
2812fec56f58SAlexei Starovoitov * its 'struct btf_func_model' will have nr_args=2
2813fec56f58SAlexei Starovoitov * The assembly code when eth_type_trans is executing after trampoline:
2814fec56f58SAlexei Starovoitov *
2815fec56f58SAlexei Starovoitov * push rbp
2816fec56f58SAlexei Starovoitov * mov rbp, rsp
2817fec56f58SAlexei Starovoitov * sub rsp, 16 // space for skb and dev
2818fec56f58SAlexei Starovoitov * push rbx // temp regs to pass start time
2819fec56f58SAlexei Starovoitov * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
2820fec56f58SAlexei Starovoitov * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
2821fec56f58SAlexei Starovoitov * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2822fec56f58SAlexei Starovoitov * mov rbx, rax // remember start time if bpf stats are enabled
2823fec56f58SAlexei Starovoitov * lea rdi, [rbp - 16] // R1==ctx of bpf prog
2824fec56f58SAlexei Starovoitov * call addr_of_jited_FENTRY_prog
2825fec56f58SAlexei Starovoitov * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2826fec56f58SAlexei Starovoitov * mov rsi, rbx // prog start time
2827fec56f58SAlexei Starovoitov * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2828fec56f58SAlexei Starovoitov * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
2829fec56f58SAlexei Starovoitov * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
2830fec56f58SAlexei Starovoitov * pop rbx
2831fec56f58SAlexei Starovoitov * leave
2832fec56f58SAlexei Starovoitov * ret
2833fec56f58SAlexei Starovoitov *
2834fec56f58SAlexei Starovoitov * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
2835fec56f58SAlexei Starovoitov * replaced with 'call generated_bpf_trampoline'. When it returns
2836fec56f58SAlexei Starovoitov * eth_type_trans will continue executing with original skb and dev pointers.
2837fec56f58SAlexei Starovoitov *
2838fec56f58SAlexei Starovoitov * The assembly code when eth_type_trans is called from trampoline:
2839fec56f58SAlexei Starovoitov *
2840fec56f58SAlexei Starovoitov * push rbp
2841fec56f58SAlexei Starovoitov * mov rbp, rsp
2842fec56f58SAlexei Starovoitov * sub rsp, 24 // space for skb, dev, return value
2843fec56f58SAlexei Starovoitov * push rbx // temp regs to pass start time
2844fec56f58SAlexei Starovoitov * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
2845fec56f58SAlexei Starovoitov * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
2846fec56f58SAlexei Starovoitov * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2847fec56f58SAlexei Starovoitov * mov rbx, rax // remember start time if bpf stats are enabled
2848fec56f58SAlexei Starovoitov * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2849fec56f58SAlexei Starovoitov * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
2850fec56f58SAlexei Starovoitov * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2851fec56f58SAlexei Starovoitov * mov rsi, rbx // prog start time
2852fec56f58SAlexei Starovoitov * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2853fec56f58SAlexei Starovoitov * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
2854fec56f58SAlexei Starovoitov * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
2855fec56f58SAlexei Starovoitov * call eth_type_trans+5 // execute body of eth_type_trans
2856fec56f58SAlexei Starovoitov * mov qword ptr [rbp - 8], rax // save return value
2857fec56f58SAlexei Starovoitov * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2858fec56f58SAlexei Starovoitov * mov rbx, rax // remember start time if bpf stats are enabled
2859fec56f58SAlexei Starovoitov * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2860fec56f58SAlexei Starovoitov * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
2861fec56f58SAlexei Starovoitov * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2862fec56f58SAlexei Starovoitov * mov rsi, rbx // prog start time
2863fec56f58SAlexei Starovoitov * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2864fec56f58SAlexei Starovoitov * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
2865fec56f58SAlexei Starovoitov * pop rbx
2866fec56f58SAlexei Starovoitov * leave
2867fec56f58SAlexei Starovoitov * add rsp, 8 // skip eth_type_trans's frame
2868fec56f58SAlexei Starovoitov * ret // return to its caller
2869fec56f58SAlexei Starovoitov */
28703ba026fcSSong Liu static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
28713ba026fcSSong Liu void *rw_image_end, void *image,
287285d33df3SMartin KaFai Lau const struct btf_func_model *m, u32 flags,
2873f7e0beafSKui-Feng Lee struct bpf_tramp_links *tlinks,
28744d854f4fSJiri Olsa void *func_addr)
2875fec56f58SAlexei Starovoitov {
28767f788049SPu Lehui int i, ret, nr_regs = m->nr_args, stack_size = 0;
2877473e3150SMenglong Dong int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
2878f7e0beafSKui-Feng Lee struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
2879f7e0beafSKui-Feng Lee struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
2880f7e0beafSKui-Feng Lee struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
28814d854f4fSJiri Olsa void *orig_call = func_addr;
2882ae240823SKP Singh u8 **branches = NULL;
2883fec56f58SAlexei Starovoitov u8 *prog;
2884356ed649SHou Tao bool save_ret;
2885fec56f58SAlexei Starovoitov
28862cd3e377SPeter Zijlstra /*
28872cd3e377SPeter Zijlstra * F_INDIRECT is only compatible with F_RET_FENTRY_RET; it is
28882cd3e377SPeter Zijlstra * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG
28892cd3e377SPeter Zijlstra * because of how @func_addr is used.
28902cd3e377SPeter Zijlstra */
28912cd3e377SPeter Zijlstra WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
28922cd3e377SPeter Zijlstra (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));
28932cd3e377SPeter Zijlstra
28947f788049SPu Lehui /* extra registers for struct arguments */
28952cd3e377SPeter Zijlstra for (i = 0; i < m->nr_args; i++) {
2896a9c5ad31SYonghong Song if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
28977f788049SPu Lehui nr_regs += (m->arg_size[i] + 7) / 8 - 1;
28982cd3e377SPeter Zijlstra }
28997f788049SPu Lehui
2900473e3150SMenglong Dong /* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. The first
2901473e3150SMenglong Dong * 6 are passed through regs, the rest are passed on the stack.
2902473e3150SMenglong Dong */
2903473e3150SMenglong Dong if (nr_regs > MAX_BPF_FUNC_ARGS)
2904a9c5ad31SYonghong Song return -ENOTSUPP;
2905a9c5ad31SYonghong Song
29065edf6a19SJiri Olsa /* Generated trampoline stack layout:
29075edf6a19SJiri Olsa *
29085edf6a19SJiri Olsa * RBP + 8 [ return address ]
29095edf6a19SJiri Olsa * RBP + 0 [ RBP ]
29105edf6a19SJiri Olsa *
29115edf6a19SJiri Olsa * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or
29125edf6a19SJiri Olsa * BPF_TRAMP_F_RET_FENTRY_RET flags
29135edf6a19SJiri Olsa *
29145edf6a19SJiri Olsa * [ reg_argN ] always
29155edf6a19SJiri Olsa * [ ... ]
29165edf6a19SJiri Olsa * RBP - regs_off [ reg_arg1 ] program's ctx pointer
29175edf6a19SJiri Olsa *
29187f788049SPu Lehui * RBP - nregs_off [ regs count ] always
2919f92c1e18SJiri Olsa *
29205edf6a19SJiri Olsa * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
2921e384c7b7SKui-Feng Lee *
2922473e3150SMenglong Dong * RBP - rbx_off [ rbx value ] always
2923473e3150SMenglong Dong *
2924e384c7b7SKui-Feng Lee * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
2925473e3150SMenglong Dong *
2926473e3150SMenglong Dong * [ stack_argN ] BPF_TRAMP_F_CALL_ORIG
2927473e3150SMenglong Dong * [ ... ]
2928473e3150SMenglong Dong * [ stack_arg2 ]
2929473e3150SMenglong Dong * RBP - arg_stack_off [ stack_arg1 ]
2930116e04baSLeon Hwang * RSP [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX
29315edf6a19SJiri Olsa */
29325edf6a19SJiri Olsa
2933356ed649SHou Tao /* room for return value of orig_call or fentry prog */
2934356ed649SHou Tao save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
2935356ed649SHou Tao if (save_ret)
2936356ed649SHou Tao stack_size += 8;
2937fec56f58SAlexei Starovoitov
29387f788049SPu Lehui stack_size += nr_regs * 8;
29395edf6a19SJiri Olsa regs_off = stack_size;
29405edf6a19SJiri Olsa
29417f788049SPu Lehui /* regs count */
2942f92c1e18SJiri Olsa stack_size += 8;
29437f788049SPu Lehui nregs_off = stack_size;
2944f92c1e18SJiri Olsa
29457e6f3cd8SJiri Olsa if (flags & BPF_TRAMP_F_IP_ARG)
29467e6f3cd8SJiri Olsa stack_size += 8; /* room for IP address argument */
29477e6f3cd8SJiri Olsa
29485edf6a19SJiri Olsa ip_off = stack_size;
29495edf6a19SJiri Olsa
2950473e3150SMenglong Dong stack_size += 8;
2951473e3150SMenglong Dong rbx_off = stack_size;
2952473e3150SMenglong Dong
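	/* room for bpf_tramp_run_ctx, rounded up to an 8-byte multiple */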
2953e384c7b7SKui-Feng Lee stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
2954e384c7b7SKui-Feng Lee run_ctx_off = stack_size;
2955e384c7b7SKui-Feng Lee
2956473e3150SMenglong Dong if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
2957473e3150SMenglong Dong /* the space that used to pass arguments on-stack */
2958473e3150SMenglong Dong stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
2959473e3150SMenglong Dong /* make sure the stack pointer is 16-byte aligned if we
2960473e3150SMenglong Dong * need to pass arguments on the stack, which means
2961473e3150SMenglong Dong * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
2962473e3150SMenglong Dong * should be 16-byte aligned. The following code depends
2963473e3150SMenglong Dong * on stack_size already being 8-byte aligned.
2964473e3150SMenglong Dong */
2965473e3150SMenglong Dong stack_size += (stack_size % 16) ? 0 : 8;
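		/* e.g. stack_size == 40 already gives (40 + 24) % 16 == 0,
		 * while stack_size == 48 needs the extra 8 bytes:
		 * (48 + 8 + 24) % 16 == 0.
		 */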
2966473e3150SMenglong Dong }
2967473e3150SMenglong Dong
2968473e3150SMenglong Dong arg_stack_off = stack_size;
2969473e3150SMenglong Dong
297058912710SPeter Zijlstra if (flags & BPF_TRAMP_F_SKIP_FRAME) {
2971fec56f58SAlexei Starovoitov /* skip the patched call instruction and point orig_call to the
2972fec56f58SAlexei Starovoitov * actual body of the kernel function.
2973fec56f58SAlexei Starovoitov */
297458912710SPeter Zijlstra if (is_endbr(*(u32 *)orig_call))
297558912710SPeter Zijlstra orig_call += ENDBR_INSN_SIZE;
29764b3da77bSDaniel Borkmann orig_call += X86_PATCH_SIZE;
297758912710SPeter Zijlstra }
2978fec56f58SAlexei Starovoitov
29793ba026fcSSong Liu prog = rw_image;
2980fec56f58SAlexei Starovoitov
29812cd3e377SPeter Zijlstra if (flags & BPF_TRAMP_F_INDIRECT) {
2982ee3e2469SPeter Zijlstra /*
29832cd3e377SPeter Zijlstra * Indirect call for bpf_struct_ops
29842cd3e377SPeter Zijlstra */
29852cd3e377SPeter Zijlstra emit_cfi(&prog, cfi_get_func_hash(func_addr));
29862cd3e377SPeter Zijlstra } else {
29872cd3e377SPeter Zijlstra /*
29882cd3e377SPeter Zijlstra * Direct-call fentry stub, as such it needs accounting for the
29892cd3e377SPeter Zijlstra * __fentry__ call.
2990ee3e2469SPeter Zijlstra */
29916a537453SJoan Bruguera Micó x86_call_depth_emit_accounting(&prog, NULL, image);
29922cd3e377SPeter Zijlstra }
2993fec56f58SAlexei Starovoitov EMIT1(0x55); /* push rbp */
2994fec56f58SAlexei Starovoitov EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
29952cd3e377SPeter Zijlstra if (!is_imm8(stack_size)) {
2996473e3150SMenglong Dong /* sub rsp, stack_size */
2997473e3150SMenglong Dong EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
29982cd3e377SPeter Zijlstra } else {
2999473e3150SMenglong Dong /* sub rsp, stack_size */
3000473e3150SMenglong Dong EMIT4(0x48, 0x83, 0xEC, stack_size);
30012cd3e377SPeter Zijlstra }
30022b5dcb31SLeon Hwang if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
30032b5dcb31SLeon Hwang EMIT1(0x50); /* push rax */
3004473e3150SMenglong Dong /* mov QWORD PTR [rbp - rbx_off], rbx */
3005473e3150SMenglong Dong emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
3006fec56f58SAlexei Starovoitov
3007a9c5ad31SYonghong Song /* Store number of argument registers of the traced function:
30087f788049SPu Lehui * mov rax, nr_regs
30097f788049SPu Lehui * mov QWORD PTR [rbp - nregs_off], rax
3010f92c1e18SJiri Olsa */
30117f788049SPu Lehui emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
30127f788049SPu Lehui emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
3013f92c1e18SJiri Olsa
30147e6f3cd8SJiri Olsa if (flags & BPF_TRAMP_F_IP_ARG) {
30157e6f3cd8SJiri Olsa /* Store IP address of the traced function:
30164d854f4fSJiri Olsa * movabsq rax, func_addr
30175edf6a19SJiri Olsa * mov QWORD PTR [rbp - ip_off], rax
30187e6f3cd8SJiri Olsa */
30194d854f4fSJiri Olsa emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
30205edf6a19SJiri Olsa emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
30217e6f3cd8SJiri Olsa }
30227e6f3cd8SJiri Olsa
3023473e3150SMenglong Dong save_args(m, &prog, regs_off, false);
3024fec56f58SAlexei Starovoitov
3025e21aa341SAlexei Starovoitov if (flags & BPF_TRAMP_F_CALL_ORIG) {
3026e21aa341SAlexei Starovoitov /* arg1: mov rdi, im */
3027e21aa341SAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
30283ba026fcSSong Liu if (emit_rsb_call(&prog, __bpf_tramp_enter,
30293ba026fcSSong Liu image + (prog - (u8 *)rw_image))) {
3030e21aa341SAlexei Starovoitov ret = -EINVAL;
3031e21aa341SAlexei Starovoitov goto cleanup;
3032e21aa341SAlexei Starovoitov }
3033e21aa341SAlexei Starovoitov }
3034e21aa341SAlexei Starovoitov
30352cd3e377SPeter Zijlstra if (fentry->nr_links) {
3036e384c7b7SKui-Feng Lee if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
30373ba026fcSSong Liu flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
3038fec56f58SAlexei Starovoitov return -EINVAL;
30392cd3e377SPeter Zijlstra }
3040fec56f58SAlexei Starovoitov
3041f7e0beafSKui-Feng Lee if (fmod_ret->nr_links) {
3042f7e0beafSKui-Feng Lee branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
3043ae240823SKP Singh GFP_KERNEL);
3044ae240823SKP Singh if (!branches)
3045ae240823SKP Singh return -ENOMEM;
3046ae240823SKP Singh
30475edf6a19SJiri Olsa if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
30483ba026fcSSong Liu run_ctx_off, branches, image, rw_image)) {
3049ae240823SKP Singh ret = -EINVAL;
3050ae240823SKP Singh goto cleanup;
3051ae240823SKP Singh }
3052ae240823SKP Singh }
3053ae240823SKP Singh
3054fec56f58SAlexei Starovoitov if (flags & BPF_TRAMP_F_CALL_ORIG) {
3055473e3150SMenglong Dong restore_regs(m, &prog, regs_off);
3056473e3150SMenglong Dong save_args(m, &prog, arg_stack_off, true);
3057fec56f58SAlexei Starovoitov
30582cd3e377SPeter Zijlstra if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3059116e04baSLeon Hwang /* Before calling the original function, load the
3060116e04baSLeon Hwang * tail_call_cnt_ptr from stack to rax.
30612b5dcb31SLeon Hwang */
3062116e04baSLeon Hwang LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
30632cd3e377SPeter Zijlstra }
30642b5dcb31SLeon Hwang
3065316cba62SJiri Olsa if (flags & BPF_TRAMP_F_ORIG_STACK) {
30662b5dcb31SLeon Hwang emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
30672b5dcb31SLeon Hwang EMIT2(0xff, 0xd3); /* call *rbx */
3068316cba62SJiri Olsa } else {
3069fec56f58SAlexei Starovoitov /* call original function */
30703ba026fcSSong Liu if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
3071ae240823SKP Singh ret = -EINVAL;
3072ae240823SKP Singh goto cleanup;
3073ae240823SKP Singh }
3074316cba62SJiri Olsa }
3075fec56f58SAlexei Starovoitov /* remember return value in a stack for bpf prog to access */
3076fec56f58SAlexei Starovoitov emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
30773ba026fcSSong Liu im->ip_after_call = image + (prog - (u8 *)rw_image);
307800bc8988SLeon Hwang emit_nops(&prog, X86_PATCH_SIZE);
3079fec56f58SAlexei Starovoitov }
3080fec56f58SAlexei Starovoitov
3081f7e0beafSKui-Feng Lee if (fmod_ret->nr_links) {
3082ae240823SKP Singh /* From Intel 64 and IA-32 Architectures Optimization
3083ae240823SKP Singh * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3084ae240823SKP Singh * Coding Rule 11: All branch targets should be 16-byte
3085ae240823SKP Singh * aligned.
3086ae240823SKP Singh */
3087ae240823SKP Singh emit_align(&prog, 16);
3088ae240823SKP Singh /* Update the branches saved in invoke_bpf_mod_ret with the
3089ae240823SKP Singh * aligned address of do_fexit.
3090ae240823SKP Singh */
30912cd3e377SPeter Zijlstra for (i = 0; i < fmod_ret->nr_links; i++) {
30923ba026fcSSong Liu emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
30933ba026fcSSong Liu image + (branches[i] - (u8 *)rw_image), X86_JNE);
3094ae240823SKP Singh }
3095ae240823SKP Singh }
3096ae240823SKP Singh
30972cd3e377SPeter Zijlstra if (fexit->nr_links) {
30983ba026fcSSong Liu if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
30993ba026fcSSong Liu false, image, rw_image)) {
3100ae240823SKP Singh ret = -EINVAL;
3101ae240823SKP Singh goto cleanup;
3102ae240823SKP Singh }
31032cd3e377SPeter Zijlstra }
3104fec56f58SAlexei Starovoitov
3105fec56f58SAlexei Starovoitov if (flags & BPF_TRAMP_F_RESTORE_REGS)
3106473e3150SMenglong Dong restore_regs(m, &prog, regs_off);
3107fec56f58SAlexei Starovoitov
3108ae240823SKP Singh /* This needs to be done regardless. If there were fmod_ret programs,
3109ae240823SKP Singh * the return value is only updated on the stack and still needs to be
3110ae240823SKP Singh * restored to R0.
3111ae240823SKP Singh */
3112e21aa341SAlexei Starovoitov if (flags & BPF_TRAMP_F_CALL_ORIG) {
31133ba026fcSSong Liu im->ip_epilogue = image + (prog - (u8 *)rw_image);
3114e21aa341SAlexei Starovoitov /* arg1: mov rdi, im */
3115e21aa341SAlexei Starovoitov emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
31163ba026fcSSong Liu if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
3117e21aa341SAlexei Starovoitov ret = -EINVAL;
3118e21aa341SAlexei Starovoitov goto cleanup;
3119e21aa341SAlexei Starovoitov }
31202cd3e377SPeter Zijlstra } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3121116e04baSLeon Hwang /* Before running the original function, load the
3122116e04baSLeon Hwang * tail_call_cnt_ptr from the stack into rax.
31232b5dcb31SLeon Hwang */
3124116e04baSLeon Hwang LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
31252cd3e377SPeter Zijlstra }
31262b5dcb31SLeon Hwang
3127356ed649SHou Tao /* restore return value of orig_call or fentry prog back into RAX */
3128356ed649SHou Tao if (save_ret)
3129356ed649SHou Tao emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
3130fec56f58SAlexei Starovoitov
3131473e3150SMenglong Dong emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
3132fec56f58SAlexei Starovoitov EMIT1(0xC9); /* leave */
31332cd3e377SPeter Zijlstra if (flags & BPF_TRAMP_F_SKIP_FRAME) {
3134fec56f58SAlexei Starovoitov /* skip our return address and return to parent */
3135fec56f58SAlexei Starovoitov EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
31362cd3e377SPeter Zijlstra }
31373ba026fcSSong Liu emit_return(&prog, image + (prog - (u8 *)rw_image));
313885d33df3SMartin KaFai Lau /* Make sure the trampoline generation logic doesn't overflow */
31393ba026fcSSong Liu if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
3140ae240823SKP Singh ret = -EFAULT;
3141ae240823SKP Singh goto cleanup;
3142ae240823SKP Singh }
31433ba026fcSSong Liu ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;
3144ae240823SKP Singh
3145ae240823SKP Singh cleanup:
3146ae240823SKP Singh kfree(branches);
3147ae240823SKP Singh return ret;
3148fec56f58SAlexei Starovoitov }
3149fec56f58SAlexei Starovoitov
arch_alloc_bpf_trampoline(unsigned int size)31503ba026fcSSong Liu void *arch_alloc_bpf_trampoline(unsigned int size)
31513ba026fcSSong Liu {
31523ba026fcSSong Liu return bpf_prog_pack_alloc(size, jit_fill_hole);
31533ba026fcSSong Liu }
31543ba026fcSSong Liu
arch_free_bpf_trampoline(void * image,unsigned int size)31553ba026fcSSong Liu void arch_free_bpf_trampoline(void *image, unsigned int size)
31563ba026fcSSong Liu {
31573ba026fcSSong Liu bpf_prog_pack_free(image, size);
31583ba026fcSSong Liu }
31593ba026fcSSong Liu
arch_protect_bpf_trampoline(void * image,unsigned int size)3160c733239fSChristophe Leroy int arch_protect_bpf_trampoline(void *image, unsigned int size)
31613ba026fcSSong Liu {
3162c733239fSChristophe Leroy return 0;
31633ba026fcSSong Liu }
31643ba026fcSSong Liu
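/* The trampoline is JITed into a plain writable scratch buffer first and
 * only then copied into the final executable image via bpf_arch_text_copy(),
 * because the image may live in a read-only bpf_prog_pack region that
 * cannot be written to directly.
 */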
arch_prepare_bpf_trampoline(struct bpf_tramp_image * im,void * image,void * image_end,const struct btf_func_model * m,u32 flags,struct bpf_tramp_links * tlinks,void * func_addr)316596d1b7c0SSong Liu int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
316696d1b7c0SSong Liu const struct btf_func_model *m, u32 flags,
316796d1b7c0SSong Liu struct bpf_tramp_links *tlinks,
316896d1b7c0SSong Liu void *func_addr)
316996d1b7c0SSong Liu {
31703ba026fcSSong Liu void *rw_image, *tmp;
31713ba026fcSSong Liu int ret;
31723ba026fcSSong Liu u32 size = image_end - image;
31733ba026fcSSong Liu
31743ba026fcSSong Liu /* rw_image doesn't need to be in the module memory range, so we can
31753ba026fcSSong Liu * use kvmalloc.
31763ba026fcSSong Liu */
31773ba026fcSSong Liu rw_image = kvmalloc(size, GFP_KERNEL);
31783ba026fcSSong Liu if (!rw_image)
31793ba026fcSSong Liu return -ENOMEM;
31803ba026fcSSong Liu
31813ba026fcSSong Liu ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
31823ba026fcSSong Liu flags, tlinks, func_addr);
31833ba026fcSSong Liu if (ret < 0)
31843ba026fcSSong Liu goto out;
31853ba026fcSSong Liu
31863ba026fcSSong Liu tmp = bpf_arch_text_copy(image, rw_image, size);
31873ba026fcSSong Liu if (IS_ERR(tmp))
31883ba026fcSSong Liu ret = PTR_ERR(tmp);
31893ba026fcSSong Liu out:
31903ba026fcSSong Liu kvfree(rw_image);
31913ba026fcSSong Liu return ret;
319296d1b7c0SSong Liu }
319396d1b7c0SSong Liu
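/* The required size is computed with a dry-run JIT into a throwaway
 * executable page; note that __arch_prepare_bpf_trampoline() already
 * includes the BPF_INSN_SAFETY margin in its return value.
 */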
arch_bpf_trampoline_size(const struct btf_func_model * m,u32 flags,struct bpf_tramp_links * tlinks,void * func_addr)319496d1b7c0SSong Liu int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
319596d1b7c0SSong Liu struct bpf_tramp_links *tlinks, void *func_addr)
319696d1b7c0SSong Liu {
319796d1b7c0SSong Liu struct bpf_tramp_image im;
319896d1b7c0SSong Liu void *image;
319996d1b7c0SSong Liu int ret;
320096d1b7c0SSong Liu
320196d1b7c0SSong Liu /* Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
320296d1b7c0SSong Liu * This will NOT cause fragmentation in the direct map, as we do not
320396d1b7c0SSong Liu * call set_memory_*() on this buffer.
320496d1b7c0SSong Liu *
320596d1b7c0SSong Liu * We cannot use kvmalloc here, because we need the image to be in
320696d1b7c0SSong Liu * the module memory range.
320796d1b7c0SSong Liu */
320896d1b7c0SSong Liu image = bpf_jit_alloc_exec(PAGE_SIZE);
320996d1b7c0SSong Liu if (!image)
321096d1b7c0SSong Liu return -ENOMEM;
321196d1b7c0SSong Liu
32123ba026fcSSong Liu ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
32133ba026fcSSong Liu m, flags, tlinks, func_addr);
321496d1b7c0SSong Liu bpf_jit_free_exec(image);
321596d1b7c0SSong Liu return ret;
321696d1b7c0SSong Liu }
321796d1b7c0SSong Liu
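/* Emit a binary search over the sorted addresses in progs[a..b] against the
 * target address in rdx. An illustrative C sketch of the control flow the
 * emitted code implements (not emitted verbatim):
 *
 *	if (a == b) {				// leaf node
 *		if (rdx == progs[a])
 *			goto progs[a];		// direct jump to the program
 *		jmp *rdx;			// retpoline-safe indirect jump
 *	} else {
 *		if (rdx > progs[a + pivot])
 *			goto upper_part;	// recurse into [a+pivot+1, b]
 *		lower_part;			// recurse into [a, a+pivot]
 *	}
 */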
emit_bpf_dispatcher(u8 ** pprog,int a,int b,s64 * progs,u8 * image,u8 * buf)321819c02415SSong Liu static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
321975ccbef6SBjörn Töpel {
32207e639208SKP Singh u8 *jg_reloc, *prog = *pprog;
3221ced50fc4SJiri Olsa int pivot, err, jg_bytes = 1;
322275ccbef6SBjörn Töpel s64 jg_offset;
322375ccbef6SBjörn Töpel
322475ccbef6SBjörn Töpel if (a == b) {
322575ccbef6SBjörn Töpel /* Leaf node of recursion, i.e. not a range of indices
322675ccbef6SBjörn Töpel * anymore.
322775ccbef6SBjörn Töpel */
322875ccbef6SBjörn Töpel EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
322975ccbef6SBjörn Töpel if (!is_simm32(progs[a]))
323075ccbef6SBjörn Töpel return -1;
323175ccbef6SBjörn Töpel EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
323275ccbef6SBjörn Töpel progs[a]);
323375ccbef6SBjörn Töpel err = emit_cond_near_jump(&prog, /* je func */
323419c02415SSong Liu (void *)progs[a], image + (prog - buf),
323575ccbef6SBjörn Töpel X86_JE);
323675ccbef6SBjörn Töpel if (err)
323775ccbef6SBjörn Töpel return err;
323875ccbef6SBjörn Töpel
323919c02415SSong Liu emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
324075ccbef6SBjörn Töpel
324175ccbef6SBjörn Töpel *pprog = prog;
324275ccbef6SBjörn Töpel return 0;
324375ccbef6SBjörn Töpel }
324475ccbef6SBjörn Töpel
324575ccbef6SBjörn Töpel /* Not a leaf node, so we pivot, and recursively descend into
324675ccbef6SBjörn Töpel * the lower and upper ranges.
324775ccbef6SBjörn Töpel */
324875ccbef6SBjörn Töpel pivot = (b - a) / 2;
324975ccbef6SBjörn Töpel EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
325075ccbef6SBjörn Töpel if (!is_simm32(progs[a + pivot]))
325175ccbef6SBjörn Töpel return -1;
325275ccbef6SBjörn Töpel EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
325375ccbef6SBjörn Töpel
325475ccbef6SBjörn Töpel if (pivot > 2) { /* jg upper_part */
325575ccbef6SBjörn Töpel /* Require near jump. */
325675ccbef6SBjörn Töpel jg_bytes = 4;
325775ccbef6SBjörn Töpel EMIT2_off32(0x0F, X86_JG + 0x10, 0);
325875ccbef6SBjörn Töpel } else {
325975ccbef6SBjörn Töpel EMIT2(X86_JG, 0);
326075ccbef6SBjörn Töpel }
326175ccbef6SBjörn Töpel jg_reloc = prog;
326275ccbef6SBjörn Töpel
326375ccbef6SBjörn Töpel err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
326419c02415SSong Liu progs, image, buf);
326575ccbef6SBjörn Töpel if (err)
326675ccbef6SBjörn Töpel return err;
326775ccbef6SBjörn Töpel
3268116eb788SBjörn Töpel /* From Intel 64 and IA-32 Architectures Optimization
3269116eb788SBjörn Töpel * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3270116eb788SBjörn Töpel * Coding Rule 11: All branch targets should be 16-byte
3271116eb788SBjörn Töpel * aligned.
3272116eb788SBjörn Töpel */
32737e639208SKP Singh emit_align(&prog, 16);
327475ccbef6SBjörn Töpel jg_offset = prog - jg_reloc;
327575ccbef6SBjörn Töpel emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
327675ccbef6SBjörn Töpel
327775ccbef6SBjörn Töpel err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
327819c02415SSong Liu b, progs, image, buf);
327975ccbef6SBjörn Töpel if (err)
328075ccbef6SBjörn Töpel return err;
328175ccbef6SBjörn Töpel
328275ccbef6SBjörn Töpel *pprog = prog;
328375ccbef6SBjörn Töpel return 0;
328475ccbef6SBjörn Töpel }
328575ccbef6SBjörn Töpel
cmp_ips(const void * a,const void * b)328675ccbef6SBjörn Töpel static int cmp_ips(const void *a, const void *b)
328775ccbef6SBjörn Töpel {
328875ccbef6SBjörn Töpel const s64 *ipa = a;
328975ccbef6SBjörn Töpel const s64 *ipb = b;
329075ccbef6SBjörn Töpel
329175ccbef6SBjörn Töpel if (*ipa > *ipb)
329275ccbef6SBjörn Töpel return 1;
329375ccbef6SBjörn Töpel if (*ipa < *ipb)
329475ccbef6SBjörn Töpel return -1;
329575ccbef6SBjörn Töpel return 0;
329675ccbef6SBjörn Töpel }
329775ccbef6SBjörn Töpel
arch_prepare_bpf_dispatcher(void * image,void * buf,s64 * funcs,int num_funcs)329819c02415SSong Liu int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
329975ccbef6SBjörn Töpel {
330019c02415SSong Liu u8 *prog = buf;
330175ccbef6SBjörn Töpel
330275ccbef6SBjörn Töpel sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
330319c02415SSong Liu return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
330475ccbef6SBjörn Töpel }
330575ccbef6SBjörn Töpel
33061c2a088aSAlexei Starovoitov struct x64_jit_data {
33071022a549SSong Liu struct bpf_binary_header *rw_header;
33081c2a088aSAlexei Starovoitov struct bpf_binary_header *header;
33091c2a088aSAlexei Starovoitov int *addrs;
33101c2a088aSAlexei Starovoitov u8 *image;
33111c2a088aSAlexei Starovoitov int proglen;
33121c2a088aSAlexei Starovoitov struct jit_context ctx;
33131c2a088aSAlexei Starovoitov };
33141c2a088aSAlexei Starovoitov
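/* The JIT relies on the image shrinking monotonically between passes to
 * converge. If it has not converged after PADDING_PASSES, the remaining
 * passes pad shrinking jumps with nops instead of re-encoding them, which
 * pins the instruction sizes and guarantees convergence within MAX_PASSES.
 */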
331593c5aeccSGary Lin #define MAX_PASSES 20
331693c5aeccSGary Lin #define PADDING_PASSES (MAX_PASSES - 5)
331793c5aeccSGary Lin
bpf_int_jit_compile(struct bpf_prog * prog)3318d1c55ab5SDaniel Borkmann struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
331962258278SAlexei Starovoitov {
33201022a549SSong Liu struct bpf_binary_header *rw_header = NULL;
3321f3c2af7bSAlexei Starovoitov struct bpf_binary_header *header = NULL;
3322959a7579SDaniel Borkmann struct bpf_prog *tmp, *orig_prog = prog;
33231c2a088aSAlexei Starovoitov struct x64_jit_data *jit_data;
3324f3c2af7bSAlexei Starovoitov int proglen, oldproglen = 0;
3325f3c2af7bSAlexei Starovoitov struct jit_context ctx = {};
3326959a7579SDaniel Borkmann bool tmp_blinded = false;
33271c2a088aSAlexei Starovoitov bool extra_pass = false;
332893c5aeccSGary Lin bool padding = false;
33291022a549SSong Liu u8 *rw_image = NULL;
3330f3c2af7bSAlexei Starovoitov u8 *image = NULL;
3331f3c2af7bSAlexei Starovoitov int *addrs;
3332f3c2af7bSAlexei Starovoitov int pass;
3333f3c2af7bSAlexei Starovoitov int i;
3334f3c2af7bSAlexei Starovoitov
333560b58afcSAlexei Starovoitov if (!prog->jit_requested)
3336959a7579SDaniel Borkmann return orig_prog;
3337959a7579SDaniel Borkmann
3338959a7579SDaniel Borkmann tmp = bpf_jit_blind_constants(prog);
3339a2c7a983SIngo Molnar /*
3340a2c7a983SIngo Molnar * If blinding was requested and we failed during blinding,
3341959a7579SDaniel Borkmann * we must fall back to the interpreter.
3342959a7579SDaniel Borkmann */
3343959a7579SDaniel Borkmann if (IS_ERR(tmp))
3344959a7579SDaniel Borkmann return orig_prog;
3345959a7579SDaniel Borkmann if (tmp != prog) {
3346959a7579SDaniel Borkmann tmp_blinded = true;
3347959a7579SDaniel Borkmann prog = tmp;
3348959a7579SDaniel Borkmann }
3349f3c2af7bSAlexei Starovoitov
33501c2a088aSAlexei Starovoitov jit_data = prog->aux->jit_data;
33511c2a088aSAlexei Starovoitov if (!jit_data) {
33521c2a088aSAlexei Starovoitov jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
33531c2a088aSAlexei Starovoitov if (!jit_data) {
33541c2a088aSAlexei Starovoitov prog = orig_prog;
33551c2a088aSAlexei Starovoitov goto out;
33561c2a088aSAlexei Starovoitov }
33571c2a088aSAlexei Starovoitov prog->aux->jit_data = jit_data;
33581c2a088aSAlexei Starovoitov }
33591c2a088aSAlexei Starovoitov addrs = jit_data->addrs;
33601c2a088aSAlexei Starovoitov if (addrs) {
33611c2a088aSAlexei Starovoitov ctx = jit_data->ctx;
33621c2a088aSAlexei Starovoitov oldproglen = jit_data->proglen;
33631c2a088aSAlexei Starovoitov image = jit_data->image;
33641c2a088aSAlexei Starovoitov header = jit_data->header;
33651022a549SSong Liu rw_header = jit_data->rw_header;
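/* image and rw_image sit at the same offset within their respective
 * binary headers, so the writable pointer can be recomputed from the
 * read-only one.
 */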
33661022a549SSong Liu rw_image = (void *)rw_header + ((void *)image - (void *)header);
33671c2a088aSAlexei Starovoitov extra_pass = true;
336893c5aeccSGary Lin padding = true;
33691c2a088aSAlexei Starovoitov goto skip_init_addrs;
33701c2a088aSAlexei Starovoitov }
3371de920fc6SYonghong Song addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
3372959a7579SDaniel Borkmann if (!addrs) {
3373959a7579SDaniel Borkmann prog = orig_prog;
33741c2a088aSAlexei Starovoitov goto out_addrs;
3375959a7579SDaniel Borkmann }
3376f3c2af7bSAlexei Starovoitov
3377a2c7a983SIngo Molnar /*
3378a2c7a983SIngo Molnar * Before the first pass, make a rough estimate of addrs[]:
3379a2c7a983SIngo Molnar * each BPF instruction is translated to fewer than 64 bytes.
3380f3c2af7bSAlexei Starovoitov */
33817c2e988fSAlexei Starovoitov for (proglen = 0, i = 0; i <= prog->len; i++) {
3382f3c2af7bSAlexei Starovoitov proglen += 64;
3383f3c2af7bSAlexei Starovoitov addrs[i] = proglen;
3384f3c2af7bSAlexei Starovoitov }
3385f3c2af7bSAlexei Starovoitov ctx.cleanup_addr = proglen;
33861c2a088aSAlexei Starovoitov skip_init_addrs:
3387f3c2af7bSAlexei Starovoitov
3388a2c7a983SIngo Molnar /*
3389a2c7a983SIngo Molnar * The JITed image shrinks with every pass and the loop iterates
3390a2c7a983SIngo Molnar * until the image stops shrinking. Very large BPF programs
33913f7352bfSAlexei Starovoitov * may only converge on the last pass. In such a case, do one
3392a2c7a983SIngo Molnar * more pass to emit the final image.
33933f7352bfSAlexei Starovoitov */
339493c5aeccSGary Lin for (pass = 0; pass < MAX_PASSES || image; pass++) {
339593c5aeccSGary Lin if (!padding && pass >= PADDING_PASSES)
339693c5aeccSGary Lin padding = true;
33971022a549SSong Liu proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
3398f3c2af7bSAlexei Starovoitov if (proglen <= 0) {
33993aab8884SDaniel Borkmann out_image:
3400f3c2af7bSAlexei Starovoitov image = NULL;
3401676b2daaSSong Liu if (header) {
3402676b2daaSSong Liu bpf_arch_text_copy(&header->size, &rw_header->size,
3403676b2daaSSong Liu sizeof(rw_header->size));
34041022a549SSong Liu bpf_jit_binary_pack_free(header, rw_header);
3405676b2daaSSong Liu }
340673e14451SHou Tao /* Fall back to interpreter mode */
3407959a7579SDaniel Borkmann prog = orig_prog;
340873e14451SHou Tao if (extra_pass) {
340973e14451SHou Tao prog->bpf_func = NULL;
341073e14451SHou Tao prog->jited = 0;
341173e14451SHou Tao prog->jited_len = 0;
341273e14451SHou Tao }
3413959a7579SDaniel Borkmann goto out_addrs;
3414f3c2af7bSAlexei Starovoitov }
34150a14842fSEric Dumazet if (image) {
3416e0ee9c12SAlexei Starovoitov if (proglen != oldproglen) {
3417f3c2af7bSAlexei Starovoitov pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
3418f3c2af7bSAlexei Starovoitov proglen, oldproglen);
34193aab8884SDaniel Borkmann goto out_image;
3420e0ee9c12SAlexei Starovoitov }
34210a14842fSEric Dumazet break;
34220a14842fSEric Dumazet }
34230a14842fSEric Dumazet if (proglen == oldproglen) {
34243dec541bSAlexei Starovoitov /*
34253dec541bSAlexei Starovoitov * The number of entries in extable is the number of BPF_LDX
34263dec541bSAlexei Starovoitov * insns that access kernel memory via "pointer to BTF type".
34273dec541bSAlexei Starovoitov * The verifier changed their opcode from LDX|MEM|size
34283dec541bSAlexei Starovoitov * to LDX|PROBE_MEM|size to make JITing easier.
34293dec541bSAlexei Starovoitov */
34303dec541bSAlexei Starovoitov u32 align = __alignof__(struct exception_table_entry);
34313dec541bSAlexei Starovoitov u32 extable_size = prog->aux->num_exentries *
34323dec541bSAlexei Starovoitov sizeof(struct exception_table_entry);
34333dec541bSAlexei Starovoitov
34343dec541bSAlexei Starovoitov /* allocate module memory for x86 insns and extable */
34351022a549SSong Liu header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
34361022a549SSong Liu &image, align, &rw_header, &rw_image,
34371022a549SSong Liu jit_fill_hole);
3438959a7579SDaniel Borkmann if (!header) {
3439959a7579SDaniel Borkmann prog = orig_prog;
3440959a7579SDaniel Borkmann goto out_addrs;
3441959a7579SDaniel Borkmann }
34423dec541bSAlexei Starovoitov prog->aux->extable = (void *) image + roundup(proglen, align);
34430a14842fSEric Dumazet }
34440a14842fSEric Dumazet oldproglen = proglen;
34456007b080SDaniel Borkmann cond_resched();
34460a14842fSEric Dumazet }
344779617801SDaniel Borkmann
34480a14842fSEric Dumazet if (bpf_jit_enable > 1)
3449ad96f1c9SYonghong Song bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
34500a14842fSEric Dumazet
34510a14842fSEric Dumazet if (image) {
34521c2a088aSAlexei Starovoitov if (!prog->is_func || extra_pass) {
34531022a549SSong Liu /*
34541022a549SSong Liu * bpf_jit_binary_pack_finalize fails in two scenarios:
34551022a549SSong Liu * 1) header does not point to proper module memory;
34561022a549SSong Liu * 2) the arch doesn't support bpf_arch_text_copy().
34571022a549SSong Liu *
3458f95f768fSSong Liu * Both cases are serious bugs and justify WARN_ON.
34591022a549SSong Liu */
34609919c5c9SRafael Passos if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) {
346173e14451SHou Tao /* header has been freed */
346273e14451SHou Tao header = NULL;
346373e14451SHou Tao goto out_image;
3464f95f768fSSong Liu }
3465f95f768fSSong Liu
3466428d5df1SDaniel Borkmann bpf_tail_call_direct_fixup(prog);
34671c2a088aSAlexei Starovoitov } else {
34681c2a088aSAlexei Starovoitov jit_data->addrs = addrs;
34691c2a088aSAlexei Starovoitov jit_data->ctx = ctx;
34701c2a088aSAlexei Starovoitov jit_data->proglen = proglen;
34711c2a088aSAlexei Starovoitov jit_data->image = image;
34721c2a088aSAlexei Starovoitov jit_data->header = header;
34731022a549SSong Liu jit_data->rw_header = rw_header;
34741c2a088aSAlexei Starovoitov }
34754f9087f1SPeter Zijlstra /*
34764f9087f1SPeter Zijlstra * ctx.prog_offset is used when CFI preambles put code *before*
34774f9087f1SPeter Zijlstra * the function. See emit_cfi(). For FineIBT specifically this code
34784f9087f1SPeter Zijlstra * can also be executed and bpf_prog_kallsyms_add() will
34794f9087f1SPeter Zijlstra * generate an additional symbol to cover this, hence also
34804f9087f1SPeter Zijlstra * decrement proglen.
34814f9087f1SPeter Zijlstra */
34824f9087f1SPeter Zijlstra prog->bpf_func = (void *)image + cfi_get_offset();
3483a91263d5SDaniel Borkmann prog->jited = 1;
34844f9087f1SPeter Zijlstra prog->jited_len = proglen - cfi_get_offset();
34859d5ecb09SDaniel Borkmann } else {
34869d5ecb09SDaniel Borkmann prog = orig_prog;
34870a14842fSEric Dumazet }
3488959a7579SDaniel Borkmann
348939f56ca9SDaniel Borkmann if (!image || !prog->is_func || extra_pass) {
3490c454a46bSMartin KaFai Lau if (image)
34917c2e988fSAlexei Starovoitov bpf_prog_fill_jited_linfo(prog, addrs + 1);
3492959a7579SDaniel Borkmann out_addrs:
3493de920fc6SYonghong Song kvfree(addrs);
34941c2a088aSAlexei Starovoitov kfree(jit_data);
34951c2a088aSAlexei Starovoitov prog->aux->jit_data = NULL;
34961c2a088aSAlexei Starovoitov }
3497959a7579SDaniel Borkmann out:
3498959a7579SDaniel Borkmann if (tmp_blinded)
3499959a7579SDaniel Borkmann bpf_jit_prog_release_other(prog, prog == orig_prog ?
3500959a7579SDaniel Borkmann tmp : orig_prog);
3501d1c55ab5SDaniel Borkmann return prog;
35020a14842fSEric Dumazet }
3503e6ac2450SMartin KaFai Lau
bpf_jit_supports_kfunc_call(void)3504e6ac2450SMartin KaFai Lau bool bpf_jit_supports_kfunc_call(void)
3505e6ac2450SMartin KaFai Lau {
3506e6ac2450SMartin KaFai Lau return true;
3507e6ac2450SMartin KaFai Lau }
3508ebc1415dSSong Liu
bpf_arch_text_copy(void * dst,void * src,size_t len)3509ebc1415dSSong Liu void *bpf_arch_text_copy(void *dst, void *src, size_t len)
3510ebc1415dSSong Liu {
3511ebc1415dSSong Liu if (text_poke_copy(dst, src, len) == NULL)
3512ebc1415dSSong Liu return ERR_PTR(-EINVAL);
3513ebc1415dSSong Liu return dst;
3514ebc1415dSSong Liu }
351595acd881STony Ambardar
351695acd881STony Ambardar /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bpf_jit_supports_subprog_tailcalls(void)351795acd881STony Ambardar bool bpf_jit_supports_subprog_tailcalls(void)
351895acd881STony Ambardar {
351995acd881STony Ambardar return true;
352095acd881STony Ambardar }
35211d5f82d9SSong Liu
bpf_jit_supports_percpu_insn(void)35227bdbf744SAndrii Nakryiko bool bpf_jit_supports_percpu_insn(void)
35237bdbf744SAndrii Nakryiko {
35247bdbf744SAndrii Nakryiko return true;
35257bdbf744SAndrii Nakryiko }
35267bdbf744SAndrii Nakryiko
bpf_jit_free(struct bpf_prog * prog)35271d5f82d9SSong Liu void bpf_jit_free(struct bpf_prog *prog)
35281d5f82d9SSong Liu {
35291d5f82d9SSong Liu if (prog->jited) {
35301d5f82d9SSong Liu struct x64_jit_data *jit_data = prog->aux->jit_data;
35311d5f82d9SSong Liu struct bpf_binary_header *hdr;
35321d5f82d9SSong Liu
35331d5f82d9SSong Liu /*
35341d5f82d9SSong Liu * If we fail the final pass of JIT (from jit_subprogs),
35351d5f82d9SSong Liu * the program may not be finalized yet. Call finalize here
35361d5f82d9SSong Liu * before freeing it.
35371d5f82d9SSong Liu */
35381d5f82d9SSong Liu if (jit_data) {
35399919c5c9SRafael Passos bpf_jit_binary_pack_finalize(jit_data->header,
35401d5f82d9SSong Liu jit_data->rw_header);
35411d5f82d9SSong Liu kvfree(jit_data->addrs);
35421d5f82d9SSong Liu kfree(jit_data);
35431d5f82d9SSong Liu }
35444f9087f1SPeter Zijlstra prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset();
35451d5f82d9SSong Liu hdr = bpf_jit_binary_pack_hdr(prog);
35461d5f82d9SSong Liu bpf_jit_binary_pack_free(hdr, NULL);
35471d5f82d9SSong Liu WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
35481d5f82d9SSong Liu }
35491d5f82d9SSong Liu
35501d5f82d9SSong Liu bpf_prog_unlock_free(prog);
35511d5f82d9SSong Liu }
3552fd5d27b7SKumar Kartikeya Dwivedi
bpf_jit_supports_exceptions(void)3553fd5d27b7SKumar Kartikeya Dwivedi bool bpf_jit_supports_exceptions(void)
3554fd5d27b7SKumar Kartikeya Dwivedi {
3555fd5d27b7SKumar Kartikeya Dwivedi /* We unwind through both kernel frames (starting from within the bpf_throw
35565bfdb4fbSKumar Kartikeya Dwivedi * call) and BPF frames. Therefore we require the ORC unwinder to be enabled
35575bfdb4fbSKumar Kartikeya Dwivedi * to walk kernel frames and reach BPF frames in the stack trace.
3558fd5d27b7SKumar Kartikeya Dwivedi */
35595bfdb4fbSKumar Kartikeya Dwivedi return IS_ENABLED(CONFIG_UNWINDER_ORC);
3560fd5d27b7SKumar Kartikeya Dwivedi }
3561fd5d27b7SKumar Kartikeya Dwivedi
arch_bpf_stack_walk(bool (* consume_fn)(void * cookie,u64 ip,u64 sp,u64 bp),void * cookie)3562fd5d27b7SKumar Kartikeya Dwivedi void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3563fd5d27b7SKumar Kartikeya Dwivedi {
35645bfdb4fbSKumar Kartikeya Dwivedi #if defined(CONFIG_UNWINDER_ORC)
3565fd5d27b7SKumar Kartikeya Dwivedi struct unwind_state state;
3566fd5d27b7SKumar Kartikeya Dwivedi unsigned long addr;
3567fd5d27b7SKumar Kartikeya Dwivedi
3568fd5d27b7SKumar Kartikeya Dwivedi for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
3569fd5d27b7SKumar Kartikeya Dwivedi unwind_next_frame(&state)) {
3570fd5d27b7SKumar Kartikeya Dwivedi addr = unwind_get_return_address(&state);
3571fd5d27b7SKumar Kartikeya Dwivedi if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
3572fd5d27b7SKumar Kartikeya Dwivedi break;
3573fd5d27b7SKumar Kartikeya Dwivedi }
3574fd5d27b7SKumar Kartikeya Dwivedi return;
3575fd5d27b7SKumar Kartikeya Dwivedi #endif
3576fd5d27b7SKumar Kartikeya Dwivedi WARN(1, "verification of programs using bpf_throw should have failed\n");
3577fd5d27b7SKumar Kartikeya Dwivedi }
35784b7de801SJiri Olsa
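/* A tail-call poke descriptor carries two patch sites: tailcall_target,
 * the direct jump to the installed program, and tailcall_bypass, a jump
 * that skips over the target while no valid program is in place.
 */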
bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor * poke,struct bpf_prog * new,struct bpf_prog * old)35794b7de801SJiri Olsa void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
35804b7de801SJiri Olsa struct bpf_prog *new, struct bpf_prog *old)
35814b7de801SJiri Olsa {
35824b7de801SJiri Olsa u8 *old_addr, *new_addr, *old_bypass_addr;
35834b7de801SJiri Olsa int ret;
35844b7de801SJiri Olsa
35854b7de801SJiri Olsa old_bypass_addr = old ? NULL : poke->bypass_addr;
35864b7de801SJiri Olsa old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
35874b7de801SJiri Olsa new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
35884b7de801SJiri Olsa
35894b7de801SJiri Olsa /*
35904b7de801SJiri Olsa * On program loading or teardown, the program's kallsym entry
35914b7de801SJiri Olsa * might not be in place, so we use __bpf_arch_text_poke to skip
35924b7de801SJiri Olsa * the kallsyms check.
35934b7de801SJiri Olsa */
35944b7de801SJiri Olsa if (new) {
35954b7de801SJiri Olsa ret = __bpf_arch_text_poke(poke->tailcall_target,
35964b7de801SJiri Olsa BPF_MOD_JUMP,
35974b7de801SJiri Olsa old_addr, new_addr);
35984b7de801SJiri Olsa BUG_ON(ret < 0);
35994b7de801SJiri Olsa if (!old) {
36004b7de801SJiri Olsa ret = __bpf_arch_text_poke(poke->tailcall_bypass,
36014b7de801SJiri Olsa BPF_MOD_JUMP,
36024b7de801SJiri Olsa poke->bypass_addr,
36034b7de801SJiri Olsa NULL);
36044b7de801SJiri Olsa BUG_ON(ret < 0);
36054b7de801SJiri Olsa }
36064b7de801SJiri Olsa } else {
36074b7de801SJiri Olsa ret = __bpf_arch_text_poke(poke->tailcall_bypass,
36084b7de801SJiri Olsa BPF_MOD_JUMP,
36094b7de801SJiri Olsa old_bypass_addr,
36104b7de801SJiri Olsa poke->bypass_addr);
36114b7de801SJiri Olsa BUG_ON(ret < 0);
36124b7de801SJiri Olsa /* Let other CPUs finish executing the program so
36134b7de801SJiri Olsa * that none of them can be exposed to an invalid
36144b7de801SJiri Olsa * state: a half-patched nop or a bogus stack unwind.
36154b7de801SJiri Olsa */
36164b7de801SJiri Olsa if (!ret)
36174b7de801SJiri Olsa synchronize_rcu();
36184b7de801SJiri Olsa ret = __bpf_arch_text_poke(poke->tailcall_target,
36194b7de801SJiri Olsa BPF_MOD_JUMP,
36204b7de801SJiri Olsa old_addr, NULL);
36214b7de801SJiri Olsa BUG_ON(ret < 0);
36224b7de801SJiri Olsa }
36234b7de801SJiri Olsa }
36247c05e7f3SHou Tao
bpf_jit_supports_arena(void)3625142fd4d2SAlexei Starovoitov bool bpf_jit_supports_arena(void)
3626142fd4d2SAlexei Starovoitov {
3627142fd4d2SAlexei Starovoitov return true;
3628142fd4d2SAlexei Starovoitov }
3629142fd4d2SAlexei Starovoitov
bpf_jit_supports_insn(struct bpf_insn * insn,bool in_arena)3630d503a04fSAlexei Starovoitov bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
3631d503a04fSAlexei Starovoitov {
3632d503a04fSAlexei Starovoitov if (!in_arena)
3633d503a04fSAlexei Starovoitov return true;
3634d503a04fSAlexei Starovoitov switch (insn->code) {
3635d503a04fSAlexei Starovoitov case BPF_STX | BPF_ATOMIC | BPF_W:
3636d503a04fSAlexei Starovoitov case BPF_STX | BPF_ATOMIC | BPF_DW:
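/* These fetch variants have no single x86 instruction and would need a
 * cmpxchg loop; presumably such a multi-insn sequence cannot be covered
 * by one exception-table fixup for faulting arena accesses, hence the
 * rejection.
 */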
3637d503a04fSAlexei Starovoitov if (insn->imm == (BPF_AND | BPF_FETCH) ||
3638d503a04fSAlexei Starovoitov insn->imm == (BPF_OR | BPF_FETCH) ||
3639d503a04fSAlexei Starovoitov insn->imm == (BPF_XOR | BPF_FETCH))
3640d503a04fSAlexei Starovoitov return false;
3641d503a04fSAlexei Starovoitov }
3642d503a04fSAlexei Starovoitov return true;
3643d503a04fSAlexei Starovoitov }
3644d503a04fSAlexei Starovoitov
bpf_jit_supports_ptr_xchg(void)36457c05e7f3SHou Tao bool bpf_jit_supports_ptr_xchg(void)
36467c05e7f3SHou Tao {
36477c05e7f3SHou Tao return true;
36487c05e7f3SHou Tao }
364966e13b61SPuranjay Mohan
365066e13b61SPuranjay Mohan /* x86-64 JIT emits its own code to filter user addresses, so return 0 here */
bpf_arch_uaddress_limit(void)365166e13b61SPuranjay Mohan u64 bpf_arch_uaddress_limit(void)
365266e13b61SPuranjay Mohan {
365366e13b61SPuranjay Mohan return 0;
365466e13b61SPuranjay Mohan }
3655