1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * BPF JIT compiler
4 *
5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7 */
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bpf.h>
12 #include <linux/memory.h>
13 #include <linux/sort.h>
14 #include <asm/extable.h>
15 #include <asm/ftrace.h>
16 #include <asm/set_memory.h>
17 #include <asm/nospec-branch.h>
18 #include <asm/text-patching.h>
19 #include <asm/unwind.h>
20 #include <asm/cfi.h>
21
22 static bool all_callee_regs_used[4] = {true, true, true, true};
23
24 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
25 {
26 if (len == 1)
27 *ptr = bytes;
28 else if (len == 2)
29 *(u16 *)ptr = bytes;
30 else {
31 *(u32 *)ptr = bytes;
32 barrier();
33 }
34 return ptr + len;
35 }
36
37 #define EMIT(bytes, len) \
38 do { prog = emit_code(prog, bytes, len); } while (0)
39
40 #define EMIT1(b1) EMIT(b1, 1)
41 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
42 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
43 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
44
45 #define EMIT1_off32(b1, off) \
46 do { EMIT1(b1); EMIT(off, 4); } while (0)
47 #define EMIT2_off32(b1, b2, off) \
48 do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
49 #define EMIT3_off32(b1, b2, b3, off) \
50 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
51 #define EMIT4_off32(b1, b2, b3, b4, off) \
52 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
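/*
 * Illustrative note: the multi-byte EMIT macros pack their byte arguments
 * little-endian into one u32 before handing it to emit_code(). For example,
 * EMIT3(0x48, 0x89, 0xF7) passes the value 0x00F78948, which lands in the
 * image as the bytes 48 89 F7, i.e. 'mov rdi, rsi'.
 */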
53
54 #ifdef CONFIG_X86_KERNEL_IBT
55 #define EMIT_ENDBR() EMIT(gen_endbr(), 4)
56 #define EMIT_ENDBR_POISON() EMIT(gen_endbr_poison(), 4)
57 #else
58 #define EMIT_ENDBR()
59 #define EMIT_ENDBR_POISON()
60 #endif
61
62 static bool is_imm8(int value)
63 {
64 return value <= 127 && value >= -128;
65 }
66
67 static bool is_simm32(s64 value)
68 {
69 return value == (s64)(s32)value;
70 }
71
72 static bool is_uimm32(u64 value)
73 {
74 return value == (u64)(u32)value;
75 }
76
77 /* mov dst, src */
78 #define EMIT_mov(DST, SRC) \
79 do { \
80 if (DST != SRC) \
81 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
82 } while (0)
83
84 static int bpf_size_to_x86_bytes(int bpf_size)
85 {
86 if (bpf_size == BPF_W)
87 return 4;
88 else if (bpf_size == BPF_H)
89 return 2;
90 else if (bpf_size == BPF_B)
91 return 1;
92 else if (bpf_size == BPF_DW)
93 return 4; /* imm32 */
94 else
95 return 0;
96 }
97
98 /*
99 * List of x86 conditional jump opcodes (. + s8)
100 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
101 */
102 #define X86_JB 0x72
103 #define X86_JAE 0x73
104 #define X86_JE 0x74
105 #define X86_JNE 0x75
106 #define X86_JBE 0x76
107 #define X86_JA 0x77
108 #define X86_JL 0x7C
109 #define X86_JGE 0x7D
110 #define X86_JLE 0x7E
111 #define X86_JG 0x7F
112
113 /* Pick a register outside of BPF range for JIT internal work */
114 #define AUX_REG (MAX_BPF_JIT_REG + 1)
115 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
116 #define X86_REG_R12 (MAX_BPF_JIT_REG + 3)
117
118 /*
119 * The following table maps BPF registers to x86-64 registers.
120 *
121 * x86-64 register R12 is not mapped to any BPF register: as a base address
122 * register in load/store instructions it always needs an extra byte of
123 * encoding and is callee saved. The JIT reserves it for the arena base.
124 *
125 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
126 * trampoline. x86-64 register R10 is used for blinding (if enabled).
127 */
128 static const int reg2hex[] = {
129 [BPF_REG_0] = 0, /* RAX */
130 [BPF_REG_1] = 7, /* RDI */
131 [BPF_REG_2] = 6, /* RSI */
132 [BPF_REG_3] = 2, /* RDX */
133 [BPF_REG_4] = 1, /* RCX */
134 [BPF_REG_5] = 0, /* R8 */
135 [BPF_REG_6] = 3, /* RBX callee saved */
136 [BPF_REG_7] = 5, /* R13 callee saved */
137 [BPF_REG_8] = 6, /* R14 callee saved */
138 [BPF_REG_9] = 7, /* R15 callee saved */
139 [BPF_REG_FP] = 5, /* RBP readonly */
140 [BPF_REG_AX] = 2, /* R10 temp register */
141 [AUX_REG] = 3, /* R11 temp register */
142 [X86_REG_R9] = 1, /* R9 register, 6th function argument */
143 [X86_REG_R12] = 4, /* R12 callee saved */
144 };
145
146 static const int reg2pt_regs[] = {
147 [BPF_REG_0] = offsetof(struct pt_regs, ax),
148 [BPF_REG_1] = offsetof(struct pt_regs, di),
149 [BPF_REG_2] = offsetof(struct pt_regs, si),
150 [BPF_REG_3] = offsetof(struct pt_regs, dx),
151 [BPF_REG_4] = offsetof(struct pt_regs, cx),
152 [BPF_REG_5] = offsetof(struct pt_regs, r8),
153 [BPF_REG_6] = offsetof(struct pt_regs, bx),
154 [BPF_REG_7] = offsetof(struct pt_regs, r13),
155 [BPF_REG_8] = offsetof(struct pt_regs, r14),
156 [BPF_REG_9] = offsetof(struct pt_regs, r15),
157 };
158
159 /*
160 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15,
161 * which need an extra byte of encoding.
162 * rax,rcx,...,rbp have simpler encoding.
163 */
164 static bool is_ereg(u32 reg)
165 {
166 return (1 << reg) & (BIT(BPF_REG_5) |
167 BIT(AUX_REG) |
168 BIT(BPF_REG_7) |
169 BIT(BPF_REG_8) |
170 BIT(BPF_REG_9) |
171 BIT(X86_REG_R9) |
172 BIT(X86_REG_R12) |
173 BIT(BPF_REG_AX));
174 }
175
176 /*
177 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
178 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need an extra byte
179 * of encoding. al,cl,dl,bl have simpler encoding.
180 */
181 static bool is_ereg_8l(u32 reg)
182 {
183 return is_ereg(reg) ||
184 (1 << reg) & (BIT(BPF_REG_1) |
185 BIT(BPF_REG_2) |
186 BIT(BPF_REG_FP));
187 }
188
189 static bool is_axreg(u32 reg)
190 {
191 return reg == BPF_REG_0;
192 }
193
194 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
195 static u8 add_1mod(u8 byte, u32 reg)
196 {
197 if (is_ereg(reg))
198 byte |= 1;
199 return byte;
200 }
201
202 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
203 {
204 if (is_ereg(r1))
205 byte |= 1;
206 if (is_ereg(r2))
207 byte |= 4;
208 return byte;
209 }
210
211 static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index)
212 {
213 if (is_ereg(r1))
214 byte |= 1;
215 if (is_ereg(index))
216 byte |= 2;
217 if (is_ereg(r2))
218 byte |= 4;
219 return byte;
220 }
221
222 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
223 static u8 add_1reg(u8 byte, u32 dst_reg)
224 {
225 return byte + reg2hex[dst_reg];
226 }
227
228 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
229 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
230 {
231 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
232 }
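/*
 * Worked example (illustrative): EMIT_mov(BPF_REG_1, BPF_REG_2) expands to
 * EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)). Neither
 * register maps to r8..r15, so the REX prefix stays 0x48 (REX.W), and the
 * ModRM byte is 0xC0 + reg2hex[BPF_REG_1] + (reg2hex[BPF_REG_2] << 3)
 * = 0xC0 + 7 + (6 << 3) = 0xF7, giving 48 89 F7, i.e. 'mov rdi, rsi'.
 */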
233
234 /* Some 1-byte opcodes for binary ALU operations */
235 static u8 simple_alu_opcodes[] = {
236 [BPF_ADD] = 0x01,
237 [BPF_SUB] = 0x29,
238 [BPF_AND] = 0x21,
239 [BPF_OR] = 0x09,
240 [BPF_XOR] = 0x31,
241 [BPF_LSH] = 0xE0,
242 [BPF_RSH] = 0xE8,
243 [BPF_ARSH] = 0xF8,
244 };
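/*
 * These are the x86 "<op> r/m, r" opcodes; the shift entries are the base
 * ModRM bytes (0xC0 | /digit << 3) used with the 0xC1/0xD3 shift opcodes.
 * Illustrative example: BPF_ADD | BPF_X on 64-bit registers becomes
 * REX.W + 0x01 + ModRM, e.g. 48 01 F0 for 'add rax, rsi'.
 */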
245
246 static void jit_fill_hole(void *area, unsigned int size)
247 {
248 /* Fill whole space with INT3 instructions */
249 memset(area, 0xcc, size);
250 }
251
252 int bpf_arch_text_invalidate(void *dst, size_t len)
253 {
254 return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
255 }
256
257 struct jit_context {
258 int cleanup_addr; /* Epilogue code offset */
259
260 /*
261 * Program specific offsets of labels in the code; these rely on the
262 * JIT doing at least 2 passes, recording the position on the first
263 * pass, only to generate the correct offset on the second pass.
264 */
265 int tail_call_direct_label;
266 int tail_call_indirect_label;
267 };
268
269 /* Maximum number of bytes emitted while JITing one eBPF insn */
270 #define BPF_MAX_INSN_SIZE 128
271 #define BPF_INSN_SAFETY 64
272
273 /* Number of bytes emit_patch() needs to generate instructions */
274 #define X86_PATCH_SIZE 5
275 /* Number of bytes that will be skipped on tailcall */
276 #define X86_TAIL_CALL_OFFSET (11 + ENDBR_INSN_SIZE)
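/*
 * Rough byte accounting for X86_TAIL_CALL_OFFSET (see emit_prologue()):
 * ENDBR_INSN_SIZE for the ENDBR at the program entry, 5 bytes of nops
 * reserved as the BPF trampoline patch site, 2 bytes for 'xor eax, eax'
 * (or nop2), 1 byte for 'push rbp' and 3 bytes for 'mov rbp, rsp',
 * i.e. 11 + ENDBR_INSN_SIZE bytes that a tail call skips before landing
 * at the point marked in emit_prologue().
 */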
277
278 static void push_r12(u8 **pprog)
279 {
280 u8 *prog = *pprog;
281
282 EMIT2(0x41, 0x54); /* push r12 */
283 *pprog = prog;
284 }
285
286 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
287 {
288 u8 *prog = *pprog;
289
290 if (callee_regs_used[0])
291 EMIT1(0x53); /* push rbx */
292 if (callee_regs_used[1])
293 EMIT2(0x41, 0x55); /* push r13 */
294 if (callee_regs_used[2])
295 EMIT2(0x41, 0x56); /* push r14 */
296 if (callee_regs_used[3])
297 EMIT2(0x41, 0x57); /* push r15 */
298 *pprog = prog;
299 }
300
301 static void pop_r12(u8 **pprog)
302 {
303 u8 *prog = *pprog;
304
305 EMIT2(0x41, 0x5C); /* pop r12 */
306 *pprog = prog;
307 }
308
309 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
310 {
311 u8 *prog = *pprog;
312
313 if (callee_regs_used[3])
314 EMIT2(0x41, 0x5F); /* pop r15 */
315 if (callee_regs_used[2])
316 EMIT2(0x41, 0x5E); /* pop r14 */
317 if (callee_regs_used[1])
318 EMIT2(0x41, 0x5D); /* pop r13 */
319 if (callee_regs_used[0])
320 EMIT1(0x5B); /* pop rbx */
321 *pprog = prog;
322 }
323
324 static void emit_nops(u8 **pprog, int len)
325 {
326 u8 *prog = *pprog;
327 int i, noplen;
328
329 while (len > 0) {
330 noplen = len;
331
332 if (noplen > ASM_NOP_MAX)
333 noplen = ASM_NOP_MAX;
334
335 for (i = 0; i < noplen; i++)
336 EMIT1(x86_nops[noplen][i]);
337 len -= noplen;
338 }
339
340 *pprog = prog;
341 }
342
343 /*
344 * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
345 * in arch/x86/kernel/alternative.c
346 */
347
348 static void emit_fineibt(u8 **pprog, u32 hash)
349 {
350 u8 *prog = *pprog;
351
352 EMIT_ENDBR();
353 EMIT3_off32(0x41, 0x81, 0xea, hash); /* subl $hash, %r10d */
354 EMIT2(0x74, 0x07); /* jz.d8 +7 */
355 EMIT2(0x0f, 0x0b); /* ud2 */
356 EMIT1(0x90); /* nop */
357 EMIT_ENDBR_POISON();
358
359 *pprog = prog;
360 }
361
362 static void emit_kcfi(u8 **pprog, u32 hash)
363 {
364 u8 *prog = *pprog;
365
366 EMIT1_off32(0xb8, hash); /* movl $hash, %eax */
367 #ifdef CONFIG_CALL_PADDING
368 EMIT1(0x90);
369 EMIT1(0x90);
370 EMIT1(0x90);
371 EMIT1(0x90);
372 EMIT1(0x90);
373 EMIT1(0x90);
374 EMIT1(0x90);
375 EMIT1(0x90);
376 EMIT1(0x90);
377 EMIT1(0x90);
378 EMIT1(0x90);
379 #endif
380 EMIT_ENDBR();
381
382 *pprog = prog;
383 }
384
385 static void emit_cfi(u8 **pprog, u32 hash)
386 {
387 u8 *prog = *pprog;
388
389 switch (cfi_mode) {
390 case CFI_FINEIBT:
391 emit_fineibt(&prog, hash);
392 break;
393
394 case CFI_KCFI:
395 emit_kcfi(&prog, hash);
396 break;
397
398 default:
399 EMIT_ENDBR();
400 break;
401 }
402
403 *pprog = prog;
404 }
405
406 /*
407 * Emit x86-64 prologue code for BPF program.
408 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
409 * while jumping to another program
410 */
411 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
412 bool tail_call_reachable, bool is_subprog,
413 bool is_exception_cb)
414 {
415 u8 *prog = *pprog;
416
417 emit_cfi(&prog, is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash);
418 /* BPF trampoline can be made to work without these nops,
419 * but let's waste 5 bytes for now and optimize later
420 */
421 emit_nops(&prog, X86_PATCH_SIZE);
422 if (!ebpf_from_cbpf) {
423 if (tail_call_reachable && !is_subprog)
424 /* When it's the entry of the whole tailcall context,
425 * zeroing rax means initialising tail_call_cnt.
426 */
427 EMIT2(0x31, 0xC0); /* xor eax, eax */
428 else
429 /* Keep the same instruction layout. */
430 EMIT2(0x66, 0x90); /* nop2 */
431 }
432 /* Exception callback receives FP as third parameter */
433 if (is_exception_cb) {
434 EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
435 EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
436 /* The main frame must have exception_boundary as true, so we
437 * first restore those callee-saved regs from stack, before
438 * reusing the stack frame.
439 */
440 pop_callee_regs(&prog, all_callee_regs_used);
441 pop_r12(&prog);
442 /* Reset the stack frame. */
443 EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
444 } else {
445 EMIT1(0x55); /* push rbp */
446 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
447 }
448
449 /* X86_TAIL_CALL_OFFSET is here */
450 EMIT_ENDBR();
451
452 /* sub rsp, rounded_stack_depth */
453 if (stack_depth)
454 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
455 if (tail_call_reachable)
456 EMIT1(0x50); /* push rax */
457 *pprog = prog;
458 }
459
460 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
461 {
462 u8 *prog = *pprog;
463 s64 offset;
464
465 offset = func - (ip + X86_PATCH_SIZE);
466 if (!is_simm32(offset)) {
467 pr_err("Target call %p is out of range\n", func);
468 return -ERANGE;
469 }
470 EMIT1_off32(opcode, offset);
471 *pprog = prog;
472 return 0;
473 }
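/*
 * Example (illustrative): a call emitted at ip targeting func is encoded as
 * E8 <rel32> with rel32 = func - (ip + 5); the displacement is relative to
 * the first byte after the 5-byte instruction, hence the X86_PATCH_SIZE
 * adjustment above.
 */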
474
475 static int emit_call(u8 **pprog, void *func, void *ip)
476 {
477 return emit_patch(pprog, func, ip, 0xE8);
478 }
479
480 static int emit_rsb_call(u8 **pprog, void *func, void *ip)
481 {
482 OPTIMIZER_HIDE_VAR(func);
483 ip += x86_call_depth_emit_accounting(pprog, func, ip);
484 return emit_patch(pprog, func, ip, 0xE8);
485 }
486
487 static int emit_jump(u8 **pprog, void *func, void *ip)
488 {
489 return emit_patch(pprog, func, ip, 0xE9);
490 }
491
492 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
493 void *old_addr, void *new_addr)
494 {
495 const u8 *nop_insn = x86_nops[5];
496 u8 old_insn[X86_PATCH_SIZE];
497 u8 new_insn[X86_PATCH_SIZE];
498 u8 *prog;
499 int ret;
500
501 memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
502 if (old_addr) {
503 prog = old_insn;
504 ret = t == BPF_MOD_CALL ?
505 emit_call(&prog, old_addr, ip) :
506 emit_jump(&prog, old_addr, ip);
507 if (ret)
508 return ret;
509 }
510
511 memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
512 if (new_addr) {
513 prog = new_insn;
514 ret = t == BPF_MOD_CALL ?
515 emit_call(&prog, new_addr, ip) :
516 emit_jump(&prog, new_addr, ip);
517 if (ret)
518 return ret;
519 }
520
521 ret = -EBUSY;
522 mutex_lock(&text_mutex);
523 if (memcmp(ip, old_insn, X86_PATCH_SIZE))
524 goto out;
525 ret = 1;
526 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
527 text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
528 ret = 0;
529 }
530 out:
531 mutex_unlock(&text_mutex);
532 return ret;
533 }
534
535 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
536 void *old_addr, void *new_addr)
537 {
538 if (!is_kernel_text((long)ip) &&
539 !is_bpf_text_address((long)ip))
540 /* BPF poking in modules is not supported */
541 return -EINVAL;
542
543 /*
544 * See emit_prologue(); for IBT builds the trampoline hook is preceded
545 * by an ENDBR instruction.
546 */
547 if (is_endbr(*(u32 *)ip))
548 ip += ENDBR_INSN_SIZE;
549
550 return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
551 }
552
553 #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8)
554
555 static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
556 {
557 u8 *prog = *pprog;
558
559 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
560 EMIT_LFENCE();
561 EMIT2(0xFF, 0xE0 + reg);
562 } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
563 OPTIMIZER_HIDE_VAR(reg);
564 if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
565 emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
566 else
567 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
568 } else {
569 EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */
570 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
571 EMIT1(0xCC); /* int3 */
572 }
573
574 *pprog = prog;
575 }
576
577 static void emit_return(u8 **pprog, u8 *ip)
578 {
579 u8 *prog = *pprog;
580
581 if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
582 emit_jump(&prog, x86_return_thunk, ip);
583 } else {
584 EMIT1(0xC3); /* ret */
585 if (IS_ENABLED(CONFIG_MITIGATION_SLS))
586 EMIT1(0xCC); /* int3 */
587 }
588
589 *pprog = prog;
590 }
591
592 /*
593 * Generate the following code:
594 *
595 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
596 * if (index >= array->map.max_entries)
597 * goto out;
598 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
599 * goto out;
600 * prog = array->ptrs[index];
601 * if (prog == NULL)
602 * goto out;
603 * goto *(prog->bpf_func + prologue_size);
604 * out:
605 */
606 static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
607 u8 **pprog, bool *callee_regs_used,
608 u32 stack_depth, u8 *ip,
609 struct jit_context *ctx)
610 {
611 int tcc_off = -4 - round_up(stack_depth, 8);
612 u8 *prog = *pprog, *start = *pprog;
613 int offset;
614
615 /*
616 * rdi - pointer to ctx
617 * rsi - pointer to bpf_array
618 * rdx - index in bpf_array
619 */
620
621 /*
622 * if (index >= array->map.max_entries)
623 * goto out;
624 */
625 EMIT2(0x89, 0xD2); /* mov edx, edx */
626 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
627 offsetof(struct bpf_array, map.max_entries));
628
629 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
630 EMIT2(X86_JBE, offset); /* jbe out */
631
632 /*
633 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
634 * goto out;
635 */
636 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
637 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
638
639 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
640 EMIT2(X86_JAE, offset); /* jae out */
641 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
642 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
643
644 /* prog = array->ptrs[index]; */
645 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
646 offsetof(struct bpf_array, ptrs));
647
648 /*
649 * if (prog == NULL)
650 * goto out;
651 */
652 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */
653
654 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
655 EMIT2(X86_JE, offset); /* je out */
656
657 if (bpf_prog->aux->exception_boundary) {
658 pop_callee_regs(&prog, all_callee_regs_used);
659 pop_r12(&prog);
660 } else {
661 pop_callee_regs(&prog, callee_regs_used);
662 if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
663 pop_r12(&prog);
664 }
665
666 EMIT1(0x58); /* pop rax */
667 if (stack_depth)
668 EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
669 round_up(stack_depth, 8));
670
671 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
672 EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */
673 offsetof(struct bpf_prog, bpf_func));
674 EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */
675 X86_TAIL_CALL_OFFSET);
676 /*
677 * Now we're ready to jump into next BPF program
678 * rdi == ctx (1st arg)
679 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
680 */
681 emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
682
683 /* out: */
684 ctx->tail_call_indirect_label = prog - start;
685 *pprog = prog;
686 }
687
688 static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
689 struct bpf_jit_poke_descriptor *poke,
690 u8 **pprog, u8 *ip,
691 bool *callee_regs_used, u32 stack_depth,
692 struct jit_context *ctx)
693 {
694 int tcc_off = -4 - round_up(stack_depth, 8);
695 u8 *prog = *pprog, *start = *pprog;
696 int offset;
697
698 /*
699 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
700 * goto out;
701 */
702 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
703 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
704
705 offset = ctx->tail_call_direct_label - (prog + 2 - start);
706 EMIT2(X86_JAE, offset); /* jae out */
707 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
708 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
709
710 poke->tailcall_bypass = ip + (prog - start);
711 poke->adj_off = X86_TAIL_CALL_OFFSET;
712 poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
713 poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
714
715 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
716 poke->tailcall_bypass);
717
718 if (bpf_prog->aux->exception_boundary) {
719 pop_callee_regs(&prog, all_callee_regs_used);
720 pop_r12(&prog);
721 } else {
722 pop_callee_regs(&prog, callee_regs_used);
723 if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
724 pop_r12(&prog);
725 }
726
727 EMIT1(0x58); /* pop rax */
728 if (stack_depth)
729 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
730
731 emit_nops(&prog, X86_PATCH_SIZE);
732
733 /* out: */
734 ctx->tail_call_direct_label = prog - start;
735
736 *pprog = prog;
737 }
738
739 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
740 {
741 struct bpf_jit_poke_descriptor *poke;
742 struct bpf_array *array;
743 struct bpf_prog *target;
744 int i, ret;
745
746 for (i = 0; i < prog->aux->size_poke_tab; i++) {
747 poke = &prog->aux->poke_tab[i];
748 if (poke->aux && poke->aux != prog->aux)
749 continue;
750
751 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
752
753 if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
754 continue;
755
756 array = container_of(poke->tail_call.map, struct bpf_array, map);
757 mutex_lock(&array->aux->poke_mutex);
758 target = array->ptrs[poke->tail_call.key];
759 if (target) {
760 ret = __bpf_arch_text_poke(poke->tailcall_target,
761 BPF_MOD_JUMP, NULL,
762 (u8 *)target->bpf_func +
763 poke->adj_off);
764 BUG_ON(ret < 0);
765 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
766 BPF_MOD_JUMP,
767 (u8 *)poke->tailcall_target +
768 X86_PATCH_SIZE, NULL);
769 BUG_ON(ret < 0);
770 }
771 WRITE_ONCE(poke->tailcall_target_stable, true);
772 mutex_unlock(&array->aux->poke_mutex);
773 }
774 }
775
776 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
777 u32 dst_reg, const u32 imm32)
778 {
779 u8 *prog = *pprog;
780 u8 b1, b2, b3;
781
782 /*
783 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
784 * (which zero-extends imm32) to save 2 bytes.
785 */
786 if (sign_propagate && (s32)imm32 < 0) {
787 /* 'mov %rax, imm32' sign extends imm32 */
788 b1 = add_1mod(0x48, dst_reg);
789 b2 = 0xC7;
790 b3 = 0xC0;
791 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
792 goto done;
793 }
794
795 /*
796 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
797 * to save 3 bytes.
798 */
799 if (imm32 == 0) {
800 if (is_ereg(dst_reg))
801 EMIT1(add_2mod(0x40, dst_reg, dst_reg));
802 b2 = 0x31; /* xor */
803 b3 = 0xC0;
804 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
805 goto done;
806 }
807
808 /* mov %eax, imm32 */
809 if (is_ereg(dst_reg))
810 EMIT1(add_1mod(0x40, dst_reg));
811 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
812 done:
813 *pprog = prog;
814 }
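/*
 * Size comparison (illustrative): loading 0 into rax is 31 C0
 * ('xor eax, eax', 2 bytes), a positive imm32 is B8 <imm32> (5 bytes), and
 * a negative imm32 with sign propagation is 48 C7 C0 <imm32> (7 bytes).
 */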
815
816 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
817 const u32 imm32_hi, const u32 imm32_lo)
818 {
819 u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo;
820 u8 *prog = *pprog;
821
822 if (is_uimm32(imm64)) {
823 /*
824 * For emitting a plain u32, where the sign bit must not be
825 * propagated, LLVM tends to load an imm64 rather than using
826 * mov32 directly, so save a couple of bytes by just doing
827 * 'mov %eax, imm32' instead.
828 */
829 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
830 } else if (is_simm32(imm64)) {
831 emit_mov_imm32(&prog, true, dst_reg, imm32_lo);
832 } else {
833 /* movabsq rax, imm64 */
834 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
835 EMIT(imm32_lo, 4);
836 EMIT(imm32_hi, 4);
837 }
838
839 *pprog = prog;
840 }
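/*
 * Illustrative encodings: a constant that is neither a zero-extended u32
 * nor a sign-extended s32 needs the full 10-byte 'movabs', e.g.
 * 48 B8 <imm64> for rax; anything smaller falls back to the shorter forms
 * handled by emit_mov_imm32() above.
 */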
841
842 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
843 {
844 u8 *prog = *pprog;
845
846 if (is64) {
847 /* mov dst, src */
848 EMIT_mov(dst_reg, src_reg);
849 } else {
850 /* mov32 dst, src */
851 if (is_ereg(dst_reg) || is_ereg(src_reg))
852 EMIT1(add_2mod(0x40, dst_reg, src_reg));
853 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
854 }
855
856 *pprog = prog;
857 }
858
859 static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
860 u32 src_reg)
861 {
862 u8 *prog = *pprog;
863
864 if (is64) {
865 /* movs[b,w,l]q dst, src */
866 if (num_bits == 8)
867 EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
868 add_2reg(0xC0, src_reg, dst_reg));
869 else if (num_bits == 16)
870 EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
871 add_2reg(0xC0, src_reg, dst_reg));
872 else if (num_bits == 32)
873 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
874 add_2reg(0xC0, src_reg, dst_reg));
875 } else {
876 /* movs[b,w]l dst, src */
877 if (num_bits == 8) {
878 EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
879 add_2reg(0xC0, src_reg, dst_reg));
880 } else if (num_bits == 16) {
881 if (is_ereg(dst_reg) || is_ereg(src_reg))
882 EMIT1(add_2mod(0x40, src_reg, dst_reg));
883 EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
884 add_2reg(0xC0, src_reg, dst_reg));
885 }
886 }
887
888 *pprog = prog;
889 }
890
891 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
892 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
893 {
894 u8 *prog = *pprog;
895
896 if (is_imm8(off)) {
897 /* 1-byte signed displacement.
898 *
899 * If off == 0 we could skip this and save one extra byte, but the
900 * special case of x86 R13, which always needs an offset, is not
901 * worth the hassle.
902 */
903 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
904 } else {
905 /* 4-byte signed displacement */
906 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
907 }
908 *pprog = prog;
909 }
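/*
 * Example (illustrative): for 'mov rbx, qword ptr [rax + 0x14]' the opcode
 * bytes 48 8B are followed by ModRM 0x58 (mod=01, reg=rbx, rm=rax) and the
 * disp8 0x14 emitted here; offsets outside the s8 range use mod=10 with a
 * disp32 instead.
 */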
910
911 static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off)
912 {
913 u8 *prog = *pprog;
914
915 if (is_imm8(off)) {
916 EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
917 } else {
918 EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
919 }
920 *pprog = prog;
921 }
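/*
 * Example (illustrative): with r12 as the index register,
 * 'mov rax, qword ptr [rax + r12 + 0x10]' is 4A 8B 44 20 10: REX 0x4A
 * (W=1, X=1 for r12), opcode 8B, ModRM 0x44 (mod=01, rm=100 selects a SIB
 * byte), SIB 0x20 (scale=1, index=r12, base=rax) and the disp8 emitted here.
 */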
922
923 /*
924 * Emit a REX byte if it will be necessary to address these registers
925 */
926 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
927 {
928 u8 *prog = *pprog;
929
930 if (is64)
931 EMIT1(add_2mod(0x48, dst_reg, src_reg));
932 else if (is_ereg(dst_reg) || is_ereg(src_reg))
933 EMIT1(add_2mod(0x40, dst_reg, src_reg));
934 *pprog = prog;
935 }
936
937 /*
938 * Similar version of maybe_emit_mod() for a single register
939 */
940 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
941 {
942 u8 *prog = *pprog;
943
944 if (is64)
945 EMIT1(add_1mod(0x48, reg));
946 else if (is_ereg(reg))
947 EMIT1(add_1mod(0x40, reg));
948 *pprog = prog;
949 }
950
951 /* LDX: dst_reg = *(u8*)(src_reg + off) */
952 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
953 {
954 u8 *prog = *pprog;
955
956 switch (size) {
957 case BPF_B:
958 /* Emit 'movzx rax, byte ptr [rax + off]' */
959 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
960 break;
961 case BPF_H:
962 /* Emit 'movzx rax, word ptr [rax + off]' */
963 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
964 break;
965 case BPF_W:
966 /* Emit 'mov eax, dword ptr [rax+0x14]' */
967 if (is_ereg(dst_reg) || is_ereg(src_reg))
968 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
969 else
970 EMIT1(0x8B);
971 break;
972 case BPF_DW:
973 /* Emit 'mov rax, qword ptr [rax+0x14]' */
974 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
975 break;
976 }
977 emit_insn_suffix(&prog, src_reg, dst_reg, off);
978 *pprog = prog;
979 }
980
981 /* LDSX: dst_reg = *(s8*)(src_reg + off) */
982 static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
983 {
984 u8 *prog = *pprog;
985
986 switch (size) {
987 case BPF_B:
988 /* Emit 'movsx rax, byte ptr [rax + off]' */
989 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
990 break;
991 case BPF_H:
992 /* Emit 'movsx rax, word ptr [rax + off]' */
993 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
994 break;
995 case BPF_W:
996 /* Emit 'movsx rax, dword ptr [rax+0x14]' */
997 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
998 break;
999 }
1000 emit_insn_suffix(&prog, src_reg, dst_reg, off);
1001 *pprog = prog;
1002 }
1003
1004 static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1005 {
1006 u8 *prog = *pprog;
1007
1008 switch (size) {
1009 case BPF_B:
1010 /* movzx rax, byte ptr [rax + r12 + off] */
1011 EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6);
1012 break;
1013 case BPF_H:
1014 /* movzx rax, word ptr [rax + r12 + off] */
1015 EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7);
1016 break;
1017 case BPF_W:
1018 /* mov eax, dword ptr [rax + r12 + off] */
1019 EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B);
1020 break;
1021 case BPF_DW:
1022 /* mov rax, qword ptr [rax + r12 + off] */
1023 EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B);
1024 break;
1025 }
1026 emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
1027 *pprog = prog;
1028 }
1029
1030 static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1031 {
1032 emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
1033 }
1034
1035 /* STX: *(u8*)(dst_reg + off) = src_reg */
1036 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1037 {
1038 u8 *prog = *pprog;
1039
1040 switch (size) {
1041 case BPF_B:
1042 /* Emit 'mov byte ptr [rax + off], al' */
1043 if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
1044 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
1045 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
1046 else
1047 EMIT1(0x88);
1048 break;
1049 case BPF_H:
1050 if (is_ereg(dst_reg) || is_ereg(src_reg))
1051 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
1052 else
1053 EMIT2(0x66, 0x89);
1054 break;
1055 case BPF_W:
1056 if (is_ereg(dst_reg) || is_ereg(src_reg))
1057 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
1058 else
1059 EMIT1(0x89);
1060 break;
1061 case BPF_DW:
1062 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
1063 break;
1064 }
1065 emit_insn_suffix(&prog, dst_reg, src_reg, off);
1066 *pprog = prog;
1067 }
1068
1069 /* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */
1070 static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1071 {
1072 u8 *prog = *pprog;
1073
1074 switch (size) {
1075 case BPF_B:
1076 /* mov byte ptr [rax + r12 + off], al */
1077 EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88);
1078 break;
1079 case BPF_H:
1080 /* mov word ptr [rax + r12 + off], ax */
1081 EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
1082 break;
1083 case BPF_W:
1084 /* mov dword ptr [rax + r12 + off], eax */
1085 EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
1086 break;
1087 case BPF_DW:
1088 /* mov qword ptr [rax + r12 + off], rax */
1089 EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89);
1090 break;
1091 }
1092 emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1093 *pprog = prog;
1094 }
1095
1096 static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1097 {
1098 emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
1099 }
1100
1101 /* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */
1102 static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm)
1103 {
1104 u8 *prog = *pprog;
1105
1106 switch (size) {
1107 case BPF_B:
1108 /* mov byte ptr [rax + r12 + off], imm8 */
1109 EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6);
1110 break;
1111 case BPF_H:
1112 /* mov word ptr [rax + r12 + off], imm16 */
1113 EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
1114 break;
1115 case BPF_W:
1116 /* mov dword ptr [rax + r12 + off], imm32 */
1117 EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
1118 break;
1119 case BPF_DW:
1120 /* mov qword ptr [rax + r12 + off], imm32 */
1121 EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7);
1122 break;
1123 }
1124 emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off);
1125 EMIT(imm, bpf_size_to_x86_bytes(size));
1126 *pprog = prog;
1127 }
1128
1129 static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
1130 {
1131 emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
1132 }
1133
1134 static int emit_atomic(u8 **pprog, u8 atomic_op,
1135 u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
1136 {
1137 u8 *prog = *pprog;
1138
1139 EMIT1(0xF0); /* lock prefix */
1140
1141 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
1142
1143 /* emit opcode */
1144 switch (atomic_op) {
1145 case BPF_ADD:
1146 case BPF_AND:
1147 case BPF_OR:
1148 case BPF_XOR:
1149 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
1150 EMIT1(simple_alu_opcodes[atomic_op]);
1151 break;
1152 case BPF_ADD | BPF_FETCH:
1153 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
1154 EMIT2(0x0F, 0xC1);
1155 break;
1156 case BPF_XCHG:
1157 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
1158 EMIT1(0x87);
1159 break;
1160 case BPF_CMPXCHG:
1161 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
1162 EMIT2(0x0F, 0xB1);
1163 break;
1164 default:
1165 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1166 return -EFAULT;
1167 }
1168
1169 emit_insn_suffix(&prog, dst_reg, src_reg, off);
1170
1171 *pprog = prog;
1172 return 0;
1173 }
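/*
 * Example (illustrative): a 64-bit BPF_ADD | BPF_FETCH on [rdi + 8] with
 * rsi as the source becomes F0 48 0F C1 77 08, i.e.
 * 'lock xadd qword ptr [rdi + 8], rsi': lock prefix, REX.W, the 0F C1 xadd
 * opcode, then the ModRM/disp8 suffix emitted by emit_insn_suffix().
 */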
1174
1175 static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
1176 u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1177 {
1178 u8 *prog = *pprog;
1179
1180 EMIT1(0xF0); /* lock prefix */
1181 switch (size) {
1182 case BPF_W:
1183 EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg));
1184 break;
1185 case BPF_DW:
1186 EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
1187 break;
1188 default:
1189 pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n");
1190 return -EFAULT;
1191 }
1192
1193 /* emit opcode */
1194 switch (atomic_op) {
1195 case BPF_ADD:
1196 case BPF_AND:
1197 case BPF_OR:
1198 case BPF_XOR:
1199 /* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */
1200 EMIT1(simple_alu_opcodes[atomic_op]);
1201 break;
1202 case BPF_ADD | BPF_FETCH:
1203 /* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */
1204 EMIT2(0x0F, 0xC1);
1205 break;
1206 case BPF_XCHG:
1207 /* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */
1208 EMIT1(0x87);
1209 break;
1210 case BPF_CMPXCHG:
1211 /* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */
1212 EMIT2(0x0F, 0xB1);
1213 break;
1214 default:
1215 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1216 return -EFAULT;
1217 }
1218 emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1219 *pprog = prog;
1220 return 0;
1221 }
1222
1223 #define DONT_CLEAR 1
1224
1225 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
1226 {
1227 u32 reg = x->fixup >> 8;
1228
1229 /* jump over faulting load and clear dest register */
1230 if (reg != DONT_CLEAR)
1231 *(unsigned long *)((void *)regs + reg) = 0;
1232 regs->ip += x->fixup & 0xff;
1233 return true;
1234 }
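/*
 * The fixup word is packed by the JIT below: the low 8 bits hold the length
 * of the faulting instruction (so the handler can skip it) and the upper
 * bits hold either the pt_regs offset of the destination register to clear
 * or DONT_CLEAR for stores. Illustrative example: a 4-byte load into rbx
 * stores 4 | (offsetof(struct pt_regs, bx) << 8).
 */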
1235
1236 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
1237 bool *regs_used)
1238 {
1239 int i;
1240
1241 for (i = 1; i <= insn_cnt; i++, insn++) {
1242 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
1243 regs_used[0] = true;
1244 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
1245 regs_used[1] = true;
1246 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
1247 regs_used[2] = true;
1248 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
1249 regs_used[3] = true;
1250 }
1251 }
1252
1253 /* emit the 3-byte VEX prefix
1254 *
1255 * r: same as rex.r, extra bit for ModRM reg field
1256 * x: same as rex.x, extra bit for SIB index field
1257 * b: same as rex.b, extra bit for ModRM r/m, or SIB base
1258 * m: opcode map select, encoding escape bytes e.g. 0x0f38
1259 * w: same as rex.w (32 bit or 64 bit) or opcode specific
1260 * src_reg2: additional source reg (encoded as BPF reg)
1261 * l: vector length (128 bit or 256 bit) or reserved
1262 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
1263 */
1264 static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
1265 bool w, u8 src_reg2, bool l, u8 pp)
1266 {
1267 u8 *prog = *pprog;
1268 const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
1269 u8 b1, b2;
1270 u8 vvvv = reg2hex[src_reg2];
1271
1272 /* reg2hex gives only the lower 3 bits of vvvv */
1273 if (is_ereg(src_reg2))
1274 vvvv |= 1 << 3;
1275
1276 /*
1277 * 2nd byte of 3-byte VEX prefix
1278 * ~ means bit inverted encoding
1279 *
1280 * 7 0
1281 * +---+---+---+---+---+---+---+---+
1282 * |~R |~X |~B | m |
1283 * +---+---+---+---+---+---+---+---+
1284 */
1285 b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
1286 /*
1287 * 3rd byte of 3-byte VEX prefix
1288 *
1289 * 7 0
1290 * +---+---+---+---+---+---+---+---+
1291 * | W | ~vvvv | L | pp |
1292 * +---+---+---+---+---+---+---+---+
1293 */
1294 b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
1295
1296 EMIT3(b0, b1, b2);
1297 *pprog = prog;
1298 }
1299
1300 /* emit BMI2 shift instruction */
1301 static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
1302 {
1303 u8 *prog = *pprog;
1304 bool r = is_ereg(dst_reg);
1305 u8 m = 2; /* escape code 0f38 */
1306
1307 emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
1308 EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
1309 *pprog = prog;
1310 }
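/*
 * Example (illustrative): emit_shiftx() for a 64-bit BPF_LSH with dst=rax
 * and src=rsi produces C4 E2 C9 F7 C0, i.e. 'shlx rax, rax, rsi': the
 * 3-byte VEX prefix selects map 0F38, W=1, vvvv=rsi and the 0x66 prefix
 * (pp=1), followed by opcode F7 and ModRM C0 (both reg and r/m are rax).
 */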
1311
1312 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
1313
1314 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
1315 #define RESTORE_TAIL_CALL_CNT(stack) \
1316 EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
1317
1318 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
1319 int oldproglen, struct jit_context *ctx, bool jmp_padding)
1320 {
1321 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
1322 struct bpf_insn *insn = bpf_prog->insnsi;
1323 bool callee_regs_used[4] = {};
1324 int insn_cnt = bpf_prog->len;
1325 bool seen_exit = false;
1326 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
1327 u64 arena_vm_start, user_vm_start;
1328 int i, excnt = 0;
1329 int ilen, proglen = 0;
1330 u8 *prog = temp;
1331 int err;
1332
1333 arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
1334 user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);
1335
1336 detect_reg_usage(insn, insn_cnt, callee_regs_used);
1337
1338 emit_prologue(&prog, bpf_prog->aux->stack_depth,
1339 bpf_prog_was_classic(bpf_prog), tail_call_reachable,
1340 bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
1341 /* Exception callback will clobber callee regs for its own use, and
1342 * restore the original callee regs from main prog's stack frame.
1343 */
1344 if (bpf_prog->aux->exception_boundary) {
1345 /* We also need to save r12, which is not mapped to any BPF
1346 * register, as we throw after entry into the kernel, which may
1347 * overwrite r12.
1348 */
1349 push_r12(&prog);
1350 push_callee_regs(&prog, all_callee_regs_used);
1351 } else {
1352 if (arena_vm_start)
1353 push_r12(&prog);
1354 push_callee_regs(&prog, callee_regs_used);
1355 }
1356 if (arena_vm_start)
1357 emit_mov_imm64(&prog, X86_REG_R12,
1358 arena_vm_start >> 32, (u32) arena_vm_start);
1359
1360 ilen = prog - temp;
1361 if (rw_image)
1362 memcpy(rw_image + proglen, temp, ilen);
1363 proglen += ilen;
1364 addrs[0] = proglen;
1365 prog = temp;
1366
1367 for (i = 1; i <= insn_cnt; i++, insn++) {
1368 const s32 imm32 = insn->imm;
1369 u32 dst_reg = insn->dst_reg;
1370 u32 src_reg = insn->src_reg;
1371 u8 b2 = 0, b3 = 0;
1372 u8 *start_of_ldx;
1373 s64 jmp_offset;
1374 s16 insn_off;
1375 u8 jmp_cond;
1376 u8 *func;
1377 int nops;
1378
1379 switch (insn->code) {
1380 /* ALU */
1381 case BPF_ALU | BPF_ADD | BPF_X:
1382 case BPF_ALU | BPF_SUB | BPF_X:
1383 case BPF_ALU | BPF_AND | BPF_X:
1384 case BPF_ALU | BPF_OR | BPF_X:
1385 case BPF_ALU | BPF_XOR | BPF_X:
1386 case BPF_ALU64 | BPF_ADD | BPF_X:
1387 case BPF_ALU64 | BPF_SUB | BPF_X:
1388 case BPF_ALU64 | BPF_AND | BPF_X:
1389 case BPF_ALU64 | BPF_OR | BPF_X:
1390 case BPF_ALU64 | BPF_XOR | BPF_X:
1391 maybe_emit_mod(&prog, dst_reg, src_reg,
1392 BPF_CLASS(insn->code) == BPF_ALU64);
1393 b2 = simple_alu_opcodes[BPF_OP(insn->code)];
1394 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
1395 break;
1396
1397 case BPF_ALU64 | BPF_MOV | BPF_X:
1398 if (insn_is_cast_user(insn)) {
1399 if (dst_reg != src_reg)
1400 /* 32-bit mov */
1401 emit_mov_reg(&prog, false, dst_reg, src_reg);
1402 /* shl dst_reg, 32 */
1403 maybe_emit_1mod(&prog, dst_reg, true);
1404 EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32);
1405
1406 /* or dst_reg, user_vm_start */
1407 maybe_emit_1mod(&prog, dst_reg, true);
1408 if (is_axreg(dst_reg))
1409 EMIT1_off32(0x0D, user_vm_start >> 32);
1410 else
1411 EMIT2_off32(0x81, add_1reg(0xC8, dst_reg), user_vm_start >> 32);
1412
1413 /* rol dst_reg, 32 */
1414 maybe_emit_1mod(&prog, dst_reg, true);
1415 EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32);
1416
1417 /* xor r11, r11 */
1418 EMIT3(0x4D, 0x31, 0xDB);
1419
1420 /* test dst_reg32, dst_reg32; check if the lower 32 bits are zero */
1421 maybe_emit_mod(&prog, dst_reg, dst_reg, false);
1422 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1423
1424 /* cmove r11, dst_reg; if so, set dst_reg to zero */
1425 /* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */
1426 maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
1427 EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
1428 break;
1429 } else if (insn_is_mov_percpu_addr(insn)) {
1430 /* mov <dst>, <src> (if necessary) */
1431 EMIT_mov(dst_reg, src_reg);
1432 #ifdef CONFIG_SMP
1433 /* add <dst>, gs:[<off>] */
1434 EMIT2(0x65, add_1mod(0x48, dst_reg));
1435 EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25);
1436 EMIT((u32)(unsigned long)&this_cpu_off, 4);
1437 #endif
1438 break;
1439 }
1440 fallthrough;
1441 case BPF_ALU | BPF_MOV | BPF_X:
1442 if (insn->off == 0)
1443 emit_mov_reg(&prog,
1444 BPF_CLASS(insn->code) == BPF_ALU64,
1445 dst_reg, src_reg);
1446 else
1447 emit_movsx_reg(&prog, insn->off,
1448 BPF_CLASS(insn->code) == BPF_ALU64,
1449 dst_reg, src_reg);
1450 break;
1451
1452 /* neg dst */
1453 case BPF_ALU | BPF_NEG:
1454 case BPF_ALU64 | BPF_NEG:
1455 maybe_emit_1mod(&prog, dst_reg,
1456 BPF_CLASS(insn->code) == BPF_ALU64);
1457 EMIT2(0xF7, add_1reg(0xD8, dst_reg));
1458 break;
1459
1460 case BPF_ALU | BPF_ADD | BPF_K:
1461 case BPF_ALU | BPF_SUB | BPF_K:
1462 case BPF_ALU | BPF_AND | BPF_K:
1463 case BPF_ALU | BPF_OR | BPF_K:
1464 case BPF_ALU | BPF_XOR | BPF_K:
1465 case BPF_ALU64 | BPF_ADD | BPF_K:
1466 case BPF_ALU64 | BPF_SUB | BPF_K:
1467 case BPF_ALU64 | BPF_AND | BPF_K:
1468 case BPF_ALU64 | BPF_OR | BPF_K:
1469 case BPF_ALU64 | BPF_XOR | BPF_K:
1470 maybe_emit_1mod(&prog, dst_reg,
1471 BPF_CLASS(insn->code) == BPF_ALU64);
1472
1473 /*
1474 * b3 holds 'normal' opcode, b2 short form only valid
1475 * in case dst is eax/rax.
1476 */
1477 switch (BPF_OP(insn->code)) {
1478 case BPF_ADD:
1479 b3 = 0xC0;
1480 b2 = 0x05;
1481 break;
1482 case BPF_SUB:
1483 b3 = 0xE8;
1484 b2 = 0x2D;
1485 break;
1486 case BPF_AND:
1487 b3 = 0xE0;
1488 b2 = 0x25;
1489 break;
1490 case BPF_OR:
1491 b3 = 0xC8;
1492 b2 = 0x0D;
1493 break;
1494 case BPF_XOR:
1495 b3 = 0xF0;
1496 b2 = 0x35;
1497 break;
1498 }
1499
1500 if (is_imm8(imm32))
1501 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1502 else if (is_axreg(dst_reg))
1503 EMIT1_off32(b2, imm32);
1504 else
1505 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1506 break;
1507
1508 case BPF_ALU64 | BPF_MOV | BPF_K:
1509 case BPF_ALU | BPF_MOV | BPF_K:
1510 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1511 dst_reg, imm32);
1512 break;
1513
1514 case BPF_LD | BPF_IMM | BPF_DW:
1515 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1516 insn++;
1517 i++;
1518 break;
1519
1520 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1521 case BPF_ALU | BPF_MOD | BPF_X:
1522 case BPF_ALU | BPF_DIV | BPF_X:
1523 case BPF_ALU | BPF_MOD | BPF_K:
1524 case BPF_ALU | BPF_DIV | BPF_K:
1525 case BPF_ALU64 | BPF_MOD | BPF_X:
1526 case BPF_ALU64 | BPF_DIV | BPF_X:
1527 case BPF_ALU64 | BPF_MOD | BPF_K:
1528 case BPF_ALU64 | BPF_DIV | BPF_K: {
1529 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1530
1531 if (dst_reg != BPF_REG_0)
1532 EMIT1(0x50); /* push rax */
1533 if (dst_reg != BPF_REG_3)
1534 EMIT1(0x52); /* push rdx */
1535
1536 if (BPF_SRC(insn->code) == BPF_X) {
1537 if (src_reg == BPF_REG_0 ||
1538 src_reg == BPF_REG_3) {
1539 /* mov r11, src_reg */
1540 EMIT_mov(AUX_REG, src_reg);
1541 src_reg = AUX_REG;
1542 }
1543 } else {
1544 /* mov r11, imm32 */
1545 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1546 src_reg = AUX_REG;
1547 }
1548
1549 if (dst_reg != BPF_REG_0)
1550 /* mov rax, dst_reg */
1551 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1552
1553 if (insn->off == 0) {
1554 /*
1555 * xor edx, edx
1556 * equivalent to 'xor rdx, rdx', but one byte less
1557 */
1558 EMIT2(0x31, 0xd2);
1559
1560 /* div src_reg */
1561 maybe_emit_1mod(&prog, src_reg, is64);
1562 EMIT2(0xF7, add_1reg(0xF0, src_reg));
1563 } else {
1564 if (BPF_CLASS(insn->code) == BPF_ALU)
1565 EMIT1(0x99); /* cdq */
1566 else
1567 EMIT2(0x48, 0x99); /* cqo */
1568
1569 /* idiv src_reg */
1570 maybe_emit_1mod(&prog, src_reg, is64);
1571 EMIT2(0xF7, add_1reg(0xF8, src_reg));
1572 }
1573
1574 if (BPF_OP(insn->code) == BPF_MOD &&
1575 dst_reg != BPF_REG_3)
1576 /* mov dst_reg, rdx */
1577 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1578 else if (BPF_OP(insn->code) == BPF_DIV &&
1579 dst_reg != BPF_REG_0)
1580 /* mov dst_reg, rax */
1581 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1582
1583 if (dst_reg != BPF_REG_3)
1584 EMIT1(0x5A); /* pop rdx */
1585 if (dst_reg != BPF_REG_0)
1586 EMIT1(0x58); /* pop rax */
1587 break;
1588 }
1589
1590 case BPF_ALU | BPF_MUL | BPF_K:
1591 case BPF_ALU64 | BPF_MUL | BPF_K:
1592 maybe_emit_mod(&prog, dst_reg, dst_reg,
1593 BPF_CLASS(insn->code) == BPF_ALU64);
1594
1595 if (is_imm8(imm32))
1596 /* imul dst_reg, dst_reg, imm8 */
1597 EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1598 imm32);
1599 else
1600 /* imul dst_reg, dst_reg, imm32 */
1601 EMIT2_off32(0x69,
1602 add_2reg(0xC0, dst_reg, dst_reg),
1603 imm32);
1604 break;
1605
1606 case BPF_ALU | BPF_MUL | BPF_X:
1607 case BPF_ALU64 | BPF_MUL | BPF_X:
1608 maybe_emit_mod(&prog, src_reg, dst_reg,
1609 BPF_CLASS(insn->code) == BPF_ALU64);
1610
1611 /* imul dst_reg, src_reg */
1612 EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1613 break;
1614
1615 /* Shifts */
1616 case BPF_ALU | BPF_LSH | BPF_K:
1617 case BPF_ALU | BPF_RSH | BPF_K:
1618 case BPF_ALU | BPF_ARSH | BPF_K:
1619 case BPF_ALU64 | BPF_LSH | BPF_K:
1620 case BPF_ALU64 | BPF_RSH | BPF_K:
1621 case BPF_ALU64 | BPF_ARSH | BPF_K:
1622 maybe_emit_1mod(&prog, dst_reg,
1623 BPF_CLASS(insn->code) == BPF_ALU64);
1624
1625 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1626 if (imm32 == 1)
1627 EMIT2(0xD1, add_1reg(b3, dst_reg));
1628 else
1629 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1630 break;
1631
1632 case BPF_ALU | BPF_LSH | BPF_X:
1633 case BPF_ALU | BPF_RSH | BPF_X:
1634 case BPF_ALU | BPF_ARSH | BPF_X:
1635 case BPF_ALU64 | BPF_LSH | BPF_X:
1636 case BPF_ALU64 | BPF_RSH | BPF_X:
1637 case BPF_ALU64 | BPF_ARSH | BPF_X:
1638 /* BMI2 shifts aren't better when shift count is already in rcx */
1639 if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
1640 /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
1641 bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
1642 u8 op;
1643
1644 switch (BPF_OP(insn->code)) {
1645 case BPF_LSH:
1646 op = 1; /* prefix 0x66 */
1647 break;
1648 case BPF_RSH:
1649 op = 3; /* prefix 0xf2 */
1650 break;
1651 case BPF_ARSH:
1652 op = 2; /* prefix 0xf3 */
1653 break;
1654 }
1655
1656 emit_shiftx(&prog, dst_reg, src_reg, w, op);
1657
1658 break;
1659 }
1660
1661 if (src_reg != BPF_REG_4) { /* common case */
1662 /* Check for bad case when dst_reg == rcx */
1663 if (dst_reg == BPF_REG_4) {
1664 /* mov r11, dst_reg */
1665 EMIT_mov(AUX_REG, dst_reg);
1666 dst_reg = AUX_REG;
1667 } else {
1668 EMIT1(0x51); /* push rcx */
1669 }
1670 /* mov rcx, src_reg */
1671 EMIT_mov(BPF_REG_4, src_reg);
1672 }
1673
1674 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1675 maybe_emit_1mod(&prog, dst_reg,
1676 BPF_CLASS(insn->code) == BPF_ALU64);
1677
1678 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1679 EMIT2(0xD3, add_1reg(b3, dst_reg));
1680
1681 if (src_reg != BPF_REG_4) {
1682 if (insn->dst_reg == BPF_REG_4)
1683 /* mov dst_reg, r11 */
1684 EMIT_mov(insn->dst_reg, AUX_REG);
1685 else
1686 EMIT1(0x59); /* pop rcx */
1687 }
1688
1689 break;
1690
1691 case BPF_ALU | BPF_END | BPF_FROM_BE:
1692 case BPF_ALU64 | BPF_END | BPF_FROM_LE:
1693 switch (imm32) {
1694 case 16:
1695 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
1696 EMIT1(0x66);
1697 if (is_ereg(dst_reg))
1698 EMIT1(0x41);
1699 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1700
1701 /* Emit 'movzwl eax, ax' */
1702 if (is_ereg(dst_reg))
1703 EMIT3(0x45, 0x0F, 0xB7);
1704 else
1705 EMIT2(0x0F, 0xB7);
1706 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1707 break;
1708 case 32:
1709 /* Emit 'bswap eax' to swap lower 4 bytes */
1710 if (is_ereg(dst_reg))
1711 EMIT2(0x41, 0x0F);
1712 else
1713 EMIT1(0x0F);
1714 EMIT1(add_1reg(0xC8, dst_reg));
1715 break;
1716 case 64:
1717 /* Emit 'bswap rax' to swap 8 bytes */
1718 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1719 add_1reg(0xC8, dst_reg));
1720 break;
1721 }
1722 break;
1723
1724 case BPF_ALU | BPF_END | BPF_FROM_LE:
1725 switch (imm32) {
1726 case 16:
1727 /*
1728 * Emit 'movzwl eax, ax' to zero extend 16-bit
1729 * into 64 bit
1730 */
1731 if (is_ereg(dst_reg))
1732 EMIT3(0x45, 0x0F, 0xB7);
1733 else
1734 EMIT2(0x0F, 0xB7);
1735 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1736 break;
1737 case 32:
1738 /* Emit 'mov eax, eax' to clear upper 32-bits */
1739 if (is_ereg(dst_reg))
1740 EMIT1(0x45);
1741 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1742 break;
1743 case 64:
1744 /* nop */
1745 break;
1746 }
1747 break;
1748
1749 /* speculation barrier */
1750 case BPF_ST | BPF_NOSPEC:
1751 EMIT_LFENCE();
1752 break;
1753
1754 /* ST: *(u8*)(dst_reg + off) = imm */
1755 case BPF_ST | BPF_MEM | BPF_B:
1756 if (is_ereg(dst_reg))
1757 EMIT2(0x41, 0xC6);
1758 else
1759 EMIT1(0xC6);
1760 goto st;
1761 case BPF_ST | BPF_MEM | BPF_H:
1762 if (is_ereg(dst_reg))
1763 EMIT3(0x66, 0x41, 0xC7);
1764 else
1765 EMIT2(0x66, 0xC7);
1766 goto st;
1767 case BPF_ST | BPF_MEM | BPF_W:
1768 if (is_ereg(dst_reg))
1769 EMIT2(0x41, 0xC7);
1770 else
1771 EMIT1(0xC7);
1772 goto st;
1773 case BPF_ST | BPF_MEM | BPF_DW:
1774 EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1775
1776 st: if (is_imm8(insn->off))
1777 EMIT2(add_1reg(0x40, dst_reg), insn->off);
1778 else
1779 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1780
1781 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1782 break;
1783
1784 /* STX: *(u8*)(dst_reg + off) = src_reg */
1785 case BPF_STX | BPF_MEM | BPF_B:
1786 case BPF_STX | BPF_MEM | BPF_H:
1787 case BPF_STX | BPF_MEM | BPF_W:
1788 case BPF_STX | BPF_MEM | BPF_DW:
1789 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1790 break;
1791
1792 case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
1793 case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
1794 case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
1795 case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
1796 start_of_ldx = prog;
1797 emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm);
1798 goto populate_extable;
1799
1800 /* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */
1801 case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
1802 case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
1803 case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
1804 case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
1805 case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
1806 case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
1807 case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
1808 case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
1809 start_of_ldx = prog;
1810 if (BPF_CLASS(insn->code) == BPF_LDX)
1811 emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1812 else
1813 emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1814 populate_extable:
1815 {
1816 struct exception_table_entry *ex;
1817 u8 *_insn = image + proglen + (start_of_ldx - temp);
1818 s64 delta;
1819
1820 if (!bpf_prog->aux->extable)
1821 break;
1822
1823 if (excnt >= bpf_prog->aux->num_exentries) {
1824 pr_err("mem32 extable bug\n");
1825 return -EFAULT;
1826 }
1827 ex = &bpf_prog->aux->extable[excnt++];
1828
1829 delta = _insn - (u8 *)&ex->insn;
1830 /* switch ex to rw buffer for writes */
1831 ex = (void *)rw_image + ((void *)ex - (void *)image);
1832
1833 ex->insn = delta;
1834
1835 ex->data = EX_TYPE_BPF;
1836
1837 ex->fixup = (prog - start_of_ldx) |
1838 ((BPF_CLASS(insn->code) == BPF_LDX ? reg2pt_regs[dst_reg] : DONT_CLEAR) << 8);
1839 }
1840 break;
1841
1842 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1843 case BPF_LDX | BPF_MEM | BPF_B:
1844 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1845 case BPF_LDX | BPF_MEM | BPF_H:
1846 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1847 case BPF_LDX | BPF_MEM | BPF_W:
1848 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1849 case BPF_LDX | BPF_MEM | BPF_DW:
1850 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1851 /* LDXS: dst_reg = *(s8*)(src_reg + off) */
1852 case BPF_LDX | BPF_MEMSX | BPF_B:
1853 case BPF_LDX | BPF_MEMSX | BPF_H:
1854 case BPF_LDX | BPF_MEMSX | BPF_W:
1855 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1856 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1857 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1858 insn_off = insn->off;
1859
1860 if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
1861 BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
1862 /* Conservatively check that src_reg + insn->off is a kernel address:
1863 * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
1864 * and
1865 * src_reg + insn->off < VSYSCALL_ADDR
1866 */
1867
1868 u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
1869 u8 *end_of_jmp;
1870
1871 /* movabsq r10, VSYSCALL_ADDR */
1872 emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
1873 (u32)(long)VSYSCALL_ADDR);
1874
1875 /* mov src_reg, r11 */
1876 EMIT_mov(AUX_REG, src_reg);
1877
1878 if (insn->off) {
1879 /* add r11, insn->off */
1880 maybe_emit_1mod(&prog, AUX_REG, true);
1881 EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
1882 }
1883
1884 /* sub r11, r10 */
1885 maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
1886 EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
1887
1888 /* movabsq r10, limit */
1889 emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
1890 (u32)(long)limit);
1891
1892 /* cmp r10, r11 */
1893 maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
1894 EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
1895
1896 /* if unsigned '>', goto load */
1897 EMIT2(X86_JA, 0);
1898 end_of_jmp = prog;
1899
1900 /* xor dst_reg, dst_reg */
1901 emit_mov_imm32(&prog, false, dst_reg, 0);
1902 /* jmp byte_after_ldx */
1903 EMIT2(0xEB, 0);
1904
1905 /* populate jmp_offset for JA above to jump to start_of_ldx */
1906 start_of_ldx = prog;
1907 end_of_jmp[-1] = start_of_ldx - end_of_jmp;
1908 }
1909 if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
1910 BPF_MODE(insn->code) == BPF_MEMSX)
1911 emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
1912 else
1913 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
1914 if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
1915 BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
1916 struct exception_table_entry *ex;
1917 u8 *_insn = image + proglen + (start_of_ldx - temp);
1918 s64 delta;
1919
1920 /* populate jmp_offset for JMP above */
1921 start_of_ldx[-1] = prog - start_of_ldx;
1922
1923 if (!bpf_prog->aux->extable)
1924 break;
1925
1926 if (excnt >= bpf_prog->aux->num_exentries) {
1927 pr_err("ex gen bug\n");
1928 return -EFAULT;
1929 }
1930 ex = &bpf_prog->aux->extable[excnt++];
1931
1932 delta = _insn - (u8 *)&ex->insn;
1933 if (!is_simm32(delta)) {
1934 pr_err("extable->insn doesn't fit into 32-bit\n");
1935 return -EFAULT;
1936 }
1937 /* switch ex to rw buffer for writes */
1938 ex = (void *)rw_image + ((void *)ex - (void *)image);
1939
1940 ex->insn = delta;
1941
1942 ex->data = EX_TYPE_BPF;
1943
1944 if (dst_reg > BPF_REG_9) {
1945 pr_err("verifier error\n");
1946 return -EFAULT;
1947 }
1948 /*
1949 * Compute size of x86 insn and its target dest x86 register.
1950 * ex_handler_bpf() will use lower 8 bits to adjust
1951 * pt_regs->ip to jump over this x86 instruction
1952 * and upper bits to figure out which pt_regs to zero out.
1953 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1954 * of 4 bytes will be ignored and rbx will be zero inited.
1955 */
1956 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
1957 }
1958 break;
1959
1960 case BPF_STX | BPF_ATOMIC | BPF_W:
1961 case BPF_STX | BPF_ATOMIC | BPF_DW:
1962 if (insn->imm == (BPF_AND | BPF_FETCH) ||
1963 insn->imm == (BPF_OR | BPF_FETCH) ||
1964 insn->imm == (BPF_XOR | BPF_FETCH)) {
1965 bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1966 u32 real_src_reg = src_reg;
1967 u32 real_dst_reg = dst_reg;
1968 u8 *branch_target;
1969
1970 /*
1971 * Can't be implemented with a single x86 insn.
1972 * Need to do a CMPXCHG loop.
1973 */
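/* Schematically (64-bit case, register names illustrative):
 *
 *   mov r10, rax                  ; save R0
 * again:
 *   mov rax, [real_dst + off]     ; load old value
 *   mov r11, rax
 *   and/or/xor r11, real_src      ; compute new value
 *   lock cmpxchg [real_dst + off], r11
 *   jne again                     ; lost the race, retry
 *   mov real_src, rax             ; BPF_FETCH: return old value
 *   mov rax, r10                  ; restore R0
 */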
1974
1975 /* Will need RAX as a CMPXCHG operand so save R0 */
1976 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1977 if (src_reg == BPF_REG_0)
1978 real_src_reg = BPF_REG_AX;
1979 if (dst_reg == BPF_REG_0)
1980 real_dst_reg = BPF_REG_AX;
1981
1982 branch_target = prog;
1983 /* Load old value */
1984 emit_ldx(&prog, BPF_SIZE(insn->code),
1985 BPF_REG_0, real_dst_reg, insn->off);
1986 /*
1987 * Perform the (commutative) operation locally,
1988 * put the result in the AUX_REG.
1989 */
1990 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1991 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1992 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1993 add_2reg(0xC0, AUX_REG, real_src_reg));
1994 /* Attempt to swap in new value */
1995 err = emit_atomic(&prog, BPF_CMPXCHG,
1996 real_dst_reg, AUX_REG,
1997 insn->off,
1998 BPF_SIZE(insn->code));
1999 if (WARN_ON(err))
2000 return err;
2001 /*
2002 * ZF tells us whether we won the race. If it's
2003 * cleared we need to try again.
2004 */
2005 EMIT2(X86_JNE, -(prog - branch_target) - 2);
2006 /* Return the pre-modification value */
2007 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
2008 /* Restore R0 after clobbering RAX */
2009 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
2010 break;
2011 }
2012
2013 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
2014 insn->off, BPF_SIZE(insn->code));
2015 if (err)
2016 return err;
2017 break;
2018
2019 case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
2020 case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
2021 start_of_ldx = prog;
2022 err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code),
2023 dst_reg, src_reg, X86_REG_R12, insn->off);
2024 if (err)
2025 return err;
2026 goto populate_extable;
2027
2028 /* call */
2029 case BPF_JMP | BPF_CALL: {
2030 u8 *ip = image + addrs[i - 1];
2031
2032 func = (u8 *) __bpf_call_base + imm32;
2033 if (tail_call_reachable) {
2034 RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
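/* Account for the bytes just emitted by RESTORE_TAIL_CALL_CNT so
 * that 'ip' keeps pointing at the call emitted below.
 */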
2035 ip += 7;
2036 }
2037 if (!imm32)
2038 return -EINVAL;
2039 ip += x86_call_depth_emit_accounting(&prog, func, ip);
2040 if (emit_call(&prog, func, ip))
2041 return -EINVAL;
2042 break;
2043 }
2044
2045 case BPF_JMP | BPF_TAIL_CALL:
2046 if (imm32)
2047 emit_bpf_tail_call_direct(bpf_prog,
2048 &bpf_prog->aux->poke_tab[imm32 - 1],
2049 &prog, image + addrs[i - 1],
2050 callee_regs_used,
2051 bpf_prog->aux->stack_depth,
2052 ctx);
2053 else
2054 emit_bpf_tail_call_indirect(bpf_prog,
2055 &prog,
2056 callee_regs_used,
2057 bpf_prog->aux->stack_depth,
2058 image + addrs[i - 1],
2059 ctx);
2060 break;
2061
2062 /* cond jump */
2063 case BPF_JMP | BPF_JEQ | BPF_X:
2064 case BPF_JMP | BPF_JNE | BPF_X:
2065 case BPF_JMP | BPF_JGT | BPF_X:
2066 case BPF_JMP | BPF_JLT | BPF_X:
2067 case BPF_JMP | BPF_JGE | BPF_X:
2068 case BPF_JMP | BPF_JLE | BPF_X:
2069 case BPF_JMP | BPF_JSGT | BPF_X:
2070 case BPF_JMP | BPF_JSLT | BPF_X:
2071 case BPF_JMP | BPF_JSGE | BPF_X:
2072 case BPF_JMP | BPF_JSLE | BPF_X:
2073 case BPF_JMP32 | BPF_JEQ | BPF_X:
2074 case BPF_JMP32 | BPF_JNE | BPF_X:
2075 case BPF_JMP32 | BPF_JGT | BPF_X:
2076 case BPF_JMP32 | BPF_JLT | BPF_X:
2077 case BPF_JMP32 | BPF_JGE | BPF_X:
2078 case BPF_JMP32 | BPF_JLE | BPF_X:
2079 case BPF_JMP32 | BPF_JSGT | BPF_X:
2080 case BPF_JMP32 | BPF_JSLT | BPF_X:
2081 case BPF_JMP32 | BPF_JSGE | BPF_X:
2082 case BPF_JMP32 | BPF_JSLE | BPF_X:
2083 /* cmp dst_reg, src_reg */
2084 maybe_emit_mod(&prog, dst_reg, src_reg,
2085 BPF_CLASS(insn->code) == BPF_JMP);
2086 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
2087 goto emit_cond_jmp;
2088
2089 case BPF_JMP | BPF_JSET | BPF_X:
2090 case BPF_JMP32 | BPF_JSET | BPF_X:
2091 /* test dst_reg, src_reg */
2092 maybe_emit_mod(&prog, dst_reg, src_reg,
2093 BPF_CLASS(insn->code) == BPF_JMP);
2094 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
2095 goto emit_cond_jmp;
2096
2097 case BPF_JMP | BPF_JSET | BPF_K:
2098 case BPF_JMP32 | BPF_JSET | BPF_K:
2099 /* test dst_reg, imm32 */
2100 maybe_emit_1mod(&prog, dst_reg,
2101 BPF_CLASS(insn->code) == BPF_JMP);
2102 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
2103 goto emit_cond_jmp;
2104
2105 case BPF_JMP | BPF_JEQ | BPF_K:
2106 case BPF_JMP | BPF_JNE | BPF_K:
2107 case BPF_JMP | BPF_JGT | BPF_K:
2108 case BPF_JMP | BPF_JLT | BPF_K:
2109 case BPF_JMP | BPF_JGE | BPF_K:
2110 case BPF_JMP | BPF_JLE | BPF_K:
2111 case BPF_JMP | BPF_JSGT | BPF_K:
2112 case BPF_JMP | BPF_JSLT | BPF_K:
2113 case BPF_JMP | BPF_JSGE | BPF_K:
2114 case BPF_JMP | BPF_JSLE | BPF_K:
2115 case BPF_JMP32 | BPF_JEQ | BPF_K:
2116 case BPF_JMP32 | BPF_JNE | BPF_K:
2117 case BPF_JMP32 | BPF_JGT | BPF_K:
2118 case BPF_JMP32 | BPF_JLT | BPF_K:
2119 case BPF_JMP32 | BPF_JGE | BPF_K:
2120 case BPF_JMP32 | BPF_JLE | BPF_K:
2121 case BPF_JMP32 | BPF_JSGT | BPF_K:
2122 case BPF_JMP32 | BPF_JSLT | BPF_K:
2123 case BPF_JMP32 | BPF_JSGE | BPF_K:
2124 case BPF_JMP32 | BPF_JSLE | BPF_K:
2125 /* test dst_reg, dst_reg to save one extra byte */
2126 if (imm32 == 0) {
2127 maybe_emit_mod(&prog, dst_reg, dst_reg,
2128 BPF_CLASS(insn->code) == BPF_JMP);
2129 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
2130 goto emit_cond_jmp;
2131 }
2132
2133 /* cmp dst_reg, imm8/32 */
2134 maybe_emit_1mod(&prog, dst_reg,
2135 BPF_CLASS(insn->code) == BPF_JMP);
2136
2137 if (is_imm8(imm32))
2138 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
2139 else
2140 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
2141
2142 emit_cond_jmp: /* Convert BPF opcode to x86 */
2143 switch (BPF_OP(insn->code)) {
2144 case BPF_JEQ:
2145 jmp_cond = X86_JE;
2146 break;
2147 case BPF_JSET:
2148 case BPF_JNE:
2149 jmp_cond = X86_JNE;
2150 break;
2151 case BPF_JGT:
2152 /* GT is unsigned '>', JA in x86 */
2153 jmp_cond = X86_JA;
2154 break;
2155 case BPF_JLT:
2156 /* LT is unsigned '<', JB in x86 */
2157 jmp_cond = X86_JB;
2158 break;
2159 case BPF_JGE:
2160 /* GE is unsigned '>=', JAE in x86 */
2161 jmp_cond = X86_JAE;
2162 break;
2163 case BPF_JLE:
2164 /* LE is unsigned '<=', JBE in x86 */
2165 jmp_cond = X86_JBE;
2166 break;
2167 case BPF_JSGT:
2168 /* Signed '>', GT in x86 */
2169 jmp_cond = X86_JG;
2170 break;
2171 case BPF_JSLT:
2172 /* Signed '<', LT in x86 */
2173 jmp_cond = X86_JL;
2174 break;
2175 case BPF_JSGE:
2176 /* Signed '>=', GE in x86 */
2177 jmp_cond = X86_JGE;
2178 break;
2179 case BPF_JSLE:
2180 /* Signed '<=', LE in x86 */
2181 jmp_cond = X86_JLE;
2182 break;
2183 default: /* to silence GCC warning */
2184 return -EFAULT;
2185 }
2186 jmp_offset = addrs[i + insn->off] - addrs[i];
2187 if (is_imm8(jmp_offset)) {
2188 if (jmp_padding) {
2189 /* To keep the jmp_offset valid, the extra bytes are
2190 * padded before the jump insn, so we subtract the
2191 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
2192 *
2193 * If the previous pass already emits an imm8
2194 * jmp_cond, then this BPF insn won't shrink, so
2195 * "nops" is 0.
2196 *
2197 * On the other hand, if the previous pass emits an
2198 * imm32 jmp_cond, the extra 4 bytes(*) are padded to
2199 * keep the image from shrinking further.
2200 *
2201 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
2202 * is 2 bytes, so the size difference is 4 bytes.
2203 */
2204 nops = INSN_SZ_DIFF - 2;
2205 if (nops != 0 && nops != 4) {
2206 pr_err("unexpected jmp_cond padding: %d bytes\n",
2207 nops);
2208 return -EFAULT;
2209 }
2210 emit_nops(&prog, nops);
2211 }
2212 EMIT2(jmp_cond, jmp_offset);
2213 } else if (is_simm32(jmp_offset)) {
2214 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
2215 } else {
2216 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
2217 return -EFAULT;
2218 }
2219
2220 break;
2221
2222 case BPF_JMP | BPF_JA:
2223 case BPF_JMP32 | BPF_JA:
2224 if (BPF_CLASS(insn->code) == BPF_JMP) {
2225 if (insn->off == -1)
2226 /* -1 jmp instructions will always jump
2227 * backwards two bytes. Explicitly handling
2228 * this case avoids wasting too many passes
2229 * when there are long sequences of replaced
2230 * dead code.
2231 */
2232 jmp_offset = -2;
2233 else
2234 jmp_offset = addrs[i + insn->off] - addrs[i];
2235 } else {
2236 if (insn->imm == -1)
2237 jmp_offset = -2;
2238 else
2239 jmp_offset = addrs[i + insn->imm] - addrs[i];
2240 }
2241
2242 if (!jmp_offset) {
2243 /*
2244 * If jmp_padding is enabled, the extra nops will
2245 * be inserted. Otherwise, optimize out nop jumps.
2246 */
2247 if (jmp_padding) {
2248 /* There are 3 possible conditions.
2249 * (1) This BPF_JA is already optimized out in
2250 * the previous run, so there is no need
2251 * to pad any extra byte (0 byte).
2252 * (2) The previous pass emits an imm8 jmp,
2253 * so we pad 2 bytes to match the previous
2254 * insn size.
2255 * (3) Similarly, the previous pass emits an
2256 * imm32 jmp, and 5 bytes is padded.
2257 */
2258 nops = INSN_SZ_DIFF;
2259 if (nops != 0 && nops != 2 && nops != 5) {
2260 pr_err("unexpected nop jump padding: %d bytes\n",
2261 nops);
2262 return -EFAULT;
2263 }
2264 emit_nops(&prog, nops);
2265 }
2266 break;
2267 }
2268 emit_jmp:
2269 if (is_imm8(jmp_offset)) {
2270 if (jmp_padding) {
2271 /* To avoid breaking jmp_offset, the extra bytes
2272 * are padded before the actual jmp insn, so
2273 * 2 bytes are subtracted from INSN_SZ_DIFF.
2274 *
2275 * If the previous pass already emits an imm8
2276 * jmp, there is nothing to pad (0 byte).
2277 *
2278 * If it emits an imm32 jmp (5 bytes) previously
2279 * and now an imm8 jmp (2 bytes), then we pad
2280 * (5 - 2 = 3) bytes to stop the image from
2281 * shrinking further.
2282 */
2283 nops = INSN_SZ_DIFF - 2;
2284 if (nops != 0 && nops != 3) {
2285 pr_err("unexpected jump padding: %d bytes\n",
2286 nops);
2287 return -EFAULT;
2288 }
2289 emit_nops(&prog, nops);
2290 }
2291 EMIT2(0xEB, jmp_offset);
2292 } else if (is_simm32(jmp_offset)) {
2293 EMIT1_off32(0xE9, jmp_offset);
2294 } else {
2295 pr_err("jmp gen bug %llx\n", jmp_offset);
2296 return -EFAULT;
2297 }
2298 break;
2299
2300 case BPF_JMP | BPF_EXIT:
2301 if (seen_exit) {
2302 jmp_offset = ctx->cleanup_addr - addrs[i];
2303 goto emit_jmp;
2304 }
2305 seen_exit = true;
2306 /* Update cleanup_addr */
2307 ctx->cleanup_addr = proglen;
2308 if (bpf_prog->aux->exception_boundary) {
2309 pop_callee_regs(&prog, all_callee_regs_used);
2310 pop_r12(&prog);
2311 } else {
2312 pop_callee_regs(&prog, callee_regs_used);
2313 if (arena_vm_start)
2314 pop_r12(&prog);
2315 }
2316 EMIT1(0xC9); /* leave */
2317 emit_return(&prog, image + addrs[i - 1] + (prog - temp));
2318 break;
2319
2320 default:
2321 /*
2322 * By design x86-64 JIT should support all BPF instructions.
2323 * This error will be seen if new instruction was added
2324 * to the interpreter, but not to the JIT, or if there is
2325 * junk in bpf_prog.
2326 */
2327 pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
2328 return -EINVAL;
2329 }
2330
2331 ilen = prog - temp;
2332 if (ilen > BPF_MAX_INSN_SIZE) {
2333 pr_err("bpf_jit: fatal insn size error\n");
2334 return -EFAULT;
2335 }
2336
2337 if (image) {
2338 /*
2339 * When populating the image, assert that:
2340 *
2341 * i) We do not write beyond the allocated space, and
2342 * ii) addrs[i] did not change from the prior run, in order
2343 * to validate assumptions made for computing branch
2344 * displacements.
2345 */
2346 if (unlikely(proglen + ilen > oldproglen ||
2347 proglen + ilen != addrs[i])) {
2348 pr_err("bpf_jit: fatal error\n");
2349 return -EFAULT;
2350 }
2351 memcpy(rw_image + proglen, temp, ilen);
2352 }
2353 proglen += ilen;
2354 addrs[i] = proglen;
2355 prog = temp;
2356 }
2357
2358 if (image && excnt != bpf_prog->aux->num_exentries) {
2359 pr_err("extable is not populated\n");
2360 return -EFAULT;
2361 }
2362 return proglen;
2363 }
2364
2365 static void clean_stack_garbage(const struct btf_func_model *m,
2366 u8 **pprog, int nr_stack_slots,
2367 int stack_size)
2368 {
2369 int arg_size, off;
2370 u8 *prog;
2371
2372 /* Generally speaking, the compiler will pass the arguments
2373 * on-stack with a "push" instruction, which takes 8 bytes
2374 * on the stack. In that case, there won't be garbage values
2375 * while we copy the arguments from the origin stack frame to
2376 * the current one in BPF_DW.
2377 *
2378 * However, sometimes the compiler will only allocate 4 bytes on
2379 * the stack for an argument. For now, this case can only
2380 * happen if there is exactly one argument on-stack and its size
2381 * is not more than 4 bytes. In that case, there will be garbage
2382 * values in the upper 4 bytes of the slot where we store the
2383 * argument in the current stack frame.
2384 *
2385 * arguments on origin stack:
2386 *
2387 * stack_arg_1(4-byte) xxx(4-byte)
2388 *
2389 * what we copy:
2390 *
2391 * stack_arg_1(8-byte): stack_arg_1(origin) xxx
2392 *
2393 * and the xxx is the garbage values which we should clean here.
2394 */
2395 if (nr_stack_slots != 1)
2396 return;
2397
2398 /* the size of the last argument */
2399 arg_size = m->arg_size[m->nr_args - 1];
2400 if (arg_size <= 4) {
2401 off = -(stack_size - 4);
2402 prog = *pprog;
2403 /* mov DWORD PTR [rbp + off], 0 */
2404 if (!is_imm8(off))
2405 EMIT2_off32(0xC7, 0x85, off);
2406 else
2407 EMIT3(0xC7, 0x45, off);
2408 EMIT(0, 4);
2409 *pprog = prog;
2410 }
2411 }
2412
2413 /* get the count of the regs that are used to pass arguments */
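/* Illustrative example (hypothetical prototype): for
 *   int foo(int a, struct { long x, y; } s, int b);
 * 'a' takes one reg, 's' takes two and 'b' one, so this returns 4.
 */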
2414 static int get_nr_used_regs(const struct btf_func_model *m)
2415 {
2416 int i, arg_regs, nr_used_regs = 0;
2417
2418 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2419 arg_regs = (m->arg_size[i] + 7) / 8;
2420 if (nr_used_regs + arg_regs <= 6)
2421 nr_used_regs += arg_regs;
2422
2423 if (nr_used_regs >= 6)
2424 break;
2425 }
2426
2427 return nr_used_regs;
2428 }
2429
2430 static void save_args(const struct btf_func_model *m, u8 **prog,
2431 int stack_size, bool for_call_origin)
2432 {
2433 int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
2434 int i, j;
2435
2436 /* Store function arguments to stack.
2437 * For a function that accepts two pointers the sequence will be:
2438 * mov QWORD PTR [rbp-0x10],rdi
2439 * mov QWORD PTR [rbp-0x8],rsi
2440 */
2441 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2442 arg_regs = (m->arg_size[i] + 7) / 8;
2443
2444 /* According to the research of Yonghong, struct members
2445 * should be either all in registers or all on the stack.
2446 * Meanwhile, the compiler will pass the argument in regs
2447 * if the remaining regs can hold the argument.
2448 *
2449 * Disorder of the args can happen. For example:
2450 *
2451 * struct foo_struct {
2452 * long a;
2453 * int b;
2454 * };
2455 * int foo(char, char, char, char, char, struct foo_struct,
2456 * char);
2457 *
2458 * the arg1-5,arg7 will be passed in regs, and arg6 will
2459 * be passed on the stack.
2460 */
2461 if (nr_regs + arg_regs > 6) {
2462 /* copy function arguments from origin stack frame
2463 * into current stack frame.
2464 *
2465 * The starting address of the arguments on-stack
2466 * is:
2467 * rbp + 8(push rbp) +
2468 * 8(return addr of origin call) +
2469 * 8(return addr of the caller)
2470 * which means: rbp + 24
2471 */
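/* Each 8-byte slot is bounced through rax, roughly:
 *
 *   mov rax, QWORD PTR [rbp + 0x18 + slot * 8]
 *   mov QWORD PTR [rbp - stack_size], rax
 *
 * (illustrative; BPF_REG_0 maps to rax and BPF_REG_FP to rbp)
 */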
2472 for (j = 0; j < arg_regs; j++) {
2473 emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
2474 nr_stack_slots * 8 + 0x18);
2475 emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
2476 -stack_size);
2477
2478 if (!nr_stack_slots)
2479 first_off = stack_size;
2480 stack_size -= 8;
2481 nr_stack_slots++;
2482 }
2483 } else {
2484 /* Only copy the on-stack arguments to the current
2485 * 'stack_size' area and ignore the ones in regs; this is
2486 * used to prepare the arguments on-stack for the origin call.
2487 */
2488 if (for_call_origin) {
2489 nr_regs += arg_regs;
2490 continue;
2491 }
2492
2493 /* copy the arguments from regs into stack */
2494 for (j = 0; j < arg_regs; j++) {
2495 emit_stx(prog, BPF_DW, BPF_REG_FP,
2496 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2497 -stack_size);
2498 stack_size -= 8;
2499 nr_regs++;
2500 }
2501 }
2502 }
2503
2504 clean_stack_garbage(m, prog, nr_stack_slots, first_off);
2505 }
2506
2507 static void restore_regs(const struct btf_func_model *m, u8 **prog,
2508 int stack_size)
2509 {
2510 int i, j, arg_regs, nr_regs = 0;
2511
2512 /* Restore function arguments from stack.
2513 * For a function that accepts two pointers the sequence will be:
2514 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
2515 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
2516 *
2517 * The logic here is similar to what we do in save_args()
2518 */
2519 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2520 arg_regs = (m->arg_size[i] + 7) / 8;
2521 if (nr_regs + arg_regs <= 6) {
2522 for (j = 0; j < arg_regs; j++) {
2523 emit_ldx(prog, BPF_DW,
2524 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2525 BPF_REG_FP,
2526 -stack_size);
2527 stack_size -= 8;
2528 nr_regs++;
2529 }
2530 } else {
2531 stack_size -= 8 * arg_regs;
2532 }
2533
2534 if (nr_regs >= 6)
2535 break;
2536 }
2537 }
2538
2539 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
2540 struct bpf_tramp_link *l, int stack_size,
2541 int run_ctx_off, bool save_ret,
2542 void *image, void *rw_image)
2543 {
2544 u8 *prog = *pprog;
2545 u8 *jmp_insn;
2546 int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
2547 struct bpf_prog *p = l->link.prog;
2548 u64 cookie = l->cookie;
2549
2550 /* mov rdi, cookie */
2551 emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
2552
2553 /* Prepare struct bpf_tramp_run_ctx.
2554 *
2555 * bpf_tramp_run_ctx is already preserved by
2556 * arch_prepare_bpf_trampoline().
2557 *
2558 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
2559 */
2560 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
2561
2562 /* arg1: mov rdi, progs[i] */
2563 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2564 /* arg2: lea rsi, [rbp - run_ctx_off] */
2565 if (!is_imm8(-run_ctx_off))
2566 EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
2567 else
2568 EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
2569
2570 if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
2571 return -EINVAL;
2572 /* remember prog start time returned by __bpf_prog_enter */
2573 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
2574
2575 /* if (__bpf_prog_enter*(prog) == 0)
2576 * goto skip_exec_of_prog;
2577 */
2578 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
2579 /* emit 2 nops that will be replaced with JE insn */
2580 jmp_insn = prog;
2581 emit_nops(&prog, 2);
2582
2583 /* arg1: lea rdi, [rbp - stack_size] */
2584 if (!is_imm8(-stack_size))
2585 EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
2586 else
2587 EMIT4(0x48, 0x8D, 0x7D, -stack_size);
2588 /* arg2: progs[i]->insnsi for interpreter */
2589 if (!p->jited)
2590 emit_mov_imm64(&prog, BPF_REG_2,
2591 (long) p->insnsi >> 32,
2592 (u32) (long) p->insnsi);
2593 /* call JITed bpf program or interpreter */
2594 if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
2595 return -EINVAL;
2596
2597 /*
2598 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
2599 * of the previous call which is then passed on the stack to
2600 * the next BPF program.
2601 *
2602 * BPF_TRAMP_FENTRY trampoline may need to return the return
2603 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
2604 */
2605 if (save_ret)
2606 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2607
2608 /* replace 2 nops with JE insn, since jmp target is known */
2609 jmp_insn[0] = X86_JE;
2610 jmp_insn[1] = prog - jmp_insn - 2;
2611
2612 /* arg1: mov rdi, progs[i] */
2613 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2614 /* arg2: mov rsi, rbx <- start time in nsec */
2615 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
2616 /* arg3: lea rdx, [rbp - run_ctx_off] */
2617 if (!is_imm8(-run_ctx_off))
2618 EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
2619 else
2620 EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
2621 if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
2622 return -EINVAL;
2623
2624 *pprog = prog;
2625 return 0;
2626 }
2627
2628 static void emit_align(u8 **pprog, u32 align)
2629 {
2630 u8 *target, *prog = *pprog;
2631
2632 target = PTR_ALIGN(prog, align);
2633 if (target != prog)
2634 emit_nops(&prog, target - prog);
2635
2636 *pprog = prog;
2637 }
2638
2639 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
2640 {
2641 u8 *prog = *pprog;
2642 s64 offset;
2643
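/* The displacement is relative to the end of the 6-byte jcc emitted
 * below (2 opcode bytes + 4-byte imm32), hence ip + 2 + 4.
 */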
2644 offset = func - (ip + 2 + 4);
2645 if (!is_simm32(offset)) {
2646 pr_err("Target %p is out of range\n", func);
2647 return -EINVAL;
2648 }
2649 EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
2650 *pprog = prog;
2651 return 0;
2652 }
2653
2654 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
2655 struct bpf_tramp_links *tl, int stack_size,
2656 int run_ctx_off, bool save_ret,
2657 void *image, void *rw_image)
2658 {
2659 int i;
2660 u8 *prog = *pprog;
2661
2662 for (i = 0; i < tl->nr_links; i++) {
2663 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
2664 run_ctx_off, save_ret, image, rw_image))
2665 return -EINVAL;
2666 }
2667 *pprog = prog;
2668 return 0;
2669 }
2670
2671 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
2672 struct bpf_tramp_links *tl, int stack_size,
2673 int run_ctx_off, u8 **branches,
2674 void *image, void *rw_image)
2675 {
2676 u8 *prog = *pprog;
2677 int i;
2678
2679 /* The first fmod_ret program will receive a garbage return value.
2680 * Set this to 0 to avoid confusing the program.
2681 */
2682 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
2683 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2684 for (i = 0; i < tl->nr_links; i++) {
2685 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
2686 image, rw_image))
2687 return -EINVAL;
2688
2689 /* mod_ret prog stored return value into [rbp - 8]. Emit:
2690 * if (*(u64 *)(rbp - 8) != 0)
2691 * goto do_fexit;
2692 */
2693 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
2694 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
2695
2696 /* Save the location of the branch and generate 6 nops
2697 * (4 bytes for an offset and 2 bytes for the jump). These nops
2698 * are replaced with a conditional jump once do_fexit (i.e. the
2699 * start of the fexit invocation) is finalized.
2700 */
2701 branches[i] = prog;
2702 emit_nops(&prog, 4 + 2);
2703 }
2704
2705 *pprog = prog;
2706 return 0;
2707 }
2708
2709 /* Example:
2710 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
2711 * its 'struct btf_func_model' will be nr_args=2
2712 * The assembly code when eth_type_trans is executing after trampoline:
2713 *
2714 * push rbp
2715 * mov rbp, rsp
2716 * sub rsp, 16 // space for skb and dev
2717 * push rbx // temp regs to pass start time
2718 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
2719 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
2720 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2721 * mov rbx, rax // remember start time if bpf stats are enabled
2722 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
2723 * call addr_of_jited_FENTRY_prog
2724 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2725 * mov rsi, rbx // prog start time
2726 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2727 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
2728 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
2729 * pop rbx
2730 * leave
2731 * ret
2732 *
2733 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
2734 * replaced with 'call generated_bpf_trampoline'. When it returns
2735 * eth_type_trans will continue executing with original skb and dev pointers.
2736 *
2737 * The assembly code when eth_type_trans is called from trampoline:
2738 *
2739 * push rbp
2740 * mov rbp, rsp
2741 * sub rsp, 24 // space for skb, dev, return value
2742 * push rbx // temp regs to pass start time
2743 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
2744 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
2745 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2746 * mov rbx, rax // remember start time if bpf stats are enabled
2747 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2748 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
2749 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2750 * mov rsi, rbx // prog start time
2751 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2752 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
2753 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
2754 * call eth_type_trans+5 // execute body of eth_type_trans
2755 * mov qword ptr [rbp - 8], rax // save return value
2756 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2757 * mov rbx, rax // remember start time if bpf stats are enabled
2758 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2759 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
2760 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2761 * mov rsi, rbx // prog start time
2762 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2763 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
2764 * pop rbx
2765 * leave
2766 * add rsp, 8 // skip eth_type_trans's frame
2767 * ret // return to its caller
2768 */
2769 static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
2770 void *rw_image_end, void *image,
2771 const struct btf_func_model *m, u32 flags,
2772 struct bpf_tramp_links *tlinks,
2773 void *func_addr)
2774 {
2775 int i, ret, nr_regs = m->nr_args, stack_size = 0;
2776 int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
2777 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
2778 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
2779 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
2780 void *orig_call = func_addr;
2781 u8 **branches = NULL;
2782 u8 *prog;
2783 bool save_ret;
2784
2785 /*
2786 * F_INDIRECT is only compatible with F_RET_FENTRY_RET; it is
2787 * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG
2788 * because @func_addr is not a patched call site in that case.
2789 */
2790 WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
2791 (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));
2792
2793 /* extra registers for struct arguments */
2794 for (i = 0; i < m->nr_args; i++) {
2795 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
2796 nr_regs += (m->arg_size[i] + 7) / 8 - 1;
2797 }
2798
2799 /* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. The first six
2800 * are passed in registers, the rest are passed on the stack.
2801 */
2802 if (nr_regs > MAX_BPF_FUNC_ARGS)
2803 return -ENOTSUPP;
2804
2805 /* Generated trampoline stack layout:
2806 *
2807 * RBP + 8 [ return address ]
2808 * RBP + 0 [ RBP ]
2809 *
2810 * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or
2811 * BPF_TRAMP_F_RET_FENTRY_RET flags
2812 *
2813 * [ reg_argN ] always
2814 * [ ... ]
2815 * RBP - regs_off [ reg_arg1 ] program's ctx pointer
2816 *
2817 * RBP - nregs_off [ regs count ] always
2818 *
2819 * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
2820 *
2821 * RBP - rbx_off [ rbx value ] always
2822 *
2823 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
2824 *
2825 * [ stack_argN ] BPF_TRAMP_F_CALL_ORIG
2826 * [ ... ]
2827 * [ stack_arg2 ]
2828 * RBP - arg_stack_off [ stack_arg1 ]
2829 * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
2830 */
2831
2832 /* room for return value of orig_call or fentry prog */
2833 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
2834 if (save_ret)
2835 stack_size += 8;
2836
2837 stack_size += nr_regs * 8;
2838 regs_off = stack_size;
2839
2840 /* regs count */
2841 stack_size += 8;
2842 nregs_off = stack_size;
2843
2844 if (flags & BPF_TRAMP_F_IP_ARG)
2845 stack_size += 8; /* room for IP address argument */
2846
2847 ip_off = stack_size;
2848
2849 stack_size += 8;
2850 rbx_off = stack_size;
2851
2852 stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
2853 run_ctx_off = stack_size;
2854
2855 if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
2856 /* the space used to pass arguments on the stack */
2857 stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
2858 /* make sure the stack pointer is 16-byte aligned if we
2859 * need to pass arguments on the stack, which means
2860 * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
2861 * should be 16-byte aligned. The following code depends on
2862 * stack_size already being 8-byte aligned.
2863 */
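/* Worked example (illustrative): stack_size == 40 gives
 * 40 + 24 == 64, already 16-byte aligned, so no padding;
 * stack_size == 48 gives 48 + 24 == 72, so 8 bytes are added.
 */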
2864 stack_size += (stack_size % 16) ? 0 : 8;
2865 }
2866
2867 arg_stack_off = stack_size;
2868
2869 if (flags & BPF_TRAMP_F_SKIP_FRAME) {
2870 /* skip patched call instruction and point orig_call to actual
2871 * body of the kernel function.
2872 */
2873 if (is_endbr(*(u32 *)orig_call))
2874 orig_call += ENDBR_INSN_SIZE;
2875 orig_call += X86_PATCH_SIZE;
2876 }
2877
2878 prog = rw_image;
2879
2880 if (flags & BPF_TRAMP_F_INDIRECT) {
2881 /*
2882 * Indirect call for bpf_struct_ops
2883 */
2884 emit_cfi(&prog, cfi_get_func_hash(func_addr));
2885 } else {
2886 /*
2887 * Direct-call fentry stub, as such it needs accounting for the
2888 * __fentry__ call.
2889 */
2890 x86_call_depth_emit_accounting(&prog, NULL, image);
2891 }
2892 EMIT1(0x55); /* push rbp */
2893 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
2894 if (!is_imm8(stack_size)) {
2895 /* sub rsp, stack_size */
2896 EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
2897 } else {
2898 /* sub rsp, stack_size */
2899 EMIT4(0x48, 0x83, 0xEC, stack_size);
2900 }
2901 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
2902 EMIT1(0x50); /* push rax */
2903 /* mov QWORD PTR [rbp - rbx_off], rbx */
2904 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
2905
2906 /* Store number of argument registers of the traced function:
2907 * mov rax, nr_regs
2908 * mov QWORD PTR [rbp - nregs_off], rax
2909 */
2910 emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
2911 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
2912
2913 if (flags & BPF_TRAMP_F_IP_ARG) {
2914 /* Store IP address of the traced function:
2915 * movabsq rax, func_addr
2916 * mov QWORD PTR [rbp - ip_off], rax
2917 */
2918 emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
2919 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
2920 }
2921
2922 save_args(m, &prog, regs_off, false);
2923
2924 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2925 /* arg1: mov rdi, im */
2926 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2927 if (emit_rsb_call(&prog, __bpf_tramp_enter,
2928 image + (prog - (u8 *)rw_image))) {
2929 ret = -EINVAL;
2930 goto cleanup;
2931 }
2932 }
2933
2934 if (fentry->nr_links) {
2935 if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
2936 flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
2937 return -EINVAL;
2938 }
2939
2940 if (fmod_ret->nr_links) {
2941 branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
2942 GFP_KERNEL);
2943 if (!branches)
2944 return -ENOMEM;
2945
2946 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
2947 run_ctx_off, branches, image, rw_image)) {
2948 ret = -EINVAL;
2949 goto cleanup;
2950 }
2951 }
2952
2953 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2954 restore_regs(m, &prog, regs_off);
2955 save_args(m, &prog, arg_stack_off, true);
2956
2957 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
2958 /* Before calling the original function, restore the
2959 * tail_call_cnt from stack to rax.
2960 */
2961 RESTORE_TAIL_CALL_CNT(stack_size);
2962 }
2963
2964 if (flags & BPF_TRAMP_F_ORIG_STACK) {
2965 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
2966 EMIT2(0xff, 0xd3); /* call *rbx */
2967 } else {
2968 /* call original function */
2969 if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
2970 ret = -EINVAL;
2971 goto cleanup;
2972 }
2973 }
2974 /* remember return value in a stack for bpf prog to access */
2975 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2976 im->ip_after_call = image + (prog - (u8 *)rw_image);
2977 emit_nops(&prog, X86_PATCH_SIZE);
2978 }
2979
2980 if (fmod_ret->nr_links) {
2981 /* From Intel 64 and IA-32 Architectures Optimization
2982 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2983 * Coding Rule 11: All branch targets should be 16-byte
2984 * aligned.
2985 */
2986 emit_align(&prog, 16);
2987 /* Update the branches saved in invoke_bpf_mod_ret with the
2988 * aligned address of do_fexit.
2989 */
2990 for (i = 0; i < fmod_ret->nr_links; i++) {
2991 emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
2992 image + (branches[i] - (u8 *)rw_image), X86_JNE);
2993 }
2994 }
2995
2996 if (fexit->nr_links) {
2997 if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
2998 false, image, rw_image)) {
2999 ret = -EINVAL;
3000 goto cleanup;
3001 }
3002 }
3003
3004 if (flags & BPF_TRAMP_F_RESTORE_REGS)
3005 restore_regs(m, &prog, regs_off);
3006
3007 /* This needs to be done regardless. If there were fmod_ret programs,
3008 * the return value is only updated on the stack and still needs to be
3009 * restored to R0.
3010 */
3011 if (flags & BPF_TRAMP_F_CALL_ORIG) {
3012 im->ip_epilogue = image + (prog - (u8 *)rw_image);
3013 /* arg1: mov rdi, im */
3014 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
3015 if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
3016 ret = -EINVAL;
3017 goto cleanup;
3018 }
3019 } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3020 /* Before running the original function, restore the
3021 * tail_call_cnt from stack to rax.
3022 */
3023 RESTORE_TAIL_CALL_CNT(stack_size);
3024 }
3025
3026 /* restore return value of orig_call or fentry prog back into RAX */
3027 if (save_ret)
3028 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
3029
3030 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
3031 EMIT1(0xC9); /* leave */
3032 if (flags & BPF_TRAMP_F_SKIP_FRAME) {
3033 /* skip our return address and return to parent */
3034 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
3035 }
3036 emit_return(&prog, image + (prog - (u8 *)rw_image));
3037 /* Make sure the trampoline generation logic doesn't overflow */
3038 if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
3039 ret = -EFAULT;
3040 goto cleanup;
3041 }
3042 ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;
3043
3044 cleanup:
3045 kfree(branches);
3046 return ret;
3047 }
3048
3049 void *arch_alloc_bpf_trampoline(unsigned int size)
3050 {
3051 return bpf_prog_pack_alloc(size, jit_fill_hole);
3052 }
3053
3054 void arch_free_bpf_trampoline(void *image, unsigned int size)
3055 {
3056 bpf_prog_pack_free(image, size);
3057 }
3058
3059 int arch_protect_bpf_trampoline(void *image, unsigned int size)
3060 {
3061 return 0;
3062 }
3063
3064 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
3065 const struct btf_func_model *m, u32 flags,
3066 struct bpf_tramp_links *tlinks,
3067 void *func_addr)
3068 {
3069 void *rw_image, *tmp;
3070 int ret;
3071 u32 size = image_end - image;
3072
3073 /* rw_image doesn't need to be in module memory range, so we can
3074 * use kvmalloc.
3075 */
3076 rw_image = kvmalloc(size, GFP_KERNEL);
3077 if (!rw_image)
3078 return -ENOMEM;
3079
3080 ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
3081 flags, tlinks, func_addr);
3082 if (ret < 0)
3083 goto out;
3084
3085 tmp = bpf_arch_text_copy(image, rw_image, size);
3086 if (IS_ERR(tmp))
3087 ret = PTR_ERR(tmp);
3088 out:
3089 kvfree(rw_image);
3090 return ret;
3091 }
3092
3093 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
3094 struct bpf_tramp_links *tlinks, void *func_addr)
3095 {
3096 struct bpf_tramp_image im;
3097 void *image;
3098 int ret;
3099
3100 /* Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
3101 * This will NOT cause fragmentation in direct map, as we do not
3102 * call set_memory_*() on this buffer.
3103 *
3104 * We cannot use kvmalloc here, because we need image to be in
3105 * module memory range.
3106 */
3107 image = bpf_jit_alloc_exec(PAGE_SIZE);
3108 if (!image)
3109 return -ENOMEM;
3110
3111 ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
3112 m, flags, tlinks, func_addr);
3113 bpf_jit_free_exec(image);
3114 return ret;
3115 }
3116
3117 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
3118 {
3119 u8 *jg_reloc, *prog = *pprog;
3120 int pivot, err, jg_bytes = 1;
3121 s64 jg_offset;
3122
3123 if (a == b) {
3124 /* Leaf node of recursion, i.e. not a range of indices
3125 * anymore.
3126 */
3127 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
3128 if (!is_simm32(progs[a]))
3129 return -1;
3130 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
3131 progs[a]);
3132 err = emit_cond_near_jump(&prog, /* je func */
3133 (void *)progs[a], image + (prog - buf),
3134 X86_JE);
3135 if (err)
3136 return err;
3137
3138 emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
3139
3140 *pprog = prog;
3141 return 0;
3142 }
3143
3144 /* Not a leaf node, so we pivot, and recursively descend into
3145 * the lower and upper ranges.
3146 */
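/* For two sorted targets p0 < p1 the generated dispatcher is roughly
 * (addresses illustrative; the indirect jump may go through a
 * retpoline thunk):
 *
 *   cmp rdx, p0
 *   jg  1f
 *   cmp rdx, p0
 *   je  p0
 *   jmp *rdx
 * 1:
 *   cmp rdx, p1
 *   je  p1
 *   jmp *rdx
 */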
3147 pivot = (b - a) / 2;
3148 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
3149 if (!is_simm32(progs[a + pivot]))
3150 return -1;
3151 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
3152
3153 if (pivot > 2) { /* jg upper_part */
3154 /* Require near jump. */
3155 jg_bytes = 4;
3156 EMIT2_off32(0x0F, X86_JG + 0x10, 0);
3157 } else {
3158 EMIT2(X86_JG, 0);
3159 }
3160 jg_reloc = prog;
3161
3162 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
3163 progs, image, buf);
3164 if (err)
3165 return err;
3166
3167 /* From Intel 64 and IA-32 Architectures Optimization
3168 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3169 * Coding Rule 11: All branch targets should be 16-byte
3170 * aligned.
3171 */
3172 emit_align(&prog, 16);
3173 jg_offset = prog - jg_reloc;
3174 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
3175
3176 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
3177 b, progs, image, buf);
3178 if (err)
3179 return err;
3180
3181 *pprog = prog;
3182 return 0;
3183 }
3184
3185 static int cmp_ips(const void *a, const void *b)
3186 {
3187 const s64 *ipa = a;
3188 const s64 *ipb = b;
3189
3190 if (*ipa > *ipb)
3191 return 1;
3192 if (*ipa < *ipb)
3193 return -1;
3194 return 0;
3195 }
3196
3197 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
3198 {
3199 u8 *prog = buf;
3200
3201 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
3202 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
3203 }
3204
3205 struct x64_jit_data {
3206 struct bpf_binary_header *rw_header;
3207 struct bpf_binary_header *header;
3208 int *addrs;
3209 u8 *image;
3210 int proglen;
3211 struct jit_context ctx;
3212 };
3213
3214 #define MAX_PASSES 20
3215 #define PADDING_PASSES (MAX_PASSES - 5)
3216
3217 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
3218 {
3219 struct bpf_binary_header *rw_header = NULL;
3220 struct bpf_binary_header *header = NULL;
3221 struct bpf_prog *tmp, *orig_prog = prog;
3222 struct x64_jit_data *jit_data;
3223 int proglen, oldproglen = 0;
3224 struct jit_context ctx = {};
3225 bool tmp_blinded = false;
3226 bool extra_pass = false;
3227 bool padding = false;
3228 u8 *rw_image = NULL;
3229 u8 *image = NULL;
3230 int *addrs;
3231 int pass;
3232 int i;
3233
3234 if (!prog->jit_requested)
3235 return orig_prog;
3236
3237 tmp = bpf_jit_blind_constants(prog);
3238 /*
3239 * If blinding was requested and we failed during blinding,
3240 * we must fall back to the interpreter.
3241 */
3242 if (IS_ERR(tmp))
3243 return orig_prog;
3244 if (tmp != prog) {
3245 tmp_blinded = true;
3246 prog = tmp;
3247 }
3248
3249 jit_data = prog->aux->jit_data;
3250 if (!jit_data) {
3251 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
3252 if (!jit_data) {
3253 prog = orig_prog;
3254 goto out;
3255 }
3256 prog->aux->jit_data = jit_data;
3257 }
3258 addrs = jit_data->addrs;
3259 if (addrs) {
3260 ctx = jit_data->ctx;
3261 oldproglen = jit_data->proglen;
3262 image = jit_data->image;
3263 header = jit_data->header;
3264 rw_header = jit_data->rw_header;
3265 rw_image = (void *)rw_header + ((void *)image - (void *)header);
3266 extra_pass = true;
3267 padding = true;
3268 goto skip_init_addrs;
3269 }
3270 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
3271 if (!addrs) {
3272 prog = orig_prog;
3273 goto out_addrs;
3274 }
3275
3276 /*
3277 * Before the first pass, make a rough estimation of addrs[]:
3278 * each BPF instruction is translated to less than 64 bytes.
3279 */
3280 for (proglen = 0, i = 0; i <= prog->len; i++) {
3281 proglen += 64;
3282 addrs[i] = proglen;
3283 }
3284 ctx.cleanup_addr = proglen;
3285 skip_init_addrs:
3286
3287 /*
3288 * JITed image shrinks with every pass and the loop iterates
3289 * until the image stops shrinking. Very large BPF programs
3290 * may only converge on the last pass. In such a case, do one more
3291 * pass to emit the final image.
3292 */
3293 for (pass = 0; pass < MAX_PASSES || image; pass++) {
3294 if (!padding && pass >= PADDING_PASSES)
3295 padding = true;
3296 proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
3297 if (proglen <= 0) {
3298 out_image:
3299 image = NULL;
3300 if (header) {
3301 bpf_arch_text_copy(&header->size, &rw_header->size,
3302 sizeof(rw_header->size));
3303 bpf_jit_binary_pack_free(header, rw_header);
3304 }
3305 /* Fall back to interpreter mode */
3306 prog = orig_prog;
3307 if (extra_pass) {
3308 prog->bpf_func = NULL;
3309 prog->jited = 0;
3310 prog->jited_len = 0;
3311 }
3312 goto out_addrs;
3313 }
3314 if (image) {
3315 if (proglen != oldproglen) {
3316 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
3317 proglen, oldproglen);
3318 goto out_image;
3319 }
3320 break;
3321 }
3322 if (proglen == oldproglen) {
3323 /*
3324 * The number of entries in extable is the number of BPF_LDX
3325 * insns that access kernel memory via "pointer to BTF type".
3326 * The verifier changed their opcode from LDX|MEM|size
3327 * to LDX|PROBE_MEM|size to make JITing easier.
3328 */
3329 u32 align = __alignof__(struct exception_table_entry);
3330 u32 extable_size = prog->aux->num_exentries *
3331 sizeof(struct exception_table_entry);
3332
3333 /* allocate module memory for x86 insns and extable */
3334 header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
3335 &image, align, &rw_header, &rw_image,
3336 jit_fill_hole);
3337 if (!header) {
3338 prog = orig_prog;
3339 goto out_addrs;
3340 }
3341 prog->aux->extable = (void *) image + roundup(proglen, align);
3342 }
3343 oldproglen = proglen;
3344 cond_resched();
3345 }
3346
3347 if (bpf_jit_enable > 1)
3348 bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
3349
3350 if (image) {
3351 if (!prog->is_func || extra_pass) {
3352 /*
3353 * bpf_jit_binary_pack_finalize fails in two scenarios:
3354 * 1) header is not pointing to proper module memory;
3355 * 2) the arch doesn't support bpf_arch_text_copy().
3356 *
3357 * Both cases are serious bugs and justify WARN_ON.
3358 */
3359 if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) {
3360 /* header has been freed */
3361 header = NULL;
3362 goto out_image;
3363 }
3364
3365 bpf_tail_call_direct_fixup(prog);
3366 } else {
3367 jit_data->addrs = addrs;
3368 jit_data->ctx = ctx;
3369 jit_data->proglen = proglen;
3370 jit_data->image = image;
3371 jit_data->header = header;
3372 jit_data->rw_header = rw_header;
3373 }
3374 /*
3375 * cfi_get_offset() is used when CFI preambles put code *before*
3376 * the function. See emit_cfi(). For FineIBT specifically this code
3377 * can also be executed and bpf_prog_kallsyms_add() will
3378 * generate an additional symbol to cover this, hence also
3379 * decrement proglen.
3380 */
3381 prog->bpf_func = (void *)image + cfi_get_offset();
3382 prog->jited = 1;
3383 prog->jited_len = proglen - cfi_get_offset();
3384 } else {
3385 prog = orig_prog;
3386 }
3387
3388 if (!image || !prog->is_func || extra_pass) {
3389 if (image)
3390 bpf_prog_fill_jited_linfo(prog, addrs + 1);
3391 out_addrs:
3392 kvfree(addrs);
3393 kfree(jit_data);
3394 prog->aux->jit_data = NULL;
3395 }
3396 out:
3397 if (tmp_blinded)
3398 bpf_jit_prog_release_other(prog, prog == orig_prog ?
3399 tmp : orig_prog);
3400 return prog;
3401 }
3402
3403 bool bpf_jit_supports_kfunc_call(void)
3404 {
3405 return true;
3406 }
3407
3408 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
3409 {
3410 if (text_poke_copy(dst, src, len) == NULL)
3411 return ERR_PTR(-EINVAL);
3412 return dst;
3413 }
3414
3415 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
3416 bool bpf_jit_supports_subprog_tailcalls(void)
3417 {
3418 return true;
3419 }
3420
3421 bool bpf_jit_supports_percpu_insn(void)
3422 {
3423 return true;
3424 }
3425
3426 void bpf_jit_free(struct bpf_prog *prog)
3427 {
3428 if (prog->jited) {
3429 struct x64_jit_data *jit_data = prog->aux->jit_data;
3430 struct bpf_binary_header *hdr;
3431
3432 /*
3433 * If we fail the final pass of JIT (from jit_subprogs),
3434 * the program may not be finalized yet. Call finalize here
3435 * before freeing it.
3436 */
3437 if (jit_data) {
3438 bpf_jit_binary_pack_finalize(jit_data->header,
3439 jit_data->rw_header);
3440 kvfree(jit_data->addrs);
3441 kfree(jit_data);
3442 }
3443 prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset();
3444 hdr = bpf_jit_binary_pack_hdr(prog);
3445 bpf_jit_binary_pack_free(hdr, NULL);
3446 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
3447 }
3448
3449 bpf_prog_unlock_free(prog);
3450 }
3451
3452 bool bpf_jit_supports_exceptions(void)
3453 {
3454 /* We unwind through both kernel frames (starting from within bpf_throw
3455 * call) and BPF frames. Therefore we require ORC unwinder to be enabled
3456 * to walk kernel frames and reach BPF frames in the stack trace.
3457 */
3458 return IS_ENABLED(CONFIG_UNWINDER_ORC);
3459 }
3460
3461 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3462 {
3463 #if defined(CONFIG_UNWINDER_ORC)
3464 struct unwind_state state;
3465 unsigned long addr;
3466
3467 for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
3468 unwind_next_frame(&state)) {
3469 addr = unwind_get_return_address(&state);
3470 if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
3471 break;
3472 }
3473 return;
3474 #endif
3475 WARN(1, "verification of programs using bpf_throw should have failed\n");
3476 }
3477
3478 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
3479 struct bpf_prog *new, struct bpf_prog *old)
3480 {
3481 u8 *old_addr, *new_addr, *old_bypass_addr;
3482 int ret;
3483
3484 old_bypass_addr = old ? NULL : poke->bypass_addr;
3485 old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
3486 new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
3487
3488 /*
3489 * On program loading or teardown, the program's kallsym entry
3490 * might not be in place, so we use __bpf_arch_text_poke to skip
3491 * the kallsyms check.
3492 */
3493 if (new) {
3494 ret = __bpf_arch_text_poke(poke->tailcall_target,
3495 BPF_MOD_JUMP,
3496 old_addr, new_addr);
3497 BUG_ON(ret < 0);
3498 if (!old) {
3499 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
3500 BPF_MOD_JUMP,
3501 poke->bypass_addr,
3502 NULL);
3503 BUG_ON(ret < 0);
3504 }
3505 } else {
3506 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
3507 BPF_MOD_JUMP,
3508 old_bypass_addr,
3509 poke->bypass_addr);
3510 BUG_ON(ret < 0);
3511 /* let other CPUs finish the execution of the program
3512 * so that it will not be possible to expose them
3513 * to an invalid nop, stack unwind, or nop state
3514 */
3515 if (!ret)
3516 synchronize_rcu();
3517 ret = __bpf_arch_text_poke(poke->tailcall_target,
3518 BPF_MOD_JUMP,
3519 old_addr, NULL);
3520 BUG_ON(ret < 0);
3521 }
3522 }
3523
3524 bool bpf_jit_supports_arena(void)
3525 {
3526 return true;
3527 }
3528
3529 bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
3530 {
3531 if (!in_arena)
3532 return true;
3533 switch (insn->code) {
3534 case BPF_STX | BPF_ATOMIC | BPF_W:
3535 case BPF_STX | BPF_ATOMIC | BPF_DW:
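/* The fetch variants of and/or/xor are emitted as a cmpxchg loop
 * elsewhere in this file; that multi-access sequence is not wired
 * up to the arena exception-table handling, so reject it here.
 */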
3536 if (insn->imm == (BPF_AND | BPF_FETCH) ||
3537 insn->imm == (BPF_OR | BPF_FETCH) ||
3538 insn->imm == (BPF_XOR | BPF_FETCH))
3539 return false;
3540 }
3541 return true;
3542 }
3543
3544 bool bpf_jit_supports_ptr_xchg(void)
3545 {
3546 return true;
3547 }
3548
3549 /* x86-64 JIT emits its own code to filter user addresses so return 0 here */
3550 u64 bpf_arch_uaddress_limit(void)
3551 {
3552 return 0;
3553 }
3554