1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * BPF JIT compiler
4 *
5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7 */
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bitfield.h>
12 #include <linux/bpf.h>
13 #include <linux/memory.h>
14 #include <linux/sort.h>
15 #include <asm/extable.h>
16 #include <asm/ftrace.h>
17 #include <asm/set_memory.h>
18 #include <asm/nospec-branch.h>
19 #include <asm/text-patching.h>
20 #include <asm/unwind.h>
21 #include <asm/cfi.h>
22
23 static bool all_callee_regs_used[4] = {true, true, true, true};
24
25 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
26 {
27 if (len == 1)
28 *ptr = bytes;
29 else if (len == 2)
30 *(u16 *)ptr = bytes;
31 else {
32 *(u32 *)ptr = bytes;
33 barrier();
34 }
35 return ptr + len;
36 }
37
38 #define EMIT(bytes, len) \
39 do { prog = emit_code(prog, bytes, len); } while (0)
40
41 #define EMIT1(b1) EMIT(b1, 1)
42 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
43 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
44 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
45 #define EMIT5(b1, b2, b3, b4, b5) \
46 do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0)
47
48 #define EMIT1_off32(b1, off) \
49 do { EMIT1(b1); EMIT(off, 4); } while (0)
50 #define EMIT2_off32(b1, b2, off) \
51 do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
52 #define EMIT3_off32(b1, b2, b3, off) \
53 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
54 #define EMIT4_off32(b1, b2, b3, b4, off) \
55 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
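
/*
 * Worked example (for illustration): on little-endian x86 the packed
 * immediate built by the EMIT* macros is stored low byte first by
 * emit_code(), so
 *
 *	EMIT3(0x48, 0x89, 0xE5);
 *
 * writes the byte sequence 48 89 e5, i.e. "mov rbp, rsp", and
 *
 *	EMIT1_off32(0xE8, offset);
 *
 * writes the 5-byte near call that emit_patch() below relies on.
 */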
56
57 #ifdef CONFIG_X86_KERNEL_IBT
58 #define EMIT_ENDBR() EMIT(gen_endbr(), 4)
59 #define EMIT_ENDBR_POISON() EMIT(gen_endbr_poison(), 4)
60 #else
61 #define EMIT_ENDBR() do { } while (0)
62 #define EMIT_ENDBR_POISON() do { } while (0)
63 #endif
64
65 static bool is_imm8(int value)
66 {
67 return value <= 127 && value >= -128;
68 }
69
70 /*
71 * Let us limit the positive offset to be <= 123.
72 * This is to ensure eventual JIT convergence for the following patterns:
73 * ...
74 * pass4, final_proglen=4391:
75 * ...
76 * 20e: 48 85 ff test rdi,rdi
77 * 211: 74 7d je 0x290
78 * 213: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0]
79 * ...
80 * 289: 48 85 ff test rdi,rdi
81 * 28c: 74 17 je 0x2a5
82 * 28e: e9 7f ff ff ff jmp 0x212
83 * 293: bf 03 00 00 00 mov edi,0x3
84 * Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (125)
85 * and insn at 0x28e is 5-byte jmp insn with offset -129.
86 *
87 * pass5, final_proglen=4392:
88 * ...
89 * 20e: 48 85 ff test rdi,rdi
90 * 211: 0f 84 80 00 00 00 je 0x297
91 * 217: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0]
92 * ...
93 * 28d: 48 85 ff test rdi,rdi
94 * 290: 74 1a je 0x2ac
95 * 292: eb 84 jmp 0x218
96 * 294: bf 03 00 00 00 mov edi,0x3
97 * Note that insn at 0x211 is 6-byte cond jump insn now since its offset
98 * becomes 0x80 based on previous round (0x293 - 0x213 = 0x80).
99 * At the same time, insn at 0x292 is a 2-byte insn since its offset is
100 * -124.
101 *
102 * pass6 will repeat the same code as in pass4 and this will prevent
103 * eventual convergence.
104 *
105 * To fix this issue, we need to break the je (2->6 bytes) <-> jmp (5->2 bytes)
106 * cycle above. In the above example, a je offset <= 0x7c should work.
107 *
108 * For other cases, je <-> je needs offset <= 0x7b to avoid the same
109 * non-convergence issue. For jmp <-> je and jmp <-> jmp cases, a jmp
110 * offset <= 0x7c avoids it.
111 *
112 * Overall, let us limit the positive offset for an 8-bit cond/uncond jmp
113 * insn to a maximum of 123 (0x7b). This way, the JIT passes can eventually converge.
114 */
115 static bool is_imm8_jmp_offset(int value)
116 {
117 return value <= 123 && value >= -128;
118 }
119
120 static bool is_simm32(s64 value)
121 {
122 return value == (s64)(s32)value;
123 }
124
125 static bool is_uimm32(u64 value)
126 {
127 return value == (u64)(u32)value;
128 }
129
130 /* mov dst, src */
131 #define EMIT_mov(DST, SRC) \
132 do { \
133 if (DST != SRC) \
134 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
135 } while (0)
136
137 static int bpf_size_to_x86_bytes(int bpf_size)
138 {
139 if (bpf_size == BPF_W)
140 return 4;
141 else if (bpf_size == BPF_H)
142 return 2;
143 else if (bpf_size == BPF_B)
144 return 1;
145 else if (bpf_size == BPF_DW)
146 return 4; /* imm32 */
147 else
148 return 0;
149 }
150
151 /*
152 * List of x86 conditional jump opcodes (. + s8)
153 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
154 */
155 #define X86_JB 0x72
156 #define X86_JAE 0x73
157 #define X86_JE 0x74
158 #define X86_JNE 0x75
159 #define X86_JBE 0x76
160 #define X86_JA 0x77
161 #define X86_JL 0x7C
162 #define X86_JGE 0x7D
163 #define X86_JLE 0x7E
164 #define X86_JG 0x7F
165
166 /* Pick a register outside of BPF range for JIT internal work */
167 #define AUX_REG (MAX_BPF_JIT_REG + 1)
168 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
169 #define X86_REG_R12 (MAX_BPF_JIT_REG + 3)
170
171 /*
172 * The following table maps BPF registers to x86-64 registers.
173 *
174 * x86-64 register R12 is unused, since if used as base address
175 * register in load/store instructions, it always needs an
176 * extra byte of encoding and is callee saved.
177 *
178 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
179 * trampoline. x86-64 register R10 is used for blinding (if enabled).
180 */
181 static const int reg2hex[] = {
182 [BPF_REG_0] = 0, /* RAX */
183 [BPF_REG_1] = 7, /* RDI */
184 [BPF_REG_2] = 6, /* RSI */
185 [BPF_REG_3] = 2, /* RDX */
186 [BPF_REG_4] = 1, /* RCX */
187 [BPF_REG_5] = 0, /* R8 */
188 [BPF_REG_6] = 3, /* RBX callee saved */
189 [BPF_REG_7] = 5, /* R13 callee saved */
190 [BPF_REG_8] = 6, /* R14 callee saved */
191 [BPF_REG_9] = 7, /* R15 callee saved */
192 [BPF_REG_FP] = 5, /* RBP readonly */
193 [BPF_REG_AX] = 2, /* R10 temp register */
194 [AUX_REG] = 3, /* R11 temp register */
195 [X86_REG_R9] = 1, /* R9 register, 6th function argument */
196 [X86_REG_R12] = 4, /* R12 callee saved */
197 };
198
199 static const int reg2pt_regs[] = {
200 [BPF_REG_0] = offsetof(struct pt_regs, ax),
201 [BPF_REG_1] = offsetof(struct pt_regs, di),
202 [BPF_REG_2] = offsetof(struct pt_regs, si),
203 [BPF_REG_3] = offsetof(struct pt_regs, dx),
204 [BPF_REG_4] = offsetof(struct pt_regs, cx),
205 [BPF_REG_5] = offsetof(struct pt_regs, r8),
206 [BPF_REG_6] = offsetof(struct pt_regs, bx),
207 [BPF_REG_7] = offsetof(struct pt_regs, r13),
208 [BPF_REG_8] = offsetof(struct pt_regs, r14),
209 [BPF_REG_9] = offsetof(struct pt_regs, r15),
210 };
211
212 /*
213 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15,
214 * which need an extra byte of encoding (the REX prefix).
215 * rax, rcx, ..., rbp have a simpler encoding.
216 */
217 static bool is_ereg(u32 reg)
218 {
219 return (1 << reg) & (BIT(BPF_REG_5) |
220 BIT(AUX_REG) |
221 BIT(BPF_REG_7) |
222 BIT(BPF_REG_8) |
223 BIT(BPF_REG_9) |
224 BIT(X86_REG_R9) |
225 BIT(X86_REG_R12) |
226 BIT(BPF_REG_AX));
227 }
228
229 /*
230 * is_ereg_8l() == true if BPF register 'reg' is mapped to access the x86-64
231 * lower 8-bit registers dil, sil, bpl, spl, r8b..r15b, which need an extra
232 * byte of encoding. al, cl, dl, bl have a simpler encoding.
233 */
234 static bool is_ereg_8l(u32 reg)
235 {
236 return is_ereg(reg) ||
237 (1 << reg) & (BIT(BPF_REG_1) |
238 BIT(BPF_REG_2) |
239 BIT(BPF_REG_FP));
240 }
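
/*
 * Example: storing the low byte of BPF_REG_1 (rdi) needs a REX prefix even
 * though rdi itself is not an "ereg": without REX, ModRM reg field 7 selects
 * %bh, while a bare 0x40 REX prefix makes it select %dil instead.
 * is_ereg_8l() captures exactly this case for the byte-sized store in
 * emit_stx() below.
 */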
241
242 static bool is_axreg(u32 reg)
243 {
244 return reg == BPF_REG_0;
245 }
246
247 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
248 static u8 add_1mod(u8 byte, u32 reg)
249 {
250 if (is_ereg(reg))
251 byte |= 1;
252 return byte;
253 }
254
255 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
256 {
257 if (is_ereg(r1))
258 byte |= 1;
259 if (is_ereg(r2))
260 byte |= 4;
261 return byte;
262 }
263
264 static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index)
265 {
266 if (is_ereg(r1))
267 byte |= 1;
268 if (is_ereg(index))
269 byte |= 2;
270 if (is_ereg(r2))
271 byte |= 4;
272 return byte;
273 }
274
275 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
276 static u8 add_1reg(u8 byte, u32 dst_reg)
277 {
278 return byte + reg2hex[dst_reg];
279 }
280
281 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
282 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
283 {
284 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
285 }
286
287 /* Some 1-byte opcodes for binary ALU operations */
288 static u8 simple_alu_opcodes[] = {
289 [BPF_ADD] = 0x01,
290 [BPF_SUB] = 0x29,
291 [BPF_AND] = 0x21,
292 [BPF_OR] = 0x09,
293 [BPF_XOR] = 0x31,
294 [BPF_LSH] = 0xE0,
295 [BPF_RSH] = 0xE8,
296 [BPF_ARSH] = 0xF8,
297 };
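
/*
 * Worked example of the encoding helpers above: for BPF_ALU64 | BPF_ADD | BPF_X
 * with dst_reg = BPF_REG_6 (rbx) and src_reg = BPF_REG_7 (r13), do_jit() below
 * emits maybe_emit_mod() followed by
 *
 *	EMIT2(simple_alu_opcodes[BPF_ADD], add_2reg(0xC0, dst_reg, src_reg));
 *
 * add_2mod(0x48, dst, src) yields 0x4C (REX.W plus REX.R for r13),
 * simple_alu_opcodes[BPF_ADD] is 0x01, and add_2reg(0xC0, dst, src) is
 * 0xC0 + reg2hex[BPF_REG_6] + (reg2hex[BPF_REG_7] << 3) = 0xEB, giving the
 * bytes 4c 01 eb, i.e. "add rbx, r13".
 */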
298
299 static void jit_fill_hole(void *area, unsigned int size)
300 {
301 /* Fill whole space with INT3 instructions */
302 memset(area, 0xcc, size);
303 }
304
305 int bpf_arch_text_invalidate(void *dst, size_t len)
306 {
307 return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
308 }
309
310 struct jit_context {
311 int cleanup_addr; /* Epilogue code offset */
312
313 /*
314 * Program specific offsets of labels in the code; these rely on the
315 * JIT doing at least 2 passes, recording the position on the first
316 * pass, only to generate the correct offset on the second pass.
317 */
318 int tail_call_direct_label;
319 int tail_call_indirect_label;
320 };
321
322 /* Maximum number of bytes emitted while JITing one eBPF insn */
323 #define BPF_MAX_INSN_SIZE 128
324 #define BPF_INSN_SAFETY 64
325
326 /* Number of bytes emit_patch() needs to generate instructions */
327 #define X86_PATCH_SIZE 5
328 /* Number of bytes that will be skipped on tailcall */
329 #define X86_TAIL_CALL_OFFSET (12 + ENDBR_INSN_SIZE)
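/*
 * For the common (non-exception-callback) prologue emitted by emit_prologue()
 * below, the 12 fixed bytes skipped by a tail call are:
 *	5 bytes	 nop5 patch area reserved for the BPF trampoline
 *	3 bytes	 "xor rax, rax" (or nop3 for subprogs)
 *	1 byte	 "push rbp"
 *	3 bytes	 "mov rbp, rsp"
 * followed by the second ENDBR that tail calls land on.
 */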
330
331 static void push_r9(u8 **pprog)
332 {
333 u8 *prog = *pprog;
334
335 EMIT2(0x41, 0x51); /* push r9 */
336 *pprog = prog;
337 }
338
339 static void pop_r9(u8 **pprog)
340 {
341 u8 *prog = *pprog;
342
343 EMIT2(0x41, 0x59); /* pop r9 */
344 *pprog = prog;
345 }
346
347 static void push_r12(u8 **pprog)
348 {
349 u8 *prog = *pprog;
350
351 EMIT2(0x41, 0x54); /* push r12 */
352 *pprog = prog;
353 }
354
355 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
356 {
357 u8 *prog = *pprog;
358
359 if (callee_regs_used[0])
360 EMIT1(0x53); /* push rbx */
361 if (callee_regs_used[1])
362 EMIT2(0x41, 0x55); /* push r13 */
363 if (callee_regs_used[2])
364 EMIT2(0x41, 0x56); /* push r14 */
365 if (callee_regs_used[3])
366 EMIT2(0x41, 0x57); /* push r15 */
367 *pprog = prog;
368 }
369
370 static void pop_r12(u8 **pprog)
371 {
372 u8 *prog = *pprog;
373
374 EMIT2(0x41, 0x5C); /* pop r12 */
375 *pprog = prog;
376 }
377
378 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
379 {
380 u8 *prog = *pprog;
381
382 if (callee_regs_used[3])
383 EMIT2(0x41, 0x5F); /* pop r15 */
384 if (callee_regs_used[2])
385 EMIT2(0x41, 0x5E); /* pop r14 */
386 if (callee_regs_used[1])
387 EMIT2(0x41, 0x5D); /* pop r13 */
388 if (callee_regs_used[0])
389 EMIT1(0x5B); /* pop rbx */
390 *pprog = prog;
391 }
392
393 static void emit_nops(u8 **pprog, int len)
394 {
395 u8 *prog = *pprog;
396 int i, noplen;
397
398 while (len > 0) {
399 noplen = len;
400
401 if (noplen > ASM_NOP_MAX)
402 noplen = ASM_NOP_MAX;
403
404 for (i = 0; i < noplen; i++)
405 EMIT1(x86_nops[noplen][i]);
406 len -= noplen;
407 }
408
409 *pprog = prog;
410 }
411
412 /*
413 * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
414 * in arch/x86/kernel/alternative.c
415 */
416 static int emit_call(u8 **prog, void *func, void *ip);
417
418 static void emit_fineibt(u8 **pprog, u8 *ip, u32 hash, int arity)
419 {
420 u8 *prog = *pprog;
421
422 EMIT_ENDBR();
423 EMIT1_off32(0x2d, hash); /* subl $hash, %eax */
424 if (cfi_bhi) {
425 EMIT2(0x2e, 0x2e); /* cs cs */
426 emit_call(&prog, __bhi_args[arity], ip + 11);
427 } else {
428 EMIT3_off32(0x2e, 0x0f, 0x85, 3); /* jne.d32,pn 3 */
429 }
430 EMIT_ENDBR_POISON();
431
432 *pprog = prog;
433 }
434
435 static void emit_kcfi(u8 **pprog, u32 hash)
436 {
437 u8 *prog = *pprog;
438
439 EMIT1_off32(0xb8, hash); /* movl $hash, %eax */
440 #ifdef CONFIG_CALL_PADDING
441 for (int i = 0; i < CONFIG_FUNCTION_PADDING_CFI; i++)
442 EMIT1(0x90);
443 #endif
444 EMIT_ENDBR();
445
446 *pprog = prog;
447 }
448
449 static void emit_cfi(u8 **pprog, u8 *ip, u32 hash, int arity)
450 {
451 u8 *prog = *pprog;
452
453 switch (cfi_mode) {
454 case CFI_FINEIBT:
455 emit_fineibt(&prog, ip, hash, arity);
456 break;
457
458 case CFI_KCFI:
459 emit_kcfi(&prog, hash);
460 break;
461
462 default:
463 EMIT_ENDBR();
464 break;
465 }
466
467 *pprog = prog;
468 }
469
470 static void emit_prologue_tail_call(u8 **pprog, bool is_subprog)
471 {
472 u8 *prog = *pprog;
473
474 if (!is_subprog) {
475 /* cmp rax, MAX_TAIL_CALL_CNT */
476 EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT);
477 EMIT2(X86_JA, 6); /* ja 6 */
478 /* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT.
479 * case1: entry of main prog.
480 * case2: tail callee of main prog.
481 */
482 EMIT1(0x50); /* push rax */
483 /* Make rax as tail_call_cnt_ptr. */
484 EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */
485 EMIT2(0xEB, 1); /* jmp 1 */
486 /* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT.
487 * case: tail callee of subprog.
488 */
489 EMIT1(0x50); /* push rax */
490 /* push tail_call_cnt_ptr */
491 EMIT1(0x50); /* push rax */
492 } else { /* is_subprog */
493 /* rax is tail_call_cnt_ptr. */
494 EMIT1(0x50); /* push rax */
495 EMIT1(0x50); /* push rax */
496 }
497
498 *pprog = prog;
499 }
500
501 /*
502 * Emit x86-64 prologue code for the BPF program.
503 * The bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
504 * when jumping to another program.
505 */
506 static void emit_prologue(u8 **pprog, u8 *ip, u32 stack_depth, bool ebpf_from_cbpf,
507 bool tail_call_reachable, bool is_subprog,
508 bool is_exception_cb)
509 {
510 u8 *prog = *pprog;
511
512 if (is_subprog) {
513 emit_cfi(&prog, ip, cfi_bpf_subprog_hash, 5);
514 } else {
515 emit_cfi(&prog, ip, cfi_bpf_hash, 1);
516 }
517 /* BPF trampoline can be made to work without these nops,
518 * but let's waste 5 bytes for now and optimize later
519 */
520 emit_nops(&prog, X86_PATCH_SIZE);
521 if (!ebpf_from_cbpf) {
522 if (tail_call_reachable && !is_subprog)
523 /* When it's the entry of the whole tailcall context,
524 * zeroing rax means initialising tail_call_cnt.
525 */
526 EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */
527 else
528 /* Keep the same instruction layout. */
529 emit_nops(&prog, 3); /* nop3 */
530 }
531 /* Exception callback receives FP as third parameter */
532 if (is_exception_cb) {
533 EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
534 EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
535 /* The main frame must have exception_boundary as true, so we
536 * first restore those callee-saved regs from stack, before
537 * reusing the stack frame.
538 */
539 pop_callee_regs(&prog, all_callee_regs_used);
540 pop_r12(&prog);
541 /* Reset the stack frame. */
542 EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
543 } else {
544 EMIT1(0x55); /* push rbp */
545 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
546 }
547
548 /* X86_TAIL_CALL_OFFSET is here */
549 EMIT_ENDBR();
550
551 /* sub rsp, rounded_stack_depth */
552 if (stack_depth)
553 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
554 if (tail_call_reachable)
555 emit_prologue_tail_call(&prog, is_subprog);
556 *pprog = prog;
557 }
558
559 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
560 {
561 u8 *prog = *pprog;
562 s64 offset;
563
564 offset = func - (ip + X86_PATCH_SIZE);
565 if (!is_simm32(offset)) {
566 pr_err("Target call %p is out of range\n", func);
567 return -ERANGE;
568 }
569 EMIT1_off32(opcode, offset);
570 *pprog = prog;
571 return 0;
572 }
573
574 static int emit_call(u8 **pprog, void *func, void *ip)
575 {
576 return emit_patch(pprog, func, ip, 0xE8);
577 }
578
579 static int emit_rsb_call(u8 **pprog, void *func, void *ip)
580 {
581 OPTIMIZER_HIDE_VAR(func);
582 ip += x86_call_depth_emit_accounting(pprog, func, ip);
583 return emit_patch(pprog, func, ip, 0xE8);
584 }
585
586 static int emit_jump(u8 **pprog, void *func, void *ip)
587 {
588 return emit_patch(pprog, func, ip, 0xE9);
589 }
590
591 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
592 enum bpf_text_poke_type new_t,
593 void *old_addr, void *new_addr)
594 {
595 const u8 *nop_insn = x86_nops[5];
596 u8 old_insn[X86_PATCH_SIZE];
597 u8 new_insn[X86_PATCH_SIZE];
598 u8 *prog;
599 int ret;
600
601 memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
602 if (old_t != BPF_MOD_NOP && old_addr) {
603 prog = old_insn;
604 ret = old_t == BPF_MOD_CALL ?
605 emit_call(&prog, old_addr, ip) :
606 emit_jump(&prog, old_addr, ip);
607 if (ret)
608 return ret;
609 }
610
611 memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
612 if (new_t != BPF_MOD_NOP && new_addr) {
613 prog = new_insn;
614 ret = new_t == BPF_MOD_CALL ?
615 emit_call(&prog, new_addr, ip) :
616 emit_jump(&prog, new_addr, ip);
617 if (ret)
618 return ret;
619 }
620
621 ret = -EBUSY;
622 mutex_lock(&text_mutex);
623 if (memcmp(ip, old_insn, X86_PATCH_SIZE))
624 goto out;
625 ret = 1;
626 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
627 smp_text_poke_single(ip, new_insn, X86_PATCH_SIZE, NULL);
628 ret = 0;
629 }
630 out:
631 mutex_unlock(&text_mutex);
632 return ret;
633 }
634
635 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
636 enum bpf_text_poke_type new_t, void *old_addr,
637 void *new_addr)
638 {
639 if (!is_kernel_text((long)ip) &&
640 !is_bpf_text_address((long)ip))
641 /* BPF poking in modules is not supported */
642 return -EINVAL;
643
644 /*
645 * See emit_prologue(), for IBT builds the trampoline hook is preceded
646 * with an ENDBR instruction.
647 */
648 if (is_endbr(ip))
649 ip += ENDBR_INSN_SIZE;
650
651 return __bpf_arch_text_poke(ip, old_t, new_t, old_addr, new_addr);
652 }
653
654 #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8)
655
656 static void __emit_indirect_jump(u8 **pprog, int reg, bool ereg)
657 {
658 u8 *prog = *pprog;
659
660 if (ereg)
661 EMIT1(0x41);
662
663 EMIT2(0xFF, 0xE0 + reg);
664
665 *pprog = prog;
666 }
667
668 static void emit_indirect_jump(u8 **pprog, int bpf_reg, u8 *ip)
669 {
670 u8 *prog = *pprog;
671 int reg = reg2hex[bpf_reg];
672 bool ereg = is_ereg(bpf_reg);
673
674 if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
675 OPTIMIZER_HIDE_VAR(reg);
676 emit_jump(&prog, its_static_thunk(reg + 8*ereg), ip);
677 } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
678 EMIT_LFENCE();
679 __emit_indirect_jump(&prog, reg, ereg);
680 } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
681 OPTIMIZER_HIDE_VAR(reg);
682 if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
683 emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg + 8*ereg], ip);
684 else
685 emit_jump(&prog, &__x86_indirect_thunk_array[reg + 8*ereg], ip);
686 } else {
687 __emit_indirect_jump(&prog, reg, ereg);
688 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
689 EMIT1(0xCC); /* int3 */
690 }
691
692 *pprog = prog;
693 }
694
695 static void emit_return(u8 **pprog, u8 *ip)
696 {
697 u8 *prog = *pprog;
698
699 if (cpu_wants_rethunk()) {
700 emit_jump(&prog, x86_return_thunk, ip);
701 } else {
702 EMIT1(0xC3); /* ret */
703 if (IS_ENABLED(CONFIG_MITIGATION_SLS))
704 EMIT1(0xCC); /* int3 */
705 }
706
707 *pprog = prog;
708 }
709
710 #define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (-16 - round_up(stack, 8))
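
/*
 * Stack layout below rbp after emit_prologue() for a tail-call-reachable
 * program (see emit_prologue_tail_call() above):
 *
 *	[rbp - round_up(stack_depth, 8)]	end of the BPF stack region
 *	[rbp - round_up(stack_depth, 8) -  8]	tail_call_cnt (main prog) or
 *						tail_call_cnt_ptr (subprog)
 *	[rbp - round_up(stack_depth, 8) - 16]	tail_call_cnt_ptr
 *
 * which is what the -16 in the macro above accounts for.
 */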
711
712 /*
713 * Generate the following code:
714 *
715 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
716 * if (index >= array->map.max_entries)
717 * goto out;
718 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
719 * goto out;
720 * prog = array->ptrs[index];
721 * if (prog == NULL)
722 * goto out;
723 * goto *(prog->bpf_func + prologue_size);
724 * out:
725 */
726 static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
727 u8 **pprog, bool *callee_regs_used,
728 u32 stack_depth, u8 *ip,
729 struct jit_context *ctx)
730 {
731 int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
732 u8 *prog = *pprog, *start = *pprog;
733 int offset;
734
735 /*
736 * rdi - pointer to ctx
737 * rsi - pointer to bpf_array
738 * rdx - index in bpf_array
739 */
740
741 /*
742 * if (index >= array->map.max_entries)
743 * goto out;
744 */
745 EMIT2(0x89, 0xD2); /* mov edx, edx */
746 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
747 offsetof(struct bpf_array, map.max_entries));
748
749 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
750 EMIT2(X86_JBE, offset); /* jbe out */
751
752 /*
753 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
754 * goto out;
755 */
756 EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
757 EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
758
759 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
760 EMIT2(X86_JAE, offset); /* jae out */
761
762 /* prog = array->ptrs[index]; */
763 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
764 offsetof(struct bpf_array, ptrs));
765
766 /*
767 * if (prog == NULL)
768 * goto out;
769 */
770 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */
771
772 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
773 EMIT2(X86_JE, offset); /* je out */
774
775 /* Inc tail_call_cnt if the slot is populated. */
776 EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */
777
778 if (bpf_prog->aux->exception_boundary) {
779 pop_callee_regs(&prog, all_callee_regs_used);
780 pop_r12(&prog);
781 } else {
782 pop_callee_regs(&prog, callee_regs_used);
783 if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
784 pop_r12(&prog);
785 }
786
787 /* Pop tail_call_cnt_ptr. */
788 EMIT1(0x58); /* pop rax */
789 /* Pop tail_call_cnt, if it's main prog.
790 * Pop tail_call_cnt_ptr, if it's subprog.
791 */
792 EMIT1(0x58); /* pop rax */
793 if (stack_depth)
794 EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
795 round_up(stack_depth, 8));
796
797 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
798 EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */
799 offsetof(struct bpf_prog, bpf_func));
800 EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */
801 X86_TAIL_CALL_OFFSET);
802 /*
803 * Now we're ready to jump into next BPF program
804 * rdi == ctx (1st arg)
805 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
806 */
807 emit_indirect_jump(&prog, BPF_REG_4 /* R4 -> rcx */, ip + (prog - start));
808
809 /* out: */
810 ctx->tail_call_indirect_label = prog - start;
811 *pprog = prog;
812 }
813
814 static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
815 struct bpf_jit_poke_descriptor *poke,
816 u8 **pprog, u8 *ip,
817 bool *callee_regs_used, u32 stack_depth,
818 struct jit_context *ctx)
819 {
820 int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
821 u8 *prog = *pprog, *start = *pprog;
822 int offset;
823
824 /*
825 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
826 * goto out;
827 */
828 EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
829 EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
830
831 offset = ctx->tail_call_direct_label - (prog + 2 - start);
832 EMIT2(X86_JAE, offset); /* jae out */
833
834 poke->tailcall_bypass = ip + (prog - start);
835 poke->adj_off = X86_TAIL_CALL_OFFSET;
836 poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
837 poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
838
839 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
840 poke->tailcall_bypass);
841
842 /* Inc tail_call_cnt if the slot is populated. */
843 EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */
844
845 if (bpf_prog->aux->exception_boundary) {
846 pop_callee_regs(&prog, all_callee_regs_used);
847 pop_r12(&prog);
848 } else {
849 pop_callee_regs(&prog, callee_regs_used);
850 if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
851 pop_r12(&prog);
852 }
853
854 /* Pop tail_call_cnt_ptr. */
855 EMIT1(0x58); /* pop rax */
856 /* Pop tail_call_cnt, if it's main prog.
857 * Pop tail_call_cnt_ptr, if it's subprog.
858 */
859 EMIT1(0x58); /* pop rax */
860 if (stack_depth)
861 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
862
863 emit_nops(&prog, X86_PATCH_SIZE);
864
865 /* out: */
866 ctx->tail_call_direct_label = prog - start;
867
868 *pprog = prog;
869 }
870
871 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
872 {
873 struct bpf_jit_poke_descriptor *poke;
874 struct bpf_array *array;
875 struct bpf_prog *target;
876 int i, ret;
877
878 for (i = 0; i < prog->aux->size_poke_tab; i++) {
879 poke = &prog->aux->poke_tab[i];
880 if (poke->aux && poke->aux != prog->aux)
881 continue;
882
883 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
884
885 if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
886 continue;
887
888 array = container_of(poke->tail_call.map, struct bpf_array, map);
889 mutex_lock(&array->aux->poke_mutex);
890 target = array->ptrs[poke->tail_call.key];
891 if (target) {
892 ret = __bpf_arch_text_poke(poke->tailcall_target,
893 BPF_MOD_NOP, BPF_MOD_JUMP,
894 NULL,
895 (u8 *)target->bpf_func +
896 poke->adj_off);
897 BUG_ON(ret < 0);
898 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
899 BPF_MOD_JUMP, BPF_MOD_NOP,
900 (u8 *)poke->tailcall_target +
901 X86_PATCH_SIZE, NULL);
902 BUG_ON(ret < 0);
903 }
904 WRITE_ONCE(poke->tailcall_target_stable, true);
905 mutex_unlock(&array->aux->poke_mutex);
906 }
907 }
908
909 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
910 u32 dst_reg, const u32 imm32)
911 {
912 u8 *prog = *pprog;
913 u8 b1, b2, b3;
914
915 /*
916 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
917 * (which zero-extends imm32) to save 2 bytes.
918 */
919 if (sign_propagate && (s32)imm32 < 0) {
920 /* 'mov %rax, imm32' sign extends imm32 */
921 b1 = add_1mod(0x48, dst_reg);
922 b2 = 0xC7;
923 b3 = 0xC0;
924 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
925 goto done;
926 }
927
928 /*
929 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
930 * to save 3 bytes.
931 */
932 if (imm32 == 0) {
933 if (is_ereg(dst_reg))
934 EMIT1(add_2mod(0x40, dst_reg, dst_reg));
935 b2 = 0x31; /* xor */
936 b3 = 0xC0;
937 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
938 goto done;
939 }
940
941 /* mov %eax, imm32 */
942 if (is_ereg(dst_reg))
943 EMIT1(add_1mod(0x40, dst_reg));
944 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
945 done:
946 *pprog = prog;
947 }
948
949 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
950 const u32 imm32_hi, const u32 imm32_lo)
951 {
952 u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo;
953 u8 *prog = *pprog;
954
955 if (is_uimm32(imm64)) {
956 /*
957 * For emitting a plain u32, where the sign bit must not be
958 * propagated, LLVM tends to load imm64 over mov32
959 * directly, so save a couple of bytes by just doing
960 * 'mov %eax, imm32' instead.
961 */
962 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
963 } else if (is_simm32(imm64)) {
964 emit_mov_imm32(&prog, true, dst_reg, imm32_lo);
965 } else {
966 /* movabsq rax, imm64 */
967 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
968 EMIT(imm32_lo, 4);
969 EMIT(imm32_hi, 4);
970 }
971
972 *pprog = prog;
973 }
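
/*
 * Example of the three encodings chosen above for BPF_REG_0 (rax):
 *	imm64 = 0x1:		b8 01 00 00 00			(5-byte mov eax, 1)
 *	imm64 = -1:		48 c7 c0 ff ff ff ff		(7-byte sign-extended mov)
 *	imm64 = 0x100000000:	48 b8 00 00 00 00 01 00 00 00	(10-byte movabs)
 */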
974
975 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
976 {
977 u8 *prog = *pprog;
978
979 if (is64) {
980 /* mov dst, src */
981 EMIT_mov(dst_reg, src_reg);
982 } else {
983 /* mov32 dst, src */
984 if (is_ereg(dst_reg) || is_ereg(src_reg))
985 EMIT1(add_2mod(0x40, dst_reg, src_reg));
986 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
987 }
988
989 *pprog = prog;
990 }
991
992 static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
993 u32 src_reg)
994 {
995 u8 *prog = *pprog;
996
997 if (is64) {
998 /* movs[b,w,l]q dst, src */
999 if (num_bits == 8)
1000 EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
1001 add_2reg(0xC0, src_reg, dst_reg));
1002 else if (num_bits == 16)
1003 EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
1004 add_2reg(0xC0, src_reg, dst_reg));
1005 else if (num_bits == 32)
1006 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
1007 add_2reg(0xC0, src_reg, dst_reg));
1008 } else {
1009 /* movs[b,w]l dst, src */
1010 if (num_bits == 8) {
1011 EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
1012 add_2reg(0xC0, src_reg, dst_reg));
1013 } else if (num_bits == 16) {
1014 if (is_ereg(dst_reg) || is_ereg(src_reg))
1015 EMIT1(add_2mod(0x40, src_reg, dst_reg));
1016 EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
1017 add_2reg(0xC0, src_reg, dst_reg));
1018 }
1019 }
1020
1021 *pprog = prog;
1022 }
1023
1024 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
1025 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
1026 {
1027 u8 *prog = *pprog;
1028
1029 if (is_imm8(off)) {
1030 /* 1-byte signed displacement.
1031 *
1032 * If off == 0 we could skip this and save one extra byte, but the
1033 * special case of x86 R13, which always needs an offset, is not
1034 * worth the hassle
1035 */
1036 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
1037 } else {
1038 /* 4-byte signed displacement */
1039 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
1040 }
1041 *pprog = prog;
1042 }
1043
1044 static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off)
1045 {
1046 u8 *prog = *pprog;
1047
1048 if (is_imm8(off)) {
1049 EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
1050 } else {
1051 EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
1052 }
1053 *pprog = prog;
1054 }
1055
1056 /*
1057 * Emit a REX byte if it will be necessary to address these registers
1058 */
1059 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
1060 {
1061 u8 *prog = *pprog;
1062
1063 if (is64)
1064 EMIT1(add_2mod(0x48, dst_reg, src_reg));
1065 else if (is_ereg(dst_reg) || is_ereg(src_reg))
1066 EMIT1(add_2mod(0x40, dst_reg, src_reg));
1067 *pprog = prog;
1068 }
1069
1070 /*
1071 * Similar version of maybe_emit_mod() for a single register
1072 */
1073 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
1074 {
1075 u8 *prog = *pprog;
1076
1077 if (is64)
1078 EMIT1(add_1mod(0x48, reg));
1079 else if (is_ereg(reg))
1080 EMIT1(add_1mod(0x40, reg));
1081 *pprog = prog;
1082 }
1083
1084 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1085 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1086 {
1087 u8 *prog = *pprog;
1088
1089 switch (size) {
1090 case BPF_B:
1091 /* Emit 'movzx rax, byte ptr [rax + off]' */
1092 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
1093 break;
1094 case BPF_H:
1095 /* Emit 'movzx rax, word ptr [rax + off]' */
1096 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
1097 break;
1098 case BPF_W:
1099 /* Emit 'mov eax, dword ptr [rax+0x14]' */
1100 if (is_ereg(dst_reg) || is_ereg(src_reg))
1101 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
1102 else
1103 EMIT1(0x8B);
1104 break;
1105 case BPF_DW:
1106 /* Emit 'mov rax, qword ptr [rax+0x14]' */
1107 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
1108 break;
1109 }
1110 emit_insn_suffix(&prog, src_reg, dst_reg, off);
1111 *pprog = prog;
1112 }
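
/*
 * Example: emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_1, 8) produces
 * 48 8b 47 08, i.e. "mov rax, qword ptr [rdi + 8]": REX.W from add_2mod(),
 * opcode 0x8B, and the disp8 form 0x47/0x08 from emit_insn_suffix().
 */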
1113
1114 /* LDSX: dst_reg = *(s8*)(src_reg + off) */
1115 static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1116 {
1117 u8 *prog = *pprog;
1118
1119 switch (size) {
1120 case BPF_B:
1121 /* Emit 'movsx rax, byte ptr [rax + off]' */
1122 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
1123 break;
1124 case BPF_H:
1125 /* Emit 'movsx rax, word ptr [rax + off]' */
1126 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
1127 break;
1128 case BPF_W:
1129 /* Emit 'movsx rax, dword ptr [rax+0x14]' */
1130 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
1131 break;
1132 }
1133 emit_insn_suffix(&prog, src_reg, dst_reg, off);
1134 *pprog = prog;
1135 }
1136
1137 static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1138 {
1139 u8 *prog = *pprog;
1140
1141 switch (size) {
1142 case BPF_B:
1143 /* movzx rax, byte ptr [rax + r12 + off] */
1144 EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6);
1145 break;
1146 case BPF_H:
1147 /* movzx rax, word ptr [rax + r12 + off] */
1148 EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7);
1149 break;
1150 case BPF_W:
1151 /* mov eax, dword ptr [rax + r12 + off] */
1152 EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B);
1153 break;
1154 case BPF_DW:
1155 /* mov rax, qword ptr [rax + r12 + off] */
1156 EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B);
1157 break;
1158 }
1159 emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
1160 *pprog = prog;
1161 }
1162
1163 static void emit_ldsx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1164 {
1165 u8 *prog = *pprog;
1166
1167 switch (size) {
1168 case BPF_B:
1169 /* movsx rax, byte ptr [rax + r12 + off] */
1170 EMIT3(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x0F, 0xBE);
1171 break;
1172 case BPF_H:
1173 /* movsx rax, word ptr [rax + r12 + off] */
1174 EMIT3(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x0F, 0xBF);
1175 break;
1176 case BPF_W:
1177 /* movsx rax, dword ptr [rax + r12 + off] */
1178 EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x63);
1179 break;
1180 }
1181 emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
1182 *pprog = prog;
1183 }
1184
1185 static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1186 {
1187 emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
1188 }
1189
1190 static void emit_ldsx_r12(u8 **prog, u32 size, u32 dst_reg, u32 src_reg, int off)
1191 {
1192 emit_ldsx_index(prog, size, dst_reg, src_reg, X86_REG_R12, off);
1193 }
1194
1195 /* STX: *(u8*)(dst_reg + off) = src_reg */
1196 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1197 {
1198 u8 *prog = *pprog;
1199
1200 switch (size) {
1201 case BPF_B:
1202 /* Emit 'mov byte ptr [rax + off], al' */
1203 if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
1204 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
1205 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
1206 else
1207 EMIT1(0x88);
1208 break;
1209 case BPF_H:
1210 if (is_ereg(dst_reg) || is_ereg(src_reg))
1211 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
1212 else
1213 EMIT2(0x66, 0x89);
1214 break;
1215 case BPF_W:
1216 if (is_ereg(dst_reg) || is_ereg(src_reg))
1217 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
1218 else
1219 EMIT1(0x89);
1220 break;
1221 case BPF_DW:
1222 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
1223 break;
1224 }
1225 emit_insn_suffix(&prog, dst_reg, src_reg, off);
1226 *pprog = prog;
1227 }
1228
1229 /* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */
1230 static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1231 {
1232 u8 *prog = *pprog;
1233
1234 switch (size) {
1235 case BPF_B:
1236 /* mov byte ptr [rax + r12 + off], al */
1237 EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88);
1238 break;
1239 case BPF_H:
1240 /* mov word ptr [rax + r12 + off], ax */
1241 EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
1242 break;
1243 case BPF_W:
1244 /* mov dword ptr [rax + r12 + off], eax */
1245 EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
1246 break;
1247 case BPF_DW:
1248 /* mov qword ptr [rax + r12 + off], rax */
1249 EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89);
1250 break;
1251 }
1252 emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1253 *pprog = prog;
1254 }
1255
1256 static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1257 {
1258 emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
1259 }
1260
1261 /* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */
1262 static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm)
1263 {
1264 u8 *prog = *pprog;
1265
1266 switch (size) {
1267 case BPF_B:
1268 /* mov byte ptr [rax + r12 + off], imm8 */
1269 EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6);
1270 break;
1271 case BPF_H:
1272 /* mov word ptr [rax + r12 + off], imm16 */
1273 EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
1274 break;
1275 case BPF_W:
1276 /* mov dword ptr [rax + r12 + off], imm32 */
1277 EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
1278 break;
1279 case BPF_DW:
1280 /* mov qword ptr [rax + r12 + off], imm32 */
1281 EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7);
1282 break;
1283 }
1284 emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off);
1285 EMIT(imm, bpf_size_to_x86_bytes(size));
1286 *pprog = prog;
1287 }
1288
1289 static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
1290 {
1291 emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
1292 }
1293
1294 static void emit_store_stack_imm64(u8 **pprog, int reg, int stack_off, u64 imm64)
1295 {
1296 /*
1297 * mov reg, imm64
1298 * mov QWORD PTR [rbp + stack_off], reg
1299 */
1300 emit_mov_imm64(pprog, reg, imm64 >> 32, (u32) imm64);
1301 emit_stx(pprog, BPF_DW, BPF_REG_FP, reg, stack_off);
1302 }
1303
1304 static int emit_atomic_rmw(u8 **pprog, u32 atomic_op,
1305 u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
1306 {
1307 u8 *prog = *pprog;
1308
1309 if (atomic_op != BPF_XCHG)
1310 EMIT1(0xF0); /* lock prefix */
1311
1312 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
1313
1314 /* emit opcode */
1315 switch (atomic_op) {
1316 case BPF_ADD:
1317 case BPF_AND:
1318 case BPF_OR:
1319 case BPF_XOR:
1320 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
1321 EMIT1(simple_alu_opcodes[atomic_op]);
1322 break;
1323 case BPF_ADD | BPF_FETCH:
1324 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
1325 EMIT2(0x0F, 0xC1);
1326 break;
1327 case BPF_XCHG:
1328 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
1329 EMIT1(0x87);
1330 break;
1331 case BPF_CMPXCHG:
1332 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
1333 EMIT2(0x0F, 0xB1);
1334 break;
1335 default:
1336 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1337 return -EFAULT;
1338 }
1339
1340 emit_insn_suffix(&prog, dst_reg, src_reg, off);
1341
1342 *pprog = prog;
1343 return 0;
1344 }
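
/*
 * Example: a BPF_ADD | BPF_DW atomic with dst_reg = BPF_REG_1 (rdi),
 * src_reg = BPF_REG_0 (rax) and off = 0 becomes
 * f0 48 01 47 00, i.e. "lock add qword ptr [rdi + 0], rax".
 */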
1345
1346 static int emit_atomic_rmw_index(u8 **pprog, u32 atomic_op, u32 size,
1347 u32 dst_reg, u32 src_reg, u32 index_reg,
1348 int off)
1349 {
1350 u8 *prog = *pprog;
1351
1352 if (atomic_op != BPF_XCHG)
1353 EMIT1(0xF0); /* lock prefix */
1354
1355 switch (size) {
1356 case BPF_W:
1357 EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg));
1358 break;
1359 case BPF_DW:
1360 EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
1361 break;
1362 default:
1363 pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
1364 return -EFAULT;
1365 }
1366
1367 /* emit opcode */
1368 switch (atomic_op) {
1369 case BPF_ADD:
1370 case BPF_AND:
1371 case BPF_OR:
1372 case BPF_XOR:
1373 /* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */
1374 EMIT1(simple_alu_opcodes[atomic_op]);
1375 break;
1376 case BPF_ADD | BPF_FETCH:
1377 /* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */
1378 EMIT2(0x0F, 0xC1);
1379 break;
1380 case BPF_XCHG:
1381 /* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */
1382 EMIT1(0x87);
1383 break;
1384 case BPF_CMPXCHG:
1385 /* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */
1386 EMIT2(0x0F, 0xB1);
1387 break;
1388 default:
1389 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1390 return -EFAULT;
1391 }
1392 emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1393 *pprog = prog;
1394 return 0;
1395 }
1396
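/*
 * BPF_LOAD_ACQ and BPF_STORE_REL can be emitted as plain loads and stores:
 * under the x86-64 TSO memory model a regular MOV load already has acquire
 * semantics and a regular MOV store already has release semantics, so no
 * extra fencing is needed here.
 */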
1397 static int emit_atomic_ld_st(u8 **pprog, u32 atomic_op, u32 dst_reg,
1398 u32 src_reg, s16 off, u8 bpf_size)
1399 {
1400 switch (atomic_op) {
1401 case BPF_LOAD_ACQ:
1402 /* dst_reg = smp_load_acquire(src_reg + off16) */
1403 emit_ldx(pprog, bpf_size, dst_reg, src_reg, off);
1404 break;
1405 case BPF_STORE_REL:
1406 /* smp_store_release(dst_reg + off16, src_reg) */
1407 emit_stx(pprog, bpf_size, dst_reg, src_reg, off);
1408 break;
1409 default:
1410 pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
1411 atomic_op);
1412 return -EFAULT;
1413 }
1414
1415 return 0;
1416 }
1417
1418 static int emit_atomic_ld_st_index(u8 **pprog, u32 atomic_op, u32 size,
1419 u32 dst_reg, u32 src_reg, u32 index_reg,
1420 int off)
1421 {
1422 switch (atomic_op) {
1423 case BPF_LOAD_ACQ:
1424 /* dst_reg = smp_load_acquire(src_reg + idx_reg + off16) */
1425 emit_ldx_index(pprog, size, dst_reg, src_reg, index_reg, off);
1426 break;
1427 case BPF_STORE_REL:
1428 /* smp_store_release(dst_reg + idx_reg + off16, src_reg) */
1429 emit_stx_index(pprog, size, dst_reg, src_reg, index_reg, off);
1430 break;
1431 default:
1432 pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
1433 atomic_op);
1434 return -EFAULT;
1435 }
1436
1437 return 0;
1438 }
1439
1440 /*
1441 * Metadata encoding for exception handling in JITed code.
1442 *
1443 * Format of `fixup` and `data` fields in `struct exception_table_entry`:
1444 *
1445 * Bit layout of `fixup` (32-bit):
1446 *
1447 * +-----------+--------+-----------+---------+----------+
1448 * | 31 | 30-24 | 23-16 | 15-8 | 7-0 |
1449 * | | | | | |
1450 * | ARENA_ACC | Unused | ARENA_REG | DST_REG | INSN_LEN |
1451 * +-----------+--------+-----------+---------+----------+
1452 *
1453 * - INSN_LEN (8 bits): Length of faulting insn (max x86 insn = 15 bytes (fits in 8 bits)).
1454 * - DST_REG (8 bits): Offset of dst_reg from reg2pt_regs[] (max offset = 112 (fits in 8 bits)).
1455 * This is set to DONT_CLEAR if the insn is a store.
1456 * - ARENA_REG (8 bits): Offset of the register that is used to calculate the
1457 * address for load/store when accessing the arena region.
1458 * - ARENA_ACCESS (1 bit): This bit is set when the faulting instruction accessed the arena region.
1459 *
1460 * Bit layout of `data` (32-bit):
1461 *
1462 * +--------------+--------+--------------+
1463 * | 31-16 | 15-8 | 7-0 |
1464 * | | | |
1465 * | ARENA_OFFSET | Unused | EX_TYPE_BPF |
1466 * +--------------+--------+--------------+
1467 *
1468 * - ARENA_OFFSET (16 bits): Offset used to calculate the address for load/store when
1469 * accessing the arena region.
1470 */
1471
1472 #define DONT_CLEAR 1
1473 #define FIXUP_INSN_LEN_MASK GENMASK(7, 0)
1474 #define FIXUP_REG_MASK GENMASK(15, 8)
1475 #define FIXUP_ARENA_REG_MASK GENMASK(23, 16)
1476 #define FIXUP_ARENA_ACCESS BIT(31)
1477 #define DATA_ARENA_OFFSET_MASK GENMASK(31, 16)
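
/*
 * For illustration only (the actual packing is done later in the JIT; this
 * is just a sketch of the layout described above): a faulting 8-byte arena
 * load into BPF_REG_0 whose JITed instruction is 5 bytes long and whose
 * address was formed from BPF_REG_2 could be described roughly as
 *
 *	fixup = FIELD_PREP(FIXUP_INSN_LEN_MASK, 5) |
 *		FIELD_PREP(FIXUP_REG_MASK, offsetof(struct pt_regs, ax)) |
 *		FIELD_PREP(FIXUP_ARENA_REG_MASK, offsetof(struct pt_regs, si)) |
 *		FIXUP_ARENA_ACCESS;
 *	data  = FIELD_PREP(DATA_ARENA_OFFSET_MASK, insn->off) | EX_TYPE_BPF;
 *
 * ex_handler_bpf() below then recomputes the faulting address, reports the
 * arena violation, clears regs->ax and skips the 5-byte instruction.
 */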
1478
1479 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
1480 {
1481 u32 reg = FIELD_GET(FIXUP_REG_MASK, x->fixup);
1482 u32 insn_len = FIELD_GET(FIXUP_INSN_LEN_MASK, x->fixup);
1483 bool is_arena = !!(x->fixup & FIXUP_ARENA_ACCESS);
1484 bool is_write = (reg == DONT_CLEAR);
1485 unsigned long addr;
1486 s16 off;
1487 u32 arena_reg;
1488
1489 if (is_arena) {
1490 arena_reg = FIELD_GET(FIXUP_ARENA_REG_MASK, x->fixup);
1491 off = FIELD_GET(DATA_ARENA_OFFSET_MASK, x->data);
1492 addr = *(unsigned long *)((void *)regs + arena_reg) + off;
1493 bpf_prog_report_arena_violation(is_write, addr, regs->ip);
1494 }
1495
1496 /* jump over faulting load and clear dest register */
1497 if (reg != DONT_CLEAR)
1498 *(unsigned long *)((void *)regs + reg) = 0;
1499 regs->ip += insn_len;
1500
1501 return true;
1502 }
1503
1504 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
1505 bool *regs_used)
1506 {
1507 int i;
1508
1509 for (i = 1; i <= insn_cnt; i++, insn++) {
1510 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
1511 regs_used[0] = true;
1512 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
1513 regs_used[1] = true;
1514 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
1515 regs_used[2] = true;
1516 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
1517 regs_used[3] = true;
1518 }
1519 }
1520
1521 /* emit the 3-byte VEX prefix
1522 *
1523 * r: same as rex.r, extra bit for ModRM reg field
1524 * x: same as rex.x, extra bit for SIB index field
1525 * b: same as rex.b, extra bit for ModRM r/m, or SIB base
1526 * m: opcode map select, encoding escape bytes e.g. 0x0f38
1527 * w: same as rex.w (32 bit or 64 bit) or opcode specific
1528 * src_reg2: additional source reg (encoded as BPF reg)
1529 * l: vector length (128 bit or 256 bit) or reserved
1530 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
1531 */
1532 static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
1533 bool w, u8 src_reg2, bool l, u8 pp)
1534 {
1535 u8 *prog = *pprog;
1536 const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
1537 u8 b1, b2;
1538 u8 vvvv = reg2hex[src_reg2];
1539
1540 /* reg2hex gives only the lower 3 bits of vvvv */
1541 if (is_ereg(src_reg2))
1542 vvvv |= 1 << 3;
1543
1544 /*
1545 * 2nd byte of 3-byte VEX prefix
1546 * ~ means bit inverted encoding
1547 *
1548 * 7 0
1549 * +---+---+---+---+---+---+---+---+
1550 * |~R |~X |~B | m |
1551 * +---+---+---+---+---+---+---+---+
1552 */
1553 b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
1554 /*
1555 * 3rd byte of 3-byte VEX prefix
1556 *
1557 * 7 0
1558 * +---+---+---+---+---+---+---+---+
1559 * | W | ~vvvv | L | pp |
1560 * +---+---+---+---+---+---+---+---+
1561 */
1562 b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
1563
1564 EMIT3(b0, b1, b2);
1565 *pprog = prog;
1566 }
1567
1568 /* emit BMI2 shift instruction */
1569 static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
1570 {
1571 u8 *prog = *pprog;
1572 bool r = is_ereg(dst_reg);
1573 u8 m = 2; /* escape code 0f38 */
1574
1575 emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
1576 EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
1577 *pprog = prog;
1578 }
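
/*
 * Example: emit_shiftx(&prog, BPF_REG_0, BPF_REG_4, true, 1), where op = 1
 * selects the 0x66 prefix (SHLX), emits c4 e2 f1 f7 c0, i.e.
 * "shlx rax, rax, rcx" (VEX.66.0F38.W1 F7 /r with rcx carried in the
 * inverted vvvv field of the third VEX byte).
 */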
1579
1580 static void emit_priv_frame_ptr(u8 **pprog, void __percpu *priv_frame_ptr)
1581 {
1582 u8 *prog = *pprog;
1583
1584 /* movabs r9, priv_frame_ptr */
1585 emit_mov_imm64(&prog, X86_REG_R9, (__force long) priv_frame_ptr >> 32,
1586 (u32) (__force long) priv_frame_ptr);
1587
1588 #ifdef CONFIG_SMP
1589 /* add <r9>, gs:[<off>] */
1590 EMIT2(0x65, 0x4c);
1591 EMIT3(0x03, 0x0c, 0x25);
1592 EMIT((u32)(unsigned long)&this_cpu_off, 4);
1593 #endif
1594
1595 *pprog = prog;
1596 }
1597
1598 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
1599
1600 #define __LOAD_TCC_PTR(off) \
1601 EMIT3_off32(0x48, 0x8B, 0x85, off)
1602 /* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */
1603 #define LOAD_TAIL_CALL_CNT_PTR(stack) \
1604 __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))
1605
1606 /* Guard size/value used to detect private stack overflow/underflow */
1607 #define PRIV_STACK_GUARD_SZ 8
1608 #define PRIV_STACK_GUARD_VAL 0xEB9F12345678eb9fULL
1609
1610 static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip,
1611 struct bpf_prog *bpf_prog)
1612 {
1613 u8 *prog = *pprog;
1614 u8 *func;
1615
1616 if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) {
1617 /* The clearing sequence clobbers eax and ecx. */
1618 EMIT1(0x50); /* push rax */
1619 EMIT1(0x51); /* push rcx */
1620 ip += 2;
1621
1622 func = (u8 *)clear_bhb_loop;
1623 ip += x86_call_depth_emit_accounting(&prog, func, ip);
1624
1625 if (emit_call(&prog, func, ip))
1626 return -EINVAL;
1627 EMIT1(0x59); /* pop rcx */
1628 EMIT1(0x58); /* pop rax */
1629 }
1630 /* Insert IBHF instruction */
1631 if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) &&
1632 cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) ||
1633 cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) {
1634 /*
1635 * Add an Indirect Branch History Fence (IBHF). IBHF acts as a
1636 * fence preventing branch history from before the fence from
1637 * affecting indirect branches after the fence. This is
1638 * specifically used in cBPF jitted code to prevent Intra-mode
1639 * BHI attacks. The IBHF instruction is designed to be a NOP on
1640 * hardware that doesn't need or support it. The REP and REX.W
1641 * prefixes are required by the microcode, and they also ensure
1642 * that the NOP is unlikely to be used in existing code.
1643 *
1644 * IBHF is not a valid instruction in 32-bit mode.
1645 */
1646 EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */
1647 }
1648 *pprog = prog;
1649 return 0;
1650 }
1651
1652 static int do_jit(struct bpf_verifier_env *env, struct bpf_prog *bpf_prog, int *addrs, u8 *image,
1653 u8 *rw_image, int oldproglen, struct jit_context *ctx, bool jmp_padding)
1654 {
1655 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
1656 struct bpf_insn *insn = bpf_prog->insnsi;
1657 bool callee_regs_used[4] = {};
1658 int insn_cnt = bpf_prog->len;
1659 bool seen_exit = false;
1660 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
1661 void __percpu *priv_frame_ptr = NULL;
1662 u64 arena_vm_start, user_vm_start;
1663 void __percpu *priv_stack_ptr;
1664 int i, excnt = 0;
1665 int ilen, proglen = 0;
1666 u8 *ip, *prog = temp;
1667 u32 stack_depth;
1668 int err;
1669
1670 stack_depth = bpf_prog->aux->stack_depth;
1671 priv_stack_ptr = bpf_prog->aux->priv_stack_ptr;
1672 if (priv_stack_ptr) {
1673 priv_frame_ptr = priv_stack_ptr + PRIV_STACK_GUARD_SZ + round_up(stack_depth, 8);
1674 stack_depth = 0;
1675 }
1676
1677 arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
1678 user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);
1679
1680 detect_reg_usage(insn, insn_cnt, callee_regs_used);
1681
1682 emit_prologue(&prog, image, stack_depth,
1683 bpf_prog_was_classic(bpf_prog), tail_call_reachable,
1684 bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
1685
1686 bpf_prog->aux->ksym.fp_start = prog - temp;
1687
1688 /* Exception callback will clobber callee regs for its own use, and
1689 * restore the original callee regs from main prog's stack frame.
1690 */
1691 if (bpf_prog->aux->exception_boundary) {
1692 /* We also need to save r12, which is not mapped to any BPF
1693 * register, as we throw after entry into the kernel, which may
1694 * overwrite r12.
1695 */
1696 push_r12(&prog);
1697 push_callee_regs(&prog, all_callee_regs_used);
1698 } else {
1699 if (arena_vm_start)
1700 push_r12(&prog);
1701 push_callee_regs(&prog, callee_regs_used);
1702 }
1703 if (arena_vm_start)
1704 emit_mov_imm64(&prog, X86_REG_R12,
1705 arena_vm_start >> 32, (u32) arena_vm_start);
1706
1707 if (priv_frame_ptr)
1708 emit_priv_frame_ptr(&prog, priv_frame_ptr);
1709
1710 ilen = prog - temp;
1711 if (rw_image)
1712 memcpy(rw_image + proglen, temp, ilen);
1713 proglen += ilen;
1714 addrs[0] = proglen;
1715 prog = temp;
1716
1717 for (i = 1; i <= insn_cnt; i++, insn++) {
1718 const s32 imm32 = insn->imm;
1719 u32 dst_reg = insn->dst_reg;
1720 u32 src_reg = insn->src_reg;
1721 u8 b2 = 0, b3 = 0;
1722 u8 *start_of_ldx;
1723 s64 jmp_offset;
1724 s16 insn_off;
1725 u8 jmp_cond;
1726 u8 *func;
1727 int nops;
1728
1729 if (priv_frame_ptr) {
1730 if (src_reg == BPF_REG_FP)
1731 src_reg = X86_REG_R9;
1732
1733 if (dst_reg == BPF_REG_FP)
1734 dst_reg = X86_REG_R9;
1735 }
1736
1737 if (bpf_insn_is_indirect_target(env, bpf_prog, i - 1))
1738 EMIT_ENDBR();
1739
1740 ip = image + addrs[i - 1] + (prog - temp);
1741
1742 switch (insn->code) {
1743 /* ALU */
1744 case BPF_ALU | BPF_ADD | BPF_X:
1745 case BPF_ALU | BPF_SUB | BPF_X:
1746 case BPF_ALU | BPF_AND | BPF_X:
1747 case BPF_ALU | BPF_OR | BPF_X:
1748 case BPF_ALU | BPF_XOR | BPF_X:
1749 case BPF_ALU64 | BPF_ADD | BPF_X:
1750 case BPF_ALU64 | BPF_SUB | BPF_X:
1751 case BPF_ALU64 | BPF_AND | BPF_X:
1752 case BPF_ALU64 | BPF_OR | BPF_X:
1753 case BPF_ALU64 | BPF_XOR | BPF_X:
1754 maybe_emit_mod(&prog, dst_reg, src_reg,
1755 BPF_CLASS(insn->code) == BPF_ALU64);
1756 b2 = simple_alu_opcodes[BPF_OP(insn->code)];
1757 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
1758 break;
1759
1760 case BPF_ALU64 | BPF_MOV | BPF_X:
1761 if (insn_is_cast_user(insn)) {
1762 if (dst_reg != src_reg)
1763 /* 32-bit mov */
1764 emit_mov_reg(&prog, false, dst_reg, src_reg);
1765 /* shl dst_reg, 32 */
1766 maybe_emit_1mod(&prog, dst_reg, true);
1767 EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32);
1768
1769 /* or dst_reg, user_vm_start */
1770 maybe_emit_1mod(&prog, dst_reg, true);
1771 if (is_axreg(dst_reg))
1772 EMIT1_off32(0x0D, user_vm_start >> 32);
1773 else
1774 EMIT2_off32(0x81, add_1reg(0xC8, dst_reg), user_vm_start >> 32);
1775
1776 /* rol dst_reg, 32 */
1777 maybe_emit_1mod(&prog, dst_reg, true);
1778 EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32);
1779
1780 /* xor r11, r11 */
1781 EMIT3(0x4D, 0x31, 0xDB);
1782
1783 			/* test dst_reg32, dst_reg32; check if the lower 32 bits are zero */
1784 maybe_emit_mod(&prog, dst_reg, dst_reg, false);
1785 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1786
1787 /* cmove r11, dst_reg; if so, set dst_reg to zero */
1788 /* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */
1789 maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
1790 EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
1791 break;
1792 } else if (insn_is_mov_percpu_addr(insn)) {
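			/*
			 * Resolve a per-CPU symbol address in src to this
			 * CPU's copy: copy it into dst and add the per-CPU
			 * base from gs:this_cpu_off. On !SMP there is no
			 * per-CPU offset, so the mov alone is enough.
			 */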
1793 /* mov <dst>, <src> (if necessary) */
1794 EMIT_mov(dst_reg, src_reg);
1795 #ifdef CONFIG_SMP
1796 /* add <dst>, gs:[<off>] */
1797 EMIT2(0x65, add_1mod(0x48, dst_reg));
1798 EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25);
1799 EMIT((u32)(unsigned long)&this_cpu_off, 4);
1800 #endif
1801 break;
1802 }
1803 fallthrough;
1804 case BPF_ALU | BPF_MOV | BPF_X:
1805 if (insn->off == 0)
1806 emit_mov_reg(&prog,
1807 BPF_CLASS(insn->code) == BPF_ALU64,
1808 dst_reg, src_reg);
1809 else
1810 emit_movsx_reg(&prog, insn->off,
1811 BPF_CLASS(insn->code) == BPF_ALU64,
1812 dst_reg, src_reg);
1813 break;
1814
1815 /* neg dst */
1816 case BPF_ALU | BPF_NEG:
1817 case BPF_ALU64 | BPF_NEG:
1818 maybe_emit_1mod(&prog, dst_reg,
1819 BPF_CLASS(insn->code) == BPF_ALU64);
1820 EMIT2(0xF7, add_1reg(0xD8, dst_reg));
1821 break;
1822
1823 case BPF_ALU | BPF_ADD | BPF_K:
1824 case BPF_ALU | BPF_SUB | BPF_K:
1825 case BPF_ALU | BPF_AND | BPF_K:
1826 case BPF_ALU | BPF_OR | BPF_K:
1827 case BPF_ALU | BPF_XOR | BPF_K:
1828 case BPF_ALU64 | BPF_ADD | BPF_K:
1829 case BPF_ALU64 | BPF_SUB | BPF_K:
1830 case BPF_ALU64 | BPF_AND | BPF_K:
1831 case BPF_ALU64 | BPF_OR | BPF_K:
1832 case BPF_ALU64 | BPF_XOR | BPF_K:
1833 maybe_emit_1mod(&prog, dst_reg,
1834 BPF_CLASS(insn->code) == BPF_ALU64);
1835
1836 /*
1837 			 * b3 holds the 'normal' opcode; b2 is the short form,
1838 			 * only valid when dst is eax/rax.
1839 */
1840 switch (BPF_OP(insn->code)) {
1841 case BPF_ADD:
1842 b3 = 0xC0;
1843 b2 = 0x05;
1844 break;
1845 case BPF_SUB:
1846 b3 = 0xE8;
1847 b2 = 0x2D;
1848 break;
1849 case BPF_AND:
1850 b3 = 0xE0;
1851 b2 = 0x25;
1852 break;
1853 case BPF_OR:
1854 b3 = 0xC8;
1855 b2 = 0x0D;
1856 break;
1857 case BPF_XOR:
1858 b3 = 0xF0;
1859 b2 = 0x35;
1860 break;
1861 }
1862
1863 if (is_imm8(imm32))
1864 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1865 else if (is_axreg(dst_reg))
1866 EMIT1_off32(b2, imm32);
1867 else
1868 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1869 break;
1870
1871 case BPF_ALU64 | BPF_MOV | BPF_K:
1872 case BPF_ALU | BPF_MOV | BPF_K:
1873 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1874 dst_reg, imm32);
1875 break;
1876
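		/*
		 * dst = imm64. ld_imm64 occupies two BPF insn slots, with the
		 * second slot carrying the upper 32 bits, hence the extra
		 * insn++/i++ below.
		 */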
1877 case BPF_LD | BPF_IMM | BPF_DW:
1878 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1879 insn++;
1880 i++;
1881 break;
1882
1883 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1884 case BPF_ALU | BPF_MOD | BPF_X:
1885 case BPF_ALU | BPF_DIV | BPF_X:
1886 case BPF_ALU | BPF_MOD | BPF_K:
1887 case BPF_ALU | BPF_DIV | BPF_K:
1888 case BPF_ALU64 | BPF_MOD | BPF_X:
1889 case BPF_ALU64 | BPF_DIV | BPF_X:
1890 case BPF_ALU64 | BPF_MOD | BPF_K:
1891 case BPF_ALU64 | BPF_DIV | BPF_K: {
1892 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1893
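			/*
			 * x86 div/idiv implicitly use rdx:rax as the dividend
			 * and leave the quotient in rax and the remainder in
			 * rdx, so both registers are preserved below unless
			 * one of them is the destination.
			 */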
1894 if (dst_reg != BPF_REG_0)
1895 EMIT1(0x50); /* push rax */
1896 if (dst_reg != BPF_REG_3)
1897 EMIT1(0x52); /* push rdx */
1898
1899 if (BPF_SRC(insn->code) == BPF_X) {
1900 if (src_reg == BPF_REG_0 ||
1901 src_reg == BPF_REG_3) {
1902 /* mov r11, src_reg */
1903 EMIT_mov(AUX_REG, src_reg);
1904 src_reg = AUX_REG;
1905 }
1906 } else {
1907 /* mov r11, imm32 */
1908 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1909 src_reg = AUX_REG;
1910 }
1911
1912 if (dst_reg != BPF_REG_0)
1913 /* mov rax, dst_reg */
1914 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1915
1916 if (insn->off == 0) {
1917 /*
1918 * xor edx, edx
1919 * equivalent to 'xor rdx, rdx', but one byte less
1920 */
1921 EMIT2(0x31, 0xd2);
1922
1923 /* div src_reg */
1924 maybe_emit_1mod(&prog, src_reg, is64);
1925 EMIT2(0xF7, add_1reg(0xF0, src_reg));
1926 } else {
1927 if (BPF_CLASS(insn->code) == BPF_ALU)
1928 EMIT1(0x99); /* cdq */
1929 else
1930 EMIT2(0x48, 0x99); /* cqo */
1931
1932 /* idiv src_reg */
1933 maybe_emit_1mod(&prog, src_reg, is64);
1934 EMIT2(0xF7, add_1reg(0xF8, src_reg));
1935 }
1936
1937 if (BPF_OP(insn->code) == BPF_MOD &&
1938 dst_reg != BPF_REG_3)
1939 /* mov dst_reg, rdx */
1940 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1941 else if (BPF_OP(insn->code) == BPF_DIV &&
1942 dst_reg != BPF_REG_0)
1943 /* mov dst_reg, rax */
1944 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1945
1946 if (dst_reg != BPF_REG_3)
1947 EMIT1(0x5A); /* pop rdx */
1948 if (dst_reg != BPF_REG_0)
1949 EMIT1(0x58); /* pop rax */
1950 break;
1951 }
1952
1953 case BPF_ALU | BPF_MUL | BPF_K:
1954 case BPF_ALU64 | BPF_MUL | BPF_K:
1955 maybe_emit_mod(&prog, dst_reg, dst_reg,
1956 BPF_CLASS(insn->code) == BPF_ALU64);
1957
1958 if (is_imm8(imm32))
1959 /* imul dst_reg, dst_reg, imm8 */
1960 EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1961 imm32);
1962 else
1963 /* imul dst_reg, dst_reg, imm32 */
1964 EMIT2_off32(0x69,
1965 add_2reg(0xC0, dst_reg, dst_reg),
1966 imm32);
1967 break;
1968
1969 case BPF_ALU | BPF_MUL | BPF_X:
1970 case BPF_ALU64 | BPF_MUL | BPF_X:
1971 maybe_emit_mod(&prog, src_reg, dst_reg,
1972 BPF_CLASS(insn->code) == BPF_ALU64);
1973
1974 /* imul dst_reg, src_reg */
1975 EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1976 break;
1977
1978 /* Shifts */
1979 case BPF_ALU | BPF_LSH | BPF_K:
1980 case BPF_ALU | BPF_RSH | BPF_K:
1981 case BPF_ALU | BPF_ARSH | BPF_K:
1982 case BPF_ALU64 | BPF_LSH | BPF_K:
1983 case BPF_ALU64 | BPF_RSH | BPF_K:
1984 case BPF_ALU64 | BPF_ARSH | BPF_K:
1985 maybe_emit_1mod(&prog, dst_reg,
1986 BPF_CLASS(insn->code) == BPF_ALU64);
1987
1988 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1989 if (imm32 == 1)
1990 EMIT2(0xD1, add_1reg(b3, dst_reg));
1991 else
1992 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1993 break;
1994
1995 case BPF_ALU | BPF_LSH | BPF_X:
1996 case BPF_ALU | BPF_RSH | BPF_X:
1997 case BPF_ALU | BPF_ARSH | BPF_X:
1998 case BPF_ALU64 | BPF_LSH | BPF_X:
1999 case BPF_ALU64 | BPF_RSH | BPF_X:
2000 case BPF_ALU64 | BPF_ARSH | BPF_X:
2001 /* BMI2 shifts aren't better when shift count is already in rcx */
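		/*
		 * shlx/shrx/sarx take the shift count in an arbitrary third
		 * register, which avoids the rcx save/restore dance needed by
		 * the legacy shift-by-cl path further below.
		 */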
2002 if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
2003 /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
2004 bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
2005 u8 op;
2006
2007 switch (BPF_OP(insn->code)) {
2008 case BPF_LSH:
2009 op = 1; /* prefix 0x66 */
2010 break;
2011 case BPF_RSH:
2012 op = 3; /* prefix 0xf2 */
2013 break;
2014 case BPF_ARSH:
2015 op = 2; /* prefix 0xf3 */
2016 break;
2017 }
2018
2019 emit_shiftx(&prog, dst_reg, src_reg, w, op);
2020
2021 break;
2022 }
2023
2024 if (src_reg != BPF_REG_4) { /* common case */
2025 /* Check for bad case when dst_reg == rcx */
2026 if (dst_reg == BPF_REG_4) {
2027 /* mov r11, dst_reg */
2028 EMIT_mov(AUX_REG, dst_reg);
2029 dst_reg = AUX_REG;
2030 } else {
2031 EMIT1(0x51); /* push rcx */
2032 }
2033 /* mov rcx, src_reg */
2034 EMIT_mov(BPF_REG_4, src_reg);
2035 }
2036
2037 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
2038 maybe_emit_1mod(&prog, dst_reg,
2039 BPF_CLASS(insn->code) == BPF_ALU64);
2040
2041 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
2042 EMIT2(0xD3, add_1reg(b3, dst_reg));
2043
2044 if (src_reg != BPF_REG_4) {
2045 if (insn->dst_reg == BPF_REG_4)
2046 /* mov dst_reg, r11 */
2047 EMIT_mov(insn->dst_reg, AUX_REG);
2048 else
2049 EMIT1(0x59); /* pop rcx */
2050 }
2051
2052 break;
2053
2054 case BPF_ALU | BPF_END | BPF_FROM_BE:
2055 case BPF_ALU64 | BPF_END | BPF_FROM_LE:
2056 switch (imm32) {
2057 case 16:
2058 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
2059 EMIT1(0x66);
2060 if (is_ereg(dst_reg))
2061 EMIT1(0x41);
2062 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
2063
2064 /* Emit 'movzwl eax, ax' */
2065 if (is_ereg(dst_reg))
2066 EMIT3(0x45, 0x0F, 0xB7);
2067 else
2068 EMIT2(0x0F, 0xB7);
2069 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
2070 break;
2071 case 32:
2072 /* Emit 'bswap eax' to swap lower 4 bytes */
2073 if (is_ereg(dst_reg))
2074 EMIT2(0x41, 0x0F);
2075 else
2076 EMIT1(0x0F);
2077 EMIT1(add_1reg(0xC8, dst_reg));
2078 break;
2079 case 64:
2080 /* Emit 'bswap rax' to swap 8 bytes */
2081 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
2082 add_1reg(0xC8, dst_reg));
2083 break;
2084 }
2085 break;
2086
2087 case BPF_ALU | BPF_END | BPF_FROM_LE:
2088 switch (imm32) {
2089 case 16:
2090 /*
2091 * Emit 'movzwl eax, ax' to zero extend 16-bit
2092 * into 64 bit
2093 */
2094 if (is_ereg(dst_reg))
2095 EMIT3(0x45, 0x0F, 0xB7);
2096 else
2097 EMIT2(0x0F, 0xB7);
2098 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
2099 break;
2100 case 32:
2101 /* Emit 'mov eax, eax' to clear upper 32-bits */
2102 if (is_ereg(dst_reg))
2103 EMIT1(0x45);
2104 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
2105 break;
2106 case 64:
2107 /* nop */
2108 break;
2109 }
2110 break;
2111
2112 /* speculation barrier */
2113 case BPF_ST | BPF_NOSPEC:
2114 EMIT_LFENCE();
2115 break;
2116
2117 /* ST: *(u8*)(dst_reg + off) = imm */
2118 case BPF_ST | BPF_MEM | BPF_B:
2119 if (is_ereg(dst_reg))
2120 EMIT2(0x41, 0xC6);
2121 else
2122 EMIT1(0xC6);
2123 goto st;
2124 case BPF_ST | BPF_MEM | BPF_H:
2125 if (is_ereg(dst_reg))
2126 EMIT3(0x66, 0x41, 0xC7);
2127 else
2128 EMIT2(0x66, 0xC7);
2129 goto st;
2130 case BPF_ST | BPF_MEM | BPF_W:
2131 if (is_ereg(dst_reg))
2132 EMIT2(0x41, 0xC7);
2133 else
2134 EMIT1(0xC7);
2135 goto st;
2136 case BPF_ST | BPF_MEM | BPF_DW:
2137 EMIT2(add_1mod(0x48, dst_reg), 0xC7);
2138
2139 st: if (is_imm8(insn->off))
2140 EMIT2(add_1reg(0x40, dst_reg), insn->off);
2141 else
2142 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
2143
2144 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
2145 break;
2146
2147 /* STX: *(u8*)(dst_reg + off) = src_reg */
2148 case BPF_STX | BPF_MEM | BPF_B:
2149 case BPF_STX | BPF_MEM | BPF_H:
2150 case BPF_STX | BPF_MEM | BPF_W:
2151 case BPF_STX | BPF_MEM | BPF_DW:
2152 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2153 break;
2154
2155 case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
2156 case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
2157 case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
2158 case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
2159 start_of_ldx = prog;
2160 emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm);
2161 goto populate_extable;
2162
2163 /* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */
2164 case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
2165 case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
2166 case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
2167 case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
2168 case BPF_LDX | BPF_PROBE_MEM32SX | BPF_B:
2169 case BPF_LDX | BPF_PROBE_MEM32SX | BPF_H:
2170 case BPF_LDX | BPF_PROBE_MEM32SX | BPF_W:
2171 case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
2172 case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
2173 case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
2174 case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
2175 start_of_ldx = prog;
2176 if (BPF_CLASS(insn->code) == BPF_LDX) {
2177 if (BPF_MODE(insn->code) == BPF_PROBE_MEM32SX)
2178 emit_ldsx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2179 else
2180 emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2181 } else {
2182 emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2183 }
2184 populate_extable:
2185 {
2186 struct exception_table_entry *ex;
2187 u8 *_insn = image + proglen + (start_of_ldx - temp);
2188 u32 arena_reg, fixup_reg;
2189 s64 delta;
2190
2191 if (!bpf_prog->aux->extable)
2192 break;
2193
2194 if (excnt >= bpf_prog->aux->num_exentries) {
2195 pr_err("mem32 extable bug\n");
2196 return -EFAULT;
2197 }
2198 ex = &bpf_prog->aux->extable[excnt++];
2199
2200 delta = _insn - (u8 *)&ex->insn;
2201 /* switch ex to rw buffer for writes */
2202 ex = (void *)rw_image + ((void *)ex - (void *)image);
2203
2204 ex->insn = delta;
2205
2206 ex->data = EX_TYPE_BPF;
2207
2208 /*
2209 * src_reg/dst_reg holds the address in the arena region with upper
2210 * 32-bits being zero because of a preceding addr_space_cast(r<n>,
2211 * 0x0, 0x1) instruction. This address is adjusted with the addition
2212 * of arena_vm_start (see the implementation of BPF_PROBE_MEM32 and
2213 * BPF_PROBE_ATOMIC) before being used for the memory access. Pass
2214 * the reg holding the unmodified 32-bit address to
2215 * ex_handler_bpf().
2216 */
2217 if (BPF_CLASS(insn->code) == BPF_LDX) {
2218 arena_reg = reg2pt_regs[src_reg];
2219 fixup_reg = reg2pt_regs[dst_reg];
2220 } else {
2221 arena_reg = reg2pt_regs[dst_reg];
2222 fixup_reg = DONT_CLEAR;
2223 }
2224
2225 ex->fixup = FIELD_PREP(FIXUP_INSN_LEN_MASK, prog - start_of_ldx) |
2226 FIELD_PREP(FIXUP_ARENA_REG_MASK, arena_reg) |
2227 FIELD_PREP(FIXUP_REG_MASK, fixup_reg);
2228 ex->fixup |= FIXUP_ARENA_ACCESS;
2229
2230 ex->data |= FIELD_PREP(DATA_ARENA_OFFSET_MASK, insn->off);
2231 }
2232 break;
2233
2234 /* LDX: dst_reg = *(u8*)(src_reg + off) */
2235 case BPF_LDX | BPF_MEM | BPF_B:
2236 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
2237 case BPF_LDX | BPF_MEM | BPF_H:
2238 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
2239 case BPF_LDX | BPF_MEM | BPF_W:
2240 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
2241 case BPF_LDX | BPF_MEM | BPF_DW:
2242 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
2243 /* LDXS: dst_reg = *(s8*)(src_reg + off) */
2244 case BPF_LDX | BPF_MEMSX | BPF_B:
2245 case BPF_LDX | BPF_MEMSX | BPF_H:
2246 case BPF_LDX | BPF_MEMSX | BPF_W:
2247 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
2248 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
2249 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
2250 insn_off = insn->off;
2251
2252 if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
2253 BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
2254 /* Conservatively check that src_reg + insn->off is a kernel address:
2255 * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
2256 * and
2257 * src_reg + insn->off < VSYSCALL_ADDR
2258 */
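			/*
			 * Both bounds are folded into one unsigned compare:
			 * r11 = src_reg + insn->off - VSYSCALL_ADDR wraps
			 * around, and only addresses inside the allowed kernel
			 * range end up strictly above 'limit'.
			 */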
2259
2260 u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
2261 u8 *end_of_jmp;
2262
2263 /* movabsq r10, VSYSCALL_ADDR */
2264 emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
2265 (u32)(long)VSYSCALL_ADDR);
2266
2267 			/* mov r11, src_reg */
2268 EMIT_mov(AUX_REG, src_reg);
2269
2270 if (insn->off) {
2271 /* add r11, insn->off */
2272 maybe_emit_1mod(&prog, AUX_REG, true);
2273 EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
2274 }
2275
2276 /* sub r11, r10 */
2277 maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
2278 EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
2279
2280 /* movabsq r10, limit */
2281 emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
2282 (u32)(long)limit);
2283
2284 			/* cmp r11, r10 */
2285 maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
2286 EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
2287
2288 /* if unsigned '>', goto load */
2289 EMIT2(X86_JA, 0);
2290 end_of_jmp = prog;
2291
2292 /* xor dst_reg, dst_reg */
2293 emit_mov_imm32(&prog, false, dst_reg, 0);
2294 /* jmp byte_after_ldx */
2295 EMIT2(0xEB, 0);
2296
2297 			/* populate jmp_offset for JA above to jump to start_of_ldx */
2298 start_of_ldx = prog;
2299 end_of_jmp[-1] = start_of_ldx - end_of_jmp;
2300 }
2301 if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
2302 BPF_MODE(insn->code) == BPF_MEMSX)
2303 emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
2304 else
2305 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
2306 if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
2307 BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
2308 struct exception_table_entry *ex;
2309 u8 *_insn = image + proglen + (start_of_ldx - temp);
2310 s64 delta;
2311
2312 /* populate jmp_offset for JMP above */
2313 start_of_ldx[-1] = prog - start_of_ldx;
2314
2315 if (!bpf_prog->aux->extable)
2316 break;
2317
2318 if (excnt >= bpf_prog->aux->num_exentries) {
2319 pr_err("ex gen bug\n");
2320 return -EFAULT;
2321 }
2322 ex = &bpf_prog->aux->extable[excnt++];
2323
2324 delta = _insn - (u8 *)&ex->insn;
2325 if (!is_simm32(delta)) {
2326 pr_err("extable->insn doesn't fit into 32-bit\n");
2327 return -EFAULT;
2328 }
2329 /* switch ex to rw buffer for writes */
2330 ex = (void *)rw_image + ((void *)ex - (void *)image);
2331
2332 ex->insn = delta;
2333
2334 ex->data = EX_TYPE_BPF;
2335
2336 if (dst_reg > BPF_REG_9) {
2337 pr_err("verifier error\n");
2338 return -EFAULT;
2339 }
2340 /*
2341 * Compute size of x86 insn and its target dest x86 register.
2342 * ex_handler_bpf() will use lower 8 bits to adjust
2343 * pt_regs->ip to jump over this x86 instruction
2344 * and upper bits to figure out which pt_regs to zero out.
2345 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
2346 * of 4 bytes will be ignored and rbx will be zero inited.
2347 */
2348 ex->fixup = FIELD_PREP(FIXUP_INSN_LEN_MASK, prog - start_of_ldx) |
2349 FIELD_PREP(FIXUP_REG_MASK, reg2pt_regs[dst_reg]);
2350 }
2351 break;
2352
2353 case BPF_STX | BPF_ATOMIC | BPF_B:
2354 case BPF_STX | BPF_ATOMIC | BPF_H:
2355 if (!bpf_atomic_is_load_store(insn)) {
2356 pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
2357 return -EFAULT;
2358 }
2359 fallthrough;
2360 case BPF_STX | BPF_ATOMIC | BPF_W:
2361 case BPF_STX | BPF_ATOMIC | BPF_DW:
2362 if (insn->imm == (BPF_AND | BPF_FETCH) ||
2363 insn->imm == (BPF_OR | BPF_FETCH) ||
2364 insn->imm == (BPF_XOR | BPF_FETCH)) {
2365 bool is64 = BPF_SIZE(insn->code) == BPF_DW;
2366 u32 real_src_reg = src_reg;
2367 u32 real_dst_reg = dst_reg;
2368 u8 *branch_target;
2369
2370 /*
2371 * Can't be implemented with a single x86 insn.
2372 * Need to do a CMPXCHG loop.
2373 */
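			/*
			 * Emitted loop, in pseudo-asm:
			 *
			 *   restart:
			 *     rax = *(size *)(dst + off)       // load old value
			 *     r11 = rax <op> src               // compute new value
			 *     lock cmpxchg [dst + off], r11    // try to install it
			 *     jne  restart                     // lost the race, retry
			 *     src = rax                        // BPF_FETCH returns the old value
			 */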
2374
2375 /* Will need RAX as a CMPXCHG operand so save R0 */
2376 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
2377 if (src_reg == BPF_REG_0)
2378 real_src_reg = BPF_REG_AX;
2379 if (dst_reg == BPF_REG_0)
2380 real_dst_reg = BPF_REG_AX;
2381
2382 branch_target = prog;
2383 /* Load old value */
2384 emit_ldx(&prog, BPF_SIZE(insn->code),
2385 BPF_REG_0, real_dst_reg, insn->off);
2386 /*
2387 * Perform the (commutative) operation locally,
2388 * put the result in the AUX_REG.
2389 */
2390 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
2391 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
2392 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
2393 add_2reg(0xC0, AUX_REG, real_src_reg));
2394 /* Attempt to swap in new value */
2395 err = emit_atomic_rmw(&prog, BPF_CMPXCHG,
2396 real_dst_reg, AUX_REG,
2397 insn->off,
2398 BPF_SIZE(insn->code));
2399 if (WARN_ON(err))
2400 return err;
2401 /*
2402 * ZF tells us whether we won the race. If it's
2403 * cleared we need to try again.
2404 */
2405 EMIT2(X86_JNE, -(prog - branch_target) - 2);
2406 /* Return the pre-modification value */
2407 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
2408 /* Restore R0 after clobbering RAX */
2409 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
2410 break;
2411 }
2412
2413 if (bpf_atomic_is_load_store(insn))
2414 err = emit_atomic_ld_st(&prog, insn->imm, dst_reg, src_reg,
2415 insn->off, BPF_SIZE(insn->code));
2416 else
2417 err = emit_atomic_rmw(&prog, insn->imm, dst_reg, src_reg,
2418 insn->off, BPF_SIZE(insn->code));
2419 if (err)
2420 return err;
2421 break;
2422
2423 case BPF_STX | BPF_PROBE_ATOMIC | BPF_B:
2424 case BPF_STX | BPF_PROBE_ATOMIC | BPF_H:
2425 if (!bpf_atomic_is_load_store(insn)) {
2426 pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
2427 return -EFAULT;
2428 }
2429 fallthrough;
2430 case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
2431 case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
2432 start_of_ldx = prog;
2433
2434 if (bpf_atomic_is_load_store(insn))
2435 err = emit_atomic_ld_st_index(&prog, insn->imm,
2436 BPF_SIZE(insn->code), dst_reg,
2437 src_reg, X86_REG_R12, insn->off);
2438 else
2439 err = emit_atomic_rmw_index(&prog, insn->imm, BPF_SIZE(insn->code),
2440 dst_reg, src_reg, X86_REG_R12,
2441 insn->off);
2442 if (err)
2443 return err;
2444 goto populate_extable;
2445
2446 /* call */
2447 case BPF_JMP | BPF_CALL: {
2448 func = (u8 *) __bpf_call_base + imm32;
2449 if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) {
2450 LOAD_TAIL_CALL_CNT_PTR(stack_depth);
2451 ip += 7;
2452 }
2453 if (!imm32)
2454 return -EINVAL;
2455 if (priv_frame_ptr) {
2456 push_r9(&prog);
2457 ip += 2;
2458 }
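			/*
			 * The two blocks above bump 'ip' by exactly what they
			 * emitted (a 7-byte tail-call-cnt-ptr load and a 2-byte
			 * push r9), so that emit_call() below computes its
			 * rel32 displacement from the real address of the call
			 * instruction.
			 */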
2459 ip += x86_call_depth_emit_accounting(&prog, func, ip);
2460 if (emit_call(&prog, func, ip))
2461 return -EINVAL;
2462 if (priv_frame_ptr)
2463 pop_r9(&prog);
2464 break;
2465 }
2466
2467 case BPF_JMP | BPF_TAIL_CALL:
2468 if (imm32)
2469 emit_bpf_tail_call_direct(bpf_prog,
2470 &bpf_prog->aux->poke_tab[imm32 - 1],
2471 &prog,
2472 ip,
2473 callee_regs_used,
2474 stack_depth,
2475 ctx);
2476 else
2477 emit_bpf_tail_call_indirect(bpf_prog,
2478 &prog,
2479 callee_regs_used,
2480 stack_depth,
2481 ip,
2482 ctx);
2483 break;
2484
2485 /* cond jump */
2486 case BPF_JMP | BPF_JEQ | BPF_X:
2487 case BPF_JMP | BPF_JNE | BPF_X:
2488 case BPF_JMP | BPF_JGT | BPF_X:
2489 case BPF_JMP | BPF_JLT | BPF_X:
2490 case BPF_JMP | BPF_JGE | BPF_X:
2491 case BPF_JMP | BPF_JLE | BPF_X:
2492 case BPF_JMP | BPF_JSGT | BPF_X:
2493 case BPF_JMP | BPF_JSLT | BPF_X:
2494 case BPF_JMP | BPF_JSGE | BPF_X:
2495 case BPF_JMP | BPF_JSLE | BPF_X:
2496 case BPF_JMP32 | BPF_JEQ | BPF_X:
2497 case BPF_JMP32 | BPF_JNE | BPF_X:
2498 case BPF_JMP32 | BPF_JGT | BPF_X:
2499 case BPF_JMP32 | BPF_JLT | BPF_X:
2500 case BPF_JMP32 | BPF_JGE | BPF_X:
2501 case BPF_JMP32 | BPF_JLE | BPF_X:
2502 case BPF_JMP32 | BPF_JSGT | BPF_X:
2503 case BPF_JMP32 | BPF_JSLT | BPF_X:
2504 case BPF_JMP32 | BPF_JSGE | BPF_X:
2505 case BPF_JMP32 | BPF_JSLE | BPF_X:
2506 /* cmp dst_reg, src_reg */
2507 maybe_emit_mod(&prog, dst_reg, src_reg,
2508 BPF_CLASS(insn->code) == BPF_JMP);
2509 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
2510 goto emit_cond_jmp;
2511
2512 case BPF_JMP | BPF_JSET | BPF_X:
2513 case BPF_JMP32 | BPF_JSET | BPF_X:
2514 /* test dst_reg, src_reg */
2515 maybe_emit_mod(&prog, dst_reg, src_reg,
2516 BPF_CLASS(insn->code) == BPF_JMP);
2517 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
2518 goto emit_cond_jmp;
2519
2520 case BPF_JMP | BPF_JSET | BPF_K:
2521 case BPF_JMP32 | BPF_JSET | BPF_K:
2522 /* test dst_reg, imm32 */
2523 maybe_emit_1mod(&prog, dst_reg,
2524 BPF_CLASS(insn->code) == BPF_JMP);
2525 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
2526 goto emit_cond_jmp;
2527
2528 case BPF_JMP | BPF_JEQ | BPF_K:
2529 case BPF_JMP | BPF_JNE | BPF_K:
2530 case BPF_JMP | BPF_JGT | BPF_K:
2531 case BPF_JMP | BPF_JLT | BPF_K:
2532 case BPF_JMP | BPF_JGE | BPF_K:
2533 case BPF_JMP | BPF_JLE | BPF_K:
2534 case BPF_JMP | BPF_JSGT | BPF_K:
2535 case BPF_JMP | BPF_JSLT | BPF_K:
2536 case BPF_JMP | BPF_JSGE | BPF_K:
2537 case BPF_JMP | BPF_JSLE | BPF_K:
2538 case BPF_JMP32 | BPF_JEQ | BPF_K:
2539 case BPF_JMP32 | BPF_JNE | BPF_K:
2540 case BPF_JMP32 | BPF_JGT | BPF_K:
2541 case BPF_JMP32 | BPF_JLT | BPF_K:
2542 case BPF_JMP32 | BPF_JGE | BPF_K:
2543 case BPF_JMP32 | BPF_JLE | BPF_K:
2544 case BPF_JMP32 | BPF_JSGT | BPF_K:
2545 case BPF_JMP32 | BPF_JSLT | BPF_K:
2546 case BPF_JMP32 | BPF_JSGE | BPF_K:
2547 case BPF_JMP32 | BPF_JSLE | BPF_K:
2548 /* test dst_reg, dst_reg to save one extra byte */
2549 if (imm32 == 0) {
2550 maybe_emit_mod(&prog, dst_reg, dst_reg,
2551 BPF_CLASS(insn->code) == BPF_JMP);
2552 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
2553 goto emit_cond_jmp;
2554 }
2555
2556 /* cmp dst_reg, imm8/32 */
2557 maybe_emit_1mod(&prog, dst_reg,
2558 BPF_CLASS(insn->code) == BPF_JMP);
2559
2560 if (is_imm8(imm32))
2561 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
2562 else
2563 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
2564
2565 emit_cond_jmp: /* Convert BPF opcode to x86 */
2566 switch (BPF_OP(insn->code)) {
2567 case BPF_JEQ:
2568 jmp_cond = X86_JE;
2569 break;
2570 case BPF_JSET:
2571 case BPF_JNE:
2572 jmp_cond = X86_JNE;
2573 break;
2574 case BPF_JGT:
2575 /* GT is unsigned '>', JA in x86 */
2576 jmp_cond = X86_JA;
2577 break;
2578 case BPF_JLT:
2579 /* LT is unsigned '<', JB in x86 */
2580 jmp_cond = X86_JB;
2581 break;
2582 case BPF_JGE:
2583 /* GE is unsigned '>=', JAE in x86 */
2584 jmp_cond = X86_JAE;
2585 break;
2586 case BPF_JLE:
2587 /* LE is unsigned '<=', JBE in x86 */
2588 jmp_cond = X86_JBE;
2589 break;
2590 case BPF_JSGT:
2591 /* Signed '>', GT in x86 */
2592 jmp_cond = X86_JG;
2593 break;
2594 case BPF_JSLT:
2595 /* Signed '<', LT in x86 */
2596 jmp_cond = X86_JL;
2597 break;
2598 case BPF_JSGE:
2599 /* Signed '>=', GE in x86 */
2600 jmp_cond = X86_JGE;
2601 break;
2602 case BPF_JSLE:
2603 /* Signed '<=', LE in x86 */
2604 jmp_cond = X86_JLE;
2605 break;
2606 default: /* to silence GCC warning */
2607 return -EFAULT;
2608 }
2609 jmp_offset = addrs[i + insn->off] - addrs[i];
2610 if (is_imm8_jmp_offset(jmp_offset)) {
2611 if (jmp_padding) {
2612 /* To keep the jmp_offset valid, the extra bytes are
2613 * padded before the jump insn, so we subtract the
2614 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
2615 *
2616 * If the previous pass already emits an imm8
2617 * jmp_cond, then this BPF insn won't shrink, so
2618 * "nops" is 0.
2619 *
2620 * On the other hand, if the previous pass emits an
2621 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
2622 * keep the image from shrinking further.
2623 *
2624 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
2625 * is 2 bytes, so the size difference is 4 bytes.
2626 */
2627 nops = INSN_SZ_DIFF - 2;
2628 if (nops != 0 && nops != 4) {
2629 pr_err("unexpected jmp_cond padding: %d bytes\n",
2630 nops);
2631 return -EFAULT;
2632 }
2633 emit_nops(&prog, nops);
2634 }
2635 EMIT2(jmp_cond, jmp_offset);
2636 } else if (is_simm32(jmp_offset)) {
2637 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
2638 } else {
2639 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
2640 return -EFAULT;
2641 }
2642
2643 break;
2644
2645 case BPF_JMP | BPF_JA | BPF_X:
2646 emit_indirect_jump(&prog, insn->dst_reg, ip);
2647 break;
2648 case BPF_JMP | BPF_JA:
2649 case BPF_JMP32 | BPF_JA:
2650 if (BPF_CLASS(insn->code) == BPF_JMP) {
2651 if (insn->off == -1)
2652 /* -1 jmp instructions will always jump
2653 * backwards two bytes. Explicitly handling
2654 * this case avoids wasting too many passes
2655 * when there are long sequences of replaced
2656 * dead code.
2657 */
2658 jmp_offset = -2;
2659 else
2660 jmp_offset = addrs[i + insn->off] - addrs[i];
2661 } else {
2662 if (insn->imm == -1)
2663 jmp_offset = -2;
2664 else
2665 jmp_offset = addrs[i + insn->imm] - addrs[i];
2666 }
2667
2668 if (!jmp_offset) {
2669 /*
2670 * If jmp_padding is enabled, the extra nops will
2671 * be inserted. Otherwise, optimize out nop jumps.
2672 */
2673 if (jmp_padding) {
2674 /* There are 3 possible conditions.
2675 * (1) This BPF_JA is already optimized out in
2676 * the previous run, so there is no need
2677 * to pad any extra byte (0 byte).
2678 * (2) The previous pass emits an imm8 jmp,
2679 * so we pad 2 bytes to match the previous
2680 * insn size.
2681 * (3) Similarly, the previous pass emits an
2682 * imm32 jmp, and 5 bytes is padded.
2683 */
2684 nops = INSN_SZ_DIFF;
2685 if (nops != 0 && nops != 2 && nops != 5) {
2686 pr_err("unexpected nop jump padding: %d bytes\n",
2687 nops);
2688 return -EFAULT;
2689 }
2690 emit_nops(&prog, nops);
2691 }
2692 break;
2693 }
2694 emit_jmp:
2695 if (is_imm8_jmp_offset(jmp_offset)) {
2696 if (jmp_padding) {
2697 /* To avoid breaking jmp_offset, the extra bytes
2698 * are padded before the actual jmp insn, so
2699 * 2 bytes is subtracted from INSN_SZ_DIFF.
2700 *
2701 * If the previous pass already emits an imm8
2702 * jmp, there is nothing to pad (0 byte).
2703 *
2704 * If it emits an imm32 jmp (5 bytes) previously
2705 * and now an imm8 jmp (2 bytes), then we pad
2706 * (5 - 2 = 3) bytes to stop the image from
2707 * shrinking further.
2708 */
2709 nops = INSN_SZ_DIFF - 2;
2710 if (nops != 0 && nops != 3) {
2711 pr_err("unexpected jump padding: %d bytes\n",
2712 nops);
2713 return -EFAULT;
2714 }
2715 emit_nops(&prog, INSN_SZ_DIFF - 2);
2716 }
2717 EMIT2(0xEB, jmp_offset);
2718 } else if (is_simm32(jmp_offset)) {
2719 EMIT1_off32(0xE9, jmp_offset);
2720 } else {
2721 pr_err("jmp gen bug %llx\n", jmp_offset);
2722 return -EFAULT;
2723 }
2724 break;
2725
2726 case BPF_JMP | BPF_EXIT:
2727 if (seen_exit) {
2728 jmp_offset = ctx->cleanup_addr - addrs[i];
2729 goto emit_jmp;
2730 }
2731 seen_exit = true;
2732 /* Update cleanup_addr */
2733 ctx->cleanup_addr = proglen;
2734 if (bpf_prog_was_classic(bpf_prog) &&
2735 !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {
2736 if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
2737 return -EINVAL;
2738 }
2739 if (bpf_prog->aux->exception_boundary) {
2740 pop_callee_regs(&prog, all_callee_regs_used);
2741 pop_r12(&prog);
2742 } else {
2743 pop_callee_regs(&prog, callee_regs_used);
2744 if (arena_vm_start)
2745 pop_r12(&prog);
2746 }
2747 EMIT1(0xC9); /* leave */
2748 bpf_prog->aux->ksym.fp_end = prog - temp;
2749
2750 emit_return(&prog, image + addrs[i - 1] + (prog - temp));
2751 break;
2752
2753 default:
2754 /*
2755 * By design x86-64 JIT should support all BPF instructions.
2756 			 * This error will be seen if a new instruction was added
2757 * to the interpreter, but not to the JIT, or if there is
2758 * junk in bpf_prog.
2759 */
2760 pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
2761 return -EINVAL;
2762 }
2763
2764 ilen = prog - temp;
2765 if (ilen > BPF_MAX_INSN_SIZE) {
2766 pr_err("bpf_jit: fatal insn size error\n");
2767 return -EFAULT;
2768 }
2769
2770 if (image) {
2771 /*
2772 * When populating the image, assert that:
2773 *
2774 * i) We do not write beyond the allocated space, and
2775 * ii) addrs[i] did not change from the prior run, in order
2776 * to validate assumptions made for computing branch
2777 * displacements.
2778 */
2779 if (unlikely(proglen + ilen > oldproglen ||
2780 proglen + ilen != addrs[i])) {
2781 pr_err("bpf_jit: fatal error\n");
2782 return -EFAULT;
2783 }
2784 memcpy(rw_image + proglen, temp, ilen);
2785 }
2786 proglen += ilen;
2787 addrs[i] = proglen;
2788 prog = temp;
2789 }
2790
2791 if (image && excnt != bpf_prog->aux->num_exentries) {
2792 pr_err("extable is not populated\n");
2793 return -EFAULT;
2794 }
2795 return proglen;
2796 }
2797
2798 static void clean_stack_garbage(const struct btf_func_model *m,
2799 u8 **pprog, int nr_stack_slots,
2800 int stack_size)
2801 {
2802 int arg_size, off;
2803 u8 *prog;
2804
2805 	/* Generally speaking, the compiler passes on-stack arguments
2806 	 * with a "push" instruction, which occupies 8 bytes on the
2807 	 * stack. In that case there are no garbage values when we copy
2808 	 * the arguments from the origin stack frame to the current one
2809 	 * with BPF_DW stores.
2810 	 *
2811 	 * However, sometimes the compiler allocates only 4 bytes on the
2812 	 * stack for an argument. For now, this only happens when there
2813 	 * is a single on-stack argument and its size is at most 4
2814 	 * bytes. In that case, the upper 4 bytes of the slot where we
2815 	 * store the argument in the current stack frame contain garbage
2816 	 * values.
2817 *
2818 * arguments on origin stack:
2819 *
2820 * stack_arg_1(4-byte) xxx(4-byte)
2821 *
2822 * what we copy:
2823 *
2824 * stack_arg_1(8-byte): stack_arg_1(origin) xxx
2825 *
2826 	 * and the xxx marks the garbage values that we need to clean here.
2827 */
2828 if (nr_stack_slots != 1)
2829 return;
2830
2831 /* the size of the last argument */
2832 arg_size = m->arg_size[m->nr_args - 1];
2833 if (arg_size <= 4) {
2834 off = -(stack_size - 4);
2835 prog = *pprog;
2836 /* mov DWORD PTR [rbp + off], 0 */
2837 if (!is_imm8(off))
2838 EMIT2_off32(0xC7, 0x85, off);
2839 else
2840 EMIT3(0xC7, 0x45, off);
2841 EMIT(0, 4);
2842 *pprog = prog;
2843 }
2844 }
2845
2846 /* get the count of the regs that are used to pass arguments */
2847 static int get_nr_used_regs(const struct btf_func_model *m)
2848 {
2849 int i, arg_regs, nr_used_regs = 0;
2850
2851 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2852 arg_regs = (m->arg_size[i] + 7) / 8;
2853 if (nr_used_regs + arg_regs <= 6)
2854 nr_used_regs += arg_regs;
2855
2856 if (nr_used_regs >= 6)
2857 break;
2858 }
2859
2860 return nr_used_regs;
2861 }
2862
2863 static void save_args(const struct btf_func_model *m, u8 **prog,
2864 int stack_size, bool for_call_origin, u32 flags)
2865 {
2866 int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
2867 bool use_jmp = bpf_trampoline_use_jmp(flags);
2868 int i, j;
2869
2870 /* Store function arguments to stack.
2871 * For a function that accepts two pointers the sequence will be:
2872 * mov QWORD PTR [rbp-0x10],rdi
2873 * mov QWORD PTR [rbp-0x8],rsi
2874 */
2875 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2876 arg_regs = (m->arg_size[i] + 7) / 8;
2877
2878 /* According to the research of Yonghong, struct members
2879 * should be all in register or all on the stack.
2880 * Meanwhile, the compiler will pass the argument on regs
2881 * if the remaining regs can hold the argument.
2882 *
2883 * Disorder of the args can happen. For example:
2884 *
2885 * struct foo_struct {
2886 * long a;
2887 * int b;
2888 * };
2889 * int foo(char, char, char, char, char, struct foo_struct,
2890 * char);
2891 *
2892 		 * args 1-5 and arg 7 will be passed in regs, and arg 6
2893 		 * will be passed on the stack.
2894 */
2895 if (nr_regs + arg_regs > 6) {
2896 /* copy function arguments from origin stack frame
2897 * into current stack frame.
2898 *
2899 			 * The starting address of the on-stack arguments is:
2900 			 * rbp + 8(pushed rbp) +
2901 			 * 8(return addr of origin call, absent in the "jmp" case) +
2902 			 * 8(return addr of the caller)
2903 			 * which means: rbp + 24, or rbp + 16 when the
2904 			 * trampoline is entered with "jmp".
2905 */
2906 for (j = 0; j < arg_regs; j++) {
2907 emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
2908 nr_stack_slots * 8 + 16 + (!use_jmp) * 8);
2909 emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
2910 -stack_size);
2911
2912 if (!nr_stack_slots)
2913 first_off = stack_size;
2914 stack_size -= 8;
2915 nr_stack_slots++;
2916 }
2917 } else {
2918 			/* When preparing the arguments for the origin call,
2919 			 * only copy the on-stack arguments into the current
2920 			 * 'stack_size' area and skip the register arguments.
2921 */
2922 if (for_call_origin) {
2923 nr_regs += arg_regs;
2924 continue;
2925 }
2926
2927 /* copy the arguments from regs into stack */
2928 for (j = 0; j < arg_regs; j++) {
2929 emit_stx(prog, BPF_DW, BPF_REG_FP,
2930 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2931 -stack_size);
2932 stack_size -= 8;
2933 nr_regs++;
2934 }
2935 }
2936 }
2937
2938 clean_stack_garbage(m, prog, nr_stack_slots, first_off);
2939 }
2940
2941 static void restore_regs(const struct btf_func_model *m, u8 **prog,
2942 int stack_size)
2943 {
2944 int i, j, arg_regs, nr_regs = 0;
2945
2946 /* Restore function arguments from stack.
2947 * For a function that accepts two pointers the sequence will be:
2948 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
2949 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
2950 *
2951 * The logic here is similar to what we do in save_args()
2952 */
2953 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2954 arg_regs = (m->arg_size[i] + 7) / 8;
2955 if (nr_regs + arg_regs <= 6) {
2956 for (j = 0; j < arg_regs; j++) {
2957 emit_ldx(prog, BPF_DW,
2958 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2959 BPF_REG_FP,
2960 -stack_size);
2961 stack_size -= 8;
2962 nr_regs++;
2963 }
2964 } else {
2965 stack_size -= 8 * arg_regs;
2966 }
2967
2968 if (nr_regs >= 6)
2969 break;
2970 }
2971 }
2972
2973 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
2974 struct bpf_tramp_link *l, int stack_size,
2975 int run_ctx_off, bool save_ret,
2976 void *image, void *rw_image)
2977 {
2978 u8 *prog = *pprog;
2979 u8 *jmp_insn;
2980 int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
2981 struct bpf_prog *p = l->link.prog;
2982 u64 cookie = l->cookie;
2983
2984 /* mov rdi, cookie */
2985 emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
2986
2987 /* Prepare struct bpf_tramp_run_ctx.
2988 *
2989 * bpf_tramp_run_ctx is already preserved by
2990 * arch_prepare_bpf_trampoline().
2991 *
2992 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
2993 */
2994 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
2995
2996 /* arg1: mov rdi, progs[i] */
2997 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2998 	/* arg2: lea rsi, [rbp - run_ctx_off] */
2999 if (!is_imm8(-run_ctx_off))
3000 EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
3001 else
3002 EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
3003
3004 if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
3005 return -EINVAL;
3006 /* remember prog start time returned by __bpf_prog_enter */
3007 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
3008
3009 /* if (__bpf_prog_enter*(prog) == 0)
3010 * goto skip_exec_of_prog;
3011 */
3012 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
3013 /* emit 2 nops that will be replaced with JE insn */
3014 jmp_insn = prog;
3015 emit_nops(&prog, 2);
3016
3017 /* arg1: lea rdi, [rbp - stack_size] */
3018 if (!is_imm8(-stack_size))
3019 EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
3020 else
3021 EMIT4(0x48, 0x8D, 0x7D, -stack_size);
3022 /* arg2: progs[i]->insnsi for interpreter */
3023 if (!p->jited)
3024 emit_mov_imm64(&prog, BPF_REG_2,
3025 (long) p->insnsi >> 32,
3026 (u32) (long) p->insnsi);
3027 /* call JITed bpf program or interpreter */
3028 if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
3029 return -EINVAL;
3030
3031 /*
3032 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
3033 * of the previous call which is then passed on the stack to
3034 * the next BPF program.
3035 *
3036 * BPF_TRAMP_FENTRY trampoline may need to return the return
3037 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
3038 */
3039 if (save_ret)
3040 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
3041
3042 /* replace 2 nops with JE insn, since jmp target is known */
3043 jmp_insn[0] = X86_JE;
3044 jmp_insn[1] = prog - jmp_insn - 2;
3045
3046 /* arg1: mov rdi, progs[i] */
3047 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
3048 /* arg2: mov rsi, rbx <- start time in nsec */
3049 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
3050 /* arg3: lea rdx, [rbp - run_ctx_off] */
3051 if (!is_imm8(-run_ctx_off))
3052 EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
3053 else
3054 EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
3055 if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
3056 return -EINVAL;
3057
3058 *pprog = prog;
3059 return 0;
3060 }
3061
3062 static void emit_align(u8 **pprog, u32 align)
3063 {
3064 u8 *target, *prog = *pprog;
3065
3066 target = PTR_ALIGN(prog, align);
3067 if (target != prog)
3068 emit_nops(&prog, target - prog);
3069
3070 *pprog = prog;
3071 }
3072
3073 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
3074 {
3075 u8 *prog = *pprog;
3076 s64 offset;
3077
3078 offset = func - (ip + 2 + 4);
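	/* rel32 is relative to the end of the 6-byte "0F 8x rel32" insn, hence ip + 2 + 4. */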
3079 if (!is_simm32(offset)) {
3080 pr_err("Target %p is out of range\n", func);
3081 return -EINVAL;
3082 }
3083 EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
3084 *pprog = prog;
3085 return 0;
3086 }
3087
3088 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
3089 struct bpf_tramp_links *tl, int stack_size,
3090 int run_ctx_off, int func_meta_off, bool save_ret,
3091 void *image, void *rw_image, u64 func_meta,
3092 int cookie_off)
3093 {
3094 int i, cur_cookie = (cookie_off - stack_size) / 8;
3095 u8 *prog = *pprog;
3096
3097 for (i = 0; i < tl->nr_links; i++) {
3098 if (tl->links[i]->link.prog->call_session_cookie) {
3099 emit_store_stack_imm64(&prog, BPF_REG_0, -func_meta_off,
3100 func_meta | (cur_cookie << BPF_TRAMP_COOKIE_INDEX_SHIFT));
3101 cur_cookie--;
3102 }
3103 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
3104 run_ctx_off, save_ret, image, rw_image))
3105 return -EINVAL;
3106 }
3107 *pprog = prog;
3108 return 0;
3109 }
3110
3111 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
3112 struct bpf_tramp_links *tl, int stack_size,
3113 int run_ctx_off, u8 **branches,
3114 void *image, void *rw_image)
3115 {
3116 u8 *prog = *pprog;
3117 int i;
3118
3119 /* The first fmod_ret program will receive a garbage return value.
3120 * Set this to 0 to avoid confusing the program.
3121 */
3122 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
3123 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
3124 for (i = 0; i < tl->nr_links; i++) {
3125 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
3126 image, rw_image))
3127 return -EINVAL;
3128
3129 /* mod_ret prog stored return value into [rbp - 8]. Emit:
3130 * if (*(u64 *)(rbp - 8) != 0)
3131 * goto do_fexit;
3132 */
3133 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
3134 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
3135
3136 		/* Save the location of the branch and generate 6 nops
3137 		 * (2 bytes for the jump and 4 bytes for the offset). These nops
3138 * are replaced with a conditional jump once do_fexit (i.e. the
3139 * start of the fexit invocation) is finalized.
3140 */
3141 branches[i] = prog;
3142 emit_nops(&prog, 4 + 2);
3143 }
3144
3145 *pprog = prog;
3146 return 0;
3147 }
3148
3149 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
3150 #define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack) \
3151 __LOAD_TCC_PTR(-round_up(stack, 8) - 8)
3152
3153 /* Example:
3154 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
3155 * its 'struct btf_func_model' will be nr_args=2
3156 * The assembly code when eth_type_trans is executing after trampoline:
3157 *
3158 * push rbp
3159 * mov rbp, rsp
3160 * sub rsp, 16 // space for skb and dev
3161 * push rbx // temp regs to pass start time
3162 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
3163 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
3164 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
3165  * mov rbx, rax // remember start time if bpf stats are enabled
3166 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
3167 * call addr_of_jited_FENTRY_prog
3168 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
3169 * mov rsi, rbx // prog start time
3170 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
3171 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
3172 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
3173 * pop rbx
3174 * leave
3175 * ret
3176 *
3177 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
3178 * replaced with 'call generated_bpf_trampoline'. When it returns
3179 * eth_type_trans will continue executing with original skb and dev pointers.
3180 *
3181 * The assembly code when eth_type_trans is called from trampoline:
3182 *
3183 * push rbp
3184 * mov rbp, rsp
3185 * sub rsp, 24 // space for skb, dev, return value
3186 * push rbx // temp regs to pass start time
3187 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
3188 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
3189 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
3190 * mov rbx, rax // remember start time if bpf stats are enabled
3191 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
3192 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
3193 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
3194 * mov rsi, rbx // prog start time
3195 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
3196 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
3197 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
3198 * call eth_type_trans+5 // execute body of eth_type_trans
3199 * mov qword ptr [rbp - 8], rax // save return value
3200 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
3201  * mov rbx, rax // remember start time if bpf stats are enabled
3202 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
3203 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
3204 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
3205 * mov rsi, rbx // prog start time
3206 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
3207 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
3208 * pop rbx
3209 * leave
3210 * add rsp, 8 // skip eth_type_trans's frame
3211 * ret // return to its caller
3212 */
3213 static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
3214 void *rw_image_end, void *image,
3215 const struct btf_func_model *m, u32 flags,
3216 struct bpf_tramp_links *tlinks,
3217 void *func_addr)
3218 {
3219 int i, ret, nr_regs = m->nr_args, stack_size = 0;
3220 int regs_off, func_meta_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
3221 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
3222 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
3223 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
3224 void *orig_call = func_addr;
3225 int cookie_off, cookie_cnt;
3226 u8 **branches = NULL;
3227 u64 func_meta;
3228 u8 *prog;
3229 bool save_ret;
3230
3231 /*
3232 	 * F_INDIRECT is only compatible with F_RET_FENTRY_RET; it is
3233 	 * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG
3234 	 * because those flags rely on @func_addr being a real traced function.
3235 */
3236 WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
3237 (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));
3238
3239 /* extra registers for struct arguments */
3240 for (i = 0; i < m->nr_args; i++) {
3241 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
3242 nr_regs += (m->arg_size[i] + 7) / 8 - 1;
3243 }
3244
3245 	/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. Arguments 1-6
3246 	 * are passed through regs, the rest are passed on the stack.
3247 */
3248 if (nr_regs > MAX_BPF_FUNC_ARGS)
3249 return -ENOTSUPP;
3250
3251 /* Generated trampoline stack layout:
3252 *
3253 * RBP + 8 [ return address ]
3254 * RBP + 0 [ RBP ]
3255 *
3256 * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or
3257 * BPF_TRAMP_F_RET_FENTRY_RET flags
3258 *
3259 * [ reg_argN ] always
3260 * [ ... ]
3261 * RBP - regs_off [ reg_arg1 ] program's ctx pointer
3262 *
3263 * RBP - func_meta_off [ regs count, etc ] always
3264 *
3265 * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
3266 *
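	 * RBP - cookie_off [ session cookies ]      fsession progs (cookie_cnt > 0)
	 *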
3267 * RBP - rbx_off [ rbx value ] always
3268 *
3269 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
3270 *
3271 * [ stack_argN ] BPF_TRAMP_F_CALL_ORIG
3272 * [ ... ]
3273 * [ stack_arg2 ]
3274 * RBP - arg_stack_off [ stack_arg1 ]
3275 * RSP [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX
3276 */
3277
3278 /* room for return value of orig_call or fentry prog */
3279 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
3280 if (save_ret)
3281 stack_size += 8;
3282
3283 stack_size += nr_regs * 8;
3284 regs_off = stack_size;
3285
3286 	/* function metadata, such as regs count */
3287 stack_size += 8;
3288 func_meta_off = stack_size;
3289
3290 if (flags & BPF_TRAMP_F_IP_ARG)
3291 stack_size += 8; /* room for IP address argument */
3292
3293 ip_off = stack_size;
3294
3295 cookie_cnt = bpf_fsession_cookie_cnt(tlinks);
3296 /* room for session cookies */
3297 stack_size += cookie_cnt * 8;
3298 cookie_off = stack_size;
3299
3300 stack_size += 8;
3301 rbx_off = stack_size;
3302
3303 stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
3304 run_ctx_off = stack_size;
3305
3306 if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
3307 		/* the space used to pass arguments on the stack */
3308 stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
3309 		/* Make sure the stack pointer is 16-byte aligned if we
3310 		 * need to pass arguments on the stack, which means
3311 		 * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
3312 		 * should be 16-byte aligned. The following code depends on
3313 		 * stack_size already being 8-byte aligned.
3314 */
3315 if (bpf_trampoline_use_jmp(flags)) {
3316 /* no rip in the "jmp" case */
3317 stack_size += (stack_size % 16) ? 8 : 0;
3318 } else {
3319 stack_size += (stack_size % 16) ? 0 : 8;
3320 }
3321 }
3322
3323 arg_stack_off = stack_size;
3324
3325 if (flags & BPF_TRAMP_F_CALL_ORIG) {
3326 /* skip patched call instruction and point orig_call to actual
3327 * body of the kernel function.
3328 */
3329 if (is_endbr(orig_call))
3330 orig_call += ENDBR_INSN_SIZE;
3331 orig_call += X86_PATCH_SIZE;
3332 }
3333
3334 prog = rw_image;
3335
3336 if (flags & BPF_TRAMP_F_INDIRECT) {
3337 /*
3338 * Indirect call for bpf_struct_ops
3339 */
3340 emit_cfi(&prog, image,
3341 cfi_get_func_hash(func_addr),
3342 cfi_get_func_arity(func_addr));
3343 } else {
3344 /*
3345 * Direct-call fentry stub, as such it needs accounting for the
3346 * __fentry__ call.
3347 */
3348 x86_call_depth_emit_accounting(&prog, NULL, image);
3349 }
3350 EMIT1(0x55); /* push rbp */
3351 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
3352 if (im)
3353 im->ksym.fp_start = prog - (u8 *)rw_image;
3354
3355 if (!is_imm8(stack_size)) {
3356 /* sub rsp, stack_size */
3357 EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
3358 } else {
3359 /* sub rsp, stack_size */
3360 EMIT4(0x48, 0x83, 0xEC, stack_size);
3361 }
3362 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
3363 EMIT1(0x50); /* push rax */
3364 /* mov QWORD PTR [rbp - rbx_off], rbx */
3365 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
3366
3367 func_meta = nr_regs;
3368 /* Store number of argument registers of the traced function */
3369 emit_store_stack_imm64(&prog, BPF_REG_0, -func_meta_off, func_meta);
3370
3371 if (flags & BPF_TRAMP_F_IP_ARG) {
3372 /* Store IP address of the traced function */
3373 emit_store_stack_imm64(&prog, BPF_REG_0, -ip_off, (long)func_addr);
3374 }
3375
3376 save_args(m, &prog, regs_off, false, flags);
3377
3378 if (flags & BPF_TRAMP_F_CALL_ORIG) {
3379 /* arg1: mov rdi, im */
3380 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
3381 if (emit_rsb_call(&prog, __bpf_tramp_enter,
3382 image + (prog - (u8 *)rw_image))) {
3383 ret = -EINVAL;
3384 goto cleanup;
3385 }
3386 }
3387
3388 if (bpf_fsession_cnt(tlinks)) {
3389 		/* clear all the session cookies' values */
3390 		for (int i = 0; i < cookie_cnt; i++)
3391 			emit_store_stack_imm64(&prog, BPF_REG_0, -cookie_off + 8 * i, 0);
3392 		/* clear the return value to make sure fentry always gets 0 */
3393 emit_store_stack_imm64(&prog, BPF_REG_0, -8, 0);
3394 }
3395
3396 if (fentry->nr_links) {
3397 if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off, func_meta_off,
3398 flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image,
3399 func_meta, cookie_off))
3400 return -EINVAL;
3401 }
3402
3403 if (fmod_ret->nr_links) {
3404 branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
3405 GFP_KERNEL);
3406 if (!branches)
3407 return -ENOMEM;
3408
3409 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
3410 run_ctx_off, branches, image, rw_image)) {
3411 ret = -EINVAL;
3412 goto cleanup;
3413 }
3414 }
3415
3416 if (flags & BPF_TRAMP_F_CALL_ORIG) {
3417 restore_regs(m, &prog, regs_off);
3418 save_args(m, &prog, arg_stack_off, true, flags);
3419
3420 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3421 /* Before calling the original function, load the
3422 * tail_call_cnt_ptr from stack to rax.
3423 */
3424 LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
3425 }
3426
3427 if (flags & BPF_TRAMP_F_ORIG_STACK) {
3428 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
3429 EMIT2(0xff, 0xd3); /* call *rbx */
3430 } else {
3431 /* call original function */
3432 if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
3433 ret = -EINVAL;
3434 goto cleanup;
3435 }
3436 }
3437 /* remember return value in a stack for bpf prog to access */
3438 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
3439 im->ip_after_call = image + (prog - (u8 *)rw_image);
3440 emit_nops(&prog, X86_PATCH_SIZE);
3441 }
3442
3443 if (fmod_ret->nr_links) {
3444 /* From Intel 64 and IA-32 Architectures Optimization
3445 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3446 * Coding Rule 11: All branch targets should be 16-byte
3447 * aligned.
3448 */
3449 emit_align(&prog, 16);
3450 /* Update the branches saved in invoke_bpf_mod_ret with the
3451 * aligned address of do_fexit.
3452 */
3453 for (i = 0; i < fmod_ret->nr_links; i++) {
3454 emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
3455 image + (branches[i] - (u8 *)rw_image), X86_JNE);
3456 }
3457 }
3458
3459 /* set the "is_return" flag for fsession */
3460 func_meta |= (1ULL << BPF_TRAMP_IS_RETURN_SHIFT);
3461 if (bpf_fsession_cnt(tlinks))
3462 emit_store_stack_imm64(&prog, BPF_REG_0, -func_meta_off, func_meta);
3463
3464 if (fexit->nr_links) {
3465 if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, func_meta_off,
3466 false, image, rw_image, func_meta, cookie_off)) {
3467 ret = -EINVAL;
3468 goto cleanup;
3469 }
3470 }
3471
3472 if (flags & BPF_TRAMP_F_RESTORE_REGS)
3473 restore_regs(m, &prog, regs_off);
3474
3475 /* This needs to be done regardless. If there were fmod_ret programs,
3476 * the return value is only updated on the stack and still needs to be
3477 * restored to R0.
3478 */
3479 if (flags & BPF_TRAMP_F_CALL_ORIG) {
3480 im->ip_epilogue = image + (prog - (u8 *)rw_image);
3481 /* arg1: mov rdi, im */
3482 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
3483 if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
3484 ret = -EINVAL;
3485 goto cleanup;
3486 }
3487 } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3488 /* Before running the original function, load the
3489 * tail_call_cnt_ptr from stack to rax.
3490 */
3491 LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
3492 }
3493
3494 /* restore return value of orig_call or fentry prog back into RAX */
3495 if (save_ret)
3496 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
3497
3498 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
3499
3500 EMIT1(0xC9); /* leave */
3501 if (im)
3502 im->ksym.fp_end = prog - (u8 *)rw_image;
3503
3504 if (flags & BPF_TRAMP_F_SKIP_FRAME) {
3505 /* skip our return address and return to parent */
3506 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
3507 }
3508 emit_return(&prog, image + (prog - (u8 *)rw_image));
3509 /* Make sure the trampoline generation logic doesn't overflow */
3510 if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
3511 ret = -EFAULT;
3512 goto cleanup;
3513 }
3514 ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;
3515
3516 cleanup:
3517 kfree(branches);
3518 return ret;
3519 }
3520
3521 void *arch_alloc_bpf_trampoline(unsigned int size)
3522 {
3523 return bpf_prog_pack_alloc(size, jit_fill_hole);
3524 }
3525
3526 void arch_free_bpf_trampoline(void *image, unsigned int size)
3527 {
3528 bpf_prog_pack_free(image, size);
3529 }
3530
3531 int arch_protect_bpf_trampoline(void *image, unsigned int size)
3532 {
3533 return 0;
3534 }
3535
3536 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
3537 const struct btf_func_model *m, u32 flags,
3538 struct bpf_tramp_links *tlinks,
3539 void *func_addr)
3540 {
3541 void *rw_image, *tmp;
3542 int ret;
3543 u32 size = image_end - image;
3544
3545 /* rw_image doesn't need to be in module memory range, so we can
3546 * use kvmalloc.
3547 */
3548 rw_image = kvmalloc(size, GFP_KERNEL);
3549 if (!rw_image)
3550 return -ENOMEM;
3551
3552 ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
3553 flags, tlinks, func_addr);
3554 if (ret < 0)
3555 goto out;
3556
3557 tmp = bpf_arch_text_copy(image, rw_image, size);
3558 if (IS_ERR(tmp))
3559 ret = PTR_ERR(tmp);
3560 out:
3561 kvfree(rw_image);
3562 return ret;
3563 }
3564
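/* Sizing is done by a full dry run of the generator into a throw-away page
 * in the executable range; the value returned by
 * __arch_prepare_bpf_trampoline() (bytes emitted plus BPF_INSN_SAFETY) is
 * what callers then pass to arch_alloc_bpf_trampoline(). The on-stack
 * bpf_tramp_image only satisfies the generator's interface and is never
 * published.
 */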
3565 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
3566 struct bpf_tramp_links *tlinks, void *func_addr)
3567 {
3568 struct bpf_tramp_image im;
3569 void *image;
3570 int ret;
3571
3572 /* Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
3573 * This will NOT cause fragmentation in direct map, as we do not
3574 * call set_memory_*() on this buffer.
3575 *
3576 * We cannot use kvmalloc here, because we need image to be in
3577 * module memory range.
3578 */
3579 image = bpf_jit_alloc_exec(PAGE_SIZE);
3580 if (!image)
3581 return -ENOMEM;
3582
3583 ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
3584 m, flags, tlinks, func_addr);
3585 bpf_jit_free_exec(image);
3586 return ret;
3587 }
3588
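/* Emit the dispatcher body: a binary search over the sorted target
 * addresses in progs[], keyed on the function pointer the caller passes in
 * rdx (BPF R3). Illustrative shape of a leaf (a sketch, not the exact byte
 * sequence):
 *
 *	cmp rdx, progs[i]	; 32-bit signed immediate
 *	je  progs[i]		; near jump straight into that program
 *	jmp rdx			; no match: indirect jump (via the retpoline
 *				; thunk when the mitigation requires it)
 *
 * Interior nodes emit "cmp rdx, progs[a + pivot]; jg upper_half", recurse
 * into the lower half, 16-byte align the start of the upper half and then
 * backpatch the jg displacement (see jg_reloc below).
 */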
3589 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
3590 {
3591 u8 *jg_reloc, *prog = *pprog;
3592 int pivot, err, jg_bytes = 1;
3593 s64 jg_offset;
3594
3595 if (a == b) {
3596 /* Leaf node of recursion, i.e. not a range of indices
3597 * anymore.
3598 */
3599 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
3600 if (!is_simm32(progs[a]))
3601 return -1;
3602 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
3603 progs[a]);
3604 err = emit_cond_near_jump(&prog, /* je func */
3605 (void *)progs[a], image + (prog - buf),
3606 X86_JE);
3607 if (err)
3608 return err;
3609
3610 emit_indirect_jump(&prog, BPF_REG_3 /* R3 -> rdx */, image + (prog - buf));
3611
3612 *pprog = prog;
3613 return 0;
3614 }
3615
3616 /* Not a leaf node, so we pivot, and recursively descend into
3617 * the lower and upper ranges.
3618 */
3619 pivot = (b - a) / 2;
3620 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
3621 if (!is_simm32(progs[a + pivot]))
3622 return -1;
3623 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
3624
3625 if (pivot > 2) { /* jg upper_part */
3626 /* Require near jump. */
3627 jg_bytes = 4;
3628 EMIT2_off32(0x0F, X86_JG + 0x10, 0);
3629 } else {
3630 EMIT2(X86_JG, 0);
3631 }
3632 jg_reloc = prog;
3633
3634 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
3635 progs, image, buf);
3636 if (err)
3637 return err;
3638
3639 /* From Intel 64 and IA-32 Architectures Optimization
3640 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3641 * Coding Rule 11: All branch targets should be 16-byte
3642 * aligned.
3643 */
3644 emit_align(&prog, 16);
3645 jg_offset = prog - jg_reloc;
3646 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
3647
3648 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
3649 b, progs, image, buf);
3650 if (err)
3651 return err;
3652
3653 *pprog = prog;
3654 return 0;
3655 }
3656
3657 static int cmp_ips(const void *a, const void *b)
3658 {
3659 const s64 *ipa = a;
3660 const s64 *ipb = b;
3661
3662 if (*ipa > *ipb)
3663 return 1;
3664 if (*ipa < *ipb)
3665 return -1;
3666 return 0;
3667 }
3668
3669 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
3670 {
3671 u8 *prog = buf;
3672
3673 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
3674 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
3675 }
3676
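/* Per-CPU private stack layout, as allocated in bpf_int_jit_compile()
 * (alloc_size = round_up(stack_depth, 8) + 2 * PRIV_STACK_GUARD_SZ),
 * sketched from low to high address:
 *
 *	[ guard, PRIV_STACK_GUARD_SZ bytes ]	stack_ptr[0] holds the magic value
 *	[ usable stack, round_up(stack_depth, 8) bytes ]
 *	[ guard, PRIV_STACK_GUARD_SZ bytes ]	stack_ptr[underflow_idx] holds the magic value
 *
 * Only the first u64 of each guard region is written here and re-checked in
 * priv_stack_check_guard() at free time; a clobbered magic value means the
 * program wrote outside its private stack on some CPU.
 */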
3677 static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size)
3678 {
3679 int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
3680 u64 *stack_ptr;
3681
3682 for_each_possible_cpu(cpu) {
3683 stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
3684 stack_ptr[0] = PRIV_STACK_GUARD_VAL;
3685 stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL;
3686 }
3687 }
3688
3689 static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size,
3690 struct bpf_prog *prog)
3691 {
3692 int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
3693 u64 *stack_ptr;
3694
3695 for_each_possible_cpu(cpu) {
3696 stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
3697 if (stack_ptr[0] != PRIV_STACK_GUARD_VAL ||
3698 stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL) {
3699 pr_err("BPF private stack overflow/underflow detected for prog %sx\n",
3700 bpf_jit_get_prog_name(prog));
3701 break;
3702 }
3703 }
3704 }
3705
3706 struct x64_jit_data {
3707 struct bpf_binary_header *rw_header;
3708 struct bpf_binary_header *header;
3709 int *addrs;
3710 u8 *image;
3711 int proglen;
3712 struct jit_context ctx;
3713 };
3714
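/* The JIT loop below re-emits the program until its length stops changing,
 * for at most MAX_PASSES iterations. If it has not converged by
 * PADDING_PASSES, the remaining passes run with padding enabled: roughly,
 * do_jit() NOP-pads instructions that would otherwise shrink, so the
 * addresses computed in the previous pass stay valid and convergence is
 * forced within the remaining budget.
 */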
3715 #define MAX_PASSES 20
3716 #define PADDING_PASSES (MAX_PASSES - 5)
3717
3718 struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
3719 {
3720 struct bpf_binary_header *rw_header = NULL;
3721 struct bpf_binary_header *header = NULL;
3722 void __percpu *priv_stack_ptr = NULL;
3723 struct x64_jit_data *jit_data;
3724 int priv_stack_alloc_sz;
3725 int proglen, oldproglen = 0;
3726 struct jit_context ctx = {};
3727 bool extra_pass = false;
3728 bool padding = false;
3729 u8 *rw_image = NULL;
3730 u8 *image = NULL;
3731 int *addrs;
3732 int pass;
3733 int i;
3734
3735 if (!prog->jit_requested)
3736 return prog;
3737
3738 jit_data = prog->aux->jit_data;
3739 if (!jit_data) {
3740 jit_data = kzalloc_obj(*jit_data);
3741 if (!jit_data)
3742 return prog;
3743 prog->aux->jit_data = jit_data;
3744 }
3745 priv_stack_ptr = prog->aux->priv_stack_ptr;
3746 if (!priv_stack_ptr && prog->aux->jits_use_priv_stack) {
3747 /* Allocate the actual private stack: the verifier-calculated
3748 * stack size plus two memory guards to catch overflow and
3749 * underflow.
3750 */
3751 priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) +
3752 2 * PRIV_STACK_GUARD_SZ;
3753 priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 8, GFP_KERNEL);
3754 if (!priv_stack_ptr)
3755 goto out_priv_stack;
3756
3757 priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz);
3758 prog->aux->priv_stack_ptr = priv_stack_ptr;
3759 }
3760 addrs = jit_data->addrs;
3761 if (addrs) {
3762 ctx = jit_data->ctx;
3763 oldproglen = jit_data->proglen;
3764 image = jit_data->image;
3765 header = jit_data->header;
3766 rw_header = jit_data->rw_header;
3767 rw_image = (void *)rw_header + ((void *)image - (void *)header);
3768 extra_pass = true;
3769 padding = true;
3770 goto skip_init_addrs;
3771 }
3772 addrs = kvmalloc_objs(*addrs, prog->len + 1);
3773 if (!addrs)
3774 goto out_addrs;
3775
3776 /*
3777 * Before the first pass, make a rough estimate of addrs[]:
3778 * each BPF instruction is translated to less than 64 bytes.
3779 */
3780 for (proglen = 0, i = 0; i <= prog->len; i++) {
3781 proglen += 64;
3782 addrs[i] = proglen;
3783 }
3784 ctx.cleanup_addr = proglen;
3785 skip_init_addrs:
3786
3787 /*
3788 * JITed image shrinks with every pass and the loop iterates
3789 * until the image stops shrinking. Very large BPF programs
3790 * may converge on the last pass. In such case do one more
3791 * pass to emit the final image.
3792 */
3793 for (pass = 0; pass < MAX_PASSES || image; pass++) {
3794 if (!padding && pass >= PADDING_PASSES)
3795 padding = true;
3796 proglen = do_jit(env, prog, addrs, image, rw_image, oldproglen, &ctx, padding);
3797 if (proglen <= 0) {
3798 out_image:
3799 image = NULL;
3800 if (header) {
3801 bpf_arch_text_copy(&header->size, &rw_header->size,
3802 sizeof(rw_header->size));
3803 bpf_jit_binary_pack_free(header, rw_header);
3804 }
3805 if (extra_pass) {
3806 prog->bpf_func = NULL;
3807 prog->jited = 0;
3808 prog->jited_len = 0;
3809 }
3810 goto out_addrs;
3811 }
3812 if (image) {
3813 if (proglen != oldproglen) {
3814 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
3815 proglen, oldproglen);
3816 goto out_image;
3817 }
3818 break;
3819 }
3820 if (proglen == oldproglen) {
3821 /*
3822 * The number of entries in extable is the number of BPF_LDX
3823 * insns that access kernel memory via "pointer to BTF type".
3824 * The verifier changed their opcode from LDX|MEM|size
3825 * to LDX|PROBE_MEM|size to make JITing easier.
3826 */
3827 u32 align = __alignof__(struct exception_table_entry);
3828 u32 extable_size = prog->aux->num_exentries *
3829 sizeof(struct exception_table_entry);
3830
3831 /* allocate module memory for x86 insns and extable */
3832 header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
3833 &image, align, &rw_header, &rw_image,
3834 jit_fill_hole);
3835 if (!header)
3836 goto out_addrs;
3837 prog->aux->extable = (void *) image + roundup(proglen, align);
3838 }
3839 oldproglen = proglen;
3840 cond_resched();
3841 }
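/* Two ways to reach this point: the length converged, the image was
 * allocated and the final pass wrote into it (image != NULL), or the pass
 * budget ran out before convergence (image == NULL) and the program stays
 * interpreted. A do_jit() failure jumps straight to the cleanup labels and
 * never falls through to here.
 */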
3842
3843 if (bpf_jit_enable > 1)
3844 bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
3845
3846 if (image) {
3847 if (!prog->is_func || extra_pass) {
3848 /*
3849 * bpf_jit_binary_pack_finalize fails in two scenarios:
3850 * 1) header is not pointing to proper module memory;
3851 * 2) the arch doesn't support bpf_arch_text_copy().
3852 *
3853 * Both cases are serious bugs and justify WARN_ON.
3854 */
3855 if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) {
3856 /* header has been freed */
3857 header = NULL;
3858 goto out_image;
3859 }
3860
3861 bpf_tail_call_direct_fixup(prog);
3862 } else {
3863 jit_data->addrs = addrs;
3864 jit_data->ctx = ctx;
3865 jit_data->proglen = proglen;
3866 jit_data->image = image;
3867 jit_data->header = header;
3868 jit_data->rw_header = rw_header;
3869 }
3870
3871 /*
3872 * The bpf_prog_update_insn_ptrs function expects addrs to
3873 * point to the first byte of the jitted instruction (unlike
3874 * bpf_prog_fill_jited_linfo() below, which, for historical
3875 * reasons, expects addrs to point to the next instruction).
3876 */
3877 bpf_prog_update_insn_ptrs(prog, addrs, image);
3878
3879 /*
3880 * ctx.prog_offset is used when CFI preambles put code *before*
3881 * the function. See emit_cfi(). For FineIBT specifically this code
3882 * can also be executed and bpf_prog_kallsyms_add() will
3883 * generate an additional symbol to cover this, hence also
3884 * decrement proglen.
3885 */
3886 prog->bpf_func = (void *)image + cfi_get_offset();
3887 prog->jited = 1;
3888 prog->jited_len = proglen - cfi_get_offset();
3889 }
3890
3891 if (!image || !prog->is_func || extra_pass) {
3892 if (image)
3893 bpf_prog_fill_jited_linfo(prog, addrs + 1);
3894 out_addrs:
3895 kvfree(addrs);
3896 if (!image && priv_stack_ptr) {
3897 free_percpu(priv_stack_ptr);
3898 prog->aux->priv_stack_ptr = NULL;
3899 }
3900 out_priv_stack:
3901 kfree(jit_data);
3902 prog->aux->jit_data = NULL;
3903 }
3904
3905 return prog;
3906 }
3907
3908 bool bpf_jit_supports_kfunc_call(void)
3909 {
3910 return true;
3911 }
3912
3913 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
3914 {
3915 if (text_poke_copy(dst, src, len) == NULL)
3916 return ERR_PTR(-EINVAL);
3917 return dst;
3918 }
3919
3920 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
3921 bool bpf_jit_supports_subprog_tailcalls(void)
3922 {
3923 return true;
3924 }
3925
3926 bool bpf_jit_supports_percpu_insn(void)
3927 {
3928 return true;
3929 }
3930
3931 void bpf_jit_free(struct bpf_prog *prog)
3932 {
3933 if (prog->jited) {
3934 struct x64_jit_data *jit_data = prog->aux->jit_data;
3935 struct bpf_binary_header *hdr;
3936 void __percpu *priv_stack_ptr;
3937 int priv_stack_alloc_sz;
3938
3939 /*
3940 * If we fail the final pass of JIT (from jit_subprogs),
3941 * the program may not be finalized yet. Call finalize here
3942 * before freeing it.
3943 */
3944 if (jit_data) {
3945 bpf_jit_binary_pack_finalize(jit_data->header,
3946 jit_data->rw_header);
3947 kvfree(jit_data->addrs);
3948 kfree(jit_data);
3949 }
3950 prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset();
3951 hdr = bpf_jit_binary_pack_hdr(prog);
3952 bpf_jit_binary_pack_free(hdr, NULL);
3953 priv_stack_ptr = prog->aux->priv_stack_ptr;
3954 if (priv_stack_ptr) {
3955 priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) +
3956 2 * PRIV_STACK_GUARD_SZ;
3957 priv_stack_check_guard(priv_stack_ptr, priv_stack_alloc_sz, prog);
3958 free_percpu(prog->aux->priv_stack_ptr);
3959 }
3960 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
3961 }
3962
3963 bpf_prog_unlock_free(prog);
3964 }
3965
3966 bool bpf_jit_supports_exceptions(void)
3967 {
3968 /* We unwind through both kernel frames (starting from within bpf_throw
3969 * call) and BPF frames. Therefore we require ORC unwinder to be enabled
3970 * to walk kernel frames and reach BPF frames in the stack trace.
3971 */
3972 return IS_ENABLED(CONFIG_UNWINDER_ORC);
3973 }
3974
3975 bool bpf_jit_supports_private_stack(void)
3976 {
3977 return true;
3978 }
3979
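/* Walk the current stack with the ORC unwinder, handing each frame's return
 * address, stack pointer and frame pointer to @consume_fn until it returns
 * false or the walk finishes. A minimal sketch of a consumer (hypothetical
 * callback, for illustration only):
 *
 *	static bool dump_frame(void *cookie, u64 ip, u64 sp, u64 bp)
 *	{
 *		pr_info("ip=%llx sp=%llx bp=%llx\n", ip, sp, bp);
 *		return true;	// keep walking
 *	}
 *
 *	arch_bpf_stack_walk(dump_frame, NULL);
 */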
3980 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3981 {
3982 #if defined(CONFIG_UNWINDER_ORC)
3983 struct unwind_state state;
3984 unsigned long addr;
3985
3986 for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
3987 unwind_next_frame(&state)) {
3988 addr = unwind_get_return_address(&state);
3989 if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
3990 break;
3991 }
3992 return;
3993 #endif
3994 }
3995
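/* A tail call slot has two patch sites: poke->tailcall_target, the jump/NOP
 * at the call site itself, and poke->tailcall_bypass, a jump that routes
 * around the tail call sequence while no program is installed. Updates are
 * ordered so a CPU never runs a half-patched sequence: installing the first
 * program writes the target before the bypass is removed; removing the last
 * one restores the bypass first and uses synchronize_rcu() to let in-flight
 * executions drain before the target is turned back into a NOP.
 */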
3996 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
3997 struct bpf_prog *new, struct bpf_prog *old)
3998 {
3999 u8 *old_addr, *new_addr, *old_bypass_addr;
4000 enum bpf_text_poke_type t;
4001 int ret;
4002
4003 old_bypass_addr = old ? NULL : poke->bypass_addr;
4004 old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
4005 new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
4006
4007 /*
4008 * On program loading or teardown, the program's kallsym entry
4009 * might not be in place, so we use __bpf_arch_text_poke to skip
4010 * the kallsyms check.
4011 */
4012 if (new) {
4013 t = old_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
4014 ret = __bpf_arch_text_poke(poke->tailcall_target,
4015 t, BPF_MOD_JUMP,
4016 old_addr, new_addr);
4017 BUG_ON(ret < 0);
4018 if (!old) {
4019 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
4020 BPF_MOD_JUMP, BPF_MOD_NOP,
4021 poke->bypass_addr,
4022 NULL);
4023 BUG_ON(ret < 0);
4024 }
4025 } else {
4026 t = old_bypass_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
4027 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
4028 t, BPF_MOD_JUMP, old_bypass_addr,
4029 poke->bypass_addr);
4030 BUG_ON(ret < 0);
4031 /* Let other CPUs finish executing the old program first,
4032 * so that none of them is exposed to an inconsistent
4033 * intermediate state (nop, stack unwind, nop).
4034 */
4035 if (!ret)
4036 synchronize_rcu();
4037 t = old_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
4038 ret = __bpf_arch_text_poke(poke->tailcall_target,
4039 t, BPF_MOD_NOP, old_addr, NULL);
4040 BUG_ON(ret < 0);
4041 }
4042 }
4043
4044 bool bpf_jit_supports_arena(void)
4045 {
4046 return true;
4047 }
4048
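/* Arena note (reasoning sketch): atomic BPF_AND/BPF_OR/BPF_XOR with
 * BPF_FETCH have no single x86 instruction and are lowered to a load +
 * cmpxchg retry loop, which does not fit the one-faulting-instruction
 * exception fixup that arena accesses rely on, so bpf_jit_supports_insn()
 * rejects those combinations when in_arena is set.
 */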
4049 bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
4050 {
4051 if (!in_arena)
4052 return true;
4053 switch (insn->code) {
4054 case BPF_STX | BPF_ATOMIC | BPF_W:
4055 case BPF_STX | BPF_ATOMIC | BPF_DW:
4056 if (insn->imm == (BPF_AND | BPF_FETCH) ||
4057 insn->imm == (BPF_OR | BPF_FETCH) ||
4058 insn->imm == (BPF_XOR | BPF_FETCH))
4059 return false;
4060 }
4061 return true;
4062 }
4063
4064 bool bpf_jit_supports_ptr_xchg(void)
4065 {
4066 return true;
4067 }
4068
4069 /* x86-64 JIT emits its own code to filter user addresses so return 0 here */
4070 u64 bpf_arch_uaddress_limit(void)
4071 {
4072 return 0;
4073 }
4074
4075 bool bpf_jit_supports_timed_may_goto(void)
4076 {
4077 return true;
4078 }
4079
4080 bool bpf_jit_supports_fsession(void)
4081 {
4082 return true;
4083 }
4084