1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * BPF JIT compiler
4 *
5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7 */
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bitfield.h>
12 #include <linux/bpf.h>
13 #include <linux/memory.h>
14 #include <linux/sort.h>
15 #include <asm/extable.h>
16 #include <asm/ftrace.h>
17 #include <asm/set_memory.h>
18 #include <asm/nospec-branch.h>
19 #include <asm/text-patching.h>
20 #include <asm/unwind.h>
21 #include <asm/cfi.h>
22
23 static bool all_callee_regs_used[4] = {true, true, true, true};
24
25 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
26 {
27 if (len == 1)
28 *ptr = bytes;
29 else if (len == 2)
30 *(u16 *)ptr = bytes;
31 else {
32 *(u32 *)ptr = bytes;
33 barrier();
34 }
35 return ptr + len;
36 }
37
38 #define EMIT(bytes, len) \
39 do { prog = emit_code(prog, bytes, len); } while (0)
40
41 #define EMIT1(b1) EMIT(b1, 1)
42 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
43 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
44 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
45 #define EMIT5(b1, b2, b3, b4, b5) \
46 do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0)
47
48 #define EMIT1_off32(b1, off) \
49 do { EMIT1(b1); EMIT(off, 4); } while (0)
50 #define EMIT2_off32(b1, b2, off) \
51 do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
52 #define EMIT3_off32(b1, b2, b3, off) \
53 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
54 #define EMIT4_off32(b1, b2, b3, b4, off) \
55 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
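
/*
 * Illustrative example: EMIT3(0x48, 0x89, 0xE5) packs the three bytes
 * little-endian into a single u32 and appends "mov rbp, rsp" to the image;
 * the *_off32() variants append a further 4-byte immediate or displacement.
 */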
56
57 #ifdef CONFIG_X86_KERNEL_IBT
58 #define EMIT_ENDBR() EMIT(gen_endbr(), 4)
59 #define EMIT_ENDBR_POISON() EMIT(gen_endbr_poison(), 4)
60 #else
61 #define EMIT_ENDBR()
62 #define EMIT_ENDBR_POISON()
63 #endif
64
65 static bool is_imm8(int value)
66 {
67 return value <= 127 && value >= -128;
68 }
69
70 /*
71 * Let us limit the positive offset to be <= 123.
72 * This is to ensure eventual jit convergence for the following patterns:
73 * ...
74 * pass4, final_proglen=4391:
75 * ...
76 * 20e: 48 85 ff test rdi,rdi
77 * 211: 74 7d je 0x290
78 * 213: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0]
79 * ...
80 * 289: 48 85 ff test rdi,rdi
81 * 28c: 74 17 je 0x2a5
82 * 28e: e9 7f ff ff ff jmp 0x212
83 * 293: bf 03 00 00 00 mov edi,0x3
84 * Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (125)
85 * and insn at 0x28e is 5-byte jmp insn with offset -129.
86 *
87 * pass5, final_proglen=4392:
88 * ...
89 * 20e: 48 85 ff test rdi,rdi
90 * 211: 0f 84 80 00 00 00 je 0x297
91 * 217: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0]
92 * ...
93 * 28d: 48 85 ff test rdi,rdi
94 * 290: 74 1a je 0x2ac
95 * 292: eb 84 jmp 0x218
96 * 294: bf 03 00 00 00 mov edi,0x3
97 * Note that insn at 0x211 is 6-byte cond jump insn now since its offset
98 * becomes 0x80 based on previous round (0x293 - 0x213 = 0x80).
99 * At the same time, insn at 0x292 is a 2-byte insn since its offset is
100 * -124.
101 *
102 * pass6 will repeat the same code as in pass4 and this will prevent
103 * eventual convergence.
104 *
105 * To fix this issue, we need to break je (2->6 bytes) <-> jmp (5->2 bytes)
106 * cycle in the above. In the above example je offset <= 0x7c should work.
107 *
108 * For other cases, je <-> je needs offset <= 0x7b to avoid the
109 * non-convergence issue. For jmp <-> je and jmp <-> jmp cases, jmp
110 * offset <= 0x7c avoids the non-convergence issue.
111 *
112 * Overall, let us limit the positive offset for 8bit cond/uncond jmp insn
113 * to maximum 123 (0x7b). This way, the jit pass can eventually converge.
114 */
115 static bool is_imm8_jmp_offset(int value)
116 {
117 return value <= 123 && value >= -128;
118 }
119
120 static bool is_simm32(s64 value)
121 {
122 return value == (s64)(s32)value;
123 }
124
125 static bool is_uimm32(u64 value)
126 {
127 return value == (u64)(u32)value;
128 }
129
130 /* mov dst, src */
131 #define EMIT_mov(DST, SRC) \
132 do { \
133 if (DST != SRC) \
134 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
135 } while (0)
136
137 static int bpf_size_to_x86_bytes(int bpf_size)
138 {
139 if (bpf_size == BPF_W)
140 return 4;
141 else if (bpf_size == BPF_H)
142 return 2;
143 else if (bpf_size == BPF_B)
144 return 1;
145 else if (bpf_size == BPF_DW)
146 return 4; /* imm32 */
147 else
148 return 0;
149 }
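
/*
 * Note: BPF_DW maps to 4 here because the mov-immediate-to-memory encoding
 * (opcode C7), even with REX.W, only carries a sign-extended imm32, so the
 * store-immediate emitters never write more than 4 immediate bytes (see the
 * EMIT(imm, bpf_size_to_x86_bytes(size)) use in emit_st_index()).
 */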
150
151 /*
152 * List of x86 cond jump opcodes (. + s8)
153 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
154 */
155 #define X86_JB 0x72
156 #define X86_JAE 0x73
157 #define X86_JE 0x74
158 #define X86_JNE 0x75
159 #define X86_JBE 0x76
160 #define X86_JA 0x77
161 #define X86_JL 0x7C
162 #define X86_JGE 0x7D
163 #define X86_JLE 0x7E
164 #define X86_JG 0x7F
165
166 /* Pick a register outside of BPF range for JIT internal work */
167 #define AUX_REG (MAX_BPF_JIT_REG + 1)
168 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
169 #define X86_REG_R12 (MAX_BPF_JIT_REG + 3)
170
171 /*
172 * The following table maps BPF registers to x86-64 registers.
173 *
174 * x86-64 register R12 is not mapped to any BPF register: as a base address
175 * register in load/store instructions it always needs an extra byte of
176 * encoding, and it is callee saved (the JIT itself uses it via X86_REG_R12).
177 *
178 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
179 * trampoline. x86-64 register R10 is used for blinding (if enabled).
180 */
181 static const int reg2hex[] = {
182 [BPF_REG_0] = 0, /* RAX */
183 [BPF_REG_1] = 7, /* RDI */
184 [BPF_REG_2] = 6, /* RSI */
185 [BPF_REG_3] = 2, /* RDX */
186 [BPF_REG_4] = 1, /* RCX */
187 [BPF_REG_5] = 0, /* R8 */
188 [BPF_REG_6] = 3, /* RBX callee saved */
189 [BPF_REG_7] = 5, /* R13 callee saved */
190 [BPF_REG_8] = 6, /* R14 callee saved */
191 [BPF_REG_9] = 7, /* R15 callee saved */
192 [BPF_REG_FP] = 5, /* RBP readonly */
193 [BPF_REG_AX] = 2, /* R10 temp register */
194 [AUX_REG] = 3, /* R11 temp register */
195 [X86_REG_R9] = 1, /* R9 register, 6th function argument */
196 [X86_REG_R12] = 4, /* R12 callee saved */
197 };
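
/*
 * For example, BPF R1 (first argument) lives in RDI (reg2hex value 7) and
 * BPF R2 in RSI, matching the x86-64 SysV calling convention, so helper calls
 * need no argument shuffling. The registers whose encoding requires a REX
 * prefix (R8..R15) are exactly the ones flagged by is_ereg() below.
 */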
198
199 static const int reg2pt_regs[] = {
200 [BPF_REG_0] = offsetof(struct pt_regs, ax),
201 [BPF_REG_1] = offsetof(struct pt_regs, di),
202 [BPF_REG_2] = offsetof(struct pt_regs, si),
203 [BPF_REG_3] = offsetof(struct pt_regs, dx),
204 [BPF_REG_4] = offsetof(struct pt_regs, cx),
205 [BPF_REG_5] = offsetof(struct pt_regs, r8),
206 [BPF_REG_6] = offsetof(struct pt_regs, bx),
207 [BPF_REG_7] = offsetof(struct pt_regs, r13),
208 [BPF_REG_8] = offsetof(struct pt_regs, r14),
209 [BPF_REG_9] = offsetof(struct pt_regs, r15),
210 };
211
212 /*
213 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
214 * which need extra byte of encoding.
215 * rax,rcx,...,rbp have simpler encoding
216 */
217 static bool is_ereg(u32 reg)
218 {
219 return (1 << reg) & (BIT(BPF_REG_5) |
220 BIT(AUX_REG) |
221 BIT(BPF_REG_7) |
222 BIT(BPF_REG_8) |
223 BIT(BPF_REG_9) |
224 BIT(X86_REG_R9) |
225 BIT(X86_REG_R12) |
226 BIT(BPF_REG_AX));
227 }
228
229 /*
230 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
231 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
232 * of encoding. al,cl,dl,bl have simpler encoding.
233 */
234 static bool is_ereg_8l(u32 reg)
235 {
236 return is_ereg(reg) ||
237 (1 << reg) & (BIT(BPF_REG_1) |
238 BIT(BPF_REG_2) |
239 BIT(BPF_REG_FP));
240 }
241
242 static bool is_axreg(u32 reg)
243 {
244 return reg == BPF_REG_0;
245 }
246
247 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
248 static u8 add_1mod(u8 byte, u32 reg)
249 {
250 if (is_ereg(reg))
251 byte |= 1;
252 return byte;
253 }
254
255 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
256 {
257 if (is_ereg(r1))
258 byte |= 1;
259 if (is_ereg(r2))
260 byte |= 4;
261 return byte;
262 }
263
264 static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index)
265 {
266 if (is_ereg(r1))
267 byte |= 1;
268 if (is_ereg(index))
269 byte |= 2;
270 if (is_ereg(r2))
271 byte |= 4;
272 return byte;
273 }
274
275 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
276 static u8 add_1reg(u8 byte, u32 dst_reg)
277 {
278 return byte + reg2hex[dst_reg];
279 }
280
281 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
282 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
283 {
284 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
285 }
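
/*
 * Worked example: EMIT_mov(BPF_REG_1, BPF_REG_2) expands to
 * EMIT3(add_2mod(0x48, R1, R2), 0x89, add_2reg(0xC0, R1, R2)), i.e. the bytes
 * 0x48 0x89 0xF7 ("mov rdi, rsi"). The ModRM byte 0xF7 is
 * 0xC0 + reg2hex[R1] + (reg2hex[R2] << 3): destination in the r/m field,
 * source in the reg field.
 */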
286
287 /* Some 1-byte opcodes for binary ALU operations */
288 static u8 simple_alu_opcodes[] = {
289 [BPF_ADD] = 0x01,
290 [BPF_SUB] = 0x29,
291 [BPF_AND] = 0x21,
292 [BPF_OR] = 0x09,
293 [BPF_XOR] = 0x31,
294 [BPF_LSH] = 0xE0,
295 [BPF_RSH] = 0xE8,
296 [BPF_ARSH] = 0xF8,
297 };
298
299 static void jit_fill_hole(void *area, unsigned int size)
300 {
301 /* Fill whole space with INT3 instructions */
302 memset(area, 0xcc, size);
303 }
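
/*
 * INT3 is used so that any stray execution of unused image space traps
 * immediately instead of running whatever bytes happen to be there.
 */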
304
305 int bpf_arch_text_invalidate(void *dst, size_t len)
306 {
307 return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
308 }
309
310 struct jit_context {
311 int cleanup_addr; /* Epilogue code offset */
312
313 /*
314 * Program specific offsets of labels in the code; these rely on the
315 * JIT doing at least 2 passes, recording the position on the first
316 * pass, only to generate the correct offset on the second pass.
317 */
318 int tail_call_direct_label;
319 int tail_call_indirect_label;
320 };
321
322 /* Maximum number of bytes emitted while JITing one eBPF insn */
323 #define BPF_MAX_INSN_SIZE 128
324 #define BPF_INSN_SAFETY 64
325
326 /* Number of bytes emit_patch() needs to generate instructions */
327 #define X86_PATCH_SIZE 5
328 /* Number of bytes that will be skipped on tailcall */
329 #define X86_TAIL_CALL_OFFSET (12 + ENDBR_INSN_SIZE)
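
/*
 * The 12 bytes a tail call skips are: the 5-byte patchable nop area, the
 * 3-byte tail_call_cnt setup (or nop3), and push rbp + mov rbp, rsp (4 bytes);
 * ENDBR_INSN_SIZE covers the ENDBR landing pad emitted at function entry on
 * IBT kernels. See emit_prologue() for the exact layout.
 */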
330
331 static void push_r9(u8 **pprog)
332 {
333 u8 *prog = *pprog;
334
335 EMIT2(0x41, 0x51); /* push r9 */
336 *pprog = prog;
337 }
338
339 static void pop_r9(u8 **pprog)
340 {
341 u8 *prog = *pprog;
342
343 EMIT2(0x41, 0x59); /* pop r9 */
344 *pprog = prog;
345 }
346
347 static void push_r12(u8 **pprog)
348 {
349 u8 *prog = *pprog;
350
351 EMIT2(0x41, 0x54); /* push r12 */
352 *pprog = prog;
353 }
354
355 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
356 {
357 u8 *prog = *pprog;
358
359 if (callee_regs_used[0])
360 EMIT1(0x53); /* push rbx */
361 if (callee_regs_used[1])
362 EMIT2(0x41, 0x55); /* push r13 */
363 if (callee_regs_used[2])
364 EMIT2(0x41, 0x56); /* push r14 */
365 if (callee_regs_used[3])
366 EMIT2(0x41, 0x57); /* push r15 */
367 *pprog = prog;
368 }
369
370 static void pop_r12(u8 **pprog)
371 {
372 u8 *prog = *pprog;
373
374 EMIT2(0x41, 0x5C); /* pop r12 */
375 *pprog = prog;
376 }
377
378 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
379 {
380 u8 *prog = *pprog;
381
382 if (callee_regs_used[3])
383 EMIT2(0x41, 0x5F); /* pop r15 */
384 if (callee_regs_used[2])
385 EMIT2(0x41, 0x5E); /* pop r14 */
386 if (callee_regs_used[1])
387 EMIT2(0x41, 0x5D); /* pop r13 */
388 if (callee_regs_used[0])
389 EMIT1(0x5B); /* pop rbx */
390 *pprog = prog;
391 }
392
393 static void emit_nops(u8 **pprog, int len)
394 {
395 u8 *prog = *pprog;
396 int i, noplen;
397
398 while (len > 0) {
399 noplen = len;
400
401 if (noplen > ASM_NOP_MAX)
402 noplen = ASM_NOP_MAX;
403
404 for (i = 0; i < noplen; i++)
405 EMIT1(x86_nops[noplen][i]);
406 len -= noplen;
407 }
408
409 *pprog = prog;
410 }
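
/*
 * Besides padding, emit_nops(&prog, X86_PATCH_SIZE) is what reserves the
 * 5-byte sites that bpf_arch_text_poke() later rewrites into a call/jmp,
 * e.g. the trampoline hook in the prologue and the direct tail-call target.
 */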
411
412 /*
413 * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
414 * in arch/x86/kernel/alternative.c
415 */
416 static int emit_call(u8 **prog, void *func, void *ip);
417
418 static void emit_fineibt(u8 **pprog, u8 *ip, u32 hash, int arity)
419 {
420 u8 *prog = *pprog;
421
422 EMIT_ENDBR();
423 EMIT1_off32(0x2d, hash); /* subl $hash, %eax */
424 if (cfi_bhi) {
425 EMIT2(0x2e, 0x2e); /* cs cs */
426 emit_call(&prog, __bhi_args[arity], ip + 11);
427 } else {
428 EMIT3_off32(0x2e, 0x0f, 0x85, 3); /* jne.d32,pn 3 */
429 }
430 EMIT_ENDBR_POISON();
431
432 *pprog = prog;
433 }
434
435 static void emit_kcfi(u8 **pprog, u32 hash)
436 {
437 u8 *prog = *pprog;
438
439 EMIT1_off32(0xb8, hash); /* movl $hash, %eax */
440 #ifdef CONFIG_CALL_PADDING
441 EMIT1(0x90);
442 EMIT1(0x90);
443 EMIT1(0x90);
444 EMIT1(0x90);
445 EMIT1(0x90);
446 EMIT1(0x90);
447 EMIT1(0x90);
448 EMIT1(0x90);
449 EMIT1(0x90);
450 EMIT1(0x90);
451 EMIT1(0x90);
452 #endif
453 EMIT_ENDBR();
454
455 *pprog = prog;
456 }
457
458 static void emit_cfi(u8 **pprog, u8 *ip, u32 hash, int arity)
459 {
460 u8 *prog = *pprog;
461
462 switch (cfi_mode) {
463 case CFI_FINEIBT:
464 emit_fineibt(&prog, ip, hash, arity);
465 break;
466
467 case CFI_KCFI:
468 emit_kcfi(&prog, hash);
469 break;
470
471 default:
472 EMIT_ENDBR();
473 break;
474 }
475
476 *pprog = prog;
477 }
478
479 static void emit_prologue_tail_call(u8 **pprog, bool is_subprog)
480 {
481 u8 *prog = *pprog;
482
483 if (!is_subprog) {
484 /* cmp rax, MAX_TAIL_CALL_CNT */
485 EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT);
486 EMIT2(X86_JA, 6); /* ja 6 */
487 /* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT.
488 * case1: entry of main prog.
489 * case2: tail callee of main prog.
490 */
491 EMIT1(0x50); /* push rax */
492 /* Make rax as tail_call_cnt_ptr. */
493 EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */
494 EMIT2(0xEB, 1); /* jmp 1 */
495 /* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT.
496 * case: tail callee of subprog.
497 */
498 EMIT1(0x50); /* push rax */
499 /* push tail_call_cnt_ptr */
500 EMIT1(0x50); /* push rax */
501 } else { /* is_subprog */
502 /* rax is tail_call_cnt_ptr. */
503 EMIT1(0x50); /* push rax */
504 EMIT1(0x50); /* push rax */
505 }
506
507 *pprog = prog;
508 }
509
510 /*
511 * Emit x86-64 prologue code for BPF program.
512 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
513 * while jumping to another program
514 */
515 static void emit_prologue(u8 **pprog, u8 *ip, u32 stack_depth, bool ebpf_from_cbpf,
516 bool tail_call_reachable, bool is_subprog,
517 bool is_exception_cb)
518 {
519 u8 *prog = *pprog;
520
521 if (is_subprog) {
522 emit_cfi(&prog, ip, cfi_bpf_subprog_hash, 5);
523 } else {
524 emit_cfi(&prog, ip, cfi_bpf_hash, 1);
525 }
526 /* BPF trampoline can be made to work without these nops,
527 * but let's waste 5 bytes for now and optimize later
528 */
529 emit_nops(&prog, X86_PATCH_SIZE);
530 if (!ebpf_from_cbpf) {
531 if (tail_call_reachable && !is_subprog)
532 /* When it's the entry of the whole tailcall context,
533 * zeroing rax means initialising tail_call_cnt.
534 */
535 EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */
536 else
537 /* Keep the same instruction layout. */
538 emit_nops(&prog, 3); /* nop3 */
539 }
540 /* Exception callback receives FP as third parameter */
541 if (is_exception_cb) {
542 EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
543 EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
544 /* The main frame must have exception_boundary as true, so we
545 * first restore those callee-saved regs from stack, before
546 * reusing the stack frame.
547 */
548 pop_callee_regs(&prog, all_callee_regs_used);
549 pop_r12(&prog);
550 /* Reset the stack frame. */
551 EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
552 } else {
553 EMIT1(0x55); /* push rbp */
554 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
555 }
556
557 /* X86_TAIL_CALL_OFFSET is here */
558 EMIT_ENDBR();
559
560 /* sub rsp, rounded_stack_depth */
561 if (stack_depth)
562 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
563 if (tail_call_reachable)
564 emit_prologue_tail_call(&prog, is_subprog);
565 *pprog = prog;
566 }
567
568 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
569 {
570 u8 *prog = *pprog;
571 s64 offset;
572
573 offset = func - (ip + X86_PATCH_SIZE);
574 if (!is_simm32(offset)) {
575 pr_err("Target call %p is out of range\n", func);
576 return -ERANGE;
577 }
578 EMIT1_off32(opcode, offset);
579 *pprog = prog;
580 return 0;
581 }
582
583 static int emit_call(u8 **pprog, void *func, void *ip)
584 {
585 return emit_patch(pprog, func, ip, 0xE8);
586 }
587
588 static int emit_rsb_call(u8 **pprog, void *func, void *ip)
589 {
590 OPTIMIZER_HIDE_VAR(func);
591 ip += x86_call_depth_emit_accounting(pprog, func, ip);
592 return emit_patch(pprog, func, ip, 0xE8);
593 }
594
595 static int emit_jump(u8 **pprog, void *func, void *ip)
596 {
597 return emit_patch(pprog, func, ip, 0xE9);
598 }
599
600 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
601 enum bpf_text_poke_type new_t,
602 void *old_addr, void *new_addr)
603 {
604 const u8 *nop_insn = x86_nops[5];
605 u8 old_insn[X86_PATCH_SIZE];
606 u8 new_insn[X86_PATCH_SIZE];
607 u8 *prog;
608 int ret;
609
610 memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
611 if (old_t != BPF_MOD_NOP && old_addr) {
612 prog = old_insn;
613 ret = old_t == BPF_MOD_CALL ?
614 emit_call(&prog, old_addr, ip) :
615 emit_jump(&prog, old_addr, ip);
616 if (ret)
617 return ret;
618 }
619
620 memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
621 if (new_t != BPF_MOD_NOP && new_addr) {
622 prog = new_insn;
623 ret = new_t == BPF_MOD_CALL ?
624 emit_call(&prog, new_addr, ip) :
625 emit_jump(&prog, new_addr, ip);
626 if (ret)
627 return ret;
628 }
629
630 ret = -EBUSY;
631 mutex_lock(&text_mutex);
632 if (memcmp(ip, old_insn, X86_PATCH_SIZE))
633 goto out;
634 ret = 1;
635 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
636 smp_text_poke_single(ip, new_insn, X86_PATCH_SIZE, NULL);
637 ret = 0;
638 }
639 out:
640 mutex_unlock(&text_mutex);
641 return ret;
642 }
643
644 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
645 enum bpf_text_poke_type new_t, void *old_addr,
646 void *new_addr)
647 {
648 if (!is_kernel_text((long)ip) &&
649 !is_bpf_text_address((long)ip))
650 /* BPF poking in modules is not supported */
651 return -EINVAL;
652
653 /*
654 * See emit_prologue(), for IBT builds the trampoline hook is preceded
655 * with an ENDBR instruction.
656 */
657 if (is_endbr(ip))
658 ip += ENDBR_INSN_SIZE;
659
660 return __bpf_arch_text_poke(ip, old_t, new_t, old_addr, new_addr);
661 }
662
663 #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8)
664
665 static void __emit_indirect_jump(u8 **pprog, int reg, bool ereg)
666 {
667 u8 *prog = *pprog;
668
669 if (ereg)
670 EMIT1(0x41);
671
672 EMIT2(0xFF, 0xE0 + reg);
673
674 *pprog = prog;
675 }
676
677 static void emit_indirect_jump(u8 **pprog, int bpf_reg, u8 *ip)
678 {
679 u8 *prog = *pprog;
680 int reg = reg2hex[bpf_reg];
681 bool ereg = is_ereg(bpf_reg);
682
683 if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
684 OPTIMIZER_HIDE_VAR(reg);
685 emit_jump(&prog, its_static_thunk(reg + 8*ereg), ip);
686 } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
687 EMIT_LFENCE();
688 __emit_indirect_jump(&prog, reg, ereg);
689 } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
690 OPTIMIZER_HIDE_VAR(reg);
691 if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
692 emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg + 8*ereg], ip);
693 else
694 emit_jump(&prog, &__x86_indirect_thunk_array[reg + 8*ereg], ip);
695 } else {
696 __emit_indirect_jump(&prog, reg, ereg);
697 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
698 EMIT1(0xCC); /* int3 */
699 }
700
701 *pprog = prog;
702 }
703
704 static void emit_return(u8 **pprog, u8 *ip)
705 {
706 u8 *prog = *pprog;
707
708 if (cpu_wants_rethunk()) {
709 emit_jump(&prog, x86_return_thunk, ip);
710 } else {
711 EMIT1(0xC3); /* ret */
712 if (IS_ENABLED(CONFIG_MITIGATION_SLS))
713 EMIT1(0xCC); /* int3 */
714 }
715
716 *pprog = prog;
717 }
718
719 #define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (-16 - round_up(stack, 8))
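
/*
 * tail_call_cnt_ptr is the lower of the two 8-byte slots pushed by
 * emit_prologue_tail_call() right below the rounded-up BPF stack, hence
 * rbp - round_up(stack, 8) - 16.
 */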
720
721 /*
722 * Generate the following code:
723 *
724 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
725 * if (index >= array->map.max_entries)
726 * goto out;
727 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
728 * goto out;
729 * prog = array->ptrs[index];
730 * if (prog == NULL)
731 * goto out;
732 * goto *(prog->bpf_func + prologue_size);
733 * out:
734 */
735 static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
736 u8 **pprog, bool *callee_regs_used,
737 u32 stack_depth, u8 *ip,
738 struct jit_context *ctx)
739 {
740 int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
741 u8 *prog = *pprog, *start = *pprog;
742 int offset;
743
744 /*
745 * rdi - pointer to ctx
746 * rsi - pointer to bpf_array
747 * rdx - index in bpf_array
748 */
749
750 /*
751 * if (index >= array->map.max_entries)
752 * goto out;
753 */
754 EMIT2(0x89, 0xD2); /* mov edx, edx */
755 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
756 offsetof(struct bpf_array, map.max_entries));
757
758 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
759 EMIT2(X86_JBE, offset); /* jbe out */
760
761 /*
762 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
763 * goto out;
764 */
765 EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
766 EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
767
768 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
769 EMIT2(X86_JAE, offset); /* jae out */
770
771 /* prog = array->ptrs[index]; */
772 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
773 offsetof(struct bpf_array, ptrs));
774
775 /*
776 * if (prog == NULL)
777 * goto out;
778 */
779 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */
780
781 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
782 EMIT2(X86_JE, offset); /* je out */
783
784 /* Inc tail_call_cnt if the slot is populated. */
785 EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */
786
787 if (bpf_prog->aux->exception_boundary) {
788 pop_callee_regs(&prog, all_callee_regs_used);
789 pop_r12(&prog);
790 } else {
791 pop_callee_regs(&prog, callee_regs_used);
792 if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
793 pop_r12(&prog);
794 }
795
796 /* Pop tail_call_cnt_ptr. */
797 EMIT1(0x58); /* pop rax */
798 /* Pop tail_call_cnt, if it's main prog.
799 * Pop tail_call_cnt_ptr, if it's subprog.
800 */
801 EMIT1(0x58); /* pop rax */
802 if (stack_depth)
803 EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
804 round_up(stack_depth, 8));
805
806 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
807 EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */
808 offsetof(struct bpf_prog, bpf_func));
809 EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */
810 X86_TAIL_CALL_OFFSET);
811 /*
812 * Now we're ready to jump into next BPF program
813 * rdi == ctx (1st arg)
814 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
815 */
816 emit_indirect_jump(&prog, BPF_REG_4 /* R4 -> rcx */, ip + (prog - start));
817
818 /* out: */
819 ctx->tail_call_indirect_label = prog - start;
820 *pprog = prog;
821 }
822
823 static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
824 struct bpf_jit_poke_descriptor *poke,
825 u8 **pprog, u8 *ip,
826 bool *callee_regs_used, u32 stack_depth,
827 struct jit_context *ctx)
828 {
829 int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
830 u8 *prog = *pprog, *start = *pprog;
831 int offset;
832
833 /*
834 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
835 * goto out;
836 */
837 EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
838 EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
839
840 offset = ctx->tail_call_direct_label - (prog + 2 - start);
841 EMIT2(X86_JAE, offset); /* jae out */
842
843 poke->tailcall_bypass = ip + (prog - start);
844 poke->adj_off = X86_TAIL_CALL_OFFSET;
845 poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
846 poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
847
848 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
849 poke->tailcall_bypass);
850
851 /* Inc tail_call_cnt if the slot is populated. */
852 EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */
853
854 if (bpf_prog->aux->exception_boundary) {
855 pop_callee_regs(&prog, all_callee_regs_used);
856 pop_r12(&prog);
857 } else {
858 pop_callee_regs(&prog, callee_regs_used);
859 if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
860 pop_r12(&prog);
861 }
862
863 /* Pop tail_call_cnt_ptr. */
864 EMIT1(0x58); /* pop rax */
865 /* Pop tail_call_cnt, if it's main prog.
866 * Pop tail_call_cnt_ptr, if it's subprog.
867 */
868 EMIT1(0x58); /* pop rax */
869 if (stack_depth)
870 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
871
872 emit_nops(&prog, X86_PATCH_SIZE);
873
874 /* out: */
875 ctx->tail_call_direct_label = prog - start;
876
877 *pprog = prog;
878 }
879
880 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
881 {
882 struct bpf_jit_poke_descriptor *poke;
883 struct bpf_array *array;
884 struct bpf_prog *target;
885 int i, ret;
886
887 for (i = 0; i < prog->aux->size_poke_tab; i++) {
888 poke = &prog->aux->poke_tab[i];
889 if (poke->aux && poke->aux != prog->aux)
890 continue;
891
892 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
893
894 if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
895 continue;
896
897 array = container_of(poke->tail_call.map, struct bpf_array, map);
898 mutex_lock(&array->aux->poke_mutex);
899 target = array->ptrs[poke->tail_call.key];
900 if (target) {
901 ret = __bpf_arch_text_poke(poke->tailcall_target,
902 BPF_MOD_NOP, BPF_MOD_JUMP,
903 NULL,
904 (u8 *)target->bpf_func +
905 poke->adj_off);
906 BUG_ON(ret < 0);
907 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
908 BPF_MOD_JUMP, BPF_MOD_NOP,
909 (u8 *)poke->tailcall_target +
910 X86_PATCH_SIZE, NULL);
911 BUG_ON(ret < 0);
912 }
913 WRITE_ONCE(poke->tailcall_target_stable, true);
914 mutex_unlock(&array->aux->poke_mutex);
915 }
916 }
917
918 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
919 u32 dst_reg, const u32 imm32)
920 {
921 u8 *prog = *pprog;
922 u8 b1, b2, b3;
923
924 /*
925 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
926 * (which zero-extends imm32) to save 2 bytes.
927 */
928 if (sign_propagate && (s32)imm32 < 0) {
929 /* 'mov %rax, imm32' sign extends imm32 */
930 b1 = add_1mod(0x48, dst_reg);
931 b2 = 0xC7;
932 b3 = 0xC0;
933 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
934 goto done;
935 }
936
937 /*
938 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
939 * to save 3 bytes.
940 */
941 if (imm32 == 0) {
942 if (is_ereg(dst_reg))
943 EMIT1(add_2mod(0x40, dst_reg, dst_reg));
944 b2 = 0x31; /* xor */
945 b3 = 0xC0;
946 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
947 goto done;
948 }
949
950 /* mov %eax, imm32 */
951 if (is_ereg(dst_reg))
952 EMIT1(add_1mod(0x40, dst_reg));
953 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
954 done:
955 *pprog = prog;
956 }
957
958 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
959 const u32 imm32_hi, const u32 imm32_lo)
960 {
961 u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo;
962 u8 *prog = *pprog;
963
964 if (is_uimm32(imm64)) {
965 /*
966 * For emitting a plain u32, where the sign bit must not be
967 * propagated, LLVM tends to load imm64 over mov32
968 * directly, so save a couple of bytes by just doing
969 * 'mov %eax, imm32' instead.
970 */
971 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
972 } else if (is_simm32(imm64)) {
973 emit_mov_imm32(&prog, true, dst_reg, imm32_lo);
974 } else {
975 /* movabsq rax, imm64 */
976 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
977 EMIT(imm32_lo, 4);
978 EMIT(imm32_hi, 4);
979 }
980
981 *pprog = prog;
982 }
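
/*
 * Size example: imm64 = 0x00000000ffffffff passes is_uimm32() and becomes a
 * 5-byte "mov eax, 0xffffffff" (the upper half is implicitly zeroed);
 * imm64 = -123 passes is_simm32() and becomes a 7-byte sign-extending
 * "mov rax, -123"; anything else needs the full 10-byte movabs.
 */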
983
984 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
985 {
986 u8 *prog = *pprog;
987
988 if (is64) {
989 /* mov dst, src */
990 EMIT_mov(dst_reg, src_reg);
991 } else {
992 /* mov32 dst, src */
993 if (is_ereg(dst_reg) || is_ereg(src_reg))
994 EMIT1(add_2mod(0x40, dst_reg, src_reg));
995 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
996 }
997
998 *pprog = prog;
999 }
1000
1001 static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
1002 u32 src_reg)
1003 {
1004 u8 *prog = *pprog;
1005
1006 if (is64) {
1007 /* movs[b,w,l]q dst, src */
1008 if (num_bits == 8)
1009 EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
1010 add_2reg(0xC0, src_reg, dst_reg));
1011 else if (num_bits == 16)
1012 EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
1013 add_2reg(0xC0, src_reg, dst_reg));
1014 else if (num_bits == 32)
1015 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
1016 add_2reg(0xC0, src_reg, dst_reg));
1017 } else {
1018 /* movs[b,w]l dst, src */
1019 if (num_bits == 8) {
1020 EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
1021 add_2reg(0xC0, src_reg, dst_reg));
1022 } else if (num_bits == 16) {
1023 if (is_ereg(dst_reg) || is_ereg(src_reg))
1024 EMIT1(add_2mod(0x40, src_reg, dst_reg));
1025 EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
1026 add_2reg(0xC0, src_reg, dst_reg));
1027 }
1028 }
1029
1030 *pprog = prog;
1031 }
1032
1033 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
1034 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
1035 {
1036 u8 *prog = *pprog;
1037
1038 if (is_imm8(off)) {
1039 /* 1-byte signed displacement.
1040 *
1041 * If off == 0 we could skip this and save one extra byte, but
1042 * the special case of x86 R13, which always needs an offset, is not
1043 * worth the hassle.
1044 */
1045 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
1046 } else {
1047 /* 4-byte signed displacement */
1048 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
1049 }
1050 *pprog = prog;
1051 }
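
/*
 * E.g. loading BPF R0 from *(u64 *)(R1 + 8) ends with ModRM 0x47
 * (== 0x40 + reg2hex[R1] + (reg2hex[R0] << 3)) and a one-byte displacement
 * of 8; an offset of 4096 would instead use ModRM 0x87 plus a 4-byte
 * displacement.
 */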
1052
1053 static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off)
1054 {
1055 u8 *prog = *pprog;
1056
1057 if (is_imm8(off)) {
1058 EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
1059 } else {
1060 EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
1061 }
1062 *pprog = prog;
1063 }
1064
1065 /*
1066 * Emit a REX byte if it will be necessary to address these registers
1067 */
1068 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
1069 {
1070 u8 *prog = *pprog;
1071
1072 if (is64)
1073 EMIT1(add_2mod(0x48, dst_reg, src_reg));
1074 else if (is_ereg(dst_reg) || is_ereg(src_reg))
1075 EMIT1(add_2mod(0x40, dst_reg, src_reg));
1076 *pprog = prog;
1077 }
1078
1079 /*
1080 * Similar version of maybe_emit_mod() for a single register
1081 */
1082 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
1083 {
1084 u8 *prog = *pprog;
1085
1086 if (is64)
1087 EMIT1(add_1mod(0x48, reg));
1088 else if (is_ereg(reg))
1089 EMIT1(add_1mod(0x40, reg));
1090 *pprog = prog;
1091 }
1092
1093 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1094 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1095 {
1096 u8 *prog = *pprog;
1097
1098 switch (size) {
1099 case BPF_B:
1100 /* Emit 'movzx rax, byte ptr [rax + off]' */
1101 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
1102 break;
1103 case BPF_H:
1104 /* Emit 'movzx rax, word ptr [rax + off]' */
1105 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
1106 break;
1107 case BPF_W:
1108 /* Emit 'mov eax, dword ptr [rax+0x14]' */
1109 if (is_ereg(dst_reg) || is_ereg(src_reg))
1110 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
1111 else
1112 EMIT1(0x8B);
1113 break;
1114 case BPF_DW:
1115 /* Emit 'mov rax, qword ptr [rax+0x14]' */
1116 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
1117 break;
1118 }
1119 emit_insn_suffix(&prog, src_reg, dst_reg, off);
1120 *pprog = prog;
1121 }
1122
1123 /* LDSX: dst_reg = *(s8*)(src_reg + off) */
1124 static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1125 {
1126 u8 *prog = *pprog;
1127
1128 switch (size) {
1129 case BPF_B:
1130 /* Emit 'movsx rax, byte ptr [rax + off]' */
1131 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
1132 break;
1133 case BPF_H:
1134 /* Emit 'movsx rax, word ptr [rax + off]' */
1135 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
1136 break;
1137 case BPF_W:
1138 /* Emit 'movsx rax, dword ptr [rax+0x14]' */
1139 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
1140 break;
1141 }
1142 emit_insn_suffix(&prog, src_reg, dst_reg, off);
1143 *pprog = prog;
1144 }
1145
1146 static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1147 {
1148 u8 *prog = *pprog;
1149
1150 switch (size) {
1151 case BPF_B:
1152 /* movzx rax, byte ptr [rax + r12 + off] */
1153 EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6);
1154 break;
1155 case BPF_H:
1156 /* movzx rax, word ptr [rax + r12 + off] */
1157 EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7);
1158 break;
1159 case BPF_W:
1160 /* mov eax, dword ptr [rax + r12 + off] */
1161 EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B);
1162 break;
1163 case BPF_DW:
1164 /* mov rax, qword ptr [rax + r12 + off] */
1165 EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B);
1166 break;
1167 }
1168 emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
1169 *pprog = prog;
1170 }
1171
1172 static void emit_ldsx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1173 {
1174 u8 *prog = *pprog;
1175
1176 switch (size) {
1177 case BPF_B:
1178 /* movsx rax, byte ptr [rax + r12 + off] */
1179 EMIT3(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x0F, 0xBE);
1180 break;
1181 case BPF_H:
1182 /* movsx rax, word ptr [rax + r12 + off] */
1183 EMIT3(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x0F, 0xBF);
1184 break;
1185 case BPF_W:
1186 /* movsx rax, dword ptr [rax + r12 + off] */
1187 EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x63);
1188 break;
1189 }
1190 emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
1191 *pprog = prog;
1192 }
1193
1194 static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1195 {
1196 emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
1197 }
1198
1199 static void emit_ldsx_r12(u8 **prog, u32 size, u32 dst_reg, u32 src_reg, int off)
1200 {
1201 emit_ldsx_index(prog, size, dst_reg, src_reg, X86_REG_R12, off);
1202 }
1203
1204 /* STX: *(u8*)(dst_reg + off) = src_reg */
1205 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1206 {
1207 u8 *prog = *pprog;
1208
1209 switch (size) {
1210 case BPF_B:
1211 /* Emit 'mov byte ptr [rax + off], al' */
1212 if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
1213 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
1214 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
1215 else
1216 EMIT1(0x88);
1217 break;
1218 case BPF_H:
1219 if (is_ereg(dst_reg) || is_ereg(src_reg))
1220 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
1221 else
1222 EMIT2(0x66, 0x89);
1223 break;
1224 case BPF_W:
1225 if (is_ereg(dst_reg) || is_ereg(src_reg))
1226 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
1227 else
1228 EMIT1(0x89);
1229 break;
1230 case BPF_DW:
1231 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
1232 break;
1233 }
1234 emit_insn_suffix(&prog, dst_reg, src_reg, off);
1235 *pprog = prog;
1236 }
1237
1238 /* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */
1239 static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1240 {
1241 u8 *prog = *pprog;
1242
1243 switch (size) {
1244 case BPF_B:
1245 /* mov byte ptr [rax + r12 + off], al */
1246 EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88);
1247 break;
1248 case BPF_H:
1249 /* mov word ptr [rax + r12 + off], ax */
1250 EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
1251 break;
1252 case BPF_W:
1253 /* mov dword ptr [rax + r12 + off], eax */
1254 EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
1255 break;
1256 case BPF_DW:
1257 /* mov qword ptr [rax + r12 + off], rax */
1258 EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89);
1259 break;
1260 }
1261 emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1262 *pprog = prog;
1263 }
1264
1265 static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1266 {
1267 emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
1268 }
1269
1270 /* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */
1271 static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm)
1272 {
1273 u8 *prog = *pprog;
1274
1275 switch (size) {
1276 case BPF_B:
1277 /* mov byte ptr [rax + r12 + off], imm8 */
1278 EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6);
1279 break;
1280 case BPF_H:
1281 /* mov word ptr [rax + r12 + off], imm16 */
1282 EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
1283 break;
1284 case BPF_W:
1285 /* mov dword ptr [rax + r12 + off], imm32 */
1286 EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
1287 break;
1288 case BPF_DW:
1289 /* mov qword ptr [rax + r12 + off], imm32 */
1290 EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7);
1291 break;
1292 }
1293 emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off);
1294 EMIT(imm, bpf_size_to_x86_bytes(size));
1295 *pprog = prog;
1296 }
1297
1298 static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
1299 {
1300 emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
1301 }
1302
1303 static int emit_atomic_rmw(u8 **pprog, u32 atomic_op,
1304 u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
1305 {
1306 u8 *prog = *pprog;
1307
1308 EMIT1(0xF0); /* lock prefix */
1309
1310 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
1311
1312 /* emit opcode */
1313 switch (atomic_op) {
1314 case BPF_ADD:
1315 case BPF_AND:
1316 case BPF_OR:
1317 case BPF_XOR:
1318 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
1319 EMIT1(simple_alu_opcodes[atomic_op]);
1320 break;
1321 case BPF_ADD | BPF_FETCH:
1322 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
1323 EMIT2(0x0F, 0xC1);
1324 break;
1325 case BPF_XCHG:
1326 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
1327 EMIT1(0x87);
1328 break;
1329 case BPF_CMPXCHG:
1330 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
1331 EMIT2(0x0F, 0xB1);
1332 break;
1333 default:
1334 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1335 return -EFAULT;
1336 }
1337
1338 emit_insn_suffix(&prog, dst_reg, src_reg, off);
1339
1340 *pprog = prog;
1341 return 0;
1342 }
1343
1344 static int emit_atomic_rmw_index(u8 **pprog, u32 atomic_op, u32 size,
1345 u32 dst_reg, u32 src_reg, u32 index_reg,
1346 int off)
1347 {
1348 u8 *prog = *pprog;
1349
1350 EMIT1(0xF0); /* lock prefix */
1351 switch (size) {
1352 case BPF_W:
1353 EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg));
1354 break;
1355 case BPF_DW:
1356 EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
1357 break;
1358 default:
1359 pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
1360 return -EFAULT;
1361 }
1362
1363 /* emit opcode */
1364 switch (atomic_op) {
1365 case BPF_ADD:
1366 case BPF_AND:
1367 case BPF_OR:
1368 case BPF_XOR:
1369 /* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */
1370 EMIT1(simple_alu_opcodes[atomic_op]);
1371 break;
1372 case BPF_ADD | BPF_FETCH:
1373 /* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */
1374 EMIT2(0x0F, 0xC1);
1375 break;
1376 case BPF_XCHG:
1377 /* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */
1378 EMIT1(0x87);
1379 break;
1380 case BPF_CMPXCHG:
1381 /* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */
1382 EMIT2(0x0F, 0xB1);
1383 break;
1384 default:
1385 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1386 return -EFAULT;
1387 }
1388 emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1389 *pprog = prog;
1390 return 0;
1391 }
1392
1393 static int emit_atomic_ld_st(u8 **pprog, u32 atomic_op, u32 dst_reg,
1394 u32 src_reg, s16 off, u8 bpf_size)
1395 {
1396 switch (atomic_op) {
1397 case BPF_LOAD_ACQ:
1398 /* dst_reg = smp_load_acquire(src_reg + off16) */
1399 emit_ldx(pprog, bpf_size, dst_reg, src_reg, off);
1400 break;
1401 case BPF_STORE_REL:
1402 /* smp_store_release(dst_reg + off16, src_reg) */
1403 emit_stx(pprog, bpf_size, dst_reg, src_reg, off);
1404 break;
1405 default:
1406 pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
1407 atomic_op);
1408 return -EFAULT;
1409 }
1410
1411 return 0;
1412 }
1413
1414 static int emit_atomic_ld_st_index(u8 **pprog, u32 atomic_op, u32 size,
1415 u32 dst_reg, u32 src_reg, u32 index_reg,
1416 int off)
1417 {
1418 switch (atomic_op) {
1419 case BPF_LOAD_ACQ:
1420 /* dst_reg = smp_load_acquire(src_reg + idx_reg + off16) */
1421 emit_ldx_index(pprog, size, dst_reg, src_reg, index_reg, off);
1422 break;
1423 case BPF_STORE_REL:
1424 /* smp_store_release(dst_reg + idx_reg + off16, src_reg) */
1425 emit_stx_index(pprog, size, dst_reg, src_reg, index_reg, off);
1426 break;
1427 default:
1428 pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
1429 atomic_op);
1430 return -EFAULT;
1431 }
1432
1433 return 0;
1434 }
1435
1436 /*
1437 * Metadata encoding for exception handling in JITed code.
1438 *
1439 * Format of `fixup` and `data` fields in `struct exception_table_entry`:
1440 *
1441 * Bit layout of `fixup` (32-bit):
1442 *
1443 * +-----------+--------+-----------+---------+----------+
1444 * | 31 | 30-24 | 23-16 | 15-8 | 7-0 |
1445 * | | | | | |
1446 * | ARENA_ACC | Unused | ARENA_REG | DST_REG | INSN_LEN |
1447 * +-----------+--------+-----------+---------+----------+
1448 *
1449 * - INSN_LEN (8 bits): Length of faulting insn (max x86 insn = 15 bytes (fits in 8 bits)).
1450 * - DST_REG (8 bits): Offset of dst_reg from reg2pt_regs[] (max offset = 112 (fits in 8 bits)).
1451 * This is set to DONT_CLEAR if the insn is a store.
1452 * - ARENA_REG (8 bits): Offset of the register that is used to calculate the
1453 * address for load/store when accessing the arena region.
1454 * - ARENA_ACCESS (1 bit): This bit is set when the faulting instruction accessed the arena region.
1455 *
1456 * Bit layout of `data` (32-bit):
1457 *
1458 * +--------------+--------+--------------+
1459 * | 31-16 | 15-8 | 7-0 |
1460 * | | | |
1461 * | ARENA_OFFSET | Unused | EX_TYPE_BPF |
1462 * +--------------+--------+--------------+
1463 *
1464 * - ARENA_OFFSET (16 bits): Offset used to calculate the address for load/store when
1465 * accessing the arena region.
1466 */
1467
1468 #define DONT_CLEAR 1
1469 #define FIXUP_INSN_LEN_MASK GENMASK(7, 0)
1470 #define FIXUP_REG_MASK GENMASK(15, 8)
1471 #define FIXUP_ARENA_REG_MASK GENMASK(23, 16)
1472 #define FIXUP_ARENA_ACCESS BIT(31)
1473 #define DATA_ARENA_OFFSET_MASK GENMASK(31, 16)
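
/*
 * Illustrative composition (a sketch of what the JIT stores for a faulting
 * arena load into BPF R0 whose address came from src_reg; field helpers from
 * <linux/bitfield.h>):
 *
 *   ex->fixup = FIELD_PREP(FIXUP_INSN_LEN_MASK, insn_len) |
 *               FIELD_PREP(FIXUP_REG_MASK, reg2pt_regs[BPF_REG_0]) |
 *               FIELD_PREP(FIXUP_ARENA_REG_MASK, reg2pt_regs[src_reg]) |
 *               FIXUP_ARENA_ACCESS;
 *   ex->data  = FIELD_PREP(DATA_ARENA_OFFSET_MASK, (u16)off) | EX_TYPE_BPF;
 *
 * ex_handler_bpf() below decodes exactly these fields; a store would put
 * DONT_CLEAR in FIXUP_REG_MASK instead.
 */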
1474
1475 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
1476 {
1477 u32 reg = FIELD_GET(FIXUP_REG_MASK, x->fixup);
1478 u32 insn_len = FIELD_GET(FIXUP_INSN_LEN_MASK, x->fixup);
1479 bool is_arena = !!(x->fixup & FIXUP_ARENA_ACCESS);
1480 bool is_write = (reg == DONT_CLEAR);
1481 unsigned long addr;
1482 s16 off;
1483 u32 arena_reg;
1484
1485 if (is_arena) {
1486 arena_reg = FIELD_GET(FIXUP_ARENA_REG_MASK, x->fixup);
1487 off = FIELD_GET(DATA_ARENA_OFFSET_MASK, x->data);
1488 addr = *(unsigned long *)((void *)regs + arena_reg) + off;
1489 bpf_prog_report_arena_violation(is_write, addr, regs->ip);
1490 }
1491
1492 /* jump over faulting load and clear dest register */
1493 if (reg != DONT_CLEAR)
1494 *(unsigned long *)((void *)regs + reg) = 0;
1495 regs->ip += insn_len;
1496
1497 return true;
1498 }
1499
1500 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
1501 bool *regs_used)
1502 {
1503 int i;
1504
1505 for (i = 1; i <= insn_cnt; i++, insn++) {
1506 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
1507 regs_used[0] = true;
1508 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
1509 regs_used[1] = true;
1510 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
1511 regs_used[2] = true;
1512 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
1513 regs_used[3] = true;
1514 }
1515 }
1516
1517 /* emit the 3-byte VEX prefix
1518 *
1519 * r: same as rex.r, extra bit for ModRM reg field
1520 * x: same as rex.x, extra bit for SIB index field
1521 * b: same as rex.b, extra bit for ModRM r/m, or SIB base
1522 * m: opcode map select, encoding escape bytes e.g. 0x0f38
1523 * w: same as rex.w (32 bit or 64 bit) or opcode specific
1524 * src_reg2: additional source reg (encoded as BPF reg)
1525 * l: vector length (128 bit or 256 bit) or reserved
1526 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
1527 */
1528 static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
1529 bool w, u8 src_reg2, bool l, u8 pp)
1530 {
1531 u8 *prog = *pprog;
1532 const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
1533 u8 b1, b2;
1534 u8 vvvv = reg2hex[src_reg2];
1535
1536 /* reg2hex gives only the lower 3 bits of vvvv */
1537 if (is_ereg(src_reg2))
1538 vvvv |= 1 << 3;
1539
1540 /*
1541 * 2nd byte of 3-byte VEX prefix
1542 * ~ means bit inverted encoding
1543 *
1544 * 7 0
1545 * +---+---+---+---+---+---+---+---+
1546 * |~R |~X |~B | m |
1547 * +---+---+---+---+---+---+---+---+
1548 */
1549 b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
1550 /*
1551 * 3rd byte of 3-byte VEX prefix
1552 *
1553 * 7 0
1554 * +---+---+---+---+---+---+---+---+
1555 * | W | ~vvvv | L | pp |
1556 * +---+---+---+---+---+---+---+---+
1557 */
1558 b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
1559
1560 EMIT3(b0, b1, b2);
1561 *pprog = prog;
1562 }
1563
1564 /* emit BMI2 shift instruction */
1565 static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
1566 {
1567 u8 *prog = *pprog;
1568 bool r = is_ereg(dst_reg);
1569 u8 m = 2; /* escape code 0f38 */
1570
1571 emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
1572 EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
1573 *pprog = prog;
1574 }
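
/*
 * In the emitted VEX encoding the shift count (src_reg) travels in the vvvv
 * field, while dst_reg is both the r/m source and the reg destination in
 * ModRM (add_2reg(0xC0, dst_reg, dst_reg)), i.e. the value is shifted in
 * place by src_reg without having to move the count into rcx.
 */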
1575
1576 static void emit_priv_frame_ptr(u8 **pprog, void __percpu *priv_frame_ptr)
1577 {
1578 u8 *prog = *pprog;
1579
1580 /* movabs r9, priv_frame_ptr */
1581 emit_mov_imm64(&prog, X86_REG_R9, (__force long) priv_frame_ptr >> 32,
1582 (u32) (__force long) priv_frame_ptr);
1583
1584 #ifdef CONFIG_SMP
1585 /* add <r9>, gs:[<off>] */
1586 EMIT2(0x65, 0x4c);
1587 EMIT3(0x03, 0x0c, 0x25);
1588 EMIT((u32)(unsigned long)&this_cpu_off, 4);
1589 #endif
1590
1591 *pprog = prog;
1592 }
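
/*
 * The result is that r9 holds this CPU's private-stack frame pointer
 * (base + gs:[this_cpu_off]); do_jit() below then substitutes X86_REG_R9
 * for BPF_REG_FP whenever priv_frame_ptr is in use.
 */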
1593
1594 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
1595
1596 #define __LOAD_TCC_PTR(off) \
1597 EMIT3_off32(0x48, 0x8B, 0x85, off)
1598 /* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */
1599 #define LOAD_TAIL_CALL_CNT_PTR(stack) \
1600 __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))
1601
1602 /* Memory size/value to protect private stack overflow/underflow */
1603 #define PRIV_STACK_GUARD_SZ 8
1604 #define PRIV_STACK_GUARD_VAL 0xEB9F12345678eb9fULL
1605
1606 static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip,
1607 struct bpf_prog *bpf_prog)
1608 {
1609 u8 *prog = *pprog;
1610 u8 *func;
1611
1612 if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) {
1613 /* The clearing sequence clobbers eax and ecx. */
1614 EMIT1(0x50); /* push rax */
1615 EMIT1(0x51); /* push rcx */
1616 ip += 2;
1617
1618 func = (u8 *)clear_bhb_loop;
1619 ip += x86_call_depth_emit_accounting(&prog, func, ip);
1620
1621 if (emit_call(&prog, func, ip))
1622 return -EINVAL;
1623 EMIT1(0x59); /* pop rcx */
1624 EMIT1(0x58); /* pop rax */
1625 }
1626 /* Insert IBHF instruction */
1627 if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) &&
1628 cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) ||
1629 cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) {
1630 /*
1631 * Add an Indirect Branch History Fence (IBHF). IBHF acts as a
1632 * fence preventing branch history from before the fence from
1633 * affecting indirect branches after the fence. This is
1634 * specifically used in cBPF jitted code to prevent Intra-mode
1635 * BHI attacks. The IBHF instruction is designed to be a NOP on
1636 * hardware that doesn't need or support it. The REP and REX.W
1637 * prefixes are required by the microcode, and they also ensure
1638 * that the NOP is unlikely to be used in existing code.
1639 *
1640 * IBHF is not a valid instruction in 32-bit mode.
1641 */
1642 EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */
1643 }
1644 *pprog = prog;
1645 return 0;
1646 }
1647
1648 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
1649 int oldproglen, struct jit_context *ctx, bool jmp_padding)
1650 {
1651 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
1652 struct bpf_insn *insn = bpf_prog->insnsi;
1653 bool callee_regs_used[4] = {};
1654 int insn_cnt = bpf_prog->len;
1655 bool seen_exit = false;
1656 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
1657 void __percpu *priv_frame_ptr = NULL;
1658 u64 arena_vm_start, user_vm_start;
1659 void __percpu *priv_stack_ptr;
1660 int i, excnt = 0;
1661 int ilen, proglen = 0;
1662 u8 *prog = temp;
1663 u32 stack_depth;
1664 int err;
1665
1666 stack_depth = bpf_prog->aux->stack_depth;
1667 priv_stack_ptr = bpf_prog->aux->priv_stack_ptr;
1668 if (priv_stack_ptr) {
1669 priv_frame_ptr = priv_stack_ptr + PRIV_STACK_GUARD_SZ + round_up(stack_depth, 8);
1670 stack_depth = 0;
1671 }
1672
1673 arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
1674 user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);
1675
1676 detect_reg_usage(insn, insn_cnt, callee_regs_used);
1677
1678 emit_prologue(&prog, image, stack_depth,
1679 bpf_prog_was_classic(bpf_prog), tail_call_reachable,
1680 bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
1681
1682 bpf_prog->aux->ksym.fp_start = prog - temp;
1683
1684 /* Exception callback will clobber callee regs for its own use, and
1685 * restore the original callee regs from main prog's stack frame.
1686 */
1687 if (bpf_prog->aux->exception_boundary) {
1688 /* We also need to save r12, which is not mapped to any BPF
1689 * register, as we throw after entry into the kernel, which may
1690 * overwrite r12.
1691 */
1692 push_r12(&prog);
1693 push_callee_regs(&prog, all_callee_regs_used);
1694 } else {
1695 if (arena_vm_start)
1696 push_r12(&prog);
1697 push_callee_regs(&prog, callee_regs_used);
1698 }
1699 if (arena_vm_start)
1700 emit_mov_imm64(&prog, X86_REG_R12,
1701 arena_vm_start >> 32, (u32) arena_vm_start);
1702
1703 if (priv_frame_ptr)
1704 emit_priv_frame_ptr(&prog, priv_frame_ptr);
1705
1706 ilen = prog - temp;
1707 if (rw_image)
1708 memcpy(rw_image + proglen, temp, ilen);
1709 proglen += ilen;
1710 addrs[0] = proglen;
1711 prog = temp;
1712
1713 for (i = 1; i <= insn_cnt; i++, insn++) {
1714 const s32 imm32 = insn->imm;
1715 u32 dst_reg = insn->dst_reg;
1716 u32 src_reg = insn->src_reg;
1717 u8 b2 = 0, b3 = 0;
1718 u8 *start_of_ldx;
1719 s64 jmp_offset;
1720 s16 insn_off;
1721 u8 jmp_cond;
1722 u8 *func;
1723 int nops;
1724
1725 if (priv_frame_ptr) {
1726 if (src_reg == BPF_REG_FP)
1727 src_reg = X86_REG_R9;
1728
1729 if (dst_reg == BPF_REG_FP)
1730 dst_reg = X86_REG_R9;
1731 }
1732
1733 switch (insn->code) {
1734 /* ALU */
1735 case BPF_ALU | BPF_ADD | BPF_X:
1736 case BPF_ALU | BPF_SUB | BPF_X:
1737 case BPF_ALU | BPF_AND | BPF_X:
1738 case BPF_ALU | BPF_OR | BPF_X:
1739 case BPF_ALU | BPF_XOR | BPF_X:
1740 case BPF_ALU64 | BPF_ADD | BPF_X:
1741 case BPF_ALU64 | BPF_SUB | BPF_X:
1742 case BPF_ALU64 | BPF_AND | BPF_X:
1743 case BPF_ALU64 | BPF_OR | BPF_X:
1744 case BPF_ALU64 | BPF_XOR | BPF_X:
1745 maybe_emit_mod(&prog, dst_reg, src_reg,
1746 BPF_CLASS(insn->code) == BPF_ALU64);
1747 b2 = simple_alu_opcodes[BPF_OP(insn->code)];
1748 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
1749 break;
1750
1751 case BPF_ALU64 | BPF_MOV | BPF_X:
1752 if (insn_is_cast_user(insn)) {
1753 if (dst_reg != src_reg)
1754 /* 32-bit mov */
1755 emit_mov_reg(&prog, false, dst_reg, src_reg);
1756 /* shl dst_reg, 32 */
1757 maybe_emit_1mod(&prog, dst_reg, true);
1758 EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32);
1759
1760 /* or dst_reg, user_vm_start */
1761 maybe_emit_1mod(&prog, dst_reg, true);
1762 if (is_axreg(dst_reg))
1763 EMIT1_off32(0x0D, user_vm_start >> 32);
1764 else
1765 EMIT2_off32(0x81, add_1reg(0xC8, dst_reg), user_vm_start >> 32);
1766
1767 /* rol dst_reg, 32 */
1768 maybe_emit_1mod(&prog, dst_reg, true);
1769 EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32);
1770
1771 /* xor r11, r11 */
1772 EMIT3(0x4D, 0x31, 0xDB);
1773
1774 /* test dst_reg32, dst_reg32; check if lower 32-bit are zero */
1775 maybe_emit_mod(&prog, dst_reg, dst_reg, false);
1776 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1777
1778 /* cmove r11, dst_reg; if so, set dst_reg to zero */
1779 /* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */
1780 maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
1781 EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
1782 break;
1783 } else if (insn_is_mov_percpu_addr(insn)) {
1784 /* mov <dst>, <src> (if necessary) */
1785 EMIT_mov(dst_reg, src_reg);
1786 #ifdef CONFIG_SMP
1787 /* add <dst>, gs:[<off>] */
1788 EMIT2(0x65, add_1mod(0x48, dst_reg));
1789 EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25);
1790 EMIT((u32)(unsigned long)&this_cpu_off, 4);
1791 #endif
1792 break;
1793 }
1794 fallthrough;
1795 case BPF_ALU | BPF_MOV | BPF_X:
1796 if (insn->off == 0)
1797 emit_mov_reg(&prog,
1798 BPF_CLASS(insn->code) == BPF_ALU64,
1799 dst_reg, src_reg);
1800 else
1801 emit_movsx_reg(&prog, insn->off,
1802 BPF_CLASS(insn->code) == BPF_ALU64,
1803 dst_reg, src_reg);
1804 break;
1805
1806 /* neg dst */
1807 case BPF_ALU | BPF_NEG:
1808 case BPF_ALU64 | BPF_NEG:
1809 maybe_emit_1mod(&prog, dst_reg,
1810 BPF_CLASS(insn->code) == BPF_ALU64);
1811 EMIT2(0xF7, add_1reg(0xD8, dst_reg));
1812 break;
1813
1814 case BPF_ALU | BPF_ADD | BPF_K:
1815 case BPF_ALU | BPF_SUB | BPF_K:
1816 case BPF_ALU | BPF_AND | BPF_K:
1817 case BPF_ALU | BPF_OR | BPF_K:
1818 case BPF_ALU | BPF_XOR | BPF_K:
1819 case BPF_ALU64 | BPF_ADD | BPF_K:
1820 case BPF_ALU64 | BPF_SUB | BPF_K:
1821 case BPF_ALU64 | BPF_AND | BPF_K:
1822 case BPF_ALU64 | BPF_OR | BPF_K:
1823 case BPF_ALU64 | BPF_XOR | BPF_K:
1824 maybe_emit_1mod(&prog, dst_reg,
1825 BPF_CLASS(insn->code) == BPF_ALU64);
1826
1827 /*
1828 * b3 holds 'normal' opcode, b2 short form only valid
1829 * in case dst is eax/rax.
1830 */
1831 switch (BPF_OP(insn->code)) {
1832 case BPF_ADD:
1833 b3 = 0xC0;
1834 b2 = 0x05;
1835 break;
1836 case BPF_SUB:
1837 b3 = 0xE8;
1838 b2 = 0x2D;
1839 break;
1840 case BPF_AND:
1841 b3 = 0xE0;
1842 b2 = 0x25;
1843 break;
1844 case BPF_OR:
1845 b3 = 0xC8;
1846 b2 = 0x0D;
1847 break;
1848 case BPF_XOR:
1849 b3 = 0xF0;
1850 b2 = 0x35;
1851 break;
1852 }
1853
1854 if (is_imm8(imm32))
1855 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1856 else if (is_axreg(dst_reg))
1857 EMIT1_off32(b2, imm32);
1858 else
1859 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1860 break;
1861
1862 case BPF_ALU64 | BPF_MOV | BPF_K:
1863 case BPF_ALU | BPF_MOV | BPF_K:
1864 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1865 dst_reg, imm32);
1866 break;
1867
1868 case BPF_LD | BPF_IMM | BPF_DW:
1869 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1870 insn++;
1871 i++;
1872 break;
1873
1874 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1875 case BPF_ALU | BPF_MOD | BPF_X:
1876 case BPF_ALU | BPF_DIV | BPF_X:
1877 case BPF_ALU | BPF_MOD | BPF_K:
1878 case BPF_ALU | BPF_DIV | BPF_K:
1879 case BPF_ALU64 | BPF_MOD | BPF_X:
1880 case BPF_ALU64 | BPF_DIV | BPF_X:
1881 case BPF_ALU64 | BPF_MOD | BPF_K:
1882 case BPF_ALU64 | BPF_DIV | BPF_K: {
1883 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1884
1885 if (dst_reg != BPF_REG_0)
1886 EMIT1(0x50); /* push rax */
1887 if (dst_reg != BPF_REG_3)
1888 EMIT1(0x52); /* push rdx */
1889
1890 if (BPF_SRC(insn->code) == BPF_X) {
1891 if (src_reg == BPF_REG_0 ||
1892 src_reg == BPF_REG_3) {
1893 /* mov r11, src_reg */
1894 EMIT_mov(AUX_REG, src_reg);
1895 src_reg = AUX_REG;
1896 }
1897 } else {
1898 /* mov r11, imm32 */
1899 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1900 src_reg = AUX_REG;
1901 }
1902
1903 if (dst_reg != BPF_REG_0)
1904 /* mov rax, dst_reg */
1905 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1906
1907 if (insn->off == 0) {
1908 /*
1909 * xor edx, edx
1910 * equivalent to 'xor rdx, rdx', but one byte less
1911 */
1912 EMIT2(0x31, 0xd2);
1913
1914 /* div src_reg */
1915 maybe_emit_1mod(&prog, src_reg, is64);
1916 EMIT2(0xF7, add_1reg(0xF0, src_reg));
1917 } else {
1918 if (BPF_CLASS(insn->code) == BPF_ALU)
1919 EMIT1(0x99); /* cdq */
1920 else
1921 EMIT2(0x48, 0x99); /* cqo */
1922
1923 /* idiv src_reg */
1924 maybe_emit_1mod(&prog, src_reg, is64);
1925 EMIT2(0xF7, add_1reg(0xF8, src_reg));
1926 }
1927
1928 if (BPF_OP(insn->code) == BPF_MOD &&
1929 dst_reg != BPF_REG_3)
1930 /* mov dst_reg, rdx */
1931 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1932 else if (BPF_OP(insn->code) == BPF_DIV &&
1933 dst_reg != BPF_REG_0)
1934 /* mov dst_reg, rax */
1935 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1936
1937 if (dst_reg != BPF_REG_3)
1938 EMIT1(0x5A); /* pop rdx */
1939 if (dst_reg != BPF_REG_0)
1940 EMIT1(0x58); /* pop rax */
1941 break;
1942 }
1943
1944 case BPF_ALU | BPF_MUL | BPF_K:
1945 case BPF_ALU64 | BPF_MUL | BPF_K:
1946 maybe_emit_mod(&prog, dst_reg, dst_reg,
1947 BPF_CLASS(insn->code) == BPF_ALU64);
1948
1949 if (is_imm8(imm32))
1950 /* imul dst_reg, dst_reg, imm8 */
1951 EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1952 imm32);
1953 else
1954 /* imul dst_reg, dst_reg, imm32 */
1955 EMIT2_off32(0x69,
1956 add_2reg(0xC0, dst_reg, dst_reg),
1957 imm32);
1958 break;
1959
1960 case BPF_ALU | BPF_MUL | BPF_X:
1961 case BPF_ALU64 | BPF_MUL | BPF_X:
1962 maybe_emit_mod(&prog, src_reg, dst_reg,
1963 BPF_CLASS(insn->code) == BPF_ALU64);
1964
1965 /* imul dst_reg, src_reg */
1966 EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1967 break;
1968
1969 /* Shifts */
1970 case BPF_ALU | BPF_LSH | BPF_K:
1971 case BPF_ALU | BPF_RSH | BPF_K:
1972 case BPF_ALU | BPF_ARSH | BPF_K:
1973 case BPF_ALU64 | BPF_LSH | BPF_K:
1974 case BPF_ALU64 | BPF_RSH | BPF_K:
1975 case BPF_ALU64 | BPF_ARSH | BPF_K:
1976 maybe_emit_1mod(&prog, dst_reg,
1977 BPF_CLASS(insn->code) == BPF_ALU64);
1978
1979 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1980 if (imm32 == 1)
1981 EMIT2(0xD1, add_1reg(b3, dst_reg));
1982 else
1983 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1984 break;
1985
1986 case BPF_ALU | BPF_LSH | BPF_X:
1987 case BPF_ALU | BPF_RSH | BPF_X:
1988 case BPF_ALU | BPF_ARSH | BPF_X:
1989 case BPF_ALU64 | BPF_LSH | BPF_X:
1990 case BPF_ALU64 | BPF_RSH | BPF_X:
1991 case BPF_ALU64 | BPF_ARSH | BPF_X:
1992 /* BMI2 shifts aren't better when shift count is already in rcx */
1993 if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
1994 /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
1995 bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
1996 u8 op;
1997
1998 switch (BPF_OP(insn->code)) {
1999 case BPF_LSH:
2000 op = 1; /* prefix 0x66 */
2001 break;
2002 case BPF_RSH:
2003 op = 3; /* prefix 0xf2 */
2004 break;
2005 case BPF_ARSH:
2006 op = 2; /* prefix 0xf3 */
2007 break;
2008 }
2009
2010 emit_shiftx(&prog, dst_reg, src_reg, w, op);
2011
2012 break;
2013 }
2014
2015 if (src_reg != BPF_REG_4) { /* common case */
2016 /* Check for bad case when dst_reg == rcx */
2017 if (dst_reg == BPF_REG_4) {
2018 /* mov r11, dst_reg */
2019 EMIT_mov(AUX_REG, dst_reg);
2020 dst_reg = AUX_REG;
2021 } else {
2022 EMIT1(0x51); /* push rcx */
2023 }
2024 /* mov rcx, src_reg */
2025 EMIT_mov(BPF_REG_4, src_reg);
2026 }
2027
2028 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
2029 maybe_emit_1mod(&prog, dst_reg,
2030 BPF_CLASS(insn->code) == BPF_ALU64);
2031
2032 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
2033 EMIT2(0xD3, add_1reg(b3, dst_reg));
2034
2035 if (src_reg != BPF_REG_4) {
2036 if (insn->dst_reg == BPF_REG_4)
2037 /* mov dst_reg, r11 */
2038 EMIT_mov(insn->dst_reg, AUX_REG);
2039 else
2040 EMIT1(0x59); /* pop rcx */
2041 }
2042
2043 break;
2044
2045 case BPF_ALU | BPF_END | BPF_FROM_BE:
2046 case BPF_ALU64 | BPF_END | BPF_FROM_LE:
2047 switch (imm32) {
2048 case 16:
2049 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
2050 EMIT1(0x66);
2051 if (is_ereg(dst_reg))
2052 EMIT1(0x41);
2053 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
2054
2055 /* Emit 'movzwl eax, ax' */
2056 if (is_ereg(dst_reg))
2057 EMIT3(0x45, 0x0F, 0xB7);
2058 else
2059 EMIT2(0x0F, 0xB7);
2060 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
2061 break;
2062 case 32:
2063 /* Emit 'bswap eax' to swap lower 4 bytes */
2064 if (is_ereg(dst_reg))
2065 EMIT2(0x41, 0x0F);
2066 else
2067 EMIT1(0x0F);
2068 EMIT1(add_1reg(0xC8, dst_reg));
2069 break;
2070 case 64:
2071 /* Emit 'bswap rax' to swap 8 bytes */
2072 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
2073 add_1reg(0xC8, dst_reg));
2074 break;
2075 }
2076 break;
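		/* For example, with dst_reg holding 0x0000000011223344, a
		 * BPF_FROM_BE with imm32 == 16 leaves 0x0000000000004433
		 * (lower two bytes swapped, the rest cleared by the movzwl),
		 * while imm32 == 32 produces 0x0000000044332211 via bswap of
		 * the low 32 bits.
		 */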
2077
2078 case BPF_ALU | BPF_END | BPF_FROM_LE:
2079 switch (imm32) {
2080 case 16:
2081 /*
2082 * Emit 'movzwl eax, ax' to zero extend 16-bit
2083 * into 64 bit
2084 */
2085 if (is_ereg(dst_reg))
2086 EMIT3(0x45, 0x0F, 0xB7);
2087 else
2088 EMIT2(0x0F, 0xB7);
2089 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
2090 break;
2091 case 32:
2092 /* Emit 'mov eax, eax' to clear upper 32-bits */
2093 if (is_ereg(dst_reg))
2094 EMIT1(0x45);
2095 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
2096 break;
2097 case 64:
2098 /* nop */
2099 break;
2100 }
2101 break;
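		/* E.g. for imm32 == 32 the emitted 'mov eax, eax' style move
		 * turns 0xdeadbeef11223344 in dst_reg into 0x0000000011223344:
		 * the byte order is already little endian, only the upper
		 * half needs clearing.
		 */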
2102
2103 /* speculation barrier */
2104 case BPF_ST | BPF_NOSPEC:
2105 EMIT_LFENCE();
2106 break;
2107
2108 /* ST: *(u8*)(dst_reg + off) = imm */
2109 case BPF_ST | BPF_MEM | BPF_B:
2110 if (is_ereg(dst_reg))
2111 EMIT2(0x41, 0xC6);
2112 else
2113 EMIT1(0xC6);
2114 goto st;
2115 case BPF_ST | BPF_MEM | BPF_H:
2116 if (is_ereg(dst_reg))
2117 EMIT3(0x66, 0x41, 0xC7);
2118 else
2119 EMIT2(0x66, 0xC7);
2120 goto st;
2121 case BPF_ST | BPF_MEM | BPF_W:
2122 if (is_ereg(dst_reg))
2123 EMIT2(0x41, 0xC7);
2124 else
2125 EMIT1(0xC7);
2126 goto st;
2127 case BPF_ST | BPF_MEM | BPF_DW:
2128 EMIT2(add_1mod(0x48, dst_reg), 0xC7);
2129
2130 st: if (is_imm8(insn->off))
2131 EMIT2(add_1reg(0x40, dst_reg), insn->off);
2132 else
2133 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
2134
2135 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
2136 break;
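		/* As a concrete example, the BPF insn *(u32 *)(r1 + 0x10) = 42
		 * (with r1 mapped to rdi) is emitted as C7 47 10 2A 00 00 00,
		 * i.e. mov DWORD PTR [rdi+0x10], 0x2a: opcode 0xC7, ModRM 0x47
		 * (disp8 addressing off rdi), the 8-bit offset and the 32-bit
		 * immediate.
		 */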
2137
2138 /* STX: *(u8*)(dst_reg + off) = src_reg */
2139 case BPF_STX | BPF_MEM | BPF_B:
2140 case BPF_STX | BPF_MEM | BPF_H:
2141 case BPF_STX | BPF_MEM | BPF_W:
2142 case BPF_STX | BPF_MEM | BPF_DW:
2143 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2144 break;
2145
2146 case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
2147 case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
2148 case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
2149 case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
2150 start_of_ldx = prog;
2151 emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm);
2152 goto populate_extable;
2153
2154 /* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */
2155 case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
2156 case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
2157 case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
2158 case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
2159 case BPF_LDX | BPF_PROBE_MEM32SX | BPF_B:
2160 case BPF_LDX | BPF_PROBE_MEM32SX | BPF_H:
2161 case BPF_LDX | BPF_PROBE_MEM32SX | BPF_W:
2162 case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
2163 case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
2164 case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
2165 case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
2166 start_of_ldx = prog;
2167 if (BPF_CLASS(insn->code) == BPF_LDX) {
2168 if (BPF_MODE(insn->code) == BPF_PROBE_MEM32SX)
2169 emit_ldsx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2170 else
2171 emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2172 } else {
2173 emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2174 }
2175 populate_extable:
2176 {
2177 struct exception_table_entry *ex;
2178 u8 *_insn = image + proglen + (start_of_ldx - temp);
2179 u32 arena_reg, fixup_reg;
2180 s64 delta;
2181
2182 if (!bpf_prog->aux->extable)
2183 break;
2184
2185 if (excnt >= bpf_prog->aux->num_exentries) {
2186 pr_err("mem32 extable bug\n");
2187 return -EFAULT;
2188 }
2189 ex = &bpf_prog->aux->extable[excnt++];
2190
2191 delta = _insn - (u8 *)&ex->insn;
2192 /* switch ex to rw buffer for writes */
2193 ex = (void *)rw_image + ((void *)ex - (void *)image);
2194
2195 ex->insn = delta;
2196
2197 ex->data = EX_TYPE_BPF;
2198
2199 /*
2200 * src_reg/dst_reg holds the address in the arena region with upper
2201 * 32-bits being zero because of a preceding addr_space_cast(r<n>,
2202 * 0x0, 0x1) instruction. This address is adjusted with the addition
2203 * of arena_vm_start (see the implementation of BPF_PROBE_MEM32 and
2204 * BPF_PROBE_ATOMIC) before being used for the memory access. Pass
2205 * the reg holding the unmodified 32-bit address to
2206 * ex_handler_bpf().
2207 */
2208 if (BPF_CLASS(insn->code) == BPF_LDX) {
2209 arena_reg = reg2pt_regs[src_reg];
2210 fixup_reg = reg2pt_regs[dst_reg];
2211 } else {
2212 arena_reg = reg2pt_regs[dst_reg];
2213 fixup_reg = DONT_CLEAR;
2214 }
2215
2216 ex->fixup = FIELD_PREP(FIXUP_INSN_LEN_MASK, prog - start_of_ldx) |
2217 FIELD_PREP(FIXUP_ARENA_REG_MASK, arena_reg) |
2218 FIELD_PREP(FIXUP_REG_MASK, fixup_reg);
2219 ex->fixup |= FIXUP_ARENA_ACCESS;
2220
2221 ex->data |= FIELD_PREP(DATA_ARENA_OFFSET_MASK, insn->off);
2222 }
2223 break;
2224
2225 /* LDX: dst_reg = *(u8*)(src_reg + off) */
2226 case BPF_LDX | BPF_MEM | BPF_B:
2227 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
2228 case BPF_LDX | BPF_MEM | BPF_H:
2229 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
2230 case BPF_LDX | BPF_MEM | BPF_W:
2231 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
2232 case BPF_LDX | BPF_MEM | BPF_DW:
2233 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
2234 /* LDXS: dst_reg = *(s8*)(src_reg + off) */
2235 case BPF_LDX | BPF_MEMSX | BPF_B:
2236 case BPF_LDX | BPF_MEMSX | BPF_H:
2237 case BPF_LDX | BPF_MEMSX | BPF_W:
2238 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
2239 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
2240 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
2241 insn_off = insn->off;
2242
2243 if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
2244 BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
2245 /* Conservatively check that src_reg + insn->off is a kernel address:
2246 * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
2247 * and
2248 * src_reg + insn->off < VSYSCALL_ADDR
2249 */
2250
2251 u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
2252 u8 *end_of_jmp;
2253
2254 /* movabsq r10, VSYSCALL_ADDR */
2255 emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
2256 (u32)(long)VSYSCALL_ADDR);
2257
2258 			/* mov r11, src_reg */
2259 EMIT_mov(AUX_REG, src_reg);
2260
2261 if (insn->off) {
2262 /* add r11, insn->off */
2263 maybe_emit_1mod(&prog, AUX_REG, true);
2264 EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
2265 }
2266
2267 /* sub r11, r10 */
2268 maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
2269 EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
2270
2271 /* movabsq r10, limit */
2272 emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
2273 (u32)(long)limit);
2274
2275 /* cmp r10, r11 */
2276 maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
2277 EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
2278
2279 /* if unsigned '>', goto load */
2280 EMIT2(X86_JA, 0);
2281 end_of_jmp = prog;
2282
2283 /* xor dst_reg, dst_reg */
2284 emit_mov_imm32(&prog, false, dst_reg, 0);
2285 /* jmp byte_after_ldx */
2286 EMIT2(0xEB, 0);
2287
2288 			/* populate jmp_offset for JA above to jump to start_of_ldx */
2289 start_of_ldx = prog;
2290 end_of_jmp[-1] = start_of_ldx - end_of_jmp;
2291 }
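		/* The single unsigned compare above folds both range checks
		 * into one: after subtracting VSYSCALL_ADDR, only addresses
		 * that lie above TASK_SIZE_MAX + PAGE_SIZE and below
		 * VSYSCALL_ADDR wrap around to a value strictly greater than
		 * 'limit', so JA is taken only for such kernel addresses;
		 * user-space and vsyscall addresses fall through and dst_reg
		 * is zeroed instead of faulting.
		 */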
2292 if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
2293 BPF_MODE(insn->code) == BPF_MEMSX)
2294 emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
2295 else
2296 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
2297 if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
2298 BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
2299 struct exception_table_entry *ex;
2300 u8 *_insn = image + proglen + (start_of_ldx - temp);
2301 s64 delta;
2302
2303 /* populate jmp_offset for JMP above */
2304 start_of_ldx[-1] = prog - start_of_ldx;
2305
2306 if (!bpf_prog->aux->extable)
2307 break;
2308
2309 if (excnt >= bpf_prog->aux->num_exentries) {
2310 pr_err("ex gen bug\n");
2311 return -EFAULT;
2312 }
2313 ex = &bpf_prog->aux->extable[excnt++];
2314
2315 delta = _insn - (u8 *)&ex->insn;
2316 if (!is_simm32(delta)) {
2317 pr_err("extable->insn doesn't fit into 32-bit\n");
2318 return -EFAULT;
2319 }
2320 /* switch ex to rw buffer for writes */
2321 ex = (void *)rw_image + ((void *)ex - (void *)image);
2322
2323 ex->insn = delta;
2324
2325 ex->data = EX_TYPE_BPF;
2326
2327 if (dst_reg > BPF_REG_9) {
2328 pr_err("verifier error\n");
2329 return -EFAULT;
2330 }
2331 /*
2332 * Compute size of x86 insn and its target dest x86 register.
2333 * ex_handler_bpf() will use lower 8 bits to adjust
2334 * pt_regs->ip to jump over this x86 instruction
2335 * and upper bits to figure out which pt_regs to zero out.
2336 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
2337 * of 4 bytes will be ignored and rbx will be zero inited.
2338 */
2339 ex->fixup = FIELD_PREP(FIXUP_INSN_LEN_MASK, prog - start_of_ldx) |
2340 FIELD_PREP(FIXUP_REG_MASK, reg2pt_regs[dst_reg]);
2341 }
2342 break;
2343
2344 case BPF_STX | BPF_ATOMIC | BPF_B:
2345 case BPF_STX | BPF_ATOMIC | BPF_H:
2346 if (!bpf_atomic_is_load_store(insn)) {
2347 pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
2348 return -EFAULT;
2349 }
2350 fallthrough;
2351 case BPF_STX | BPF_ATOMIC | BPF_W:
2352 case BPF_STX | BPF_ATOMIC | BPF_DW:
2353 if (insn->imm == (BPF_AND | BPF_FETCH) ||
2354 insn->imm == (BPF_OR | BPF_FETCH) ||
2355 insn->imm == (BPF_XOR | BPF_FETCH)) {
2356 bool is64 = BPF_SIZE(insn->code) == BPF_DW;
2357 u32 real_src_reg = src_reg;
2358 u32 real_dst_reg = dst_reg;
2359 u8 *branch_target;
2360
2361 /*
2362 * Can't be implemented with a single x86 insn.
2363 * Need to do a CMPXCHG loop.
2364 */
2365
2366 /* Will need RAX as a CMPXCHG operand so save R0 */
2367 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
2368 if (src_reg == BPF_REG_0)
2369 real_src_reg = BPF_REG_AX;
2370 if (dst_reg == BPF_REG_0)
2371 real_dst_reg = BPF_REG_AX;
2372
2373 branch_target = prog;
2374 /* Load old value */
2375 emit_ldx(&prog, BPF_SIZE(insn->code),
2376 BPF_REG_0, real_dst_reg, insn->off);
2377 /*
2378 * Perform the (commutative) operation locally,
2379 * put the result in the AUX_REG.
2380 */
2381 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
2382 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
2383 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
2384 add_2reg(0xC0, AUX_REG, real_src_reg));
2385 /* Attempt to swap in new value */
2386 err = emit_atomic_rmw(&prog, BPF_CMPXCHG,
2387 real_dst_reg, AUX_REG,
2388 insn->off,
2389 BPF_SIZE(insn->code));
2390 if (WARN_ON(err))
2391 return err;
2392 /*
2393 * ZF tells us whether we won the race. If it's
2394 * cleared we need to try again.
2395 */
2396 EMIT2(X86_JNE, -(prog - branch_target) - 2);
2397 /* Return the pre-modification value */
2398 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
2399 /* Restore R0 after clobbering RAX */
2400 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
2401 break;
2402 }
2403
2404 if (bpf_atomic_is_load_store(insn))
2405 err = emit_atomic_ld_st(&prog, insn->imm, dst_reg, src_reg,
2406 insn->off, BPF_SIZE(insn->code));
2407 else
2408 err = emit_atomic_rmw(&prog, insn->imm, dst_reg, src_reg,
2409 insn->off, BPF_SIZE(insn->code));
2410 if (err)
2411 return err;
2412 break;
2413
2414 case BPF_STX | BPF_PROBE_ATOMIC | BPF_B:
2415 case BPF_STX | BPF_PROBE_ATOMIC | BPF_H:
2416 if (!bpf_atomic_is_load_store(insn)) {
2417 pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
2418 return -EFAULT;
2419 }
2420 fallthrough;
2421 case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
2422 case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
2423 start_of_ldx = prog;
2424
2425 if (bpf_atomic_is_load_store(insn))
2426 err = emit_atomic_ld_st_index(&prog, insn->imm,
2427 BPF_SIZE(insn->code), dst_reg,
2428 src_reg, X86_REG_R12, insn->off);
2429 else
2430 err = emit_atomic_rmw_index(&prog, insn->imm, BPF_SIZE(insn->code),
2431 dst_reg, src_reg, X86_REG_R12,
2432 insn->off);
2433 if (err)
2434 return err;
2435 goto populate_extable;
2436
2437 /* call */
2438 case BPF_JMP | BPF_CALL: {
2439 u8 *ip = image + addrs[i - 1];
2440
2441 func = (u8 *) __bpf_call_base + imm32;
2442 if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) {
2443 LOAD_TAIL_CALL_CNT_PTR(stack_depth);
2444 ip += 7;
2445 }
2446 if (!imm32)
2447 return -EINVAL;
2448 if (priv_frame_ptr) {
2449 push_r9(&prog);
2450 ip += 2;
2451 }
2452 ip += x86_call_depth_emit_accounting(&prog, func, ip);
2453 if (emit_call(&prog, func, ip))
2454 return -EINVAL;
2455 if (priv_frame_ptr)
2456 pop_r9(&prog);
2457 break;
2458 }
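	/* The ip adjustments above keep the call displacement correct: the
	 * tail-call-count-pointer load is a 7-byte mov with a 32-bit
	 * displacement and push_r9() emits the 2-byte "push r9", both sitting
	 * between the recorded insn start and the actual call site.
	 */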
2459
2460 case BPF_JMP | BPF_TAIL_CALL:
2461 if (imm32)
2462 emit_bpf_tail_call_direct(bpf_prog,
2463 &bpf_prog->aux->poke_tab[imm32 - 1],
2464 &prog, image + addrs[i - 1],
2465 callee_regs_used,
2466 stack_depth,
2467 ctx);
2468 else
2469 emit_bpf_tail_call_indirect(bpf_prog,
2470 &prog,
2471 callee_regs_used,
2472 stack_depth,
2473 image + addrs[i - 1],
2474 ctx);
2475 break;
2476
2477 /* cond jump */
2478 case BPF_JMP | BPF_JEQ | BPF_X:
2479 case BPF_JMP | BPF_JNE | BPF_X:
2480 case BPF_JMP | BPF_JGT | BPF_X:
2481 case BPF_JMP | BPF_JLT | BPF_X:
2482 case BPF_JMP | BPF_JGE | BPF_X:
2483 case BPF_JMP | BPF_JLE | BPF_X:
2484 case BPF_JMP | BPF_JSGT | BPF_X:
2485 case BPF_JMP | BPF_JSLT | BPF_X:
2486 case BPF_JMP | BPF_JSGE | BPF_X:
2487 case BPF_JMP | BPF_JSLE | BPF_X:
2488 case BPF_JMP32 | BPF_JEQ | BPF_X:
2489 case BPF_JMP32 | BPF_JNE | BPF_X:
2490 case BPF_JMP32 | BPF_JGT | BPF_X:
2491 case BPF_JMP32 | BPF_JLT | BPF_X:
2492 case BPF_JMP32 | BPF_JGE | BPF_X:
2493 case BPF_JMP32 | BPF_JLE | BPF_X:
2494 case BPF_JMP32 | BPF_JSGT | BPF_X:
2495 case BPF_JMP32 | BPF_JSLT | BPF_X:
2496 case BPF_JMP32 | BPF_JSGE | BPF_X:
2497 case BPF_JMP32 | BPF_JSLE | BPF_X:
2498 /* cmp dst_reg, src_reg */
2499 maybe_emit_mod(&prog, dst_reg, src_reg,
2500 BPF_CLASS(insn->code) == BPF_JMP);
2501 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
2502 goto emit_cond_jmp;
2503
2504 case BPF_JMP | BPF_JSET | BPF_X:
2505 case BPF_JMP32 | BPF_JSET | BPF_X:
2506 /* test dst_reg, src_reg */
2507 maybe_emit_mod(&prog, dst_reg, src_reg,
2508 BPF_CLASS(insn->code) == BPF_JMP);
2509 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
2510 goto emit_cond_jmp;
2511
2512 case BPF_JMP | BPF_JSET | BPF_K:
2513 case BPF_JMP32 | BPF_JSET | BPF_K:
2514 /* test dst_reg, imm32 */
2515 maybe_emit_1mod(&prog, dst_reg,
2516 BPF_CLASS(insn->code) == BPF_JMP);
2517 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
2518 goto emit_cond_jmp;
2519
2520 case BPF_JMP | BPF_JEQ | BPF_K:
2521 case BPF_JMP | BPF_JNE | BPF_K:
2522 case BPF_JMP | BPF_JGT | BPF_K:
2523 case BPF_JMP | BPF_JLT | BPF_K:
2524 case BPF_JMP | BPF_JGE | BPF_K:
2525 case BPF_JMP | BPF_JLE | BPF_K:
2526 case BPF_JMP | BPF_JSGT | BPF_K:
2527 case BPF_JMP | BPF_JSLT | BPF_K:
2528 case BPF_JMP | BPF_JSGE | BPF_K:
2529 case BPF_JMP | BPF_JSLE | BPF_K:
2530 case BPF_JMP32 | BPF_JEQ | BPF_K:
2531 case BPF_JMP32 | BPF_JNE | BPF_K:
2532 case BPF_JMP32 | BPF_JGT | BPF_K:
2533 case BPF_JMP32 | BPF_JLT | BPF_K:
2534 case BPF_JMP32 | BPF_JGE | BPF_K:
2535 case BPF_JMP32 | BPF_JLE | BPF_K:
2536 case BPF_JMP32 | BPF_JSGT | BPF_K:
2537 case BPF_JMP32 | BPF_JSLT | BPF_K:
2538 case BPF_JMP32 | BPF_JSGE | BPF_K:
2539 case BPF_JMP32 | BPF_JSLE | BPF_K:
2540 /* test dst_reg, dst_reg to save one extra byte */
2541 if (imm32 == 0) {
2542 maybe_emit_mod(&prog, dst_reg, dst_reg,
2543 BPF_CLASS(insn->code) == BPF_JMP);
2544 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
2545 goto emit_cond_jmp;
2546 }
2547
2548 /* cmp dst_reg, imm8/32 */
2549 maybe_emit_1mod(&prog, dst_reg,
2550 BPF_CLASS(insn->code) == BPF_JMP);
2551
2552 if (is_imm8(imm32))
2553 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
2554 else
2555 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
2556
2557 emit_cond_jmp: /* Convert BPF opcode to x86 */
2558 switch (BPF_OP(insn->code)) {
2559 case BPF_JEQ:
2560 jmp_cond = X86_JE;
2561 break;
2562 case BPF_JSET:
2563 case BPF_JNE:
2564 jmp_cond = X86_JNE;
2565 break;
2566 case BPF_JGT:
2567 /* GT is unsigned '>', JA in x86 */
2568 jmp_cond = X86_JA;
2569 break;
2570 case BPF_JLT:
2571 /* LT is unsigned '<', JB in x86 */
2572 jmp_cond = X86_JB;
2573 break;
2574 case BPF_JGE:
2575 /* GE is unsigned '>=', JAE in x86 */
2576 jmp_cond = X86_JAE;
2577 break;
2578 case BPF_JLE:
2579 /* LE is unsigned '<=', JBE in x86 */
2580 jmp_cond = X86_JBE;
2581 break;
2582 case BPF_JSGT:
2583 /* Signed '>', GT in x86 */
2584 jmp_cond = X86_JG;
2585 break;
2586 case BPF_JSLT:
2587 /* Signed '<', LT in x86 */
2588 jmp_cond = X86_JL;
2589 break;
2590 case BPF_JSGE:
2591 /* Signed '>=', GE in x86 */
2592 jmp_cond = X86_JGE;
2593 break;
2594 case BPF_JSLE:
2595 /* Signed '<=', LE in x86 */
2596 jmp_cond = X86_JLE;
2597 break;
2598 default: /* to silence GCC warning */
2599 return -EFAULT;
2600 }
2601 jmp_offset = addrs[i + insn->off] - addrs[i];
2602 if (is_imm8_jmp_offset(jmp_offset)) {
2603 if (jmp_padding) {
2604 /* To keep the jmp_offset valid, the extra bytes are
2605 * padded before the jump insn, so we subtract the
2606 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
2607 *
2608 * If the previous pass already emits an imm8
2609 * jmp_cond, then this BPF insn won't shrink, so
2610 * "nops" is 0.
2611 *
2612 * On the other hand, if the previous pass emits an
2613 				 * imm32 jmp_cond, the extra 4 bytes(*) are padded to
2614 * keep the image from shrinking further.
2615 *
2616 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
2617 * is 2 bytes, so the size difference is 4 bytes.
2618 */
2619 nops = INSN_SZ_DIFF - 2;
2620 if (nops != 0 && nops != 4) {
2621 pr_err("unexpected jmp_cond padding: %d bytes\n",
2622 nops);
2623 return -EFAULT;
2624 }
2625 emit_nops(&prog, nops);
2626 }
2627 EMIT2(jmp_cond, jmp_offset);
2628 } else if (is_simm32(jmp_offset)) {
2629 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
2630 } else {
2631 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
2632 return -EFAULT;
2633 }
2634
2635 break;
2636
2637 case BPF_JMP | BPF_JA | BPF_X:
2638 emit_indirect_jump(&prog, insn->dst_reg, image + addrs[i - 1]);
2639 break;
2640 case BPF_JMP | BPF_JA:
2641 case BPF_JMP32 | BPF_JA:
2642 if (BPF_CLASS(insn->code) == BPF_JMP) {
2643 if (insn->off == -1)
2644 /* -1 jmp instructions will always jump
2645 * backwards two bytes. Explicitly handling
2646 * this case avoids wasting too many passes
2647 * when there are long sequences of replaced
2648 * dead code.
2649 */
2650 jmp_offset = -2;
2651 else
2652 jmp_offset = addrs[i + insn->off] - addrs[i];
2653 } else {
2654 if (insn->imm == -1)
2655 jmp_offset = -2;
2656 else
2657 jmp_offset = addrs[i + insn->imm] - addrs[i];
2658 }
2659
2660 if (!jmp_offset) {
2661 /*
2662 * If jmp_padding is enabled, the extra nops will
2663 * be inserted. Otherwise, optimize out nop jumps.
2664 */
2665 if (jmp_padding) {
2666 /* There are 3 possible conditions.
2667 * (1) This BPF_JA is already optimized out in
2668 * the previous run, so there is no need
2669 * to pad any extra byte (0 byte).
2670 * (2) The previous pass emits an imm8 jmp,
2671 * so we pad 2 bytes to match the previous
2672 * insn size.
2673 * (3) Similarly, the previous pass emits an
2674 				 * imm32 jmp, and 5 bytes are padded.
2675 */
2676 nops = INSN_SZ_DIFF;
2677 if (nops != 0 && nops != 2 && nops != 5) {
2678 pr_err("unexpected nop jump padding: %d bytes\n",
2679 nops);
2680 return -EFAULT;
2681 }
2682 emit_nops(&prog, nops);
2683 }
2684 break;
2685 }
2686 emit_jmp:
2687 if (is_imm8_jmp_offset(jmp_offset)) {
2688 if (jmp_padding) {
2689 /* To avoid breaking jmp_offset, the extra bytes
2690 * are padded before the actual jmp insn, so
2691 * 2 bytes is subtracted from INSN_SZ_DIFF.
2692 *
2693 * If the previous pass already emits an imm8
2694 * jmp, there is nothing to pad (0 byte).
2695 *
2696 * If it emits an imm32 jmp (5 bytes) previously
2697 * and now an imm8 jmp (2 bytes), then we pad
2698 * (5 - 2 = 3) bytes to stop the image from
2699 * shrinking further.
2700 */
2701 nops = INSN_SZ_DIFF - 2;
2702 if (nops != 0 && nops != 3) {
2703 pr_err("unexpected jump padding: %d bytes\n",
2704 nops);
2705 return -EFAULT;
2706 }
2707 emit_nops(&prog, INSN_SZ_DIFF - 2);
2708 }
2709 EMIT2(0xEB, jmp_offset);
2710 } else if (is_simm32(jmp_offset)) {
2711 EMIT1_off32(0xE9, jmp_offset);
2712 } else {
2713 pr_err("jmp gen bug %llx\n", jmp_offset);
2714 return -EFAULT;
2715 }
2716 break;
2717
2718 case BPF_JMP | BPF_EXIT:
2719 if (seen_exit) {
2720 jmp_offset = ctx->cleanup_addr - addrs[i];
2721 goto emit_jmp;
2722 }
2723 seen_exit = true;
2724 /* Update cleanup_addr */
2725 ctx->cleanup_addr = proglen;
2726 if (bpf_prog_was_classic(bpf_prog) &&
2727 !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {
2728 u8 *ip = image + addrs[i - 1];
2729
2730 if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
2731 return -EINVAL;
2732 }
2733 if (bpf_prog->aux->exception_boundary) {
2734 pop_callee_regs(&prog, all_callee_regs_used);
2735 pop_r12(&prog);
2736 } else {
2737 pop_callee_regs(&prog, callee_regs_used);
2738 if (arena_vm_start)
2739 pop_r12(&prog);
2740 }
2741 EMIT1(0xC9); /* leave */
2742 bpf_prog->aux->ksym.fp_end = prog - temp;
2743
2744 emit_return(&prog, image + addrs[i - 1] + (prog - temp));
2745 break;
2746
2747 default:
2748 /*
2749 * By design x86-64 JIT should support all BPF instructions.
2750 			 * This error will be seen if a new instruction was added
2751 * to the interpreter, but not to the JIT, or if there is
2752 * junk in bpf_prog.
2753 */
2754 pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
2755 return -EINVAL;
2756 }
2757
2758 ilen = prog - temp;
2759 if (ilen > BPF_MAX_INSN_SIZE) {
2760 pr_err("bpf_jit: fatal insn size error\n");
2761 return -EFAULT;
2762 }
2763
2764 if (image) {
2765 /*
2766 * When populating the image, assert that:
2767 *
2768 * i) We do not write beyond the allocated space, and
2769 * ii) addrs[i] did not change from the prior run, in order
2770 * to validate assumptions made for computing branch
2771 * displacements.
2772 */
2773 if (unlikely(proglen + ilen > oldproglen ||
2774 proglen + ilen != addrs[i])) {
2775 pr_err("bpf_jit: fatal error\n");
2776 return -EFAULT;
2777 }
2778 memcpy(rw_image + proglen, temp, ilen);
2779 }
2780 proglen += ilen;
2781 addrs[i] = proglen;
2782 prog = temp;
2783 }
2784
2785 if (image && excnt != bpf_prog->aux->num_exentries) {
2786 pr_err("extable is not populated\n");
2787 return -EFAULT;
2788 }
2789 return proglen;
2790 }
2791
2792 static void clean_stack_garbage(const struct btf_func_model *m,
2793 u8 **pprog, int nr_stack_slots,
2794 int stack_size)
2795 {
2796 int arg_size, off;
2797 u8 *prog;
2798
2799 	/* Generally speaking, the compiler will pass the arguments
2800 	 * on-stack with a "push" instruction, which takes 8 bytes
2801 	 * on the stack. In this case, there won't be garbage values
2802 	 * while we copy the arguments from the origin stack frame to the
2803 	 * current one in BPF_DW-sized chunks.
2804 	 *
2805 	 * However, sometimes the compiler will only allocate 4 bytes on
2806 	 * the stack for the arguments. For now, this case will only
2807 	 * happen if there is only one argument on-stack and its size is
2808 	 * not more than 4 bytes. In this case, there will be garbage
2809 	 * values in the upper 4 bytes where we store the argument in the
2810 	 * current stack frame.
2811 *
2812 * arguments on origin stack:
2813 *
2814 * stack_arg_1(4-byte) xxx(4-byte)
2815 *
2816 * what we copy:
2817 *
2818 * stack_arg_1(8-byte): stack_arg_1(origin) xxx
2819 *
2820 * and the xxx is the garbage values which we should clean here.
2821 */
2822 if (nr_stack_slots != 1)
2823 return;
2824
2825 /* the size of the last argument */
2826 arg_size = m->arg_size[m->nr_args - 1];
2827 if (arg_size <= 4) {
2828 off = -(stack_size - 4);
2829 prog = *pprog;
2830 /* mov DWORD PTR [rbp + off], 0 */
2831 if (!is_imm8(off))
2832 EMIT2_off32(0xC7, 0x85, off);
2833 else
2834 EMIT3(0xC7, 0x45, off);
2835 EMIT(0, 4);
2836 *pprog = prog;
2837 }
2838 }
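/* For instance, with nr_stack_slots == 1, a 4-byte last argument and the
 * function called with stack_size == 16, this emits C7 45 F4 00 00 00 00,
 * i.e. mov DWORD PTR [rbp-0xc], 0x0, clearing the stale upper half of the
 * 8-byte slot that save_args() filled.
 */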
2839
2840 /* get the count of the regs that are used to pass arguments */
2841 static int get_nr_used_regs(const struct btf_func_model *m)
2842 {
2843 int i, arg_regs, nr_used_regs = 0;
2844
2845 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2846 arg_regs = (m->arg_size[i] + 7) / 8;
2847 if (nr_used_regs + arg_regs <= 6)
2848 nr_used_regs += arg_regs;
2849
2850 if (nr_used_regs >= 6)
2851 break;
2852 }
2853
2854 return nr_used_regs;
2855 }
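/* E.g. a traced function taking eight u64 arguments uses all six argument
 * registers (rdi, rsi, rdx, rcx, r8, r9), so this returns 6; the remaining
 * two arguments travel on the stack and are not counted here.
 */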
2856
2857 static void save_args(const struct btf_func_model *m, u8 **prog,
2858 int stack_size, bool for_call_origin, u32 flags)
2859 {
2860 int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
2861 bool use_jmp = bpf_trampoline_use_jmp(flags);
2862 int i, j;
2863
2864 /* Store function arguments to stack.
2865 * For a function that accepts two pointers the sequence will be:
2866 * mov QWORD PTR [rbp-0x10],rdi
2867 * mov QWORD PTR [rbp-0x8],rsi
2868 */
2869 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2870 arg_regs = (m->arg_size[i] + 7) / 8;
2871
2872 		/* According to the research of Yonghong, struct members
2873 		 * should be either all in registers or all on the stack.
2874 		 * Meanwhile, the compiler will pass the argument in regs
2875 		 * if the remaining regs can hold the argument.
2876 *
2877 * Disorder of the args can happen. For example:
2878 *
2879 * struct foo_struct {
2880 * long a;
2881 * int b;
2882 * };
2883 * int foo(char, char, char, char, char, struct foo_struct,
2884 * char);
2885 *
2886 		 * the arg1-5, arg7 will be passed by regs, and arg6 will
2887 		 * be passed on the stack.
2888 */
2889 if (nr_regs + arg_regs > 6) {
2890 /* copy function arguments from origin stack frame
2891 * into current stack frame.
2892 *
2893 * The starting address of the arguments on-stack
2894 * is:
2895 * rbp + 8(push rbp) +
2896 * 8(return addr of origin call) +
2897 * 8(return addr of the caller)
2898 * which means: rbp + 24
2899 */
2900 for (j = 0; j < arg_regs; j++) {
2901 emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
2902 nr_stack_slots * 8 + 16 + (!use_jmp) * 8);
2903 emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
2904 -stack_size);
2905
2906 if (!nr_stack_slots)
2907 first_off = stack_size;
2908 stack_size -= 8;
2909 nr_stack_slots++;
2910 }
2911 } else {
2912 			/* Only copy the on-stack arguments to the current
2913 			 * 'stack_size' area and ignore the regs; this is used
2914 			 * to prepare the on-stack arguments for the origin call.
2915 */
2916 if (for_call_origin) {
2917 nr_regs += arg_regs;
2918 continue;
2919 }
2920
2921 /* copy the arguments from regs into stack */
2922 for (j = 0; j < arg_regs; j++) {
2923 emit_stx(prog, BPF_DW, BPF_REG_FP,
2924 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2925 -stack_size);
2926 stack_size -= 8;
2927 nr_regs++;
2928 }
2929 }
2930 }
2931
2932 clean_stack_garbage(m, prog, nr_stack_slots, first_off);
2933 }
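/* Rough illustration for the eight-u64-argument case with for_call_origin
 * == false and a "call" style trampoline: arguments 1-5 are stored from
 * rdi/rsi/rdx/rcx/r8, argument 6 from r9 (the nr_regs == 5 special case),
 * and arguments 7 and 8 are copied in from the caller's frame at rbp+24
 * and rbp+32 before being written below rbp.
 */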
2934
2935 static void restore_regs(const struct btf_func_model *m, u8 **prog,
2936 int stack_size)
2937 {
2938 int i, j, arg_regs, nr_regs = 0;
2939
2940 /* Restore function arguments from stack.
2941 * For a function that accepts two pointers the sequence will be:
2942 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
2943 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
2944 *
2945 * The logic here is similar to what we do in save_args()
2946 */
2947 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2948 arg_regs = (m->arg_size[i] + 7) / 8;
2949 if (nr_regs + arg_regs <= 6) {
2950 for (j = 0; j < arg_regs; j++) {
2951 emit_ldx(prog, BPF_DW,
2952 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2953 BPF_REG_FP,
2954 -stack_size);
2955 stack_size -= 8;
2956 nr_regs++;
2957 }
2958 } else {
2959 stack_size -= 8 * arg_regs;
2960 }
2961
2962 if (nr_regs >= 6)
2963 break;
2964 }
2965 }
2966
2967 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
2968 struct bpf_tramp_link *l, int stack_size,
2969 int run_ctx_off, bool save_ret,
2970 void *image, void *rw_image)
2971 {
2972 u8 *prog = *pprog;
2973 u8 *jmp_insn;
2974 int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
2975 struct bpf_prog *p = l->link.prog;
2976 u64 cookie = l->cookie;
2977
2978 /* mov rdi, cookie */
2979 emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
2980
2981 /* Prepare struct bpf_tramp_run_ctx.
2982 *
2983 * bpf_tramp_run_ctx is already preserved by
2984 * arch_prepare_bpf_trampoline().
2985 *
2986 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
2987 */
2988 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
2989
2990 /* arg1: mov rdi, progs[i] */
2991 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2992 	/* arg2: lea rsi, [rbp - run_ctx_off] */
2993 if (!is_imm8(-run_ctx_off))
2994 EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
2995 else
2996 EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
2997
2998 if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
2999 return -EINVAL;
3000 /* remember prog start time returned by __bpf_prog_enter */
3001 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
3002
3003 /* if (__bpf_prog_enter*(prog) == 0)
3004 * goto skip_exec_of_prog;
3005 */
3006 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
3007 /* emit 2 nops that will be replaced with JE insn */
3008 jmp_insn = prog;
3009 emit_nops(&prog, 2);
3010
3011 /* arg1: lea rdi, [rbp - stack_size] */
3012 if (!is_imm8(-stack_size))
3013 EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
3014 else
3015 EMIT4(0x48, 0x8D, 0x7D, -stack_size);
3016 /* arg2: progs[i]->insnsi for interpreter */
3017 if (!p->jited)
3018 emit_mov_imm64(&prog, BPF_REG_2,
3019 (long) p->insnsi >> 32,
3020 (u32) (long) p->insnsi);
3021 /* call JITed bpf program or interpreter */
3022 if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
3023 return -EINVAL;
3024
3025 /*
3026 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
3027 * of the previous call which is then passed on the stack to
3028 * the next BPF program.
3029 *
3030 * BPF_TRAMP_FENTRY trampoline may need to return the return
3031 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
3032 */
3033 if (save_ret)
3034 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
3035
3036 /* replace 2 nops with JE insn, since jmp target is known */
3037 jmp_insn[0] = X86_JE;
3038 jmp_insn[1] = prog - jmp_insn - 2;
3039
3040 /* arg1: mov rdi, progs[i] */
3041 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
3042 /* arg2: mov rsi, rbx <- start time in nsec */
3043 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
3044 /* arg3: lea rdx, [rbp - run_ctx_off] */
3045 if (!is_imm8(-run_ctx_off))
3046 EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
3047 else
3048 EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
3049 if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
3050 return -EINVAL;
3051
3052 *pprog = prog;
3053 return 0;
3054 }
3055
3056 static void emit_align(u8 **pprog, u32 align)
3057 {
3058 u8 *target, *prog = *pprog;
3059
3060 target = PTR_ALIGN(prog, align);
3061 if (target != prog)
3062 emit_nops(&prog, target - prog);
3063
3064 *pprog = prog;
3065 }
3066
3067 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
3068 {
3069 u8 *prog = *pprog;
3070 s64 offset;
3071
3072 offset = func - (ip + 2 + 4);
3073 if (!is_simm32(offset)) {
3074 pr_err("Target %p is out of range\n", func);
3075 return -EINVAL;
3076 }
3077 EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
3078 *pprog = prog;
3079 return 0;
3080 }
3081
3082 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
3083 struct bpf_tramp_links *tl, int stack_size,
3084 int run_ctx_off, bool save_ret,
3085 void *image, void *rw_image)
3086 {
3087 int i;
3088 u8 *prog = *pprog;
3089
3090 for (i = 0; i < tl->nr_links; i++) {
3091 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
3092 run_ctx_off, save_ret, image, rw_image))
3093 return -EINVAL;
3094 }
3095 *pprog = prog;
3096 return 0;
3097 }
3098
3099 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
3100 struct bpf_tramp_links *tl, int stack_size,
3101 int run_ctx_off, u8 **branches,
3102 void *image, void *rw_image)
3103 {
3104 u8 *prog = *pprog;
3105 int i;
3106
3107 /* The first fmod_ret program will receive a garbage return value.
3108 * Set this to 0 to avoid confusing the program.
3109 */
3110 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
3111 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
3112 for (i = 0; i < tl->nr_links; i++) {
3113 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
3114 image, rw_image))
3115 return -EINVAL;
3116
3117 /* mod_ret prog stored return value into [rbp - 8]. Emit:
3118 * if (*(u64 *)(rbp - 8) != 0)
3119 * goto do_fexit;
3120 */
3121 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
3122 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
3123
3124 		/* Save the location of the branch and generate 6 nops
3125 		 * (4 bytes for an offset and 2 bytes for the jump). These nops
3126 * are replaced with a conditional jump once do_fexit (i.e. the
3127 * start of the fexit invocation) is finalized.
3128 */
3129 branches[i] = prog;
3130 emit_nops(&prog, 4 + 2);
3131 }
3132
3133 *pprog = prog;
3134 return 0;
3135 }
3136
3137 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
3138 #define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack) \
3139 __LOAD_TCC_PTR(-round_up(stack, 8) - 8)
3140
3141 /* Example:
3142 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
3143 * its 'struct btf_func_model' will be nr_args=2
3144 * The assembly code when eth_type_trans is executing after trampoline:
3145 *
3146 * push rbp
3147 * mov rbp, rsp
3148 * sub rsp, 16 // space for skb and dev
3149 * push rbx // temp regs to pass start time
3150 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
3151 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
3152 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
3153 	 * mov rbx, rax                      // remember start time if bpf stats are enabled
3154 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
3155 * call addr_of_jited_FENTRY_prog
3156 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
3157 * mov rsi, rbx // prog start time
3158 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
3159 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
3160 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
3161 * pop rbx
3162 * leave
3163 * ret
3164 *
3165 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
3166 * replaced with 'call generated_bpf_trampoline'. When it returns
3167 * eth_type_trans will continue executing with original skb and dev pointers.
3168 *
3169 * The assembly code when eth_type_trans is called from trampoline:
3170 *
3171 * push rbp
3172 * mov rbp, rsp
3173 * sub rsp, 24 // space for skb, dev, return value
3174 * push rbx // temp regs to pass start time
3175 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
3176 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
3177 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
3178 * mov rbx, rax // remember start time if bpf stats are enabled
3179 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
3180 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
3181 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
3182 * mov rsi, rbx // prog start time
3183 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
3184 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
3185 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
3186 * call eth_type_trans+5 // execute body of eth_type_trans
3187 * mov qword ptr [rbp - 8], rax // save return value
3188 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
3189 	 * mov rbx, rax                      // remember start time if bpf stats are enabled
3190 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
3191 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
3192 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
3193 * mov rsi, rbx // prog start time
3194 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
3195 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
3196 * pop rbx
3197 * leave
3198 * add rsp, 8 // skip eth_type_trans's frame
3199 * ret // return to its caller
3200 */
3201 static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
3202 void *rw_image_end, void *image,
3203 const struct btf_func_model *m, u32 flags,
3204 struct bpf_tramp_links *tlinks,
3205 void *func_addr)
3206 {
3207 int i, ret, nr_regs = m->nr_args, stack_size = 0;
3208 int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
3209 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
3210 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
3211 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
3212 void *orig_call = func_addr;
3213 u8 **branches = NULL;
3214 u8 *prog;
3215 bool save_ret;
3216
3217 /*
3218 	 * F_INDIRECT is only compatible with F_RET_FENTRY_RET; it is
3219 	 * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG
3220 	 * because @func_addr does not point at a traced kernel function here.
3221 */
3222 WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
3223 (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));
3224
3225 /* extra registers for struct arguments */
3226 for (i = 0; i < m->nr_args; i++) {
3227 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
3228 nr_regs += (m->arg_size[i] + 7) / 8 - 1;
3229 }
3230
3231 	/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. 1-6
3232 	 * are passed through regs, the rest are passed on the stack.
3233 */
3234 if (nr_regs > MAX_BPF_FUNC_ARGS)
3235 return -ENOTSUPP;
3236
3237 /* Generated trampoline stack layout:
3238 *
3239 * RBP + 8 [ return address ]
3240 * RBP + 0 [ RBP ]
3241 *
3242 * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or
3243 * BPF_TRAMP_F_RET_FENTRY_RET flags
3244 *
3245 * [ reg_argN ] always
3246 * [ ... ]
3247 * RBP - regs_off [ reg_arg1 ] program's ctx pointer
3248 *
3249 * RBP - nregs_off [ regs count ] always
3250 *
3251 * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
3252 *
3253 * RBP - rbx_off [ rbx value ] always
3254 *
3255 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
3256 *
3257 * [ stack_argN ] BPF_TRAMP_F_CALL_ORIG
3258 * [ ... ]
3259 * [ stack_arg2 ]
3260 * RBP - arg_stack_off [ stack_arg1 ]
3261 * RSP [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX
3262 */
3263
3264 /* room for return value of orig_call or fentry prog */
3265 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
3266 if (save_ret)
3267 stack_size += 8;
3268
3269 stack_size += nr_regs * 8;
3270 regs_off = stack_size;
3271
3272 /* regs count */
3273 stack_size += 8;
3274 nregs_off = stack_size;
3275
3276 if (flags & BPF_TRAMP_F_IP_ARG)
3277 stack_size += 8; /* room for IP address argument */
3278
3279 ip_off = stack_size;
3280
3281 stack_size += 8;
3282 rbx_off = stack_size;
3283
3284 stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
3285 run_ctx_off = stack_size;
3286
3287 if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
3288 		/* the space used to pass arguments on the stack */
3289 stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
3290 		/* make sure the stack pointer is 16-byte aligned if we
3291 		 * need to pass arguments on the stack, which means
3292 		 * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
3293 		 * should be 16-byte aligned. The following code depends on
3294 		 * stack_size already being 8-byte aligned.
3295 */
3296 if (bpf_trampoline_use_jmp(flags)) {
3297 /* no rip in the "jmp" case */
3298 stack_size += (stack_size % 16) ? 8 : 0;
3299 } else {
3300 stack_size += (stack_size % 16) ? 0 : 8;
3301 }
3302 }
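	/* Worked example: if stack_size is 40 here, a "call" style trampoline
	 * sees 40 + 8(rbp) + 8(rip) + 8(origin rip) = 64, already a multiple
	 * of 16, so nothing is added (40 % 16 != 0 takes the 0 branch); had
	 * stack_size been 48, 8 extra bytes would be added to restore the
	 * 16-byte alignment.
	 */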
3303
3304 arg_stack_off = stack_size;
3305
3306 if (flags & BPF_TRAMP_F_CALL_ORIG) {
3307 /* skip patched call instruction and point orig_call to actual
3308 * body of the kernel function.
3309 */
3310 if (is_endbr(orig_call))
3311 orig_call += ENDBR_INSN_SIZE;
3312 orig_call += X86_PATCH_SIZE;
3313 }
3314
3315 prog = rw_image;
3316
3317 if (flags & BPF_TRAMP_F_INDIRECT) {
3318 /*
3319 * Indirect call for bpf_struct_ops
3320 */
3321 emit_cfi(&prog, image,
3322 cfi_get_func_hash(func_addr),
3323 cfi_get_func_arity(func_addr));
3324 } else {
3325 /*
3326 * Direct-call fentry stub, as such it needs accounting for the
3327 * __fentry__ call.
3328 */
3329 x86_call_depth_emit_accounting(&prog, NULL, image);
3330 }
3331 EMIT1(0x55); /* push rbp */
3332 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
3333 if (im)
3334 im->ksym.fp_start = prog - (u8 *)rw_image;
3335
3336 if (!is_imm8(stack_size)) {
3337 /* sub rsp, stack_size */
3338 EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
3339 } else {
3340 /* sub rsp, stack_size */
3341 EMIT4(0x48, 0x83, 0xEC, stack_size);
3342 }
3343 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
3344 EMIT1(0x50); /* push rax */
3345 /* mov QWORD PTR [rbp - rbx_off], rbx */
3346 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
3347
3348 /* Store number of argument registers of the traced function:
3349 * mov rax, nr_regs
3350 * mov QWORD PTR [rbp - nregs_off], rax
3351 */
3352 emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
3353 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
3354
3355 if (flags & BPF_TRAMP_F_IP_ARG) {
3356 /* Store IP address of the traced function:
3357 * movabsq rax, func_addr
3358 * mov QWORD PTR [rbp - ip_off], rax
3359 */
3360 emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
3361 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
3362 }
3363
3364 save_args(m, &prog, regs_off, false, flags);
3365
3366 if (flags & BPF_TRAMP_F_CALL_ORIG) {
3367 /* arg1: mov rdi, im */
3368 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
3369 if (emit_rsb_call(&prog, __bpf_tramp_enter,
3370 image + (prog - (u8 *)rw_image))) {
3371 ret = -EINVAL;
3372 goto cleanup;
3373 }
3374 }
3375
3376 if (fentry->nr_links) {
3377 if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
3378 flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
3379 return -EINVAL;
3380 }
3381
3382 if (fmod_ret->nr_links) {
3383 branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
3384 GFP_KERNEL);
3385 if (!branches)
3386 return -ENOMEM;
3387
3388 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
3389 run_ctx_off, branches, image, rw_image)) {
3390 ret = -EINVAL;
3391 goto cleanup;
3392 }
3393 }
3394
3395 if (flags & BPF_TRAMP_F_CALL_ORIG) {
3396 restore_regs(m, &prog, regs_off);
3397 save_args(m, &prog, arg_stack_off, true, flags);
3398
3399 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3400 /* Before calling the original function, load the
3401 * tail_call_cnt_ptr from stack to rax.
3402 */
3403 LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
3404 }
3405
3406 if (flags & BPF_TRAMP_F_ORIG_STACK) {
3407 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
3408 EMIT2(0xff, 0xd3); /* call *rbx */
3409 } else {
3410 /* call original function */
3411 if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
3412 ret = -EINVAL;
3413 goto cleanup;
3414 }
3415 }
3416 		/* remember return value on the stack for the bpf prog to access */
3417 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
3418 im->ip_after_call = image + (prog - (u8 *)rw_image);
3419 emit_nops(&prog, X86_PATCH_SIZE);
3420 }
3421
3422 if (fmod_ret->nr_links) {
3423 /* From Intel 64 and IA-32 Architectures Optimization
3424 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3425 * Coding Rule 11: All branch targets should be 16-byte
3426 * aligned.
3427 */
3428 emit_align(&prog, 16);
3429 /* Update the branches saved in invoke_bpf_mod_ret with the
3430 * aligned address of do_fexit.
3431 */
3432 for (i = 0; i < fmod_ret->nr_links; i++) {
3433 emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
3434 image + (branches[i] - (u8 *)rw_image), X86_JNE);
3435 }
3436 }
3437
3438 if (fexit->nr_links) {
3439 if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
3440 false, image, rw_image)) {
3441 ret = -EINVAL;
3442 goto cleanup;
3443 }
3444 }
3445
3446 if (flags & BPF_TRAMP_F_RESTORE_REGS)
3447 restore_regs(m, &prog, regs_off);
3448
3449 /* This needs to be done regardless. If there were fmod_ret programs,
3450 * the return value is only updated on the stack and still needs to be
3451 * restored to R0.
3452 */
3453 if (flags & BPF_TRAMP_F_CALL_ORIG) {
3454 im->ip_epilogue = image + (prog - (u8 *)rw_image);
3455 /* arg1: mov rdi, im */
3456 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
3457 if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
3458 ret = -EINVAL;
3459 goto cleanup;
3460 }
3461 } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3462 /* Before running the original function, load the
3463 * tail_call_cnt_ptr from stack to rax.
3464 */
3465 LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
3466 }
3467
3468 /* restore return value of orig_call or fentry prog back into RAX */
3469 if (save_ret)
3470 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
3471
3472 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
3473
3474 EMIT1(0xC9); /* leave */
3475 if (im)
3476 im->ksym.fp_end = prog - (u8 *)rw_image;
3477
3478 if (flags & BPF_TRAMP_F_SKIP_FRAME) {
3479 /* skip our return address and return to parent */
3480 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
3481 }
3482 emit_return(&prog, image + (prog - (u8 *)rw_image));
3483 /* Make sure the trampoline generation logic doesn't overflow */
3484 if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
3485 ret = -EFAULT;
3486 goto cleanup;
3487 }
3488 ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;
3489
3490 cleanup:
3491 kfree(branches);
3492 return ret;
3493 }
3494
3495 void *arch_alloc_bpf_trampoline(unsigned int size)
3496 {
3497 return bpf_prog_pack_alloc(size, jit_fill_hole);
3498 }
3499
3500 void arch_free_bpf_trampoline(void *image, unsigned int size)
3501 {
3502 bpf_prog_pack_free(image, size);
3503 }
3504
3505 int arch_protect_bpf_trampoline(void *image, unsigned int size)
3506 {
3507 return 0;
3508 }
3509
3510 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
3511 const struct btf_func_model *m, u32 flags,
3512 struct bpf_tramp_links *tlinks,
3513 void *func_addr)
3514 {
3515 void *rw_image, *tmp;
3516 int ret;
3517 u32 size = image_end - image;
3518
3519 /* rw_image doesn't need to be in module memory range, so we can
3520 * use kvmalloc.
3521 */
3522 rw_image = kvmalloc(size, GFP_KERNEL);
3523 if (!rw_image)
3524 return -ENOMEM;
3525
3526 ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
3527 flags, tlinks, func_addr);
3528 if (ret < 0)
3529 goto out;
3530
3531 tmp = bpf_arch_text_copy(image, rw_image, size);
3532 if (IS_ERR(tmp))
3533 ret = PTR_ERR(tmp);
3534 out:
3535 kvfree(rw_image);
3536 return ret;
3537 }
3538
3539 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
3540 struct bpf_tramp_links *tlinks, void *func_addr)
3541 {
3542 struct bpf_tramp_image im;
3543 void *image;
3544 int ret;
3545
3546 /* Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
3547 * This will NOT cause fragmentation in direct map, as we do not
3548 * call set_memory_*() on this buffer.
3549 *
3550 * We cannot use kvmalloc here, because we need image to be in
3551 * module memory range.
3552 */
3553 image = bpf_jit_alloc_exec(PAGE_SIZE);
3554 if (!image)
3555 return -ENOMEM;
3556
3557 ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
3558 m, flags, tlinks, func_addr);
3559 bpf_jit_free_exec(image);
3560 return ret;
3561 }
3562
3563 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
3564 {
3565 u8 *jg_reloc, *prog = *pprog;
3566 int pivot, err, jg_bytes = 1;
3567 s64 jg_offset;
3568
3569 if (a == b) {
3570 /* Leaf node of recursion, i.e. not a range of indices
3571 * anymore.
3572 */
3573 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
3574 if (!is_simm32(progs[a]))
3575 return -1;
3576 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
3577 progs[a]);
3578 err = emit_cond_near_jump(&prog, /* je func */
3579 (void *)progs[a], image + (prog - buf),
3580 X86_JE);
3581 if (err)
3582 return err;
3583
3584 emit_indirect_jump(&prog, BPF_REG_3 /* R3 -> rdx */, image + (prog - buf));
3585
3586 *pprog = prog;
3587 return 0;
3588 }
3589
3590 /* Not a leaf node, so we pivot, and recursively descend into
3591 * the lower and upper ranges.
3592 */
3593 pivot = (b - a) / 2;
3594 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
3595 if (!is_simm32(progs[a + pivot]))
3596 return -1;
3597 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
3598
3599 if (pivot > 2) { /* jg upper_part */
3600 /* Require near jump. */
3601 jg_bytes = 4;
3602 EMIT2_off32(0x0F, X86_JG + 0x10, 0);
3603 } else {
3604 EMIT2(X86_JG, 0);
3605 }
3606 jg_reloc = prog;
3607
3608 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
3609 progs, image, buf);
3610 if (err)
3611 return err;
3612
3613 /* From Intel 64 and IA-32 Architectures Optimization
3614 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3615 * Coding Rule 11: All branch targets should be 16-byte
3616 * aligned.
3617 */
3618 emit_align(&prog, 16);
3619 jg_offset = prog - jg_reloc;
3620 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
3621
3622 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
3623 b, progs, image, buf);
3624 if (err)
3625 return err;
3626
3627 *pprog = prog;
3628 return 0;
3629 }
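/*
 * Illustration (not emitted verbatim): for two sorted targets f0 < f1,
 * the recursion above produces roughly
 *
 *	cmp rdx, f0
 *	jg  1f
 *	cmp rdx, f0
 *	je  f0
 *	jmp rdx			; indirect fallback (retpoline thunk if enabled)
 *	.align 16
 * 1:	cmp rdx, f1
 *	je  f1
 *	jmp rdx
 *
 * Larger sets become a balanced compare/jg tree whose leaves all have the
 * same cmp/je/jmp-indirect shape.
 */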
3630
3631 static int cmp_ips(const void *a, const void *b)
3632 {
3633 const s64 *ipa = a;
3634 const s64 *ipb = b;
3635
3636 if (*ipa > *ipb)
3637 return 1;
3638 if (*ipa < *ipb)
3639 return -1;
3640 return 0;
3641 }
3642
3643 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
3644 {
3645 u8 *prog = buf;
3646
3647 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
3648 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
3649 }
3650
3651 static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size)
3652 {
3653 int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
3654 u64 *stack_ptr;
3655
3656 for_each_possible_cpu(cpu) {
3657 stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
3658 stack_ptr[0] = PRIV_STACK_GUARD_VAL;
3659 stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL;
3660 }
3661 }
3662
3663 static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size,
3664 struct bpf_prog *prog)
3665 {
3666 int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
3667 u64 *stack_ptr;
3668
3669 for_each_possible_cpu(cpu) {
3670 stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
3671 if (stack_ptr[0] != PRIV_STACK_GUARD_VAL ||
3672 stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL) {
3673 pr_err("BPF private stack overflow/underflow detected for prog %s\n",
3674 bpf_jit_get_prog_name(prog));
3675 break;
3676 }
3677 }
3678 }
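/*
 * Per-CPU layout of the private stack allocated in bpf_int_jit_compile()
 * below (sketch; one guard region of PRIV_STACK_GUARD_SZ bytes at each end):
 *
 *	stack_ptr[0]             = PRIV_STACK_GUARD_VAL  <- overflow guard
 *	...                        round_up(stack_depth, 8) bytes of BPF stack
 *	stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL  <- underflow guard
 *
 * underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3, i.e. the first
 * u64 of the top guard region.
 */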
3679
3680 struct x64_jit_data {
3681 struct bpf_binary_header *rw_header;
3682 struct bpf_binary_header *header;
3683 int *addrs;
3684 u8 *image;
3685 int proglen;
3686 struct jit_context ctx;
3687 };
3688
3689 #define MAX_PASSES 20
3690 #define PADDING_PASSES (MAX_PASSES - 5)
3691
3692 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
3693 {
3694 struct bpf_binary_header *rw_header = NULL;
3695 struct bpf_binary_header *header = NULL;
3696 struct bpf_prog *tmp, *orig_prog = prog;
3697 void __percpu *priv_stack_ptr = NULL;
3698 struct x64_jit_data *jit_data;
3699 int priv_stack_alloc_sz;
3700 int proglen, oldproglen = 0;
3701 struct jit_context ctx = {};
3702 bool tmp_blinded = false;
3703 bool extra_pass = false;
3704 bool padding = false;
3705 u8 *rw_image = NULL;
3706 u8 *image = NULL;
3707 int *addrs;
3708 int pass;
3709 int i;
3710
3711 if (!prog->jit_requested)
3712 return orig_prog;
3713
3714 tmp = bpf_jit_blind_constants(prog);
3715 /*
3716 * If blinding was requested and we failed during blinding,
3717 * we must fall back to the interpreter.
3718 */
3719 if (IS_ERR(tmp))
3720 return orig_prog;
3721 if (tmp != prog) {
3722 tmp_blinded = true;
3723 prog = tmp;
3724 }
3725
3726 jit_data = prog->aux->jit_data;
3727 if (!jit_data) {
3728 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
3729 if (!jit_data) {
3730 prog = orig_prog;
3731 goto out;
3732 }
3733 prog->aux->jit_data = jit_data;
3734 }
3735 priv_stack_ptr = prog->aux->priv_stack_ptr;
3736 if (!priv_stack_ptr && prog->aux->jits_use_priv_stack) {
3737 /* Allocate the actual private stack: the verifier-calculated
3738 * stack size plus two memory guards to protect against overflow
3739 * and underflow.
3740 */
3741 priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) +
3742 2 * PRIV_STACK_GUARD_SZ;
3743 priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 8, GFP_KERNEL);
3744 if (!priv_stack_ptr) {
3745 prog = orig_prog;
3746 goto out_priv_stack;
3747 }
3748
3749 priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz);
3750 prog->aux->priv_stack_ptr = priv_stack_ptr;
3751 }
3752 addrs = jit_data->addrs;
3753 if (addrs) {
3754 ctx = jit_data->ctx;
3755 oldproglen = jit_data->proglen;
3756 image = jit_data->image;
3757 header = jit_data->header;
3758 rw_header = jit_data->rw_header;
3759 rw_image = (void *)rw_header + ((void *)image - (void *)header);
3760 extra_pass = true;
3761 padding = true;
3762 goto skip_init_addrs;
3763 }
3764 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
3765 if (!addrs) {
3766 prog = orig_prog;
3767 goto out_addrs;
3768 }
3769
3770 /*
3771 * Before the first pass, make a rough estimate of addrs[]:
3772 * each BPF instruction translates to less than 64 bytes.
3773 */
3774 for (proglen = 0, i = 0; i <= prog->len; i++) {
3775 proglen += 64;
3776 addrs[i] = proglen;
3777 }
3778 ctx.cleanup_addr = proglen;
3779 skip_init_addrs:
3780
3781 /*
3782 * JITed image shrinks with every pass and the loop iterates
3783 * until the image stops shrinking. Very large BPF programs
3784 * may converge on the last pass. In such a case, do one more
3785 * pass to emit the final image.
3786 */
3787 for (pass = 0; pass < MAX_PASSES || image; pass++) {
3788 if (!padding && pass >= PADDING_PASSES)
3789 padding = true;
3790 proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
3791 if (proglen <= 0) {
3792 out_image:
3793 image = NULL;
3794 if (header) {
3795 bpf_arch_text_copy(&header->size, &rw_header->size,
3796 sizeof(rw_header->size));
3797 bpf_jit_binary_pack_free(header, rw_header);
3798 }
3799 /* Fall back to interpreter mode */
3800 prog = orig_prog;
3801 if (extra_pass) {
3802 prog->bpf_func = NULL;
3803 prog->jited = 0;
3804 prog->jited_len = 0;
3805 }
3806 goto out_addrs;
3807 }
3808 if (image) {
3809 if (proglen != oldproglen) {
3810 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
3811 proglen, oldproglen);
3812 goto out_image;
3813 }
3814 break;
3815 }
3816 if (proglen == oldproglen) {
3817 /*
3818 * The number of entries in extable is the number of BPF_LDX
3819 * insns that access kernel memory via "pointer to BTF type".
3820 * The verifier changed their opcode from LDX|MEM|size
3821 * to LDX|PROBE_MEM|size to make JITing easier.
3822 */
3823 u32 align = __alignof__(struct exception_table_entry);
3824 u32 extable_size = prog->aux->num_exentries *
3825 sizeof(struct exception_table_entry);
3826
3827 /* allocate module memory for x86 insns and extable */
3828 header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
3829 &image, align, &rw_header, &rw_image,
3830 jit_fill_hole);
3831 if (!header) {
3832 prog = orig_prog;
3833 goto out_addrs;
3834 }
3835 prog->aux->extable = (void *) image + roundup(proglen, align);
3836 }
3837 oldproglen = proglen;
3838 cond_resched();
3839 }
3840
3841 if (bpf_jit_enable > 1)
3842 bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
3843
3844 if (image) {
3845 if (!prog->is_func || extra_pass) {
3846 /*
3847 * bpf_jit_binary_pack_finalize fails in two scenarios:
3848 * 1) header is not pointing to proper module memory;
3849 * 2) the arch doesn't support bpf_arch_text_copy().
3850 *
3851 * Both cases are serious bugs and justify WARN_ON.
3852 */
3853 if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) {
3854 /* header has been freed */
3855 header = NULL;
3856 goto out_image;
3857 }
3858
3859 bpf_tail_call_direct_fixup(prog);
3860 } else {
3861 jit_data->addrs = addrs;
3862 jit_data->ctx = ctx;
3863 jit_data->proglen = proglen;
3864 jit_data->image = image;
3865 jit_data->header = header;
3866 jit_data->rw_header = rw_header;
3867 }
3868
3869 /*
3870 * The bpf_prog_update_insn_ptrs function expects addrs to
3871 * point to the first byte of the jitted instruction (unlike
3872 * the bpf_prog_fill_jited_linfo call below, which, for historical
3873 * reasons, expects the entries to point to the next instruction).
3874 */
3875 bpf_prog_update_insn_ptrs(prog, addrs, image);
3876
3877 /*
3878 * ctx.prog_offset is used when CFI preambles put code *before*
3879 * the function. See emit_cfi(). For FineIBT specifically, this code
3880 * can also be executed, and bpf_prog_kallsyms_add() will
3881 * generate an additional symbol to cover it, hence proglen is
3882 * also decremented.
3883 */
3884 prog->bpf_func = (void *)image + cfi_get_offset();
3885 prog->jited = 1;
3886 prog->jited_len = proglen - cfi_get_offset();
3887 } else {
3888 prog = orig_prog;
3889 }
3890
3891 if (!image || !prog->is_func || extra_pass) {
3892 if (image)
3893 bpf_prog_fill_jited_linfo(prog, addrs + 1);
3894 out_addrs:
3895 kvfree(addrs);
3896 if (!image && priv_stack_ptr) {
3897 free_percpu(priv_stack_ptr);
3898 prog->aux->priv_stack_ptr = NULL;
3899 }
3900 out_priv_stack:
3901 kfree(jit_data);
3902 prog->aux->jit_data = NULL;
3903 }
3904 out:
3905 if (tmp_blinded)
3906 bpf_jit_prog_release_other(prog, prog == orig_prog ?
3907 tmp : orig_prog);
3908 return prog;
3909 }
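/*
 * Shape of the pass loop above for a small program (numbers are
 * illustrative only):
 *
 *	seed:   addrs[] estimated at 64 bytes per insn before the loop
 *	pass 0: proglen = 180	(image == NULL, offsets shrink)
 *	pass 1: proglen = 176
 *	pass 2: proglen = 176 == oldproglen -> allocate image/rw_header
 *	pass 3: emit into rw_image, verify proglen is unchanged, break
 *
 * The final text is then copied into the read-only image by
 * bpf_jit_binary_pack_finalize().
 */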
3910
3911 bool bpf_jit_supports_kfunc_call(void)
3912 {
3913 return true;
3914 }
3915
3916 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
3917 {
3918 if (text_poke_copy(dst, src, len) == NULL)
3919 return ERR_PTR(-EINVAL);
3920 return dst;
3921 }
3922
3923 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
3924 bool bpf_jit_supports_subprog_tailcalls(void)
3925 {
3926 return true;
3927 }
3928
3929 bool bpf_jit_supports_percpu_insn(void)
3930 {
3931 return true;
3932 }
3933
3934 void bpf_jit_free(struct bpf_prog *prog)
3935 {
3936 if (prog->jited) {
3937 struct x64_jit_data *jit_data = prog->aux->jit_data;
3938 struct bpf_binary_header *hdr;
3939 void __percpu *priv_stack_ptr;
3940 int priv_stack_alloc_sz;
3941
3942 /*
3943 * If we fail the final pass of JIT (from jit_subprogs),
3944 * the program may not be finalized yet. Call finalize here
3945 * before freeing it.
3946 */
3947 if (jit_data) {
3948 bpf_jit_binary_pack_finalize(jit_data->header,
3949 jit_data->rw_header);
3950 kvfree(jit_data->addrs);
3951 kfree(jit_data);
3952 }
3953 prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset();
3954 hdr = bpf_jit_binary_pack_hdr(prog);
3955 bpf_jit_binary_pack_free(hdr, NULL);
3956 priv_stack_ptr = prog->aux->priv_stack_ptr;
3957 if (priv_stack_ptr) {
3958 priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) +
3959 2 * PRIV_STACK_GUARD_SZ;
3960 priv_stack_check_guard(priv_stack_ptr, priv_stack_alloc_sz, prog);
3961 free_percpu(prog->aux->priv_stack_ptr);
3962 }
3963 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
3964 }
3965
3966 bpf_prog_unlock_free(prog);
3967 }
3968
3969 bool bpf_jit_supports_exceptions(void)
3970 {
3971 /* We unwind through both kernel frames (starting from within bpf_throw
3972 * call) and BPF frames. Therefore we require ORC unwinder to be enabled
3973 * to walk kernel frames and reach BPF frames in the stack trace.
3974 */
3975 return IS_ENABLED(CONFIG_UNWINDER_ORC);
3976 }
3977
3978 bool bpf_jit_supports_private_stack(void)
3979 {
3980 return true;
3981 }
3982
3983 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3984 {
3985 #if defined(CONFIG_UNWINDER_ORC)
3986 struct unwind_state state;
3987 unsigned long addr;
3988
3989 for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
3990 unwind_next_frame(&state)) {
3991 addr = unwind_get_return_address(&state);
3992 if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
3993 break;
3994 }
3995 return;
3996 #endif
3997 }
3998
3999 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
4000 struct bpf_prog *new, struct bpf_prog *old)
4001 {
4002 u8 *old_addr, *new_addr, *old_bypass_addr;
4003 enum bpf_text_poke_type t;
4004 int ret;
4005
4006 old_bypass_addr = old ? NULL : poke->bypass_addr;
4007 old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
4008 new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
4009
4010 /*
4011 * On program loading or teardown, the program's kallsym entry
4012 * might not be in place, so we use __bpf_arch_text_poke to skip
4013 * the kallsyms check.
4014 */
4015 if (new) {
4016 t = old_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
4017 ret = __bpf_arch_text_poke(poke->tailcall_target,
4018 t, BPF_MOD_JUMP,
4019 old_addr, new_addr);
4020 BUG_ON(ret < 0);
4021 if (!old) {
4022 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
4023 BPF_MOD_JUMP, BPF_MOD_NOP,
4024 poke->bypass_addr,
4025 NULL);
4026 BUG_ON(ret < 0);
4027 }
4028 } else {
4029 t = old_bypass_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
4030 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
4031 t, BPF_MOD_JUMP, old_bypass_addr,
4032 poke->bypass_addr);
4033 BUG_ON(ret < 0);
4034 /* Let other CPUs finish the execution of the program
4035 * so that it will not be possible to expose them
4036 * to an invalid nop, stack unwind, nop state.
4037 */
4038 if (!ret)
4039 synchronize_rcu();
4040 t = old_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
4041 ret = __bpf_arch_text_poke(poke->tailcall_target,
4042 t, BPF_MOD_NOP, old_addr, NULL);
4043 BUG_ON(ret < 0);
4044 }
4045 }
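/*
 * Sketch of the two patch sites updated above, as laid out by the direct
 * tail call code in do_jit() (simplified):
 *
 *	poke->tailcall_bypass:	jmp out   <->  5-byte nop
 *	poke->tailcall_target:	jmp prog  <->  5-byte nop
 *	out:
 *
 * Installing a target patches tailcall_target first and then nops the
 * bypass; removing the last target re-arms the bypass jmp, waits for RCU,
 * and only then nops tailcall_target, so no CPU can fall through into a
 * stale or missing target.
 */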
4046
4047 bool bpf_jit_supports_arena(void)
4048 {
4049 return true;
4050 }
4051
4052 bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
4053 {
4054 if (!in_arena)
4055 return true;
4056 switch (insn->code) {
4057 case BPF_STX | BPF_ATOMIC | BPF_W:
4058 case BPF_STX | BPF_ATOMIC | BPF_DW:
4059 if (insn->imm == (BPF_AND | BPF_FETCH) ||
4060 insn->imm == (BPF_OR | BPF_FETCH) ||
4061 insn->imm == (BPF_XOR | BPF_FETCH))
4062 return false;
4063 }
4064 return true;
4065 }
4066
4067 bool bpf_jit_supports_ptr_xchg(void)
4068 {
4069 return true;
4070 }
4071
4072 /* x86-64 JIT emits its own code to filter user addresses so return 0 here */
4073 u64 bpf_arch_uaddress_limit(void)
4074 {
4075 return 0;
4076 }
4077
4078 bool bpf_jit_supports_timed_may_goto(void)
4079 {
4080 return true;
4081 }
4082