xref: /linux/arch/x86/net/bpf_jit_comp.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * BPF JIT compiler
4  *
5  * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6  * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7  */
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bpf.h>
12 #include <linux/memory.h>
13 #include <linux/sort.h>
14 #include <asm/extable.h>
15 #include <asm/ftrace.h>
16 #include <asm/set_memory.h>
17 #include <asm/nospec-branch.h>
18 #include <asm/text-patching.h>
19 #include <asm/unwind.h>
20 #include <asm/cfi.h>
21 
22 static bool all_callee_regs_used[4] = {true, true, true, true};
23 
24 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
25 {
26 	if (len == 1)
27 		*ptr = bytes;
28 	else if (len == 2)
29 		*(u16 *)ptr = bytes;
30 	else {
31 		*(u32 *)ptr = bytes;
32 		barrier();
33 	}
34 	return ptr + len;
35 }
36 
37 #define EMIT(bytes, len) \
38 	do { prog = emit_code(prog, bytes, len); } while (0)
39 
40 #define EMIT1(b1)		EMIT(b1, 1)
41 #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
42 #define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
43 #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
44 
45 #define EMIT1_off32(b1, off) \
46 	do { EMIT1(b1); EMIT(off, 4); } while (0)
47 #define EMIT2_off32(b1, b2, off) \
48 	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
49 #define EMIT3_off32(b1, b2, b3, off) \
50 	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
51 #define EMIT4_off32(b1, b2, b3, b4, off) \
52 	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
53 
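/*
 * Editor's sketch (not part of the kernel source): the EMITn() macros pack
 * their byte arguments in little-endian order, so the first argument is the
 * first byte emitted. For example
 *
 *	EMIT3(0x48, 0x89, 0xE5);
 *
 * appends the three bytes 48 89 e5 ("mov rbp, rsp") at 'prog' and advances
 * 'prog' by 3, and EMIT1_off32(0xE8, off) appends the 0xE8 opcode followed
 * by a 4-byte little-endian displacement, i.e. a "call rel32".
 */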
54 #ifdef CONFIG_X86_KERNEL_IBT
55 #define EMIT_ENDBR()		EMIT(gen_endbr(), 4)
56 #define EMIT_ENDBR_POISON()	EMIT(gen_endbr_poison(), 4)
57 #else
58 #define EMIT_ENDBR()
59 #define EMIT_ENDBR_POISON()
60 #endif
61 
62 static bool is_imm8(int value)
63 {
64 	return value <= 127 && value >= -128;
65 }
66 
67 /*
68  * Let us limit the positive offset to be <= 123.
69  * This is to ensure eventual jit convergence for the following patterns:
70  * ...
71  * pass4, final_proglen=4391:
72  *   ...
73  *   20e:    48 85 ff                test   rdi,rdi
74  *   211:    74 7d                   je     0x290
75  *   213:    48 8b 77 00             mov    rsi,QWORD PTR [rdi+0x0]
76  *   ...
77  *   289:    48 85 ff                test   rdi,rdi
78  *   28c:    74 17                   je     0x2a5
79  *   28e:    e9 7f ff ff ff          jmp    0x212
80  *   293:    bf 03 00 00 00          mov    edi,0x3
81  * Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (+125)
82  * and insn at 0x28e is 5-byte jmp insn with offset -129.
83  *
84  * pass5, final_proglen=4392:
85  *   ...
86  *   20e:    48 85 ff                test   rdi,rdi
87  *   211:    0f 84 80 00 00 00       je     0x297
88  *   217:    48 8b 77 00             mov    rsi,QWORD PTR [rdi+0x0]
89  *   ...
90  *   28d:    48 85 ff                test   rdi,rdi
91  *   290:    74 1a                   je     0x2ac
92  *   292:    eb 84                   jmp    0x218
93  *   294:    bf 03 00 00 00          mov    edi,0x3
94  * Note that insn at 0x211 is 6-byte cond jump insn now since its offset
95  * becomes 0x80 based on previous round (0x293 - 0x213 = 0x80).
96  * At the same time, insn at 0x292 is a 2-byte insn since its offset is
97  * -124.
98  *
99  * pass6 will repeat the same code as in pass4 and this will prevent
100  * eventual convergence.
101  *
102  * To fix this issue, we need to break je (2->6 bytes) <-> jmp (5->2 bytes)
103  * cycle in the above. In the above example je offset <= 0x7c should work.
104  *
105  * For other cases, je <-> je needs offset <= 0x7b to avoid the
106  * non-convergence issue. For jmp <-> je and jmp <-> jmp cases, jmp
107  * offset <= 0x7c should avoid the non-convergence issue.
108  *
109  * Overall, let us limit the positive offset for 8bit cond/uncond jmp insn
110  * to maximum 123 (0x7b). This way, the jit pass can eventually converge.
111  */
112 static bool is_imm8_jmp_offset(int value)
113 {
114 	return value <= 123 && value >= -128;
115 }
116 
117 static bool is_simm32(s64 value)
118 {
119 	return value == (s64)(s32)value;
120 }
121 
122 static bool is_uimm32(u64 value)
123 {
124 	return value == (u64)(u32)value;
125 }
126 
127 /* mov dst, src */
128 #define EMIT_mov(DST, SRC)								 \
129 	do {										 \
130 		if (DST != SRC)								 \
131 			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
132 	} while (0)
133 
134 static int bpf_size_to_x86_bytes(int bpf_size)
135 {
136 	if (bpf_size == BPF_W)
137 		return 4;
138 	else if (bpf_size == BPF_H)
139 		return 2;
140 	else if (bpf_size == BPF_B)
141 		return 1;
142 	else if (bpf_size == BPF_DW)
143 		return 4; /* imm32 */
144 	else
145 		return 0;
146 }
147 
148 /*
149  * List of x86 cond jumps opcodes (. + s8)
150  * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
151  */
152 #define X86_JB  0x72
153 #define X86_JAE 0x73
154 #define X86_JE  0x74
155 #define X86_JNE 0x75
156 #define X86_JBE 0x76
157 #define X86_JA  0x77
158 #define X86_JL  0x7C
159 #define X86_JGE 0x7D
160 #define X86_JLE 0x7E
161 #define X86_JG  0x7F
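/*
 * Editor's note (illustrative, not part of the kernel source): e.g. X86_JE
 * is 0x74, so a short "je rel8" is emitted as 74 <rel8>; adding 0x10 and the
 * extra 0x0f escape gives the far form 0f 84 <rel32>, which is what the JIT
 * switches to once an 8-bit offset no longer fits.
 */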
162 
163 /* Pick a register outside of BPF range for JIT internal work */
164 #define AUX_REG (MAX_BPF_JIT_REG + 1)
165 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
166 #define X86_REG_R12 (MAX_BPF_JIT_REG + 3)
167 
168 /*
169  * The following table maps BPF registers to x86-64 registers.
170  *
171  * x86-64 register R12 is not mapped to any BPF register, since when used as
172  * a base address register in load/store instructions it always needs an extra
173  * byte of encoding and is callee saved; the JIT reserves it as X86_REG_R12.
174  *
175  * x86-64 register R9 is not used by BPF programs, but can be used by BPF
176  * trampoline. x86-64 register R10 is used for blinding (if enabled).
177  */
178 static const int reg2hex[] = {
179 	[BPF_REG_0] = 0,  /* RAX */
180 	[BPF_REG_1] = 7,  /* RDI */
181 	[BPF_REG_2] = 6,  /* RSI */
182 	[BPF_REG_3] = 2,  /* RDX */
183 	[BPF_REG_4] = 1,  /* RCX */
184 	[BPF_REG_5] = 0,  /* R8  */
185 	[BPF_REG_6] = 3,  /* RBX callee saved */
186 	[BPF_REG_7] = 5,  /* R13 callee saved */
187 	[BPF_REG_8] = 6,  /* R14 callee saved */
188 	[BPF_REG_9] = 7,  /* R15 callee saved */
189 	[BPF_REG_FP] = 5, /* RBP readonly */
190 	[BPF_REG_AX] = 2, /* R10 temp register */
191 	[AUX_REG] = 3,    /* R11 temp register */
192 	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
193 	[X86_REG_R12] = 4, /* R12 callee saved */
194 };
195 
196 static const int reg2pt_regs[] = {
197 	[BPF_REG_0] = offsetof(struct pt_regs, ax),
198 	[BPF_REG_1] = offsetof(struct pt_regs, di),
199 	[BPF_REG_2] = offsetof(struct pt_regs, si),
200 	[BPF_REG_3] = offsetof(struct pt_regs, dx),
201 	[BPF_REG_4] = offsetof(struct pt_regs, cx),
202 	[BPF_REG_5] = offsetof(struct pt_regs, r8),
203 	[BPF_REG_6] = offsetof(struct pt_regs, bx),
204 	[BPF_REG_7] = offsetof(struct pt_regs, r13),
205 	[BPF_REG_8] = offsetof(struct pt_regs, r14),
206 	[BPF_REG_9] = offsetof(struct pt_regs, r15),
207 };
208 
209 /*
210  * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
211  * which need extra byte of encoding.
212  * rax,rcx,...,rbp have simpler encoding
213  */
214 static bool is_ereg(u32 reg)
215 {
216 	return (1 << reg) & (BIT(BPF_REG_5) |
217 			     BIT(AUX_REG) |
218 			     BIT(BPF_REG_7) |
219 			     BIT(BPF_REG_8) |
220 			     BIT(BPF_REG_9) |
221 			     BIT(X86_REG_R9) |
222 			     BIT(X86_REG_R12) |
223 			     BIT(BPF_REG_AX));
224 }
225 
226 /*
227  * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
228  * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
229  * of encoding. al,cl,dl,bl have simpler encoding.
230  */
231 static bool is_ereg_8l(u32 reg)
232 {
233 	return is_ereg(reg) ||
234 	    (1 << reg) & (BIT(BPF_REG_1) |
235 			  BIT(BPF_REG_2) |
236 			  BIT(BPF_REG_FP));
237 }
238 
239 static bool is_axreg(u32 reg)
240 {
241 	return reg == BPF_REG_0;
242 }
243 
244 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
245 static u8 add_1mod(u8 byte, u32 reg)
246 {
247 	if (is_ereg(reg))
248 		byte |= 1;
249 	return byte;
250 }
251 
252 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
253 {
254 	if (is_ereg(r1))
255 		byte |= 1;
256 	if (is_ereg(r2))
257 		byte |= 4;
258 	return byte;
259 }
260 
261 static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index)
262 {
263 	if (is_ereg(r1))
264 		byte |= 1;
265 	if (is_ereg(index))
266 		byte |= 2;
267 	if (is_ereg(r2))
268 		byte |= 4;
269 	return byte;
270 }
271 
272 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
273 static u8 add_1reg(u8 byte, u32 dst_reg)
274 {
275 	return byte + reg2hex[dst_reg];
276 }
277 
278 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
279 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
280 {
281 	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
282 }
283 
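/*
 * Worked example (editor's addition, not part of the kernel source):
 * EMIT_mov(BPF_REG_6, BPF_REG_7) copies R13 into RBX. add_2mod(0x48, ...)
 * yields REX 0x4C (W=1, plus R=1 for the extended source R13), and
 * add_2reg(0xC0, ...) yields ModRM 0xEB (mod=11, reg=r13, rm=rbx), so the
 * emitted bytes are 4c 89 eb, i.e. "mov rbx, r13".
 */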
284 /* Some 1-byte opcodes for binary ALU operations */
285 static u8 simple_alu_opcodes[] = {
286 	[BPF_ADD] = 0x01,
287 	[BPF_SUB] = 0x29,
288 	[BPF_AND] = 0x21,
289 	[BPF_OR] = 0x09,
290 	[BPF_XOR] = 0x31,
291 	[BPF_LSH] = 0xE0,
292 	[BPF_RSH] = 0xE8,
293 	[BPF_ARSH] = 0xF8,
294 };
295 
296 static void jit_fill_hole(void *area, unsigned int size)
297 {
298 	/* Fill whole space with INT3 instructions */
299 	memset(area, 0xcc, size);
300 }
301 
302 int bpf_arch_text_invalidate(void *dst, size_t len)
303 {
304 	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
305 }
306 
307 struct jit_context {
308 	int cleanup_addr; /* Epilogue code offset */
309 
310 	/*
311 	 * Program specific offsets of labels in the code; these rely on the
312 	 * JIT doing at least 2 passes, recording the position on the first
313 	 * pass, only to generate the correct offset on the second pass.
314 	 */
315 	int tail_call_direct_label;
316 	int tail_call_indirect_label;
317 };
318 
319 /* Maximum number of bytes emitted while JITing one eBPF insn */
320 #define BPF_MAX_INSN_SIZE	128
321 #define BPF_INSN_SAFETY		64
322 
323 /* Number of bytes emit_patch() needs to generate instructions */
324 #define X86_PATCH_SIZE		5
325 /* Number of bytes that will be skipped on tailcall */
326 #define X86_TAIL_CALL_OFFSET	(12 + ENDBR_INSN_SIZE)
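/*
 * Editor's sketch (not part of the kernel source): the 12 skipped bytes are
 * the fixed-layout part of emit_prologue() that a tail call must not
 * re-execute: the 5-byte patchable nop/call site (X86_PATCH_SIZE), the
 * 3-byte "xor rax, rax"/nop3 that seeds tail_call_cnt, and "push rbp;
 * mov rbp, rsp" (1 + 3 bytes). ENDBR_INSN_SIZE covers the ENDBR emitted
 * ahead of them on IBT builds; the tail call then resumes at the ENDBR
 * marked "X86_TAIL_CALL_OFFSET is here" in emit_prologue().
 */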
327 
328 static void push_r12(u8 **pprog)
329 {
330 	u8 *prog = *pprog;
331 
332 	EMIT2(0x41, 0x54);   /* push r12 */
333 	*pprog = prog;
334 }
335 
336 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
337 {
338 	u8 *prog = *pprog;
339 
340 	if (callee_regs_used[0])
341 		EMIT1(0x53);         /* push rbx */
342 	if (callee_regs_used[1])
343 		EMIT2(0x41, 0x55);   /* push r13 */
344 	if (callee_regs_used[2])
345 		EMIT2(0x41, 0x56);   /* push r14 */
346 	if (callee_regs_used[3])
347 		EMIT2(0x41, 0x57);   /* push r15 */
348 	*pprog = prog;
349 }
350 
351 static void pop_r12(u8 **pprog)
352 {
353 	u8 *prog = *pprog;
354 
355 	EMIT2(0x41, 0x5C);   /* pop r12 */
356 	*pprog = prog;
357 }
358 
359 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
360 {
361 	u8 *prog = *pprog;
362 
363 	if (callee_regs_used[3])
364 		EMIT2(0x41, 0x5F);   /* pop r15 */
365 	if (callee_regs_used[2])
366 		EMIT2(0x41, 0x5E);   /* pop r14 */
367 	if (callee_regs_used[1])
368 		EMIT2(0x41, 0x5D);   /* pop r13 */
369 	if (callee_regs_used[0])
370 		EMIT1(0x5B);         /* pop rbx */
371 	*pprog = prog;
372 }
373 
374 static void emit_nops(u8 **pprog, int len)
375 {
376 	u8 *prog = *pprog;
377 	int i, noplen;
378 
379 	while (len > 0) {
380 		noplen = len;
381 
382 		if (noplen > ASM_NOP_MAX)
383 			noplen = ASM_NOP_MAX;
384 
385 		for (i = 0; i < noplen; i++)
386 			EMIT1(x86_nops[noplen][i]);
387 		len -= noplen;
388 	}
389 
390 	*pprog = prog;
391 }
392 
393 /*
394  * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
395  * in arch/x86/kernel/alternative.c
396  */
397 
398 static void emit_fineibt(u8 **pprog, u32 hash)
399 {
400 	u8 *prog = *pprog;
401 
402 	EMIT_ENDBR();
403 	EMIT3_off32(0x41, 0x81, 0xea, hash);		/* subl $hash, %r10d	*/
404 	EMIT2(0x74, 0x07);				/* jz.d8 +7		*/
405 	EMIT2(0x0f, 0x0b);				/* ud2			*/
406 	EMIT1(0x90);					/* nop			*/
407 	EMIT_ENDBR_POISON();
408 
409 	*pprog = prog;
410 }
411 
412 static void emit_kcfi(u8 **pprog, u32 hash)
413 {
414 	u8 *prog = *pprog;
415 
416 	EMIT1_off32(0xb8, hash);			/* movl $hash, %eax	*/
417 #ifdef CONFIG_CALL_PADDING
418 	EMIT1(0x90);
419 	EMIT1(0x90);
420 	EMIT1(0x90);
421 	EMIT1(0x90);
422 	EMIT1(0x90);
423 	EMIT1(0x90);
424 	EMIT1(0x90);
425 	EMIT1(0x90);
426 	EMIT1(0x90);
427 	EMIT1(0x90);
428 	EMIT1(0x90);
429 #endif
430 	EMIT_ENDBR();
431 
432 	*pprog = prog;
433 }
434 
435 static void emit_cfi(u8 **pprog, u32 hash)
436 {
437 	u8 *prog = *pprog;
438 
439 	switch (cfi_mode) {
440 	case CFI_FINEIBT:
441 		emit_fineibt(&prog, hash);
442 		break;
443 
444 	case CFI_KCFI:
445 		emit_kcfi(&prog, hash);
446 		break;
447 
448 	default:
449 		EMIT_ENDBR();
450 		break;
451 	}
452 
453 	*pprog = prog;
454 }
455 
456 static void emit_prologue_tail_call(u8 **pprog, bool is_subprog)
457 {
458 	u8 *prog = *pprog;
459 
460 	if (!is_subprog) {
461 		/* cmp rax, MAX_TAIL_CALL_CNT */
462 		EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT);
463 		EMIT2(X86_JA, 6);        /* ja 6 */
464 		/* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT.
465 		 * case1: entry of main prog.
466 		 * case2: tail callee of main prog.
467 		 */
468 		EMIT1(0x50);             /* push rax */
469 		/* Make rax as tail_call_cnt_ptr. */
470 		EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */
471 		EMIT2(0xEB, 1);          /* jmp 1 */
472 		/* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT.
473 		 * case: tail callee of subprog.
474 		 */
475 		EMIT1(0x50);             /* push rax */
476 		/* push tail_call_cnt_ptr */
477 		EMIT1(0x50);             /* push rax */
478 	} else { /* is_subprog */
479 		/* rax is tail_call_cnt_ptr. */
480 		EMIT1(0x50);             /* push rax */
481 		EMIT1(0x50);             /* push rax */
482 	}
483 
484 	*pprog = prog;
485 }
486 
487 /*
488  * Emit x86-64 prologue code for BPF program.
489  * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
490  * while jumping to another program
491  */
492 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
493 			  bool tail_call_reachable, bool is_subprog,
494 			  bool is_exception_cb)
495 {
496 	u8 *prog = *pprog;
497 
498 	emit_cfi(&prog, is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash);
499 	/* BPF trampoline can be made to work without these nops,
500 	 * but let's waste 5 bytes for now and optimize later
501 	 */
502 	emit_nops(&prog, X86_PATCH_SIZE);
503 	if (!ebpf_from_cbpf) {
504 		if (tail_call_reachable && !is_subprog)
505 			/* When it's the entry of the whole tailcall context,
506 			 * zeroing rax means initialising tail_call_cnt.
507 			 */
508 			EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */
509 		else
510 			/* Keep the same instruction layout. */
511 			emit_nops(&prog, 3);     /* nop3 */
512 	}
513 	/* Exception callback receives FP as third parameter */
514 	if (is_exception_cb) {
515 		EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
516 		EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
517 		/* The main frame must have exception_boundary as true, so we
518 		 * first restore those callee-saved regs from stack, before
519 		 * reusing the stack frame.
520 		 */
521 		pop_callee_regs(&prog, all_callee_regs_used);
522 		pop_r12(&prog);
523 		/* Reset the stack frame. */
524 		EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
525 	} else {
526 		EMIT1(0x55);             /* push rbp */
527 		EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
528 	}
529 
530 	/* X86_TAIL_CALL_OFFSET is here */
531 	EMIT_ENDBR();
532 
533 	/* sub rsp, rounded_stack_depth */
534 	if (stack_depth)
535 		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
536 	if (tail_call_reachable)
537 		emit_prologue_tail_call(&prog, is_subprog);
538 	*pprog = prog;
539 }
540 
541 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
542 {
543 	u8 *prog = *pprog;
544 	s64 offset;
545 
546 	offset = func - (ip + X86_PATCH_SIZE);
547 	if (!is_simm32(offset)) {
548 		pr_err("Target call %p is out of range\n", func);
549 		return -ERANGE;
550 	}
551 	EMIT1_off32(opcode, offset);
552 	*pprog = prog;
553 	return 0;
554 }
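/*
 * Editor's note (illustrative, not part of the kernel source): emit_patch()
 * produces the 5-byte "call rel32"/"jmp rel32" forms, where rel32 is taken
 * relative to the end of the instruction. E.g. with opcode 0xE8 and
 * func == ip + X86_PATCH_SIZE the encoded displacement is 0, i.e. the
 * target is the instruction immediately following the call.
 */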
555 
556 static int emit_call(u8 **pprog, void *func, void *ip)
557 {
558 	return emit_patch(pprog, func, ip, 0xE8);
559 }
560 
561 static int emit_rsb_call(u8 **pprog, void *func, void *ip)
562 {
563 	OPTIMIZER_HIDE_VAR(func);
564 	ip += x86_call_depth_emit_accounting(pprog, func, ip);
565 	return emit_patch(pprog, func, ip, 0xE8);
566 }
567 
568 static int emit_jump(u8 **pprog, void *func, void *ip)
569 {
570 	return emit_patch(pprog, func, ip, 0xE9);
571 }
572 
573 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
574 				void *old_addr, void *new_addr)
575 {
576 	const u8 *nop_insn = x86_nops[5];
577 	u8 old_insn[X86_PATCH_SIZE];
578 	u8 new_insn[X86_PATCH_SIZE];
579 	u8 *prog;
580 	int ret;
581 
582 	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
583 	if (old_addr) {
584 		prog = old_insn;
585 		ret = t == BPF_MOD_CALL ?
586 		      emit_call(&prog, old_addr, ip) :
587 		      emit_jump(&prog, old_addr, ip);
588 		if (ret)
589 			return ret;
590 	}
591 
592 	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
593 	if (new_addr) {
594 		prog = new_insn;
595 		ret = t == BPF_MOD_CALL ?
596 		      emit_call(&prog, new_addr, ip) :
597 		      emit_jump(&prog, new_addr, ip);
598 		if (ret)
599 			return ret;
600 	}
601 
602 	ret = -EBUSY;
603 	mutex_lock(&text_mutex);
604 	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
605 		goto out;
606 	ret = 1;
607 	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
608 		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
609 		ret = 0;
610 	}
611 out:
612 	mutex_unlock(&text_mutex);
613 	return ret;
614 }
615 
616 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
617 		       void *old_addr, void *new_addr)
618 {
619 	if (!is_kernel_text((long)ip) &&
620 	    !is_bpf_text_address((long)ip))
621 		/* BPF poking in modules is not supported */
622 		return -EINVAL;
623 
624 	/*
625 	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
626 	 * with an ENDBR instruction.
627 	 */
628 	if (is_endbr(*(u32 *)ip))
629 		ip += ENDBR_INSN_SIZE;
630 
631 	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
632 }
633 
634 #define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)
635 
636 static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
637 {
638 	u8 *prog = *pprog;
639 
640 	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
641 		EMIT_LFENCE();
642 		EMIT2(0xFF, 0xE0 + reg);
643 	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
644 		OPTIMIZER_HIDE_VAR(reg);
645 		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
646 			emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
647 		else
648 			emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
649 	} else {
650 		EMIT2(0xFF, 0xE0 + reg);	/* jmp *%\reg */
651 		if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
652 			EMIT1(0xCC);		/* int3 */
653 	}
654 
655 	*pprog = prog;
656 }
657 
658 static void emit_return(u8 **pprog, u8 *ip)
659 {
660 	u8 *prog = *pprog;
661 
662 	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
663 		emit_jump(&prog, x86_return_thunk, ip);
664 	} else {
665 		EMIT1(0xC3);		/* ret */
666 		if (IS_ENABLED(CONFIG_MITIGATION_SLS))
667 			EMIT1(0xCC);	/* int3 */
668 	}
669 
670 	*pprog = prog;
671 }
672 
673 #define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack)	(-16 - round_up(stack, 8))
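/*
 * Editor's sketch of the stack layout this offset assumes (not part of the
 * kernel source), for a program with rounded stack_depth SD:
 *
 *	[rbp]            saved rbp
 *	[rbp - SD]       bottom of the BPF stack area
 *	[rbp - SD - 8]   tail_call_cnt (or the caller's tcc_ptr, see
 *	                 emit_prologue_tail_call())
 *	[rbp - SD - 16]  tail_call_cnt_ptr  <-- this offset
 *
 * Callee-saved registers are pushed below this by do_jit().
 */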
674 
675 /*
676  * Generate the following code:
677  *
678  * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
679  *   if (index >= array->map.max_entries)
680  *     goto out;
681  *   if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
682  *     goto out;
683  *   prog = array->ptrs[index];
684  *   if (prog == NULL)
685  *     goto out;
686  *   goto *(prog->bpf_func + prologue_size);
687  * out:
688  */
689 static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
690 					u8 **pprog, bool *callee_regs_used,
691 					u32 stack_depth, u8 *ip,
692 					struct jit_context *ctx)
693 {
694 	int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
695 	u8 *prog = *pprog, *start = *pprog;
696 	int offset;
697 
698 	/*
699 	 * rdi - pointer to ctx
700 	 * rsi - pointer to bpf_array
701 	 * rdx - index in bpf_array
702 	 */
703 
704 	/*
705 	 * if (index >= array->map.max_entries)
706 	 *	goto out;
707 	 */
708 	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
709 	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
710 	      offsetof(struct bpf_array, map.max_entries));
711 
712 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
713 	EMIT2(X86_JBE, offset);                   /* jbe out */
714 
715 	/*
716 	 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
717 	 *	goto out;
718 	 */
719 	EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
720 	EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
721 
722 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
723 	EMIT2(X86_JAE, offset);                   /* jae out */
724 
725 	/* prog = array->ptrs[index]; */
726 	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
727 		    offsetof(struct bpf_array, ptrs));
728 
729 	/*
730 	 * if (prog == NULL)
731 	 *	goto out;
732 	 */
733 	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
734 
735 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
736 	EMIT2(X86_JE, offset);                    /* je out */
737 
738 	/* Inc tail_call_cnt if the slot is populated. */
739 	EMIT4(0x48, 0x83, 0x00, 0x01);            /* add qword ptr [rax], 1 */
740 
741 	if (bpf_prog->aux->exception_boundary) {
742 		pop_callee_regs(&prog, all_callee_regs_used);
743 		pop_r12(&prog);
744 	} else {
745 		pop_callee_regs(&prog, callee_regs_used);
746 		if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
747 			pop_r12(&prog);
748 	}
749 
750 	/* Pop tail_call_cnt_ptr. */
751 	EMIT1(0x58);                              /* pop rax */
752 	/* Pop tail_call_cnt, if it's main prog.
753 	 * Pop tail_call_cnt_ptr, if it's subprog.
754 	 */
755 	EMIT1(0x58);                              /* pop rax */
756 	if (stack_depth)
757 		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
758 			    round_up(stack_depth, 8));
759 
760 	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
761 	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
762 	      offsetof(struct bpf_prog, bpf_func));
763 	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
764 	      X86_TAIL_CALL_OFFSET);
765 	/*
766 	 * Now we're ready to jump into next BPF program
767 	 * rdi == ctx (1st arg)
768 	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
769 	 */
770 	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
771 
772 	/* out: */
773 	ctx->tail_call_indirect_label = prog - start;
774 	*pprog = prog;
775 }
776 
777 static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
778 				      struct bpf_jit_poke_descriptor *poke,
779 				      u8 **pprog, u8 *ip,
780 				      bool *callee_regs_used, u32 stack_depth,
781 				      struct jit_context *ctx)
782 {
783 	int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
784 	u8 *prog = *pprog, *start = *pprog;
785 	int offset;
786 
787 	/*
788 	 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
789 	 *	goto out;
790 	 */
791 	EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off);   /* mov rax, qword ptr [rbp - tcc_ptr_off] */
792 	EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT);   /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
793 
794 	offset = ctx->tail_call_direct_label - (prog + 2 - start);
795 	EMIT2(X86_JAE, offset);                       /* jae out */
796 
797 	poke->tailcall_bypass = ip + (prog - start);
798 	poke->adj_off = X86_TAIL_CALL_OFFSET;
799 	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
800 	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
801 
802 	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
803 		  poke->tailcall_bypass);
804 
805 	/* Inc tail_call_cnt if the slot is populated. */
806 	EMIT4(0x48, 0x83, 0x00, 0x01);                /* add qword ptr [rax], 1 */
807 
808 	if (bpf_prog->aux->exception_boundary) {
809 		pop_callee_regs(&prog, all_callee_regs_used);
810 		pop_r12(&prog);
811 	} else {
812 		pop_callee_regs(&prog, callee_regs_used);
813 		if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
814 			pop_r12(&prog);
815 	}
816 
817 	/* Pop tail_call_cnt_ptr. */
818 	EMIT1(0x58);                                  /* pop rax */
819 	/* Pop tail_call_cnt, if it's main prog.
820 	 * Pop tail_call_cnt_ptr, if it's subprog.
821 	 */
822 	EMIT1(0x58);                                  /* pop rax */
823 	if (stack_depth)
824 		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
825 
826 	emit_nops(&prog, X86_PATCH_SIZE);
827 
828 	/* out: */
829 	ctx->tail_call_direct_label = prog - start;
830 
831 	*pprog = prog;
832 }
833 
834 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
835 {
836 	struct bpf_jit_poke_descriptor *poke;
837 	struct bpf_array *array;
838 	struct bpf_prog *target;
839 	int i, ret;
840 
841 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
842 		poke = &prog->aux->poke_tab[i];
843 		if (poke->aux && poke->aux != prog->aux)
844 			continue;
845 
846 		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
847 
848 		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
849 			continue;
850 
851 		array = container_of(poke->tail_call.map, struct bpf_array, map);
852 		mutex_lock(&array->aux->poke_mutex);
853 		target = array->ptrs[poke->tail_call.key];
854 		if (target) {
855 			ret = __bpf_arch_text_poke(poke->tailcall_target,
856 						   BPF_MOD_JUMP, NULL,
857 						   (u8 *)target->bpf_func +
858 						   poke->adj_off);
859 			BUG_ON(ret < 0);
860 			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
861 						   BPF_MOD_JUMP,
862 						   (u8 *)poke->tailcall_target +
863 						   X86_PATCH_SIZE, NULL);
864 			BUG_ON(ret < 0);
865 		}
866 		WRITE_ONCE(poke->tailcall_target_stable, true);
867 		mutex_unlock(&array->aux->poke_mutex);
868 	}
869 }
870 
871 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
872 			   u32 dst_reg, const u32 imm32)
873 {
874 	u8 *prog = *pprog;
875 	u8 b1, b2, b3;
876 
877 	/*
878 	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
879 	 * (which zero-extends imm32) to save 2 bytes.
880 	 */
881 	if (sign_propagate && (s32)imm32 < 0) {
882 		/* 'mov %rax, imm32' sign extends imm32 */
883 		b1 = add_1mod(0x48, dst_reg);
884 		b2 = 0xC7;
885 		b3 = 0xC0;
886 		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
887 		goto done;
888 	}
889 
890 	/*
891 	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
892 	 * to save 3 bytes.
893 	 */
894 	if (imm32 == 0) {
895 		if (is_ereg(dst_reg))
896 			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
897 		b2 = 0x31; /* xor */
898 		b3 = 0xC0;
899 		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
900 		goto done;
901 	}
902 
903 	/* mov %eax, imm32 */
904 	if (is_ereg(dst_reg))
905 		EMIT1(add_1mod(0x40, dst_reg));
906 	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
907 done:
908 	*pprog = prog;
909 }
910 
911 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
912 			   const u32 imm32_hi, const u32 imm32_lo)
913 {
914 	u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo;
915 	u8 *prog = *pprog;
916 
917 	if (is_uimm32(imm64)) {
918 		/*
919 		 * For emitting a plain u32, where the sign bit must not be
920 		 * propagated, LLVM tends to load imm64 over mov32
921 		 * directly, so save a couple of bytes by just doing
922 		 * 'mov %eax, imm32' instead.
923 		 */
924 		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
925 	} else if (is_simm32(imm64)) {
926 		emit_mov_imm32(&prog, true, dst_reg, imm32_lo);
927 	} else {
928 		/* movabsq rax, imm64 */
929 		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
930 		EMIT(imm32_lo, 4);
931 		EMIT(imm32_hi, 4);
932 	}
933 
934 	*pprog = prog;
935 }
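/*
 * Worked examples (editor's addition, not part of the kernel source),
 * taking dst_reg = BPF_REG_0 (rax): imm64 = 5 fits in u32, so
 * "mov eax, 5" (b8 05 00 00 00) is enough; imm64 = -1 only fits as a
 * sign-extended s32, giving "mov rax, -1" (48 c7 c0 ff ff ff ff); anything
 * else falls back to the 10-byte "movabs rax, imm64" (48 b8 + 8 bytes).
 */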
936 
937 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
938 {
939 	u8 *prog = *pprog;
940 
941 	if (is64) {
942 		/* mov dst, src */
943 		EMIT_mov(dst_reg, src_reg);
944 	} else {
945 		/* mov32 dst, src */
946 		if (is_ereg(dst_reg) || is_ereg(src_reg))
947 			EMIT1(add_2mod(0x40, dst_reg, src_reg));
948 		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
949 	}
950 
951 	*pprog = prog;
952 }
953 
954 static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
955 			   u32 src_reg)
956 {
957 	u8 *prog = *pprog;
958 
959 	if (is64) {
960 		/* movs[b,w,l]q dst, src */
961 		if (num_bits == 8)
962 			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
963 			      add_2reg(0xC0, src_reg, dst_reg));
964 		else if (num_bits == 16)
965 			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
966 			      add_2reg(0xC0, src_reg, dst_reg));
967 		else if (num_bits == 32)
968 			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
969 			      add_2reg(0xC0, src_reg, dst_reg));
970 	} else {
971 		/* movs[b,w]l dst, src */
972 		if (num_bits == 8) {
973 			EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
974 			      add_2reg(0xC0, src_reg, dst_reg));
975 		} else if (num_bits == 16) {
976 			if (is_ereg(dst_reg) || is_ereg(src_reg))
977 				EMIT1(add_2mod(0x40, src_reg, dst_reg));
978 			EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
979 			      add_2reg(0xC0, src_reg, dst_reg));
980 		}
981 	}
982 
983 	*pprog = prog;
984 }
985 
986 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
987 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
988 {
989 	u8 *prog = *pprog;
990 
991 	if (is_imm8(off)) {
992 		/* 1-byte signed displacement.
993 		 *
994 		 * If off == 0 we could skip this and save one extra byte, but
995 		 * the special case of x86 R13, which always needs an offset, is
996 		 * not worth the hassle.
997 		 */
998 		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
999 	} else {
1000 		/* 4-byte signed displacement */
1001 		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
1002 	}
1003 	*pprog = prog;
1004 }
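/*
 * Worked example (editor's addition, not part of the kernel source):
 * emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_1, 8) emits 48 8b 47 08, i.e.
 * "mov rax, qword ptr [rdi + 8]": REX.W 0x48, opcode 0x8B, then this helper
 * supplies ModRM 0x47 (mod=01, reg=rax, rm=rdi) plus the disp8 0x08. An
 * offset outside [-128, 127] would instead use the mod=10 form with a
 * 4-byte displacement.
 */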
1005 
1006 static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off)
1007 {
1008 	u8 *prog = *pprog;
1009 
1010 	if (is_imm8(off)) {
1011 		EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
1012 	} else {
1013 		EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
1014 	}
1015 	*pprog = prog;
1016 }
1017 
1018 /*
1019  * Emit a REX byte if it will be necessary to address these registers
1020  */
1021 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
1022 {
1023 	u8 *prog = *pprog;
1024 
1025 	if (is64)
1026 		EMIT1(add_2mod(0x48, dst_reg, src_reg));
1027 	else if (is_ereg(dst_reg) || is_ereg(src_reg))
1028 		EMIT1(add_2mod(0x40, dst_reg, src_reg));
1029 	*pprog = prog;
1030 }
1031 
1032 /*
1033  * Similar version of maybe_emit_mod() for a single register
1034  */
1035 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
1036 {
1037 	u8 *prog = *pprog;
1038 
1039 	if (is64)
1040 		EMIT1(add_1mod(0x48, reg));
1041 	else if (is_ereg(reg))
1042 		EMIT1(add_1mod(0x40, reg));
1043 	*pprog = prog;
1044 }
1045 
1046 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1047 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1048 {
1049 	u8 *prog = *pprog;
1050 
1051 	switch (size) {
1052 	case BPF_B:
1053 		/* Emit 'movzx rax, byte ptr [rax + off]' */
1054 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
1055 		break;
1056 	case BPF_H:
1057 		/* Emit 'movzx rax, word ptr [rax + off]' */
1058 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
1059 		break;
1060 	case BPF_W:
1061 		/* Emit 'mov eax, dword ptr [rax+0x14]' */
1062 		if (is_ereg(dst_reg) || is_ereg(src_reg))
1063 			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
1064 		else
1065 			EMIT1(0x8B);
1066 		break;
1067 	case BPF_DW:
1068 		/* Emit 'mov rax, qword ptr [rax+0x14]' */
1069 		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
1070 		break;
1071 	}
1072 	emit_insn_suffix(&prog, src_reg, dst_reg, off);
1073 	*pprog = prog;
1074 }
1075 
1076 /* LDSX: dst_reg = *(s8*)(src_reg + off) */
1077 static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1078 {
1079 	u8 *prog = *pprog;
1080 
1081 	switch (size) {
1082 	case BPF_B:
1083 		/* Emit 'movsx rax, byte ptr [rax + off]' */
1084 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
1085 		break;
1086 	case BPF_H:
1087 		/* Emit 'movsx rax, word ptr [rax + off]' */
1088 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
1089 		break;
1090 	case BPF_W:
1091 		/* Emit 'movsx rax, dword ptr [rax+0x14]' */
1092 		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
1093 		break;
1094 	}
1095 	emit_insn_suffix(&prog, src_reg, dst_reg, off);
1096 	*pprog = prog;
1097 }
1098 
1099 static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1100 {
1101 	u8 *prog = *pprog;
1102 
1103 	switch (size) {
1104 	case BPF_B:
1105 		/* movzx rax, byte ptr [rax + r12 + off] */
1106 		EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6);
1107 		break;
1108 	case BPF_H:
1109 		/* movzx rax, word ptr [rax + r12 + off] */
1110 		EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7);
1111 		break;
1112 	case BPF_W:
1113 		/* mov eax, dword ptr [rax + r12 + off] */
1114 		EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B);
1115 		break;
1116 	case BPF_DW:
1117 		/* mov rax, qword ptr [rax + r12 + off] */
1118 		EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B);
1119 		break;
1120 	}
1121 	emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
1122 	*pprog = prog;
1123 }
1124 
1125 static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1126 {
1127 	emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
1128 }
1129 
1130 /* STX: *(u8*)(dst_reg + off) = src_reg */
1131 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1132 {
1133 	u8 *prog = *pprog;
1134 
1135 	switch (size) {
1136 	case BPF_B:
1137 		/* Emit 'mov byte ptr [rax + off], al' */
1138 		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
1139 			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
1140 			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
1141 		else
1142 			EMIT1(0x88);
1143 		break;
1144 	case BPF_H:
1145 		if (is_ereg(dst_reg) || is_ereg(src_reg))
1146 			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
1147 		else
1148 			EMIT2(0x66, 0x89);
1149 		break;
1150 	case BPF_W:
1151 		if (is_ereg(dst_reg) || is_ereg(src_reg))
1152 			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
1153 		else
1154 			EMIT1(0x89);
1155 		break;
1156 	case BPF_DW:
1157 		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
1158 		break;
1159 	}
1160 	emit_insn_suffix(&prog, dst_reg, src_reg, off);
1161 	*pprog = prog;
1162 }
1163 
1164 /* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */
1165 static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1166 {
1167 	u8 *prog = *pprog;
1168 
1169 	switch (size) {
1170 	case BPF_B:
1171 		/* mov byte ptr [rax + r12 + off], al */
1172 		EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88);
1173 		break;
1174 	case BPF_H:
1175 		/* mov word ptr [rax + r12 + off], ax */
1176 		EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
1177 		break;
1178 	case BPF_W:
1179 		/* mov dword ptr [rax + r12 + off], eax */
1180 		EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
1181 		break;
1182 	case BPF_DW:
1183 		/* mov qword ptr [rax + r12 + off], rax */
1184 		EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89);
1185 		break;
1186 	}
1187 	emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1188 	*pprog = prog;
1189 }
1190 
1191 static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1192 {
1193 	emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
1194 }
1195 
1196 /* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */
1197 static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm)
1198 {
1199 	u8 *prog = *pprog;
1200 
1201 	switch (size) {
1202 	case BPF_B:
1203 		/* mov byte ptr [rax + r12 + off], imm8 */
1204 		EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6);
1205 		break;
1206 	case BPF_H:
1207 		/* mov word ptr [rax + r12 + off], imm16 */
1208 		EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
1209 		break;
1210 	case BPF_W:
1211 		/* mov dword ptr [rax + r12 + off], imm32 */
1212 		EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
1213 		break;
1214 	case BPF_DW:
1215 		/* mov qword ptr [rax + r12 + off], imm32 */
1216 		EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7);
1217 		break;
1218 	}
1219 	emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off);
1220 	EMIT(imm, bpf_size_to_x86_bytes(size));
1221 	*pprog = prog;
1222 }
1223 
1224 static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
1225 {
1226 	emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
1227 }
1228 
1229 static int emit_atomic(u8 **pprog, u8 atomic_op,
1230 		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
1231 {
1232 	u8 *prog = *pprog;
1233 
1234 	EMIT1(0xF0); /* lock prefix */
1235 
1236 	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
1237 
1238 	/* emit opcode */
1239 	switch (atomic_op) {
1240 	case BPF_ADD:
1241 	case BPF_AND:
1242 	case BPF_OR:
1243 	case BPF_XOR:
1244 		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
1245 		EMIT1(simple_alu_opcodes[atomic_op]);
1246 		break;
1247 	case BPF_ADD | BPF_FETCH:
1248 		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
1249 		EMIT2(0x0F, 0xC1);
1250 		break;
1251 	case BPF_XCHG:
1252 		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
1253 		EMIT1(0x87);
1254 		break;
1255 	case BPF_CMPXCHG:
1256 		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
1257 		EMIT2(0x0F, 0xB1);
1258 		break;
1259 	default:
1260 		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1261 		return -EFAULT;
1262 	}
1263 
1264 	emit_insn_suffix(&prog, dst_reg, src_reg, off);
1265 
1266 	*pprog = prog;
1267 	return 0;
1268 }
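/*
 * Worked example (editor's addition, not part of the kernel source):
 * emit_atomic(&prog, BPF_ADD | BPF_FETCH, BPF_REG_1, BPF_REG_0, 0, BPF_DW)
 * emits f0 48 0f c1 47 00, i.e. "lock xadd qword ptr [rdi + 0], rax",
 * which atomically adds rax to the memory word and leaves the old value in
 * rax, matching the BPF atomic_fetch_add semantics noted above.
 */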
1269 
1270 static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
1271 			     u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1272 {
1273 	u8 *prog = *pprog;
1274 
1275 	EMIT1(0xF0); /* lock prefix */
1276 	switch (size) {
1277 	case BPF_W:
1278 		EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg));
1279 		break;
1280 	case BPF_DW:
1281 		EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
1282 		break;
1283 	default:
1284 		pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n");
1285 		return -EFAULT;
1286 	}
1287 
1288 	/* emit opcode */
1289 	switch (atomic_op) {
1290 	case BPF_ADD:
1291 	case BPF_AND:
1292 	case BPF_OR:
1293 	case BPF_XOR:
1294 		/* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */
1295 		EMIT1(simple_alu_opcodes[atomic_op]);
1296 		break;
1297 	case BPF_ADD | BPF_FETCH:
1298 		/* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */
1299 		EMIT2(0x0F, 0xC1);
1300 		break;
1301 	case BPF_XCHG:
1302 		/* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */
1303 		EMIT1(0x87);
1304 		break;
1305 	case BPF_CMPXCHG:
1306 		/* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */
1307 		EMIT2(0x0F, 0xB1);
1308 		break;
1309 	default:
1310 		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1311 		return -EFAULT;
1312 	}
1313 	emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1314 	*pprog = prog;
1315 	return 0;
1316 }
1317 
1318 #define DONT_CLEAR 1
1319 
1320 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
1321 {
1322 	u32 reg = x->fixup >> 8;
1323 
1324 	/* jump over faulting load and clear dest register */
1325 	if (reg != DONT_CLEAR)
1326 		*(unsigned long *)((void *)regs + reg) = 0;
1327 	regs->ip += x->fixup & 0xff;
1328 	return true;
1329 }
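/*
 * Editor's note (assumption inferred from this handler, not part of the
 * kernel source): the JIT packs the extable fixup as
 * (pt_regs offset of the destination register << 8) | length of the
 * faulting insn, with DONT_CLEAR in the upper bits when no register should
 * be zeroed. The handler above undoes that packing: it zeroes the
 * destination via its pt_regs offset and advances rip past the faulting
 * instruction.
 */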
1330 
1331 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
1332 			     bool *regs_used)
1333 {
1334 	int i;
1335 
1336 	for (i = 1; i <= insn_cnt; i++, insn++) {
1337 		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
1338 			regs_used[0] = true;
1339 		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
1340 			regs_used[1] = true;
1341 		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
1342 			regs_used[2] = true;
1343 		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
1344 			regs_used[3] = true;
1345 	}
1346 }
1347 
1348 /* emit the 3-byte VEX prefix
1349  *
1350  * r: same as rex.r, extra bit for ModRM reg field
1351  * x: same as rex.x, extra bit for SIB index field
1352  * b: same as rex.b, extra bit for ModRM r/m, or SIB base
1353  * m: opcode map select, encoding escape bytes e.g. 0x0f38
1354  * w: same as rex.w (32 bit or 64 bit) or opcode specific
1355  * src_reg2: additional source reg (encoded as BPF reg)
1356  * l: vector length (128 bit or 256 bit) or reserved
1357  * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
1358  */
1359 static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
1360 		      bool w, u8 src_reg2, bool l, u8 pp)
1361 {
1362 	u8 *prog = *pprog;
1363 	const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
1364 	u8 b1, b2;
1365 	u8 vvvv = reg2hex[src_reg2];
1366 
1367 	/* reg2hex gives only the lower 3 bit of vvvv */
1368 	if (is_ereg(src_reg2))
1369 		vvvv |= 1 << 3;
1370 
1371 	/*
1372 	 * 2nd byte of 3-byte VEX prefix
1373 	 * ~ means bit inverted encoding
1374 	 *
1375 	 *    7                           0
1376 	 *  +---+---+---+---+---+---+---+---+
1377 	 *  |~R |~X |~B |         m         |
1378 	 *  +---+---+---+---+---+---+---+---+
1379 	 */
1380 	b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
1381 	/*
1382 	 * 3rd byte of 3-byte VEX prefix
1383 	 *
1384 	 *    7                           0
1385 	 *  +---+---+---+---+---+---+---+---+
1386 	 *  | W |     ~vvvv     | L |   pp  |
1387 	 *  +---+---+---+---+---+---+---+---+
1388 	 */
1389 	b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
1390 
1391 	EMIT3(b0, b1, b2);
1392 	*pprog = prog;
1393 }
1394 
1395 /* emit BMI2 shift instruction */
1396 static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
1397 {
1398 	u8 *prog = *pprog;
1399 	bool r = is_ereg(dst_reg);
1400 	u8 m = 2; /* escape code 0f38 */
1401 
1402 	emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
1403 	EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
1404 	*pprog = prog;
1405 }
1406 
1407 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
1408 
1409 #define __LOAD_TCC_PTR(off)			\
1410 	EMIT3_off32(0x48, 0x8B, 0x85, off)
1411 /* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */
1412 #define LOAD_TAIL_CALL_CNT_PTR(stack)				\
1413 	__LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))
1414 
1415 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
1416 		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
1417 {
1418 	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
1419 	struct bpf_insn *insn = bpf_prog->insnsi;
1420 	bool callee_regs_used[4] = {};
1421 	int insn_cnt = bpf_prog->len;
1422 	bool seen_exit = false;
1423 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
1424 	u64 arena_vm_start, user_vm_start;
1425 	int i, excnt = 0;
1426 	int ilen, proglen = 0;
1427 	u8 *prog = temp;
1428 	int err;
1429 
1430 	arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
1431 	user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);
1432 
1433 	detect_reg_usage(insn, insn_cnt, callee_regs_used);
1434 
1435 	emit_prologue(&prog, bpf_prog->aux->stack_depth,
1436 		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
1437 		      bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
1438 	/* Exception callback will clobber callee regs for its own use, and
1439 	 * restore the original callee regs from main prog's stack frame.
1440 	 */
1441 	if (bpf_prog->aux->exception_boundary) {
1442 		/* We also need to save r12, which is not mapped to any BPF
1443 		 * register, as we throw after entry into the kernel, which may
1444 		 * overwrite r12.
1445 		 */
1446 		push_r12(&prog);
1447 		push_callee_regs(&prog, all_callee_regs_used);
1448 	} else {
1449 		if (arena_vm_start)
1450 			push_r12(&prog);
1451 		push_callee_regs(&prog, callee_regs_used);
1452 	}
1453 	if (arena_vm_start)
1454 		emit_mov_imm64(&prog, X86_REG_R12,
1455 			       arena_vm_start >> 32, (u32) arena_vm_start);
1456 
1457 	ilen = prog - temp;
1458 	if (rw_image)
1459 		memcpy(rw_image + proglen, temp, ilen);
1460 	proglen += ilen;
1461 	addrs[0] = proglen;
1462 	prog = temp;
1463 
1464 	for (i = 1; i <= insn_cnt; i++, insn++) {
1465 		const s32 imm32 = insn->imm;
1466 		u32 dst_reg = insn->dst_reg;
1467 		u32 src_reg = insn->src_reg;
1468 		u8 b2 = 0, b3 = 0;
1469 		u8 *start_of_ldx;
1470 		s64 jmp_offset;
1471 		s16 insn_off;
1472 		u8 jmp_cond;
1473 		u8 *func;
1474 		int nops;
1475 
1476 		switch (insn->code) {
1477 			/* ALU */
1478 		case BPF_ALU | BPF_ADD | BPF_X:
1479 		case BPF_ALU | BPF_SUB | BPF_X:
1480 		case BPF_ALU | BPF_AND | BPF_X:
1481 		case BPF_ALU | BPF_OR | BPF_X:
1482 		case BPF_ALU | BPF_XOR | BPF_X:
1483 		case BPF_ALU64 | BPF_ADD | BPF_X:
1484 		case BPF_ALU64 | BPF_SUB | BPF_X:
1485 		case BPF_ALU64 | BPF_AND | BPF_X:
1486 		case BPF_ALU64 | BPF_OR | BPF_X:
1487 		case BPF_ALU64 | BPF_XOR | BPF_X:
1488 			maybe_emit_mod(&prog, dst_reg, src_reg,
1489 				       BPF_CLASS(insn->code) == BPF_ALU64);
1490 			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
1491 			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
1492 			break;
1493 
1494 		case BPF_ALU64 | BPF_MOV | BPF_X:
1495 			if (insn_is_cast_user(insn)) {
1496 				if (dst_reg != src_reg)
1497 					/* 32-bit mov */
1498 					emit_mov_reg(&prog, false, dst_reg, src_reg);
1499 				/* shl dst_reg, 32 */
1500 				maybe_emit_1mod(&prog, dst_reg, true);
1501 				EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32);
1502 
1503 				/* or dst_reg, user_vm_start */
1504 				maybe_emit_1mod(&prog, dst_reg, true);
1505 				if (is_axreg(dst_reg))
1506 					EMIT1_off32(0x0D,  user_vm_start >> 32);
1507 				else
1508 					EMIT2_off32(0x81, add_1reg(0xC8, dst_reg),  user_vm_start >> 32);
1509 
1510 				/* rol dst_reg, 32 */
1511 				maybe_emit_1mod(&prog, dst_reg, true);
1512 				EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32);
1513 
1514 				/* xor r11, r11 */
1515 				EMIT3(0x4D, 0x31, 0xDB);
1516 
1517 				/* test dst_reg32, dst_reg32; check if lower 32 bits are zero */
1518 				maybe_emit_mod(&prog, dst_reg, dst_reg, false);
1519 				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1520 
1521 				/* cmove r11, dst_reg; if so, set dst_reg to zero */
1522 				/* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */
1523 				maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
1524 				EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
1525 				break;
1526 			} else if (insn_is_mov_percpu_addr(insn)) {
1527 				/* mov <dst>, <src> (if necessary) */
1528 				EMIT_mov(dst_reg, src_reg);
1529 #ifdef CONFIG_SMP
1530 				/* add <dst>, gs:[<off>] */
1531 				EMIT2(0x65, add_1mod(0x48, dst_reg));
1532 				EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25);
1533 				EMIT((u32)(unsigned long)&this_cpu_off, 4);
1534 #endif
1535 				break;
1536 			}
1537 			fallthrough;
1538 		case BPF_ALU | BPF_MOV | BPF_X:
1539 			if (insn->off == 0)
1540 				emit_mov_reg(&prog,
1541 					     BPF_CLASS(insn->code) == BPF_ALU64,
1542 					     dst_reg, src_reg);
1543 			else
1544 				emit_movsx_reg(&prog, insn->off,
1545 					       BPF_CLASS(insn->code) == BPF_ALU64,
1546 					       dst_reg, src_reg);
1547 			break;
1548 
1549 			/* neg dst */
1550 		case BPF_ALU | BPF_NEG:
1551 		case BPF_ALU64 | BPF_NEG:
1552 			maybe_emit_1mod(&prog, dst_reg,
1553 					BPF_CLASS(insn->code) == BPF_ALU64);
1554 			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
1555 			break;
1556 
1557 		case BPF_ALU | BPF_ADD | BPF_K:
1558 		case BPF_ALU | BPF_SUB | BPF_K:
1559 		case BPF_ALU | BPF_AND | BPF_K:
1560 		case BPF_ALU | BPF_OR | BPF_K:
1561 		case BPF_ALU | BPF_XOR | BPF_K:
1562 		case BPF_ALU64 | BPF_ADD | BPF_K:
1563 		case BPF_ALU64 | BPF_SUB | BPF_K:
1564 		case BPF_ALU64 | BPF_AND | BPF_K:
1565 		case BPF_ALU64 | BPF_OR | BPF_K:
1566 		case BPF_ALU64 | BPF_XOR | BPF_K:
1567 			maybe_emit_1mod(&prog, dst_reg,
1568 					BPF_CLASS(insn->code) == BPF_ALU64);
1569 
1570 			/*
1571 			 * b3 holds 'normal' opcode, b2 short form only valid
1572 			 * in case dst is eax/rax.
1573 			 */
1574 			switch (BPF_OP(insn->code)) {
1575 			case BPF_ADD:
1576 				b3 = 0xC0;
1577 				b2 = 0x05;
1578 				break;
1579 			case BPF_SUB:
1580 				b3 = 0xE8;
1581 				b2 = 0x2D;
1582 				break;
1583 			case BPF_AND:
1584 				b3 = 0xE0;
1585 				b2 = 0x25;
1586 				break;
1587 			case BPF_OR:
1588 				b3 = 0xC8;
1589 				b2 = 0x0D;
1590 				break;
1591 			case BPF_XOR:
1592 				b3 = 0xF0;
1593 				b2 = 0x35;
1594 				break;
1595 			}
1596 
1597 			if (is_imm8(imm32))
1598 				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1599 			else if (is_axreg(dst_reg))
1600 				EMIT1_off32(b2, imm32);
1601 			else
1602 				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1603 			break;
1604 
1605 		case BPF_ALU64 | BPF_MOV | BPF_K:
1606 		case BPF_ALU | BPF_MOV | BPF_K:
1607 			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1608 				       dst_reg, imm32);
1609 			break;
1610 
1611 		case BPF_LD | BPF_IMM | BPF_DW:
1612 			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1613 			insn++;
1614 			i++;
1615 			break;
1616 
1617 			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1618 		case BPF_ALU | BPF_MOD | BPF_X:
1619 		case BPF_ALU | BPF_DIV | BPF_X:
1620 		case BPF_ALU | BPF_MOD | BPF_K:
1621 		case BPF_ALU | BPF_DIV | BPF_K:
1622 		case BPF_ALU64 | BPF_MOD | BPF_X:
1623 		case BPF_ALU64 | BPF_DIV | BPF_X:
1624 		case BPF_ALU64 | BPF_MOD | BPF_K:
1625 		case BPF_ALU64 | BPF_DIV | BPF_K: {
1626 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1627 
1628 			if (dst_reg != BPF_REG_0)
1629 				EMIT1(0x50); /* push rax */
1630 			if (dst_reg != BPF_REG_3)
1631 				EMIT1(0x52); /* push rdx */
1632 
1633 			if (BPF_SRC(insn->code) == BPF_X) {
1634 				if (src_reg == BPF_REG_0 ||
1635 				    src_reg == BPF_REG_3) {
1636 					/* mov r11, src_reg */
1637 					EMIT_mov(AUX_REG, src_reg);
1638 					src_reg = AUX_REG;
1639 				}
1640 			} else {
1641 				/* mov r11, imm32 */
1642 				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1643 				src_reg = AUX_REG;
1644 			}
1645 
1646 			if (dst_reg != BPF_REG_0)
1647 				/* mov rax, dst_reg */
1648 				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1649 
1650 			if (insn->off == 0) {
1651 				/*
1652 				 * xor edx, edx
1653 				 * equivalent to 'xor rdx, rdx', but one byte less
1654 				 */
1655 				EMIT2(0x31, 0xd2);
1656 
1657 				/* div src_reg */
1658 				maybe_emit_1mod(&prog, src_reg, is64);
1659 				EMIT2(0xF7, add_1reg(0xF0, src_reg));
1660 			} else {
1661 				if (BPF_CLASS(insn->code) == BPF_ALU)
1662 					EMIT1(0x99); /* cdq */
1663 				else
1664 					EMIT2(0x48, 0x99); /* cqo */
1665 
1666 				/* idiv src_reg */
1667 				maybe_emit_1mod(&prog, src_reg, is64);
1668 				EMIT2(0xF7, add_1reg(0xF8, src_reg));
1669 			}
1670 
1671 			if (BPF_OP(insn->code) == BPF_MOD &&
1672 			    dst_reg != BPF_REG_3)
1673 				/* mov dst_reg, rdx */
1674 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1675 			else if (BPF_OP(insn->code) == BPF_DIV &&
1676 				 dst_reg != BPF_REG_0)
1677 				/* mov dst_reg, rax */
1678 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1679 
1680 			if (dst_reg != BPF_REG_3)
1681 				EMIT1(0x5A); /* pop rdx */
1682 			if (dst_reg != BPF_REG_0)
1683 				EMIT1(0x58); /* pop rax */
1684 			break;
1685 		}
1686 
1687 		case BPF_ALU | BPF_MUL | BPF_K:
1688 		case BPF_ALU64 | BPF_MUL | BPF_K:
1689 			maybe_emit_mod(&prog, dst_reg, dst_reg,
1690 				       BPF_CLASS(insn->code) == BPF_ALU64);
1691 
1692 			if (is_imm8(imm32))
1693 				/* imul dst_reg, dst_reg, imm8 */
1694 				EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1695 				      imm32);
1696 			else
1697 				/* imul dst_reg, dst_reg, imm32 */
1698 				EMIT2_off32(0x69,
1699 					    add_2reg(0xC0, dst_reg, dst_reg),
1700 					    imm32);
1701 			break;
1702 
1703 		case BPF_ALU | BPF_MUL | BPF_X:
1704 		case BPF_ALU64 | BPF_MUL | BPF_X:
1705 			maybe_emit_mod(&prog, src_reg, dst_reg,
1706 				       BPF_CLASS(insn->code) == BPF_ALU64);
1707 
1708 			/* imul dst_reg, src_reg */
1709 			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1710 			break;
1711 
1712 			/* Shifts */
1713 		case BPF_ALU | BPF_LSH | BPF_K:
1714 		case BPF_ALU | BPF_RSH | BPF_K:
1715 		case BPF_ALU | BPF_ARSH | BPF_K:
1716 		case BPF_ALU64 | BPF_LSH | BPF_K:
1717 		case BPF_ALU64 | BPF_RSH | BPF_K:
1718 		case BPF_ALU64 | BPF_ARSH | BPF_K:
1719 			maybe_emit_1mod(&prog, dst_reg,
1720 					BPF_CLASS(insn->code) == BPF_ALU64);
1721 
1722 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1723 			if (imm32 == 1)
1724 				EMIT2(0xD1, add_1reg(b3, dst_reg));
1725 			else
1726 				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1727 			break;
1728 
1729 		case BPF_ALU | BPF_LSH | BPF_X:
1730 		case BPF_ALU | BPF_RSH | BPF_X:
1731 		case BPF_ALU | BPF_ARSH | BPF_X:
1732 		case BPF_ALU64 | BPF_LSH | BPF_X:
1733 		case BPF_ALU64 | BPF_RSH | BPF_X:
1734 		case BPF_ALU64 | BPF_ARSH | BPF_X:
1735 			/* BMI2 shifts aren't better when shift count is already in rcx */
1736 			if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
1737 				/* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
1738 				bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
1739 				u8 op;
1740 
1741 				switch (BPF_OP(insn->code)) {
1742 				case BPF_LSH:
1743 					op = 1; /* prefix 0x66 */
1744 					break;
1745 				case BPF_RSH:
1746 					op = 3; /* prefix 0xf2 */
1747 					break;
1748 				case BPF_ARSH:
1749 					op = 2; /* prefix 0xf3 */
1750 					break;
1751 				}
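				/* op is the VEX "pp" prefix selector: 1 -> 0x66
				 * (shlx), 2 -> 0xf3 (sarx), 3 -> 0xf2 (shrx).
				 * emit_shiftx() is assumed to fold it into the
				 * VEX prefix it emits.
				 */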
1752 
1753 				emit_shiftx(&prog, dst_reg, src_reg, w, op);
1754 
1755 				break;
1756 			}
1757 
1758 			if (src_reg != BPF_REG_4) { /* common case */
1759 				/* Check for bad case when dst_reg == rcx */
1760 				if (dst_reg == BPF_REG_4) {
1761 					/* mov r11, dst_reg */
1762 					EMIT_mov(AUX_REG, dst_reg);
1763 					dst_reg = AUX_REG;
1764 				} else {
1765 					EMIT1(0x51); /* push rcx */
1766 				}
1767 				/* mov rcx, src_reg */
1768 				EMIT_mov(BPF_REG_4, src_reg);
1769 			}
1770 
1771 			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1772 			maybe_emit_1mod(&prog, dst_reg,
1773 					BPF_CLASS(insn->code) == BPF_ALU64);
1774 
1775 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1776 			EMIT2(0xD3, add_1reg(b3, dst_reg));
1777 
1778 			if (src_reg != BPF_REG_4) {
1779 				if (insn->dst_reg == BPF_REG_4)
1780 					/* mov dst_reg, r11 */
1781 					EMIT_mov(insn->dst_reg, AUX_REG);
1782 				else
1783 					EMIT1(0x59); /* pop rcx */
1784 			}
1785 
1786 			break;
1787 
1788 		case BPF_ALU | BPF_END | BPF_FROM_BE:
1789 		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
1790 			switch (imm32) {
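			/* imm32 is the swap width in bits. For example, when
			 * dst_reg maps to x86 r8 the 32-bit swap below encodes
			 * as 41 0f c8 (bswap r8d); for eax it is 0f c8.
			 */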
1791 			case 16:
1792 				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
1793 				EMIT1(0x66);
1794 				if (is_ereg(dst_reg))
1795 					EMIT1(0x41);
1796 				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1797 
1798 				/* Emit 'movzwl eax, ax' */
1799 				if (is_ereg(dst_reg))
1800 					EMIT3(0x45, 0x0F, 0xB7);
1801 				else
1802 					EMIT2(0x0F, 0xB7);
1803 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1804 				break;
1805 			case 32:
1806 				/* Emit 'bswap eax' to swap lower 4 bytes */
1807 				if (is_ereg(dst_reg))
1808 					EMIT2(0x41, 0x0F);
1809 				else
1810 					EMIT1(0x0F);
1811 				EMIT1(add_1reg(0xC8, dst_reg));
1812 				break;
1813 			case 64:
1814 				/* Emit 'bswap rax' to swap 8 bytes */
1815 				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1816 				      add_1reg(0xC8, dst_reg));
1817 				break;
1818 			}
1819 			break;
1820 
1821 		case BPF_ALU | BPF_END | BPF_FROM_LE:
1822 			switch (imm32) {
1823 			case 16:
1824 				/*
1825 				 * Emit 'movzwl eax, ax' to zero extend 16-bit
1826 				 * into 64 bit
1827 				 */
1828 				if (is_ereg(dst_reg))
1829 					EMIT3(0x45, 0x0F, 0xB7);
1830 				else
1831 					EMIT2(0x0F, 0xB7);
1832 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1833 				break;
1834 			case 32:
1835 				/* Emit 'mov eax, eax' to clear upper 32-bits */
1836 				if (is_ereg(dst_reg))
1837 					EMIT1(0x45);
1838 				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1839 				break;
1840 			case 64:
1841 				/* nop */
1842 				break;
1843 			}
1844 			break;
1845 
1846 			/* speculation barrier */
1847 		case BPF_ST | BPF_NOSPEC:
1848 			EMIT_LFENCE();
1849 			break;
1850 
1851 			/* ST: *(u8*)(dst_reg + off) = imm */
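			/* The C6/C7 opcode emitted below is followed by a ModRM
			 * byte selecting dst_reg as the base: mod=01 with a disp8
			 * when the offset fits in 8 bits, otherwise mod=10 with a
			 * disp32. E.g. *(u32 *)(rdi + 8) = 1 is encoded as
			 * c7 47 08 01 00 00 00.
			 */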
1852 		case BPF_ST | BPF_MEM | BPF_B:
1853 			if (is_ereg(dst_reg))
1854 				EMIT2(0x41, 0xC6);
1855 			else
1856 				EMIT1(0xC6);
1857 			goto st;
1858 		case BPF_ST | BPF_MEM | BPF_H:
1859 			if (is_ereg(dst_reg))
1860 				EMIT3(0x66, 0x41, 0xC7);
1861 			else
1862 				EMIT2(0x66, 0xC7);
1863 			goto st;
1864 		case BPF_ST | BPF_MEM | BPF_W:
1865 			if (is_ereg(dst_reg))
1866 				EMIT2(0x41, 0xC7);
1867 			else
1868 				EMIT1(0xC7);
1869 			goto st;
1870 		case BPF_ST | BPF_MEM | BPF_DW:
1871 			EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1872 
1873 st:			if (is_imm8(insn->off))
1874 				EMIT2(add_1reg(0x40, dst_reg), insn->off);
1875 			else
1876 				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1877 
1878 			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1879 			break;
1880 
1881 			/* STX: *(u8*)(dst_reg + off) = src_reg */
1882 		case BPF_STX | BPF_MEM | BPF_B:
1883 		case BPF_STX | BPF_MEM | BPF_H:
1884 		case BPF_STX | BPF_MEM | BPF_W:
1885 		case BPF_STX | BPF_MEM | BPF_DW:
1886 			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1887 			break;
1888 
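			/* ST: *(u8*)(dst_reg + r12 + off) = imm
			 * (arena access: r12 is assumed to have been loaded with
			 * the arena base in the prologue when arena_vm_start is
			 * set; faults are fixed up via the extable entry below)
			 */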
1889 		case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
1890 		case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
1891 		case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
1892 		case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
1893 			start_of_ldx = prog;
1894 			emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm);
1895 			goto populate_extable;
1896 
1897 			/* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */
1898 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
1899 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
1900 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
1901 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
1902 		case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
1903 		case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
1904 		case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
1905 		case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
1906 			start_of_ldx = prog;
1907 			if (BPF_CLASS(insn->code) == BPF_LDX)
1908 				emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1909 			else
1910 				emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1911 populate_extable:
1912 			{
1913 				struct exception_table_entry *ex;
1914 				u8 *_insn = image + proglen + (start_of_ldx - temp);
1915 				s64 delta;
1916 
1917 				if (!bpf_prog->aux->extable)
1918 					break;
1919 
1920 				if (excnt >= bpf_prog->aux->num_exentries) {
1921 					pr_err("mem32 extable bug\n");
1922 					return -EFAULT;
1923 				}
1924 				ex = &bpf_prog->aux->extable[excnt++];
1925 
1926 				delta = _insn - (u8 *)&ex->insn;
1927 				/* switch ex to rw buffer for writes */
1928 				ex = (void *)rw_image + ((void *)ex - (void *)image);
1929 
1930 				ex->insn = delta;
1931 
1932 				ex->data = EX_TYPE_BPF;
1933 
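				/* Pack the emitted insn length into the low byte
				 * of fixup and the pt_regs register to clear (or
				 * DONT_CLEAR for stores) into the next byte, as
				 * described for the PROBE_MEM case further below.
				 */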
1934 				ex->fixup = (prog - start_of_ldx) |
1935 					((BPF_CLASS(insn->code) == BPF_LDX ? reg2pt_regs[dst_reg] : DONT_CLEAR) << 8);
1936 			}
1937 			break;
1938 
1939 			/* LDX: dst_reg = *(u8*)(src_reg + off) */
1940 		case BPF_LDX | BPF_MEM | BPF_B:
1941 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1942 		case BPF_LDX | BPF_MEM | BPF_H:
1943 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1944 		case BPF_LDX | BPF_MEM | BPF_W:
1945 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1946 		case BPF_LDX | BPF_MEM | BPF_DW:
1947 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1948 			/* LDXS: dst_reg = *(s8*)(src_reg + off) */
1949 		case BPF_LDX | BPF_MEMSX | BPF_B:
1950 		case BPF_LDX | BPF_MEMSX | BPF_H:
1951 		case BPF_LDX | BPF_MEMSX | BPF_W:
1952 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1953 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1954 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1955 			insn_off = insn->off;
1956 
1957 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
1958 			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
1959 				/* Conservatively check that src_reg + insn->off is a kernel address:
1960 				 *   src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
1961 				 *   and
1962 				 *   src_reg + insn->off < VSYSCALL_ADDR
1963 				 */
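				/* A single unsigned compare implements both bounds:
				 * (src_reg + insn->off) - VSYSCALL_ADDR wraps around
				 * for any address below VSYSCALL_ADDR, so it exceeds
				 * limit exactly when
				 * TASK_SIZE_MAX + PAGE_SIZE < src_reg + insn->off < VSYSCALL_ADDR.
				 */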
1964 
1965 				u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
1966 				u8 *end_of_jmp;
1967 
1968 				/* movabsq r10, VSYSCALL_ADDR */
1969 				emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
1970 					       (u32)(long)VSYSCALL_ADDR);
1971 
1972 				/* mov r11, src_reg */
1973 				EMIT_mov(AUX_REG, src_reg);
1974 
1975 				if (insn->off) {
1976 					/* add r11, insn->off */
1977 					maybe_emit_1mod(&prog, AUX_REG, true);
1978 					EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
1979 				}
1980 
1981 				/* sub r11, r10 */
1982 				maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
1983 				EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
1984 
1985 				/* movabsq r10, limit */
1986 				emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
1987 					       (u32)(long)limit);
1988 
1989 				/* cmp r11, r10 */
1990 				maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
1991 				EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
1992 
1993 				/* if unsigned '>', goto load */
1994 				EMIT2(X86_JA, 0);
1995 				end_of_jmp = prog;
1996 
1997 				/* xor dst_reg, dst_reg */
1998 				emit_mov_imm32(&prog, false, dst_reg, 0);
1999 				/* jmp byte_after_ldx */
2000 				EMIT2(0xEB, 0);
2001 
2002 				/* populate jmp_offset for JAE above to jump to start_of_ldx */
2003 				start_of_ldx = prog;
2004 				end_of_jmp[-1] = start_of_ldx - end_of_jmp;
2005 			}
2006 			if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
2007 			    BPF_MODE(insn->code) == BPF_MEMSX)
2008 				emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
2009 			else
2010 				emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
2011 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
2012 			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
2013 				struct exception_table_entry *ex;
2014 				u8 *_insn = image + proglen + (start_of_ldx - temp);
2015 				s64 delta;
2016 
2017 				/* populate jmp_offset for JMP above */
2018 				start_of_ldx[-1] = prog - start_of_ldx;
2019 
2020 				if (!bpf_prog->aux->extable)
2021 					break;
2022 
2023 				if (excnt >= bpf_prog->aux->num_exentries) {
2024 					pr_err("ex gen bug\n");
2025 					return -EFAULT;
2026 				}
2027 				ex = &bpf_prog->aux->extable[excnt++];
2028 
2029 				delta = _insn - (u8 *)&ex->insn;
2030 				if (!is_simm32(delta)) {
2031 					pr_err("extable->insn doesn't fit into 32-bit\n");
2032 					return -EFAULT;
2033 				}
2034 				/* switch ex to rw buffer for writes */
2035 				ex = (void *)rw_image + ((void *)ex - (void *)image);
2036 
2037 				ex->insn = delta;
2038 
2039 				ex->data = EX_TYPE_BPF;
2040 
2041 				if (dst_reg > BPF_REG_9) {
2042 					pr_err("verifier error\n");
2043 					return -EFAULT;
2044 				}
2045 				/*
2046 				 * Compute size of x86 insn and its target dest x86 register.
2047 				 * ex_handler_bpf() will use lower 8 bits to adjust
2048 				 * pt_regs->ip to jump over this x86 instruction
2049 				 * and upper bits to figure out which pt_regs to zero out.
2050 				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
2051 				 * of 4 bytes will be ignored and rbx will be zero inited.
2052 				 */
2053 				ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
2054 			}
2055 			break;
2056 
2057 		case BPF_STX | BPF_ATOMIC | BPF_W:
2058 		case BPF_STX | BPF_ATOMIC | BPF_DW:
2059 			if (insn->imm == (BPF_AND | BPF_FETCH) ||
2060 			    insn->imm == (BPF_OR | BPF_FETCH) ||
2061 			    insn->imm == (BPF_XOR | BPF_FETCH)) {
2062 				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
2063 				u32 real_src_reg = src_reg;
2064 				u32 real_dst_reg = dst_reg;
2065 				u8 *branch_target;
2066 
2067 				/*
2068 				 * Can't be implemented with a single x86 insn.
2069 				 * Need to do a CMPXCHG loop.
2070 				 */
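				/* Rough shape of the emitted loop (assuming
				 * neither operand is rax):
				 *   retry:
				 *     mov rax, [dst + off]        ; old value
				 *     mov r11, rax
				 *     or/and/xor r11, src         ; new value
				 *     lock cmpxchg [dst + off], r11
				 *     jne retry                   ; lost the race
				 *   mov src, rax                  ; BPF_FETCH result
				 */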
2071 
2072 				/* Will need RAX as a CMPXCHG operand so save R0 */
2073 				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
2074 				if (src_reg == BPF_REG_0)
2075 					real_src_reg = BPF_REG_AX;
2076 				if (dst_reg == BPF_REG_0)
2077 					real_dst_reg = BPF_REG_AX;
2078 
2079 				branch_target = prog;
2080 				/* Load old value */
2081 				emit_ldx(&prog, BPF_SIZE(insn->code),
2082 					 BPF_REG_0, real_dst_reg, insn->off);
2083 				/*
2084 				 * Perform the (commutative) operation locally,
2085 				 * put the result in the AUX_REG.
2086 				 */
2087 				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
2088 				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
2089 				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
2090 				      add_2reg(0xC0, AUX_REG, real_src_reg));
2091 				/* Attempt to swap in new value */
2092 				err = emit_atomic(&prog, BPF_CMPXCHG,
2093 						  real_dst_reg, AUX_REG,
2094 						  insn->off,
2095 						  BPF_SIZE(insn->code));
2096 				if (WARN_ON(err))
2097 					return err;
2098 				/*
2099 				 * ZF tells us whether we won the race. If it's
2100 				 * cleared we need to try again.
2101 				 */
2102 				EMIT2(X86_JNE, -(prog - branch_target) - 2);
2103 				/* Return the pre-modification value */
2104 				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
2105 				/* Restore R0 after clobbering RAX */
2106 				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
2107 				break;
2108 			}
2109 
2110 			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
2111 					  insn->off, BPF_SIZE(insn->code));
2112 			if (err)
2113 				return err;
2114 			break;
2115 
2116 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
2117 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
2118 			start_of_ldx = prog;
2119 			err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code),
2120 						dst_reg, src_reg, X86_REG_R12, insn->off);
2121 			if (err)
2122 				return err;
2123 			goto populate_extable;
2124 
2125 			/* call */
2126 		case BPF_JMP | BPF_CALL: {
2127 			u8 *ip = image + addrs[i - 1];
2128 
2129 			func = (u8 *) __bpf_call_base + imm32;
2130 			if (tail_call_reachable) {
2131 				LOAD_TAIL_CALL_CNT_PTR(bpf_prog->aux->stack_depth);
2132 				ip += 7;
2133 			}
2134 			if (!imm32)
2135 				return -EINVAL;
2136 			ip += x86_call_depth_emit_accounting(&prog, func, ip);
2137 			if (emit_call(&prog, func, ip))
2138 				return -EINVAL;
2139 			break;
2140 		}
2141 
2142 		case BPF_JMP | BPF_TAIL_CALL:
2143 			if (imm32)
2144 				emit_bpf_tail_call_direct(bpf_prog,
2145 							  &bpf_prog->aux->poke_tab[imm32 - 1],
2146 							  &prog, image + addrs[i - 1],
2147 							  callee_regs_used,
2148 							  bpf_prog->aux->stack_depth,
2149 							  ctx);
2150 			else
2151 				emit_bpf_tail_call_indirect(bpf_prog,
2152 							    &prog,
2153 							    callee_regs_used,
2154 							    bpf_prog->aux->stack_depth,
2155 							    image + addrs[i - 1],
2156 							    ctx);
2157 			break;
2158 
2159 			/* cond jump */
2160 		case BPF_JMP | BPF_JEQ | BPF_X:
2161 		case BPF_JMP | BPF_JNE | BPF_X:
2162 		case BPF_JMP | BPF_JGT | BPF_X:
2163 		case BPF_JMP | BPF_JLT | BPF_X:
2164 		case BPF_JMP | BPF_JGE | BPF_X:
2165 		case BPF_JMP | BPF_JLE | BPF_X:
2166 		case BPF_JMP | BPF_JSGT | BPF_X:
2167 		case BPF_JMP | BPF_JSLT | BPF_X:
2168 		case BPF_JMP | BPF_JSGE | BPF_X:
2169 		case BPF_JMP | BPF_JSLE | BPF_X:
2170 		case BPF_JMP32 | BPF_JEQ | BPF_X:
2171 		case BPF_JMP32 | BPF_JNE | BPF_X:
2172 		case BPF_JMP32 | BPF_JGT | BPF_X:
2173 		case BPF_JMP32 | BPF_JLT | BPF_X:
2174 		case BPF_JMP32 | BPF_JGE | BPF_X:
2175 		case BPF_JMP32 | BPF_JLE | BPF_X:
2176 		case BPF_JMP32 | BPF_JSGT | BPF_X:
2177 		case BPF_JMP32 | BPF_JSLT | BPF_X:
2178 		case BPF_JMP32 | BPF_JSGE | BPF_X:
2179 		case BPF_JMP32 | BPF_JSLE | BPF_X:
2180 			/* cmp dst_reg, src_reg */
2181 			maybe_emit_mod(&prog, dst_reg, src_reg,
2182 				       BPF_CLASS(insn->code) == BPF_JMP);
2183 			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
2184 			goto emit_cond_jmp;
2185 
2186 		case BPF_JMP | BPF_JSET | BPF_X:
2187 		case BPF_JMP32 | BPF_JSET | BPF_X:
2188 			/* test dst_reg, src_reg */
2189 			maybe_emit_mod(&prog, dst_reg, src_reg,
2190 				       BPF_CLASS(insn->code) == BPF_JMP);
2191 			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
2192 			goto emit_cond_jmp;
2193 
2194 		case BPF_JMP | BPF_JSET | BPF_K:
2195 		case BPF_JMP32 | BPF_JSET | BPF_K:
2196 			/* test dst_reg, imm32 */
2197 			maybe_emit_1mod(&prog, dst_reg,
2198 					BPF_CLASS(insn->code) == BPF_JMP);
2199 			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
2200 			goto emit_cond_jmp;
2201 
2202 		case BPF_JMP | BPF_JEQ | BPF_K:
2203 		case BPF_JMP | BPF_JNE | BPF_K:
2204 		case BPF_JMP | BPF_JGT | BPF_K:
2205 		case BPF_JMP | BPF_JLT | BPF_K:
2206 		case BPF_JMP | BPF_JGE | BPF_K:
2207 		case BPF_JMP | BPF_JLE | BPF_K:
2208 		case BPF_JMP | BPF_JSGT | BPF_K:
2209 		case BPF_JMP | BPF_JSLT | BPF_K:
2210 		case BPF_JMP | BPF_JSGE | BPF_K:
2211 		case BPF_JMP | BPF_JSLE | BPF_K:
2212 		case BPF_JMP32 | BPF_JEQ | BPF_K:
2213 		case BPF_JMP32 | BPF_JNE | BPF_K:
2214 		case BPF_JMP32 | BPF_JGT | BPF_K:
2215 		case BPF_JMP32 | BPF_JLT | BPF_K:
2216 		case BPF_JMP32 | BPF_JGE | BPF_K:
2217 		case BPF_JMP32 | BPF_JLE | BPF_K:
2218 		case BPF_JMP32 | BPF_JSGT | BPF_K:
2219 		case BPF_JMP32 | BPF_JSLT | BPF_K:
2220 		case BPF_JMP32 | BPF_JSGE | BPF_K:
2221 		case BPF_JMP32 | BPF_JSLE | BPF_K:
2222 			/* test dst_reg, dst_reg to save one extra byte */
2223 			if (imm32 == 0) {
2224 				maybe_emit_mod(&prog, dst_reg, dst_reg,
2225 					       BPF_CLASS(insn->code) == BPF_JMP);
2226 				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
2227 				goto emit_cond_jmp;
2228 			}
2229 
2230 			/* cmp dst_reg, imm8/32 */
2231 			maybe_emit_1mod(&prog, dst_reg,
2232 					BPF_CLASS(insn->code) == BPF_JMP);
2233 
2234 			if (is_imm8(imm32))
2235 				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
2236 			else
2237 				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
2238 
2239 emit_cond_jmp:		/* Convert BPF opcode to x86 */
2240 			switch (BPF_OP(insn->code)) {
2241 			case BPF_JEQ:
2242 				jmp_cond = X86_JE;
2243 				break;
2244 			case BPF_JSET:
2245 			case BPF_JNE:
2246 				jmp_cond = X86_JNE;
2247 				break;
2248 			case BPF_JGT:
2249 				/* GT is unsigned '>', JA in x86 */
2250 				jmp_cond = X86_JA;
2251 				break;
2252 			case BPF_JLT:
2253 				/* LT is unsigned '<', JB in x86 */
2254 				jmp_cond = X86_JB;
2255 				break;
2256 			case BPF_JGE:
2257 				/* GE is unsigned '>=', JAE in x86 */
2258 				jmp_cond = X86_JAE;
2259 				break;
2260 			case BPF_JLE:
2261 				/* LE is unsigned '<=', JBE in x86 */
2262 				jmp_cond = X86_JBE;
2263 				break;
2264 			case BPF_JSGT:
2265 				/* Signed '>', GT in x86 */
2266 				jmp_cond = X86_JG;
2267 				break;
2268 			case BPF_JSLT:
2269 				/* Signed '<', LT in x86 */
2270 				jmp_cond = X86_JL;
2271 				break;
2272 			case BPF_JSGE:
2273 				/* Signed '>=', GE in x86 */
2274 				jmp_cond = X86_JGE;
2275 				break;
2276 			case BPF_JSLE:
2277 				/* Signed '<=', LE in x86 */
2278 				jmp_cond = X86_JLE;
2279 				break;
2280 			default: /* to silence GCC warning */
2281 				return -EFAULT;
2282 			}
2283 			jmp_offset = addrs[i + insn->off] - addrs[i];
2284 			if (is_imm8_jmp_offset(jmp_offset)) {
2285 				if (jmp_padding) {
2286 					/* To keep the jmp_offset valid, the extra bytes are
2287 					 * padded before the jump insn, so we subtract the
2288 					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
2289 					 *
2290 					 * If the previous pass already emits an imm8
2291 					 * jmp_cond, then this BPF insn won't shrink, so
2292 					 * "nops" is 0.
2293 					 *
2294 					 * On the other hand, if the previous pass emits an
2295 					 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
2296 					 * keep the image from shrinking further.
2297 					 *
2298 					 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
2299 					 *     is 2 bytes, so the size difference is 4 bytes.
2300 					 */
2301 					nops = INSN_SZ_DIFF - 2;
2302 					if (nops != 0 && nops != 4) {
2303 						pr_err("unexpected jmp_cond padding: %d bytes\n",
2304 						       nops);
2305 						return -EFAULT;
2306 					}
2307 					emit_nops(&prog, nops);
2308 				}
2309 				EMIT2(jmp_cond, jmp_offset);
2310 			} else if (is_simm32(jmp_offset)) {
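				/* The near Jcc opcodes are 0x0F 0x80..0x8F, i.e. the
				 * short-form opcode (0x70..0x7F) plus 0x10, hence
				 * "jmp_cond + 0x10".
				 */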
2311 				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
2312 			} else {
2313 				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
2314 				return -EFAULT;
2315 			}
2316 
2317 			break;
2318 
2319 		case BPF_JMP | BPF_JA:
2320 		case BPF_JMP32 | BPF_JA:
2321 			if (BPF_CLASS(insn->code) == BPF_JMP) {
2322 				if (insn->off == -1)
2323 					/* -1 jmp instructions will always jump
2324 					 * backwards two bytes. Explicitly handling
2325 					 * this case avoids wasting too many passes
2326 					 * when there are long sequences of replaced
2327 					 * dead code.
2328 					 */
2329 					jmp_offset = -2;
2330 				else
2331 					jmp_offset = addrs[i + insn->off] - addrs[i];
2332 			} else {
2333 				if (insn->imm == -1)
2334 					jmp_offset = -2;
2335 				else
2336 					jmp_offset = addrs[i + insn->imm] - addrs[i];
2337 			}
2338 
2339 			if (!jmp_offset) {
2340 				/*
2341 				 * If jmp_padding is enabled, the extra nops will
2342 				 * be inserted. Otherwise, optimize out nop jumps.
2343 				 */
2344 				if (jmp_padding) {
2345 					/* There are 3 possible conditions.
2346 					 * (1) This BPF_JA is already optimized out in
2347 					 *     the previous run, so there is no need
2348 					 *     to pad any extra byte (0 byte).
2349 					 * (2) The previous pass emits an imm8 jmp,
2350 					 *     so we pad 2 bytes to match the previous
2351 					 *     insn size.
2352 					 * (3) Similarly, the previous pass emits an
2353 					 *     imm32 jmp, and 5 bytes is padded.
2354 					 */
2355 					nops = INSN_SZ_DIFF;
2356 					if (nops != 0 && nops != 2 && nops != 5) {
2357 						pr_err("unexpected nop jump padding: %d bytes\n",
2358 						       nops);
2359 						return -EFAULT;
2360 					}
2361 					emit_nops(&prog, nops);
2362 				}
2363 				break;
2364 			}
2365 emit_jmp:
2366 			if (is_imm8_jmp_offset(jmp_offset)) {
2367 				if (jmp_padding) {
2368 					/* To avoid breaking jmp_offset, the extra bytes
2369 					 * are padded before the actual jmp insn, so
2370 					 * 2 bytes is subtracted from INSN_SZ_DIFF.
2371 					 *
2372 					 * If the previous pass already emits an imm8
2373 					 * jmp, there is nothing to pad (0 byte).
2374 					 *
2375 					 * If it emits an imm32 jmp (5 bytes) previously
2376 					 * and now an imm8 jmp (2 bytes), then we pad
2377 					 * (5 - 2 = 3) bytes to stop the image from
2378 					 * shrinking further.
2379 					 */
2380 					nops = INSN_SZ_DIFF - 2;
2381 					if (nops != 0 && nops != 3) {
2382 						pr_err("unexpected jump padding: %d bytes\n",
2383 						       nops);
2384 						return -EFAULT;
2385 					}
2386 					emit_nops(&prog, nops);
2387 				}
2388 				EMIT2(0xEB, jmp_offset);
2389 			} else if (is_simm32(jmp_offset)) {
2390 				EMIT1_off32(0xE9, jmp_offset);
2391 			} else {
2392 				pr_err("jmp gen bug %llx\n", jmp_offset);
2393 				return -EFAULT;
2394 			}
2395 			break;
2396 
2397 		case BPF_JMP | BPF_EXIT:
2398 			if (seen_exit) {
2399 				jmp_offset = ctx->cleanup_addr - addrs[i];
2400 				goto emit_jmp;
2401 			}
2402 			seen_exit = true;
2403 			/* Update cleanup_addr */
2404 			ctx->cleanup_addr = proglen;
2405 			if (bpf_prog->aux->exception_boundary) {
2406 				pop_callee_regs(&prog, all_callee_regs_used);
2407 				pop_r12(&prog);
2408 			} else {
2409 				pop_callee_regs(&prog, callee_regs_used);
2410 				if (arena_vm_start)
2411 					pop_r12(&prog);
2412 			}
2413 			EMIT1(0xC9);         /* leave */
2414 			emit_return(&prog, image + addrs[i - 1] + (prog - temp));
2415 			break;
2416 
2417 		default:
2418 			/*
2419 			 * By design the x86-64 JIT should support all BPF instructions.
2420 			 * This error will be seen if a new instruction was added
2421 			 * to the interpreter, but not to the JIT, or if there is
2422 			 * junk in bpf_prog.
2423 			 */
2424 			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
2425 			return -EINVAL;
2426 		}
2427 
2428 		ilen = prog - temp;
2429 		if (ilen > BPF_MAX_INSN_SIZE) {
2430 			pr_err("bpf_jit: fatal insn size error\n");
2431 			return -EFAULT;
2432 		}
2433 
2434 		if (image) {
2435 			/*
2436 			 * When populating the image, assert that:
2437 			 *
2438 			 *  i) We do not write beyond the allocated space, and
2439 			 * ii) addrs[i] did not change from the prior run, in order
2440 			 *     to validate assumptions made for computing branch
2441 			 *     displacements.
2442 			 */
2443 			if (unlikely(proglen + ilen > oldproglen ||
2444 				     proglen + ilen != addrs[i])) {
2445 				pr_err("bpf_jit: fatal error\n");
2446 				return -EFAULT;
2447 			}
2448 			memcpy(rw_image + proglen, temp, ilen);
2449 		}
2450 		proglen += ilen;
2451 		addrs[i] = proglen;
2452 		prog = temp;
2453 	}
2454 
2455 	if (image && excnt != bpf_prog->aux->num_exentries) {
2456 		pr_err("extable is not populated\n");
2457 		return -EFAULT;
2458 	}
2459 	return proglen;
2460 }
2461 
2462 static void clean_stack_garbage(const struct btf_func_model *m,
2463 				u8 **pprog, int nr_stack_slots,
2464 				int stack_size)
2465 {
2466 	int arg_size, off;
2467 	u8 *prog;
2468 
2469 	/* Generally speaking, the compiler passes on-stack arguments
2470 	 * with a "push" instruction, which occupies 8 bytes on the
2471 	 * stack. In that case there are no garbage values when we copy
2472 	 * the arguments from the original stack frame to the current one
2473 	 * in BPF_DW-sized chunks.
2474 	 *
2475 	 * However, sometimes the compiler only allocates 4 bytes on the
2476 	 * stack for an argument. For now, this can only happen when
2477 	 * there is a single on-stack argument whose size is at most
2478 	 * 4 bytes. In that case, the upper 4 bytes of the slot where we
2479 	 * store the argument in the current stack frame contain garbage
2480 	 * values.
2481 	 *
2482 	 * arguments on the original stack:
2483 	 *
2484 	 * stack_arg_1(4-byte) xxx(4-byte)
2485 	 *
2486 	 * what we copy:
2487 	 *
2488 	 * stack_arg_1(8-byte): stack_arg_1(origin) xxx
2489 	 *
2490 	 * where xxx is the garbage value that we clean up here.
2491 	 */
2492 	if (nr_stack_slots != 1)
2493 		return;
2494 
2495 	/* the size of the last argument */
2496 	arg_size = m->arg_size[m->nr_args - 1];
2497 	if (arg_size <= 4) {
2498 		off = -(stack_size - 4);
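		/* e.g. with stack_size == 8 this emits
		 * 'mov DWORD PTR [rbp - 4], 0' (c7 45 fc 00 00 00 00),
		 * clearing the upper half of the 8-byte slot at rbp - 8.
		 */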
2499 		prog = *pprog;
2500 		/* mov DWORD PTR [rbp + off], 0 */
2501 		if (!is_imm8(off))
2502 			EMIT2_off32(0xC7, 0x85, off);
2503 		else
2504 			EMIT3(0xC7, 0x45, off);
2505 		EMIT(0, 4);
2506 		*pprog = prog;
2507 	}
2508 }
2509 
2510 /* get the count of the regs that are used to pass arguments */
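/*
 * For the foo()/foo_struct example in save_args() below this returns 6:
 * the five leading char arguments take one register each, the 12-byte
 * struct would need two more registers but no longer fits (5 + 2 > 6),
 * and the trailing char still fits in the sixth register.
 */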
2511 static int get_nr_used_regs(const struct btf_func_model *m)
2512 {
2513 	int i, arg_regs, nr_used_regs = 0;
2514 
2515 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2516 		arg_regs = (m->arg_size[i] + 7) / 8;
2517 		if (nr_used_regs + arg_regs <= 6)
2518 			nr_used_regs += arg_regs;
2519 
2520 		if (nr_used_regs >= 6)
2521 			break;
2522 	}
2523 
2524 	return nr_used_regs;
2525 }
2526 
2527 static void save_args(const struct btf_func_model *m, u8 **prog,
2528 		      int stack_size, bool for_call_origin)
2529 {
2530 	int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
2531 	int i, j;
2532 
2533 	/* Store function arguments to stack.
2534 	 * For a function that accepts two pointers the sequence will be:
2535 	 * mov QWORD PTR [rbp-0x10],rdi
2536 	 * mov QWORD PTR [rbp-0x8],rsi
2537 	 */
2538 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2539 		arg_regs = (m->arg_size[i] + 7) / 8;
2540 
2541 		/* According to Yonghong's analysis, a struct argument is
2542 		 * passed either entirely in registers or entirely on the
2543 		 * stack. Meanwhile, the compiler passes an argument in
2544 		 * registers whenever the remaining registers can hold it.
2545 		 *
2546 		 * The arguments can therefore end up out of order. For example:
2547 		 *
2548 		 * struct foo_struct {
2549 		 *     long a;
2550 		 *     int b;
2551 		 * };
2552 		 * int foo(char, char, char, char, char, struct foo_struct,
2553 		 *         char);
2554 		 *
2555 		 * arg1-5 and arg7 will be passed in registers, and arg6 will
2556 		 * be passed on the stack.
2557 		 */
2558 		if (nr_regs + arg_regs > 6) {
2559 			/* copy function arguments from the original stack
2560 			 * frame into the current stack frame.
2561 			 *
2562 			 * The starting address of the arguments on-stack
2563 			 * is:
2564 			 *   rbp + 8(push rbp) +
2565 			 *   8(return addr of the original call) +
2566 			 *   8(return addr of the caller)
2567 			 * which means: rbp + 24
2568 			 */
2569 			for (j = 0; j < arg_regs; j++) {
2570 				emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
2571 					 nr_stack_slots * 8 + 0x18);
2572 				emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
2573 					 -stack_size);
2574 
2575 				if (!nr_stack_slots)
2576 					first_off = stack_size;
2577 				stack_size -= 8;
2578 				nr_stack_slots++;
2579 			}
2580 		} else {
2581 			/* When preparing arguments for the original call, only
2582 			 * copy the on-stack arguments into the current
2583 			 * 'stack_size' area and skip the register arguments.
2584 			 */
2585 			if (for_call_origin) {
2586 				nr_regs += arg_regs;
2587 				continue;
2588 			}
2589 
2590 			/* copy the arguments from regs into stack */
2591 			for (j = 0; j < arg_regs; j++) {
2592 				emit_stx(prog, BPF_DW, BPF_REG_FP,
2593 					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2594 					 -stack_size);
2595 				stack_size -= 8;
2596 				nr_regs++;
2597 			}
2598 		}
2599 	}
2600 
2601 	clean_stack_garbage(m, prog, nr_stack_slots, first_off);
2602 }
2603 
2604 static void restore_regs(const struct btf_func_model *m, u8 **prog,
2605 			 int stack_size)
2606 {
2607 	int i, j, arg_regs, nr_regs = 0;
2608 
2609 	/* Restore function arguments from stack.
2610 	 * For a function that accepts two pointers the sequence will be:
2611 	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
2612 	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
2613 	 *
2614 	 * The logic here is similar to what we do in save_args()
2615 	 */
2616 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2617 		arg_regs = (m->arg_size[i] + 7) / 8;
2618 		if (nr_regs + arg_regs <= 6) {
2619 			for (j = 0; j < arg_regs; j++) {
2620 				emit_ldx(prog, BPF_DW,
2621 					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2622 					 BPF_REG_FP,
2623 					 -stack_size);
2624 				stack_size -= 8;
2625 				nr_regs++;
2626 			}
2627 		} else {
2628 			stack_size -= 8 * arg_regs;
2629 		}
2630 
2631 		if (nr_regs >= 6)
2632 			break;
2633 	}
2634 }
2635 
2636 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
2637 			   struct bpf_tramp_link *l, int stack_size,
2638 			   int run_ctx_off, bool save_ret,
2639 			   void *image, void *rw_image)
2640 {
2641 	u8 *prog = *pprog;
2642 	u8 *jmp_insn;
2643 	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
2644 	struct bpf_prog *p = l->link.prog;
2645 	u64 cookie = l->cookie;
2646 
2647 	/* mov rdi, cookie */
2648 	emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
2649 
2650 	/* Prepare struct bpf_tramp_run_ctx.
2651 	 *
2652 	 * bpf_tramp_run_ctx is already preserved by
2653 	 * arch_prepare_bpf_trampoline().
2654 	 *
2655 	 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
2656 	 */
2657 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
2658 
2659 	/* arg1: mov rdi, progs[i] */
2660 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2661 	/* arg2: lea rsi, [rbp - run_ctx_off] */
2662 	if (!is_imm8(-run_ctx_off))
2663 		EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
2664 	else
2665 		EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
2666 
2667 	if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
2668 		return -EINVAL;
2669 	/* remember prog start time returned by __bpf_prog_enter */
2670 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
2671 
2672 	/* if (__bpf_prog_enter*(prog) == 0)
2673 	 *	goto skip_exec_of_prog;
2674 	 */
2675 	EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
2676 	/* emit 2 nops that will be replaced with JE insn */
2677 	jmp_insn = prog;
2678 	emit_nops(&prog, 2);
2679 
2680 	/* arg1: lea rdi, [rbp - stack_size] */
2681 	if (!is_imm8(-stack_size))
2682 		EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
2683 	else
2684 		EMIT4(0x48, 0x8D, 0x7D, -stack_size);
2685 	/* arg2: progs[i]->insnsi for interpreter */
2686 	if (!p->jited)
2687 		emit_mov_imm64(&prog, BPF_REG_2,
2688 			       (long) p->insnsi >> 32,
2689 			       (u32) (long) p->insnsi);
2690 	/* call JITed bpf program or interpreter */
2691 	if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
2692 		return -EINVAL;
2693 
2694 	/*
2695 	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
2696 	 * of the previous call which is then passed on the stack to
2697 	 * the next BPF program.
2698 	 *
2699 	 * BPF_TRAMP_FENTRY trampoline may need to return the return
2700 	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
2701 	 */
2702 	if (save_ret)
2703 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2704 
2705 	/* replace 2 nops with JE insn, since jmp target is known */
2706 	jmp_insn[0] = X86_JE;
2707 	jmp_insn[1] = prog - jmp_insn - 2;
2708 
2709 	/* arg1: mov rdi, progs[i] */
2710 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2711 	/* arg2: mov rsi, rbx <- start time in nsec */
2712 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
2713 	/* arg3: lea rdx, [rbp - run_ctx_off] */
2714 	if (!is_imm8(-run_ctx_off))
2715 		EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
2716 	else
2717 		EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
2718 	if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
2719 		return -EINVAL;
2720 
2721 	*pprog = prog;
2722 	return 0;
2723 }
2724 
2725 static void emit_align(u8 **pprog, u32 align)
2726 {
2727 	u8 *target, *prog = *pprog;
2728 
2729 	target = PTR_ALIGN(prog, align);
2730 	if (target != prog)
2731 		emit_nops(&prog, target - prog);
2732 
2733 	*pprog = prog;
2734 }
2735 
2736 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
2737 {
2738 	u8 *prog = *pprog;
2739 	s64 offset;
2740 
2741 	offset = func - (ip + 2 + 4);
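	/* The rel32 of a near Jcc is relative to the end of the 6-byte
	 * instruction (2 opcode bytes + 4 offset bytes), hence ip + 2 + 4.
	 */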
2742 	if (!is_simm32(offset)) {
2743 		pr_err("Target %p is out of range\n", func);
2744 		return -EINVAL;
2745 	}
2746 	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
2747 	*pprog = prog;
2748 	return 0;
2749 }
2750 
2751 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
2752 		      struct bpf_tramp_links *tl, int stack_size,
2753 		      int run_ctx_off, bool save_ret,
2754 		      void *image, void *rw_image)
2755 {
2756 	int i;
2757 	u8 *prog = *pprog;
2758 
2759 	for (i = 0; i < tl->nr_links; i++) {
2760 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
2761 				    run_ctx_off, save_ret, image, rw_image))
2762 			return -EINVAL;
2763 	}
2764 	*pprog = prog;
2765 	return 0;
2766 }
2767 
2768 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
2769 			      struct bpf_tramp_links *tl, int stack_size,
2770 			      int run_ctx_off, u8 **branches,
2771 			      void *image, void *rw_image)
2772 {
2773 	u8 *prog = *pprog;
2774 	int i;
2775 
2776 	/* The first fmod_ret program will receive a garbage return value.
2777 	 * Set this to 0 to avoid confusing the program.
2778 	 */
2779 	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
2780 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2781 	for (i = 0; i < tl->nr_links; i++) {
2782 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
2783 				    image, rw_image))
2784 			return -EINVAL;
2785 
2786 		/* mod_ret prog stored return value into [rbp - 8]. Emit:
2787 		 * if (*(u64 *)(rbp - 8) !=  0)
2788 		 *	goto do_fexit;
2789 		 */
2790 		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
2791 		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
2792 
2793 		/* Save the location of the branch and generate 6 nops
2794 		 * (4 bytes for an offset and 2 bytes for the jump). These nops
2795 		 * are replaced with a conditional jump once do_fexit (i.e. the
2796 		 * start of the fexit invocation) is finalized.
2797 		 */
2798 		branches[i] = prog;
2799 		emit_nops(&prog, 4 + 2);
2800 	}
2801 
2802 	*pprog = prog;
2803 	return 0;
2804 }
2805 
2806 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
2807 #define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack)	\
2808 	__LOAD_TCC_PTR(-round_up(stack, 8) - 8)
2809 
2810 /* Example:
2811  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
2812  * its 'struct btf_func_model' will be nr_args=2
2813  * The assembly code when eth_type_trans is executing after trampoline:
2814  *
2815  * push rbp
2816  * mov rbp, rsp
2817  * sub rsp, 16                     // space for skb and dev
2818  * push rbx                        // temp regs to pass start time
2819  * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
2820  * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
2821  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2822  * mov rbx, rax                    // remember start time if bpf stats are enabled
2823  * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
2824  * call addr_of_jited_FENTRY_prog
2825  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2826  * mov rsi, rbx                    // prog start time
2827  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2828  * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
2829  * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
2830  * pop rbx
2831  * leave
2832  * ret
2833  *
2834  * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
2835  * replaced with 'call generated_bpf_trampoline'. When it returns
2836  * eth_type_trans will continue executing with original skb and dev pointers.
2837  *
2838  * The assembly code when eth_type_trans is called from trampoline:
2839  *
2840  * push rbp
2841  * mov rbp, rsp
2842  * sub rsp, 24                     // space for skb, dev, return value
2843  * push rbx                        // temp regs to pass start time
2844  * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
2845  * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
2846  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2847  * mov rbx, rax                    // remember start time if bpf stats are enabled
2848  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
2849  * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
2850  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2851  * mov rsi, rbx                    // prog start time
2852  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2853  * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
2854  * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
2855  * call eth_type_trans+5           // execute body of eth_type_trans
2856  * mov qword ptr [rbp - 8], rax    // save return value
2857  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2858  * mov rbx, rax                    // remember start time if bpf stats are enabled
2859  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
2860  * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
2861  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2862  * mov rsi, rbx                    // prog start time
2863  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2864  * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
2865  * pop rbx
2866  * leave
2867  * add rsp, 8                      // skip eth_type_trans's frame
2868  * ret                             // return to its caller
2869  */
2870 static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
2871 					 void *rw_image_end, void *image,
2872 					 const struct btf_func_model *m, u32 flags,
2873 					 struct bpf_tramp_links *tlinks,
2874 					 void *func_addr)
2875 {
2876 	int i, ret, nr_regs = m->nr_args, stack_size = 0;
2877 	int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
2878 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
2879 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
2880 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
2881 	void *orig_call = func_addr;
2882 	u8 **branches = NULL;
2883 	u8 *prog;
2884 	bool save_ret;
2885 
2886 	/*
2887 	 * F_INDIRECT is only compatible with F_RET_FENTRY_RET; it is
2888 	 * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG
2889 	 * because those flags rely on @func_addr being a real traced function.
2890 	 */
2891 	WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
2892 		     (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));
2893 
2894 	/* extra registers for struct arguments */
2895 	for (i = 0; i < m->nr_args; i++) {
2896 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
2897 			nr_regs += (m->arg_size[i] + 7) / 8 - 1;
2898 	}
2899 
2900 	/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. Arguments 1-6
2901 	 * are passed in registers, the rest are passed on the stack.
2902 	 */
2903 	if (nr_regs > MAX_BPF_FUNC_ARGS)
2904 		return -ENOTSUPP;
2905 
2906 	/* Generated trampoline stack layout:
2907 	 *
2908 	 * RBP + 8         [ return address  ]
2909 	 * RBP + 0         [ RBP             ]
2910 	 *
2911 	 * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
2912 	 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
2913 	 *
2914 	 *                 [ reg_argN        ]  always
2915 	 *                 [ ...             ]
2916 	 * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
2917 	 *
2918 	 * RBP - nregs_off [ regs count	     ]  always
2919 	 *
2920 	 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
2921 	 *
2922 	 * RBP - rbx_off   [ rbx value       ]  always
2923 	 *
2924 	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
2925 	 *
2926 	 *                     [ stack_argN ]  BPF_TRAMP_F_CALL_ORIG
2927 	 *                     [ ...        ]
2928 	 *                     [ stack_arg2 ]
2929 	 * RBP - arg_stack_off [ stack_arg1 ]
2930 	 * RSP                 [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX
2931 	 */
2932 
2933 	/* room for return value of orig_call or fentry prog */
2934 	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
2935 	if (save_ret)
2936 		stack_size += 8;
2937 
2938 	stack_size += nr_regs * 8;
2939 	regs_off = stack_size;
2940 
2941 	/* regs count  */
2942 	stack_size += 8;
2943 	nregs_off = stack_size;
2944 
2945 	if (flags & BPF_TRAMP_F_IP_ARG)
2946 		stack_size += 8; /* room for IP address argument */
2947 
2948 	ip_off = stack_size;
2949 
2950 	stack_size += 8;
2951 	rbx_off = stack_size;
2952 
2953 	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
2954 	run_ctx_off = stack_size;
2955 
2956 	if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
2957 		/* the space used to pass arguments on the stack */
2958 		stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
2959 		/* Make sure the stack pointer is 16-byte aligned when we need
2960 		 * to pass arguments on the stack, i.e. that
2961 		 *  [stack_size + 8(rbp) + 8(rip) + 8(original rip)]
2962 		 * is a multiple of 16. Since stack_size is already 8-byte
2963 		 * aligned here, add 8 exactly when it is currently 16-byte aligned.
2964 		 */
2965 		stack_size += (stack_size % 16) ? 0 : 8;
2966 	}
2967 
2968 	arg_stack_off = stack_size;
2969 
2970 	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
2971 		/* skip patched call instruction and point orig_call to actual
2972 		 * body of the kernel function.
2973 		 */
2974 		if (is_endbr(*(u32 *)orig_call))
2975 			orig_call += ENDBR_INSN_SIZE;
2976 		orig_call += X86_PATCH_SIZE;
2977 	}
2978 
2979 	prog = rw_image;
2980 
2981 	if (flags & BPF_TRAMP_F_INDIRECT) {
2982 		/*
2983 		 * Indirect call for bpf_struct_ops
2984 		 */
2985 		emit_cfi(&prog, cfi_get_func_hash(func_addr));
2986 	} else {
2987 		/*
2988 		 * Direct-call fentry stub, as such it needs accounting for the
2989 		 * __fentry__ call.
2990 		 */
2991 		x86_call_depth_emit_accounting(&prog, NULL, image);
2992 	}
2993 	EMIT1(0x55);		 /* push rbp */
2994 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
2995 	if (!is_imm8(stack_size)) {
2996 		/* sub rsp, stack_size */
2997 		EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
2998 	} else {
2999 		/* sub rsp, stack_size */
3000 		EMIT4(0x48, 0x83, 0xEC, stack_size);
3001 	}
3002 	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
3003 		EMIT1(0x50);		/* push rax */
3004 	/* mov QWORD PTR [rbp - rbx_off], rbx */
3005 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
3006 
3007 	/* Store number of argument registers of the traced function:
3008 	 *   mov rax, nr_regs
3009 	 *   mov QWORD PTR [rbp - nregs_off], rax
3010 	 */
3011 	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
3012 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
3013 
3014 	if (flags & BPF_TRAMP_F_IP_ARG) {
3015 		/* Store IP address of the traced function:
3016 		 * movabsq rax, func_addr
3017 		 * mov QWORD PTR [rbp - ip_off], rax
3018 		 */
3019 		emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
3020 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
3021 	}
3022 
3023 	save_args(m, &prog, regs_off, false);
3024 
3025 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
3026 		/* arg1: mov rdi, im */
3027 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
3028 		if (emit_rsb_call(&prog, __bpf_tramp_enter,
3029 				  image + (prog - (u8 *)rw_image))) {
3030 			ret = -EINVAL;
3031 			goto cleanup;
3032 		}
3033 	}
3034 
3035 	if (fentry->nr_links) {
3036 		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
3037 			       flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
3038 			return -EINVAL;
3039 	}
3040 
3041 	if (fmod_ret->nr_links) {
3042 		branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
3043 				   GFP_KERNEL);
3044 		if (!branches)
3045 			return -ENOMEM;
3046 
3047 		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
3048 				       run_ctx_off, branches, image, rw_image)) {
3049 			ret = -EINVAL;
3050 			goto cleanup;
3051 		}
3052 	}
3053 
3054 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
3055 		restore_regs(m, &prog, regs_off);
3056 		save_args(m, &prog, arg_stack_off, true);
3057 
3058 		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3059 			/* Before calling the original function, load the
3060 			 * tail_call_cnt_ptr from stack to rax.
3061 			 */
3062 			LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
3063 		}
3064 
3065 		if (flags & BPF_TRAMP_F_ORIG_STACK) {
3066 			emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
3067 			EMIT2(0xff, 0xd3); /* call *rbx */
3068 		} else {
3069 			/* call original function */
3070 			if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
3071 				ret = -EINVAL;
3072 				goto cleanup;
3073 			}
3074 		}
3075 		/* remember return value in a stack for bpf prog to access */
3076 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
3077 		im->ip_after_call = image + (prog - (u8 *)rw_image);
3078 		emit_nops(&prog, X86_PATCH_SIZE);
3079 	}
3080 
3081 	if (fmod_ret->nr_links) {
3082 		/* From Intel 64 and IA-32 Architectures Optimization
3083 		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3084 		 * Coding Rule 11: All branch targets should be 16-byte
3085 		 * aligned.
3086 		 */
3087 		emit_align(&prog, 16);
3088 		/* Update the branches saved in invoke_bpf_mod_ret with the
3089 		 * aligned address of do_fexit.
3090 		 */
3091 		for (i = 0; i < fmod_ret->nr_links; i++) {
3092 			emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
3093 					    image + (branches[i] - (u8 *)rw_image), X86_JNE);
3094 		}
3095 	}
3096 
3097 	if (fexit->nr_links) {
3098 		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
3099 			       false, image, rw_image)) {
3100 			ret = -EINVAL;
3101 			goto cleanup;
3102 		}
3103 	}
3104 
3105 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
3106 		restore_regs(m, &prog, regs_off);
3107 
3108 	/* This needs to be done regardless. If there were fmod_ret programs,
3109 	 * the return value is only updated on the stack and still needs to be
3110 	 * restored to R0.
3111 	 */
3112 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
3113 		im->ip_epilogue = image + (prog - (u8 *)rw_image);
3114 		/* arg1: mov rdi, im */
3115 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
3116 		if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
3117 			ret = -EINVAL;
3118 			goto cleanup;
3119 		}
3120 	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3121 		/* Before running the original function, load the
3122 		 * tail_call_cnt_ptr from stack to rax.
3123 		 */
3124 		LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
3125 	}
3126 
3127 	/* restore return value of orig_call or fentry prog back into RAX */
3128 	if (save_ret)
3129 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
3130 
3131 	emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
3132 	EMIT1(0xC9); /* leave */
3133 	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
3134 		/* skip our return address and return to parent */
3135 		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
3136 	}
3137 	emit_return(&prog, image + (prog - (u8 *)rw_image));
3138 	/* Make sure the trampoline generation logic doesn't overflow */
3139 	if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
3140 		ret = -EFAULT;
3141 		goto cleanup;
3142 	}
3143 	ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;
3144 
3145 cleanup:
3146 	kfree(branches);
3147 	return ret;
3148 }
3149 
3150 void *arch_alloc_bpf_trampoline(unsigned int size)
3151 {
3152 	return bpf_prog_pack_alloc(size, jit_fill_hole);
3153 }
3154 
3155 void arch_free_bpf_trampoline(void *image, unsigned int size)
3156 {
3157 	bpf_prog_pack_free(image, size);
3158 }
3159 
3160 int arch_protect_bpf_trampoline(void *image, unsigned int size)
3161 {
3162 	return 0;
3163 }
3164 
3165 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
3166 				const struct btf_func_model *m, u32 flags,
3167 				struct bpf_tramp_links *tlinks,
3168 				void *func_addr)
3169 {
3170 	void *rw_image, *tmp;
3171 	int ret;
3172 	u32 size = image_end - image;
3173 
3174 	/* rw_image doesn't need to be in module memory range, so we can
3175 	 * use kvmalloc.
3176 	 */
3177 	rw_image = kvmalloc(size, GFP_KERNEL);
3178 	if (!rw_image)
3179 		return -ENOMEM;
3180 
3181 	ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
3182 					    flags, tlinks, func_addr);
3183 	if (ret < 0)
3184 		goto out;
3185 
3186 	tmp = bpf_arch_text_copy(image, rw_image, size);
3187 	if (IS_ERR(tmp))
3188 		ret = PTR_ERR(tmp);
3189 out:
3190 	kvfree(rw_image);
3191 	return ret;
3192 }
3193 
3194 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
3195 			     struct bpf_tramp_links *tlinks, void *func_addr)
3196 {
3197 	struct bpf_tramp_image im;
3198 	void *image;
3199 	int ret;
3200 
3201 	/* Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
3202 	 * This will NOT cause fragmentation in direct map, as we do not
3203 	 * call set_memory_*() on this buffer.
3204 	 *
3205 	 * We cannot use kvmalloc here, because we need image to be in
3206 	 * module memory range.
3207 	 */
3208 	image = bpf_jit_alloc_exec(PAGE_SIZE);
3209 	if (!image)
3210 		return -ENOMEM;
3211 
3212 	ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
3213 					    m, flags, tlinks, func_addr);
3214 	bpf_jit_free_exec(image);
3215 	return ret;
3216 }
3217 
3218 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
3219 {
3220 	u8 *jg_reloc, *prog = *pprog;
3221 	int pivot, err, jg_bytes = 1;
3222 	s64 jg_offset;
3223 
3224 	if (a == b) {
3225 		/* Leaf node of recursion, i.e. not a range of indices
3226 		 * anymore.
3227 		 */
3228 		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
3229 		if (!is_simm32(progs[a]))
3230 			return -1;
3231 		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
3232 			    progs[a]);
3233 		err = emit_cond_near_jump(&prog,	/* je func */
3234 					  (void *)progs[a], image + (prog - buf),
3235 					  X86_JE);
3236 		if (err)
3237 			return err;
3238 
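		/* rdx matched none of the known programs: fall back to an
		 * indirect jump through rdx (emitted via a retpoline thunk
		 * when the kernel is configured for one).
		 */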
3239 		emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
3240 
3241 		*pprog = prog;
3242 		return 0;
3243 	}
3244 
3245 	/* Not a leaf node, so we pivot, and recursively descend into
3246 	 * the lower and upper ranges.
3247 	 */
3248 	pivot = (b - a) / 2;
3249 	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
3250 	if (!is_simm32(progs[a + pivot]))
3251 		return -1;
3252 	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
3253 
3254 	if (pivot > 2) {				/* jg upper_part */
3255 		/* Require near jump. */
3256 		jg_bytes = 4;
3257 		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
3258 	} else {
3259 		EMIT2(X86_JG, 0);
3260 	}
3261 	jg_reloc = prog;
3262 
3263 	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
3264 				  progs, image, buf);
3265 	if (err)
3266 		return err;
3267 
3268 	/* From Intel 64 and IA-32 Architectures Optimization
3269 	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3270 	 * Coding Rule 11: All branch targets should be 16-byte
3271 	 * aligned.
3272 	 */
3273 	emit_align(&prog, 16);
3274 	jg_offset = prog - jg_reloc;
3275 	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
3276 
3277 	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
3278 				  b, progs, image, buf);
3279 	if (err)
3280 		return err;
3281 
3282 	*pprog = prog;
3283 	return 0;
3284 }
3285 
3286 static int cmp_ips(const void *a, const void *b)
3287 {
3288 	const s64 *ipa = a;
3289 	const s64 *ipb = b;
3290 
3291 	if (*ipa > *ipb)
3292 		return 1;
3293 	if (*ipa < *ipb)
3294 		return -1;
3295 	return 0;
3296 }
3297 
3298 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
3299 {
3300 	u8 *prog = buf;
3301 
3302 	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
3303 	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
3304 }
3305 
3306 struct x64_jit_data {
3307 	struct bpf_binary_header *rw_header;
3308 	struct bpf_binary_header *header;
3309 	int *addrs;
3310 	u8 *image;
3311 	int proglen;
3312 	struct jit_context ctx;
3313 };
3314 
3315 #define MAX_PASSES 20
3316 #define PADDING_PASSES (MAX_PASSES - 5)
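/* If the image has not converged after PADDING_PASSES passes, the remaining
 * passes run with jump padding enabled (see do_jit()) so the program length
 * stops oscillating and the loop in bpf_int_jit_compile() can terminate.
 */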
3317 
3318 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
3319 {
3320 	struct bpf_binary_header *rw_header = NULL;
3321 	struct bpf_binary_header *header = NULL;
3322 	struct bpf_prog *tmp, *orig_prog = prog;
3323 	struct x64_jit_data *jit_data;
3324 	int proglen, oldproglen = 0;
3325 	struct jit_context ctx = {};
3326 	bool tmp_blinded = false;
3327 	bool extra_pass = false;
3328 	bool padding = false;
3329 	u8 *rw_image = NULL;
3330 	u8 *image = NULL;
3331 	int *addrs;
3332 	int pass;
3333 	int i;
3334 
3335 	if (!prog->jit_requested)
3336 		return orig_prog;
3337 
3338 	tmp = bpf_jit_blind_constants(prog);
3339 	/*
3340 	 * If blinding was requested and we failed during blinding,
3341 	 * we must fall back to the interpreter.
3342 	 */
3343 	if (IS_ERR(tmp))
3344 		return orig_prog;
3345 	if (tmp != prog) {
3346 		tmp_blinded = true;
3347 		prog = tmp;
3348 	}
3349 
3350 	jit_data = prog->aux->jit_data;
3351 	if (!jit_data) {
3352 		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
3353 		if (!jit_data) {
3354 			prog = orig_prog;
3355 			goto out;
3356 		}
3357 		prog->aux->jit_data = jit_data;
3358 	}
3359 	addrs = jit_data->addrs;
3360 	if (addrs) {
3361 		ctx = jit_data->ctx;
3362 		oldproglen = jit_data->proglen;
3363 		image = jit_data->image;
3364 		header = jit_data->header;
3365 		rw_header = jit_data->rw_header;
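		/* The JIT emits into the writable rw_image shadow; image is the
		 * read-only+exec location it is later copied to. Recompute the
		 * shadow pointer at the matching offset within rw_header.
		 */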
3366 		rw_image = (void *)rw_header + ((void *)image - (void *)header);
3367 		extra_pass = true;
3368 		padding = true;
3369 		goto skip_init_addrs;
3370 	}
3371 	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
3372 	if (!addrs) {
3373 		prog = orig_prog;
3374 		goto out_addrs;
3375 	}
3376 
3377 	/*
3378 	 * Before the first pass, make a rough estimate of addrs[]:
3379 	 * each BPF instruction is translated to less than 64 bytes.
3380 	 */
3381 	for (proglen = 0, i = 0; i <= prog->len; i++) {
3382 		proglen += 64;
3383 		addrs[i] = proglen;
3384 	}
3385 	ctx.cleanup_addr = proglen;
3386 skip_init_addrs:
3387 
3388 	/*
3389 	 * JITed image shrinks with every pass and the loop iterates
3390 	 * until the image stops shrinking. Very large BPF programs
3391 	 * may converge on the last pass. In such case do one more
3392 	 * may converge on the last pass. In such a case, do one more
3393 	 */
3394 	for (pass = 0; pass < MAX_PASSES || image; pass++) {
3395 		if (!padding && pass >= PADDING_PASSES)
3396 			padding = true;
3397 		proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
3398 		if (proglen <= 0) {
3399 out_image:
3400 			image = NULL;
3401 			if (header) {
3402 				bpf_arch_text_copy(&header->size, &rw_header->size,
3403 						   sizeof(rw_header->size));
3404 				bpf_jit_binary_pack_free(header, rw_header);
3405 			}
3406 			/* Fall back to interpreter mode */
3407 			prog = orig_prog;
3408 			if (extra_pass) {
3409 				prog->bpf_func = NULL;
3410 				prog->jited = 0;
3411 				prog->jited_len = 0;
3412 			}
3413 			goto out_addrs;
3414 		}
3415 		if (image) {
3416 			if (proglen != oldproglen) {
3417 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
3418 				       proglen, oldproglen);
3419 				goto out_image;
3420 			}
3421 			break;
3422 		}
3423 		if (proglen == oldproglen) {
3424 			/*
3425 			 * The number of entries in extable is the number of BPF_LDX
3426 			 * insns that access kernel memory via "pointer to BTF type".
3427 			 * The verifier changed their opcode from LDX|MEM|size
3428 			 * to LDX|PROBE_MEM|size to make JITing easier.
3429 			 */
3430 			u32 align = __alignof__(struct exception_table_entry);
3431 			u32 extable_size = prog->aux->num_exentries *
3432 				sizeof(struct exception_table_entry);
3433 
3434 			/* allocate module memory for x86 insns and extable */
3435 			header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
3436 							   &image, align, &rw_header, &rw_image,
3437 							   jit_fill_hole);
3438 			if (!header) {
3439 				prog = orig_prog;
3440 				goto out_addrs;
3441 			}
3442 			prog->aux->extable = (void *) image + roundup(proglen, align);
3443 		}
3444 		oldproglen = proglen;
3445 		cond_resched();
3446 	}
3447 
3448 	if (bpf_jit_enable > 1)
3449 		bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
3450 
3451 	if (image) {
3452 		if (!prog->is_func || extra_pass) {
3453 			/*
3454 			 * bpf_jit_binary_pack_finalize fails in two scenarios:
3455 			 *   1) header is not pointing to proper module memory;
3456 			 *   2) the arch doesn't support bpf_arch_text_copy().
3457 			 *
3458 			 * Both cases are serious bugs and justify WARN_ON.
3459 			 */
3460 			if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) {
3461 				/* header has been freed */
3462 				header = NULL;
3463 				goto out_image;
3464 			}
3465 
3466 			bpf_tail_call_direct_fixup(prog);
3467 		} else {
3468 			jit_data->addrs = addrs;
3469 			jit_data->ctx = ctx;
3470 			jit_data->proglen = proglen;
3471 			jit_data->image = image;
3472 			jit_data->header = header;
3473 			jit_data->rw_header = rw_header;
3474 		}
3475 		/*
3476 		 * ctx.prog_offset is used when CFI preambles put code *before*
3477 		 * the function. See emit_cfi(). For FineIBT specifically this code
3478 		 * can also be executed and bpf_prog_kallsyms_add() will
3479 		 * generate an additional symbol to cover this, hence also
3480 		 * decrement proglen.
3481 		 */
3482 		prog->bpf_func = (void *)image + cfi_get_offset();
3483 		prog->jited = 1;
3484 		prog->jited_len = proglen - cfi_get_offset();
3485 	} else {
3486 		prog = orig_prog;
3487 	}
3488 
3489 	if (!image || !prog->is_func || extra_pass) {
3490 		if (image)
3491 			bpf_prog_fill_jited_linfo(prog, addrs + 1);
3492 out_addrs:
3493 		kvfree(addrs);
3494 		kfree(jit_data);
3495 		prog->aux->jit_data = NULL;
3496 	}
3497 out:
3498 	if (tmp_blinded)
3499 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
3500 					   tmp : orig_prog);
3501 	return prog;
3502 }
3503 
3504 bool bpf_jit_supports_kfunc_call(void)
3505 {
3506 	return true;
3507 }
3508 
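/* Copy instructions into the read-only, executable image. text_poke_copy()
 * writes through a temporary mapping, so the destination mapping never has
 * to be made writable.
 */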
3509 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
3510 {
3511 	if (text_poke_copy(dst, src, len) == NULL)
3512 		return ERR_PTR(-EINVAL);
3513 	return dst;
3514 }
3515 
3516 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
3517 bool bpf_jit_supports_subprog_tailcalls(void)
3518 {
3519 	return true;
3520 }
3521 
3522 bool bpf_jit_supports_percpu_insn(void)
3523 {
3524 	return true;
3525 }
3526 
3527 void bpf_jit_free(struct bpf_prog *prog)
3528 {
3529 	if (prog->jited) {
3530 		struct x64_jit_data *jit_data = prog->aux->jit_data;
3531 		struct bpf_binary_header *hdr;
3532 
3533 		/*
3534 		 * If we fail the final pass of JIT (from jit_subprogs),
3535 		 * the program may not be finalized yet. Call finalize here
3536 		 * before freeing it.
3537 		 */
3538 		if (jit_data) {
3539 			bpf_jit_binary_pack_finalize(jit_data->header,
3540 						     jit_data->rw_header);
3541 			kvfree(jit_data->addrs);
3542 			kfree(jit_data);
3543 		}
3544 		prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset();
3545 		hdr = bpf_jit_binary_pack_hdr(prog);
3546 		bpf_jit_binary_pack_free(hdr, NULL);
3547 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
3548 	}
3549 
3550 	bpf_prog_unlock_free(prog);
3551 }
3552 
3553 bool bpf_jit_supports_exceptions(void)
3554 {
3555 	/* We unwind through both kernel frames (starting from within bpf_throw
3556 	 * call) and BPF frames. Therefore we require ORC unwinder to be enabled
3557 	 * to walk kernel frames and reach BPF frames in the stack trace.
3558 	 */
3559 	return IS_ENABLED(CONFIG_UNWINDER_ORC);
3560 }
3561 
3562 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3563 {
3564 #if defined(CONFIG_UNWINDER_ORC)
3565 	struct unwind_state state;
3566 	unsigned long addr;
3567 
3568 	for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
3569 	     unwind_next_frame(&state)) {
3570 		addr = unwind_get_return_address(&state);
3571 		if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
3572 			break;
3573 	}
3574 	return;
3575 #endif
3576 	WARN(1, "verification of programs using bpf_throw should have failed\n");
3577 }
3578 
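/* Update a direct tail-call site for a prog array slot. When a program is
 * installed, the tailcall target jump is pointed at it and, if the slot was
 * previously empty, the bypass jump is removed. When a program is removed,
 * the bypass is re-enabled first and an RCU grace period is observed before
 * the target jump is torn down.
 */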
3579 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
3580 			       struct bpf_prog *new, struct bpf_prog *old)
3581 {
3582 	u8 *old_addr, *new_addr, *old_bypass_addr;
3583 	int ret;
3584 
3585 	old_bypass_addr = old ? NULL : poke->bypass_addr;
3586 	old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
3587 	new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
3588 
3589 	/*
3590 	 * On program loading or teardown, the program's kallsym entry
3591 	 * might not be in place, so we use __bpf_arch_text_poke to skip
3592 	 * the kallsyms check.
3593 	 */
3594 	if (new) {
3595 		ret = __bpf_arch_text_poke(poke->tailcall_target,
3596 					   BPF_MOD_JUMP,
3597 					   old_addr, new_addr);
3598 		BUG_ON(ret < 0);
3599 		if (!old) {
3600 			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
3601 						   BPF_MOD_JUMP,
3602 						   poke->bypass_addr,
3603 						   NULL);
3604 			BUG_ON(ret < 0);
3605 		}
3606 	} else {
3607 		ret = __bpf_arch_text_poke(poke->tailcall_bypass,
3608 					   BPF_MOD_JUMP,
3609 					   old_bypass_addr,
3610 					   poke->bypass_addr);
3611 		BUG_ON(ret < 0);
3612 		/* Let other CPUs finish executing the program, so that
3613 		 * it will not be possible to expose them to an invalid
3614 		 * nop, stack unwind, nop state.
3615 		 */
3616 		if (!ret)
3617 			synchronize_rcu();
3618 		ret = __bpf_arch_text_poke(poke->tailcall_target,
3619 					   BPF_MOD_JUMP,
3620 					   old_addr, NULL);
3621 		BUG_ON(ret < 0);
3622 	}
3623 }
3624 
3625 bool bpf_jit_supports_arena(void)
3626 {
3627 	return true;
3628 }
3629 
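/* Arena loads and stores may fault and get fixed up via the extable. The
 * fetching variants of atomic and/or/xor are emulated with a CMPXCHG loop
 * in do_jit() and are not supported for arena accesses, so reject them
 * here.
 */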
3630 bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
3631 {
3632 	if (!in_arena)
3633 		return true;
3634 	switch (insn->code) {
3635 	case BPF_STX | BPF_ATOMIC | BPF_W:
3636 	case BPF_STX | BPF_ATOMIC | BPF_DW:
3637 		if (insn->imm == (BPF_AND | BPF_FETCH) ||
3638 		    insn->imm == (BPF_OR | BPF_FETCH) ||
3639 		    insn->imm == (BPF_XOR | BPF_FETCH))
3640 			return false;
3641 	}
3642 	return true;
3643 }
3644 
3645 bool bpf_jit_supports_ptr_xchg(void)
3646 {
3647 	return true;
3648 }
3649 
3650 /* x86-64 JIT emits its own code to filter user addresses so return 0 here */
3651 u64 bpf_arch_uaddress_limit(void)
3652 {
3653 	return 0;
3654 }
3655