xref: /linux/arch/x86/net/bpf_jit_comp.c (revision 6c8c1406a6d6a3f2e61ac590f5c0994231bc6be7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * BPF JIT compiler
4  *
5  * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6  * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7  */
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bpf.h>
12 #include <linux/memory.h>
13 #include <linux/sort.h>
14 #include <linux/init.h>
15 #include <asm/extable.h>
16 #include <asm/set_memory.h>
17 #include <asm/nospec-branch.h>
18 #include <asm/text-patching.h>
19 
20 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
21 {
22 	if (len == 1)
23 		*ptr = bytes;
24 	else if (len == 2)
25 		*(u16 *)ptr = bytes;
26 	else {
27 		*(u32 *)ptr = bytes;
28 		barrier();
29 	}
30 	return ptr + len;
31 }
32 
33 #define EMIT(bytes, len) \
34 	do { prog = emit_code(prog, bytes, len); } while (0)
35 
36 #define EMIT1(b1)		EMIT(b1, 1)
37 #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
38 #define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
39 #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
40 
41 #define EMIT1_off32(b1, off) \
42 	do { EMIT1(b1); EMIT(off, 4); } while (0)
43 #define EMIT2_off32(b1, b2, off) \
44 	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
45 #define EMIT3_off32(b1, b2, b3, off) \
46 	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
47 #define EMIT4_off32(b1, b2, b3, b4, off) \
48 	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
49 
50 #ifdef CONFIG_X86_KERNEL_IBT
51 #define EMIT_ENDBR()	EMIT(gen_endbr(), 4)
52 #else
53 #define EMIT_ENDBR()
54 #endif
55 
56 static bool is_imm8(int value)
57 {
58 	return value <= 127 && value >= -128;
59 }
60 
61 static bool is_simm32(s64 value)
62 {
63 	return value == (s64)(s32)value;
64 }
65 
66 static bool is_uimm32(u64 value)
67 {
68 	return value == (u64)(u32)value;
69 }
70 
71 /* mov dst, src */
72 #define EMIT_mov(DST, SRC)								 \
73 	do {										 \
74 		if (DST != SRC)								 \
75 			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
76 	} while (0)
77 
78 static int bpf_size_to_x86_bytes(int bpf_size)
79 {
80 	if (bpf_size == BPF_W)
81 		return 4;
82 	else if (bpf_size == BPF_H)
83 		return 2;
84 	else if (bpf_size == BPF_B)
85 		return 1;
86 	else if (bpf_size == BPF_DW)
87 		return 4; /* imm32 */
88 	else
89 		return 0;
90 }
91 
92 /*
93  * List of x86 cond jumps opcodes (. + s8)
94  * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
95  */
96 #define X86_JB  0x72
97 #define X86_JAE 0x73
98 #define X86_JE  0x74
99 #define X86_JNE 0x75
100 #define X86_JBE 0x76
101 #define X86_JA  0x77
102 #define X86_JL  0x7C
103 #define X86_JGE 0x7D
104 #define X86_JLE 0x7E
105 #define X86_JG  0x7F
106 
107 /* Pick a register outside of BPF range for JIT internal work */
108 #define AUX_REG (MAX_BPF_JIT_REG + 1)
109 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
110 
111 /*
112  * The following table maps BPF registers to x86-64 registers.
113  *
114  * x86-64 register R12 is unused, since if used as base address
115  * register in load/store instructions, it always needs an
116  * extra byte of encoding and is callee saved.
117  *
118  * x86-64 register R9 is not used by BPF programs, but can be used by BPF
119  * trampoline. x86-64 register R10 is used for blinding (if enabled).
120  */
121 static const int reg2hex[] = {
122 	[BPF_REG_0] = 0,  /* RAX */
123 	[BPF_REG_1] = 7,  /* RDI */
124 	[BPF_REG_2] = 6,  /* RSI */
125 	[BPF_REG_3] = 2,  /* RDX */
126 	[BPF_REG_4] = 1,  /* RCX */
127 	[BPF_REG_5] = 0,  /* R8  */
128 	[BPF_REG_6] = 3,  /* RBX callee saved */
129 	[BPF_REG_7] = 5,  /* R13 callee saved */
130 	[BPF_REG_8] = 6,  /* R14 callee saved */
131 	[BPF_REG_9] = 7,  /* R15 callee saved */
132 	[BPF_REG_FP] = 5, /* RBP readonly */
133 	[BPF_REG_AX] = 2, /* R10 temp register */
134 	[AUX_REG] = 3,    /* R11 temp register */
135 	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
136 };
137 
138 static const int reg2pt_regs[] = {
139 	[BPF_REG_0] = offsetof(struct pt_regs, ax),
140 	[BPF_REG_1] = offsetof(struct pt_regs, di),
141 	[BPF_REG_2] = offsetof(struct pt_regs, si),
142 	[BPF_REG_3] = offsetof(struct pt_regs, dx),
143 	[BPF_REG_4] = offsetof(struct pt_regs, cx),
144 	[BPF_REG_5] = offsetof(struct pt_regs, r8),
145 	[BPF_REG_6] = offsetof(struct pt_regs, bx),
146 	[BPF_REG_7] = offsetof(struct pt_regs, r13),
147 	[BPF_REG_8] = offsetof(struct pt_regs, r14),
148 	[BPF_REG_9] = offsetof(struct pt_regs, r15),
149 };
150 
151 /*
152  * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
153  * which need extra byte of encoding.
154  * rax,rcx,...,rbp have simpler encoding
155  */
156 static bool is_ereg(u32 reg)
157 {
158 	return (1 << reg) & (BIT(BPF_REG_5) |
159 			     BIT(AUX_REG) |
160 			     BIT(BPF_REG_7) |
161 			     BIT(BPF_REG_8) |
162 			     BIT(BPF_REG_9) |
163 			     BIT(X86_REG_R9) |
164 			     BIT(BPF_REG_AX));
165 }
166 
167 /*
168  * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
169  * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
170  * of encoding. al,cl,dl,bl have simpler encoding.
171  */
172 static bool is_ereg_8l(u32 reg)
173 {
174 	return is_ereg(reg) ||
175 	    (1 << reg) & (BIT(BPF_REG_1) |
176 			  BIT(BPF_REG_2) |
177 			  BIT(BPF_REG_FP));
178 }
179 
180 static bool is_axreg(u32 reg)
181 {
182 	return reg == BPF_REG_0;
183 }
184 
185 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
186 static u8 add_1mod(u8 byte, u32 reg)
187 {
188 	if (is_ereg(reg))
189 		byte |= 1;
190 	return byte;
191 }
192 
193 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
194 {
195 	if (is_ereg(r1))
196 		byte |= 1;
197 	if (is_ereg(r2))
198 		byte |= 4;
199 	return byte;
200 }
201 
202 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
203 static u8 add_1reg(u8 byte, u32 dst_reg)
204 {
205 	return byte + reg2hex[dst_reg];
206 }
207 
208 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
209 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
210 {
211 	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
212 }
213 
214 /* Some 1-byte opcodes for binary ALU operations */
215 static u8 simple_alu_opcodes[] = {
216 	[BPF_ADD] = 0x01,
217 	[BPF_SUB] = 0x29,
218 	[BPF_AND] = 0x21,
219 	[BPF_OR] = 0x09,
220 	[BPF_XOR] = 0x31,
221 	[BPF_LSH] = 0xE0,
222 	[BPF_RSH] = 0xE8,
223 	[BPF_ARSH] = 0xF8,
224 };
225 
226 static void jit_fill_hole(void *area, unsigned int size)
227 {
228 	/* Fill whole space with INT3 instructions */
229 	memset(area, 0xcc, size);
230 }
231 
232 int bpf_arch_text_invalidate(void *dst, size_t len)
233 {
234 	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
235 }
236 
237 struct jit_context {
238 	int cleanup_addr; /* Epilogue code offset */
239 
240 	/*
241 	 * Program specific offsets of labels in the code; these rely on the
242 	 * JIT doing at least 2 passes, recording the position on the first
243 	 * pass, only to generate the correct offset on the second pass.
244 	 */
245 	int tail_call_direct_label;
246 	int tail_call_indirect_label;
247 };
248 
249 /* Maximum number of bytes emitted while JITing one eBPF insn */
250 #define BPF_MAX_INSN_SIZE	128
251 #define BPF_INSN_SAFETY		64
252 
253 /* Number of bytes emit_patch() needs to generate instructions */
254 #define X86_PATCH_SIZE		5
255 /* Number of bytes that will be skipped on tailcall */
256 #define X86_TAIL_CALL_OFFSET	(11 + ENDBR_INSN_SIZE)
257 
258 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
259 {
260 	u8 *prog = *pprog;
261 
262 	if (callee_regs_used[0])
263 		EMIT1(0x53);         /* push rbx */
264 	if (callee_regs_used[1])
265 		EMIT2(0x41, 0x55);   /* push r13 */
266 	if (callee_regs_used[2])
267 		EMIT2(0x41, 0x56);   /* push r14 */
268 	if (callee_regs_used[3])
269 		EMIT2(0x41, 0x57);   /* push r15 */
270 	*pprog = prog;
271 }
272 
273 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
274 {
275 	u8 *prog = *pprog;
276 
277 	if (callee_regs_used[3])
278 		EMIT2(0x41, 0x5F);   /* pop r15 */
279 	if (callee_regs_used[2])
280 		EMIT2(0x41, 0x5E);   /* pop r14 */
281 	if (callee_regs_used[1])
282 		EMIT2(0x41, 0x5D);   /* pop r13 */
283 	if (callee_regs_used[0])
284 		EMIT1(0x5B);         /* pop rbx */
285 	*pprog = prog;
286 }
287 
288 /*
289  * Emit x86-64 prologue code for BPF program.
290  * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
291  * while jumping to another program
292  */
293 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
294 			  bool tail_call_reachable, bool is_subprog)
295 {
296 	u8 *prog = *pprog;
297 
298 	/* BPF trampoline can be made to work without these nops,
299 	 * but let's waste 5 bytes for now and optimize later
300 	 */
301 	EMIT_ENDBR();
302 	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
303 	prog += X86_PATCH_SIZE;
304 	if (!ebpf_from_cbpf) {
305 		if (tail_call_reachable && !is_subprog)
306 			EMIT2(0x31, 0xC0); /* xor eax, eax */
307 		else
308 			EMIT2(0x66, 0x90); /* nop2 */
309 	}
310 	EMIT1(0x55);             /* push rbp */
311 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
312 
313 	/* X86_TAIL_CALL_OFFSET is here */
314 	EMIT_ENDBR();
315 
316 	/* sub rsp, rounded_stack_depth */
317 	if (stack_depth)
318 		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
319 	if (tail_call_reachable)
320 		EMIT1(0x50);         /* push rax */
321 	*pprog = prog;
322 }
323 
324 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
325 {
326 	u8 *prog = *pprog;
327 	s64 offset;
328 
329 	offset = func - (ip + X86_PATCH_SIZE);
330 	if (!is_simm32(offset)) {
331 		pr_err("Target call %p is out of range\n", func);
332 		return -ERANGE;
333 	}
334 	EMIT1_off32(opcode, offset);
335 	*pprog = prog;
336 	return 0;
337 }
338 
339 static int emit_call(u8 **pprog, void *func, void *ip)
340 {
341 	return emit_patch(pprog, func, ip, 0xE8);
342 }
343 
344 static int emit_jump(u8 **pprog, void *func, void *ip)
345 {
346 	return emit_patch(pprog, func, ip, 0xE9);
347 }
348 
349 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
350 				void *old_addr, void *new_addr)
351 {
352 	const u8 *nop_insn = x86_nops[5];
353 	u8 old_insn[X86_PATCH_SIZE];
354 	u8 new_insn[X86_PATCH_SIZE];
355 	u8 *prog;
356 	int ret;
357 
358 	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
359 	if (old_addr) {
360 		prog = old_insn;
361 		ret = t == BPF_MOD_CALL ?
362 		      emit_call(&prog, old_addr, ip) :
363 		      emit_jump(&prog, old_addr, ip);
364 		if (ret)
365 			return ret;
366 	}
367 
368 	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
369 	if (new_addr) {
370 		prog = new_insn;
371 		ret = t == BPF_MOD_CALL ?
372 		      emit_call(&prog, new_addr, ip) :
373 		      emit_jump(&prog, new_addr, ip);
374 		if (ret)
375 			return ret;
376 	}
377 
378 	ret = -EBUSY;
379 	mutex_lock(&text_mutex);
380 	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
381 		goto out;
382 	ret = 1;
383 	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
384 		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
385 		ret = 0;
386 	}
387 out:
388 	mutex_unlock(&text_mutex);
389 	return ret;
390 }
391 
392 int __init bpf_arch_init_dispatcher_early(void *ip)
393 {
394 	const u8 *nop_insn = x86_nops[5];
395 
396 	if (is_endbr(*(u32 *)ip))
397 		ip += ENDBR_INSN_SIZE;
398 
399 	if (memcmp(ip, nop_insn, X86_PATCH_SIZE))
400 		text_poke_early(ip, nop_insn, X86_PATCH_SIZE);
401 	return 0;
402 }
403 
404 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
405 		       void *old_addr, void *new_addr)
406 {
407 	if (!is_kernel_text((long)ip) &&
408 	    !is_bpf_text_address((long)ip))
409 		/* BPF poking in modules is not supported */
410 		return -EINVAL;
411 
412 	/*
413 	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
414 	 * with an ENDBR instruction.
415 	 */
416 	if (is_endbr(*(u32 *)ip))
417 		ip += ENDBR_INSN_SIZE;
418 
419 	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
420 }
421 
422 #define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)
423 
424 static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
425 {
426 	u8 *prog = *pprog;
427 
428 	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
429 		EMIT_LFENCE();
430 		EMIT2(0xFF, 0xE0 + reg);
431 	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
432 		OPTIMIZER_HIDE_VAR(reg);
433 		emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
434 	} else {
435 		EMIT2(0xFF, 0xE0 + reg);	/* jmp *%\reg */
436 		if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS))
437 			EMIT1(0xCC);		/* int3 */
438 	}
439 
440 	*pprog = prog;
441 }
442 
443 static void emit_return(u8 **pprog, u8 *ip)
444 {
445 	u8 *prog = *pprog;
446 
447 	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
448 		emit_jump(&prog, &__x86_return_thunk, ip);
449 	} else {
450 		EMIT1(0xC3);		/* ret */
451 		if (IS_ENABLED(CONFIG_SLS))
452 			EMIT1(0xCC);	/* int3 */
453 	}
454 
455 	*pprog = prog;
456 }
457 
458 /*
459  * Generate the following code:
460  *
461  * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
462  *   if (index >= array->map.max_entries)
463  *     goto out;
464  *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
465  *     goto out;
466  *   prog = array->ptrs[index];
467  *   if (prog == NULL)
468  *     goto out;
469  *   goto *(prog->bpf_func + prologue_size);
470  * out:
471  */
472 static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
473 					u32 stack_depth, u8 *ip,
474 					struct jit_context *ctx)
475 {
476 	int tcc_off = -4 - round_up(stack_depth, 8);
477 	u8 *prog = *pprog, *start = *pprog;
478 	int offset;
479 
480 	/*
481 	 * rdi - pointer to ctx
482 	 * rsi - pointer to bpf_array
483 	 * rdx - index in bpf_array
484 	 */
485 
486 	/*
487 	 * if (index >= array->map.max_entries)
488 	 *	goto out;
489 	 */
490 	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
491 	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
492 	      offsetof(struct bpf_array, map.max_entries));
493 
494 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
495 	EMIT2(X86_JBE, offset);                   /* jbe out */
496 
497 	/*
498 	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
499 	 *	goto out;
500 	 */
501 	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
502 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
503 
504 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
505 	EMIT2(X86_JAE, offset);                   /* jae out */
506 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
507 	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */
508 
509 	/* prog = array->ptrs[index]; */
510 	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
511 		    offsetof(struct bpf_array, ptrs));
512 
513 	/*
514 	 * if (prog == NULL)
515 	 *	goto out;
516 	 */
517 	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
518 
519 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
520 	EMIT2(X86_JE, offset);                    /* je out */
521 
522 	pop_callee_regs(&prog, callee_regs_used);
523 
524 	EMIT1(0x58);                              /* pop rax */
525 	if (stack_depth)
526 		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
527 			    round_up(stack_depth, 8));
528 
529 	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
530 	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
531 	      offsetof(struct bpf_prog, bpf_func));
532 	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
533 	      X86_TAIL_CALL_OFFSET);
534 	/*
535 	 * Now we're ready to jump into next BPF program
536 	 * rdi == ctx (1st arg)
537 	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
538 	 */
539 	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
540 
541 	/* out: */
542 	ctx->tail_call_indirect_label = prog - start;
543 	*pprog = prog;
544 }
545 
546 static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
547 				      u8 **pprog, u8 *ip,
548 				      bool *callee_regs_used, u32 stack_depth,
549 				      struct jit_context *ctx)
550 {
551 	int tcc_off = -4 - round_up(stack_depth, 8);
552 	u8 *prog = *pprog, *start = *pprog;
553 	int offset;
554 
555 	/*
556 	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
557 	 *	goto out;
558 	 */
559 	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
560 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
561 
562 	offset = ctx->tail_call_direct_label - (prog + 2 - start);
563 	EMIT2(X86_JAE, offset);                       /* jae out */
564 	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
565 	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */
566 
567 	poke->tailcall_bypass = ip + (prog - start);
568 	poke->adj_off = X86_TAIL_CALL_OFFSET;
569 	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
570 	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
571 
572 	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
573 		  poke->tailcall_bypass);
574 
575 	pop_callee_regs(&prog, callee_regs_used);
576 	EMIT1(0x58);                                  /* pop rax */
577 	if (stack_depth)
578 		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
579 
580 	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
581 	prog += X86_PATCH_SIZE;
582 
583 	/* out: */
584 	ctx->tail_call_direct_label = prog - start;
585 
586 	*pprog = prog;
587 }
588 
589 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
590 {
591 	struct bpf_jit_poke_descriptor *poke;
592 	struct bpf_array *array;
593 	struct bpf_prog *target;
594 	int i, ret;
595 
596 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
597 		poke = &prog->aux->poke_tab[i];
598 		if (poke->aux && poke->aux != prog->aux)
599 			continue;
600 
601 		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
602 
603 		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
604 			continue;
605 
606 		array = container_of(poke->tail_call.map, struct bpf_array, map);
607 		mutex_lock(&array->aux->poke_mutex);
608 		target = array->ptrs[poke->tail_call.key];
609 		if (target) {
610 			ret = __bpf_arch_text_poke(poke->tailcall_target,
611 						   BPF_MOD_JUMP, NULL,
612 						   (u8 *)target->bpf_func +
613 						   poke->adj_off);
614 			BUG_ON(ret < 0);
615 			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
616 						   BPF_MOD_JUMP,
617 						   (u8 *)poke->tailcall_target +
618 						   X86_PATCH_SIZE, NULL);
619 			BUG_ON(ret < 0);
620 		}
621 		WRITE_ONCE(poke->tailcall_target_stable, true);
622 		mutex_unlock(&array->aux->poke_mutex);
623 	}
624 }
625 
626 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
627 			   u32 dst_reg, const u32 imm32)
628 {
629 	u8 *prog = *pprog;
630 	u8 b1, b2, b3;
631 
632 	/*
633 	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
634 	 * (which zero-extends imm32) to save 2 bytes.
635 	 */
636 	if (sign_propagate && (s32)imm32 < 0) {
637 		/* 'mov %rax, imm32' sign extends imm32 */
638 		b1 = add_1mod(0x48, dst_reg);
639 		b2 = 0xC7;
640 		b3 = 0xC0;
641 		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
642 		goto done;
643 	}
644 
645 	/*
646 	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
647 	 * to save 3 bytes.
648 	 */
649 	if (imm32 == 0) {
650 		if (is_ereg(dst_reg))
651 			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
652 		b2 = 0x31; /* xor */
653 		b3 = 0xC0;
654 		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
655 		goto done;
656 	}
657 
658 	/* mov %eax, imm32 */
659 	if (is_ereg(dst_reg))
660 		EMIT1(add_1mod(0x40, dst_reg));
661 	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
662 done:
663 	*pprog = prog;
664 }
665 
666 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
667 			   const u32 imm32_hi, const u32 imm32_lo)
668 {
669 	u8 *prog = *pprog;
670 
671 	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
672 		/*
673 		 * For emitting plain u32, where sign bit must not be
674 		 * propagated LLVM tends to load imm64 over mov32
675 		 * directly, so save couple of bytes by just doing
676 		 * 'mov %eax, imm32' instead.
677 		 */
678 		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
679 	} else {
680 		/* movabsq rax, imm64 */
681 		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
682 		EMIT(imm32_lo, 4);
683 		EMIT(imm32_hi, 4);
684 	}
685 
686 	*pprog = prog;
687 }
688 
689 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
690 {
691 	u8 *prog = *pprog;
692 
693 	if (is64) {
694 		/* mov dst, src */
695 		EMIT_mov(dst_reg, src_reg);
696 	} else {
697 		/* mov32 dst, src */
698 		if (is_ereg(dst_reg) || is_ereg(src_reg))
699 			EMIT1(add_2mod(0x40, dst_reg, src_reg));
700 		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
701 	}
702 
703 	*pprog = prog;
704 }
705 
706 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
707 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
708 {
709 	u8 *prog = *pprog;
710 
711 	if (is_imm8(off)) {
712 		/* 1-byte signed displacement.
713 		 *
714 		 * If off == 0 we could skip this and save one extra byte, but
715 		 * special case of x86 R13 which always needs an offset is not
716 		 * worth the hassle
717 		 */
718 		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
719 	} else {
720 		/* 4-byte signed displacement */
721 		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
722 	}
723 	*pprog = prog;
724 }
725 
726 /*
727  * Emit a REX byte if it will be necessary to address these registers
728  */
729 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
730 {
731 	u8 *prog = *pprog;
732 
733 	if (is64)
734 		EMIT1(add_2mod(0x48, dst_reg, src_reg));
735 	else if (is_ereg(dst_reg) || is_ereg(src_reg))
736 		EMIT1(add_2mod(0x40, dst_reg, src_reg));
737 	*pprog = prog;
738 }
739 
740 /*
741  * Similar version of maybe_emit_mod() for a single register
742  */
743 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
744 {
745 	u8 *prog = *pprog;
746 
747 	if (is64)
748 		EMIT1(add_1mod(0x48, reg));
749 	else if (is_ereg(reg))
750 		EMIT1(add_1mod(0x40, reg));
751 	*pprog = prog;
752 }
753 
754 /* LDX: dst_reg = *(u8*)(src_reg + off) */
755 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
756 {
757 	u8 *prog = *pprog;
758 
759 	switch (size) {
760 	case BPF_B:
761 		/* Emit 'movzx rax, byte ptr [rax + off]' */
762 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
763 		break;
764 	case BPF_H:
765 		/* Emit 'movzx rax, word ptr [rax + off]' */
766 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
767 		break;
768 	case BPF_W:
769 		/* Emit 'mov eax, dword ptr [rax+0x14]' */
770 		if (is_ereg(dst_reg) || is_ereg(src_reg))
771 			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
772 		else
773 			EMIT1(0x8B);
774 		break;
775 	case BPF_DW:
776 		/* Emit 'mov rax, qword ptr [rax+0x14]' */
777 		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
778 		break;
779 	}
780 	emit_insn_suffix(&prog, src_reg, dst_reg, off);
781 	*pprog = prog;
782 }
783 
784 /* STX: *(u8*)(dst_reg + off) = src_reg */
785 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
786 {
787 	u8 *prog = *pprog;
788 
789 	switch (size) {
790 	case BPF_B:
791 		/* Emit 'mov byte ptr [rax + off], al' */
792 		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
793 			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
794 			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
795 		else
796 			EMIT1(0x88);
797 		break;
798 	case BPF_H:
799 		if (is_ereg(dst_reg) || is_ereg(src_reg))
800 			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
801 		else
802 			EMIT2(0x66, 0x89);
803 		break;
804 	case BPF_W:
805 		if (is_ereg(dst_reg) || is_ereg(src_reg))
806 			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
807 		else
808 			EMIT1(0x89);
809 		break;
810 	case BPF_DW:
811 		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
812 		break;
813 	}
814 	emit_insn_suffix(&prog, dst_reg, src_reg, off);
815 	*pprog = prog;
816 }
817 
818 static int emit_atomic(u8 **pprog, u8 atomic_op,
819 		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
820 {
821 	u8 *prog = *pprog;
822 
823 	EMIT1(0xF0); /* lock prefix */
824 
825 	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
826 
827 	/* emit opcode */
828 	switch (atomic_op) {
829 	case BPF_ADD:
830 	case BPF_AND:
831 	case BPF_OR:
832 	case BPF_XOR:
833 		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
834 		EMIT1(simple_alu_opcodes[atomic_op]);
835 		break;
836 	case BPF_ADD | BPF_FETCH:
837 		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
838 		EMIT2(0x0F, 0xC1);
839 		break;
840 	case BPF_XCHG:
841 		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
842 		EMIT1(0x87);
843 		break;
844 	case BPF_CMPXCHG:
845 		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
846 		EMIT2(0x0F, 0xB1);
847 		break;
848 	default:
849 		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
850 		return -EFAULT;
851 	}
852 
853 	emit_insn_suffix(&prog, dst_reg, src_reg, off);
854 
855 	*pprog = prog;
856 	return 0;
857 }
858 
859 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
860 {
861 	u32 reg = x->fixup >> 8;
862 
863 	/* jump over faulting load and clear dest register */
864 	*(unsigned long *)((void *)regs + reg) = 0;
865 	regs->ip += x->fixup & 0xff;
866 	return true;
867 }
868 
869 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
870 			     bool *regs_used, bool *tail_call_seen)
871 {
872 	int i;
873 
874 	for (i = 1; i <= insn_cnt; i++, insn++) {
875 		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
876 			*tail_call_seen = true;
877 		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
878 			regs_used[0] = true;
879 		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
880 			regs_used[1] = true;
881 		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
882 			regs_used[2] = true;
883 		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
884 			regs_used[3] = true;
885 	}
886 }
887 
888 static void emit_nops(u8 **pprog, int len)
889 {
890 	u8 *prog = *pprog;
891 	int i, noplen;
892 
893 	while (len > 0) {
894 		noplen = len;
895 
896 		if (noplen > ASM_NOP_MAX)
897 			noplen = ASM_NOP_MAX;
898 
899 		for (i = 0; i < noplen; i++)
900 			EMIT1(x86_nops[noplen][i]);
901 		len -= noplen;
902 	}
903 
904 	*pprog = prog;
905 }
906 
907 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
908 
909 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
910 		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
911 {
912 	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
913 	struct bpf_insn *insn = bpf_prog->insnsi;
914 	bool callee_regs_used[4] = {};
915 	int insn_cnt = bpf_prog->len;
916 	bool tail_call_seen = false;
917 	bool seen_exit = false;
918 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
919 	int i, excnt = 0;
920 	int ilen, proglen = 0;
921 	u8 *prog = temp;
922 	int err;
923 
924 	detect_reg_usage(insn, insn_cnt, callee_regs_used,
925 			 &tail_call_seen);
926 
927 	/* tail call's presence in current prog implies it is reachable */
928 	tail_call_reachable |= tail_call_seen;
929 
930 	emit_prologue(&prog, bpf_prog->aux->stack_depth,
931 		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
932 		      bpf_prog->aux->func_idx != 0);
933 	push_callee_regs(&prog, callee_regs_used);
934 
935 	ilen = prog - temp;
936 	if (rw_image)
937 		memcpy(rw_image + proglen, temp, ilen);
938 	proglen += ilen;
939 	addrs[0] = proglen;
940 	prog = temp;
941 
942 	for (i = 1; i <= insn_cnt; i++, insn++) {
943 		const s32 imm32 = insn->imm;
944 		u32 dst_reg = insn->dst_reg;
945 		u32 src_reg = insn->src_reg;
946 		u8 b2 = 0, b3 = 0;
947 		u8 *start_of_ldx;
948 		s64 jmp_offset;
949 		u8 jmp_cond;
950 		u8 *func;
951 		int nops;
952 
953 		switch (insn->code) {
954 			/* ALU */
955 		case BPF_ALU | BPF_ADD | BPF_X:
956 		case BPF_ALU | BPF_SUB | BPF_X:
957 		case BPF_ALU | BPF_AND | BPF_X:
958 		case BPF_ALU | BPF_OR | BPF_X:
959 		case BPF_ALU | BPF_XOR | BPF_X:
960 		case BPF_ALU64 | BPF_ADD | BPF_X:
961 		case BPF_ALU64 | BPF_SUB | BPF_X:
962 		case BPF_ALU64 | BPF_AND | BPF_X:
963 		case BPF_ALU64 | BPF_OR | BPF_X:
964 		case BPF_ALU64 | BPF_XOR | BPF_X:
965 			maybe_emit_mod(&prog, dst_reg, src_reg,
966 				       BPF_CLASS(insn->code) == BPF_ALU64);
967 			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
968 			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
969 			break;
970 
971 		case BPF_ALU64 | BPF_MOV | BPF_X:
972 		case BPF_ALU | BPF_MOV | BPF_X:
973 			emit_mov_reg(&prog,
974 				     BPF_CLASS(insn->code) == BPF_ALU64,
975 				     dst_reg, src_reg);
976 			break;
977 
978 			/* neg dst */
979 		case BPF_ALU | BPF_NEG:
980 		case BPF_ALU64 | BPF_NEG:
981 			maybe_emit_1mod(&prog, dst_reg,
982 					BPF_CLASS(insn->code) == BPF_ALU64);
983 			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
984 			break;
985 
986 		case BPF_ALU | BPF_ADD | BPF_K:
987 		case BPF_ALU | BPF_SUB | BPF_K:
988 		case BPF_ALU | BPF_AND | BPF_K:
989 		case BPF_ALU | BPF_OR | BPF_K:
990 		case BPF_ALU | BPF_XOR | BPF_K:
991 		case BPF_ALU64 | BPF_ADD | BPF_K:
992 		case BPF_ALU64 | BPF_SUB | BPF_K:
993 		case BPF_ALU64 | BPF_AND | BPF_K:
994 		case BPF_ALU64 | BPF_OR | BPF_K:
995 		case BPF_ALU64 | BPF_XOR | BPF_K:
996 			maybe_emit_1mod(&prog, dst_reg,
997 					BPF_CLASS(insn->code) == BPF_ALU64);
998 
999 			/*
1000 			 * b3 holds 'normal' opcode, b2 short form only valid
1001 			 * in case dst is eax/rax.
1002 			 */
1003 			switch (BPF_OP(insn->code)) {
1004 			case BPF_ADD:
1005 				b3 = 0xC0;
1006 				b2 = 0x05;
1007 				break;
1008 			case BPF_SUB:
1009 				b3 = 0xE8;
1010 				b2 = 0x2D;
1011 				break;
1012 			case BPF_AND:
1013 				b3 = 0xE0;
1014 				b2 = 0x25;
1015 				break;
1016 			case BPF_OR:
1017 				b3 = 0xC8;
1018 				b2 = 0x0D;
1019 				break;
1020 			case BPF_XOR:
1021 				b3 = 0xF0;
1022 				b2 = 0x35;
1023 				break;
1024 			}
1025 
1026 			if (is_imm8(imm32))
1027 				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1028 			else if (is_axreg(dst_reg))
1029 				EMIT1_off32(b2, imm32);
1030 			else
1031 				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1032 			break;
1033 
1034 		case BPF_ALU64 | BPF_MOV | BPF_K:
1035 		case BPF_ALU | BPF_MOV | BPF_K:
1036 			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1037 				       dst_reg, imm32);
1038 			break;
1039 
1040 		case BPF_LD | BPF_IMM | BPF_DW:
1041 			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1042 			insn++;
1043 			i++;
1044 			break;
1045 
1046 			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1047 		case BPF_ALU | BPF_MOD | BPF_X:
1048 		case BPF_ALU | BPF_DIV | BPF_X:
1049 		case BPF_ALU | BPF_MOD | BPF_K:
1050 		case BPF_ALU | BPF_DIV | BPF_K:
1051 		case BPF_ALU64 | BPF_MOD | BPF_X:
1052 		case BPF_ALU64 | BPF_DIV | BPF_X:
1053 		case BPF_ALU64 | BPF_MOD | BPF_K:
1054 		case BPF_ALU64 | BPF_DIV | BPF_K: {
1055 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1056 
1057 			if (dst_reg != BPF_REG_0)
1058 				EMIT1(0x50); /* push rax */
1059 			if (dst_reg != BPF_REG_3)
1060 				EMIT1(0x52); /* push rdx */
1061 
1062 			if (BPF_SRC(insn->code) == BPF_X) {
1063 				if (src_reg == BPF_REG_0 ||
1064 				    src_reg == BPF_REG_3) {
1065 					/* mov r11, src_reg */
1066 					EMIT_mov(AUX_REG, src_reg);
1067 					src_reg = AUX_REG;
1068 				}
1069 			} else {
1070 				/* mov r11, imm32 */
1071 				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1072 				src_reg = AUX_REG;
1073 			}
1074 
1075 			if (dst_reg != BPF_REG_0)
1076 				/* mov rax, dst_reg */
1077 				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1078 
1079 			/*
1080 			 * xor edx, edx
1081 			 * equivalent to 'xor rdx, rdx', but one byte less
1082 			 */
1083 			EMIT2(0x31, 0xd2);
1084 
1085 			/* div src_reg */
1086 			maybe_emit_1mod(&prog, src_reg, is64);
1087 			EMIT2(0xF7, add_1reg(0xF0, src_reg));
1088 
1089 			if (BPF_OP(insn->code) == BPF_MOD &&
1090 			    dst_reg != BPF_REG_3)
1091 				/* mov dst_reg, rdx */
1092 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1093 			else if (BPF_OP(insn->code) == BPF_DIV &&
1094 				 dst_reg != BPF_REG_0)
1095 				/* mov dst_reg, rax */
1096 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1097 
1098 			if (dst_reg != BPF_REG_3)
1099 				EMIT1(0x5A); /* pop rdx */
1100 			if (dst_reg != BPF_REG_0)
1101 				EMIT1(0x58); /* pop rax */
1102 			break;
1103 		}
1104 
1105 		case BPF_ALU | BPF_MUL | BPF_K:
1106 		case BPF_ALU64 | BPF_MUL | BPF_K:
1107 			maybe_emit_mod(&prog, dst_reg, dst_reg,
1108 				       BPF_CLASS(insn->code) == BPF_ALU64);
1109 
1110 			if (is_imm8(imm32))
1111 				/* imul dst_reg, dst_reg, imm8 */
1112 				EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1113 				      imm32);
1114 			else
1115 				/* imul dst_reg, dst_reg, imm32 */
1116 				EMIT2_off32(0x69,
1117 					    add_2reg(0xC0, dst_reg, dst_reg),
1118 					    imm32);
1119 			break;
1120 
1121 		case BPF_ALU | BPF_MUL | BPF_X:
1122 		case BPF_ALU64 | BPF_MUL | BPF_X:
1123 			maybe_emit_mod(&prog, src_reg, dst_reg,
1124 				       BPF_CLASS(insn->code) == BPF_ALU64);
1125 
1126 			/* imul dst_reg, src_reg */
1127 			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1128 			break;
1129 
1130 			/* Shifts */
1131 		case BPF_ALU | BPF_LSH | BPF_K:
1132 		case BPF_ALU | BPF_RSH | BPF_K:
1133 		case BPF_ALU | BPF_ARSH | BPF_K:
1134 		case BPF_ALU64 | BPF_LSH | BPF_K:
1135 		case BPF_ALU64 | BPF_RSH | BPF_K:
1136 		case BPF_ALU64 | BPF_ARSH | BPF_K:
1137 			maybe_emit_1mod(&prog, dst_reg,
1138 					BPF_CLASS(insn->code) == BPF_ALU64);
1139 
1140 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1141 			if (imm32 == 1)
1142 				EMIT2(0xD1, add_1reg(b3, dst_reg));
1143 			else
1144 				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1145 			break;
1146 
1147 		case BPF_ALU | BPF_LSH | BPF_X:
1148 		case BPF_ALU | BPF_RSH | BPF_X:
1149 		case BPF_ALU | BPF_ARSH | BPF_X:
1150 		case BPF_ALU64 | BPF_LSH | BPF_X:
1151 		case BPF_ALU64 | BPF_RSH | BPF_X:
1152 		case BPF_ALU64 | BPF_ARSH | BPF_X:
1153 
1154 			/* Check for bad case when dst_reg == rcx */
1155 			if (dst_reg == BPF_REG_4) {
1156 				/* mov r11, dst_reg */
1157 				EMIT_mov(AUX_REG, dst_reg);
1158 				dst_reg = AUX_REG;
1159 			}
1160 
1161 			if (src_reg != BPF_REG_4) { /* common case */
1162 				EMIT1(0x51); /* push rcx */
1163 
1164 				/* mov rcx, src_reg */
1165 				EMIT_mov(BPF_REG_4, src_reg);
1166 			}
1167 
1168 			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1169 			maybe_emit_1mod(&prog, dst_reg,
1170 					BPF_CLASS(insn->code) == BPF_ALU64);
1171 
1172 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1173 			EMIT2(0xD3, add_1reg(b3, dst_reg));
1174 
1175 			if (src_reg != BPF_REG_4)
1176 				EMIT1(0x59); /* pop rcx */
1177 
1178 			if (insn->dst_reg == BPF_REG_4)
1179 				/* mov dst_reg, r11 */
1180 				EMIT_mov(insn->dst_reg, AUX_REG);
1181 			break;
1182 
1183 		case BPF_ALU | BPF_END | BPF_FROM_BE:
1184 			switch (imm32) {
1185 			case 16:
1186 				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
1187 				EMIT1(0x66);
1188 				if (is_ereg(dst_reg))
1189 					EMIT1(0x41);
1190 				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1191 
1192 				/* Emit 'movzwl eax, ax' */
1193 				if (is_ereg(dst_reg))
1194 					EMIT3(0x45, 0x0F, 0xB7);
1195 				else
1196 					EMIT2(0x0F, 0xB7);
1197 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1198 				break;
1199 			case 32:
1200 				/* Emit 'bswap eax' to swap lower 4 bytes */
1201 				if (is_ereg(dst_reg))
1202 					EMIT2(0x41, 0x0F);
1203 				else
1204 					EMIT1(0x0F);
1205 				EMIT1(add_1reg(0xC8, dst_reg));
1206 				break;
1207 			case 64:
1208 				/* Emit 'bswap rax' to swap 8 bytes */
1209 				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1210 				      add_1reg(0xC8, dst_reg));
1211 				break;
1212 			}
1213 			break;
1214 
1215 		case BPF_ALU | BPF_END | BPF_FROM_LE:
1216 			switch (imm32) {
1217 			case 16:
1218 				/*
1219 				 * Emit 'movzwl eax, ax' to zero extend 16-bit
1220 				 * into 64 bit
1221 				 */
1222 				if (is_ereg(dst_reg))
1223 					EMIT3(0x45, 0x0F, 0xB7);
1224 				else
1225 					EMIT2(0x0F, 0xB7);
1226 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1227 				break;
1228 			case 32:
1229 				/* Emit 'mov eax, eax' to clear upper 32-bits */
1230 				if (is_ereg(dst_reg))
1231 					EMIT1(0x45);
1232 				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1233 				break;
1234 			case 64:
1235 				/* nop */
1236 				break;
1237 			}
1238 			break;
1239 
1240 			/* speculation barrier */
1241 		case BPF_ST | BPF_NOSPEC:
1242 			if (boot_cpu_has(X86_FEATURE_XMM2))
1243 				EMIT_LFENCE();
1244 			break;
1245 
1246 			/* ST: *(u8*)(dst_reg + off) = imm */
1247 		case BPF_ST | BPF_MEM | BPF_B:
1248 			if (is_ereg(dst_reg))
1249 				EMIT2(0x41, 0xC6);
1250 			else
1251 				EMIT1(0xC6);
1252 			goto st;
1253 		case BPF_ST | BPF_MEM | BPF_H:
1254 			if (is_ereg(dst_reg))
1255 				EMIT3(0x66, 0x41, 0xC7);
1256 			else
1257 				EMIT2(0x66, 0xC7);
1258 			goto st;
1259 		case BPF_ST | BPF_MEM | BPF_W:
1260 			if (is_ereg(dst_reg))
1261 				EMIT2(0x41, 0xC7);
1262 			else
1263 				EMIT1(0xC7);
1264 			goto st;
1265 		case BPF_ST | BPF_MEM | BPF_DW:
1266 			EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1267 
1268 st:			if (is_imm8(insn->off))
1269 				EMIT2(add_1reg(0x40, dst_reg), insn->off);
1270 			else
1271 				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1272 
1273 			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1274 			break;
1275 
1276 			/* STX: *(u8*)(dst_reg + off) = src_reg */
1277 		case BPF_STX | BPF_MEM | BPF_B:
1278 		case BPF_STX | BPF_MEM | BPF_H:
1279 		case BPF_STX | BPF_MEM | BPF_W:
1280 		case BPF_STX | BPF_MEM | BPF_DW:
1281 			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1282 			break;
1283 
1284 			/* LDX: dst_reg = *(u8*)(src_reg + off) */
1285 		case BPF_LDX | BPF_MEM | BPF_B:
1286 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1287 		case BPF_LDX | BPF_MEM | BPF_H:
1288 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1289 		case BPF_LDX | BPF_MEM | BPF_W:
1290 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1291 		case BPF_LDX | BPF_MEM | BPF_DW:
1292 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1293 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1294 				/* Though the verifier prevents negative insn->off in BPF_PROBE_MEM
1295 				 * add abs(insn->off) to the limit to make sure that negative
1296 				 * offset won't be an issue.
1297 				 * insn->off is s16, so it won't affect valid pointers.
1298 				 */
1299 				u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
1300 				u8 *end_of_jmp1, *end_of_jmp2;
1301 
1302 				/* Conservatively check that src_reg + insn->off is a kernel address:
1303 				 * 1. src_reg + insn->off >= limit
1304 				 * 2. src_reg + insn->off doesn't become small positive.
1305 				 * Cannot do src_reg + insn->off >= limit in one branch,
1306 				 * since it needs two spare registers, but JIT has only one.
1307 				 */
1308 
1309 				/* movabsq r11, limit */
1310 				EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
1311 				EMIT((u32)limit, 4);
1312 				EMIT(limit >> 32, 4);
1313 				/* cmp src_reg, r11 */
1314 				maybe_emit_mod(&prog, src_reg, AUX_REG, true);
1315 				EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
1316 				/* if unsigned '<' goto end_of_jmp2 */
1317 				EMIT2(X86_JB, 0);
1318 				end_of_jmp1 = prog;
1319 
1320 				/* mov r11, src_reg */
1321 				emit_mov_reg(&prog, true, AUX_REG, src_reg);
1322 				/* add r11, insn->off */
1323 				maybe_emit_1mod(&prog, AUX_REG, true);
1324 				EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
1325 				/* jmp if not carry to start_of_ldx
1326 				 * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
1327 				 * that has to be rejected.
1328 				 */
1329 				EMIT2(0x73 /* JNC */, 0);
1330 				end_of_jmp2 = prog;
1331 
1332 				/* xor dst_reg, dst_reg */
1333 				emit_mov_imm32(&prog, false, dst_reg, 0);
1334 				/* jmp byte_after_ldx */
1335 				EMIT2(0xEB, 0);
1336 
1337 				/* populate jmp_offset for JB above to jump to xor dst_reg */
1338 				end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
1339 				/* populate jmp_offset for JNC above to jump to start_of_ldx */
1340 				start_of_ldx = prog;
1341 				end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
1342 			}
1343 			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1344 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1345 				struct exception_table_entry *ex;
1346 				u8 *_insn = image + proglen + (start_of_ldx - temp);
1347 				s64 delta;
1348 
1349 				/* populate jmp_offset for JMP above */
1350 				start_of_ldx[-1] = prog - start_of_ldx;
1351 
1352 				if (!bpf_prog->aux->extable)
1353 					break;
1354 
1355 				if (excnt >= bpf_prog->aux->num_exentries) {
1356 					pr_err("ex gen bug\n");
1357 					return -EFAULT;
1358 				}
1359 				ex = &bpf_prog->aux->extable[excnt++];
1360 
1361 				delta = _insn - (u8 *)&ex->insn;
1362 				if (!is_simm32(delta)) {
1363 					pr_err("extable->insn doesn't fit into 32-bit\n");
1364 					return -EFAULT;
1365 				}
1366 				/* switch ex to rw buffer for writes */
1367 				ex = (void *)rw_image + ((void *)ex - (void *)image);
1368 
1369 				ex->insn = delta;
1370 
1371 				ex->data = EX_TYPE_BPF;
1372 
1373 				if (dst_reg > BPF_REG_9) {
1374 					pr_err("verifier error\n");
1375 					return -EFAULT;
1376 				}
1377 				/*
1378 				 * Compute size of x86 insn and its target dest x86 register.
1379 				 * ex_handler_bpf() will use lower 8 bits to adjust
1380 				 * pt_regs->ip to jump over this x86 instruction
1381 				 * and upper bits to figure out which pt_regs to zero out.
1382 				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1383 				 * of 4 bytes will be ignored and rbx will be zero inited.
1384 				 */
1385 				ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
1386 			}
1387 			break;
1388 
1389 		case BPF_STX | BPF_ATOMIC | BPF_W:
1390 		case BPF_STX | BPF_ATOMIC | BPF_DW:
1391 			if (insn->imm == (BPF_AND | BPF_FETCH) ||
1392 			    insn->imm == (BPF_OR | BPF_FETCH) ||
1393 			    insn->imm == (BPF_XOR | BPF_FETCH)) {
1394 				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1395 				u32 real_src_reg = src_reg;
1396 				u32 real_dst_reg = dst_reg;
1397 				u8 *branch_target;
1398 
1399 				/*
1400 				 * Can't be implemented with a single x86 insn.
1401 				 * Need to do a CMPXCHG loop.
1402 				 */
1403 
1404 				/* Will need RAX as a CMPXCHG operand so save R0 */
1405 				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1406 				if (src_reg == BPF_REG_0)
1407 					real_src_reg = BPF_REG_AX;
1408 				if (dst_reg == BPF_REG_0)
1409 					real_dst_reg = BPF_REG_AX;
1410 
1411 				branch_target = prog;
1412 				/* Load old value */
1413 				emit_ldx(&prog, BPF_SIZE(insn->code),
1414 					 BPF_REG_0, real_dst_reg, insn->off);
1415 				/*
1416 				 * Perform the (commutative) operation locally,
1417 				 * put the result in the AUX_REG.
1418 				 */
1419 				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1420 				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1421 				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1422 				      add_2reg(0xC0, AUX_REG, real_src_reg));
1423 				/* Attempt to swap in new value */
1424 				err = emit_atomic(&prog, BPF_CMPXCHG,
1425 						  real_dst_reg, AUX_REG,
1426 						  insn->off,
1427 						  BPF_SIZE(insn->code));
1428 				if (WARN_ON(err))
1429 					return err;
1430 				/*
1431 				 * ZF tells us whether we won the race. If it's
1432 				 * cleared we need to try again.
1433 				 */
1434 				EMIT2(X86_JNE, -(prog - branch_target) - 2);
1435 				/* Return the pre-modification value */
1436 				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1437 				/* Restore R0 after clobbering RAX */
1438 				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
1439 				break;
1440 			}
1441 
1442 			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1443 					  insn->off, BPF_SIZE(insn->code));
1444 			if (err)
1445 				return err;
1446 			break;
1447 
1448 			/* call */
1449 		case BPF_JMP | BPF_CALL:
1450 			func = (u8 *) __bpf_call_base + imm32;
1451 			if (tail_call_reachable) {
1452 				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
1453 				EMIT3_off32(0x48, 0x8B, 0x85,
1454 					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
1455 				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
1456 					return -EINVAL;
1457 			} else {
1458 				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1459 					return -EINVAL;
1460 			}
1461 			break;
1462 
1463 		case BPF_JMP | BPF_TAIL_CALL:
1464 			if (imm32)
1465 				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1466 							  &prog, image + addrs[i - 1],
1467 							  callee_regs_used,
1468 							  bpf_prog->aux->stack_depth,
1469 							  ctx);
1470 			else
1471 				emit_bpf_tail_call_indirect(&prog,
1472 							    callee_regs_used,
1473 							    bpf_prog->aux->stack_depth,
1474 							    image + addrs[i - 1],
1475 							    ctx);
1476 			break;
1477 
1478 			/* cond jump */
1479 		case BPF_JMP | BPF_JEQ | BPF_X:
1480 		case BPF_JMP | BPF_JNE | BPF_X:
1481 		case BPF_JMP | BPF_JGT | BPF_X:
1482 		case BPF_JMP | BPF_JLT | BPF_X:
1483 		case BPF_JMP | BPF_JGE | BPF_X:
1484 		case BPF_JMP | BPF_JLE | BPF_X:
1485 		case BPF_JMP | BPF_JSGT | BPF_X:
1486 		case BPF_JMP | BPF_JSLT | BPF_X:
1487 		case BPF_JMP | BPF_JSGE | BPF_X:
1488 		case BPF_JMP | BPF_JSLE | BPF_X:
1489 		case BPF_JMP32 | BPF_JEQ | BPF_X:
1490 		case BPF_JMP32 | BPF_JNE | BPF_X:
1491 		case BPF_JMP32 | BPF_JGT | BPF_X:
1492 		case BPF_JMP32 | BPF_JLT | BPF_X:
1493 		case BPF_JMP32 | BPF_JGE | BPF_X:
1494 		case BPF_JMP32 | BPF_JLE | BPF_X:
1495 		case BPF_JMP32 | BPF_JSGT | BPF_X:
1496 		case BPF_JMP32 | BPF_JSLT | BPF_X:
1497 		case BPF_JMP32 | BPF_JSGE | BPF_X:
1498 		case BPF_JMP32 | BPF_JSLE | BPF_X:
1499 			/* cmp dst_reg, src_reg */
1500 			maybe_emit_mod(&prog, dst_reg, src_reg,
1501 				       BPF_CLASS(insn->code) == BPF_JMP);
1502 			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1503 			goto emit_cond_jmp;
1504 
1505 		case BPF_JMP | BPF_JSET | BPF_X:
1506 		case BPF_JMP32 | BPF_JSET | BPF_X:
1507 			/* test dst_reg, src_reg */
1508 			maybe_emit_mod(&prog, dst_reg, src_reg,
1509 				       BPF_CLASS(insn->code) == BPF_JMP);
1510 			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1511 			goto emit_cond_jmp;
1512 
1513 		case BPF_JMP | BPF_JSET | BPF_K:
1514 		case BPF_JMP32 | BPF_JSET | BPF_K:
1515 			/* test dst_reg, imm32 */
1516 			maybe_emit_1mod(&prog, dst_reg,
1517 					BPF_CLASS(insn->code) == BPF_JMP);
1518 			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1519 			goto emit_cond_jmp;
1520 
1521 		case BPF_JMP | BPF_JEQ | BPF_K:
1522 		case BPF_JMP | BPF_JNE | BPF_K:
1523 		case BPF_JMP | BPF_JGT | BPF_K:
1524 		case BPF_JMP | BPF_JLT | BPF_K:
1525 		case BPF_JMP | BPF_JGE | BPF_K:
1526 		case BPF_JMP | BPF_JLE | BPF_K:
1527 		case BPF_JMP | BPF_JSGT | BPF_K:
1528 		case BPF_JMP | BPF_JSLT | BPF_K:
1529 		case BPF_JMP | BPF_JSGE | BPF_K:
1530 		case BPF_JMP | BPF_JSLE | BPF_K:
1531 		case BPF_JMP32 | BPF_JEQ | BPF_K:
1532 		case BPF_JMP32 | BPF_JNE | BPF_K:
1533 		case BPF_JMP32 | BPF_JGT | BPF_K:
1534 		case BPF_JMP32 | BPF_JLT | BPF_K:
1535 		case BPF_JMP32 | BPF_JGE | BPF_K:
1536 		case BPF_JMP32 | BPF_JLE | BPF_K:
1537 		case BPF_JMP32 | BPF_JSGT | BPF_K:
1538 		case BPF_JMP32 | BPF_JSLT | BPF_K:
1539 		case BPF_JMP32 | BPF_JSGE | BPF_K:
1540 		case BPF_JMP32 | BPF_JSLE | BPF_K:
1541 			/* test dst_reg, dst_reg to save one extra byte */
1542 			if (imm32 == 0) {
1543 				maybe_emit_mod(&prog, dst_reg, dst_reg,
1544 					       BPF_CLASS(insn->code) == BPF_JMP);
1545 				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1546 				goto emit_cond_jmp;
1547 			}
1548 
1549 			/* cmp dst_reg, imm8/32 */
1550 			maybe_emit_1mod(&prog, dst_reg,
1551 					BPF_CLASS(insn->code) == BPF_JMP);
1552 
1553 			if (is_imm8(imm32))
1554 				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1555 			else
1556 				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1557 
1558 emit_cond_jmp:		/* Convert BPF opcode to x86 */
1559 			switch (BPF_OP(insn->code)) {
1560 			case BPF_JEQ:
1561 				jmp_cond = X86_JE;
1562 				break;
1563 			case BPF_JSET:
1564 			case BPF_JNE:
1565 				jmp_cond = X86_JNE;
1566 				break;
1567 			case BPF_JGT:
1568 				/* GT is unsigned '>', JA in x86 */
1569 				jmp_cond = X86_JA;
1570 				break;
1571 			case BPF_JLT:
1572 				/* LT is unsigned '<', JB in x86 */
1573 				jmp_cond = X86_JB;
1574 				break;
1575 			case BPF_JGE:
1576 				/* GE is unsigned '>=', JAE in x86 */
1577 				jmp_cond = X86_JAE;
1578 				break;
1579 			case BPF_JLE:
1580 				/* LE is unsigned '<=', JBE in x86 */
1581 				jmp_cond = X86_JBE;
1582 				break;
1583 			case BPF_JSGT:
1584 				/* Signed '>', GT in x86 */
1585 				jmp_cond = X86_JG;
1586 				break;
1587 			case BPF_JSLT:
1588 				/* Signed '<', LT in x86 */
1589 				jmp_cond = X86_JL;
1590 				break;
1591 			case BPF_JSGE:
1592 				/* Signed '>=', GE in x86 */
1593 				jmp_cond = X86_JGE;
1594 				break;
1595 			case BPF_JSLE:
1596 				/* Signed '<=', LE in x86 */
1597 				jmp_cond = X86_JLE;
1598 				break;
1599 			default: /* to silence GCC warning */
1600 				return -EFAULT;
1601 			}
1602 			jmp_offset = addrs[i + insn->off] - addrs[i];
1603 			if (is_imm8(jmp_offset)) {
1604 				if (jmp_padding) {
1605 					/* To keep the jmp_offset valid, the extra bytes are
1606 					 * padded before the jump insn, so we subtract the
1607 					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
1608 					 *
1609 					 * If the previous pass already emits an imm8
1610 					 * jmp_cond, then this BPF insn won't shrink, so
1611 					 * "nops" is 0.
1612 					 *
1613 					 * On the other hand, if the previous pass emits an
1614 					 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
1615 					 * keep the image from shrinking further.
1616 					 *
1617 					 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
1618 					 *     is 2 bytes, so the size difference is 4 bytes.
1619 					 */
1620 					nops = INSN_SZ_DIFF - 2;
1621 					if (nops != 0 && nops != 4) {
1622 						pr_err("unexpected jmp_cond padding: %d bytes\n",
1623 						       nops);
1624 						return -EFAULT;
1625 					}
1626 					emit_nops(&prog, nops);
1627 				}
1628 				EMIT2(jmp_cond, jmp_offset);
1629 			} else if (is_simm32(jmp_offset)) {
1630 				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1631 			} else {
1632 				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1633 				return -EFAULT;
1634 			}
1635 
1636 			break;
1637 
1638 		case BPF_JMP | BPF_JA:
1639 			if (insn->off == -1)
1640 				/* -1 jmp instructions will always jump
1641 				 * backwards two bytes. Explicitly handling
1642 				 * this case avoids wasting too many passes
1643 				 * when there are long sequences of replaced
1644 				 * dead code.
1645 				 */
1646 				jmp_offset = -2;
1647 			else
1648 				jmp_offset = addrs[i + insn->off] - addrs[i];
1649 
1650 			if (!jmp_offset) {
1651 				/*
1652 				 * If jmp_padding is enabled, the extra nops will
1653 				 * be inserted. Otherwise, optimize out nop jumps.
1654 				 */
1655 				if (jmp_padding) {
1656 					/* There are 3 possible conditions.
1657 					 * (1) This BPF_JA is already optimized out in
1658 					 *     the previous run, so there is no need
1659 					 *     to pad any extra byte (0 byte).
1660 					 * (2) The previous pass emits an imm8 jmp,
1661 					 *     so we pad 2 bytes to match the previous
1662 					 *     insn size.
1663 					 * (3) Similarly, the previous pass emits an
1664 					 *     imm32 jmp, and 5 bytes is padded.
1665 					 */
1666 					nops = INSN_SZ_DIFF;
1667 					if (nops != 0 && nops != 2 && nops != 5) {
1668 						pr_err("unexpected nop jump padding: %d bytes\n",
1669 						       nops);
1670 						return -EFAULT;
1671 					}
1672 					emit_nops(&prog, nops);
1673 				}
1674 				break;
1675 			}
1676 emit_jmp:
1677 			if (is_imm8(jmp_offset)) {
1678 				if (jmp_padding) {
1679 					/* To avoid breaking jmp_offset, the extra bytes
1680 					 * are padded before the actual jmp insn, so
1681 					 * 2 bytes is subtracted from INSN_SZ_DIFF.
1682 					 *
1683 					 * If the previous pass already emits an imm8
1684 					 * jmp, there is nothing to pad (0 byte).
1685 					 *
1686 					 * If it emits an imm32 jmp (5 bytes) previously
1687 					 * and now an imm8 jmp (2 bytes), then we pad
1688 					 * (5 - 2 = 3) bytes to stop the image from
1689 					 * shrinking further.
1690 					 */
1691 					nops = INSN_SZ_DIFF - 2;
1692 					if (nops != 0 && nops != 3) {
1693 						pr_err("unexpected jump padding: %d bytes\n",
1694 						       nops);
1695 						return -EFAULT;
1696 					}
1697 					emit_nops(&prog, INSN_SZ_DIFF - 2);
1698 				}
1699 				EMIT2(0xEB, jmp_offset);
1700 			} else if (is_simm32(jmp_offset)) {
1701 				EMIT1_off32(0xE9, jmp_offset);
1702 			} else {
1703 				pr_err("jmp gen bug %llx\n", jmp_offset);
1704 				return -EFAULT;
1705 			}
1706 			break;
1707 
1708 		case BPF_JMP | BPF_EXIT:
1709 			if (seen_exit) {
1710 				jmp_offset = ctx->cleanup_addr - addrs[i];
1711 				goto emit_jmp;
1712 			}
1713 			seen_exit = true;
1714 			/* Update cleanup_addr */
1715 			ctx->cleanup_addr = proglen;
1716 			pop_callee_regs(&prog, callee_regs_used);
1717 			EMIT1(0xC9);         /* leave */
1718 			emit_return(&prog, image + addrs[i - 1] + (prog - temp));
1719 			break;
1720 
1721 		default:
1722 			/*
1723 			 * By design x86-64 JIT should support all BPF instructions.
1724 			 * This error will be seen if new instruction was added
1725 			 * to the interpreter, but not to the JIT, or if there is
1726 			 * junk in bpf_prog.
1727 			 */
1728 			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1729 			return -EINVAL;
1730 		}
1731 
1732 		ilen = prog - temp;
1733 		if (ilen > BPF_MAX_INSN_SIZE) {
1734 			pr_err("bpf_jit: fatal insn size error\n");
1735 			return -EFAULT;
1736 		}
1737 
1738 		if (image) {
1739 			/*
1740 			 * When populating the image, assert that:
1741 			 *
1742 			 *  i) We do not write beyond the allocated space, and
1743 			 * ii) addrs[i] did not change from the prior run, in order
1744 			 *     to validate assumptions made for computing branch
1745 			 *     displacements.
1746 			 */
1747 			if (unlikely(proglen + ilen > oldproglen ||
1748 				     proglen + ilen != addrs[i])) {
1749 				pr_err("bpf_jit: fatal error\n");
1750 				return -EFAULT;
1751 			}
1752 			memcpy(rw_image + proglen, temp, ilen);
1753 		}
1754 		proglen += ilen;
1755 		addrs[i] = proglen;
1756 		prog = temp;
1757 	}
1758 
1759 	if (image && excnt != bpf_prog->aux->num_exentries) {
1760 		pr_err("extable is not populated\n");
1761 		return -EFAULT;
1762 	}
1763 	return proglen;
1764 }
1765 
1766 static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1767 		      int stack_size)
1768 {
1769 	int i, j, arg_size, nr_regs;
1770 	/* Store function arguments to stack.
1771 	 * For a function that accepts two pointers the sequence will be:
1772 	 * mov QWORD PTR [rbp-0x10],rdi
1773 	 * mov QWORD PTR [rbp-0x8],rsi
1774 	 */
1775 	for (i = 0, j = 0; i < min(nr_args, 6); i++) {
1776 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
1777 			nr_regs = (m->arg_size[i] + 7) / 8;
1778 			arg_size = 8;
1779 		} else {
1780 			nr_regs = 1;
1781 			arg_size = m->arg_size[i];
1782 		}
1783 
1784 		while (nr_regs) {
1785 			emit_stx(prog, bytes_to_bpf_size(arg_size),
1786 				 BPF_REG_FP,
1787 				 j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
1788 				 -(stack_size - j * 8));
1789 			nr_regs--;
1790 			j++;
1791 		}
1792 	}
1793 }
1794 
1795 static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1796 			 int stack_size)
1797 {
1798 	int i, j, arg_size, nr_regs;
1799 
1800 	/* Restore function arguments from stack.
1801 	 * For a function that accepts two pointers the sequence will be:
1802 	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1803 	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1804 	 */
1805 	for (i = 0, j = 0; i < min(nr_args, 6); i++) {
1806 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
1807 			nr_regs = (m->arg_size[i] + 7) / 8;
1808 			arg_size = 8;
1809 		} else {
1810 			nr_regs = 1;
1811 			arg_size = m->arg_size[i];
1812 		}
1813 
1814 		while (nr_regs) {
1815 			emit_ldx(prog, bytes_to_bpf_size(arg_size),
1816 				 j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
1817 				 BPF_REG_FP,
1818 				 -(stack_size - j * 8));
1819 			nr_regs--;
1820 			j++;
1821 		}
1822 	}
1823 }
1824 
1825 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
1826 			   struct bpf_tramp_link *l, int stack_size,
1827 			   int run_ctx_off, bool save_ret)
1828 {
1829 	void (*exit)(struct bpf_prog *prog, u64 start,
1830 		     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_exit;
1831 	u64 (*enter)(struct bpf_prog *prog,
1832 		     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_enter;
1833 	u8 *prog = *pprog;
1834 	u8 *jmp_insn;
1835 	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
1836 	struct bpf_prog *p = l->link.prog;
1837 	u64 cookie = l->cookie;
1838 
1839 	/* mov rdi, cookie */
1840 	emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
1841 
1842 	/* Prepare struct bpf_tramp_run_ctx.
1843 	 *
1844 	 * bpf_tramp_run_ctx is already preserved by
1845 	 * arch_prepare_bpf_trampoline().
1846 	 *
1847 	 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
1848 	 */
1849 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
1850 
1851 	if (p->aux->sleepable) {
1852 		enter = __bpf_prog_enter_sleepable;
1853 		exit = __bpf_prog_exit_sleepable;
1854 	} else if (p->type == BPF_PROG_TYPE_STRUCT_OPS) {
1855 		enter = __bpf_prog_enter_struct_ops;
1856 		exit = __bpf_prog_exit_struct_ops;
1857 	} else if (p->expected_attach_type == BPF_LSM_CGROUP) {
1858 		enter = __bpf_prog_enter_lsm_cgroup;
1859 		exit = __bpf_prog_exit_lsm_cgroup;
1860 	}
1861 
1862 	/* arg1: mov rdi, progs[i] */
1863 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1864 	/* arg2: lea rsi, [rbp - ctx_cookie_off] */
1865 	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
1866 
1867 	if (emit_call(&prog, enter, prog))
1868 		return -EINVAL;
1869 	/* remember prog start time returned by __bpf_prog_enter */
1870 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1871 
1872 	/* if (__bpf_prog_enter*(prog) == 0)
1873 	 *	goto skip_exec_of_prog;
1874 	 */
1875 	EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
1876 	/* emit 2 nops that will be replaced with JE insn */
1877 	jmp_insn = prog;
1878 	emit_nops(&prog, 2);
1879 
1880 	/* arg1: lea rdi, [rbp - stack_size] */
1881 	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1882 	/* arg2: progs[i]->insnsi for interpreter */
1883 	if (!p->jited)
1884 		emit_mov_imm64(&prog, BPF_REG_2,
1885 			       (long) p->insnsi >> 32,
1886 			       (u32) (long) p->insnsi);
1887 	/* call JITed bpf program or interpreter */
1888 	if (emit_call(&prog, p->bpf_func, prog))
1889 		return -EINVAL;
1890 
1891 	/*
1892 	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
1893 	 * of the previous call which is then passed on the stack to
1894 	 * the next BPF program.
1895 	 *
1896 	 * BPF_TRAMP_FENTRY trampoline may need to return the return
1897 	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
1898 	 */
1899 	if (save_ret)
1900 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1901 
1902 	/* replace 2 nops with JE insn, since jmp target is known */
1903 	jmp_insn[0] = X86_JE;
1904 	jmp_insn[1] = prog - jmp_insn - 2;
1905 
1906 	/* arg1: mov rdi, progs[i] */
1907 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1908 	/* arg2: mov rsi, rbx <- start time in nsec */
1909 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1910 	/* arg3: lea rdx, [rbp - run_ctx_off] */
1911 	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
1912 	if (emit_call(&prog, exit, prog))
1913 		return -EINVAL;
1914 
1915 	*pprog = prog;
1916 	return 0;
1917 }
1918 
1919 static void emit_align(u8 **pprog, u32 align)
1920 {
1921 	u8 *target, *prog = *pprog;
1922 
1923 	target = PTR_ALIGN(prog, align);
1924 	if (target != prog)
1925 		emit_nops(&prog, target - prog);
1926 
1927 	*pprog = prog;
1928 }
1929 
1930 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
1931 {
1932 	u8 *prog = *pprog;
1933 	s64 offset;
1934 
1935 	offset = func - (ip + 2 + 4);
1936 	if (!is_simm32(offset)) {
1937 		pr_err("Target %p is out of range\n", func);
1938 		return -EINVAL;
1939 	}
1940 	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
1941 	*pprog = prog;
1942 	return 0;
1943 }
1944 
1945 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
1946 		      struct bpf_tramp_links *tl, int stack_size,
1947 		      int run_ctx_off, bool save_ret)
1948 {
1949 	int i;
1950 	u8 *prog = *pprog;
1951 
1952 	for (i = 0; i < tl->nr_links; i++) {
1953 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
1954 				    run_ctx_off, save_ret))
1955 			return -EINVAL;
1956 	}
1957 	*pprog = prog;
1958 	return 0;
1959 }
1960 
1961 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
1962 			      struct bpf_tramp_links *tl, int stack_size,
1963 			      int run_ctx_off, u8 **branches)
1964 {
1965 	u8 *prog = *pprog;
1966 	int i;
1967 
1968 	/* The first fmod_ret program will receive a garbage return value.
1969 	 * Set this to 0 to avoid confusing the program.
1970 	 */
1971 	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1972 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1973 	for (i = 0; i < tl->nr_links; i++) {
1974 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
1975 			return -EINVAL;
1976 
1977 		/* mod_ret prog stored return value into [rbp - 8]. Emit:
1978 		 * if (*(u64 *)(rbp - 8) !=  0)
1979 		 *	goto do_fexit;
1980 		 */
1981 		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
1982 		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
1983 
1984 		/* Save the location of the branch and Generate 6 nops
1985 		 * (4 bytes for an offset and 2 bytes for the jump) These nops
1986 		 * are replaced with a conditional jump once do_fexit (i.e. the
1987 		 * start of the fexit invocation) is finalized.
1988 		 */
1989 		branches[i] = prog;
1990 		emit_nops(&prog, 4 + 2);
1991 	}
1992 
1993 	*pprog = prog;
1994 	return 0;
1995 }
1996 
1997 /* Example:
1998  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1999  * its 'struct btf_func_model' will be nr_args=2
2000  * The assembly code when eth_type_trans is executing after trampoline:
2001  *
2002  * push rbp
2003  * mov rbp, rsp
2004  * sub rsp, 16                     // space for skb and dev
2005  * push rbx                        // temp regs to pass start time
2006  * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
2007  * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
2008  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2009  * mov rbx, rax                    // remember start time in bpf stats are enabled
2010  * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
2011  * call addr_of_jited_FENTRY_prog
2012  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2013  * mov rsi, rbx                    // prog start time
2014  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2015  * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
2016  * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
2017  * pop rbx
2018  * leave
2019  * ret
2020  *
2021  * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
2022  * replaced with 'call generated_bpf_trampoline'. When it returns
2023  * eth_type_trans will continue executing with original skb and dev pointers.
2024  *
2025  * The assembly code when eth_type_trans is called from trampoline:
2026  *
2027  * push rbp
2028  * mov rbp, rsp
2029  * sub rsp, 24                     // space for skb, dev, return value
2030  * push rbx                        // temp regs to pass start time
2031  * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
2032  * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
2033  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2034  * mov rbx, rax                    // remember start time if bpf stats are enabled
2035  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
2036  * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
2037  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2038  * mov rsi, rbx                    // prog start time
2039  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2040  * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
2041  * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
2042  * call eth_type_trans+5           // execute body of eth_type_trans
2043  * mov qword ptr [rbp - 8], rax    // save return value
2044  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2045  * mov rbx, rax                    // remember start time in bpf stats are enabled
2046  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
2047  * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
2048  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2049  * mov rsi, rbx                    // prog start time
2050  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2051  * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
2052  * pop rbx
2053  * leave
2054  * add rsp, 8                      // skip eth_type_trans's frame
2055  * ret                             // return to its caller
2056  */
2057 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
2058 				const struct btf_func_model *m, u32 flags,
2059 				struct bpf_tramp_links *tlinks,
2060 				void *func_addr)
2061 {
2062 	int ret, i, nr_args = m->nr_args, extra_nregs = 0;
2063 	int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off;
2064 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
2065 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
2066 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
2067 	void *orig_call = func_addr;
2068 	u8 **branches = NULL;
2069 	u8 *prog;
2070 	bool save_ret;
2071 
2072 	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
2073 	if (nr_args > 6)
2074 		return -ENOTSUPP;
2075 
2076 	for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
2077 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
2078 			extra_nregs += (m->arg_size[i] + 7) / 8 - 1;
2079 	}
2080 	if (nr_args + extra_nregs > 6)
2081 		return -ENOTSUPP;
2082 	stack_size += extra_nregs * 8;
2083 
2084 	/* Generated trampoline stack layout:
2085 	 *
2086 	 * RBP + 8         [ return address  ]
2087 	 * RBP + 0         [ RBP             ]
2088 	 *
2089 	 * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
2090 	 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
2091 	 *
2092 	 *                 [ reg_argN        ]  always
2093 	 *                 [ ...             ]
2094 	 * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
2095 	 *
2096 	 * RBP - args_off  [ arg regs count  ]  always
2097 	 *
2098 	 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
2099 	 *
2100 	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
2101 	 */
2102 
2103 	/* room for return value of orig_call or fentry prog */
2104 	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
2105 	if (save_ret)
2106 		stack_size += 8;
2107 
2108 	regs_off = stack_size;
2109 
2110 	/* args count  */
2111 	stack_size += 8;
2112 	args_off = stack_size;
2113 
2114 	if (flags & BPF_TRAMP_F_IP_ARG)
2115 		stack_size += 8; /* room for IP address argument */
2116 
2117 	ip_off = stack_size;
2118 
2119 	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
2120 	run_ctx_off = stack_size;
2121 
2122 	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
2123 		/* skip patched call instruction and point orig_call to actual
2124 		 * body of the kernel function.
2125 		 */
2126 		if (is_endbr(*(u32 *)orig_call))
2127 			orig_call += ENDBR_INSN_SIZE;
2128 		orig_call += X86_PATCH_SIZE;
2129 	}
2130 
2131 	prog = image;
2132 
2133 	EMIT_ENDBR();
2134 	EMIT1(0x55);		 /* push rbp */
2135 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
2136 	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
2137 	EMIT1(0x53);		 /* push rbx */
2138 
2139 	/* Store number of argument registers of the traced function:
2140 	 *   mov rax, nr_args + extra_nregs
2141 	 *   mov QWORD PTR [rbp - args_off], rax
2142 	 */
2143 	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args + extra_nregs);
2144 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);
2145 
2146 	if (flags & BPF_TRAMP_F_IP_ARG) {
2147 		/* Store IP address of the traced function:
2148 		 * movabsq rax, func_addr
2149 		 * mov QWORD PTR [rbp - ip_off], rax
2150 		 */
2151 		emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
2152 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
2153 	}
2154 
2155 	save_regs(m, &prog, nr_args, regs_off);
2156 
2157 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2158 		/* arg1: mov rdi, im */
2159 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2160 		if (emit_call(&prog, __bpf_tramp_enter, prog)) {
2161 			ret = -EINVAL;
2162 			goto cleanup;
2163 		}
2164 	}
2165 
2166 	if (fentry->nr_links)
2167 		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
2168 			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
2169 			return -EINVAL;
2170 
2171 	if (fmod_ret->nr_links) {
2172 		branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
2173 				   GFP_KERNEL);
2174 		if (!branches)
2175 			return -ENOMEM;
2176 
2177 		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
2178 				       run_ctx_off, branches)) {
2179 			ret = -EINVAL;
2180 			goto cleanup;
2181 		}
2182 	}
2183 
2184 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2185 		restore_regs(m, &prog, nr_args, regs_off);
2186 
2187 		if (flags & BPF_TRAMP_F_ORIG_STACK) {
2188 			emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
2189 			EMIT2(0xff, 0xd0); /* call *rax */
2190 		} else {
2191 			/* call original function */
2192 			if (emit_call(&prog, orig_call, prog)) {
2193 				ret = -EINVAL;
2194 				goto cleanup;
2195 			}
2196 		}
2197 		/* remember return value in a stack for bpf prog to access */
2198 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2199 		im->ip_after_call = prog;
2200 		memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
2201 		prog += X86_PATCH_SIZE;
2202 	}
2203 
2204 	if (fmod_ret->nr_links) {
2205 		/* From Intel 64 and IA-32 Architectures Optimization
2206 		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2207 		 * Coding Rule 11: All branch targets should be 16-byte
2208 		 * aligned.
2209 		 */
2210 		emit_align(&prog, 16);
2211 		/* Update the branches saved in invoke_bpf_mod_ret with the
2212 		 * aligned address of do_fexit.
2213 		 */
2214 		for (i = 0; i < fmod_ret->nr_links; i++)
2215 			emit_cond_near_jump(&branches[i], prog, branches[i],
2216 					    X86_JNE);
2217 	}
2218 
2219 	if (fexit->nr_links)
2220 		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) {
2221 			ret = -EINVAL;
2222 			goto cleanup;
2223 		}
2224 
2225 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
2226 		restore_regs(m, &prog, nr_args, regs_off);
2227 
2228 	/* This needs to be done regardless. If there were fmod_ret programs,
2229 	 * the return value is only updated on the stack and still needs to be
2230 	 * restored to R0.
2231 	 */
2232 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2233 		im->ip_epilogue = prog;
2234 		/* arg1: mov rdi, im */
2235 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2236 		if (emit_call(&prog, __bpf_tramp_exit, prog)) {
2237 			ret = -EINVAL;
2238 			goto cleanup;
2239 		}
2240 	}
2241 	/* restore return value of orig_call or fentry prog back into RAX */
2242 	if (save_ret)
2243 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2244 
2245 	EMIT1(0x5B); /* pop rbx */
2246 	EMIT1(0xC9); /* leave */
2247 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
2248 		/* skip our return address and return to parent */
2249 		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
2250 	emit_return(&prog, prog);
2251 	/* Make sure the trampoline generation logic doesn't overflow */
2252 	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
2253 		ret = -EFAULT;
2254 		goto cleanup;
2255 	}
2256 	ret = prog - (u8 *)image;
2257 
2258 cleanup:
2259 	kfree(branches);
2260 	return ret;
2261 }
2262 
2263 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
2264 {
2265 	u8 *jg_reloc, *prog = *pprog;
2266 	int pivot, err, jg_bytes = 1;
2267 	s64 jg_offset;
2268 
2269 	if (a == b) {
2270 		/* Leaf node of recursion, i.e. not a range of indices
2271 		 * anymore.
2272 		 */
2273 		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
2274 		if (!is_simm32(progs[a]))
2275 			return -1;
2276 		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
2277 			    progs[a]);
2278 		err = emit_cond_near_jump(&prog,	/* je func */
2279 					  (void *)progs[a], image + (prog - buf),
2280 					  X86_JE);
2281 		if (err)
2282 			return err;
2283 
2284 		emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
2285 
2286 		*pprog = prog;
2287 		return 0;
2288 	}
2289 
2290 	/* Not a leaf node, so we pivot, and recursively descend into
2291 	 * the lower and upper ranges.
2292 	 */
2293 	pivot = (b - a) / 2;
2294 	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
2295 	if (!is_simm32(progs[a + pivot]))
2296 		return -1;
2297 	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
2298 
2299 	if (pivot > 2) {				/* jg upper_part */
2300 		/* Require near jump. */
2301 		jg_bytes = 4;
2302 		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
2303 	} else {
2304 		EMIT2(X86_JG, 0);
2305 	}
2306 	jg_reloc = prog;
2307 
2308 	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
2309 				  progs, image, buf);
2310 	if (err)
2311 		return err;
2312 
2313 	/* From Intel 64 and IA-32 Architectures Optimization
2314 	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2315 	 * Coding Rule 11: All branch targets should be 16-byte
2316 	 * aligned.
2317 	 */
2318 	emit_align(&prog, 16);
2319 	jg_offset = prog - jg_reloc;
2320 	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
2321 
2322 	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
2323 				  b, progs, image, buf);
2324 	if (err)
2325 		return err;
2326 
2327 	*pprog = prog;
2328 	return 0;
2329 }
2330 
2331 static int cmp_ips(const void *a, const void *b)
2332 {
2333 	const s64 *ipa = a;
2334 	const s64 *ipb = b;
2335 
2336 	if (*ipa > *ipb)
2337 		return 1;
2338 	if (*ipa < *ipb)
2339 		return -1;
2340 	return 0;
2341 }
2342 
2343 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
2344 {
2345 	u8 *prog = buf;
2346 
2347 	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
2348 	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
2349 }
2350 
2351 struct x64_jit_data {
2352 	struct bpf_binary_header *rw_header;
2353 	struct bpf_binary_header *header;
2354 	int *addrs;
2355 	u8 *image;
2356 	int proglen;
2357 	struct jit_context ctx;
2358 };
2359 
2360 #define MAX_PASSES 20
2361 #define PADDING_PASSES (MAX_PASSES - 5)
2362 
2363 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2364 {
2365 	struct bpf_binary_header *rw_header = NULL;
2366 	struct bpf_binary_header *header = NULL;
2367 	struct bpf_prog *tmp, *orig_prog = prog;
2368 	struct x64_jit_data *jit_data;
2369 	int proglen, oldproglen = 0;
2370 	struct jit_context ctx = {};
2371 	bool tmp_blinded = false;
2372 	bool extra_pass = false;
2373 	bool padding = false;
2374 	u8 *rw_image = NULL;
2375 	u8 *image = NULL;
2376 	int *addrs;
2377 	int pass;
2378 	int i;
2379 
2380 	if (!prog->jit_requested)
2381 		return orig_prog;
2382 
2383 	tmp = bpf_jit_blind_constants(prog);
2384 	/*
2385 	 * If blinding was requested and we failed during blinding,
2386 	 * we must fall back to the interpreter.
2387 	 */
2388 	if (IS_ERR(tmp))
2389 		return orig_prog;
2390 	if (tmp != prog) {
2391 		tmp_blinded = true;
2392 		prog = tmp;
2393 	}
2394 
2395 	jit_data = prog->aux->jit_data;
2396 	if (!jit_data) {
2397 		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2398 		if (!jit_data) {
2399 			prog = orig_prog;
2400 			goto out;
2401 		}
2402 		prog->aux->jit_data = jit_data;
2403 	}
2404 	addrs = jit_data->addrs;
2405 	if (addrs) {
2406 		ctx = jit_data->ctx;
2407 		oldproglen = jit_data->proglen;
2408 		image = jit_data->image;
2409 		header = jit_data->header;
2410 		rw_header = jit_data->rw_header;
2411 		rw_image = (void *)rw_header + ((void *)image - (void *)header);
2412 		extra_pass = true;
2413 		padding = true;
2414 		goto skip_init_addrs;
2415 	}
2416 	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2417 	if (!addrs) {
2418 		prog = orig_prog;
2419 		goto out_addrs;
2420 	}
2421 
2422 	/*
2423 	 * Before first pass, make a rough estimation of addrs[]
2424 	 * each BPF instruction is translated to less than 64 bytes
2425 	 */
2426 	for (proglen = 0, i = 0; i <= prog->len; i++) {
2427 		proglen += 64;
2428 		addrs[i] = proglen;
2429 	}
2430 	ctx.cleanup_addr = proglen;
2431 skip_init_addrs:
2432 
2433 	/*
2434 	 * JITed image shrinks with every pass and the loop iterates
2435 	 * until the image stops shrinking. Very large BPF programs
2436 	 * may converge on the last pass. In such case do one more
2437 	 * pass to emit the final image.
2438 	 */
2439 	for (pass = 0; pass < MAX_PASSES || image; pass++) {
2440 		if (!padding && pass >= PADDING_PASSES)
2441 			padding = true;
2442 		proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
2443 		if (proglen <= 0) {
2444 out_image:
2445 			image = NULL;
2446 			if (header) {
2447 				bpf_arch_text_copy(&header->size, &rw_header->size,
2448 						   sizeof(rw_header->size));
2449 				bpf_jit_binary_pack_free(header, rw_header);
2450 			}
2451 			/* Fall back to interpreter mode */
2452 			prog = orig_prog;
2453 			if (extra_pass) {
2454 				prog->bpf_func = NULL;
2455 				prog->jited = 0;
2456 				prog->jited_len = 0;
2457 			}
2458 			goto out_addrs;
2459 		}
2460 		if (image) {
2461 			if (proglen != oldproglen) {
2462 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2463 				       proglen, oldproglen);
2464 				goto out_image;
2465 			}
2466 			break;
2467 		}
2468 		if (proglen == oldproglen) {
2469 			/*
2470 			 * The number of entries in extable is the number of BPF_LDX
2471 			 * insns that access kernel memory via "pointer to BTF type".
2472 			 * The verifier changed their opcode from LDX|MEM|size
2473 			 * to LDX|PROBE_MEM|size to make JITing easier.
2474 			 */
2475 			u32 align = __alignof__(struct exception_table_entry);
2476 			u32 extable_size = prog->aux->num_exentries *
2477 				sizeof(struct exception_table_entry);
2478 
2479 			/* allocate module memory for x86 insns and extable */
2480 			header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
2481 							   &image, align, &rw_header, &rw_image,
2482 							   jit_fill_hole);
2483 			if (!header) {
2484 				prog = orig_prog;
2485 				goto out_addrs;
2486 			}
2487 			prog->aux->extable = (void *) image + roundup(proglen, align);
2488 		}
2489 		oldproglen = proglen;
2490 		cond_resched();
2491 	}
2492 
2493 	if (bpf_jit_enable > 1)
2494 		bpf_jit_dump(prog->len, proglen, pass + 1, image);
2495 
2496 	if (image) {
2497 		if (!prog->is_func || extra_pass) {
2498 			/*
2499 			 * bpf_jit_binary_pack_finalize fails in two scenarios:
2500 			 *   1) header is not pointing to proper module memory;
2501 			 *   2) the arch doesn't support bpf_arch_text_copy().
2502 			 *
2503 			 * Both cases are serious bugs and justify WARN_ON.
2504 			 */
2505 			if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
2506 				/* header has been freed */
2507 				header = NULL;
2508 				goto out_image;
2509 			}
2510 
2511 			bpf_tail_call_direct_fixup(prog);
2512 		} else {
2513 			jit_data->addrs = addrs;
2514 			jit_data->ctx = ctx;
2515 			jit_data->proglen = proglen;
2516 			jit_data->image = image;
2517 			jit_data->header = header;
2518 			jit_data->rw_header = rw_header;
2519 		}
2520 		prog->bpf_func = (void *)image;
2521 		prog->jited = 1;
2522 		prog->jited_len = proglen;
2523 	} else {
2524 		prog = orig_prog;
2525 	}
2526 
2527 	if (!image || !prog->is_func || extra_pass) {
2528 		if (image)
2529 			bpf_prog_fill_jited_linfo(prog, addrs + 1);
2530 out_addrs:
2531 		kvfree(addrs);
2532 		kfree(jit_data);
2533 		prog->aux->jit_data = NULL;
2534 	}
2535 out:
2536 	if (tmp_blinded)
2537 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
2538 					   tmp : orig_prog);
2539 	return prog;
2540 }
2541 
2542 bool bpf_jit_supports_kfunc_call(void)
2543 {
2544 	return true;
2545 }
2546 
2547 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
2548 {
2549 	if (text_poke_copy(dst, src, len) == NULL)
2550 		return ERR_PTR(-EINVAL);
2551 	return dst;
2552 }
2553 
2554 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
2555 bool bpf_jit_supports_subprog_tailcalls(void)
2556 {
2557 	return true;
2558 }
2559 
2560 void bpf_jit_free(struct bpf_prog *prog)
2561 {
2562 	if (prog->jited) {
2563 		struct x64_jit_data *jit_data = prog->aux->jit_data;
2564 		struct bpf_binary_header *hdr;
2565 
2566 		/*
2567 		 * If we fail the final pass of JIT (from jit_subprogs),
2568 		 * the program may not be finalized yet. Call finalize here
2569 		 * before freeing it.
2570 		 */
2571 		if (jit_data) {
2572 			bpf_jit_binary_pack_finalize(prog, jit_data->header,
2573 						     jit_data->rw_header);
2574 			kvfree(jit_data->addrs);
2575 			kfree(jit_data);
2576 		}
2577 		hdr = bpf_jit_binary_pack_hdr(prog);
2578 		bpf_jit_binary_pack_free(hdr, NULL);
2579 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
2580 	}
2581 
2582 	bpf_prog_unlock_free(prog);
2583 }
2584