xref: /linux/arch/x86/net/bpf_jit_comp.c (revision 5832c4a77d6931cebf9ba737129ae8f14b66ee1d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * BPF JIT compiler
4  *
5  * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6  * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7  */
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bpf.h>
12 #include <linux/memory.h>
13 #include <linux/sort.h>
14 #include <asm/extable.h>
15 #include <asm/ftrace.h>
16 #include <asm/set_memory.h>
17 #include <asm/nospec-branch.h>
18 #include <asm/text-patching.h>
19 #include <asm/unwind.h>
20 #include <asm/cfi.h>
21 
22 static bool all_callee_regs_used[4] = {true, true, true, true};
23 
24 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
25 {
26 	if (len == 1)
27 		*ptr = bytes;
28 	else if (len == 2)
29 		*(u16 *)ptr = bytes;
30 	else {
31 		*(u32 *)ptr = bytes;
32 		barrier();
33 	}
34 	return ptr + len;
35 }
36 
37 #define EMIT(bytes, len) \
38 	do { prog = emit_code(prog, bytes, len); } while (0)
39 
40 #define EMIT1(b1)		EMIT(b1, 1)
41 #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
42 #define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
43 #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
44 
45 #define EMIT1_off32(b1, off) \
46 	do { EMIT1(b1); EMIT(off, 4); } while (0)
47 #define EMIT2_off32(b1, b2, off) \
48 	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
49 #define EMIT3_off32(b1, b2, b3, off) \
50 	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
51 #define EMIT4_off32(b1, b2, b3, b4, off) \
52 	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
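
/*
 * Illustrative example (not from the original source): the EMITn() macros
 * pack their arguments little-endian, so emit_code() writes the bytes in
 * argument order.  EMIT2(0x31, 0xC0) therefore emits the byte sequence
 * 31 c0, i.e. "xor eax, eax", and advances prog by two bytes.
 */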
53 
54 #ifdef CONFIG_X86_KERNEL_IBT
55 #define EMIT_ENDBR()		EMIT(gen_endbr(), 4)
56 #define EMIT_ENDBR_POISON()	EMIT(gen_endbr_poison(), 4)
57 #else
58 #define EMIT_ENDBR()
59 #define EMIT_ENDBR_POISON()
60 #endif
61 
62 static bool is_imm8(int value)
63 {
64 	return value <= 127 && value >= -128;
65 }
66 
67 static bool is_simm32(s64 value)
68 {
69 	return value == (s64)(s32)value;
70 }
71 
72 static bool is_uimm32(u64 value)
73 {
74 	return value == (u64)(u32)value;
75 }
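
/*
 * For illustration: is_imm8() accepts exactly the values that fit a signed
 * 8-bit displacement, e.g. is_imm8(127) and is_imm8(-128) are true while
 * is_imm8(128) is false.  Offsets in that range let emit_insn_suffix()
 * below use the short mod=01/disp8 addressing form instead of mod=10/disp32.
 */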
76 
77 /* mov dst, src */
78 #define EMIT_mov(DST, SRC)								 \
79 	do {										 \
80 		if (DST != SRC)								 \
81 			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
82 	} while (0)
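
/*
 * Worked example (illustrative only): EMIT_mov(BPF_REG_0, BPF_REG_6)
 * computes add_2mod(0x48, ...) == 0x48 (neither register needs a REX
 * extension bit) and add_2reg(0xC0, ...) == 0xC0 + 0 + (3 << 3) == 0xD8,
 * emitting 48 89 d8, i.e. "mov rax, rbx".
 */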
83 
84 static int bpf_size_to_x86_bytes(int bpf_size)
85 {
86 	if (bpf_size == BPF_W)
87 		return 4;
88 	else if (bpf_size == BPF_H)
89 		return 2;
90 	else if (bpf_size == BPF_B)
91 		return 1;
92 	else if (bpf_size == BPF_DW)
93 		return 4; /* imm32 */
94 	else
95 		return 0;
96 }
97 
98 /*
99  * List of x86 conditional jump opcodes (. + s8)
100  * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
101  */
102 #define X86_JB  0x72
103 #define X86_JAE 0x73
104 #define X86_JE  0x74
105 #define X86_JNE 0x75
106 #define X86_JBE 0x76
107 #define X86_JA  0x77
108 #define X86_JL  0x7C
109 #define X86_JGE 0x7D
110 #define X86_JLE 0x7E
111 #define X86_JG  0x7F
112 
113 /* Pick a register outside of BPF range for JIT internal work */
114 #define AUX_REG (MAX_BPF_JIT_REG + 1)
115 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
116 #define X86_REG_R12 (MAX_BPF_JIT_REG + 3)
117 
118 /*
119  * The following table maps BPF registers to x86-64 registers.
120  *
121  * x86-64 register R12 is not mapped to any BPF register: as a base address
122  * register in load/store instructions it always needs an extra byte of
123  * encoding and is callee saved. The JIT uses it as the arena base (X86_REG_R12).
124  *
125  * x86-64 register R9 is not used by BPF programs, but can be used by BPF
126  * trampoline. x86-64 register R10 is used for blinding (if enabled).
127  */
128 static const int reg2hex[] = {
129 	[BPF_REG_0] = 0,  /* RAX */
130 	[BPF_REG_1] = 7,  /* RDI */
131 	[BPF_REG_2] = 6,  /* RSI */
132 	[BPF_REG_3] = 2,  /* RDX */
133 	[BPF_REG_4] = 1,  /* RCX */
134 	[BPF_REG_5] = 0,  /* R8  */
135 	[BPF_REG_6] = 3,  /* RBX callee saved */
136 	[BPF_REG_7] = 5,  /* R13 callee saved */
137 	[BPF_REG_8] = 6,  /* R14 callee saved */
138 	[BPF_REG_9] = 7,  /* R15 callee saved */
139 	[BPF_REG_FP] = 5, /* RBP readonly */
140 	[BPF_REG_AX] = 2, /* R10 temp register */
141 	[AUX_REG] = 3,    /* R11 temp register */
142 	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
143 	[X86_REG_R12] = 4, /* R12 callee saved */
144 };
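
/*
 * Example mapping (illustrative): BPF_REG_1 lives in RDI, so reg2hex[]
 * yields 7 and no REX extension bit is needed; BPF_REG_8 lives in R14, so
 * reg2hex[] yields only the low three bits (6) and is_ereg() below reports
 * that a REX extension bit must be added to reach r8..r15.
 */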
145 
146 static const int reg2pt_regs[] = {
147 	[BPF_REG_0] = offsetof(struct pt_regs, ax),
148 	[BPF_REG_1] = offsetof(struct pt_regs, di),
149 	[BPF_REG_2] = offsetof(struct pt_regs, si),
150 	[BPF_REG_3] = offsetof(struct pt_regs, dx),
151 	[BPF_REG_4] = offsetof(struct pt_regs, cx),
152 	[BPF_REG_5] = offsetof(struct pt_regs, r8),
153 	[BPF_REG_6] = offsetof(struct pt_regs, bx),
154 	[BPF_REG_7] = offsetof(struct pt_regs, r13),
155 	[BPF_REG_8] = offsetof(struct pt_regs, r14),
156 	[BPF_REG_9] = offsetof(struct pt_regs, r15),
157 };
158 
159 /*
160  * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
161  * which need an extra byte of encoding (a REX prefix).
162  * rax,rcx,...,rbp have simpler encoding.
163  */
164 static bool is_ereg(u32 reg)
165 {
166 	return (1 << reg) & (BIT(BPF_REG_5) |
167 			     BIT(AUX_REG) |
168 			     BIT(BPF_REG_7) |
169 			     BIT(BPF_REG_8) |
170 			     BIT(BPF_REG_9) |
171 			     BIT(X86_REG_R9) |
172 			     BIT(X86_REG_R12) |
173 			     BIT(BPF_REG_AX));
174 }
175 
176 /*
177  * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
178  * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need an extra byte
179  * of encoding. al,cl,dl,bl have simpler encoding.
180  */
181 static bool is_ereg_8l(u32 reg)
182 {
183 	return is_ereg(reg) ||
184 	    (1 << reg) & (BIT(BPF_REG_1) |
185 			  BIT(BPF_REG_2) |
186 			  BIT(BPF_REG_FP));
187 }
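
/*
 * For illustration: a byte-sized store from BPF_REG_1 (RDI) must encode
 * %dil.  Without a REX prefix, ModRM reg value 7 in a byte operation
 * selects %bh instead, so emit_stx() adds the 0x40 prefix whenever
 * is_ereg_8l() is true, even though RDI itself needs no extension bit.
 */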
188 
189 static bool is_axreg(u32 reg)
190 {
191 	return reg == BPF_REG_0;
192 }
193 
194 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
195 static u8 add_1mod(u8 byte, u32 reg)
196 {
197 	if (is_ereg(reg))
198 		byte |= 1;
199 	return byte;
200 }
201 
202 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
203 {
204 	if (is_ereg(r1))
205 		byte |= 1;
206 	if (is_ereg(r2))
207 		byte |= 4;
208 	return byte;
209 }
210 
211 static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index)
212 {
213 	if (is_ereg(r1))
214 		byte |= 1;
215 	if (is_ereg(index))
216 		byte |= 2;
217 	if (is_ereg(r2))
218 		byte |= 4;
219 	return byte;
220 }
221 
222 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
223 static u8 add_1reg(u8 byte, u32 dst_reg)
224 {
225 	return byte + reg2hex[dst_reg];
226 }
227 
228 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
229 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
230 {
231 	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
232 }
233 
234 /* Some 1-byte opcodes for binary ALU operations */
235 static u8 simple_alu_opcodes[] = {
236 	[BPF_ADD] = 0x01,
237 	[BPF_SUB] = 0x29,
238 	[BPF_AND] = 0x21,
239 	[BPF_OR] = 0x09,
240 	[BPF_XOR] = 0x31,
241 	[BPF_LSH] = 0xE0,
242 	[BPF_RSH] = 0xE8,
243 	[BPF_ARSH] = 0xF8,
244 };
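
/*
 * Note (illustrative): the BPF_ADD..BPF_XOR entries are real opcode bytes,
 * e.g. BPF_ADD|BPF_X on 64-bit registers emits 48 01 d8 for "add rax, rbx".
 * The BPF_LSH/BPF_RSH/BPF_ARSH entries are instead ModRM bases (/4, /5, /7)
 * combined with the 0xC1/0xD1/0xD3 shift-group opcodes, see the
 * BPF_ALU*|BPF_*SH cases in do_jit().
 */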
245 
246 static void jit_fill_hole(void *area, unsigned int size)
247 {
248 	/* Fill whole space with INT3 instructions */
249 	memset(area, 0xcc, size);
250 }
251 
252 int bpf_arch_text_invalidate(void *dst, size_t len)
253 {
254 	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
255 }
256 
257 struct jit_context {
258 	int cleanup_addr; /* Epilogue code offset */
259 
260 	/*
261 	 * Program specific offsets of labels in the code; these rely on the
262 	 * JIT doing at least 2 passes, recording the position on the first
263 	 * pass, only to generate the correct offset on the second pass.
264 	 */
265 	int tail_call_direct_label;
266 	int tail_call_indirect_label;
267 };
268 
269 /* Maximum number of bytes emitted while JITing one eBPF insn */
270 #define BPF_MAX_INSN_SIZE	128
271 #define BPF_INSN_SAFETY		64
272 
273 /* Number of bytes emit_patch() needs to generate instructions */
274 #define X86_PATCH_SIZE		5
275 /* Number of bytes that will be skipped on tailcall */
276 #define X86_TAIL_CALL_OFFSET	(11 + ENDBR_INSN_SIZE)
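
/*
 * Illustrative breakdown for the usual (non-cBPF, non-exception-callback)
 * prologue emitted by emit_prologue(): the skipped bytes are the optional
 * ENDBR, the 5-byte patchable nop, the 2-byte "xor eax, eax" (or nop2),
 * "push rbp" (1 byte) and "mov rbp, rsp" (3 bytes):
 * 5 + 2 + 1 + 3 = 11, plus ENDBR_INSN_SIZE.
 */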
277 
278 static void push_r12(u8 **pprog)
279 {
280 	u8 *prog = *pprog;
281 
282 	EMIT2(0x41, 0x54);   /* push r12 */
283 	*pprog = prog;
284 }
285 
286 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
287 {
288 	u8 *prog = *pprog;
289 
290 	if (callee_regs_used[0])
291 		EMIT1(0x53);         /* push rbx */
292 	if (callee_regs_used[1])
293 		EMIT2(0x41, 0x55);   /* push r13 */
294 	if (callee_regs_used[2])
295 		EMIT2(0x41, 0x56);   /* push r14 */
296 	if (callee_regs_used[3])
297 		EMIT2(0x41, 0x57);   /* push r15 */
298 	*pprog = prog;
299 }
300 
301 static void pop_r12(u8 **pprog)
302 {
303 	u8 *prog = *pprog;
304 
305 	EMIT2(0x41, 0x5C);   /* pop r12 */
306 	*pprog = prog;
307 }
308 
309 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
310 {
311 	u8 *prog = *pprog;
312 
313 	if (callee_regs_used[3])
314 		EMIT2(0x41, 0x5F);   /* pop r15 */
315 	if (callee_regs_used[2])
316 		EMIT2(0x41, 0x5E);   /* pop r14 */
317 	if (callee_regs_used[1])
318 		EMIT2(0x41, 0x5D);   /* pop r13 */
319 	if (callee_regs_used[0])
320 		EMIT1(0x5B);         /* pop rbx */
321 	*pprog = prog;
322 }
323 
324 static void emit_nops(u8 **pprog, int len)
325 {
326 	u8 *prog = *pprog;
327 	int i, noplen;
328 
329 	while (len > 0) {
330 		noplen = len;
331 
332 		if (noplen > ASM_NOP_MAX)
333 			noplen = ASM_NOP_MAX;
334 
335 		for (i = 0; i < noplen; i++)
336 			EMIT1(x86_nops[noplen][i]);
337 		len -= noplen;
338 	}
339 
340 	*pprog = prog;
341 }
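
/*
 * For example, emit_nops(&prog, X86_PATCH_SIZE) emits one 5-byte long NOP
 * from x86_nops[5] rather than five single-byte 0x90s, since requests of up
 * to ASM_NOP_MAX bytes are satisfied with a single multi-byte NOP.
 */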
342 
343 /*
344  * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
345  * in arch/x86/kernel/alternative.c
346  */
347 
348 static void emit_fineibt(u8 **pprog, u32 hash)
349 {
350 	u8 *prog = *pprog;
351 
352 	EMIT_ENDBR();
353 	EMIT3_off32(0x41, 0x81, 0xea, hash);		/* subl $hash, %r10d	*/
354 	EMIT2(0x74, 0x07);				/* jz.d8 +7		*/
355 	EMIT2(0x0f, 0x0b);				/* ud2			*/
356 	EMIT1(0x90);					/* nop			*/
357 	EMIT_ENDBR_POISON();
358 
359 	*pprog = prog;
360 }
361 
362 static void emit_kcfi(u8 **pprog, u32 hash)
363 {
364 	u8 *prog = *pprog;
365 
366 	EMIT1_off32(0xb8, hash);			/* movl $hash, %eax	*/
367 #ifdef CONFIG_CALL_PADDING
368 	EMIT1(0x90);
369 	EMIT1(0x90);
370 	EMIT1(0x90);
371 	EMIT1(0x90);
372 	EMIT1(0x90);
373 	EMIT1(0x90);
374 	EMIT1(0x90);
375 	EMIT1(0x90);
376 	EMIT1(0x90);
377 	EMIT1(0x90);
378 	EMIT1(0x90);
379 #endif
380 	EMIT_ENDBR();
381 
382 	*pprog = prog;
383 }
384 
385 static void emit_cfi(u8 **pprog, u32 hash)
386 {
387 	u8 *prog = *pprog;
388 
389 	switch (cfi_mode) {
390 	case CFI_FINEIBT:
391 		emit_fineibt(&prog, hash);
392 		break;
393 
394 	case CFI_KCFI:
395 		emit_kcfi(&prog, hash);
396 		break;
397 
398 	default:
399 		EMIT_ENDBR();
400 		break;
401 	}
402 
403 	*pprog = prog;
404 }
405 
406 /*
407  * Emit x86-64 prologue code for BPF program.
408  * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
409  * while jumping to another program
410  */
411 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
412 			  bool tail_call_reachable, bool is_subprog,
413 			  bool is_exception_cb)
414 {
415 	u8 *prog = *pprog;
416 
417 	emit_cfi(&prog, is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash);
418 	/* BPF trampoline can be made to work without these nops,
419 	 * but let's waste 5 bytes for now and optimize later
420 	 */
421 	emit_nops(&prog, X86_PATCH_SIZE);
422 	if (!ebpf_from_cbpf) {
423 		if (tail_call_reachable && !is_subprog)
424 			/* When it's the entry of the whole tailcall context,
425 			 * zeroing rax means initialising tail_call_cnt.
426 			 */
427 			EMIT2(0x31, 0xC0); /* xor eax, eax */
428 		else
429 			/* Keep the same instruction layout. */
430 			EMIT2(0x66, 0x90); /* nop2 */
431 	}
432 	/* Exception callback receives FP as third parameter */
433 	if (is_exception_cb) {
434 		EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
435 		EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
436 		/* The main frame must have exception_boundary as true, so we
437 		 * first restore those callee-saved regs from stack, before
438 		 * reusing the stack frame.
439 		 */
440 		pop_callee_regs(&prog, all_callee_regs_used);
441 		pop_r12(&prog);
442 		/* Reset the stack frame. */
443 		EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
444 	} else {
445 		EMIT1(0x55);             /* push rbp */
446 		EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
447 	}
448 
449 	/* X86_TAIL_CALL_OFFSET is here */
450 	EMIT_ENDBR();
451 
452 	/* sub rsp, rounded_stack_depth */
453 	if (stack_depth)
454 		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
455 	if (tail_call_reachable)
456 		EMIT1(0x50);         /* push rax */
457 	*pprog = prog;
458 }
459 
460 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
461 {
462 	u8 *prog = *pprog;
463 	s64 offset;
464 
465 	offset = func - (ip + X86_PATCH_SIZE);
466 	if (!is_simm32(offset)) {
467 		pr_err("Target call %p is out of range\n", func);
468 		return -ERANGE;
469 	}
470 	EMIT1_off32(opcode, offset);
471 	*pprog = prog;
472 	return 0;
473 }
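
/*
 * Worked example (illustrative): the rel32 operand is relative to the end
 * of the 5-byte instruction.  Emitting a call to a target 100 bytes past
 * ip gives offset = 100 - 5 = 95, so emit_call() produces e8 5f 00 00 00.
 */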
474 
475 static int emit_call(u8 **pprog, void *func, void *ip)
476 {
477 	return emit_patch(pprog, func, ip, 0xE8);
478 }
479 
480 static int emit_rsb_call(u8 **pprog, void *func, void *ip)
481 {
482 	OPTIMIZER_HIDE_VAR(func);
483 	x86_call_depth_emit_accounting(pprog, func);
484 	return emit_patch(pprog, func, ip, 0xE8);
485 }
486 
487 static int emit_jump(u8 **pprog, void *func, void *ip)
488 {
489 	return emit_patch(pprog, func, ip, 0xE9);
490 }
491 
492 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
493 				void *old_addr, void *new_addr)
494 {
495 	const u8 *nop_insn = x86_nops[5];
496 	u8 old_insn[X86_PATCH_SIZE];
497 	u8 new_insn[X86_PATCH_SIZE];
498 	u8 *prog;
499 	int ret;
500 
501 	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
502 	if (old_addr) {
503 		prog = old_insn;
504 		ret = t == BPF_MOD_CALL ?
505 		      emit_call(&prog, old_addr, ip) :
506 		      emit_jump(&prog, old_addr, ip);
507 		if (ret)
508 			return ret;
509 	}
510 
511 	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
512 	if (new_addr) {
513 		prog = new_insn;
514 		ret = t == BPF_MOD_CALL ?
515 		      emit_call(&prog, new_addr, ip) :
516 		      emit_jump(&prog, new_addr, ip);
517 		if (ret)
518 			return ret;
519 	}
520 
521 	ret = -EBUSY;
522 	mutex_lock(&text_mutex);
523 	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
524 		goto out;
525 	ret = 1;
526 	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
527 		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
528 		ret = 0;
529 	}
530 out:
531 	mutex_unlock(&text_mutex);
532 	return ret;
533 }
534 
535 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
536 		       void *old_addr, void *new_addr)
537 {
538 	if (!is_kernel_text((long)ip) &&
539 	    !is_bpf_text_address((long)ip))
540 		/* BPF poking in modules is not supported */
541 		return -EINVAL;
542 
543 	/*
544 	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
545 	 * with an ENDBR instruction.
546 	 */
547 	if (is_endbr(*(u32 *)ip))
548 		ip += ENDBR_INSN_SIZE;
549 
550 	return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
551 }
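
/*
 * Usage sketch (illustrative): passing old_addr == NULL together with a
 * new_addr asks for the 5-byte NOP left by emit_prologue() to become a
 * call/jmp, e.g. bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, tramp)
 * installs "call tramp" at ip; swapping the two addresses removes it again.
 */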
552 
553 #define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)
554 
555 static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
556 {
557 	u8 *prog = *pprog;
558 
559 	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
560 		EMIT_LFENCE();
561 		EMIT2(0xFF, 0xE0 + reg);
562 	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
563 		OPTIMIZER_HIDE_VAR(reg);
564 		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
565 			emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
566 		else
567 			emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
568 	} else {
569 		EMIT2(0xFF, 0xE0 + reg);	/* jmp *%\reg */
570 		if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
571 			EMIT1(0xCC);		/* int3 */
572 	}
573 
574 	*pprog = prog;
575 }
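
/*
 * For illustration, with no retpoline mitigation enabled, reg == 1 (RCX, as
 * used by the tail-call paths below) emits ff e1, i.e. "jmp *%rcx"; the
 * retpoline variants replace that with a direct jump to the matching
 * per-register thunk.
 */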
576 
577 static void emit_return(u8 **pprog, u8 *ip)
578 {
579 	u8 *prog = *pprog;
580 
581 	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
582 		emit_jump(&prog, x86_return_thunk, ip);
583 	} else {
584 		EMIT1(0xC3);		/* ret */
585 		if (IS_ENABLED(CONFIG_MITIGATION_SLS))
586 			EMIT1(0xCC);	/* int3 */
587 	}
588 
589 	*pprog = prog;
590 }
591 
592 /*
593  * Generate the following code:
594  *
595  * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
596  *   if (index >= array->map.max_entries)
597  *     goto out;
598  *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
599  *     goto out;
600  *   prog = array->ptrs[index];
601  *   if (prog == NULL)
602  *     goto out;
603  *   goto *(prog->bpf_func + prologue_size);
604  * out:
605  */
606 static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
607 					u8 **pprog, bool *callee_regs_used,
608 					u32 stack_depth, u8 *ip,
609 					struct jit_context *ctx)
610 {
611 	int tcc_off = -4 - round_up(stack_depth, 8);
612 	u8 *prog = *pprog, *start = *pprog;
613 	int offset;
614 
615 	/*
616 	 * rdi - pointer to ctx
617 	 * rsi - pointer to bpf_array
618 	 * rdx - index in bpf_array
619 	 */
620 
621 	/*
622 	 * if (index >= array->map.max_entries)
623 	 *	goto out;
624 	 */
625 	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
626 	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
627 	      offsetof(struct bpf_array, map.max_entries));
628 
629 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
630 	EMIT2(X86_JBE, offset);                   /* jbe out */
631 
632 	/*
633 	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
634 	 *	goto out;
635 	 */
636 	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp + tcc_off] */
637 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
638 
639 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
640 	EMIT2(X86_JAE, offset);                   /* jae out */
641 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
642 	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp + tcc_off], eax */
643 
644 	/* prog = array->ptrs[index]; */
645 	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
646 		    offsetof(struct bpf_array, ptrs));
647 
648 	/*
649 	 * if (prog == NULL)
650 	 *	goto out;
651 	 */
652 	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
653 
654 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
655 	EMIT2(X86_JE, offset);                    /* je out */
656 
657 	if (bpf_prog->aux->exception_boundary) {
658 		pop_callee_regs(&prog, all_callee_regs_used);
659 		pop_r12(&prog);
660 	} else {
661 		pop_callee_regs(&prog, callee_regs_used);
662 		if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
663 			pop_r12(&prog);
664 	}
665 
666 	EMIT1(0x58);                              /* pop rax */
667 	if (stack_depth)
668 		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
669 			    round_up(stack_depth, 8));
670 
671 	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
672 	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
673 	      offsetof(struct bpf_prog, bpf_func));
674 	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
675 	      X86_TAIL_CALL_OFFSET);
676 	/*
677 	 * Now we're ready to jump into next BPF program
678 	 * rdi == ctx (1st arg)
679 	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
680 	 */
681 	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
682 
683 	/* out: */
684 	ctx->tail_call_indirect_label = prog - start;
685 	*pprog = prog;
686 }
687 
688 static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
689 				      struct bpf_jit_poke_descriptor *poke,
690 				      u8 **pprog, u8 *ip,
691 				      bool *callee_regs_used, u32 stack_depth,
692 				      struct jit_context *ctx)
693 {
694 	int tcc_off = -4 - round_up(stack_depth, 8);
695 	u8 *prog = *pprog, *start = *pprog;
696 	int offset;
697 
698 	/*
699 	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
700 	 *	goto out;
701 	 */
702 	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp + tcc_off] */
703 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
704 
705 	offset = ctx->tail_call_direct_label - (prog + 2 - start);
706 	EMIT2(X86_JAE, offset);                       /* jae out */
707 	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
708 	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp + tcc_off], eax */
709 
710 	poke->tailcall_bypass = ip + (prog - start);
711 	poke->adj_off = X86_TAIL_CALL_OFFSET;
712 	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
713 	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
714 
715 	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
716 		  poke->tailcall_bypass);
717 
718 	if (bpf_prog->aux->exception_boundary) {
719 		pop_callee_regs(&prog, all_callee_regs_used);
720 		pop_r12(&prog);
721 	} else {
722 		pop_callee_regs(&prog, callee_regs_used);
723 		if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
724 			pop_r12(&prog);
725 	}
726 
727 	EMIT1(0x58);                                  /* pop rax */
728 	if (stack_depth)
729 		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
730 
731 	emit_nops(&prog, X86_PATCH_SIZE);
732 
733 	/* out: */
734 	ctx->tail_call_direct_label = prog - start;
735 
736 	*pprog = prog;
737 }
738 
739 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
740 {
741 	struct bpf_jit_poke_descriptor *poke;
742 	struct bpf_array *array;
743 	struct bpf_prog *target;
744 	int i, ret;
745 
746 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
747 		poke = &prog->aux->poke_tab[i];
748 		if (poke->aux && poke->aux != prog->aux)
749 			continue;
750 
751 		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
752 
753 		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
754 			continue;
755 
756 		array = container_of(poke->tail_call.map, struct bpf_array, map);
757 		mutex_lock(&array->aux->poke_mutex);
758 		target = array->ptrs[poke->tail_call.key];
759 		if (target) {
760 			ret = __bpf_arch_text_poke(poke->tailcall_target,
761 						   BPF_MOD_JUMP, NULL,
762 						   (u8 *)target->bpf_func +
763 						   poke->adj_off);
764 			BUG_ON(ret < 0);
765 			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
766 						   BPF_MOD_JUMP,
767 						   (u8 *)poke->tailcall_target +
768 						   X86_PATCH_SIZE, NULL);
769 			BUG_ON(ret < 0);
770 		}
771 		WRITE_ONCE(poke->tailcall_target_stable, true);
772 		mutex_unlock(&array->aux->poke_mutex);
773 	}
774 }
775 
776 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
777 			   u32 dst_reg, const u32 imm32)
778 {
779 	u8 *prog = *pprog;
780 	u8 b1, b2, b3;
781 
782 	/*
783 	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
784 	 * (which zero-extends imm32) to save 2 bytes.
785 	 */
786 	if (sign_propagate && (s32)imm32 < 0) {
787 		/* 'mov %rax, imm32' sign extends imm32 */
788 		b1 = add_1mod(0x48, dst_reg);
789 		b2 = 0xC7;
790 		b3 = 0xC0;
791 		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
792 		goto done;
793 	}
794 
795 	/*
796 	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
797 	 * to save 3 bytes.
798 	 */
799 	if (imm32 == 0) {
800 		if (is_ereg(dst_reg))
801 			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
802 		b2 = 0x31; /* xor */
803 		b3 = 0xC0;
804 		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
805 		goto done;
806 	}
807 
808 	/* mov %eax, imm32 */
809 	if (is_ereg(dst_reg))
810 		EMIT1(add_1mod(0x40, dst_reg));
811 	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
812 done:
813 	*pprog = prog;
814 }
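
/*
 * Illustrative encodings: with dst_reg == BPF_REG_1 (RDI), imm32 == 0 gives
 * "xor edi, edi" (31 ff), a positive imm32 gives "mov edi, imm32"
 * (bf imm32), and with sign_propagate a negative imm32 gives the REX.W form
 * "mov rdi, imm32" (48 c7 c7 imm32), which sign-extends into the full register.
 */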
815 
816 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
817 			   const u32 imm32_hi, const u32 imm32_lo)
818 {
819 	u8 *prog = *pprog;
820 
821 	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
822 		/*
823 		 * For emitting plain u32, where sign bit must not be
824 		 * propagated LLVM tends to load imm64 over mov32
825 		 * directly, so save couple of bytes by just doing
826 		 * 'mov %eax, imm32' instead.
827 		 */
828 		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
829 	} else {
830 		/* movabsq rax, imm64 */
831 		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
832 		EMIT(imm32_lo, 4);
833 		EMIT(imm32_hi, 4);
834 	}
835 
836 	*pprog = prog;
837 }
838 
839 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
840 {
841 	u8 *prog = *pprog;
842 
843 	if (is64) {
844 		/* mov dst, src */
845 		EMIT_mov(dst_reg, src_reg);
846 	} else {
847 		/* mov32 dst, src */
848 		if (is_ereg(dst_reg) || is_ereg(src_reg))
849 			EMIT1(add_2mod(0x40, dst_reg, src_reg));
850 		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
851 	}
852 
853 	*pprog = prog;
854 }
855 
856 static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
857 			   u32 src_reg)
858 {
859 	u8 *prog = *pprog;
860 
861 	if (is64) {
862 		/* movs[b,w,l]q dst, src */
863 		if (num_bits == 8)
864 			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
865 			      add_2reg(0xC0, src_reg, dst_reg));
866 		else if (num_bits == 16)
867 			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
868 			      add_2reg(0xC0, src_reg, dst_reg));
869 		else if (num_bits == 32)
870 			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
871 			      add_2reg(0xC0, src_reg, dst_reg));
872 	} else {
873 		/* movs[b,w]l dst, src */
874 		if (num_bits == 8) {
875 			EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
876 			      add_2reg(0xC0, src_reg, dst_reg));
877 		} else if (num_bits == 16) {
878 			if (is_ereg(dst_reg) || is_ereg(src_reg))
879 				EMIT1(add_2mod(0x40, src_reg, dst_reg));
880 			EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
881 			      add_2reg(0xC0, src_reg, dst_reg));
882 		}
883 	}
884 
885 	*pprog = prog;
886 }
887 
888 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
889 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
890 {
891 	u8 *prog = *pprog;
892 
893 	if (is_imm8(off)) {
894 		/* 1-byte signed displacement.
895 		 *
896 		 * If off == 0 we could skip this and save one byte, but the
897 		 * special case of x86 R13, which always needs an offset, is not
898 		 * worth the hassle.
899 		 */
900 		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
901 	} else {
902 		/* 4-byte signed displacement */
903 		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
904 	}
905 	*pprog = prog;
906 }
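
/*
 * Worked example (illustrative): for ptr_reg == BPF_REG_1 (RDI),
 * val_reg == BPF_REG_0 (RAX) and off == 16, the offset fits in imm8, so
 * this emits ModRM 0x47 (mod=01, reg=rax, rm=rdi) followed by disp8 0x10;
 * together with the 0x8B opcode from emit_ldx() that forms 8b 47 10,
 * i.e. "mov eax, dword ptr [rdi + 16]".
 */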
907 
908 static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off)
909 {
910 	u8 *prog = *pprog;
911 
912 	if (is_imm8(off)) {
913 		EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
914 	} else {
915 		EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
916 	}
917 	*pprog = prog;
918 }
919 
920 /*
921  * Emit a REX byte if it will be necessary to address these registers
922  */
923 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
924 {
925 	u8 *prog = *pprog;
926 
927 	if (is64)
928 		EMIT1(add_2mod(0x48, dst_reg, src_reg));
929 	else if (is_ereg(dst_reg) || is_ereg(src_reg))
930 		EMIT1(add_2mod(0x40, dst_reg, src_reg));
931 	*pprog = prog;
932 }
933 
934 /*
935  * Similar version of maybe_emit_mod() for a single register
936  */
937 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
938 {
939 	u8 *prog = *pprog;
940 
941 	if (is64)
942 		EMIT1(add_1mod(0x48, reg));
943 	else if (is_ereg(reg))
944 		EMIT1(add_1mod(0x40, reg));
945 	*pprog = prog;
946 }
947 
948 /* LDX: dst_reg = *(u8*)(src_reg + off) */
949 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
950 {
951 	u8 *prog = *pprog;
952 
953 	switch (size) {
954 	case BPF_B:
955 		/* Emit 'movzx rax, byte ptr [rax + off]' */
956 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
957 		break;
958 	case BPF_H:
959 		/* Emit 'movzx rax, word ptr [rax + off]' */
960 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
961 		break;
962 	case BPF_W:
963 		/* Emit 'mov eax, dword ptr [rax+0x14]' */
964 		if (is_ereg(dst_reg) || is_ereg(src_reg))
965 			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
966 		else
967 			EMIT1(0x8B);
968 		break;
969 	case BPF_DW:
970 		/* Emit 'mov rax, qword ptr [rax+0x14]' */
971 		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
972 		break;
973 	}
974 	emit_insn_suffix(&prog, src_reg, dst_reg, off);
975 	*pprog = prog;
976 }
977 
978 /* LDSX: dst_reg = *(s8*)(src_reg + off) */
979 static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
980 {
981 	u8 *prog = *pprog;
982 
983 	switch (size) {
984 	case BPF_B:
985 		/* Emit 'movsx rax, byte ptr [rax + off]' */
986 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
987 		break;
988 	case BPF_H:
989 		/* Emit 'movsx rax, word ptr [rax + off]' */
990 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
991 		break;
992 	case BPF_W:
993 		/* Emit 'movsx rax, dword ptr [rax+0x14]' */
994 		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
995 		break;
996 	}
997 	emit_insn_suffix(&prog, src_reg, dst_reg, off);
998 	*pprog = prog;
999 }
1000 
1001 static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1002 {
1003 	u8 *prog = *pprog;
1004 
1005 	switch (size) {
1006 	case BPF_B:
1007 		/* movzx rax, byte ptr [rax + r12 + off] */
1008 		EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6);
1009 		break;
1010 	case BPF_H:
1011 		/* movzx rax, word ptr [rax + r12 + off] */
1012 		EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7);
1013 		break;
1014 	case BPF_W:
1015 		/* mov eax, dword ptr [rax + r12 + off] */
1016 		EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B);
1017 		break;
1018 	case BPF_DW:
1019 		/* mov rax, qword ptr [rax + r12 + off] */
1020 		EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B);
1021 		break;
1022 	}
1023 	emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
1024 	*pprog = prog;
1025 }
1026 
1027 static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1028 {
1029 	emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
1030 }
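
/*
 * Worked example (illustrative): emit_ldx_r12(&prog, BPF_DW, BPF_REG_0,
 * BPF_REG_1, 8) produces 4a 8b 44 27 08 - REX.WX (0x4a), opcode 0x8b,
 * ModRM 0x44 (mod=01, reg=rax, rm=SIB), SIB 0x27 (scale=1, index=r12,
 * base=rdi) and disp8 8, i.e. "mov rax, qword ptr [rdi + r12 + 8]".
 */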
1031 
1032 /* STX: *(u8*)(dst_reg + off) = src_reg */
1033 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1034 {
1035 	u8 *prog = *pprog;
1036 
1037 	switch (size) {
1038 	case BPF_B:
1039 		/* Emit 'mov byte ptr [rax + off], al' */
1040 		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
1041 			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
1042 			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
1043 		else
1044 			EMIT1(0x88);
1045 		break;
1046 	case BPF_H:
1047 		if (is_ereg(dst_reg) || is_ereg(src_reg))
1048 			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
1049 		else
1050 			EMIT2(0x66, 0x89);
1051 		break;
1052 	case BPF_W:
1053 		if (is_ereg(dst_reg) || is_ereg(src_reg))
1054 			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
1055 		else
1056 			EMIT1(0x89);
1057 		break;
1058 	case BPF_DW:
1059 		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
1060 		break;
1061 	}
1062 	emit_insn_suffix(&prog, dst_reg, src_reg, off);
1063 	*pprog = prog;
1064 }
1065 
1066 /* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */
1067 static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1068 {
1069 	u8 *prog = *pprog;
1070 
1071 	switch (size) {
1072 	case BPF_B:
1073 		/* mov byte ptr [rax + r12 + off], al */
1074 		EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88);
1075 		break;
1076 	case BPF_H:
1077 		/* mov word ptr [rax + r12 + off], ax */
1078 		EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
1079 		break;
1080 	case BPF_W:
1081 		/* mov dword ptr [rax + r12 + off], eax */
1082 		EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
1083 		break;
1084 	case BPF_DW:
1085 		/* mov qword ptr [rax + r12 + off], rax */
1086 		EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89);
1087 		break;
1088 	}
1089 	emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1090 	*pprog = prog;
1091 }
1092 
1093 static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1094 {
1095 	emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
1096 }
1097 
1098 /* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */
1099 static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm)
1100 {
1101 	u8 *prog = *pprog;
1102 
1103 	switch (size) {
1104 	case BPF_B:
1105 		/* mov byte ptr [rax + r12 + off], imm8 */
1106 		EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6);
1107 		break;
1108 	case BPF_H:
1109 		/* mov word ptr [rax + r12 + off], imm16 */
1110 		EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
1111 		break;
1112 	case BPF_W:
1113 		/* mov dword ptr [rax + r12 + off], imm32 */
1114 		EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
1115 		break;
1116 	case BPF_DW:
1117 		/* mov qword ptr [rax + r12 + off], imm32 */
1118 		EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7);
1119 		break;
1120 	}
1121 	emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off);
1122 	EMIT(imm, bpf_size_to_x86_bytes(size));
1123 	*pprog = prog;
1124 }
1125 
1126 static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
1127 {
1128 	emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
1129 }
1130 
1131 static int emit_atomic(u8 **pprog, u8 atomic_op,
1132 		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
1133 {
1134 	u8 *prog = *pprog;
1135 
1136 	EMIT1(0xF0); /* lock prefix */
1137 
1138 	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
1139 
1140 	/* emit opcode */
1141 	switch (atomic_op) {
1142 	case BPF_ADD:
1143 	case BPF_AND:
1144 	case BPF_OR:
1145 	case BPF_XOR:
1146 		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
1147 		EMIT1(simple_alu_opcodes[atomic_op]);
1148 		break;
1149 	case BPF_ADD | BPF_FETCH:
1150 		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
1151 		EMIT2(0x0F, 0xC1);
1152 		break;
1153 	case BPF_XCHG:
1154 		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
1155 		EMIT1(0x87);
1156 		break;
1157 	case BPF_CMPXCHG:
1158 		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
1159 		EMIT2(0x0F, 0xB1);
1160 		break;
1161 	default:
1162 		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1163 		return -EFAULT;
1164 	}
1165 
1166 	emit_insn_suffix(&prog, dst_reg, src_reg, off);
1167 
1168 	*pprog = prog;
1169 	return 0;
1170 }
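
/*
 * Worked example (illustrative): emit_atomic(&prog, BPF_ADD, BPF_REG_1,
 * BPF_REG_2, 0, BPF_DW) produces f0 48 01 77 00, i.e.
 * "lock add qword ptr [rdi + 0], rsi".
 */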
1171 
1172 #define DONT_CLEAR 1
1173 
1174 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
1175 {
1176 	u32 reg = x->fixup >> 8;
1177 
1178 	/* jump over faulting load and clear dest register */
1179 	if (reg != DONT_CLEAR)
1180 		*(unsigned long *)((void *)regs + reg) = 0;
1181 	regs->ip += x->fixup & 0xff;
1182 	return true;
1183 }
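
/*
 * Example fixup (illustrative): a faulting 4-byte "mov rbx, qword ptr
 * [rax + 0x14]" gets fixup = 4 | (offsetof(struct pt_regs, bx) << 8), so
 * this handler zeroes regs->bx and advances regs->ip by 4, skipping the load.
 */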
1184 
1185 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
1186 			     bool *regs_used, bool *tail_call_seen)
1187 {
1188 	int i;
1189 
1190 	for (i = 1; i <= insn_cnt; i++, insn++) {
1191 		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
1192 			*tail_call_seen = true;
1193 		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
1194 			regs_used[0] = true;
1195 		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
1196 			regs_used[1] = true;
1197 		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
1198 			regs_used[2] = true;
1199 		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
1200 			regs_used[3] = true;
1201 	}
1202 }
1203 
1204 /* emit the 3-byte VEX prefix
1205  *
1206  * r: same as rex.r, extra bit for ModRM reg field
1207  * x: same as rex.x, extra bit for SIB index field
1208  * b: same as rex.b, extra bit for ModRM r/m, or SIB base
1209  * m: opcode map select, encoding escape bytes e.g. 0x0f38
1210  * w: same as rex.w (32 bit or 64 bit) or opcode specific
1211  * src_reg2: additional source reg (encoded as BPF reg)
1212  * l: vector length (128 bit or 256 bit) or reserved
1213  * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
1214  */
1215 static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
1216 		      bool w, u8 src_reg2, bool l, u8 pp)
1217 {
1218 	u8 *prog = *pprog;
1219 	const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
1220 	u8 b1, b2;
1221 	u8 vvvv = reg2hex[src_reg2];
1222 
1223 	/* reg2hex gives only the lower 3 bits of vvvv */
1224 	if (is_ereg(src_reg2))
1225 		vvvv |= 1 << 3;
1226 
1227 	/*
1228 	 * 2nd byte of 3-byte VEX prefix
1229 	 * ~ means bit inverted encoding
1230 	 *
1231 	 *    7                           0
1232 	 *  +---+---+---+---+---+---+---+---+
1233 	 *  |~R |~X |~B |         m         |
1234 	 *  +---+---+---+---+---+---+---+---+
1235 	 */
1236 	b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
1237 	/*
1238 	 * 3rd byte of 3-byte VEX prefix
1239 	 *
1240 	 *    7                           0
1241 	 *  +---+---+---+---+---+---+---+---+
1242 	 *  | W |     ~vvvv     | L |   pp  |
1243 	 *  +---+---+---+---+---+---+---+---+
1244 	 */
1245 	b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
1246 
1247 	EMIT3(b0, b1, b2);
1248 	*pprog = prog;
1249 }
1250 
1251 /* emit BMI2 shift instruction */
1252 static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
1253 {
1254 	u8 *prog = *pprog;
1255 	bool r = is_ereg(dst_reg);
1256 	u8 m = 2; /* escape code 0f38 */
1257 
1258 	emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
1259 	EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
1260 	*pprog = prog;
1261 }
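
/*
 * Worked example (illustrative): emit_shiftx(&prog, BPF_REG_0, BPF_REG_2,
 * true, 1) encodes "shlx rax, rax, rsi" as c4 e2 c9 f7 c0 - the VEX bytes
 * carry map 0x0f38, W=1, the inverted shift-count register (~rsi) in vvvv
 * and prefix 0x66, followed by opcode 0xf7 and ModRM 0xc0 (rax, rax).
 */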
1262 
1263 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
1264 
1265 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
1266 #define RESTORE_TAIL_CALL_CNT(stack)				\
1267 	EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
1268 
1269 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
1270 		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
1271 {
1272 	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
1273 	struct bpf_insn *insn = bpf_prog->insnsi;
1274 	bool callee_regs_used[4] = {};
1275 	int insn_cnt = bpf_prog->len;
1276 	bool tail_call_seen = false;
1277 	bool seen_exit = false;
1278 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
1279 	u64 arena_vm_start, user_vm_start;
1280 	int i, excnt = 0;
1281 	int ilen, proglen = 0;
1282 	u8 *prog = temp;
1283 	int err;
1284 
1285 	arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
1286 	user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);
1287 
1288 	detect_reg_usage(insn, insn_cnt, callee_regs_used,
1289 			 &tail_call_seen);
1290 
1291 	/* tail call's presence in current prog implies it is reachable */
1292 	tail_call_reachable |= tail_call_seen;
1293 
1294 	emit_prologue(&prog, bpf_prog->aux->stack_depth,
1295 		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
1296 		      bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
1297 	/* Exception callback will clobber callee regs for its own use, and
1298 	 * restore the original callee regs from main prog's stack frame.
1299 	 */
1300 	if (bpf_prog->aux->exception_boundary) {
1301 		/* We also need to save r12, which is not mapped to any BPF
1302 		 * register, as we throw after entry into the kernel, which may
1303 		 * overwrite r12.
1304 		 */
1305 		push_r12(&prog);
1306 		push_callee_regs(&prog, all_callee_regs_used);
1307 	} else {
1308 		if (arena_vm_start)
1309 			push_r12(&prog);
1310 		push_callee_regs(&prog, callee_regs_used);
1311 	}
1312 	if (arena_vm_start)
1313 		emit_mov_imm64(&prog, X86_REG_R12,
1314 			       arena_vm_start >> 32, (u32) arena_vm_start);
1315 
1316 	ilen = prog - temp;
1317 	if (rw_image)
1318 		memcpy(rw_image + proglen, temp, ilen);
1319 	proglen += ilen;
1320 	addrs[0] = proglen;
1321 	prog = temp;
1322 
1323 	for (i = 1; i <= insn_cnt; i++, insn++) {
1324 		const s32 imm32 = insn->imm;
1325 		u32 dst_reg = insn->dst_reg;
1326 		u32 src_reg = insn->src_reg;
1327 		u8 b2 = 0, b3 = 0;
1328 		u8 *start_of_ldx;
1329 		s64 jmp_offset;
1330 		s16 insn_off;
1331 		u8 jmp_cond;
1332 		u8 *func;
1333 		int nops;
1334 
1335 		switch (insn->code) {
1336 			/* ALU */
1337 		case BPF_ALU | BPF_ADD | BPF_X:
1338 		case BPF_ALU | BPF_SUB | BPF_X:
1339 		case BPF_ALU | BPF_AND | BPF_X:
1340 		case BPF_ALU | BPF_OR | BPF_X:
1341 		case BPF_ALU | BPF_XOR | BPF_X:
1342 		case BPF_ALU64 | BPF_ADD | BPF_X:
1343 		case BPF_ALU64 | BPF_SUB | BPF_X:
1344 		case BPF_ALU64 | BPF_AND | BPF_X:
1345 		case BPF_ALU64 | BPF_OR | BPF_X:
1346 		case BPF_ALU64 | BPF_XOR | BPF_X:
1347 			maybe_emit_mod(&prog, dst_reg, src_reg,
1348 				       BPF_CLASS(insn->code) == BPF_ALU64);
1349 			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
1350 			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
1351 			break;
1352 
1353 		case BPF_ALU64 | BPF_MOV | BPF_X:
1354 			if (insn_is_cast_user(insn)) {
1355 				if (dst_reg != src_reg)
1356 					/* 32-bit mov */
1357 					emit_mov_reg(&prog, false, dst_reg, src_reg);
1358 				/* shl dst_reg, 32 */
1359 				maybe_emit_1mod(&prog, dst_reg, true);
1360 				EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32);
1361 
1362 				/* or dst_reg, user_vm_start */
1363 				maybe_emit_1mod(&prog, dst_reg, true);
1364 				if (is_axreg(dst_reg))
1365 					EMIT1_off32(0x0D,  user_vm_start >> 32);
1366 				else
1367 					EMIT2_off32(0x81, add_1reg(0xC8, dst_reg),  user_vm_start >> 32);
1368 
1369 				/* rol dst_reg, 32 */
1370 				maybe_emit_1mod(&prog, dst_reg, true);
1371 				EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32);
1372 
1373 				/* xor r11, r11 */
1374 				EMIT3(0x4D, 0x31, 0xDB);
1375 
1376 				/* test dst_reg32, dst_reg32; check if lower 32-bit are zero */
1377 				maybe_emit_mod(&prog, dst_reg, dst_reg, false);
1378 				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1379 
1380 				/* cmove r11, dst_reg; if so, set dst_reg to zero */
1381 				/* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */
1382 				maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
1383 				EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
1384 				break;
1385 			}
1386 			fallthrough;
1387 		case BPF_ALU | BPF_MOV | BPF_X:
1388 			if (insn->off == 0)
1389 				emit_mov_reg(&prog,
1390 					     BPF_CLASS(insn->code) == BPF_ALU64,
1391 					     dst_reg, src_reg);
1392 			else
1393 				emit_movsx_reg(&prog, insn->off,
1394 					       BPF_CLASS(insn->code) == BPF_ALU64,
1395 					       dst_reg, src_reg);
1396 			break;
1397 
1398 			/* neg dst */
1399 		case BPF_ALU | BPF_NEG:
1400 		case BPF_ALU64 | BPF_NEG:
1401 			maybe_emit_1mod(&prog, dst_reg,
1402 					BPF_CLASS(insn->code) == BPF_ALU64);
1403 			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
1404 			break;
1405 
1406 		case BPF_ALU | BPF_ADD | BPF_K:
1407 		case BPF_ALU | BPF_SUB | BPF_K:
1408 		case BPF_ALU | BPF_AND | BPF_K:
1409 		case BPF_ALU | BPF_OR | BPF_K:
1410 		case BPF_ALU | BPF_XOR | BPF_K:
1411 		case BPF_ALU64 | BPF_ADD | BPF_K:
1412 		case BPF_ALU64 | BPF_SUB | BPF_K:
1413 		case BPF_ALU64 | BPF_AND | BPF_K:
1414 		case BPF_ALU64 | BPF_OR | BPF_K:
1415 		case BPF_ALU64 | BPF_XOR | BPF_K:
1416 			maybe_emit_1mod(&prog, dst_reg,
1417 					BPF_CLASS(insn->code) == BPF_ALU64);
1418 
1419 			/*
1420 			 * b3 holds the 'normal' opcode, b2 the short form that is
1421 			 * only valid when dst is eax/rax.
1422 			 */
1423 			switch (BPF_OP(insn->code)) {
1424 			case BPF_ADD:
1425 				b3 = 0xC0;
1426 				b2 = 0x05;
1427 				break;
1428 			case BPF_SUB:
1429 				b3 = 0xE8;
1430 				b2 = 0x2D;
1431 				break;
1432 			case BPF_AND:
1433 				b3 = 0xE0;
1434 				b2 = 0x25;
1435 				break;
1436 			case BPF_OR:
1437 				b3 = 0xC8;
1438 				b2 = 0x0D;
1439 				break;
1440 			case BPF_XOR:
1441 				b3 = 0xF0;
1442 				b2 = 0x35;
1443 				break;
1444 			}
1445 
1446 			if (is_imm8(imm32))
1447 				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1448 			else if (is_axreg(dst_reg))
1449 				EMIT1_off32(b2, imm32);
1450 			else
1451 				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1452 			break;
1453 
1454 		case BPF_ALU64 | BPF_MOV | BPF_K:
1455 		case BPF_ALU | BPF_MOV | BPF_K:
1456 			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1457 				       dst_reg, imm32);
1458 			break;
1459 
1460 		case BPF_LD | BPF_IMM | BPF_DW:
1461 			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1462 			insn++;
1463 			i++;
1464 			break;
1465 
1466 			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1467 		case BPF_ALU | BPF_MOD | BPF_X:
1468 		case BPF_ALU | BPF_DIV | BPF_X:
1469 		case BPF_ALU | BPF_MOD | BPF_K:
1470 		case BPF_ALU | BPF_DIV | BPF_K:
1471 		case BPF_ALU64 | BPF_MOD | BPF_X:
1472 		case BPF_ALU64 | BPF_DIV | BPF_X:
1473 		case BPF_ALU64 | BPF_MOD | BPF_K:
1474 		case BPF_ALU64 | BPF_DIV | BPF_K: {
1475 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1476 
1477 			if (dst_reg != BPF_REG_0)
1478 				EMIT1(0x50); /* push rax */
1479 			if (dst_reg != BPF_REG_3)
1480 				EMIT1(0x52); /* push rdx */
1481 
1482 			if (BPF_SRC(insn->code) == BPF_X) {
1483 				if (src_reg == BPF_REG_0 ||
1484 				    src_reg == BPF_REG_3) {
1485 					/* mov r11, src_reg */
1486 					EMIT_mov(AUX_REG, src_reg);
1487 					src_reg = AUX_REG;
1488 				}
1489 			} else {
1490 				/* mov r11, imm32 */
1491 				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1492 				src_reg = AUX_REG;
1493 			}
1494 
1495 			if (dst_reg != BPF_REG_0)
1496 				/* mov rax, dst_reg */
1497 				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1498 
1499 			if (insn->off == 0) {
1500 				/*
1501 				 * xor edx, edx
1502 				 * equivalent to 'xor rdx, rdx', but one byte less
1503 				 */
1504 				EMIT2(0x31, 0xd2);
1505 
1506 				/* div src_reg */
1507 				maybe_emit_1mod(&prog, src_reg, is64);
1508 				EMIT2(0xF7, add_1reg(0xF0, src_reg));
1509 			} else {
1510 				if (BPF_CLASS(insn->code) == BPF_ALU)
1511 					EMIT1(0x99); /* cdq */
1512 				else
1513 					EMIT2(0x48, 0x99); /* cqo */
1514 
1515 				/* idiv src_reg */
1516 				maybe_emit_1mod(&prog, src_reg, is64);
1517 				EMIT2(0xF7, add_1reg(0xF8, src_reg));
1518 			}
1519 
1520 			if (BPF_OP(insn->code) == BPF_MOD &&
1521 			    dst_reg != BPF_REG_3)
1522 				/* mov dst_reg, rdx */
1523 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1524 			else if (BPF_OP(insn->code) == BPF_DIV &&
1525 				 dst_reg != BPF_REG_0)
1526 				/* mov dst_reg, rax */
1527 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1528 
1529 			if (dst_reg != BPF_REG_3)
1530 				EMIT1(0x5A); /* pop rdx */
1531 			if (dst_reg != BPF_REG_0)
1532 				EMIT1(0x58); /* pop rax */
1533 			break;
1534 		}
1535 
1536 		case BPF_ALU | BPF_MUL | BPF_K:
1537 		case BPF_ALU64 | BPF_MUL | BPF_K:
1538 			maybe_emit_mod(&prog, dst_reg, dst_reg,
1539 				       BPF_CLASS(insn->code) == BPF_ALU64);
1540 
1541 			if (is_imm8(imm32))
1542 				/* imul dst_reg, dst_reg, imm8 */
1543 				EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1544 				      imm32);
1545 			else
1546 				/* imul dst_reg, dst_reg, imm32 */
1547 				EMIT2_off32(0x69,
1548 					    add_2reg(0xC0, dst_reg, dst_reg),
1549 					    imm32);
1550 			break;
1551 
1552 		case BPF_ALU | BPF_MUL | BPF_X:
1553 		case BPF_ALU64 | BPF_MUL | BPF_X:
1554 			maybe_emit_mod(&prog, src_reg, dst_reg,
1555 				       BPF_CLASS(insn->code) == BPF_ALU64);
1556 
1557 			/* imul dst_reg, src_reg */
1558 			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1559 			break;
1560 
1561 			/* Shifts */
1562 		case BPF_ALU | BPF_LSH | BPF_K:
1563 		case BPF_ALU | BPF_RSH | BPF_K:
1564 		case BPF_ALU | BPF_ARSH | BPF_K:
1565 		case BPF_ALU64 | BPF_LSH | BPF_K:
1566 		case BPF_ALU64 | BPF_RSH | BPF_K:
1567 		case BPF_ALU64 | BPF_ARSH | BPF_K:
1568 			maybe_emit_1mod(&prog, dst_reg,
1569 					BPF_CLASS(insn->code) == BPF_ALU64);
1570 
1571 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1572 			if (imm32 == 1)
1573 				EMIT2(0xD1, add_1reg(b3, dst_reg));
1574 			else
1575 				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1576 			break;
1577 
1578 		case BPF_ALU | BPF_LSH | BPF_X:
1579 		case BPF_ALU | BPF_RSH | BPF_X:
1580 		case BPF_ALU | BPF_ARSH | BPF_X:
1581 		case BPF_ALU64 | BPF_LSH | BPF_X:
1582 		case BPF_ALU64 | BPF_RSH | BPF_X:
1583 		case BPF_ALU64 | BPF_ARSH | BPF_X:
1584 			/* BMI2 shifts aren't better when shift count is already in rcx */
1585 			if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
1586 				/* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
1587 				bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
1588 				u8 op;
1589 
1590 				switch (BPF_OP(insn->code)) {
1591 				case BPF_LSH:
1592 					op = 1; /* prefix 0x66 */
1593 					break;
1594 				case BPF_RSH:
1595 					op = 3; /* prefix 0xf2 */
1596 					break;
1597 				case BPF_ARSH:
1598 					op = 2; /* prefix 0xf3 */
1599 					break;
1600 				}
1601 
1602 				emit_shiftx(&prog, dst_reg, src_reg, w, op);
1603 
1604 				break;
1605 			}
1606 
1607 			if (src_reg != BPF_REG_4) { /* common case */
1608 				/* Check for bad case when dst_reg == rcx */
1609 				if (dst_reg == BPF_REG_4) {
1610 					/* mov r11, dst_reg */
1611 					EMIT_mov(AUX_REG, dst_reg);
1612 					dst_reg = AUX_REG;
1613 				} else {
1614 					EMIT1(0x51); /* push rcx */
1615 				}
1616 				/* mov rcx, src_reg */
1617 				EMIT_mov(BPF_REG_4, src_reg);
1618 			}
1619 
1620 			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1621 			maybe_emit_1mod(&prog, dst_reg,
1622 					BPF_CLASS(insn->code) == BPF_ALU64);
1623 
1624 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1625 			EMIT2(0xD3, add_1reg(b3, dst_reg));
1626 
1627 			if (src_reg != BPF_REG_4) {
1628 				if (insn->dst_reg == BPF_REG_4)
1629 					/* mov dst_reg, r11 */
1630 					EMIT_mov(insn->dst_reg, AUX_REG);
1631 				else
1632 					EMIT1(0x59); /* pop rcx */
1633 			}
1634 
1635 			break;
1636 
1637 		case BPF_ALU | BPF_END | BPF_FROM_BE:
1638 		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
1639 			switch (imm32) {
1640 			case 16:
1641 				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
1642 				EMIT1(0x66);
1643 				if (is_ereg(dst_reg))
1644 					EMIT1(0x41);
1645 				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1646 
1647 				/* Emit 'movzwl eax, ax' */
1648 				if (is_ereg(dst_reg))
1649 					EMIT3(0x45, 0x0F, 0xB7);
1650 				else
1651 					EMIT2(0x0F, 0xB7);
1652 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1653 				break;
1654 			case 32:
1655 				/* Emit 'bswap eax' to swap lower 4 bytes */
1656 				if (is_ereg(dst_reg))
1657 					EMIT2(0x41, 0x0F);
1658 				else
1659 					EMIT1(0x0F);
1660 				EMIT1(add_1reg(0xC8, dst_reg));
1661 				break;
1662 			case 64:
1663 				/* Emit 'bswap rax' to swap 8 bytes */
1664 				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1665 				      add_1reg(0xC8, dst_reg));
1666 				break;
1667 			}
1668 			break;
1669 
1670 		case BPF_ALU | BPF_END | BPF_FROM_LE:
1671 			switch (imm32) {
1672 			case 16:
1673 				/*
1674 				 * Emit 'movzwl eax, ax' to zero-extend the 16-bit
1675 				 * value into 64 bits
1676 				 */
1677 				if (is_ereg(dst_reg))
1678 					EMIT3(0x45, 0x0F, 0xB7);
1679 				else
1680 					EMIT2(0x0F, 0xB7);
1681 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1682 				break;
1683 			case 32:
1684 				/* Emit 'mov eax, eax' to clear upper 32-bits */
1685 				if (is_ereg(dst_reg))
1686 					EMIT1(0x45);
1687 				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1688 				break;
1689 			case 64:
1690 				/* nop */
1691 				break;
1692 			}
1693 			break;
1694 
1695 			/* speculation barrier */
1696 		case BPF_ST | BPF_NOSPEC:
1697 			EMIT_LFENCE();
1698 			break;
1699 
1700 			/* ST: *(u8*)(dst_reg + off) = imm */
1701 		case BPF_ST | BPF_MEM | BPF_B:
1702 			if (is_ereg(dst_reg))
1703 				EMIT2(0x41, 0xC6);
1704 			else
1705 				EMIT1(0xC6);
1706 			goto st;
1707 		case BPF_ST | BPF_MEM | BPF_H:
1708 			if (is_ereg(dst_reg))
1709 				EMIT3(0x66, 0x41, 0xC7);
1710 			else
1711 				EMIT2(0x66, 0xC7);
1712 			goto st;
1713 		case BPF_ST | BPF_MEM | BPF_W:
1714 			if (is_ereg(dst_reg))
1715 				EMIT2(0x41, 0xC7);
1716 			else
1717 				EMIT1(0xC7);
1718 			goto st;
1719 		case BPF_ST | BPF_MEM | BPF_DW:
1720 			EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1721 
1722 st:			if (is_imm8(insn->off))
1723 				EMIT2(add_1reg(0x40, dst_reg), insn->off);
1724 			else
1725 				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1726 
1727 			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1728 			break;
1729 
1730 			/* STX: *(u8*)(dst_reg + off) = src_reg */
1731 		case BPF_STX | BPF_MEM | BPF_B:
1732 		case BPF_STX | BPF_MEM | BPF_H:
1733 		case BPF_STX | BPF_MEM | BPF_W:
1734 		case BPF_STX | BPF_MEM | BPF_DW:
1735 			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1736 			break;
1737 
1738 		case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
1739 		case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
1740 		case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
1741 		case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
1742 			start_of_ldx = prog;
1743 			emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm);
1744 			goto populate_extable;
1745 
1746 			/* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */
1747 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
1748 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
1749 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
1750 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
1751 		case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
1752 		case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
1753 		case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
1754 		case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
1755 			start_of_ldx = prog;
1756 			if (BPF_CLASS(insn->code) == BPF_LDX)
1757 				emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1758 			else
1759 				emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1760 populate_extable:
1761 			{
1762 				struct exception_table_entry *ex;
1763 				u8 *_insn = image + proglen + (start_of_ldx - temp);
1764 				s64 delta;
1765 
1766 				if (!bpf_prog->aux->extable)
1767 					break;
1768 
1769 				if (excnt >= bpf_prog->aux->num_exentries) {
1770 					pr_err("mem32 extable bug\n");
1771 					return -EFAULT;
1772 				}
1773 				ex = &bpf_prog->aux->extable[excnt++];
1774 
1775 				delta = _insn - (u8 *)&ex->insn;
1776 				/* switch ex to rw buffer for writes */
1777 				ex = (void *)rw_image + ((void *)ex - (void *)image);
1778 
1779 				ex->insn = delta;
1780 
1781 				ex->data = EX_TYPE_BPF;
1782 
1783 				ex->fixup = (prog - start_of_ldx) |
1784 					((BPF_CLASS(insn->code) == BPF_LDX ? reg2pt_regs[dst_reg] : DONT_CLEAR) << 8);
1785 			}
1786 			break;
1787 
1788 			/* LDX: dst_reg = *(u8*)(src_reg + off) */
1789 		case BPF_LDX | BPF_MEM | BPF_B:
1790 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1791 		case BPF_LDX | BPF_MEM | BPF_H:
1792 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1793 		case BPF_LDX | BPF_MEM | BPF_W:
1794 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1795 		case BPF_LDX | BPF_MEM | BPF_DW:
1796 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1797 			/* LDXS: dst_reg = *(s8*)(src_reg + off) */
1798 		case BPF_LDX | BPF_MEMSX | BPF_B:
1799 		case BPF_LDX | BPF_MEMSX | BPF_H:
1800 		case BPF_LDX | BPF_MEMSX | BPF_W:
1801 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1802 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1803 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1804 			insn_off = insn->off;
1805 
1806 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
1807 			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
1808 				/* Conservatively check that src_reg + insn->off is a kernel address:
1809 				 *   src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
1810 				 * src_reg is used as scratch for src_reg += insn->off and restored
1811 				 * after emit_ldx if necessary
1812 				 */
1813 
1814 				u64 limit = TASK_SIZE_MAX + PAGE_SIZE;
1815 				u8 *end_of_jmp;
1816 
1817 				/* At end of these emitted checks, insn->off will have been added
1818 				 * to src_reg, so no need to do relative load with insn->off offset
1819 				 */
1820 				insn_off = 0;
1821 
1822 				/* movabsq r11, limit */
1823 				EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
1824 				EMIT((u32)limit, 4);
1825 				EMIT(limit >> 32, 4);
1826 
1827 				if (insn->off) {
1828 					/* add src_reg, insn->off */
1829 					maybe_emit_1mod(&prog, src_reg, true);
1830 					EMIT2_off32(0x81, add_1reg(0xC0, src_reg), insn->off);
1831 				}
1832 
1833 				/* cmp src_reg, r11 */
1834 				maybe_emit_mod(&prog, src_reg, AUX_REG, true);
1835 				EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
1836 
1837 				/* if unsigned '>=', goto load */
1838 				EMIT2(X86_JAE, 0);
1839 				end_of_jmp = prog;
1840 
1841 				/* xor dst_reg, dst_reg */
1842 				emit_mov_imm32(&prog, false, dst_reg, 0);
1843 				/* jmp byte_after_ldx */
1844 				EMIT2(0xEB, 0);
1845 
1846 				/* populate jmp_offset for JAE above to jump to start_of_ldx */
1847 				start_of_ldx = prog;
1848 				end_of_jmp[-1] = start_of_ldx - end_of_jmp;
1849 			}
1850 			if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
1851 			    BPF_MODE(insn->code) == BPF_MEMSX)
1852 				emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
1853 			else
1854 				emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
1855 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
1856 			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
1857 				struct exception_table_entry *ex;
1858 				u8 *_insn = image + proglen + (start_of_ldx - temp);
1859 				s64 delta;
1860 
1861 				/* populate jmp_offset for JMP above */
1862 				start_of_ldx[-1] = prog - start_of_ldx;
1863 
1864 				if (insn->off && src_reg != dst_reg) {
1865 					/* sub src_reg, insn->off
1866 					 * Restore src_reg after "add src_reg, insn->off" in prev
1867 					 * if statement. But if src_reg == dst_reg, emit_ldx
1868 					 * above already clobbered src_reg, so no need to restore.
1869 					 * If add src_reg, insn->off was unnecessary, no need to
1870 					 * restore either.
1871 					 */
1872 					maybe_emit_1mod(&prog, src_reg, true);
1873 					EMIT2_off32(0x81, add_1reg(0xE8, src_reg), insn->off);
1874 				}
1875 
1876 				if (!bpf_prog->aux->extable)
1877 					break;
1878 
1879 				if (excnt >= bpf_prog->aux->num_exentries) {
1880 					pr_err("ex gen bug\n");
1881 					return -EFAULT;
1882 				}
1883 				ex = &bpf_prog->aux->extable[excnt++];
1884 
1885 				delta = _insn - (u8 *)&ex->insn;
1886 				if (!is_simm32(delta)) {
1887 					pr_err("extable->insn doesn't fit into 32-bit\n");
1888 					return -EFAULT;
1889 				}
1890 				/* switch ex to rw buffer for writes */
1891 				ex = (void *)rw_image + ((void *)ex - (void *)image);
1892 
1893 				ex->insn = delta;
1894 
1895 				ex->data = EX_TYPE_BPF;
1896 
1897 				if (dst_reg > BPF_REG_9) {
1898 					pr_err("verifier error\n");
1899 					return -EFAULT;
1900 				}
1901 				/*
1902 				 * Compute size of x86 insn and its target dest x86 register.
1903 				 * ex_handler_bpf() will use lower 8 bits to adjust
1904 				 * pt_regs->ip to jump over this x86 instruction
1905 				 * and upper bits to figure out which pt_regs to zero out.
1906 				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1907 				 * of 4 bytes will be ignored and rbx will be zero inited.
1908 				 */
1909 				ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
1910 			}
1911 			break;
1912 
1913 		case BPF_STX | BPF_ATOMIC | BPF_W:
1914 		case BPF_STX | BPF_ATOMIC | BPF_DW:
1915 			if (insn->imm == (BPF_AND | BPF_FETCH) ||
1916 			    insn->imm == (BPF_OR | BPF_FETCH) ||
1917 			    insn->imm == (BPF_XOR | BPF_FETCH)) {
1918 				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1919 				u32 real_src_reg = src_reg;
1920 				u32 real_dst_reg = dst_reg;
1921 				u8 *branch_target;
1922 
1923 				/*
1924 				 * Can't be implemented with a single x86 insn.
1925 				 * Need to do a CMPXCHG loop.
1926 				 */
1927 
1928 				/* Will need RAX as a CMPXCHG operand so save R0 */
1929 				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1930 				if (src_reg == BPF_REG_0)
1931 					real_src_reg = BPF_REG_AX;
1932 				if (dst_reg == BPF_REG_0)
1933 					real_dst_reg = BPF_REG_AX;
1934 
1935 				branch_target = prog;
1936 				/* Load old value */
1937 				emit_ldx(&prog, BPF_SIZE(insn->code),
1938 					 BPF_REG_0, real_dst_reg, insn->off);
1939 				/*
1940 				 * Perform the (commutative) operation locally,
1941 				 * put the result in the AUX_REG.
1942 				 */
1943 				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1944 				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1945 				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1946 				      add_2reg(0xC0, AUX_REG, real_src_reg));
1947 				/* Attempt to swap in new value */
1948 				err = emit_atomic(&prog, BPF_CMPXCHG,
1949 						  real_dst_reg, AUX_REG,
1950 						  insn->off,
1951 						  BPF_SIZE(insn->code));
1952 				if (WARN_ON(err))
1953 					return err;
1954 				/*
1955 				 * ZF tells us whether we won the race. If it's
1956 				 * cleared we need to try again.
1957 				 */
1958 				EMIT2(X86_JNE, -(prog - branch_target) - 2);
1959 				/* Return the pre-modification value */
1960 				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1961 				/* Restore R0 after clobbering RAX */
1962 				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
1963 				break;
1964 			}
1965 
1966 			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1967 					  insn->off, BPF_SIZE(insn->code));
1968 			if (err)
1969 				return err;
1970 			break;
1971 
1972 			/* call */
1973 		case BPF_JMP | BPF_CALL: {
1974 			int offs;
1975 
1976 			func = (u8 *) __bpf_call_base + imm32;
1977 			if (tail_call_reachable) {
1978 				RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
1979 				if (!imm32)
1980 					return -EINVAL;
1981 				offs = 7 + x86_call_depth_emit_accounting(&prog, func);
1982 			} else {
1983 				if (!imm32)
1984 					return -EINVAL;
1985 				offs = x86_call_depth_emit_accounting(&prog, func);
1986 			}
1987 			if (emit_call(&prog, func, image + addrs[i - 1] + offs))
1988 				return -EINVAL;
1989 			break;
1990 		}
1991 
1992 		case BPF_JMP | BPF_TAIL_CALL:
1993 			if (imm32)
1994 				emit_bpf_tail_call_direct(bpf_prog,
1995 							  &bpf_prog->aux->poke_tab[imm32 - 1],
1996 							  &prog, image + addrs[i - 1],
1997 							  callee_regs_used,
1998 							  bpf_prog->aux->stack_depth,
1999 							  ctx);
2000 			else
2001 				emit_bpf_tail_call_indirect(bpf_prog,
2002 							    &prog,
2003 							    callee_regs_used,
2004 							    bpf_prog->aux->stack_depth,
2005 							    image + addrs[i - 1],
2006 							    ctx);
2007 			break;
2008 
2009 			/* cond jump */
2010 		case BPF_JMP | BPF_JEQ | BPF_X:
2011 		case BPF_JMP | BPF_JNE | BPF_X:
2012 		case BPF_JMP | BPF_JGT | BPF_X:
2013 		case BPF_JMP | BPF_JLT | BPF_X:
2014 		case BPF_JMP | BPF_JGE | BPF_X:
2015 		case BPF_JMP | BPF_JLE | BPF_X:
2016 		case BPF_JMP | BPF_JSGT | BPF_X:
2017 		case BPF_JMP | BPF_JSLT | BPF_X:
2018 		case BPF_JMP | BPF_JSGE | BPF_X:
2019 		case BPF_JMP | BPF_JSLE | BPF_X:
2020 		case BPF_JMP32 | BPF_JEQ | BPF_X:
2021 		case BPF_JMP32 | BPF_JNE | BPF_X:
2022 		case BPF_JMP32 | BPF_JGT | BPF_X:
2023 		case BPF_JMP32 | BPF_JLT | BPF_X:
2024 		case BPF_JMP32 | BPF_JGE | BPF_X:
2025 		case BPF_JMP32 | BPF_JLE | BPF_X:
2026 		case BPF_JMP32 | BPF_JSGT | BPF_X:
2027 		case BPF_JMP32 | BPF_JSLT | BPF_X:
2028 		case BPF_JMP32 | BPF_JSGE | BPF_X:
2029 		case BPF_JMP32 | BPF_JSLE | BPF_X:
2030 			/* cmp dst_reg, src_reg */
2031 			maybe_emit_mod(&prog, dst_reg, src_reg,
2032 				       BPF_CLASS(insn->code) == BPF_JMP);
2033 			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
2034 			goto emit_cond_jmp;
2035 
2036 		case BPF_JMP | BPF_JSET | BPF_X:
2037 		case BPF_JMP32 | BPF_JSET | BPF_X:
2038 			/* test dst_reg, src_reg */
2039 			maybe_emit_mod(&prog, dst_reg, src_reg,
2040 				       BPF_CLASS(insn->code) == BPF_JMP);
2041 			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
2042 			goto emit_cond_jmp;
2043 
2044 		case BPF_JMP | BPF_JSET | BPF_K:
2045 		case BPF_JMP32 | BPF_JSET | BPF_K:
2046 			/* test dst_reg, imm32 */
2047 			maybe_emit_1mod(&prog, dst_reg,
2048 					BPF_CLASS(insn->code) == BPF_JMP);
2049 			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
2050 			goto emit_cond_jmp;
2051 
2052 		case BPF_JMP | BPF_JEQ | BPF_K:
2053 		case BPF_JMP | BPF_JNE | BPF_K:
2054 		case BPF_JMP | BPF_JGT | BPF_K:
2055 		case BPF_JMP | BPF_JLT | BPF_K:
2056 		case BPF_JMP | BPF_JGE | BPF_K:
2057 		case BPF_JMP | BPF_JLE | BPF_K:
2058 		case BPF_JMP | BPF_JSGT | BPF_K:
2059 		case BPF_JMP | BPF_JSLT | BPF_K:
2060 		case BPF_JMP | BPF_JSGE | BPF_K:
2061 		case BPF_JMP | BPF_JSLE | BPF_K:
2062 		case BPF_JMP32 | BPF_JEQ | BPF_K:
2063 		case BPF_JMP32 | BPF_JNE | BPF_K:
2064 		case BPF_JMP32 | BPF_JGT | BPF_K:
2065 		case BPF_JMP32 | BPF_JLT | BPF_K:
2066 		case BPF_JMP32 | BPF_JGE | BPF_K:
2067 		case BPF_JMP32 | BPF_JLE | BPF_K:
2068 		case BPF_JMP32 | BPF_JSGT | BPF_K:
2069 		case BPF_JMP32 | BPF_JSLT | BPF_K:
2070 		case BPF_JMP32 | BPF_JSGE | BPF_K:
2071 		case BPF_JMP32 | BPF_JSLE | BPF_K:
2072 			/* test dst_reg, dst_reg to save one extra byte */
2073 			if (imm32 == 0) {
2074 				maybe_emit_mod(&prog, dst_reg, dst_reg,
2075 					       BPF_CLASS(insn->code) == BPF_JMP);
2076 				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
2077 				goto emit_cond_jmp;
2078 			}
2079 
2080 			/* cmp dst_reg, imm8/32 */
2081 			maybe_emit_1mod(&prog, dst_reg,
2082 					BPF_CLASS(insn->code) == BPF_JMP);
2083 
2084 			if (is_imm8(imm32))
2085 				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
2086 			else
2087 				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
2088 
2089 emit_cond_jmp:		/* Convert BPF opcode to x86 */
2090 			switch (BPF_OP(insn->code)) {
2091 			case BPF_JEQ:
2092 				jmp_cond = X86_JE;
2093 				break;
2094 			case BPF_JSET:
2095 			case BPF_JNE:
2096 				jmp_cond = X86_JNE;
2097 				break;
2098 			case BPF_JGT:
2099 				/* GT is unsigned '>', JA in x86 */
2100 				jmp_cond = X86_JA;
2101 				break;
2102 			case BPF_JLT:
2103 				/* LT is unsigned '<', JB in x86 */
2104 				jmp_cond = X86_JB;
2105 				break;
2106 			case BPF_JGE:
2107 				/* GE is unsigned '>=', JAE in x86 */
2108 				jmp_cond = X86_JAE;
2109 				break;
2110 			case BPF_JLE:
2111 				/* LE is unsigned '<=', JBE in x86 */
2112 				jmp_cond = X86_JBE;
2113 				break;
2114 			case BPF_JSGT:
2115 				/* Signed '>', GT in x86 */
2116 				jmp_cond = X86_JG;
2117 				break;
2118 			case BPF_JSLT:
2119 				/* Signed '<', LT in x86 */
2120 				jmp_cond = X86_JL;
2121 				break;
2122 			case BPF_JSGE:
2123 				/* Signed '>=', GE in x86 */
2124 				jmp_cond = X86_JGE;
2125 				break;
2126 			case BPF_JSLE:
2127 				/* Signed '<=', LE in x86 */
2128 				jmp_cond = X86_JLE;
2129 				break;
2130 			default: /* to silence GCC warning */
2131 				return -EFAULT;
2132 			}
2133 			jmp_offset = addrs[i + insn->off] - addrs[i];
2134 			if (is_imm8(jmp_offset)) {
2135 				if (jmp_padding) {
2136 					/* To keep the jmp_offset valid, the extra bytes are
2137 					 * padded before the jump insn, so we subtract the
2138 					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
2139 					 *
2140 					 * If the previous pass already emits an imm8
2141 					 * jmp_cond, then this BPF insn won't shrink, so
2142 					 * "nops" is 0.
2143 					 *
2144 					 * On the other hand, if the previous pass emits an
2145 					 * imm32 jmp_cond, the extra 4 bytes(*) are padded to
2146 					 * keep the image from shrinking further.
2147 					 *
2148 					 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
2149 					 *     is 2 bytes, so the size difference is 4 bytes.
2150 					 */
2151 					nops = INSN_SZ_DIFF - 2;
2152 					if (nops != 0 && nops != 4) {
2153 						pr_err("unexpected jmp_cond padding: %d bytes\n",
2154 						       nops);
2155 						return -EFAULT;
2156 					}
2157 					emit_nops(&prog, nops);
2158 				}
2159 				EMIT2(jmp_cond, jmp_offset);
2160 			} else if (is_simm32(jmp_offset)) {
2161 				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
2162 			} else {
2163 				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
2164 				return -EFAULT;
2165 			}
2166 
2167 			break;
2168 
2169 		case BPF_JMP | BPF_JA:
2170 		case BPF_JMP32 | BPF_JA:
2171 			if (BPF_CLASS(insn->code) == BPF_JMP) {
2172 				if (insn->off == -1)
2173 					/* -1 jmp instructions will always jump
2174 					 * backwards two bytes. Explicitly handling
2175 					 * this case avoids wasting too many passes
2176 					 * when there are long sequences of replaced
2177 					 * dead code.
2178 					 */
2179 					jmp_offset = -2;
2180 				else
2181 					jmp_offset = addrs[i + insn->off] - addrs[i];
2182 			} else {
2183 				if (insn->imm == -1)
2184 					jmp_offset = -2;
2185 				else
2186 					jmp_offset = addrs[i + insn->imm] - addrs[i];
2187 			}
2188 
2189 			if (!jmp_offset) {
2190 				/*
2191 				 * If jmp_padding is enabled, the extra nops will
2192 				 * be inserted. Otherwise, optimize out nop jumps.
2193 				 */
2194 				if (jmp_padding) {
2195 					/* There are 3 possible conditions.
2196 					 * (1) This BPF_JA is already optimized out in
2197 					 *     the previous run, so there is no need
2198 					 *     to pad any extra byte (0 byte).
2199 					 * (2) The previous pass emits an imm8 jmp,
2200 					 *     so we pad 2 bytes to match the previous
2201 					 *     insn size.
2202 					 * (3) Similarly, the previous pass emits an
2203 					 *     imm32 jmp, and 5 bytes are padded.
2204 					 */
2205 					nops = INSN_SZ_DIFF;
2206 					if (nops != 0 && nops != 2 && nops != 5) {
2207 						pr_err("unexpected nop jump padding: %d bytes\n",
2208 						       nops);
2209 						return -EFAULT;
2210 					}
2211 					emit_nops(&prog, nops);
2212 				}
2213 				break;
2214 			}
2215 emit_jmp:
2216 			if (is_imm8(jmp_offset)) {
2217 				if (jmp_padding) {
2218 					/* To avoid breaking jmp_offset, the extra bytes
2219 					 * are padded before the actual jmp insn, so
2220 					 * 2 bytes is subtracted from INSN_SZ_DIFF.
2221 					 *
2222 					 * If the previous pass already emits an imm8
2223 					 * jmp, there is nothing to pad (0 byte).
2224 					 *
2225 					 * If it emits an imm32 jmp (5 bytes) previously
2226 					 * and now an imm8 jmp (2 bytes), then we pad
2227 					 * (5 - 2 = 3) bytes to stop the image from
2228 					 * shrinking further.
2229 					 */
2230 					nops = INSN_SZ_DIFF - 2;
2231 					if (nops != 0 && nops != 3) {
2232 						pr_err("unexpected jump padding: %d bytes\n",
2233 						       nops);
2234 						return -EFAULT;
2235 					}
2236 					emit_nops(&prog, INSN_SZ_DIFF - 2);
2237 				}
2238 				EMIT2(0xEB, jmp_offset);
2239 			} else if (is_simm32(jmp_offset)) {
2240 				EMIT1_off32(0xE9, jmp_offset);
2241 			} else {
2242 				pr_err("jmp gen bug %llx\n", jmp_offset);
2243 				return -EFAULT;
2244 			}
2245 			break;
2246 
2247 		case BPF_JMP | BPF_EXIT:
2248 			if (seen_exit) {
2249 				jmp_offset = ctx->cleanup_addr - addrs[i];
2250 				goto emit_jmp;
2251 			}
2252 			seen_exit = true;
2253 			/* Update cleanup_addr */
2254 			ctx->cleanup_addr = proglen;
2255 			if (bpf_prog->aux->exception_boundary) {
2256 				pop_callee_regs(&prog, all_callee_regs_used);
2257 				pop_r12(&prog);
2258 			} else {
2259 				pop_callee_regs(&prog, callee_regs_used);
2260 				if (arena_vm_start)
2261 					pop_r12(&prog);
2262 			}
2263 			EMIT1(0xC9);         /* leave */
2264 			emit_return(&prog, image + addrs[i - 1] + (prog - temp));
2265 			break;
2266 
2267 		default:
2268 			/*
2269 			 * By design x86-64 JIT should support all BPF instructions.
2270 			 * This error will be seen if a new instruction was added
2271 			 * to the interpreter, but not to the JIT, or if there is
2272 			 * junk in bpf_prog.
2273 			 */
2274 			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
2275 			return -EINVAL;
2276 		}
2277 
2278 		ilen = prog - temp;
2279 		if (ilen > BPF_MAX_INSN_SIZE) {
2280 			pr_err("bpf_jit: fatal insn size error\n");
2281 			return -EFAULT;
2282 		}
2283 
2284 		if (image) {
2285 			/*
2286 			 * When populating the image, assert that:
2287 			 *
2288 			 *  i) We do not write beyond the allocated space, and
2289 			 * ii) addrs[i] did not change from the prior run, in order
2290 			 *     to validate assumptions made for computing branch
2291 			 *     displacements.
2292 			 */
2293 			if (unlikely(proglen + ilen > oldproglen ||
2294 				     proglen + ilen != addrs[i])) {
2295 				pr_err("bpf_jit: fatal error\n");
2296 				return -EFAULT;
2297 			}
2298 			memcpy(rw_image + proglen, temp, ilen);
2299 		}
2300 		proglen += ilen;
2301 		addrs[i] = proglen;
2302 		prog = temp;
2303 	}
2304 
2305 	if (image && excnt != bpf_prog->aux->num_exentries) {
2306 		pr_err("extable is not populated\n");
2307 		return -EFAULT;
2308 	}
2309 	return proglen;
2310 }
2311 
2312 static void clean_stack_garbage(const struct btf_func_model *m,
2313 				u8 **pprog, int nr_stack_slots,
2314 				int stack_size)
2315 {
2316 	int arg_size, off;
2317 	u8 *prog;
2318 
2319 	/* Generally speaking, the compiler will pass the arguments
2320 	 * on-stack with a "push" instruction, which takes 8 bytes
2321 	 * on the stack. In that case, there won't be garbage values
2322 	 * when we copy the arguments from the origin stack frame to the
2323 	 * current one with BPF_DW.
2324 	 *
2325 	 * However, sometimes the compiler will only allocate 4 bytes on
2326 	 * the stack for the argument. For now, this case will only
2327 	 * happen if there is only one argument on-stack and its size
2328 	 * is not more than 4 bytes. In that case, there will be garbage
2329 	 * values in the upper 4 bytes where we store the argument in the
2330 	 * current stack frame.
2331 	 *
2332 	 * arguments on origin stack:
2333 	 *
2334 	 * stack_arg_1(4-byte) xxx(4-byte)
2335 	 *
2336 	 * what we copy:
2337 	 *
2338 	 * stack_arg_1(8-byte): stack_arg_1(origin) xxx
2339 	 *
2340 	 * and the xxx is the garbage values which we should clean here.
2341 	 * and xxx is the garbage value that we should clean here.
2342 	if (nr_stack_slots != 1)
2343 		return;
2344 
2345 	/* the size of the last argument */
2346 	arg_size = m->arg_size[m->nr_args - 1];
2347 	if (arg_size <= 4) {
2348 		off = -(stack_size - 4);
2349 		prog = *pprog;
2350 		/* mov DWORD PTR [rbp + off], 0 */
2351 		if (!is_imm8(off))
2352 			EMIT2_off32(0xC7, 0x85, off);
2353 		else
2354 			EMIT3(0xC7, 0x45, off);
2355 		EMIT(0, 4);
2356 		*pprog = prog;
2357 	}
2358 }
2359 
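/* A worked example (illustrative values, not tied to a specific caller):
 * with a single 4-byte stack argument whose 8-byte slot sits at
 * [rbp - 8] (i.e. stack_size == 8 here), off becomes -(8 - 4) = -4,
 * which fits in imm8, so the code above emits
 *
 *   C7 45 FC 00 00 00 00    mov DWORD PTR [rbp - 0x4], 0x0
 *
 * clearing the garbage in the upper half of the slot.
 */
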
2360 /* get the count of the regs that are used to pass arguments */
2361 static int get_nr_used_regs(const struct btf_func_model *m)
2362 {
2363 	int i, arg_regs, nr_used_regs = 0;
2364 
2365 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2366 		arg_regs = (m->arg_size[i] + 7) / 8;
2367 		if (nr_used_regs + arg_regs <= 6)
2368 			nr_used_regs += arg_regs;
2369 
2370 		if (nr_used_regs >= 6)
2371 			break;
2372 	}
2373 
2374 	return nr_used_regs;
2375 }
2376 
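/* A worked example, reusing the foo() prototype from the comment in
 * save_args() below: the five leading char args take one reg each, the
 * 16-byte struct needs two regs but only one remains, so it goes to the
 * stack, and the trailing char takes the sixth reg. get_nr_used_regs()
 * therefore returns 6 and the struct is the only on-stack argument.
 */
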
2377 static void save_args(const struct btf_func_model *m, u8 **prog,
2378 		      int stack_size, bool for_call_origin)
2379 {
2380 	int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
2381 	int i, j;
2382 
2383 	/* Store function arguments to stack.
2384 	 * For a function that accepts two pointers the sequence will be:
2385 	 * mov QWORD PTR [rbp-0x10],rdi
2386 	 * mov QWORD PTR [rbp-0x8],rsi
2387 	 */
2388 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2389 		arg_regs = (m->arg_size[i] + 7) / 8;
2390 
2391 		/* According to the research of Yonghong, struct members
2392 		 * should be either all in registers or all on the stack.
2393 		 * Meanwhile, the compiler will pass the argument in regs
2394 		 * if the remaining regs can hold it.
2395 		 *
2396 		 * Disorder of the args can happen. For example:
2397 		 *
2398 		 * struct foo_struct {
2399 		 *     long a;
2400 		 *     int b;
2401 		 * };
2402 		 * int foo(char, char, char, char, char, struct foo_struct,
2403 		 *         char);
2404 		 *
2405 		 * args 1-5 and arg7 will be passed in regs, and arg6 will
2406 		 * be passed on the stack.
2407 		 */
2408 		if (nr_regs + arg_regs > 6) {
2409 			/* copy function arguments from origin stack frame
2410 			 * into current stack frame.
2411 			 *
2412 			 * The starting address of the arguments on-stack
2413 			 * is:
2414 			 *   rbp + 8(push rbp) +
2415 			 *   8(return addr of origin call) +
2416 			 *   8(return addr of the caller)
2417 			 * which means: rbp + 24
2418 			 */
2419 			for (j = 0; j < arg_regs; j++) {
2420 				emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
2421 					 nr_stack_slots * 8 + 0x18);
2422 				emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
2423 					 -stack_size);
2424 
2425 				if (!nr_stack_slots)
2426 					first_off = stack_size;
2427 				stack_size -= 8;
2428 				nr_stack_slots++;
2429 			}
2430 		} else {
2431 			/* Only copy the on-stack arguments to the current
2432 			 * 'stack_size' and ignore the regs; this is used to
2433 			 * prepare the on-stack arguments for the origin call.
2434 			 */
2435 			if (for_call_origin) {
2436 				nr_regs += arg_regs;
2437 				continue;
2438 			}
2439 
2440 			/* copy the arguments from regs into stack */
2441 			for (j = 0; j < arg_regs; j++) {
2442 				emit_stx(prog, BPF_DW, BPF_REG_FP,
2443 					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2444 					 -stack_size);
2445 				stack_size -= 8;
2446 				nr_regs++;
2447 			}
2448 		}
2449 	}
2450 
2451 	clean_stack_garbage(m, prog, nr_stack_slots, first_off);
2452 }
2453 
2454 static void restore_regs(const struct btf_func_model *m, u8 **prog,
2455 			 int stack_size)
2456 {
2457 	int i, j, arg_regs, nr_regs = 0;
2458 
2459 	/* Restore function arguments from stack.
2460 	 * For a function that accepts two pointers the sequence will be:
2461 	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
2462 	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
2463 	 *
2464 	 * The logic here is similar to what we do in save_args()
2465 	 */
2466 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2467 		arg_regs = (m->arg_size[i] + 7) / 8;
2468 		if (nr_regs + arg_regs <= 6) {
2469 			for (j = 0; j < arg_regs; j++) {
2470 				emit_ldx(prog, BPF_DW,
2471 					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2472 					 BPF_REG_FP,
2473 					 -stack_size);
2474 				stack_size -= 8;
2475 				nr_regs++;
2476 			}
2477 		} else {
2478 			stack_size -= 8 * arg_regs;
2479 		}
2480 
2481 		if (nr_regs >= 6)
2482 			break;
2483 	}
2484 }
2485 
2486 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
2487 			   struct bpf_tramp_link *l, int stack_size,
2488 			   int run_ctx_off, bool save_ret,
2489 			   void *image, void *rw_image)
2490 {
2491 	u8 *prog = *pprog;
2492 	u8 *jmp_insn;
2493 	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
2494 	struct bpf_prog *p = l->link.prog;
2495 	u64 cookie = l->cookie;
2496 
2497 	/* mov rdi, cookie */
2498 	emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
2499 
2500 	/* Prepare struct bpf_tramp_run_ctx.
2501 	 *
2502 	 * bpf_tramp_run_ctx is already preserved by
2503 	 * arch_prepare_bpf_trampoline().
2504 	 *
2505 	 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
2506 	 */
2507 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
2508 
2509 	/* arg1: mov rdi, progs[i] */
2510 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2511 	/* arg2: lea rsi, [rbp - run_ctx_off] */
2512 	if (!is_imm8(-run_ctx_off))
2513 		EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
2514 	else
2515 		EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
2516 
2517 	if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
2518 		return -EINVAL;
2519 	/* remember prog start time returned by __bpf_prog_enter */
2520 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
2521 
2522 	/* if (__bpf_prog_enter*(prog) == 0)
2523 	 *	goto skip_exec_of_prog;
2524 	 */
2525 	EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
2526 	/* emit 2 nops that will be replaced with JE insn */
2527 	jmp_insn = prog;
2528 	emit_nops(&prog, 2);
2529 
2530 	/* arg1: lea rdi, [rbp - stack_size] */
2531 	if (!is_imm8(-stack_size))
2532 		EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
2533 	else
2534 		EMIT4(0x48, 0x8D, 0x7D, -stack_size);
2535 	/* arg2: progs[i]->insnsi for interpreter */
2536 	if (!p->jited)
2537 		emit_mov_imm64(&prog, BPF_REG_2,
2538 			       (long) p->insnsi >> 32,
2539 			       (u32) (long) p->insnsi);
2540 	/* call JITed bpf program or interpreter */
2541 	if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
2542 		return -EINVAL;
2543 
2544 	/*
2545 	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
2546 	 * of the previous call which is then passed on the stack to
2547 	 * the next BPF program.
2548 	 *
2549 	 * BPF_TRAMP_FENTRY trampoline may need to return the return
2550 	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
2551 	 */
2552 	if (save_ret)
2553 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2554 
2555 	/* replace 2 nops with JE insn, since jmp target is known */
2556 	jmp_insn[0] = X86_JE;
2557 	jmp_insn[1] = prog - jmp_insn - 2;
2558 
2559 	/* arg1: mov rdi, progs[i] */
2560 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2561 	/* arg2: mov rsi, rbx <- start time in nsec */
2562 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
2563 	/* arg3: lea rdx, [rbp - run_ctx_off] */
2564 	if (!is_imm8(-run_ctx_off))
2565 		EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
2566 	else
2567 		EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
2568 	if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
2569 		return -EINVAL;
2570 
2571 	*pprog = prog;
2572 	return 0;
2573 }
2574 
2575 static void emit_align(u8 **pprog, u32 align)
2576 {
2577 	u8 *target, *prog = *pprog;
2578 
2579 	target = PTR_ALIGN(prog, align);
2580 	if (target != prog)
2581 		emit_nops(&prog, target - prog);
2582 
2583 	*pprog = prog;
2584 }
2585 
2586 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
2587 {
2588 	u8 *prog = *pprog;
2589 	s64 offset;
2590 
2591 	offset = func - (ip + 2 + 4);
2592 	if (!is_simm32(offset)) {
2593 		pr_err("Target %p is out of range\n", func);
2594 		return -EINVAL;
2595 	}
2596 	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
2597 	*pprog = prog;
2598 	return 0;
2599 }
2600 
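/* The near form emitted above is 2 opcode bytes (0x0F, cond + 0x10) plus a
 * 4-byte rel32, which is why the displacement is computed relative to
 * "ip + 2 + 4".
 */
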
2601 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
2602 		      struct bpf_tramp_links *tl, int stack_size,
2603 		      int run_ctx_off, bool save_ret,
2604 		      void *image, void *rw_image)
2605 {
2606 	int i;
2607 	u8 *prog = *pprog;
2608 
2609 	for (i = 0; i < tl->nr_links; i++) {
2610 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
2611 				    run_ctx_off, save_ret, image, rw_image))
2612 			return -EINVAL;
2613 	}
2614 	*pprog = prog;
2615 	return 0;
2616 }
2617 
2618 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
2619 			      struct bpf_tramp_links *tl, int stack_size,
2620 			      int run_ctx_off, u8 **branches,
2621 			      void *image, void *rw_image)
2622 {
2623 	u8 *prog = *pprog;
2624 	int i;
2625 
2626 	/* The first fmod_ret program will receive a garbage return value.
2627 	 * Set this to 0 to avoid confusing the program.
2628 	 */
2629 	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
2630 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2631 	for (i = 0; i < tl->nr_links; i++) {
2632 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
2633 				    image, rw_image))
2634 			return -EINVAL;
2635 
2636 		/* mod_ret prog stored return value into [rbp - 8]. Emit:
2637 		 * if (*(u64 *)(rbp - 8) !=  0)
2638 		 *	goto do_fexit;
2639 		 */
2640 		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
2641 		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
2642 
2643 		/* Save the location of the branch, and generate 6 nops
2644 		 * (4 bytes for an offset and 2 bytes for the jump). These nops
2645 		 * are replaced with a conditional jump once do_fexit (i.e. the
2646 		 * start of the fexit invocation) is finalized.
2647 		 */
2648 		branches[i] = prog;
2649 		emit_nops(&prog, 4 + 2);
2650 	}
2651 
2652 	*pprog = prog;
2653 	return 0;
2654 }
2655 
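/* The 6 nops reserved above are later overwritten by
 * emit_cond_near_jump(..., X86_JNE) from __arch_prepare_bpf_trampoline(),
 * which emits the 6-byte near form "0F 85 rel32", i.e. a "jne do_fexit"
 * targeting the 16-byte aligned fexit block.
 */
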
2656 /* Example:
2657  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
2658  * its 'struct btf_func_model' will be nr_args=2
2659  * The assembly code when eth_type_trans is executing after trampoline:
2660  *
2661  * push rbp
2662  * mov rbp, rsp
2663  * sub rsp, 16                     // space for skb and dev
2664  * push rbx                        // temp regs to pass start time
2665  * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
2666  * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
2667  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2668  * mov rbx, rax                    // remember start time if bpf stats are enabled
2669  * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
2670  * call addr_of_jited_FENTRY_prog
2671  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2672  * mov rsi, rbx                    // prog start time
2673  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2674  * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
2675  * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
2676  * pop rbx
2677  * leave
2678  * ret
2679  *
2680  * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
2681  * replaced with 'call generated_bpf_trampoline'. When it returns
2682  * eth_type_trans will continue executing with original skb and dev pointers.
2683  *
2684  * The assembly code when eth_type_trans is called from trampoline:
2685  *
2686  * push rbp
2687  * mov rbp, rsp
2688  * sub rsp, 24                     // space for skb, dev, return value
2689  * push rbx                        // temp regs to pass start time
2690  * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
2691  * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
2692  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2693  * mov rbx, rax                    // remember start time if bpf stats are enabled
2694  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
2695  * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
2696  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2697  * mov rsi, rbx                    // prog start time
2698  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2699  * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
2700  * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
2701  * call eth_type_trans+5           // execute body of eth_type_trans
2702  * mov qword ptr [rbp - 8], rax    // save return value
2703  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2704  * mov rbx, rax                    // remember start time if bpf stats are enabled
2705  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
2706  * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
2707  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2708  * mov rsi, rbx                    // prog start time
2709  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2710  * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
2711  * pop rbx
2712  * leave
2713  * add rsp, 8                      // skip eth_type_trans's frame
2714  * ret                             // return to its caller
2715  */
2716 static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
2717 					 void *rw_image_end, void *image,
2718 					 const struct btf_func_model *m, u32 flags,
2719 					 struct bpf_tramp_links *tlinks,
2720 					 void *func_addr)
2721 {
2722 	int i, ret, nr_regs = m->nr_args, stack_size = 0;
2723 	int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
2724 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
2725 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
2726 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
2727 	void *orig_call = func_addr;
2728 	u8 **branches = NULL;
2729 	u8 *prog;
2730 	bool save_ret;
2731 
2732 	/*
2733 	 * F_INDIRECT is only compatible with F_RET_FENTRY_RET; it is
2734 	 * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG
2735 	 * because of @func_addr.
2736 	 */
2737 	WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
2738 		     (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));
2739 
2740 	/* extra registers for struct arguments */
2741 	for (i = 0; i < m->nr_args; i++) {
2742 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
2743 			nr_regs += (m->arg_size[i] + 7) / 8 - 1;
2744 	}
2745 
2746 	/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. 1-6
2747 	 * are passed through regs, and the rest are passed on the stack.
2748 	 */
2749 	if (nr_regs > MAX_BPF_FUNC_ARGS)
2750 		return -ENOTSUPP;
2751 
2752 	/* Generated trampoline stack layout:
2753 	 *
2754 	 * RBP + 8         [ return address  ]
2755 	 * RBP + 0         [ RBP             ]
2756 	 *
2757 	 * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
2758 	 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
2759 	 *
2760 	 *                 [ reg_argN        ]  always
2761 	 *                 [ ...             ]
2762 	 * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
2763 	 *
2764 	 * RBP - nregs_off [ regs count	     ]  always
2765 	 *
2766 	 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
2767 	 *
2768 	 * RBP - rbx_off   [ rbx value       ]  always
2769 	 *
2770 	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
2771 	 *
2772 	 *                     [ stack_argN ]  BPF_TRAMP_F_CALL_ORIG
2773 	 *                     [ ...        ]
2774 	 *                     [ stack_arg2 ]
2775 	 * RBP - arg_stack_off [ stack_arg1 ]
2776 	 * RSP                 [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
2777 	 */
2778 
2779 	/* room for return value of orig_call or fentry prog */
2780 	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
2781 	if (save_ret)
2782 		stack_size += 8;
2783 
2784 	stack_size += nr_regs * 8;
2785 	regs_off = stack_size;
2786 
2787 	/* regs count  */
2788 	stack_size += 8;
2789 	nregs_off = stack_size;
2790 
2791 	if (flags & BPF_TRAMP_F_IP_ARG)
2792 		stack_size += 8; /* room for IP address argument */
2793 
2794 	ip_off = stack_size;
2795 
2796 	stack_size += 8;
2797 	rbx_off = stack_size;
2798 
2799 	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
2800 	run_ctx_off = stack_size;
2801 
2802 	if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
2803 		/* the space used to pass arguments on the stack */
2804 		stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
2805 		/* make sure the stack pointer is 16-byte aligned if we
2806 		 * need to pass arguments on the stack, which means
2807 		 *  [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
2808 		 * should be 16-byte aligned. The following code depends on
2809 		 * stack_size already being 8-byte aligned.
2810 		 */
2811 		stack_size += (stack_size % 16) ? 0 : 8;
2812 	}
2813 
2814 	arg_stack_off = stack_size;
2815 
2816 	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
2817 		/* skip patched call instruction and point orig_call to actual
2818 		 * body of the kernel function.
2819 		 */
2820 		if (is_endbr(*(u32 *)orig_call))
2821 			orig_call += ENDBR_INSN_SIZE;
2822 		orig_call += X86_PATCH_SIZE;
2823 	}
2824 
2825 	prog = rw_image;
2826 
2827 	if (flags & BPF_TRAMP_F_INDIRECT) {
2828 		/*
2829 		 * Indirect call for bpf_struct_ops
2830 		 */
2831 		emit_cfi(&prog, cfi_get_func_hash(func_addr));
2832 	} else {
2833 		/*
2834 		 * Direct-call fentry stub; as such it needs accounting for the
2835 		 * __fentry__ call.
2836 		 */
2837 		x86_call_depth_emit_accounting(&prog, NULL);
2838 	}
2839 	EMIT1(0x55);		 /* push rbp */
2840 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
2841 	if (!is_imm8(stack_size)) {
2842 		/* sub rsp, stack_size */
2843 		EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
2844 	} else {
2845 		/* sub rsp, stack_size */
2846 		EMIT4(0x48, 0x83, 0xEC, stack_size);
2847 	}
2848 	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
2849 		EMIT1(0x50);		/* push rax */
2850 	/* mov QWORD PTR [rbp - rbx_off], rbx */
2851 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
2852 
2853 	/* Store number of argument registers of the traced function:
2854 	 *   mov rax, nr_regs
2855 	 *   mov QWORD PTR [rbp - nregs_off], rax
2856 	 */
2857 	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
2858 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
2859 
2860 	if (flags & BPF_TRAMP_F_IP_ARG) {
2861 		/* Store IP address of the traced function:
2862 		 * movabsq rax, func_addr
2863 		 * mov QWORD PTR [rbp - ip_off], rax
2864 		 */
2865 		emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
2866 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
2867 	}
2868 
2869 	save_args(m, &prog, regs_off, false);
2870 
2871 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2872 		/* arg1: mov rdi, im */
2873 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2874 		if (emit_rsb_call(&prog, __bpf_tramp_enter,
2875 				  image + (prog - (u8 *)rw_image))) {
2876 			ret = -EINVAL;
2877 			goto cleanup;
2878 		}
2879 	}
2880 
2881 	if (fentry->nr_links) {
2882 		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
2883 			       flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
2884 			return -EINVAL;
2885 	}
2886 
2887 	if (fmod_ret->nr_links) {
2888 		branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
2889 				   GFP_KERNEL);
2890 		if (!branches)
2891 			return -ENOMEM;
2892 
2893 		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
2894 				       run_ctx_off, branches, image, rw_image)) {
2895 			ret = -EINVAL;
2896 			goto cleanup;
2897 		}
2898 	}
2899 
2900 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2901 		restore_regs(m, &prog, regs_off);
2902 		save_args(m, &prog, arg_stack_off, true);
2903 
2904 		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
2905 			/* Before calling the original function, restore the
2906 			 * tail_call_cnt from stack to rax.
2907 			 */
2908 			RESTORE_TAIL_CALL_CNT(stack_size);
2909 		}
2910 
2911 		if (flags & BPF_TRAMP_F_ORIG_STACK) {
2912 			emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
2913 			EMIT2(0xff, 0xd3); /* call *rbx */
2914 		} else {
2915 			/* call original function */
2916 			if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
2917 				ret = -EINVAL;
2918 				goto cleanup;
2919 			}
2920 		}
2921 		/* remember return value on the stack for bpf prog to access */
2922 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2923 		im->ip_after_call = image + (prog - (u8 *)rw_image);
2924 		emit_nops(&prog, X86_PATCH_SIZE);
2925 	}
2926 
2927 	if (fmod_ret->nr_links) {
2928 		/* From Intel 64 and IA-32 Architectures Optimization
2929 		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2930 		 * Coding Rule 11: All branch targets should be 16-byte
2931 		 * aligned.
2932 		 */
2933 		emit_align(&prog, 16);
2934 		/* Update the branches saved in invoke_bpf_mod_ret with the
2935 		 * aligned address of do_fexit.
2936 		 */
2937 		for (i = 0; i < fmod_ret->nr_links; i++) {
2938 			emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
2939 					    image + (branches[i] - (u8 *)rw_image), X86_JNE);
2940 		}
2941 	}
2942 
2943 	if (fexit->nr_links) {
2944 		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
2945 			       false, image, rw_image)) {
2946 			ret = -EINVAL;
2947 			goto cleanup;
2948 		}
2949 	}
2950 
2951 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
2952 		restore_regs(m, &prog, regs_off);
2953 
2954 	/* This needs to be done regardless. If there were fmod_ret programs,
2955 	 * the return value is only updated on the stack and still needs to be
2956 	 * restored to R0.
2957 	 */
2958 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2959 		im->ip_epilogue = image + (prog - (u8 *)rw_image);
2960 		/* arg1: mov rdi, im */
2961 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2962 		if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
2963 			ret = -EINVAL;
2964 			goto cleanup;
2965 		}
2966 	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
2967 		/* Before running the original function, restore the
2968 		 * tail_call_cnt from stack to rax.
2969 		 */
2970 		RESTORE_TAIL_CALL_CNT(stack_size);
2971 	}
2972 
2973 	/* restore return value of orig_call or fentry prog back into RAX */
2974 	if (save_ret)
2975 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2976 
2977 	emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
2978 	EMIT1(0xC9); /* leave */
2979 	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
2980 		/* skip our return address and return to parent */
2981 		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
2982 	}
2983 	emit_return(&prog, image + (prog - (u8 *)rw_image));
2984 	/* Make sure the trampoline generation logic doesn't overflow */
2985 	if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
2986 		ret = -EFAULT;
2987 		goto cleanup;
2988 	}
2989 	ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;
2990 
2991 cleanup:
2992 	kfree(branches);
2993 	return ret;
2994 }
2995 
2996 void *arch_alloc_bpf_trampoline(unsigned int size)
2997 {
2998 	return bpf_prog_pack_alloc(size, jit_fill_hole);
2999 }
3000 
3001 void arch_free_bpf_trampoline(void *image, unsigned int size)
3002 {
3003 	bpf_prog_pack_free(image, size);
3004 }
3005 
3006 int arch_protect_bpf_trampoline(void *image, unsigned int size)
3007 {
3008 	return 0;
3009 }
3010 
3011 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
3012 				const struct btf_func_model *m, u32 flags,
3013 				struct bpf_tramp_links *tlinks,
3014 				void *func_addr)
3015 {
3016 	void *rw_image, *tmp;
3017 	int ret;
3018 	u32 size = image_end - image;
3019 
3020 	/* rw_image doesn't need to be in module memory range, so we can
3021 	 * use kvmalloc.
3022 	 */
3023 	rw_image = kvmalloc(size, GFP_KERNEL);
3024 	if (!rw_image)
3025 		return -ENOMEM;
3026 
3027 	ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
3028 					    flags, tlinks, func_addr);
3029 	if (ret < 0)
3030 		goto out;
3031 
3032 	tmp = bpf_arch_text_copy(image, rw_image, size);
3033 	if (IS_ERR(tmp))
3034 		ret = PTR_ERR(tmp);
3035 out:
3036 	kvfree(rw_image);
3037 	return ret;
3038 }
3039 
3040 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
3041 			     struct bpf_tramp_links *tlinks, void *func_addr)
3042 {
3043 	struct bpf_tramp_image im;
3044 	void *image;
3045 	int ret;
3046 
3047 	/* Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
3048 	 * This will NOT cause fragmentation in direct map, as we do not
3049 	 * call set_memory_*() on this buffer.
3050 	 *
3051 	 * We cannot use kvmalloc here, because we need image to be in
3052 	 * module memory range.
3053 	 */
3054 	image = bpf_jit_alloc_exec(PAGE_SIZE);
3055 	if (!image)
3056 		return -ENOMEM;
3057 
3058 	ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
3059 					    m, flags, tlinks, func_addr);
3060 	bpf_jit_free_exec(image);
3061 	return ret;
3062 }
3063 
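/* Note: arch_bpf_trampoline_size() above does a throwaway generation pass
 * purely to learn the size; the returned value already includes the
 * BPF_INSN_SAFETY slack added at the end of __arch_prepare_bpf_trampoline().
 */
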
3064 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
3065 {
3066 	u8 *jg_reloc, *prog = *pprog;
3067 	int pivot, err, jg_bytes = 1;
3068 	s64 jg_offset;
3069 
3070 	if (a == b) {
3071 		/* Leaf node of recursion, i.e. not a range of indices
3072 		 * anymore.
3073 		 */
3074 		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
3075 		if (!is_simm32(progs[a]))
3076 			return -1;
3077 		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
3078 			    progs[a]);
3079 		err = emit_cond_near_jump(&prog,	/* je func */
3080 					  (void *)progs[a], image + (prog - buf),
3081 					  X86_JE);
3082 		if (err)
3083 			return err;
3084 
3085 		emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
3086 
3087 		*pprog = prog;
3088 		return 0;
3089 	}
3090 
3091 	/* Not a leaf node, so we pivot, and recursively descend into
3092 	 * the lower and upper ranges.
3093 	 */
3094 	pivot = (b - a) / 2;
3095 	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
3096 	if (!is_simm32(progs[a + pivot]))
3097 		return -1;
3098 	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
3099 
3100 	if (pivot > 2) {				/* jg upper_part */
3101 		/* Require near jump. */
3102 		jg_bytes = 4;
3103 		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
3104 	} else {
3105 		EMIT2(X86_JG, 0);
3106 	}
3107 	jg_reloc = prog;
3108 
3109 	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
3110 				  progs, image, buf);
3111 	if (err)
3112 		return err;
3113 
3114 	/* From Intel 64 and IA-32 Architectures Optimization
3115 	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3116 	 * Coding Rule 11: All branch targets should be 16-byte
3117 	 * aligned.
3118 	 */
3119 	emit_align(&prog, 16);
3120 	jg_offset = prog - jg_reloc;
3121 	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
3122 
3123 	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
3124 				  b, progs, image, buf);
3125 	if (err)
3126 		return err;
3127 
3128 	*pprog = prog;
3129 	return 0;
3130 }
3131 
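/* A rough sketch of the dispatch code generated for two target programs
 * (illustrative only; the exact indirect-jump sequence depends on the
 * retpoline configuration):
 *
 *   cmp rdx, progs[0]
 *   jg  upper
 *   cmp rdx, progs[0]
 *   je  progs[0]
 *   jmp rdx                  // indirect jump to the attached program
 * upper:                     // 16-byte aligned
 *   cmp rdx, progs[1]
 *   je  progs[1]
 *   jmp rdx
 */
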
3132 static int cmp_ips(const void *a, const void *b)
3133 {
3134 	const s64 *ipa = a;
3135 	const s64 *ipb = b;
3136 
3137 	if (*ipa > *ipb)
3138 		return 1;
3139 	if (*ipa < *ipb)
3140 		return -1;
3141 	return 0;
3142 }
3143 
3144 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
3145 {
3146 	u8 *prog = buf;
3147 
3148 	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
3149 	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
3150 }
3151 
3152 struct x64_jit_data {
3153 	struct bpf_binary_header *rw_header;
3154 	struct bpf_binary_header *header;
3155 	int *addrs;
3156 	u8 *image;
3157 	int proglen;
3158 	struct jit_context ctx;
3159 };
3160 
3161 #define MAX_PASSES 20
3162 #define PADDING_PASSES (MAX_PASSES - 5)
3163 
3164 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
3165 {
3166 	struct bpf_binary_header *rw_header = NULL;
3167 	struct bpf_binary_header *header = NULL;
3168 	struct bpf_prog *tmp, *orig_prog = prog;
3169 	struct x64_jit_data *jit_data;
3170 	int proglen, oldproglen = 0;
3171 	struct jit_context ctx = {};
3172 	bool tmp_blinded = false;
3173 	bool extra_pass = false;
3174 	bool padding = false;
3175 	u8 *rw_image = NULL;
3176 	u8 *image = NULL;
3177 	int *addrs;
3178 	int pass;
3179 	int i;
3180 
3181 	if (!prog->jit_requested)
3182 		return orig_prog;
3183 
3184 	tmp = bpf_jit_blind_constants(prog);
3185 	/*
3186 	 * If blinding was requested and we failed during blinding,
3187 	 * we must fall back to the interpreter.
3188 	 */
3189 	if (IS_ERR(tmp))
3190 		return orig_prog;
3191 	if (tmp != prog) {
3192 		tmp_blinded = true;
3193 		prog = tmp;
3194 	}
3195 
3196 	jit_data = prog->aux->jit_data;
3197 	if (!jit_data) {
3198 		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
3199 		if (!jit_data) {
3200 			prog = orig_prog;
3201 			goto out;
3202 		}
3203 		prog->aux->jit_data = jit_data;
3204 	}
3205 	addrs = jit_data->addrs;
3206 	if (addrs) {
3207 		ctx = jit_data->ctx;
3208 		oldproglen = jit_data->proglen;
3209 		image = jit_data->image;
3210 		header = jit_data->header;
3211 		rw_header = jit_data->rw_header;
3212 		rw_image = (void *)rw_header + ((void *)image - (void *)header);
3213 		extra_pass = true;
3214 		padding = true;
3215 		goto skip_init_addrs;
3216 	}
3217 	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
3218 	if (!addrs) {
3219 		prog = orig_prog;
3220 		goto out_addrs;
3221 	}
3222 
3223 	/*
3224 	 * Before the first pass, make a rough estimation of addrs[]:
3225 	 * each BPF instruction is translated to less than 64 bytes
3226 	 */
3227 	for (proglen = 0, i = 0; i <= prog->len; i++) {
3228 		proglen += 64;
3229 		addrs[i] = proglen;
3230 	}
3231 	ctx.cleanup_addr = proglen;
3232 skip_init_addrs:
3233 
3234 	/*
3235 	 * JITed image shrinks with every pass and the loop iterates
3236 	 * until the image stops shrinking. Very large BPF programs
3237 	 * may converge on the last pass. In such a case, do one more
3238 	 * pass to emit the final image.
3239 	 */
3240 	for (pass = 0; pass < MAX_PASSES || image; pass++) {
3241 		if (!padding && pass >= PADDING_PASSES)
3242 			padding = true;
3243 		proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
3244 		if (proglen <= 0) {
3245 out_image:
3246 			image = NULL;
3247 			if (header) {
3248 				bpf_arch_text_copy(&header->size, &rw_header->size,
3249 						   sizeof(rw_header->size));
3250 				bpf_jit_binary_pack_free(header, rw_header);
3251 			}
3252 			/* Fall back to interpreter mode */
3253 			prog = orig_prog;
3254 			if (extra_pass) {
3255 				prog->bpf_func = NULL;
3256 				prog->jited = 0;
3257 				prog->jited_len = 0;
3258 			}
3259 			goto out_addrs;
3260 		}
3261 		if (image) {
3262 			if (proglen != oldproglen) {
3263 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
3264 				       proglen, oldproglen);
3265 				goto out_image;
3266 			}
3267 			break;
3268 		}
3269 		if (proglen == oldproglen) {
3270 			/*
3271 			 * The number of entries in extable is the number of BPF_LDX
3272 			 * insns that access kernel memory via "pointer to BTF type".
3273 			 * The verifier changed their opcode from LDX|MEM|size
3274 			 * to LDX|PROBE_MEM|size to make JITing easier.
3275 			 */
3276 			u32 align = __alignof__(struct exception_table_entry);
3277 			u32 extable_size = prog->aux->num_exentries *
3278 				sizeof(struct exception_table_entry);
3279 
3280 			/* allocate module memory for x86 insns and extable */
3281 			header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
3282 							   &image, align, &rw_header, &rw_image,
3283 							   jit_fill_hole);
3284 			if (!header) {
3285 				prog = orig_prog;
3286 				goto out_addrs;
3287 			}
3288 			prog->aux->extable = (void *) image + roundup(proglen, align);
3289 		}
3290 		oldproglen = proglen;
3291 		cond_resched();
3292 	}
3293 
3294 	if (bpf_jit_enable > 1)
3295 		bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
3296 
3297 	if (image) {
3298 		if (!prog->is_func || extra_pass) {
3299 			/*
3300 			 * bpf_jit_binary_pack_finalize fails in two scenarios:
3301 			 *   1) header is not pointing to proper module memory;
3302 			 *   2) the arch doesn't support bpf_arch_text_copy().
3303 			 *
3304 			 * Both cases are serious bugs and justify WARN_ON.
3305 			 */
3306 			if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
3307 				/* header has been freed */
3308 				header = NULL;
3309 				goto out_image;
3310 			}
3311 
3312 			bpf_tail_call_direct_fixup(prog);
3313 		} else {
3314 			jit_data->addrs = addrs;
3315 			jit_data->ctx = ctx;
3316 			jit_data->proglen = proglen;
3317 			jit_data->image = image;
3318 			jit_data->header = header;
3319 			jit_data->rw_header = rw_header;
3320 		}
3321 		/*
3322 		 * cfi_get_offset() is used when CFI preambles put code *before*
3323 		 * the function. See emit_cfi(). For FineIBT specifically this code
3324 		 * can also be executed and bpf_prog_kallsyms_add() will
3325 		 * generate an additional symbol to cover this, hence also
3326 		 * decrement proglen.
3327 		 */
3328 		prog->bpf_func = (void *)image + cfi_get_offset();
3329 		prog->jited = 1;
3330 		prog->jited_len = proglen - cfi_get_offset();
3331 	} else {
3332 		prog = orig_prog;
3333 	}
3334 
3335 	if (!image || !prog->is_func || extra_pass) {
3336 		if (image)
3337 			bpf_prog_fill_jited_linfo(prog, addrs + 1);
3338 out_addrs:
3339 		kvfree(addrs);
3340 		kfree(jit_data);
3341 		prog->aux->jit_data = NULL;
3342 	}
3343 out:
3344 	if (tmp_blinded)
3345 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
3346 					   tmp : orig_prog);
3347 	return prog;
3348 }
3349 
3350 bool bpf_jit_supports_kfunc_call(void)
3351 {
3352 	return true;
3353 }
3354 
3355 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
3356 {
3357 	if (text_poke_copy(dst, src, len) == NULL)
3358 		return ERR_PTR(-EINVAL);
3359 	return dst;
3360 }
3361 
3362 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
3363 bool bpf_jit_supports_subprog_tailcalls(void)
3364 {
3365 	return true;
3366 }
3367 
3368 void bpf_jit_free(struct bpf_prog *prog)
3369 {
3370 	if (prog->jited) {
3371 		struct x64_jit_data *jit_data = prog->aux->jit_data;
3372 		struct bpf_binary_header *hdr;
3373 
3374 		/*
3375 		 * If we fail the final pass of JIT (from jit_subprogs),
3376 		 * the program may not be finalized yet. Call finalize here
3377 		 * before freeing it.
3378 		 */
3379 		if (jit_data) {
3380 			bpf_jit_binary_pack_finalize(prog, jit_data->header,
3381 						     jit_data->rw_header);
3382 			kvfree(jit_data->addrs);
3383 			kfree(jit_data);
3384 		}
3385 		prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset();
3386 		hdr = bpf_jit_binary_pack_hdr(prog);
3387 		bpf_jit_binary_pack_free(hdr, NULL);
3388 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
3389 	}
3390 
3391 	bpf_prog_unlock_free(prog);
3392 }
3393 
3394 bool bpf_jit_supports_exceptions(void)
3395 {
3396 	/* We unwind through both kernel frames (starting from within bpf_throw
3397 	 * call) and BPF frames. Therefore we require ORC unwinder to be enabled
3398 	 * to walk kernel frames and reach BPF frames in the stack trace.
3399 	 */
3400 	return IS_ENABLED(CONFIG_UNWINDER_ORC);
3401 }
3402 
3403 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3404 {
3405 #if defined(CONFIG_UNWINDER_ORC)
3406 	struct unwind_state state;
3407 	unsigned long addr;
3408 
3409 	for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
3410 	     unwind_next_frame(&state)) {
3411 		addr = unwind_get_return_address(&state);
3412 		if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
3413 			break;
3414 	}
3415 	return;
3416 #endif
3417 	WARN(1, "verification of programs using bpf_throw should have failed\n");
3418 }
3419 
3420 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
3421 			       struct bpf_prog *new, struct bpf_prog *old)
3422 {
3423 	u8 *old_addr, *new_addr, *old_bypass_addr;
3424 	int ret;
3425 
3426 	old_bypass_addr = old ? NULL : poke->bypass_addr;
3427 	old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
3428 	new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
3429 
3430 	/*
3431 	 * On program loading or teardown, the program's kallsym entry
3432 	 * might not be in place, so we use __bpf_arch_text_poke to skip
3433 	 * the kallsyms check.
3434 	 */
3435 	if (new) {
3436 		ret = __bpf_arch_text_poke(poke->tailcall_target,
3437 					   BPF_MOD_JUMP,
3438 					   old_addr, new_addr);
3439 		BUG_ON(ret < 0);
3440 		if (!old) {
3441 			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
3442 						   BPF_MOD_JUMP,
3443 						   poke->bypass_addr,
3444 						   NULL);
3445 			BUG_ON(ret < 0);
3446 		}
3447 	} else {
3448 		ret = __bpf_arch_text_poke(poke->tailcall_bypass,
3449 					   BPF_MOD_JUMP,
3450 					   old_bypass_addr,
3451 					   poke->bypass_addr);
3452 		BUG_ON(ret < 0);
3453 		/* let other CPUs finish the execution of program
3454 		 * so that it will not be possible to expose them
3455 		 * to an invalid nop, stack unwind, or nop state
3456 		 */
3457 		if (!ret)
3458 			synchronize_rcu();
3459 		ret = __bpf_arch_text_poke(poke->tailcall_target,
3460 					   BPF_MOD_JUMP,
3461 					   old_addr, NULL);
3462 		BUG_ON(ret < 0);
3463 	}
3464 }
3465 
3466 bool bpf_jit_supports_arena(void)
3467 {
3468 	return true;
3469 }
3470 
3471 bool bpf_jit_supports_ptr_xchg(void)
3472 {
3473 	return true;
3474 }
3475