xref: /linux/arch/x86/net/bpf_jit_comp.c (revision 8157cc739ad301b7fb6dfc4cfc5497cedd33df4e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * BPF JIT compiler
4  *
5  * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6  * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7  */
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bitfield.h>
12 #include <linux/bpf.h>
13 #include <linux/memory.h>
14 #include <linux/sort.h>
15 #include <asm/extable.h>
16 #include <asm/ftrace.h>
17 #include <asm/set_memory.h>
18 #include <asm/nospec-branch.h>
19 #include <asm/text-patching.h>
20 #include <asm/unwind.h>
21 #include <asm/cfi.h>
22 
23 static bool all_callee_regs_used[4] = {true, true, true, true};
24 
25 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
26 {
27 	if (len == 1)
28 		*ptr = bytes;
29 	else if (len == 2)
30 		*(u16 *)ptr = bytes;
31 	else {
32 		*(u32 *)ptr = bytes;
33 		barrier();
34 	}
35 	return ptr + len;
36 }
37 
38 #define EMIT(bytes, len) \
39 	do { prog = emit_code(prog, bytes, len); } while (0)
40 
41 #define EMIT1(b1)		EMIT(b1, 1)
42 #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
43 #define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
44 #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
45 #define EMIT5(b1, b2, b3, b4, b5) \
46 	do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0)
47 
48 #define EMIT1_off32(b1, off) \
49 	do { EMIT1(b1); EMIT(off, 4); } while (0)
50 #define EMIT2_off32(b1, b2, off) \
51 	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
52 #define EMIT3_off32(b1, b2, b3, off) \
53 	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
54 #define EMIT4_off32(b1, b2, b3, b4, off) \
55 	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
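/*
 * The EMIT* helpers append little-endian byte sequences; e.g. the
 * 'sub rsp, imm32' used by the prologue below is assembled as
 *
 *   EMIT3_off32(0x48, 0x81, 0xEC, 512);
 *
 * which emits 48 81 ec 00 02 00 00 (REX.W, opcode 0x81 /5, imm32).
 */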
56 
57 #ifdef CONFIG_X86_KERNEL_IBT
58 #define EMIT_ENDBR()		EMIT(gen_endbr(), 4)
59 #define EMIT_ENDBR_POISON()	EMIT(gen_endbr_poison(), 4)
60 #else
61 #define EMIT_ENDBR()
62 #define EMIT_ENDBR_POISON()
63 #endif
64 
65 static bool is_imm8(int value)
66 {
67 	return value <= 127 && value >= -128;
68 }
69 
70 /*
71  * Let us limit the positive offset to be <= 123.
72  * This is to ensure eventual jit convergence for the following patterns:
73  * ...
74  * pass4, final_proglen=4391:
75  *   ...
76  *   20e:    48 85 ff                test   rdi,rdi
77  *   211:    74 7d                   je     0x290
78  *   213:    48 8b 77 00             mov    rsi,QWORD PTR [rdi+0x0]
79  *   ...
80  *   289:    48 85 ff                test   rdi,rdi
81  *   28c:    74 17                   je     0x2a5
82  *   28e:    e9 7f ff ff ff          jmp    0x212
83  *   293:    bf 03 00 00 00          mov    edi,0x3
84  * Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (+125)
85  * and insn at 0x28e is 5-byte jmp insn with offset -129.
86  *
87  * pass5, final_proglen=4392:
88  *   ...
89  *   20e:    48 85 ff                test   rdi,rdi
90  *   211:    0f 84 80 00 00 00       je     0x297
91  *   217:    48 8b 77 00             mov    rsi,QWORD PTR [rdi+0x0]
92  *   ...
93  *   28d:    48 85 ff                test   rdi,rdi
94  *   290:    74 1a                   je     0x2ac
95  *   292:    eb 84                   jmp    0x218
96  *   294:    bf 03 00 00 00          mov    edi,0x3
97  * Note that insn at 0x211 is 6-byte cond jump insn now since its offset
98  * becomes 0x80 based on previous round (0x293 - 0x213 = 0x80).
99  * At the same time, insn at 0x292 is a 2-byte insn since its offset is
100  * -124.
101  *
102  * pass6 will repeat the same code as in pass4 and this will prevent
103  * eventual convergence.
104  *
105  * To fix this issue, we need to break je (2->6 bytes) <-> jmp (5->2 bytes)
106  * cycle in the above. In the above example je offset <= 0x7c should work.
107  *
108  * For other cases, je <-> je needs offset <= 0x7b to avoid the same
109  * non-convergence issue. For jmp <-> je and jmp <-> jmp cases, jmp
110  * offset <= 0x7c should avoid it.
111  *
112  * Overall, let us limit the positive offset for 8bit cond/uncond jmp insn
113  * to maximum 123 (0x7b). This way, the jit pass can eventually converge.
114  */
115 static bool is_imm8_jmp_offset(int value)
116 {
117 	return value <= 123 && value >= -128;
118 }
119 
120 static bool is_simm32(s64 value)
121 {
122 	return value == (s64)(s32)value;
123 }
124 
125 static bool is_uimm32(u64 value)
126 {
127 	return value == (u64)(u32)value;
128 }
129 
130 /* mov dst, src */
131 #define EMIT_mov(DST, SRC)								 \
132 	do {										 \
133 		if (DST != SRC)								 \
134 			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
135 	} while (0)
136 
137 static int bpf_size_to_x86_bytes(int bpf_size)
138 {
139 	if (bpf_size == BPF_W)
140 		return 4;
141 	else if (bpf_size == BPF_H)
142 		return 2;
143 	else if (bpf_size == BPF_B)
144 		return 1;
145 	else if (bpf_size == BPF_DW)
146 		return 4; /* imm32 */
147 	else
148 		return 0;
149 }
150 
151 /*
152  * List of x86 cond jumps opcodes (. + s8)
153  * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
154  */
155 #define X86_JB  0x72
156 #define X86_JAE 0x73
157 #define X86_JE  0x74
158 #define X86_JNE 0x75
159 #define X86_JBE 0x76
160 #define X86_JA  0x77
161 #define X86_JL  0x7C
162 #define X86_JGE 0x7D
163 #define X86_JLE 0x7E
164 #define X86_JG  0x7F
165 
166 /* Pick a register outside of BPF range for JIT internal work */
167 #define AUX_REG (MAX_BPF_JIT_REG + 1)
168 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
169 #define X86_REG_R12 (MAX_BPF_JIT_REG + 3)
170 
171 /*
172  * The following table maps BPF registers to x86-64 registers.
173  *
174  * x86-64 register R12 is unused, since if used as base address
175  * register in load/store instructions, it always needs an
176  * extra byte of encoding and is callee saved.
177  *
178  * x86-64 register R9 is not used by BPF programs, but can be used by BPF
179  * trampoline. x86-64 register R10 is used for blinding (if enabled).
180  */
181 static const int reg2hex[] = {
182 	[BPF_REG_0] = 0,  /* RAX */
183 	[BPF_REG_1] = 7,  /* RDI */
184 	[BPF_REG_2] = 6,  /* RSI */
185 	[BPF_REG_3] = 2,  /* RDX */
186 	[BPF_REG_4] = 1,  /* RCX */
187 	[BPF_REG_5] = 0,  /* R8  */
188 	[BPF_REG_6] = 3,  /* RBX callee saved */
189 	[BPF_REG_7] = 5,  /* R13 callee saved */
190 	[BPF_REG_8] = 6,  /* R14 callee saved */
191 	[BPF_REG_9] = 7,  /* R15 callee saved */
192 	[BPF_REG_FP] = 5, /* RBP readonly */
193 	[BPF_REG_AX] = 2, /* R10 temp register */
194 	[AUX_REG] = 3,    /* R11 temp register */
195 	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
196 	[X86_REG_R12] = 4, /* R12 callee saved */
197 };
198 
199 static const int reg2pt_regs[] = {
200 	[BPF_REG_0] = offsetof(struct pt_regs, ax),
201 	[BPF_REG_1] = offsetof(struct pt_regs, di),
202 	[BPF_REG_2] = offsetof(struct pt_regs, si),
203 	[BPF_REG_3] = offsetof(struct pt_regs, dx),
204 	[BPF_REG_4] = offsetof(struct pt_regs, cx),
205 	[BPF_REG_5] = offsetof(struct pt_regs, r8),
206 	[BPF_REG_6] = offsetof(struct pt_regs, bx),
207 	[BPF_REG_7] = offsetof(struct pt_regs, r13),
208 	[BPF_REG_8] = offsetof(struct pt_regs, r14),
209 	[BPF_REG_9] = offsetof(struct pt_regs, r15),
210 };
211 
212 /*
213  * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
214  * which need extra byte of encoding.
215  * rax,rcx,...,rbp have simpler encoding
216  */
217 static bool is_ereg(u32 reg)
218 {
219 	return (1 << reg) & (BIT(BPF_REG_5) |
220 			     BIT(AUX_REG) |
221 			     BIT(BPF_REG_7) |
222 			     BIT(BPF_REG_8) |
223 			     BIT(BPF_REG_9) |
224 			     BIT(X86_REG_R9) |
225 			     BIT(X86_REG_R12) |
226 			     BIT(BPF_REG_AX));
227 }
228 
229 /*
230  * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
231  * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
232  * of encoding. al,cl,dl,bl have simpler encoding.
233  */
234 static bool is_ereg_8l(u32 reg)
235 {
236 	return is_ereg(reg) ||
237 	    (1 << reg) & (BIT(BPF_REG_1) |
238 			  BIT(BPF_REG_2) |
239 			  BIT(BPF_REG_FP));
240 }
241 
242 static bool is_axreg(u32 reg)
243 {
244 	return reg == BPF_REG_0;
245 }
246 
247 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
248 static u8 add_1mod(u8 byte, u32 reg)
249 {
250 	if (is_ereg(reg))
251 		byte |= 1;
252 	return byte;
253 }
254 
255 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
256 {
257 	if (is_ereg(r1))
258 		byte |= 1;
259 	if (is_ereg(r2))
260 		byte |= 4;
261 	return byte;
262 }
263 
264 static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index)
265 {
266 	if (is_ereg(r1))
267 		byte |= 1;
268 	if (is_ereg(index))
269 		byte |= 2;
270 	if (is_ereg(r2))
271 		byte |= 4;
272 	return byte;
273 }
274 
275 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
276 static u8 add_1reg(u8 byte, u32 dst_reg)
277 {
278 	return byte + reg2hex[dst_reg];
279 }
280 
281 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
282 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
283 {
284 	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
285 }
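
/*
 * Worked example of the helpers above: EMIT_mov(BPF_REG_7, BPF_REG_6),
 * i.e. 'mov r13, rbx', expands to
 *   add_2mod(0x48, R7, R6) -> 0x49 (REX.W + REX.B, r13 is extended)
 *   0x89                           (mov r/m64, r64)
 *   add_2reg(0xC0, R7, R6) -> 0xdd (ModRM: mod=11, reg=rbx, rm=r13)
 * i.e. the byte sequence 49 89 dd.
 */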
286 
287 /* Some 1-byte opcodes for binary ALU operations */
288 static u8 simple_alu_opcodes[] = {
289 	[BPF_ADD] = 0x01,
290 	[BPF_SUB] = 0x29,
291 	[BPF_AND] = 0x21,
292 	[BPF_OR] = 0x09,
293 	[BPF_XOR] = 0x31,
294 	[BPF_LSH] = 0xE0,
295 	[BPF_RSH] = 0xE8,
296 	[BPF_ARSH] = 0xF8,
297 };
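
/*
 * These pair with a ModRM byte from add_2reg(); e.g. BPF_ADD | BPF_X with
 * dst rdi and src rsi becomes 'add rdi, rsi' (48 01 f7), where 0xf7 is
 * add_2reg(0xC0, dst, src). The BPF_LSH/RSH/ARSH entries are not opcodes
 * but ModRM /4, /5 and /7 bases for the 0xC1/0xD3 shift group.
 */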
298 
299 static void jit_fill_hole(void *area, unsigned int size)
300 {
301 	/* Fill whole space with INT3 instructions */
302 	memset(area, 0xcc, size);
303 }
304 
305 int bpf_arch_text_invalidate(void *dst, size_t len)
306 {
307 	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
308 }
309 
310 struct jit_context {
311 	int cleanup_addr; /* Epilogue code offset */
312 
313 	/*
314 	 * Program specific offsets of labels in the code; these rely on the
315 	 * JIT doing at least 2 passes, recording the position on the first
316 	 * pass, only to generate the correct offset on the second pass.
317 	 */
318 	int tail_call_direct_label;
319 	int tail_call_indirect_label;
320 };
321 
322 /* Maximum number of bytes emitted while JITing one eBPF insn */
323 #define BPF_MAX_INSN_SIZE	128
324 #define BPF_INSN_SAFETY		64
325 
326 /* Number of bytes emit_patch() needs to generate instructions */
327 #define X86_PATCH_SIZE		5
328 /* Number of bytes that will be skipped on tailcall */
329 #define X86_TAIL_CALL_OFFSET	(12 + ENDBR_INSN_SIZE)
330 
331 static void push_r9(u8 **pprog)
332 {
333 	u8 *prog = *pprog;
334 
335 	EMIT2(0x41, 0x51);   /* push r9 */
336 	*pprog = prog;
337 }
338 
339 static void pop_r9(u8 **pprog)
340 {
341 	u8 *prog = *pprog;
342 
343 	EMIT2(0x41, 0x59);   /* pop r9 */
344 	*pprog = prog;
345 }
346 
347 static void push_r12(u8 **pprog)
348 {
349 	u8 *prog = *pprog;
350 
351 	EMIT2(0x41, 0x54);   /* push r12 */
352 	*pprog = prog;
353 }
354 
355 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
356 {
357 	u8 *prog = *pprog;
358 
359 	if (callee_regs_used[0])
360 		EMIT1(0x53);         /* push rbx */
361 	if (callee_regs_used[1])
362 		EMIT2(0x41, 0x55);   /* push r13 */
363 	if (callee_regs_used[2])
364 		EMIT2(0x41, 0x56);   /* push r14 */
365 	if (callee_regs_used[3])
366 		EMIT2(0x41, 0x57);   /* push r15 */
367 	*pprog = prog;
368 }
369 
370 static void pop_r12(u8 **pprog)
371 {
372 	u8 *prog = *pprog;
373 
374 	EMIT2(0x41, 0x5C);   /* pop r12 */
375 	*pprog = prog;
376 }
377 
378 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
379 {
380 	u8 *prog = *pprog;
381 
382 	if (callee_regs_used[3])
383 		EMIT2(0x41, 0x5F);   /* pop r15 */
384 	if (callee_regs_used[2])
385 		EMIT2(0x41, 0x5E);   /* pop r14 */
386 	if (callee_regs_used[1])
387 		EMIT2(0x41, 0x5D);   /* pop r13 */
388 	if (callee_regs_used[0])
389 		EMIT1(0x5B);         /* pop rbx */
390 	*pprog = prog;
391 }
392 
393 static void emit_nops(u8 **pprog, int len)
394 {
395 	u8 *prog = *pprog;
396 	int i, noplen;
397 
398 	while (len > 0) {
399 		noplen = len;
400 
401 		if (noplen > ASM_NOP_MAX)
402 			noplen = ASM_NOP_MAX;
403 
404 		for (i = 0; i < noplen; i++)
405 			EMIT1(x86_nops[noplen][i]);
406 		len -= noplen;
407 	}
408 
409 	*pprog = prog;
410 }
411 
412 /*
413  * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
414  * in arch/x86/kernel/alternative.c
415  */
416 static int emit_call(u8 **prog, void *func, void *ip);
417 
418 static void emit_fineibt(u8 **pprog, u8 *ip, u32 hash, int arity)
419 {
420 	u8 *prog = *pprog;
421 
422 	EMIT_ENDBR();
423 	EMIT1_off32(0x2d, hash);			/* subl $hash, %eax	*/
424 	if (cfi_bhi) {
425 		EMIT2(0x2e, 0x2e);			/* cs cs */
426 		emit_call(&prog, __bhi_args[arity], ip + 11);
427 	} else {
428 		EMIT3_off32(0x2e, 0x0f, 0x85, 3);	/* jne.d32,pn 3		*/
429 	}
430 	EMIT_ENDBR_POISON();
431 
432 	*pprog = prog;
433 }
434 
435 static void emit_kcfi(u8 **pprog, u32 hash)
436 {
437 	u8 *prog = *pprog;
438 
439 	EMIT1_off32(0xb8, hash);			/* movl $hash, %eax	*/
440 #ifdef CONFIG_CALL_PADDING
441 	EMIT1(0x90);
442 	EMIT1(0x90);
443 	EMIT1(0x90);
444 	EMIT1(0x90);
445 	EMIT1(0x90);
446 	EMIT1(0x90);
447 	EMIT1(0x90);
448 	EMIT1(0x90);
449 	EMIT1(0x90);
450 	EMIT1(0x90);
451 	EMIT1(0x90);
452 #endif
453 	EMIT_ENDBR();
454 
455 	*pprog = prog;
456 }
457 
458 static void emit_cfi(u8 **pprog, u8 *ip, u32 hash, int arity)
459 {
460 	u8 *prog = *pprog;
461 
462 	switch (cfi_mode) {
463 	case CFI_FINEIBT:
464 		emit_fineibt(&prog, ip, hash, arity);
465 		break;
466 
467 	case CFI_KCFI:
468 		emit_kcfi(&prog, hash);
469 		break;
470 
471 	default:
472 		EMIT_ENDBR();
473 		break;
474 	}
475 
476 	*pprog = prog;
477 }
478 
479 static void emit_prologue_tail_call(u8 **pprog, bool is_subprog)
480 {
481 	u8 *prog = *pprog;
482 
483 	if (!is_subprog) {
484 		/* cmp rax, MAX_TAIL_CALL_CNT */
485 		EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT);
486 		EMIT2(X86_JA, 6);        /* ja 6 */
487 		/* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT.
488 		 * case1: entry of main prog.
489 		 * case2: tail callee of main prog.
490 		 */
491 		EMIT1(0x50);             /* push rax */
492 		/* Make rax as tail_call_cnt_ptr. */
493 		EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */
494 		EMIT2(0xEB, 1);          /* jmp 1 */
495 		/* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT.
496 		 * case: tail callee of subprog.
497 		 */
498 		EMIT1(0x50);             /* push rax */
499 		/* push tail_call_cnt_ptr */
500 		EMIT1(0x50);             /* push rax */
501 	} else { /* is_subprog */
502 		/* rax is tail_call_cnt_ptr. */
503 		EMIT1(0x50);             /* push rax */
504 		EMIT1(0x50);             /* push rax */
505 	}
506 
507 	*pprog = prog;
508 }
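
/*
 * Together with the 'sub rsp, rounded_stack_depth' that emit_prologue()
 * below issues before calling this helper, this yields the following
 * frame layout (illustrative):
 *
 *   [rbp - 8 .. rbp - stack_depth]   BPF stack
 *   [rbp - stack_depth - 8]          tail_call_cnt (main prog) or a copy
 *                                    of tail_call_cnt_ptr (subprog)
 *   [rbp - stack_depth - 16]         tail_call_cnt_ptr
 *
 * which is what BPF_TAIL_CALL_CNT_PTR_STACK_OFF() below encodes.
 */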
509 
510 /*
511  * Emit x86-64 prologue code for BPF program.
512  * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
513  * while jumping to another program
514  */
515 static void emit_prologue(u8 **pprog, u8 *ip, u32 stack_depth, bool ebpf_from_cbpf,
516 			  bool tail_call_reachable, bool is_subprog,
517 			  bool is_exception_cb)
518 {
519 	u8 *prog = *pprog;
520 
521 	if (is_subprog) {
522 		emit_cfi(&prog, ip, cfi_bpf_subprog_hash, 5);
523 	} else {
524 		emit_cfi(&prog, ip, cfi_bpf_hash, 1);
525 	}
526 	/* BPF trampoline can be made to work without these nops,
527 	 * but let's waste 5 bytes for now and optimize later
528 	 */
529 	emit_nops(&prog, X86_PATCH_SIZE);
530 	if (!ebpf_from_cbpf) {
531 		if (tail_call_reachable && !is_subprog)
532 			/* When it's the entry of the whole tailcall context,
533 			 * zeroing rax means initialising tail_call_cnt.
534 			 */
535 			EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */
536 		else
537 			/* Keep the same instruction layout. */
538 			emit_nops(&prog, 3);     /* nop3 */
539 	}
540 	/* Exception callback receives FP as third parameter */
541 	if (is_exception_cb) {
542 		EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi (2nd param: stack pointer) */
543 		EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
544 		/* The main frame must have exception_boundary as true, so we
545 		 * first restore those callee-saved regs from stack, before
546 		 * reusing the stack frame.
547 		 */
548 		pop_callee_regs(&prog, all_callee_regs_used);
549 		pop_r12(&prog);
550 		/* Reset the stack frame. */
551 		EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
552 	} else {
553 		EMIT1(0x55);             /* push rbp */
554 		EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
555 	}
556 
557 	/* X86_TAIL_CALL_OFFSET is here */
558 	EMIT_ENDBR();
559 
560 	/* sub rsp, rounded_stack_depth */
561 	if (stack_depth)
562 		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
563 	if (tail_call_reachable)
564 		emit_prologue_tail_call(&prog, is_subprog);
565 	*pprog = prog;
566 }
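
/*
 * For a main prog with tail calls reachable, built without IBT/CFI, the
 * emitted prologue thus looks like (illustrative):
 *
 *   nop5                       ; patchable trampoline hook
 *   xor rax, rax               ; tail_call_cnt = 0
 *   push rbp
 *   mov rbp, rsp               ; 12 bytes so far = X86_TAIL_CALL_OFFSET
 *   sub rsp, stack_depth       ; tail calls land here
 *   ...                        ; emit_prologue_tail_call() pushes
 */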
567 
568 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
569 {
570 	u8 *prog = *pprog;
571 	s64 offset;
572 
573 	offset = func - (ip + X86_PATCH_SIZE);
574 	if (!is_simm32(offset)) {
575 		pr_err("Target call %p is out of range\n", func);
576 		return -ERANGE;
577 	}
578 	EMIT1_off32(opcode, offset);
579 	*pprog = prog;
580 	return 0;
581 }
582 
583 static int emit_call(u8 **pprog, void *func, void *ip)
584 {
585 	return emit_patch(pprog, func, ip, 0xE8);
586 }
587 
588 static int emit_rsb_call(u8 **pprog, void *func, void *ip)
589 {
590 	OPTIMIZER_HIDE_VAR(func);
591 	ip += x86_call_depth_emit_accounting(pprog, func, ip);
592 	return emit_patch(pprog, func, ip, 0xE8);
593 }
594 
595 static int emit_jump(u8 **pprog, void *func, void *ip)
596 {
597 	return emit_patch(pprog, func, ip, 0xE9);
598 }
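
/*
 * All three emit a 5-byte instruction whose rel32 is measured from the end
 * of the instruction; e.g. a call at ip == 0x1000 to func == 0x2000 uses
 * offset = 0x2000 - (0x1000 + X86_PATCH_SIZE) = 0xffb and encodes as
 * e8 fb 0f 00 00.
 */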
599 
600 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
601 				enum bpf_text_poke_type new_t,
602 				void *old_addr, void *new_addr)
603 {
604 	const u8 *nop_insn = x86_nops[5];
605 	u8 old_insn[X86_PATCH_SIZE];
606 	u8 new_insn[X86_PATCH_SIZE];
607 	u8 *prog;
608 	int ret;
609 
610 	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
611 	if (old_t != BPF_MOD_NOP && old_addr) {
612 		prog = old_insn;
613 		ret = old_t == BPF_MOD_CALL ?
614 		      emit_call(&prog, old_addr, ip) :
615 		      emit_jump(&prog, old_addr, ip);
616 		if (ret)
617 			return ret;
618 	}
619 
620 	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
621 	if (new_t != BPF_MOD_NOP && new_addr) {
622 		prog = new_insn;
623 		ret = new_t == BPF_MOD_CALL ?
624 		      emit_call(&prog, new_addr, ip) :
625 		      emit_jump(&prog, new_addr, ip);
626 		if (ret)
627 			return ret;
628 	}
629 
630 	ret = -EBUSY;
631 	mutex_lock(&text_mutex);
632 	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
633 		goto out;
634 	ret = 1;
635 	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
636 		smp_text_poke_single(ip, new_insn, X86_PATCH_SIZE, NULL);
637 		ret = 0;
638 	}
639 out:
640 	mutex_unlock(&text_mutex);
641 	return ret;
642 }
643 
644 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
645 		       enum bpf_text_poke_type new_t, void *old_addr,
646 		       void *new_addr)
647 {
648 	if (!is_kernel_text((long)ip) &&
649 	    !is_bpf_text_address((long)ip))
650 		/* BPF poking in modules is not supported */
651 		return -EINVAL;
652 
653 	/*
654 	 * See emit_prologue(), for IBT builds the trampoline hook is preceded
655 	 * with an ENDBR instruction.
656 	 */
657 	if (is_endbr(ip))
658 		ip += ENDBR_INSN_SIZE;
659 
660 	return __bpf_arch_text_poke(ip, old_t, new_t, old_addr, new_addr);
661 }
662 
663 #define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)
664 
665 static void __emit_indirect_jump(u8 **pprog, int reg, bool ereg)
666 {
667 	u8 *prog = *pprog;
668 
669 	if (ereg)
670 		EMIT1(0x41);
671 
672 	EMIT2(0xFF, 0xE0 + reg);
673 
674 	*pprog = prog;
675 }
676 
677 static void emit_indirect_jump(u8 **pprog, int bpf_reg, u8 *ip)
678 {
679 	u8 *prog = *pprog;
680 	int reg = reg2hex[bpf_reg];
681 	bool ereg = is_ereg(bpf_reg);
682 
683 	if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
684 		OPTIMIZER_HIDE_VAR(reg);
685 		emit_jump(&prog, its_static_thunk(reg + 8*ereg), ip);
686 	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
687 		EMIT_LFENCE();
688 		__emit_indirect_jump(&prog, reg, ereg);
689 	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
690 		OPTIMIZER_HIDE_VAR(reg);
691 		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
692 			emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg + 8*ereg], ip);
693 		else
694 			emit_jump(&prog, &__x86_indirect_thunk_array[reg + 8*ereg], ip);
695 	} else {
696 		__emit_indirect_jump(&prog, reg, ereg);
697 		if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
698 			EMIT1(0xCC);		/* int3 */
699 	}
700 
701 	*pprog = prog;
702 }
703 
704 static void emit_return(u8 **pprog, u8 *ip)
705 {
706 	u8 *prog = *pprog;
707 
708 	if (cpu_wants_rethunk()) {
709 		emit_jump(&prog, x86_return_thunk, ip);
710 	} else {
711 		EMIT1(0xC3);		/* ret */
712 		if (IS_ENABLED(CONFIG_MITIGATION_SLS))
713 			EMIT1(0xCC);	/* int3 */
714 	}
715 
716 	*pprog = prog;
717 }
718 
719 #define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack)	(-16 - round_up(stack, 8))
720 
721 /*
722  * Generate the following code:
723  *
724  * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
725  *   if (index >= array->map.max_entries)
726  *     goto out;
727  *   if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
728  *     goto out;
729  *   prog = array->ptrs[index];
730  *   if (prog == NULL)
731  *     goto out;
732  *   goto *(prog->bpf_func + prologue_size);
733  * out:
734  */
735 static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
736 					u8 **pprog, bool *callee_regs_used,
737 					u32 stack_depth, u8 *ip,
738 					struct jit_context *ctx)
739 {
740 	int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
741 	u8 *prog = *pprog, *start = *pprog;
742 	int offset;
743 
744 	/*
745 	 * rdi - pointer to ctx
746 	 * rsi - pointer to bpf_array
747 	 * rdx - index in bpf_array
748 	 */
749 
750 	/*
751 	 * if (index >= array->map.max_entries)
752 	 *	goto out;
753 	 */
754 	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
755 	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
756 	      offsetof(struct bpf_array, map.max_entries));
757 
758 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
759 	EMIT2(X86_JBE, offset);                   /* jbe out */
760 
761 	/*
762 	 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
763 	 *	goto out;
764 	 */
765 	EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
766 	EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
767 
768 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
769 	EMIT2(X86_JAE, offset);                   /* jae out */
770 
771 	/* prog = array->ptrs[index]; */
772 	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
773 		    offsetof(struct bpf_array, ptrs));
774 
775 	/*
776 	 * if (prog == NULL)
777 	 *	goto out;
778 	 */
779 	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
780 
781 	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
782 	EMIT2(X86_JE, offset);                    /* je out */
783 
784 	/* Inc tail_call_cnt if the slot is populated. */
785 	EMIT4(0x48, 0x83, 0x00, 0x01);            /* add qword ptr [rax], 1 */
786 
787 	if (bpf_prog->aux->exception_boundary) {
788 		pop_callee_regs(&prog, all_callee_regs_used);
789 		pop_r12(&prog);
790 	} else {
791 		pop_callee_regs(&prog, callee_regs_used);
792 		if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
793 			pop_r12(&prog);
794 	}
795 
796 	/* Pop tail_call_cnt_ptr. */
797 	EMIT1(0x58);                              /* pop rax */
798 	/* Pop tail_call_cnt, if it's main prog.
799 	 * Pop tail_call_cnt_ptr, if it's subprog.
800 	 */
801 	EMIT1(0x58);                              /* pop rax */
802 	if (stack_depth)
803 		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
804 			    round_up(stack_depth, 8));
805 
806 	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
807 	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
808 	      offsetof(struct bpf_prog, bpf_func));
809 	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
810 	      X86_TAIL_CALL_OFFSET);
811 	/*
812 	 * Now we're ready to jump into next BPF program
813 	 * rdi == ctx (1st arg)
814 	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
815 	 */
816 	emit_indirect_jump(&prog, BPF_REG_4 /* R4 -> rcx */, ip + (prog - start));
817 
818 	/* out: */
819 	ctx->tail_call_indirect_label = prog - start;
820 	*pprog = prog;
821 }
822 
823 static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
824 				      struct bpf_jit_poke_descriptor *poke,
825 				      u8 **pprog, u8 *ip,
826 				      bool *callee_regs_used, u32 stack_depth,
827 				      struct jit_context *ctx)
828 {
829 	int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
830 	u8 *prog = *pprog, *start = *pprog;
831 	int offset;
832 
833 	/*
834 	 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
835 	 *	goto out;
836 	 */
837 	EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off);   /* mov rax, qword ptr [rbp - tcc_ptr_off] */
838 	EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT);   /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
839 
840 	offset = ctx->tail_call_direct_label - (prog + 2 - start);
841 	EMIT2(X86_JAE, offset);                       /* jae out */
842 
843 	poke->tailcall_bypass = ip + (prog - start);
844 	poke->adj_off = X86_TAIL_CALL_OFFSET;
845 	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
846 	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
847 
848 	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
849 		  poke->tailcall_bypass);
850 
851 	/* Inc tail_call_cnt if the slot is populated. */
852 	EMIT4(0x48, 0x83, 0x00, 0x01);                /* add qword ptr [rax], 1 */
853 
854 	if (bpf_prog->aux->exception_boundary) {
855 		pop_callee_regs(&prog, all_callee_regs_used);
856 		pop_r12(&prog);
857 	} else {
858 		pop_callee_regs(&prog, callee_regs_used);
859 		if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
860 			pop_r12(&prog);
861 	}
862 
863 	/* Pop tail_call_cnt_ptr. */
864 	EMIT1(0x58);                                  /* pop rax */
865 	/* Pop tail_call_cnt, if it's main prog.
866 	 * Pop tail_call_cnt_ptr, if it's subprog.
867 	 */
868 	EMIT1(0x58);                                  /* pop rax */
869 	if (stack_depth)
870 		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
871 
872 	emit_nops(&prog, X86_PATCH_SIZE);
873 
874 	/* out: */
875 	ctx->tail_call_direct_label = prog - start;
876 
877 	*pprog = prog;
878 }
879 
880 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
881 {
882 	struct bpf_jit_poke_descriptor *poke;
883 	struct bpf_array *array;
884 	struct bpf_prog *target;
885 	int i, ret;
886 
887 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
888 		poke = &prog->aux->poke_tab[i];
889 		if (poke->aux && poke->aux != prog->aux)
890 			continue;
891 
892 		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
893 
894 		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
895 			continue;
896 
897 		array = container_of(poke->tail_call.map, struct bpf_array, map);
898 		mutex_lock(&array->aux->poke_mutex);
899 		target = array->ptrs[poke->tail_call.key];
900 		if (target) {
901 			ret = __bpf_arch_text_poke(poke->tailcall_target,
902 						   BPF_MOD_NOP, BPF_MOD_JUMP,
903 						   NULL,
904 						   (u8 *)target->bpf_func +
905 						   poke->adj_off);
906 			BUG_ON(ret < 0);
907 			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
908 						   BPF_MOD_JUMP, BPF_MOD_NOP,
909 						   (u8 *)poke->tailcall_target +
910 						   X86_PATCH_SIZE, NULL);
911 			BUG_ON(ret < 0);
912 		}
913 		WRITE_ONCE(poke->tailcall_target_stable, true);
914 		mutex_unlock(&array->aux->poke_mutex);
915 	}
916 }
917 
918 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
919 			   u32 dst_reg, const u32 imm32)
920 {
921 	u8 *prog = *pprog;
922 	u8 b1, b2, b3;
923 
924 	/*
925 	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
926 	 * (which zero-extends imm32) to save 2 bytes.
927 	 */
928 	if (sign_propagate && (s32)imm32 < 0) {
929 		/* 'mov %rax, imm32' sign extends imm32 */
930 		b1 = add_1mod(0x48, dst_reg);
931 		b2 = 0xC7;
932 		b3 = 0xC0;
933 		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
934 		goto done;
935 	}
936 
937 	/*
938 	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
939 	 * to save 3 bytes.
940 	 */
941 	if (imm32 == 0) {
942 		if (is_ereg(dst_reg))
943 			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
944 		b2 = 0x31; /* xor */
945 		b3 = 0xC0;
946 		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
947 		goto done;
948 	}
949 
950 	/* mov %eax, imm32 */
951 	if (is_ereg(dst_reg))
952 		EMIT1(add_1mod(0x40, dst_reg));
953 	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
954 done:
955 	*pprog = prog;
956 }
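
/*
 * E.g. with dst_reg == BPF_REG_8 (r14):
 *   imm32 == 0                  -> 45 31 f6             (xor r14d, r14d)
 *   imm32 == 1                  -> 41 be 01 00 00 00    (mov r14d, 1)
 *   imm32 == -1, sign_propagate -> 49 c7 c6 ff ff ff ff (mov r14, -1)
 */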
957 
958 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
959 			   const u32 imm32_hi, const u32 imm32_lo)
960 {
961 	u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo;
962 	u8 *prog = *pprog;
963 
964 	if (is_uimm32(imm64)) {
965 		/*
966 		 * For emitting a plain u32, where the sign bit must not be
967 		 * propagated, LLVM tends to load imm64 over mov32
968 		 * directly, so save a couple of bytes by just doing
969 		 * 'mov %eax, imm32' instead.
970 		 */
971 		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
972 	} else if (is_simm32(imm64)) {
973 		emit_mov_imm32(&prog, true, dst_reg, imm32_lo);
974 	} else {
975 		/* movabsq rax, imm64 */
976 		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
977 		EMIT(imm32_lo, 4);
978 		EMIT(imm32_hi, 4);
979 	}
980 
981 	*pprog = prog;
982 }
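
/*
 * Decision examples: 0x00000000ffffffff is a plain u32 and gets the
 * 'mov eax, imm32' form (5 bytes for legacy regs); 0xffffffff80000000 is
 * a sign-extended s32 and gets the 7-byte 'mov rax, imm32' form; anything
 * else needs the full 10-byte movabs.
 */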
983 
984 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
985 {
986 	u8 *prog = *pprog;
987 
988 	if (is64) {
989 		/* mov dst, src */
990 		EMIT_mov(dst_reg, src_reg);
991 	} else {
992 		/* mov32 dst, src */
993 		if (is_ereg(dst_reg) || is_ereg(src_reg))
994 			EMIT1(add_2mod(0x40, dst_reg, src_reg));
995 		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
996 	}
997 
998 	*pprog = prog;
999 }
1000 
1001 static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
1002 			   u32 src_reg)
1003 {
1004 	u8 *prog = *pprog;
1005 
1006 	if (is64) {
1007 		/* movs[b,w,l]q dst, src */
1008 		if (num_bits == 8)
1009 			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
1010 			      add_2reg(0xC0, src_reg, dst_reg));
1011 		else if (num_bits == 16)
1012 			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
1013 			      add_2reg(0xC0, src_reg, dst_reg));
1014 		else if (num_bits == 32)
1015 			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
1016 			      add_2reg(0xC0, src_reg, dst_reg));
1017 	} else {
1018 		/* movs[b,w]l dst, src */
1019 		if (num_bits == 8) {
1020 			EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
1021 			      add_2reg(0xC0, src_reg, dst_reg));
1022 		} else if (num_bits == 16) {
1023 			if (is_ereg(dst_reg) || is_ereg(src_reg))
1024 				EMIT1(add_2mod(0x40, src_reg, dst_reg));
1025 			EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
1026 			      add_2reg(0xC0, src_reg, dst_reg));
1027 		}
1028 	}
1029 
1030 	*pprog = prog;
1031 }
1032 
1033 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
1034 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
1035 {
1036 	u8 *prog = *pprog;
1037 
1038 	if (is_imm8(off)) {
1039 		/* 1-byte signed displacement.
1040 		 *
1041 		 * If off == 0 we could skip this and save one extra byte, but
1042 		 * the special case of x86 R13, which always needs an offset,
1043 		 * is not worth the hassle.
1044 		 */
1045 		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
1046 	} else {
1047 		/* 4-byte signed displacement */
1048 		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
1049 	}
1050 	*pprog = prog;
1051 }
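
/*
 * E.g. addressing *(rdi + 8) with rsi as the value register yields ModRM
 * 0x77 followed by disp8 0x08, while off == 0x1000 instead yields ModRM
 * 0xb7 followed by the 4-byte displacement 00 10 00 00.
 */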
1052 
1053 static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off)
1054 {
1055 	u8 *prog = *pprog;
1056 
1057 	if (is_imm8(off)) {
1058 		EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
1059 	} else {
1060 		EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
1061 	}
1062 	*pprog = prog;
1063 }
1064 
1065 /*
1066  * Emit a REX byte if it will be necessary to address these registers
1067  */
1068 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
1069 {
1070 	u8 *prog = *pprog;
1071 
1072 	if (is64)
1073 		EMIT1(add_2mod(0x48, dst_reg, src_reg));
1074 	else if (is_ereg(dst_reg) || is_ereg(src_reg))
1075 		EMIT1(add_2mod(0x40, dst_reg, src_reg));
1076 	*pprog = prog;
1077 }
1078 
1079 /*
1080  * Similar version of maybe_emit_mod() for a single register
1081  */
1082 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
1083 {
1084 	u8 *prog = *pprog;
1085 
1086 	if (is64)
1087 		EMIT1(add_1mod(0x48, reg));
1088 	else if (is_ereg(reg))
1089 		EMIT1(add_1mod(0x40, reg));
1090 	*pprog = prog;
1091 }
1092 
1093 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1094 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1095 {
1096 	u8 *prog = *pprog;
1097 
1098 	switch (size) {
1099 	case BPF_B:
1100 		/* Emit 'movzx rax, byte ptr [rax + off]' */
1101 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
1102 		break;
1103 	case BPF_H:
1104 		/* Emit 'movzx rax, word ptr [rax + off]' */
1105 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
1106 		break;
1107 	case BPF_W:
1108 		/* Emit 'mov eax, dword ptr [rax+0x14]' */
1109 		if (is_ereg(dst_reg) || is_ereg(src_reg))
1110 			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
1111 		else
1112 			EMIT1(0x8B);
1113 		break;
1114 	case BPF_DW:
1115 		/* Emit 'mov rax, qword ptr [rax+0x14]' */
1116 		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
1117 		break;
1118 	}
1119 	emit_insn_suffix(&prog, src_reg, dst_reg, off);
1120 	*pprog = prog;
1121 }
1122 
1123 /* LDSX: dst_reg = *(s8*)(src_reg + off) */
1124 static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1125 {
1126 	u8 *prog = *pprog;
1127 
1128 	switch (size) {
1129 	case BPF_B:
1130 		/* Emit 'movsx rax, byte ptr [rax + off]' */
1131 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
1132 		break;
1133 	case BPF_H:
1134 		/* Emit 'movsx rax, word ptr [rax + off]' */
1135 		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
1136 		break;
1137 	case BPF_W:
1138 		/* Emit 'movsx rax, dword ptr [rax+0x14]' */
1139 		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
1140 		break;
1141 	}
1142 	emit_insn_suffix(&prog, src_reg, dst_reg, off);
1143 	*pprog = prog;
1144 }
1145 
1146 static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1147 {
1148 	u8 *prog = *pprog;
1149 
1150 	switch (size) {
1151 	case BPF_B:
1152 		/* movzx rax, byte ptr [rax + r12 + off] */
1153 		EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6);
1154 		break;
1155 	case BPF_H:
1156 		/* movzx rax, word ptr [rax + r12 + off] */
1157 		EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7);
1158 		break;
1159 	case BPF_W:
1160 		/* mov eax, dword ptr [rax + r12 + off] */
1161 		EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B);
1162 		break;
1163 	case BPF_DW:
1164 		/* mov rax, qword ptr [rax + r12 + off] */
1165 		EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B);
1166 		break;
1167 	}
1168 	emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
1169 	*pprog = prog;
1170 }
1171 
1172 static void emit_ldsx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1173 {
1174 	u8 *prog = *pprog;
1175 
1176 	switch (size) {
1177 	case BPF_B:
1178 		/* movsx rax, byte ptr [rax + r12 + off] */
1179 		EMIT3(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x0F, 0xBE);
1180 		break;
1181 	case BPF_H:
1182 		/* movsx rax, word ptr [rax + r12 + off] */
1183 		EMIT3(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x0F, 0xBF);
1184 		break;
1185 	case BPF_W:
1186 		/* movsx rax, dword ptr [rax + r12 + off] */
1187 		EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x63);
1188 		break;
1189 	}
1190 	emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
1191 	*pprog = prog;
1192 }
1193 
1194 static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1195 {
1196 	emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
1197 }
1198 
1199 static void emit_ldsx_r12(u8 **prog, u32 size, u32 dst_reg, u32 src_reg, int off)
1200 {
1201 	emit_ldsx_index(prog, size, dst_reg, src_reg, X86_REG_R12, off);
1202 }
1203 
1204 /* STX: *(u8*)(dst_reg + off) = src_reg */
1205 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1206 {
1207 	u8 *prog = *pprog;
1208 
1209 	switch (size) {
1210 	case BPF_B:
1211 		/* Emit 'mov byte ptr [rax + off], al' */
1212 		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
1213 			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
1214 			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
1215 		else
1216 			EMIT1(0x88);
1217 		break;
1218 	case BPF_H:
1219 		if (is_ereg(dst_reg) || is_ereg(src_reg))
1220 			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
1221 		else
1222 			EMIT2(0x66, 0x89);
1223 		break;
1224 	case BPF_W:
1225 		if (is_ereg(dst_reg) || is_ereg(src_reg))
1226 			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
1227 		else
1228 			EMIT1(0x89);
1229 		break;
1230 	case BPF_DW:
1231 		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
1232 		break;
1233 	}
1234 	emit_insn_suffix(&prog, dst_reg, src_reg, off);
1235 	*pprog = prog;
1236 }
1237 
1238 /* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */
1239 static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1240 {
1241 	u8 *prog = *pprog;
1242 
1243 	switch (size) {
1244 	case BPF_B:
1245 		/* mov byte ptr [rax + r12 + off], al */
1246 		EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88);
1247 		break;
1248 	case BPF_H:
1249 		/* mov word ptr [rax + r12 + off], ax */
1250 		EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
1251 		break;
1252 	case BPF_W:
1253 		/* mov dword ptr [rax + r12 + off], eax */
1254 		EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
1255 		break;
1256 	case BPF_DW:
1257 		/* mov qword ptr [rax + r12 + off], rax */
1258 		EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89);
1259 		break;
1260 	}
1261 	emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1262 	*pprog = prog;
1263 }
1264 
1265 static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1266 {
1267 	emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
1268 }
1269 
1270 /* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */
1271 static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm)
1272 {
1273 	u8 *prog = *pprog;
1274 
1275 	switch (size) {
1276 	case BPF_B:
1277 		/* mov byte ptr [rax + r12 + off], imm8 */
1278 		EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6);
1279 		break;
1280 	case BPF_H:
1281 		/* mov word ptr [rax + r12 + off], imm16 */
1282 		EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
1283 		break;
1284 	case BPF_W:
1285 		/* mov dword ptr [rax + r12 + off], imm32 */
1286 		EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
1287 		break;
1288 	case BPF_DW:
1289 		/* mov qword ptr [rax + r12 + off], imm32 */
1290 		EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7);
1291 		break;
1292 	}
1293 	emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off);
1294 	EMIT(imm, bpf_size_to_x86_bytes(size));
1295 	*pprog = prog;
1296 }
1297 
1298 static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
1299 {
1300 	emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
1301 }
1302 
1303 static int emit_atomic_rmw(u8 **pprog, u32 atomic_op,
1304 			   u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
1305 {
1306 	u8 *prog = *pprog;
1307 
1308 	if (atomic_op != BPF_XCHG)
1309 		EMIT1(0xF0); /* lock prefix */
1310 
1311 	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
1312 
1313 	/* emit opcode */
1314 	switch (atomic_op) {
1315 	case BPF_ADD:
1316 	case BPF_AND:
1317 	case BPF_OR:
1318 	case BPF_XOR:
1319 		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
1320 		EMIT1(simple_alu_opcodes[atomic_op]);
1321 		break;
1322 	case BPF_ADD | BPF_FETCH:
1323 		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
1324 		EMIT2(0x0F, 0xC1);
1325 		break;
1326 	case BPF_XCHG:
1327 		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
1328 		EMIT1(0x87);
1329 		break;
1330 	case BPF_CMPXCHG:
1331 		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
1332 		EMIT2(0x0F, 0xB1);
1333 		break;
1334 	default:
1335 		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1336 		return -EFAULT;
1337 	}
1338 
1339 	emit_insn_suffix(&prog, dst_reg, src_reg, off);
1340 
1341 	*pprog = prog;
1342 	return 0;
1343 }
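
/*
 * E.g. a BPF_DW BPF_ADD with dst_reg rdi, src_reg rsi and off 16 emits
 * f0 48 01 77 10, i.e. 'lock add qword ptr [rdi + 0x10], rsi'.
 */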
1344 
1345 static int emit_atomic_rmw_index(u8 **pprog, u32 atomic_op, u32 size,
1346 				 u32 dst_reg, u32 src_reg, u32 index_reg,
1347 				 int off)
1348 {
1349 	u8 *prog = *pprog;
1350 
1351 	if (atomic_op != BPF_XCHG)
1352 		EMIT1(0xF0); /* lock prefix */
1353 
1354 	switch (size) {
1355 	case BPF_W:
1356 		EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg));
1357 		break;
1358 	case BPF_DW:
1359 		EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
1360 		break;
1361 	default:
1362 		pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
1363 		return -EFAULT;
1364 	}
1365 
1366 	/* emit opcode */
1367 	switch (atomic_op) {
1368 	case BPF_ADD:
1369 	case BPF_AND:
1370 	case BPF_OR:
1371 	case BPF_XOR:
1372 		/* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */
1373 		EMIT1(simple_alu_opcodes[atomic_op]);
1374 		break;
1375 	case BPF_ADD | BPF_FETCH:
1376 		/* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */
1377 		EMIT2(0x0F, 0xC1);
1378 		break;
1379 	case BPF_XCHG:
1380 		/* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */
1381 		EMIT1(0x87);
1382 		break;
1383 	case BPF_CMPXCHG:
1384 		/* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */
1385 		EMIT2(0x0F, 0xB1);
1386 		break;
1387 	default:
1388 		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1389 		return -EFAULT;
1390 	}
1391 	emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1392 	*pprog = prog;
1393 	return 0;
1394 }
1395 
1396 static int emit_atomic_ld_st(u8 **pprog, u32 atomic_op, u32 dst_reg,
1397 			     u32 src_reg, s16 off, u8 bpf_size)
1398 {
1399 	switch (atomic_op) {
1400 	case BPF_LOAD_ACQ:
1401 		/* dst_reg = smp_load_acquire(src_reg + off16) */
1402 		emit_ldx(pprog, bpf_size, dst_reg, src_reg, off);
1403 		break;
1404 	case BPF_STORE_REL:
1405 		/* smp_store_release(dst_reg + off16, src_reg) */
1406 		emit_stx(pprog, bpf_size, dst_reg, src_reg, off);
1407 		break;
1408 	default:
1409 		pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
1410 		       atomic_op);
1411 		return -EFAULT;
1412 	}
1413 
1414 	return 0;
1415 }
1416 
1417 static int emit_atomic_ld_st_index(u8 **pprog, u32 atomic_op, u32 size,
1418 				   u32 dst_reg, u32 src_reg, u32 index_reg,
1419 				   int off)
1420 {
1421 	switch (atomic_op) {
1422 	case BPF_LOAD_ACQ:
1423 		/* dst_reg = smp_load_acquire(src_reg + idx_reg + off16) */
1424 		emit_ldx_index(pprog, size, dst_reg, src_reg, index_reg, off);
1425 		break;
1426 	case BPF_STORE_REL:
1427 		/* smp_store_release(dst_reg + idx_reg + off16, src_reg) */
1428 		emit_stx_index(pprog, size, dst_reg, src_reg, index_reg, off);
1429 		break;
1430 	default:
1431 		pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
1432 		       atomic_op);
1433 		return -EFAULT;
1434 	}
1435 
1436 	return 0;
1437 }
1438 
1439 /*
1440  * Metadata encoding for exception handling in JITed code.
1441  *
1442  * Format of `fixup` and `data` fields in `struct exception_table_entry`:
1443  *
1444  * Bit layout of `fixup` (32-bit):
1445  *
1446  * +-----------+--------+-----------+---------+----------+
1447  * | 31        | 30-24  |   23-16   |   15-8  |    7-0   |
1448  * |           |        |           |         |          |
1449  * | ARENA_ACC | Unused | ARENA_REG | DST_REG | INSN_LEN |
1450  * +-----------+--------+-----------+---------+----------+
1451  *
1452  * - INSN_LEN (8 bits): Length of faulting insn (max x86 insn = 15 bytes (fits in 8 bits)).
1453  * - DST_REG  (8 bits): Offset of dst_reg from reg2pt_regs[] (max offset = 112 (fits in 8 bits)).
1454  *                      This is set to DONT_CLEAR if the insn is a store.
1455  * - ARENA_REG (8 bits): Offset of the register that is used to calculate the
1456  *                       address for load/store when accessing the arena region.
1457  * - ARENA_ACCESS (1 bit): This bit is set when the faulting instruction accessed the arena region.
1458  *
1459  * Bit layout of `data` (32-bit):
1460  *
1461  * +--------------+--------+--------------+
1462  * |    31-16     |  15-8  |     7-0      |
1463  * |              |        |              |
1464  * | ARENA_OFFSET | Unused |  EX_TYPE_BPF |
1465  * +--------------+--------+--------------+
1466  *
1467  * - ARENA_OFFSET (16 bits): Offset used to calculate the address for load/store when
1468  *                           accessing the arena region.
1469  */
1470 
1471 #define DONT_CLEAR 1
1472 #define FIXUP_INSN_LEN_MASK	GENMASK(7, 0)
1473 #define FIXUP_REG_MASK		GENMASK(15, 8)
1474 #define FIXUP_ARENA_REG_MASK	GENMASK(23, 16)
1475 #define FIXUP_ARENA_ACCESS	BIT(31)
1476 #define DATA_ARENA_OFFSET_MASK	GENMASK(31, 16)
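
/*
 * E.g. a 4-byte arena load through rax into the register at pt_regs offset
 * 'dst' would be described as (illustrative):
 *
 *   fixup = FIELD_PREP(FIXUP_INSN_LEN_MASK, 4) |
 *           FIELD_PREP(FIXUP_REG_MASK, dst) |
 *           FIELD_PREP(FIXUP_ARENA_REG_MASK, offsetof(struct pt_regs, ax)) |
 *           FIXUP_ARENA_ACCESS;
 *   data  = FIELD_PREP(DATA_ARENA_OFFSET_MASK, off) | EX_TYPE_BPF;
 */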
1477 
1478 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
1479 {
1480 	u32 reg = FIELD_GET(FIXUP_REG_MASK, x->fixup);
1481 	u32 insn_len = FIELD_GET(FIXUP_INSN_LEN_MASK, x->fixup);
1482 	bool is_arena = !!(x->fixup & FIXUP_ARENA_ACCESS);
1483 	bool is_write = (reg == DONT_CLEAR);
1484 	unsigned long addr;
1485 	s16 off;
1486 	u32 arena_reg;
1487 
1488 	if (is_arena) {
1489 		arena_reg = FIELD_GET(FIXUP_ARENA_REG_MASK, x->fixup);
1490 		off = FIELD_GET(DATA_ARENA_OFFSET_MASK, x->data);
1491 		addr = *(unsigned long *)((void *)regs + arena_reg) + off;
1492 		bpf_prog_report_arena_violation(is_write, addr, regs->ip);
1493 	}
1494 
1495 	/* skip over the faulting access; for loads, also clear the dest register */
1496 	if (reg != DONT_CLEAR)
1497 		*(unsigned long *)((void *)regs + reg) = 0;
1498 	regs->ip += insn_len;
1499 
1500 	return true;
1501 }
1502 
1503 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
1504 			     bool *regs_used)
1505 {
1506 	int i;
1507 
1508 	for (i = 1; i <= insn_cnt; i++, insn++) {
1509 		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
1510 			regs_used[0] = true;
1511 		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
1512 			regs_used[1] = true;
1513 		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
1514 			regs_used[2] = true;
1515 		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
1516 			regs_used[3] = true;
1517 	}
1518 }
1519 
1520 /* emit the 3-byte VEX prefix
1521  *
1522  * r: same as rex.r, extra bit for ModRM reg field
1523  * x: same as rex.x, extra bit for SIB index field
1524  * b: same as rex.b, extra bit for ModRM r/m, or SIB base
1525  * m: opcode map select, encoding escape bytes e.g. 0x0f38
1526  * w: same as rex.w (32 bit or 64 bit) or opcode specific
1527  * src_reg2: additional source reg (encoded as BPF reg)
1528  * l: vector length (128 bit or 256 bit) or reserved
1529  * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
1530  */
1531 static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
1532 		      bool w, u8 src_reg2, bool l, u8 pp)
1533 {
1534 	u8 *prog = *pprog;
1535 	const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
1536 	u8 b1, b2;
1537 	u8 vvvv = reg2hex[src_reg2];
1538 
1539 	/* reg2hex gives only the lower 3 bits of vvvv */
1540 	if (is_ereg(src_reg2))
1541 		vvvv |= 1 << 3;
1542 
1543 	/*
1544 	 * 2nd byte of 3-byte VEX prefix
1545 	 * ~ means bit inverted encoding
1546 	 *
1547 	 *    7                           0
1548 	 *  +---+---+---+---+---+---+---+---+
1549 	 *  |~R |~X |~B |         m         |
1550 	 *  +---+---+---+---+---+---+---+---+
1551 	 */
1552 	b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
1553 	/*
1554 	 * 3rd byte of 3-byte VEX prefix
1555 	 *
1556 	 *    7                           0
1557 	 *  +---+---+---+---+---+---+---+---+
1558 	 *  | W |     ~vvvv     | L |   pp  |
1559 	 *  +---+---+---+---+---+---+---+---+
1560 	 */
1561 	b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
1562 
1563 	EMIT3(b0, b1, b2);
1564 	*pprog = prog;
1565 }
1566 
1567 /* emit BMI2 shift instruction */
1568 static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
1569 {
1570 	u8 *prog = *pprog;
1571 	bool r = is_ereg(dst_reg);
1572 	u8 m = 2; /* escape code 0f38 */
1573 
1574 	emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
1575 	EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
1576 	*pprog = prog;
1577 }
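
/*
 * E.g. a 64-bit shlx with dst_reg rbx and the shift count in rcx encodes as
 * c4 e2 f1 f7 db ('shlx rbx, rbx, rcx'): VEX map 2 (0f38), W=1, ~vvvv
 * selecting rcx, pp=1 (the 0x66 prefix), then opcode 0xf7 with ModRM 0xdb.
 */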
1578 
1579 static void emit_priv_frame_ptr(u8 **pprog, void __percpu *priv_frame_ptr)
1580 {
1581 	u8 *prog = *pprog;
1582 
1583 	/* movabs r9, priv_frame_ptr */
1584 	emit_mov_imm64(&prog, X86_REG_R9, (__force long) priv_frame_ptr >> 32,
1585 		       (u32) (__force long) priv_frame_ptr);
1586 
1587 #ifdef CONFIG_SMP
1588 	/* add <r9>, gs:[<off>] */
1589 	EMIT2(0x65, 0x4c);
1590 	EMIT3(0x03, 0x0c, 0x25);
1591 	EMIT((u32)(unsigned long)&this_cpu_off, 4);
1592 #endif
1593 
1594 	*pprog = prog;
1595 }
1596 
1597 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
1598 
1599 #define __LOAD_TCC_PTR(off)			\
1600 	EMIT3_off32(0x48, 0x8B, 0x85, off)
1601 /* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */
1602 #define LOAD_TAIL_CALL_CNT_PTR(stack)				\
1603 	__LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))
1604 
1605 /* Memory size/value to protect private stack overflow/underflow */
1606 #define PRIV_STACK_GUARD_SZ    8
1607 #define PRIV_STACK_GUARD_VAL   0xEB9F12345678eb9fULL
1608 
1609 static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip,
1610 				    struct bpf_prog *bpf_prog)
1611 {
1612 	u8 *prog = *pprog;
1613 	u8 *func;
1614 
1615 	if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) {
1616 		/* The clearing sequence clobbers eax and ecx. */
1617 		EMIT1(0x50); /* push rax */
1618 		EMIT1(0x51); /* push rcx */
1619 		ip += 2;
1620 
1621 		func = (u8 *)clear_bhb_loop;
1622 		ip += x86_call_depth_emit_accounting(&prog, func, ip);
1623 
1624 		if (emit_call(&prog, func, ip))
1625 			return -EINVAL;
1626 		EMIT1(0x59); /* pop rcx */
1627 		EMIT1(0x58); /* pop rax */
1628 	}
1629 	/* Insert IBHF instruction */
1630 	if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) &&
1631 	     cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) ||
1632 	    cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) {
1633 		/*
1634 		 * Add an Indirect Branch History Fence (IBHF). IBHF acts as a
1635 		 * fence preventing branch history from before the fence from
1636 		 * affecting indirect branches after the fence. This is
1637 		 * specifically used in cBPF jitted code to prevent Intra-mode
1638 		 * BHI attacks. The IBHF instruction is designed to be a NOP on
1639 		 * hardware that doesn't need or support it.  The REP and REX.W
1640 		 * prefixes are required by the microcode, and they also ensure
1641 		 * that the NOP is unlikely to be used in existing code.
1642 		 *
1643 		 * IBHF is not a valid instruction in 32-bit mode.
1644 		 */
1645 		EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */
1646 	}
1647 	*pprog = prog;
1648 	return 0;
1649 }
1650 
1651 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
1652 		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
1653 {
1654 	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
1655 	struct bpf_insn *insn = bpf_prog->insnsi;
1656 	bool callee_regs_used[4] = {};
1657 	int insn_cnt = bpf_prog->len;
1658 	bool seen_exit = false;
1659 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
1660 	void __percpu *priv_frame_ptr = NULL;
1661 	u64 arena_vm_start, user_vm_start;
1662 	void __percpu *priv_stack_ptr;
1663 	int i, excnt = 0;
1664 	int ilen, proglen = 0;
1665 	u8 *prog = temp;
1666 	u32 stack_depth;
1667 	int err;
1668 
1669 	stack_depth = bpf_prog->aux->stack_depth;
1670 	priv_stack_ptr = bpf_prog->aux->priv_stack_ptr;
1671 	if (priv_stack_ptr) {
1672 		priv_frame_ptr = priv_stack_ptr + PRIV_STACK_GUARD_SZ + round_up(stack_depth, 8);
1673 		stack_depth = 0;
1674 	}
1675 
1676 	arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
1677 	user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);
1678 
1679 	detect_reg_usage(insn, insn_cnt, callee_regs_used);
1680 
1681 	emit_prologue(&prog, image, stack_depth,
1682 		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
1683 		      bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
1684 
1685 	bpf_prog->aux->ksym.fp_start = prog - temp;
1686 
1687 	/* Exception callback will clobber callee regs for its own use, and
1688 	 * restore the original callee regs from main prog's stack frame.
1689 	 */
1690 	if (bpf_prog->aux->exception_boundary) {
1691 		/* We also need to save r12, which is not mapped to any BPF
1692 		 * register, as we throw after entry into the kernel, which may
1693 		 * overwrite r12.
1694 		 */
1695 		push_r12(&prog);
1696 		push_callee_regs(&prog, all_callee_regs_used);
1697 	} else {
1698 		if (arena_vm_start)
1699 			push_r12(&prog);
1700 		push_callee_regs(&prog, callee_regs_used);
1701 	}
1702 	if (arena_vm_start)
1703 		emit_mov_imm64(&prog, X86_REG_R12,
1704 			       arena_vm_start >> 32, (u32) arena_vm_start);
1705 
1706 	if (priv_frame_ptr)
1707 		emit_priv_frame_ptr(&prog, priv_frame_ptr);
1708 
1709 	ilen = prog - temp;
1710 	if (rw_image)
1711 		memcpy(rw_image + proglen, temp, ilen);
1712 	proglen += ilen;
1713 	addrs[0] = proglen;
1714 	prog = temp;
1715 
1716 	for (i = 1; i <= insn_cnt; i++, insn++) {
1717 		const s32 imm32 = insn->imm;
1718 		u32 dst_reg = insn->dst_reg;
1719 		u32 src_reg = insn->src_reg;
1720 		u8 b2 = 0, b3 = 0;
1721 		u8 *start_of_ldx;
1722 		s64 jmp_offset;
1723 		s16 insn_off;
1724 		u8 jmp_cond;
1725 		u8 *func;
1726 		int nops;
1727 
1728 		if (priv_frame_ptr) {
1729 			if (src_reg == BPF_REG_FP)
1730 				src_reg = X86_REG_R9;
1731 
1732 			if (dst_reg == BPF_REG_FP)
1733 				dst_reg = X86_REG_R9;
1734 		}
1735 
1736 		switch (insn->code) {
1737 			/* ALU */
1738 		case BPF_ALU | BPF_ADD | BPF_X:
1739 		case BPF_ALU | BPF_SUB | BPF_X:
1740 		case BPF_ALU | BPF_AND | BPF_X:
1741 		case BPF_ALU | BPF_OR | BPF_X:
1742 		case BPF_ALU | BPF_XOR | BPF_X:
1743 		case BPF_ALU64 | BPF_ADD | BPF_X:
1744 		case BPF_ALU64 | BPF_SUB | BPF_X:
1745 		case BPF_ALU64 | BPF_AND | BPF_X:
1746 		case BPF_ALU64 | BPF_OR | BPF_X:
1747 		case BPF_ALU64 | BPF_XOR | BPF_X:
1748 			maybe_emit_mod(&prog, dst_reg, src_reg,
1749 				       BPF_CLASS(insn->code) == BPF_ALU64);
1750 			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
1751 			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
1752 			break;
1753 
1754 		case BPF_ALU64 | BPF_MOV | BPF_X:
1755 			if (insn_is_cast_user(insn)) {
1756 				if (dst_reg != src_reg)
1757 					/* 32-bit mov */
1758 					emit_mov_reg(&prog, false, dst_reg, src_reg);
1759 				/* shl dst_reg, 32 */
1760 				maybe_emit_1mod(&prog, dst_reg, true);
1761 				EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32);
1762 
1763 				/* or dst_reg, user_vm_start */
1764 				maybe_emit_1mod(&prog, dst_reg, true);
1765 				if (is_axreg(dst_reg))
1766 					EMIT1_off32(0x0D,  user_vm_start >> 32);
1767 				else
1768 					EMIT2_off32(0x81, add_1reg(0xC8, dst_reg),  user_vm_start >> 32);
1769 
1770 				/* rol dst_reg, 32 */
1771 				maybe_emit_1mod(&prog, dst_reg, true);
1772 				EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32);
1773 
1774 				/* xor r11, r11 */
1775 				EMIT3(0x4D, 0x31, 0xDB);
1776 
1777 				/* test dst_reg32, dst_reg32; check if lower 32-bit are zero */
1778 				maybe_emit_mod(&prog, dst_reg, dst_reg, false);
1779 				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1780 
1781 				/* cmove r11, dst_reg; if so, set dst_reg to zero */
1782 				/* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */
1783 				maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
1784 				EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
1785 				break;
1786 			} else if (insn_is_mov_percpu_addr(insn)) {
1787 				/* mov <dst>, <src> (if necessary) */
1788 				EMIT_mov(dst_reg, src_reg);
1789 #ifdef CONFIG_SMP
1790 				/* add <dst>, gs:[<off>] */
1791 				EMIT2(0x65, add_1mod(0x48, dst_reg));
1792 				EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25);
1793 				EMIT((u32)(unsigned long)&this_cpu_off, 4);
1794 #endif
1795 				break;
1796 			}
1797 			fallthrough;
1798 		case BPF_ALU | BPF_MOV | BPF_X:
1799 			if (insn->off == 0)
1800 				emit_mov_reg(&prog,
1801 					     BPF_CLASS(insn->code) == BPF_ALU64,
1802 					     dst_reg, src_reg);
1803 			else
1804 				emit_movsx_reg(&prog, insn->off,
1805 					       BPF_CLASS(insn->code) == BPF_ALU64,
1806 					       dst_reg, src_reg);
1807 			break;
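			/*
			 * For reference, the insn_is_cast_user() sequence above
			 * with dst_reg = R1 (rdi) boils down to:
			 *
			 *   mov   edi, esi		(only if dst != src)
			 *   shl   rdi, 32
			 *   or    rdi, user_vm_start >> 32
			 *   rol   rdi, 32
			 *   xor   r11, r11
			 *   test  edi, edi
			 *   cmove rdi, r11
			 *
			 * i.e. dst_reg = (user_vm_start & GENMASK_ULL(63, 32)) |
			 * (u32)dst_reg, except that a NULL pointer (zero lower
			 * 32 bits) stays NULL.
			 */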
1808 
1809 			/* neg dst */
1810 		case BPF_ALU | BPF_NEG:
1811 		case BPF_ALU64 | BPF_NEG:
1812 			maybe_emit_1mod(&prog, dst_reg,
1813 					BPF_CLASS(insn->code) == BPF_ALU64);
1814 			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
1815 			break;
1816 
1817 		case BPF_ALU | BPF_ADD | BPF_K:
1818 		case BPF_ALU | BPF_SUB | BPF_K:
1819 		case BPF_ALU | BPF_AND | BPF_K:
1820 		case BPF_ALU | BPF_OR | BPF_K:
1821 		case BPF_ALU | BPF_XOR | BPF_K:
1822 		case BPF_ALU64 | BPF_ADD | BPF_K:
1823 		case BPF_ALU64 | BPF_SUB | BPF_K:
1824 		case BPF_ALU64 | BPF_AND | BPF_K:
1825 		case BPF_ALU64 | BPF_OR | BPF_K:
1826 		case BPF_ALU64 | BPF_XOR | BPF_K:
1827 			maybe_emit_1mod(&prog, dst_reg,
1828 					BPF_CLASS(insn->code) == BPF_ALU64);
1829 
1830 			/*
1831 			 * b3 holds 'normal' opcode, b2 short form only valid
1832 			 * in case dst is eax/rax.
1833 			 */
1834 			switch (BPF_OP(insn->code)) {
1835 			case BPF_ADD:
1836 				b3 = 0xC0;
1837 				b2 = 0x05;
1838 				break;
1839 			case BPF_SUB:
1840 				b3 = 0xE8;
1841 				b2 = 0x2D;
1842 				break;
1843 			case BPF_AND:
1844 				b3 = 0xE0;
1845 				b2 = 0x25;
1846 				break;
1847 			case BPF_OR:
1848 				b3 = 0xC8;
1849 				b2 = 0x0D;
1850 				break;
1851 			case BPF_XOR:
1852 				b3 = 0xF0;
1853 				b2 = 0x35;
1854 				break;
1855 			}
1856 
1857 			if (is_imm8(imm32))
1858 				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1859 			else if (is_axreg(dst_reg))
1860 				EMIT1_off32(b2, imm32);
1861 			else
1862 				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1863 			break;
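			/*
			 * Resulting encodings for the three forms above, taking
			 * a 64-bit add on rdi resp. rax as an example:
			 *
			 *   48 83 c7 05		add rdi, 0x5    (imm8)
			 *   48 05 00 10 00 00		add rax, 0x1000 (rax short form)
			 *   48 81 c7 00 10 00 00	add rdi, 0x1000 (imm32)
			 */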
1864 
1865 		case BPF_ALU64 | BPF_MOV | BPF_K:
1866 		case BPF_ALU | BPF_MOV | BPF_K:
1867 			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1868 				       dst_reg, imm32);
1869 			break;
1870 
1871 		case BPF_LD | BPF_IMM | BPF_DW:
1872 			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1873 			insn++;
1874 			i++;
1875 			break;
1876 
1877 			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1878 		case BPF_ALU | BPF_MOD | BPF_X:
1879 		case BPF_ALU | BPF_DIV | BPF_X:
1880 		case BPF_ALU | BPF_MOD | BPF_K:
1881 		case BPF_ALU | BPF_DIV | BPF_K:
1882 		case BPF_ALU64 | BPF_MOD | BPF_X:
1883 		case BPF_ALU64 | BPF_DIV | BPF_X:
1884 		case BPF_ALU64 | BPF_MOD | BPF_K:
1885 		case BPF_ALU64 | BPF_DIV | BPF_K: {
1886 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1887 
1888 			if (dst_reg != BPF_REG_0)
1889 				EMIT1(0x50); /* push rax */
1890 			if (dst_reg != BPF_REG_3)
1891 				EMIT1(0x52); /* push rdx */
1892 
1893 			if (BPF_SRC(insn->code) == BPF_X) {
1894 				if (src_reg == BPF_REG_0 ||
1895 				    src_reg == BPF_REG_3) {
1896 					/* mov r11, src_reg */
1897 					EMIT_mov(AUX_REG, src_reg);
1898 					src_reg = AUX_REG;
1899 				}
1900 			} else {
1901 				/* mov r11, imm32 */
1902 				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1903 				src_reg = AUX_REG;
1904 			}
1905 
1906 			if (dst_reg != BPF_REG_0)
1907 				/* mov rax, dst_reg */
1908 				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1909 
1910 			if (insn->off == 0) {
1911 				/*
1912 				 * xor edx, edx
1913 				 * equivalent to 'xor rdx, rdx', but one byte shorter
1914 				 */
1915 				EMIT2(0x31, 0xd2);
1916 
1917 				/* div src_reg */
1918 				maybe_emit_1mod(&prog, src_reg, is64);
1919 				EMIT2(0xF7, add_1reg(0xF0, src_reg));
1920 			} else {
1921 				if (BPF_CLASS(insn->code) == BPF_ALU)
1922 					EMIT1(0x99); /* cdq */
1923 				else
1924 					EMIT2(0x48, 0x99); /* cqo */
1925 
1926 				/* idiv src_reg */
1927 				maybe_emit_1mod(&prog, src_reg, is64);
1928 				EMIT2(0xF7, add_1reg(0xF8, src_reg));
1929 			}
1930 
1931 			if (BPF_OP(insn->code) == BPF_MOD &&
1932 			    dst_reg != BPF_REG_3)
1933 				/* mov dst_reg, rdx */
1934 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1935 			else if (BPF_OP(insn->code) == BPF_DIV &&
1936 				 dst_reg != BPF_REG_0)
1937 				/* mov dst_reg, rax */
1938 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1939 
1940 			if (dst_reg != BPF_REG_3)
1941 				EMIT1(0x5A); /* pop rdx */
1942 			if (dst_reg != BPF_REG_0)
1943 				EMIT1(0x58); /* pop rax */
1944 			break;
1945 		}
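			/*
			 * E.g. BPF_ALU64 | BPF_DIV | BPF_X with dst_reg = R1 and
			 * src_reg = R2 emits:
			 *
			 *   push rax
			 *   push rdx
			 *   mov  rax, rdi
			 *   xor  edx, edx
			 *   div  rsi
			 *   mov  rdi, rax
			 *   pop  rdx
			 *   pop  rax
			 *
			 * BPF_MOD differs only in copying the remainder from rdx
			 * instead of the quotient from rax; insn->off != 0 selects
			 * the sign-extending cdq/cqo + idiv pair instead.
			 */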
1946 
1947 		case BPF_ALU | BPF_MUL | BPF_K:
1948 		case BPF_ALU64 | BPF_MUL | BPF_K:
1949 			maybe_emit_mod(&prog, dst_reg, dst_reg,
1950 				       BPF_CLASS(insn->code) == BPF_ALU64);
1951 
1952 			if (is_imm8(imm32))
1953 				/* imul dst_reg, dst_reg, imm8 */
1954 				EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1955 				      imm32);
1956 			else
1957 				/* imul dst_reg, dst_reg, imm32 */
1958 				EMIT2_off32(0x69,
1959 					    add_2reg(0xC0, dst_reg, dst_reg),
1960 					    imm32);
1961 			break;
1962 
1963 		case BPF_ALU | BPF_MUL | BPF_X:
1964 		case BPF_ALU64 | BPF_MUL | BPF_X:
1965 			maybe_emit_mod(&prog, src_reg, dst_reg,
1966 				       BPF_CLASS(insn->code) == BPF_ALU64);
1967 
1968 			/* imul dst_reg, src_reg */
1969 			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1970 			break;
1971 
1972 			/* Shifts */
1973 		case BPF_ALU | BPF_LSH | BPF_K:
1974 		case BPF_ALU | BPF_RSH | BPF_K:
1975 		case BPF_ALU | BPF_ARSH | BPF_K:
1976 		case BPF_ALU64 | BPF_LSH | BPF_K:
1977 		case BPF_ALU64 | BPF_RSH | BPF_K:
1978 		case BPF_ALU64 | BPF_ARSH | BPF_K:
1979 			maybe_emit_1mod(&prog, dst_reg,
1980 					BPF_CLASS(insn->code) == BPF_ALU64);
1981 
1982 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1983 			if (imm32 == 1)
1984 				EMIT2(0xD1, add_1reg(b3, dst_reg));
1985 			else
1986 				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1987 			break;
1988 
1989 		case BPF_ALU | BPF_LSH | BPF_X:
1990 		case BPF_ALU | BPF_RSH | BPF_X:
1991 		case BPF_ALU | BPF_ARSH | BPF_X:
1992 		case BPF_ALU64 | BPF_LSH | BPF_X:
1993 		case BPF_ALU64 | BPF_RSH | BPF_X:
1994 		case BPF_ALU64 | BPF_ARSH | BPF_X:
1995 			/* BMI2 shifts aren't better when shift count is already in rcx */
1996 			if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
1997 				/* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
1998 				bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
1999 				u8 op;
2000 
2001 				switch (BPF_OP(insn->code)) {
2002 				case BPF_LSH:
2003 					op = 1; /* prefix 0x66 */
2004 					break;
2005 				case BPF_RSH:
2006 					op = 3; /* prefix 0xf2 */
2007 					break;
2008 				case BPF_ARSH:
2009 					op = 2; /* prefix 0xf3 */
2010 					break;
2011 				}
2012 
2013 				emit_shiftx(&prog, dst_reg, src_reg, w, op);
2014 
2015 				break;
2016 			}
2017 
2018 			if (src_reg != BPF_REG_4) { /* common case */
2019 				/* Check for bad case when dst_reg == rcx */
2020 				if (dst_reg == BPF_REG_4) {
2021 					/* mov r11, dst_reg */
2022 					EMIT_mov(AUX_REG, dst_reg);
2023 					dst_reg = AUX_REG;
2024 				} else {
2025 					EMIT1(0x51); /* push rcx */
2026 				}
2027 				/* mov rcx, src_reg */
2028 				EMIT_mov(BPF_REG_4, src_reg);
2029 			}
2030 
2031 			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
2032 			maybe_emit_1mod(&prog, dst_reg,
2033 					BPF_CLASS(insn->code) == BPF_ALU64);
2034 
2035 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
2036 			EMIT2(0xD3, add_1reg(b3, dst_reg));
2037 
2038 			if (src_reg != BPF_REG_4) {
2039 				if (insn->dst_reg == BPF_REG_4)
2040 					/* mov dst_reg, r11 */
2041 					EMIT_mov(insn->dst_reg, AUX_REG);
2042 				else
2043 					EMIT1(0x59); /* pop rcx */
2044 			}
2045 
2046 			break;
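			/*
			 * Without BMI2, e.g. BPF_ALU64 | BPF_LSH | BPF_X with
			 * dst_reg = R1 and src_reg = R2 comes out as:
			 *
			 *   push rcx
			 *   mov  rcx, rsi
			 *   shl  rdi, cl
			 *   pop  rcx
			 *
			 * If dst_reg itself is rcx (R4), dst is moved to r11
			 * first and the result is copied back afterwards instead
			 * of the push/pop pair.
			 */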
2047 
2048 		case BPF_ALU | BPF_END | BPF_FROM_BE:
2049 		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
2050 			switch (imm32) {
2051 			case 16:
2052 				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
2053 				EMIT1(0x66);
2054 				if (is_ereg(dst_reg))
2055 					EMIT1(0x41);
2056 				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
2057 
2058 				/* Emit 'movzwl eax, ax' */
2059 				if (is_ereg(dst_reg))
2060 					EMIT3(0x45, 0x0F, 0xB7);
2061 				else
2062 					EMIT2(0x0F, 0xB7);
2063 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
2064 				break;
2065 			case 32:
2066 				/* Emit 'bswap eax' to swap lower 4 bytes */
2067 				if (is_ereg(dst_reg))
2068 					EMIT2(0x41, 0x0F);
2069 				else
2070 					EMIT1(0x0F);
2071 				EMIT1(add_1reg(0xC8, dst_reg));
2072 				break;
2073 			case 64:
2074 				/* Emit 'bswap rax' to swap 8 bytes */
2075 				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
2076 				      add_1reg(0xC8, dst_reg));
2077 				break;
2078 			}
2079 			break;
2080 
2081 		case BPF_ALU | BPF_END | BPF_FROM_LE:
2082 			switch (imm32) {
2083 			case 16:
2084 				/*
2085 				 * Emit 'movzwl eax, ax' to zero extend 16-bit
2086 				 * into 64 bit
2087 				 */
2088 				if (is_ereg(dst_reg))
2089 					EMIT3(0x45, 0x0F, 0xB7);
2090 				else
2091 					EMIT2(0x0F, 0xB7);
2092 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
2093 				break;
2094 			case 32:
2095 				/* Emit 'mov eax, eax' to clear upper 32-bits */
2096 				if (is_ereg(dst_reg))
2097 					EMIT1(0x45);
2098 				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
2099 				break;
2100 			case 64:
2101 				/* nop */
2102 				break;
2103 			}
2104 			break;
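			/*
			 * Resulting encodings with dst_reg = R1 (rdi), for
			 * example:
			 *
			 *   66 c1 cf 08	ror di, 8     (swap lower 2 bytes)
			 *   0f b7 ff		movzx edi, di (zero-extend result)
			 *   0f cf		bswap edi     (32-bit swap)
			 *   48 0f cf		bswap rdi     (64-bit swap)
			 */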
2105 
2106 			/* speculation barrier */
2107 		case BPF_ST | BPF_NOSPEC:
2108 			EMIT_LFENCE();
2109 			break;
2110 
2111 			/* ST: *(u8*)(dst_reg + off) = imm */
2112 		case BPF_ST | BPF_MEM | BPF_B:
2113 			if (is_ereg(dst_reg))
2114 				EMIT2(0x41, 0xC6);
2115 			else
2116 				EMIT1(0xC6);
2117 			goto st;
2118 		case BPF_ST | BPF_MEM | BPF_H:
2119 			if (is_ereg(dst_reg))
2120 				EMIT3(0x66, 0x41, 0xC7);
2121 			else
2122 				EMIT2(0x66, 0xC7);
2123 			goto st;
2124 		case BPF_ST | BPF_MEM | BPF_W:
2125 			if (is_ereg(dst_reg))
2126 				EMIT2(0x41, 0xC7);
2127 			else
2128 				EMIT1(0xC7);
2129 			goto st;
2130 		case BPF_ST | BPF_MEM | BPF_DW:
2131 			EMIT2(add_1mod(0x48, dst_reg), 0xC7);
2132 
2133 st:			if (is_imm8(insn->off))
2134 				EMIT2(add_1reg(0x40, dst_reg), insn->off);
2135 			else
2136 				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
2137 
2138 			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
2139 			break;
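			/*
			 * E.g. BPF_ST | BPF_MEM | BPF_W with dst_reg = R1,
			 * off = 8 and imm = 42 assembles to:
			 *
			 *   c7 47 08 2a 00 00 00	mov DWORD PTR [rdi+0x8], 42
			 */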
2140 
2141 			/* STX: *(u8*)(dst_reg + off) = src_reg */
2142 		case BPF_STX | BPF_MEM | BPF_B:
2143 		case BPF_STX | BPF_MEM | BPF_H:
2144 		case BPF_STX | BPF_MEM | BPF_W:
2145 		case BPF_STX | BPF_MEM | BPF_DW:
2146 			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2147 			break;
2148 
2149 		case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
2150 		case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
2151 		case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
2152 		case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
2153 			start_of_ldx = prog;
2154 			emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm);
2155 			goto populate_extable;
2156 
2157 			/* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */
2158 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
2159 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
2160 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
2161 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
2162 		case BPF_LDX | BPF_PROBE_MEM32SX | BPF_B:
2163 		case BPF_LDX | BPF_PROBE_MEM32SX | BPF_H:
2164 		case BPF_LDX | BPF_PROBE_MEM32SX | BPF_W:
2165 		case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
2166 		case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
2167 		case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
2168 		case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
2169 			start_of_ldx = prog;
2170 			if (BPF_CLASS(insn->code) == BPF_LDX) {
2171 				if (BPF_MODE(insn->code) == BPF_PROBE_MEM32SX)
2172 					emit_ldsx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2173 				else
2174 					emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2175 			} else {
2176 				emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2177 			}
2178 populate_extable:
2179 			{
2180 				struct exception_table_entry *ex;
2181 				u8 *_insn = image + proglen + (start_of_ldx - temp);
2182 				u32 arena_reg, fixup_reg;
2183 				s64 delta;
2184 
2185 				if (!bpf_prog->aux->extable)
2186 					break;
2187 
2188 				if (excnt >= bpf_prog->aux->num_exentries) {
2189 					pr_err("mem32 extable bug\n");
2190 					return -EFAULT;
2191 				}
2192 				ex = &bpf_prog->aux->extable[excnt++];
2193 
2194 				delta = _insn - (u8 *)&ex->insn;
2195 				/* switch ex to rw buffer for writes */
2196 				ex = (void *)rw_image + ((void *)ex - (void *)image);
2197 
2198 				ex->insn = delta;
2199 
2200 				ex->data = EX_TYPE_BPF;
2201 
2202 				/*
2203 				 * src_reg/dst_reg holds the address in the arena region with upper
2204 				 * 32-bits being zero because of a preceding addr_space_cast(r<n>,
2205 				 * 0x0, 0x1) instruction. This address is adjusted with the addition
2206 				 * of arena_vm_start (see the implementation of BPF_PROBE_MEM32 and
2207 				 * BPF_PROBE_ATOMIC) before being used for the memory access. Pass
2208 				 * the reg holding the unmodified 32-bit address to
2209 				 * ex_handler_bpf().
2210 				 */
2211 				if (BPF_CLASS(insn->code) == BPF_LDX) {
2212 					arena_reg = reg2pt_regs[src_reg];
2213 					fixup_reg = reg2pt_regs[dst_reg];
2214 				} else {
2215 					arena_reg = reg2pt_regs[dst_reg];
2216 					fixup_reg = DONT_CLEAR;
2217 				}
2218 
2219 				ex->fixup = FIELD_PREP(FIXUP_INSN_LEN_MASK, prog - start_of_ldx) |
2220 					    FIELD_PREP(FIXUP_ARENA_REG_MASK, arena_reg) |
2221 					    FIELD_PREP(FIXUP_REG_MASK, fixup_reg);
2222 				ex->fixup |= FIXUP_ARENA_ACCESS;
2223 
2224 				ex->data |= FIELD_PREP(DATA_ARENA_OFFSET_MASK, insn->off);
2225 			}
2226 			break;
2227 
2228 			/* LDX: dst_reg = *(u8*)(src_reg + off) */
2229 		case BPF_LDX | BPF_MEM | BPF_B:
2230 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
2231 		case BPF_LDX | BPF_MEM | BPF_H:
2232 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
2233 		case BPF_LDX | BPF_MEM | BPF_W:
2234 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
2235 		case BPF_LDX | BPF_MEM | BPF_DW:
2236 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
2237 			/* LDXS: dst_reg = *(s8*)(src_reg + off) */
2238 		case BPF_LDX | BPF_MEMSX | BPF_B:
2239 		case BPF_LDX | BPF_MEMSX | BPF_H:
2240 		case BPF_LDX | BPF_MEMSX | BPF_W:
2241 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
2242 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
2243 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
2244 			insn_off = insn->off;
2245 
2246 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
2247 			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
2248 				/* Conservatively check that src_reg + insn->off is a kernel address:
2249 				 *   src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
2250 				 *   and
2251 				 *   src_reg + insn->off < VSYSCALL_ADDR
2252 				 */
2253 
2254 				u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
2255 				u8 *end_of_jmp;
2256 
2257 				/* movabsq r10, VSYSCALL_ADDR */
2258 				emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
2259 					       (u32)(long)VSYSCALL_ADDR);
2260 
2261 				/* mov r11, src_reg */
2262 				EMIT_mov(AUX_REG, src_reg);
2263 
2264 				if (insn->off) {
2265 					/* add r11, insn->off */
2266 					maybe_emit_1mod(&prog, AUX_REG, true);
2267 					EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
2268 				}
2269 
2270 				/* sub r11, r10 */
2271 				maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
2272 				EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
2273 
2274 				/* movabsq r10, limit */
2275 				emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
2276 					       (u32)(long)limit);
2277 
2278 				/* cmp r11, r10 */
2279 				maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
2280 				EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
2281 
2282 				/* if unsigned '>', goto load */
2283 				EMIT2(X86_JA, 0);
2284 				end_of_jmp = prog;
2285 
2286 				/* xor dst_reg, dst_reg */
2287 				emit_mov_imm32(&prog, false, dst_reg, 0);
2288 				/* jmp byte_after_ldx */
2289 				EMIT2(0xEB, 0);
2290 
2291 				/* populate jmp_offset for JA above to jump to start_of_ldx */
2292 				start_of_ldx = prog;
2293 				end_of_jmp[-1] = start_of_ldx - end_of_jmp;
2294 			}
2295 			if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
2296 			    BPF_MODE(insn->code) == BPF_MEMSX)
2297 				emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
2298 			else
2299 				emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
2300 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
2301 			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
2302 				struct exception_table_entry *ex;
2303 				u8 *_insn = image + proglen + (start_of_ldx - temp);
2304 				s64 delta;
2305 
2306 				/* populate jmp_offset for JMP above */
2307 				start_of_ldx[-1] = prog - start_of_ldx;
2308 
2309 				if (!bpf_prog->aux->extable)
2310 					break;
2311 
2312 				if (excnt >= bpf_prog->aux->num_exentries) {
2313 					pr_err("ex gen bug\n");
2314 					return -EFAULT;
2315 				}
2316 				ex = &bpf_prog->aux->extable[excnt++];
2317 
2318 				delta = _insn - (u8 *)&ex->insn;
2319 				if (!is_simm32(delta)) {
2320 					pr_err("extable->insn doesn't fit into 32-bit\n");
2321 					return -EFAULT;
2322 				}
2323 				/* switch ex to rw buffer for writes */
2324 				ex = (void *)rw_image + ((void *)ex - (void *)image);
2325 
2326 				ex->insn = delta;
2327 
2328 				ex->data = EX_TYPE_BPF;
2329 
2330 				if (dst_reg > BPF_REG_9) {
2331 					pr_err("verifier error\n");
2332 					return -EFAULT;
2333 				}
2334 				/*
2335 				 * Compute the size of the x86 insn and its destination x86 register.
2336 				 * ex_handler_bpf() will use the lower 8 bits to adjust
2337 				 * pt_regs->ip to jump over this x86 instruction
2338 				 * and the upper bits to figure out which pt_regs to zero out.
2339 				 * End result: the 4-byte x86 insn "mov rbx, qword ptr [rax+0x14]"
2340 				 * will be skipped and rbx will be zero-initialized.
2341 				 */
2342 				ex->fixup = FIELD_PREP(FIXUP_INSN_LEN_MASK, prog - start_of_ldx) |
2343 					    FIELD_PREP(FIXUP_REG_MASK, reg2pt_regs[dst_reg]);
2344 			}
2345 			break;
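			/*
			 * Shape of the PROBE_MEM range check emitted above, for
			 * reference:
			 *
			 *   movabs r10, VSYSCALL_ADDR
			 *   mov    r11, src_reg
			 *   add    r11, insn->off	(only if off != 0)
			 *   sub    r11, r10
			 *   movabs r10, limit
			 *   cmp    r11, r10
			 *   ja     load		(kernel address)
			 *   xor    dst_reg, dst_reg	(user address: read as zero)
			 *   jmp    after_load
			 * load:
			 *   mov    dst_reg, [src_reg + insn->off]
			 * after_load:
			 *
			 * The load itself is additionally covered by the extable
			 * entry, so a faulting kernel address also reads as zero.
			 */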
2346 
2347 		case BPF_STX | BPF_ATOMIC | BPF_B:
2348 		case BPF_STX | BPF_ATOMIC | BPF_H:
2349 			if (!bpf_atomic_is_load_store(insn)) {
2350 				pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
2351 				return -EFAULT;
2352 			}
2353 			fallthrough;
2354 		case BPF_STX | BPF_ATOMIC | BPF_W:
2355 		case BPF_STX | BPF_ATOMIC | BPF_DW:
2356 			if (insn->imm == (BPF_AND | BPF_FETCH) ||
2357 			    insn->imm == (BPF_OR | BPF_FETCH) ||
2358 			    insn->imm == (BPF_XOR | BPF_FETCH)) {
2359 				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
2360 				u32 real_src_reg = src_reg;
2361 				u32 real_dst_reg = dst_reg;
2362 				u8 *branch_target;
2363 
2364 				/*
2365 				 * Can't be implemented with a single x86 insn.
2366 				 * Need to do a CMPXCHG loop.
2367 				 */
2368 
2369 				/* Will need RAX as a CMPXCHG operand so save R0 */
2370 				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
2371 				if (src_reg == BPF_REG_0)
2372 					real_src_reg = BPF_REG_AX;
2373 				if (dst_reg == BPF_REG_0)
2374 					real_dst_reg = BPF_REG_AX;
2375 
2376 				branch_target = prog;
2377 				/* Load old value */
2378 				emit_ldx(&prog, BPF_SIZE(insn->code),
2379 					 BPF_REG_0, real_dst_reg, insn->off);
2380 				/*
2381 				 * Perform the (commutative) operation locally,
2382 				 * put the result in the AUX_REG.
2383 				 */
2384 				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
2385 				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
2386 				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
2387 				      add_2reg(0xC0, AUX_REG, real_src_reg));
2388 				/* Attempt to swap in new value */
2389 				err = emit_atomic_rmw(&prog, BPF_CMPXCHG,
2390 						      real_dst_reg, AUX_REG,
2391 						      insn->off,
2392 						      BPF_SIZE(insn->code));
2393 				if (WARN_ON(err))
2394 					return err;
2395 				/*
2396 				 * ZF tells us whether we won the race. If it's
2397 				 * cleared we need to try again.
2398 				 */
2399 				EMIT2(X86_JNE, -(prog - branch_target) - 2);
2400 				/* Return the pre-modification value */
2401 				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
2402 				/* Restore R0 after clobbering RAX */
2403 				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
2404 				break;
2405 			}
2406 
2407 			if (bpf_atomic_is_load_store(insn))
2408 				err = emit_atomic_ld_st(&prog, insn->imm, dst_reg, src_reg,
2409 							insn->off, BPF_SIZE(insn->code));
2410 			else
2411 				err = emit_atomic_rmw(&prog, insn->imm, dst_reg, src_reg,
2412 						      insn->off, BPF_SIZE(insn->code));
2413 			if (err)
2414 				return err;
2415 			break;
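			/*
			 * The fetch-variant CMPXCHG loop above, e.g. for a
			 * 64-bit BPF_AND | BPF_FETCH with dst_reg = R1 and
			 * src_reg = R2, is roughly:
			 *
			 *   mov  r10, rax		; preserve R0
			 * retry:
			 *   mov  rax, [rdi + off]	; load old value
			 *   mov  r11, rax
			 *   and  r11, rsi		; compute new value
			 *   lock cmpxchg [rdi + off], r11
			 *   jne  retry			; lost the race, try again
			 *   mov  rsi, rax		; src_reg = old value
			 *   mov  rax, r10		; restore R0
			 */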
2416 
2417 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_B:
2418 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_H:
2419 			if (!bpf_atomic_is_load_store(insn)) {
2420 				pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
2421 				return -EFAULT;
2422 			}
2423 			fallthrough;
2424 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
2425 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
2426 			start_of_ldx = prog;
2427 
2428 			if (bpf_atomic_is_load_store(insn))
2429 				err = emit_atomic_ld_st_index(&prog, insn->imm,
2430 							      BPF_SIZE(insn->code), dst_reg,
2431 							      src_reg, X86_REG_R12, insn->off);
2432 			else
2433 				err = emit_atomic_rmw_index(&prog, insn->imm, BPF_SIZE(insn->code),
2434 							    dst_reg, src_reg, X86_REG_R12,
2435 							    insn->off);
2436 			if (err)
2437 				return err;
2438 			goto populate_extable;
2439 
2440 			/* call */
2441 		case BPF_JMP | BPF_CALL: {
2442 			u8 *ip = image + addrs[i - 1];
2443 
2444 			func = (u8 *) __bpf_call_base + imm32;
2445 			if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) {
2446 				LOAD_TAIL_CALL_CNT_PTR(stack_depth);
2447 				ip += 7;
2448 			}
2449 			if (!imm32)
2450 				return -EINVAL;
2451 			if (priv_frame_ptr) {
2452 				push_r9(&prog);
2453 				ip += 2;
2454 			}
2455 			ip += x86_call_depth_emit_accounting(&prog, func, ip);
2456 			if (emit_call(&prog, func, ip))
2457 				return -EINVAL;
2458 			if (priv_frame_ptr)
2459 				pop_r9(&prog);
2460 			break;
2461 		}
2462 
2463 		case BPF_JMP | BPF_TAIL_CALL:
2464 			if (imm32)
2465 				emit_bpf_tail_call_direct(bpf_prog,
2466 							  &bpf_prog->aux->poke_tab[imm32 - 1],
2467 							  &prog, image + addrs[i - 1],
2468 							  callee_regs_used,
2469 							  stack_depth,
2470 							  ctx);
2471 			else
2472 				emit_bpf_tail_call_indirect(bpf_prog,
2473 							    &prog,
2474 							    callee_regs_used,
2475 							    stack_depth,
2476 							    image + addrs[i - 1],
2477 							    ctx);
2478 			break;
2479 
2480 			/* cond jump */
2481 		case BPF_JMP | BPF_JEQ | BPF_X:
2482 		case BPF_JMP | BPF_JNE | BPF_X:
2483 		case BPF_JMP | BPF_JGT | BPF_X:
2484 		case BPF_JMP | BPF_JLT | BPF_X:
2485 		case BPF_JMP | BPF_JGE | BPF_X:
2486 		case BPF_JMP | BPF_JLE | BPF_X:
2487 		case BPF_JMP | BPF_JSGT | BPF_X:
2488 		case BPF_JMP | BPF_JSLT | BPF_X:
2489 		case BPF_JMP | BPF_JSGE | BPF_X:
2490 		case BPF_JMP | BPF_JSLE | BPF_X:
2491 		case BPF_JMP32 | BPF_JEQ | BPF_X:
2492 		case BPF_JMP32 | BPF_JNE | BPF_X:
2493 		case BPF_JMP32 | BPF_JGT | BPF_X:
2494 		case BPF_JMP32 | BPF_JLT | BPF_X:
2495 		case BPF_JMP32 | BPF_JGE | BPF_X:
2496 		case BPF_JMP32 | BPF_JLE | BPF_X:
2497 		case BPF_JMP32 | BPF_JSGT | BPF_X:
2498 		case BPF_JMP32 | BPF_JSLT | BPF_X:
2499 		case BPF_JMP32 | BPF_JSGE | BPF_X:
2500 		case BPF_JMP32 | BPF_JSLE | BPF_X:
2501 			/* cmp dst_reg, src_reg */
2502 			maybe_emit_mod(&prog, dst_reg, src_reg,
2503 				       BPF_CLASS(insn->code) == BPF_JMP);
2504 			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
2505 			goto emit_cond_jmp;
2506 
2507 		case BPF_JMP | BPF_JSET | BPF_X:
2508 		case BPF_JMP32 | BPF_JSET | BPF_X:
2509 			/* test dst_reg, src_reg */
2510 			maybe_emit_mod(&prog, dst_reg, src_reg,
2511 				       BPF_CLASS(insn->code) == BPF_JMP);
2512 			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
2513 			goto emit_cond_jmp;
2514 
2515 		case BPF_JMP | BPF_JSET | BPF_K:
2516 		case BPF_JMP32 | BPF_JSET | BPF_K:
2517 			/* test dst_reg, imm32 */
2518 			maybe_emit_1mod(&prog, dst_reg,
2519 					BPF_CLASS(insn->code) == BPF_JMP);
2520 			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
2521 			goto emit_cond_jmp;
2522 
2523 		case BPF_JMP | BPF_JEQ | BPF_K:
2524 		case BPF_JMP | BPF_JNE | BPF_K:
2525 		case BPF_JMP | BPF_JGT | BPF_K:
2526 		case BPF_JMP | BPF_JLT | BPF_K:
2527 		case BPF_JMP | BPF_JGE | BPF_K:
2528 		case BPF_JMP | BPF_JLE | BPF_K:
2529 		case BPF_JMP | BPF_JSGT | BPF_K:
2530 		case BPF_JMP | BPF_JSLT | BPF_K:
2531 		case BPF_JMP | BPF_JSGE | BPF_K:
2532 		case BPF_JMP | BPF_JSLE | BPF_K:
2533 		case BPF_JMP32 | BPF_JEQ | BPF_K:
2534 		case BPF_JMP32 | BPF_JNE | BPF_K:
2535 		case BPF_JMP32 | BPF_JGT | BPF_K:
2536 		case BPF_JMP32 | BPF_JLT | BPF_K:
2537 		case BPF_JMP32 | BPF_JGE | BPF_K:
2538 		case BPF_JMP32 | BPF_JLE | BPF_K:
2539 		case BPF_JMP32 | BPF_JSGT | BPF_K:
2540 		case BPF_JMP32 | BPF_JSLT | BPF_K:
2541 		case BPF_JMP32 | BPF_JSGE | BPF_K:
2542 		case BPF_JMP32 | BPF_JSLE | BPF_K:
2543 			/* test dst_reg, dst_reg to save one extra byte */
2544 			if (imm32 == 0) {
2545 				maybe_emit_mod(&prog, dst_reg, dst_reg,
2546 					       BPF_CLASS(insn->code) == BPF_JMP);
2547 				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
2548 				goto emit_cond_jmp;
2549 			}
2550 
2551 			/* cmp dst_reg, imm8/32 */
2552 			maybe_emit_1mod(&prog, dst_reg,
2553 					BPF_CLASS(insn->code) == BPF_JMP);
2554 
2555 			if (is_imm8(imm32))
2556 				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
2557 			else
2558 				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
2559 
2560 emit_cond_jmp:		/* Convert BPF opcode to x86 */
2561 			switch (BPF_OP(insn->code)) {
2562 			case BPF_JEQ:
2563 				jmp_cond = X86_JE;
2564 				break;
2565 			case BPF_JSET:
2566 			case BPF_JNE:
2567 				jmp_cond = X86_JNE;
2568 				break;
2569 			case BPF_JGT:
2570 				/* GT is unsigned '>', JA in x86 */
2571 				jmp_cond = X86_JA;
2572 				break;
2573 			case BPF_JLT:
2574 				/* LT is unsigned '<', JB in x86 */
2575 				jmp_cond = X86_JB;
2576 				break;
2577 			case BPF_JGE:
2578 				/* GE is unsigned '>=', JAE in x86 */
2579 				jmp_cond = X86_JAE;
2580 				break;
2581 			case BPF_JLE:
2582 				/* LE is unsigned '<=', JBE in x86 */
2583 				jmp_cond = X86_JBE;
2584 				break;
2585 			case BPF_JSGT:
2586 				/* Signed '>', GT in x86 */
2587 				jmp_cond = X86_JG;
2588 				break;
2589 			case BPF_JSLT:
2590 				/* Signed '<', LT in x86 */
2591 				jmp_cond = X86_JL;
2592 				break;
2593 			case BPF_JSGE:
2594 				/* Signed '>=', GE in x86 */
2595 				jmp_cond = X86_JGE;
2596 				break;
2597 			case BPF_JSLE:
2598 				/* Signed '<=', LE in x86 */
2599 				jmp_cond = X86_JLE;
2600 				break;
2601 			default: /* to silence GCC warning */
2602 				return -EFAULT;
2603 			}
2604 			jmp_offset = addrs[i + insn->off] - addrs[i];
2605 			if (is_imm8_jmp_offset(jmp_offset)) {
2606 				if (jmp_padding) {
2607 					/* To keep the jmp_offset valid, the extra bytes are
2608 					 * padded before the jump insn, so we subtract the
2609 					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
2610 					 *
2611 					 * If the previous pass already emits an imm8
2612 					 * jmp_cond, then this BPF insn won't shrink, so
2613 					 * "nops" is 0.
2614 					 *
2615 					 * On the other hand, if the previous pass emits an
2616 					 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
2617 					 * keep the image from shrinking further.
2618 					 *
2619 					 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
2620 					 *     is 2 bytes, so the size difference is 4 bytes.
2621 					 */
2622 					nops = INSN_SZ_DIFF - 2;
2623 					if (nops != 0 && nops != 4) {
2624 						pr_err("unexpected jmp_cond padding: %d bytes\n",
2625 						       nops);
2626 						return -EFAULT;
2627 					}
2628 					emit_nops(&prog, nops);
2629 				}
2630 				EMIT2(jmp_cond, jmp_offset);
2631 			} else if (is_simm32(jmp_offset)) {
2632 				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
2633 			} else {
2634 				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
2635 				return -EFAULT;
2636 			}
2637 
2638 			break;
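			/*
			 * E.g. "if r1 > r2 goto +5" (BPF_JMP | BPF_JGT | BPF_X)
			 * becomes "48 39 f7" (cmp rdi, rsi) followed by a ja
			 * whose displacement is addrs[i + 5] - addrs[i], emitted
			 * as rel8 when it fits and as rel32 otherwise.
			 */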
2639 
2640 		case BPF_JMP | BPF_JA | BPF_X:
2641 			emit_indirect_jump(&prog, insn->dst_reg, image + addrs[i - 1]);
2642 			break;
2643 		case BPF_JMP | BPF_JA:
2644 		case BPF_JMP32 | BPF_JA:
2645 			if (BPF_CLASS(insn->code) == BPF_JMP) {
2646 				if (insn->off == -1)
2647 					/* -1 jmp instructions will always jump
2648 					 * backwards two bytes. Explicitly handling
2649 					 * this case avoids wasting too many passes
2650 					 * when there are long sequences of replaced
2651 					 * dead code.
2652 					 */
2653 					jmp_offset = -2;
2654 				else
2655 					jmp_offset = addrs[i + insn->off] - addrs[i];
2656 			} else {
2657 				if (insn->imm == -1)
2658 					jmp_offset = -2;
2659 				else
2660 					jmp_offset = addrs[i + insn->imm] - addrs[i];
2661 			}
2662 
2663 			if (!jmp_offset) {
2664 				/*
2665 				 * If jmp_padding is enabled, the extra nops will
2666 				 * be inserted. Otherwise, optimize out nop jumps.
2667 				 */
2668 				if (jmp_padding) {
2669 					/* There are 3 possible conditions.
2670 					 * (1) This BPF_JA is already optimized out in
2671 					 *     the previous run, so there is no need
2672 					 *     to pad any extra byte (0 byte).
2673 					 * (2) The previous pass emits an imm8 jmp,
2674 					 *     so we pad 2 bytes to match the previous
2675 					 *     insn size.
2676 					 * (3) Similarly, the previous pass emits an
2677 					 *     imm32 jmp, and 5 bytes is padded.
2678 					 */
2679 					nops = INSN_SZ_DIFF;
2680 					if (nops != 0 && nops != 2 && nops != 5) {
2681 						pr_err("unexpected nop jump padding: %d bytes\n",
2682 						       nops);
2683 						return -EFAULT;
2684 					}
2685 					emit_nops(&prog, nops);
2686 				}
2687 				break;
2688 			}
2689 emit_jmp:
2690 			if (is_imm8_jmp_offset(jmp_offset)) {
2691 				if (jmp_padding) {
2692 					/* To avoid breaking jmp_offset, the extra bytes
2693 					 * are padded before the actual jmp insn, so
2694 					 * 2 bytes is subtracted from INSN_SZ_DIFF.
2695 					 *
2696 					 * If the previous pass already emits an imm8
2697 					 * jmp, there is nothing to pad (0 byte).
2698 					 *
2699 					 * If it emits an imm32 jmp (5 bytes) previously
2700 					 * and now an imm8 jmp (2 bytes), then we pad
2701 					 * (5 - 2 = 3) bytes to stop the image from
2702 					 * shrinking further.
2703 					 */
2704 					nops = INSN_SZ_DIFF - 2;
2705 					if (nops != 0 && nops != 3) {
2706 						pr_err("unexpected jump padding: %d bytes\n",
2707 						       nops);
2708 						return -EFAULT;
2709 					}
2710 					emit_nops(&prog, nops);
2711 				}
2712 				EMIT2(0xEB, jmp_offset);
2713 			} else if (is_simm32(jmp_offset)) {
2714 				EMIT1_off32(0xE9, jmp_offset);
2715 			} else {
2716 				pr_err("jmp gen bug %llx\n", jmp_offset);
2717 				return -EFAULT;
2718 			}
2719 			break;
2720 
2721 		case BPF_JMP | BPF_EXIT:
2722 			if (seen_exit) {
2723 				jmp_offset = ctx->cleanup_addr - addrs[i];
2724 				goto emit_jmp;
2725 			}
2726 			seen_exit = true;
2727 			/* Update cleanup_addr */
2728 			ctx->cleanup_addr = proglen;
2729 			if (bpf_prog_was_classic(bpf_prog) &&
2730 			    !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {
2731 				u8 *ip = image + addrs[i - 1];
2732 
2733 				if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
2734 					return -EINVAL;
2735 			}
2736 			if (bpf_prog->aux->exception_boundary) {
2737 				pop_callee_regs(&prog, all_callee_regs_used);
2738 				pop_r12(&prog);
2739 			} else {
2740 				pop_callee_regs(&prog, callee_regs_used);
2741 				if (arena_vm_start)
2742 					pop_r12(&prog);
2743 			}
2744 			EMIT1(0xC9);         /* leave */
2745 			bpf_prog->aux->ksym.fp_end = prog - temp;
2746 
2747 			emit_return(&prog, image + addrs[i - 1] + (prog - temp));
2748 			break;
2749 
2750 		default:
2751 			/*
2752 			 * By design x86-64 JIT should support all BPF instructions.
2753 			 * This error will be seen if a new instruction was added
2754 			 * to the interpreter, but not to the JIT, or if there is
2755 			 * junk in bpf_prog.
2756 			 */
2757 			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
2758 			return -EINVAL;
2759 		}
2760 
2761 		ilen = prog - temp;
2762 		if (ilen > BPF_MAX_INSN_SIZE) {
2763 			pr_err("bpf_jit: fatal insn size error\n");
2764 			return -EFAULT;
2765 		}
2766 
2767 		if (image) {
2768 			/*
2769 			 * When populating the image, assert that:
2770 			 *
2771 			 *  i) We do not write beyond the allocated space, and
2772 			 * ii) addrs[i] did not change from the prior run, in order
2773 			 *     to validate assumptions made for computing branch
2774 			 *     displacements.
2775 			 */
2776 			if (unlikely(proglen + ilen > oldproglen ||
2777 				     proglen + ilen != addrs[i])) {
2778 				pr_err("bpf_jit: fatal error\n");
2779 				return -EFAULT;
2780 			}
2781 			memcpy(rw_image + proglen, temp, ilen);
2782 		}
2783 		proglen += ilen;
2784 		addrs[i] = proglen;
2785 		prog = temp;
2786 	}
2787 
2788 	if (image && excnt != bpf_prog->aux->num_exentries) {
2789 		pr_err("extable is not populated\n");
2790 		return -EFAULT;
2791 	}
2792 	return proglen;
2793 }
2794 
2795 static void clean_stack_garbage(const struct btf_func_model *m,
2796 				u8 **pprog, int nr_stack_slots,
2797 				int stack_size)
2798 {
2799 	int arg_size, off;
2800 	u8 *prog;
2801 
2802 	/* Generally speaking, the compiler passes on-stack arguments
2803 	 * with a "push" instruction, which occupies 8 bytes on the
2804 	 * stack. In that case there are no garbage values while we copy
2805 	 * the arguments from the origin stack frame to the current one
2806 	 * as BPF_DW.
2807 	 *
2808 	 * However, sometimes the compiler allocates only 4 bytes on
2809 	 * the stack for an argument. For now, this only happens when
2810 	 * there is a single on-stack argument whose size is at most
2811 	 * 4 bytes. In that case, the upper 4 bytes of the slot where
2812 	 * we store the argument in the current stack frame hold
2813 	 * garbage values.
2814 	 *
2815 	 * arguments on origin stack:
2816 	 *
2817 	 * stack_arg_1(4-byte) xxx(4-byte)
2818 	 *
2819 	 * what we copy:
2820 	 *
2821 	 * stack_arg_1(8-byte): stack_arg_1(origin) xxx
2822 	 *
2823 	 * and the xxx is the garbage values which we should clean here.
2824 	 * and the xxx is the garbage value which we should clean here.
2825 	if (nr_stack_slots != 1)
2826 		return;
2827 
2828 	/* the size of the last argument */
2829 	arg_size = m->arg_size[m->nr_args - 1];
2830 	if (arg_size <= 4) {
2831 		off = -(stack_size - 4);
2832 		prog = *pprog;
2833 		/* mov DWORD PTR [rbp + off], 0 */
2834 		if (!is_imm8(off))
2835 			EMIT2_off32(0xC7, 0x85, off);
2836 		else
2837 			EMIT3(0xC7, 0x45, off);
2838 		EMIT(0, 4);
2839 		*pprog = prog;
2840 	}
2841 }
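/*
 * For instance, with stack_size = 16 the store above assembles to
 * "c7 45 f4 00 00 00 00", i.e. mov DWORD PTR [rbp-0xc], 0, which wipes
 * the upper half of the 8-byte argument slot.
 */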
2842 
2843 /* get the count of the regs that are used to pass arguments */
2844 static int get_nr_used_regs(const struct btf_func_model *m)
2845 {
2846 	int i, arg_regs, nr_used_regs = 0;
2847 
2848 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2849 		arg_regs = (m->arg_size[i] + 7) / 8;
2850 		if (nr_used_regs + arg_regs <= 6)
2851 			nr_used_regs += arg_regs;
2852 
2853 		if (nr_used_regs >= 6)
2854 			break;
2855 	}
2856 
2857 	return nr_used_regs;
2858 }
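/*
 * Worked example, using the foo() prototype from the save_args() comment
 * below: the five leading chars take one reg each (nr_used_regs = 5), the
 * 12-byte struct would need two regs but 5 + 2 > 6 so it goes on the
 * stack, and the trailing char still fits in the sixth reg, so the
 * function returns 6.
 */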
2859 
2860 static void save_args(const struct btf_func_model *m, u8 **prog,
2861 		      int stack_size, bool for_call_origin, u32 flags)
2862 {
2863 	int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
2864 	bool use_jmp = bpf_trampoline_use_jmp(flags);
2865 	int i, j;
2866 
2867 	/* Store function arguments to stack.
2868 	 * For a function that accepts two pointers the sequence will be:
2869 	 * mov QWORD PTR [rbp-0x10],rdi
2870 	 * mov QWORD PTR [rbp-0x8],rsi
2871 	 */
2872 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2873 		arg_regs = (m->arg_size[i] + 7) / 8;
2874 
2875 		/* According to Yonghong's research, the members of a struct
2876 		 * argument are passed either all in registers or all on the
2877 		 * stack. Meanwhile, the compiler passes an argument in regs
2878 		 * if the remaining regs can hold it.
2879 		 *
2880 		 * The args can therefore arrive out of order. For example:
2881 		 *
2882 		 * struct foo_struct {
2883 		 *     long a;
2884 		 *     int b;
2885 		 * };
2886 		 * int foo(char, char, char, char, char, struct foo_struct,
2887 		 *         char);
2888 		 *
2889 		 * arg1-5 and arg7 will be passed in regs, while arg6 will be
2890 		 * passed on the stack.
2891 		 */
2892 		if (nr_regs + arg_regs > 6) {
2893 			/* copy function arguments from origin stack frame
2894 			 * into current stack frame.
2895 			 * The starting address of the on-stack arguments is:
2896 			 *   rbp + 8 (pushed rbp) +
2897 			 *   8 (return addr of the origin call) +
2898 			 *   8 (return addr of the caller),
2899 			 * i.e. rbp + 24 (rbp + 16 in the "jmp" case, which has
2900 			 * no origin-call return address).
2901 			 * which means: rbp + 24
2902 			 */
2903 			for (j = 0; j < arg_regs; j++) {
2904 				emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
2905 					 nr_stack_slots * 8 + 16 + (!use_jmp) * 8);
2906 				emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
2907 					 -stack_size);
2908 
2909 				if (!nr_stack_slots)
2910 					first_off = stack_size;
2911 				stack_size -= 8;
2912 				nr_stack_slots++;
2913 			}
2914 		} else {
2915 			/* Only copy the on-stack arguments to the current
2916 			 * 'stack_size' and ignore the regs; this path is used
2917 			 * to prepare the on-stack arguments for the origin call.
2918 			 */
2919 			if (for_call_origin) {
2920 				nr_regs += arg_regs;
2921 				continue;
2922 			}
2923 
2924 			/* copy the arguments from regs into stack */
2925 			for (j = 0; j < arg_regs; j++) {
2926 				emit_stx(prog, BPF_DW, BPF_REG_FP,
2927 					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2928 					 -stack_size);
2929 				stack_size -= 8;
2930 				nr_regs++;
2931 			}
2932 		}
2933 	}
2934 
2935 	clean_stack_garbage(m, prog, nr_stack_slots, first_off);
2936 }
2937 
2938 static void restore_regs(const struct btf_func_model *m, u8 **prog,
2939 			 int stack_size)
2940 {
2941 	int i, j, arg_regs, nr_regs = 0;
2942 
2943 	/* Restore function arguments from stack.
2944 	 * For a function that accepts two pointers the sequence will be:
2945 	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
2946 	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
2947 	 *
2948 	 * The logic here is similar to what we do in save_args()
2949 	 */
2950 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2951 		arg_regs = (m->arg_size[i] + 7) / 8;
2952 		if (nr_regs + arg_regs <= 6) {
2953 			for (j = 0; j < arg_regs; j++) {
2954 				emit_ldx(prog, BPF_DW,
2955 					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2956 					 BPF_REG_FP,
2957 					 -stack_size);
2958 				stack_size -= 8;
2959 				nr_regs++;
2960 			}
2961 		} else {
2962 			stack_size -= 8 * arg_regs;
2963 		}
2964 
2965 		if (nr_regs >= 6)
2966 			break;
2967 	}
2968 }
2969 
2970 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
2971 			   struct bpf_tramp_link *l, int stack_size,
2972 			   int run_ctx_off, bool save_ret,
2973 			   void *image, void *rw_image)
2974 {
2975 	u8 *prog = *pprog;
2976 	u8 *jmp_insn;
2977 	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
2978 	struct bpf_prog *p = l->link.prog;
2979 	u64 cookie = l->cookie;
2980 
2981 	/* mov rdi, cookie */
2982 	emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
2983 
2984 	/* Prepare struct bpf_tramp_run_ctx.
2985 	 *
2986 	 * bpf_tramp_run_ctx is already preserved by
2987 	 * arch_prepare_bpf_trampoline().
2988 	 *
2989 	 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
2990 	 */
2991 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
2992 
2993 	/* arg1: mov rdi, progs[i] */
2994 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2995 	/* arg2: lea rsi, [rbp - run_ctx_off] */
2996 	if (!is_imm8(-run_ctx_off))
2997 		EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
2998 	else
2999 		EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
3000 
3001 	if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
3002 		return -EINVAL;
3003 	/* remember prog start time returned by __bpf_prog_enter */
3004 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
3005 
3006 	/* if (__bpf_prog_enter*(prog) == 0)
3007 	 *	goto skip_exec_of_prog;
3008 	 */
3009 	EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
3010 	/* emit 2 nops that will be replaced with JE insn */
3011 	jmp_insn = prog;
3012 	emit_nops(&prog, 2);
3013 
3014 	/* arg1: lea rdi, [rbp - stack_size] */
3015 	if (!is_imm8(-stack_size))
3016 		EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
3017 	else
3018 		EMIT4(0x48, 0x8D, 0x7D, -stack_size);
3019 	/* arg2: progs[i]->insnsi for interpreter */
3020 	if (!p->jited)
3021 		emit_mov_imm64(&prog, BPF_REG_2,
3022 			       (long) p->insnsi >> 32,
3023 			       (u32) (long) p->insnsi);
3024 	/* call JITed bpf program or interpreter */
3025 	if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
3026 		return -EINVAL;
3027 
3028 	/*
3029 	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
3030 	 * of the previous call which is then passed on the stack to
3031 	 * the next BPF program.
3032 	 *
3033 	 * BPF_TRAMP_FENTRY trampoline may need to return the return
3034 	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
3035 	 */
3036 	if (save_ret)
3037 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
3038 
3039 	/* replace 2 nops with JE insn, since jmp target is known */
3040 	jmp_insn[0] = X86_JE;
3041 	jmp_insn[1] = prog - jmp_insn - 2;
3042 
3043 	/* arg1: mov rdi, progs[i] */
3044 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
3045 	/* arg2: mov rsi, rbx <- start time in nsec */
3046 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
3047 	/* arg3: lea rdx, [rbp - run_ctx_off] */
3048 	if (!is_imm8(-run_ctx_off))
3049 		EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
3050 	else
3051 		EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
3052 	if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
3053 		return -EINVAL;
3054 
3055 	*pprog = prog;
3056 	return 0;
3057 }
3058 
3059 static void emit_align(u8 **pprog, u32 align)
3060 {
3061 	u8 *target, *prog = *pprog;
3062 
3063 	target = PTR_ALIGN(prog, align);
3064 	if (target != prog)
3065 		emit_nops(&prog, target - prog);
3066 
3067 	*pprog = prog;
3068 }
3069 
3070 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
3071 {
3072 	u8 *prog = *pprog;
3073 	s64 offset;
3074 
3075 	offset = func - (ip + 2 + 4);
3076 	if (!is_simm32(offset)) {
3077 		pr_err("Target %p is out of range\n", func);
3078 		return -EINVAL;
3079 	}
3080 	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
3081 	*pprog = prog;
3082 	return 0;
3083 }
3084 
3085 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
3086 		      struct bpf_tramp_links *tl, int stack_size,
3087 		      int run_ctx_off, bool save_ret,
3088 		      void *image, void *rw_image)
3089 {
3090 	int i;
3091 	u8 *prog = *pprog;
3092 
3093 	for (i = 0; i < tl->nr_links; i++) {
3094 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
3095 				    run_ctx_off, save_ret, image, rw_image))
3096 			return -EINVAL;
3097 	}
3098 	*pprog = prog;
3099 	return 0;
3100 }
3101 
3102 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
3103 			      struct bpf_tramp_links *tl, int stack_size,
3104 			      int run_ctx_off, u8 **branches,
3105 			      void *image, void *rw_image)
3106 {
3107 	u8 *prog = *pprog;
3108 	int i;
3109 
3110 	/* The first fmod_ret program will receive a garbage return value.
3111 	 * Set this to 0 to avoid confusing the program.
3112 	 */
3113 	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
3114 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
3115 	for (i = 0; i < tl->nr_links; i++) {
3116 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
3117 				    image, rw_image))
3118 			return -EINVAL;
3119 
3120 		/* mod_ret prog stored return value into [rbp - 8]. Emit:
3121 		 * if (*(u64 *)(rbp - 8) !=  0)
3122 		 *	goto do_fexit;
3123 		 */
3124 		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
3125 		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
3126 
3127 		/* Save the location of the branch and generate 6 nops
3128 		 * (2 bytes for the jump and 4 bytes for its offset). These nops
3129 		 * are replaced with a conditional jump once do_fexit (i.e. the
3130 		 * start of the fexit invocation) is finalized.
3131 		 */
3132 		branches[i] = prog;
3133 		emit_nops(&prog, 4 + 2);
3134 	}
3135 
3136 	*pprog = prog;
3137 	return 0;
3138 }
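/*
 * The "cmp QWORD PTR [rbp - 0x8], 0x0" above encodes as 48 83 7d f8 00,
 * and the six nop bytes reserve room for the "0f 85 rel32" (jne) that
 * emit_cond_near_jump() later patches in once do_fexit is known.
 */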
3139 
3140 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
3141 #define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack)	\
3142 	__LOAD_TCC_PTR(-round_up(stack, 8) - 8)
3143 
3144 /* Example:
3145  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
3146  * its 'struct btf_func_model' will be nr_args=2
3147  * The assembly code when eth_type_trans is executing after trampoline:
3148  *
3149  * push rbp
3150  * mov rbp, rsp
3151  * sub rsp, 16                     // space for skb and dev
3152  * push rbx                        // temp regs to pass start time
3153  * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
3154  * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
3155  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
3156  * mov rbx, rax                    // remember start time if bpf stats are enabled
3157  * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
3158  * call addr_of_jited_FENTRY_prog
3159  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
3160  * mov rsi, rbx                    // prog start time
3161  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
3162  * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
3163  * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
3164  * pop rbx
3165  * leave
3166  * ret
3167  *
3168  * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
3169  * replaced with 'call generated_bpf_trampoline'. When it returns
3170  * eth_type_trans will continue executing with original skb and dev pointers.
3171  *
3172  * The assembly code when eth_type_trans is called from trampoline:
3173  *
3174  * push rbp
3175  * mov rbp, rsp
3176  * sub rsp, 24                     // space for skb, dev, return value
3177  * push rbx                        // temp regs to pass start time
3178  * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
3179  * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
3180  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
3181  * mov rbx, rax                    // remember start time if bpf stats are enabled
3182  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
3183  * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
3184  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
3185  * mov rsi, rbx                    // prog start time
3186  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
3187  * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
3188  * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
3189  * call eth_type_trans+5           // execute body of eth_type_trans
3190  * mov qword ptr [rbp - 8], rax    // save return value
3191  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
3192  * mov rbx, rax                    // remember start time if bpf stats are enabled
3193  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
3194  * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
3195  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
3196  * mov rsi, rbx                    // prog start time
3197  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
3198  * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
3199  * pop rbx
3200  * leave
3201  * add rsp, 8                      // skip eth_type_trans's frame
3202  * ret                             // return to its caller
3203  */
3204 static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
3205 					 void *rw_image_end, void *image,
3206 					 const struct btf_func_model *m, u32 flags,
3207 					 struct bpf_tramp_links *tlinks,
3208 					 void *func_addr)
3209 {
3210 	int i, ret, nr_regs = m->nr_args, stack_size = 0;
3211 	int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
3212 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
3213 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
3214 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
3215 	void *orig_call = func_addr;
3216 	u8 **branches = NULL;
3217 	u8 *prog;
3218 	bool save_ret;
3219 
3220 	/*
3221 	 * F_INDIRECT is only compatible with F_RET_FENTRY_RET; it is
3222 	 * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG
3223 	 * because @func_addr is not a traced kernel function in that case.
3224 	 */
3225 	WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
3226 		     (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));
3227 
3228 	/* extra registers for struct arguments */
3229 	for (i = 0; i < m->nr_args; i++) {
3230 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
3231 			nr_regs += (m->arg_size[i] + 7) / 8 - 1;
3232 	}
3233 
3234 	/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. The first
3235 	 * six are passed through regs, the rest go through the stack.
3236 	 */
3237 	if (nr_regs > MAX_BPF_FUNC_ARGS)
3238 		return -ENOTSUPP;
3239 
3240 	/* Generated trampoline stack layout:
3241 	 *
3242 	 * RBP + 8         [ return address  ]
3243 	 * RBP + 0         [ RBP             ]
3244 	 *
3245 	 * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
3246 	 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
3247 	 *
3248 	 *                 [ reg_argN        ]  always
3249 	 *                 [ ...             ]
3250 	 * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
3251 	 *
3252 	 * RBP - nregs_off [ regs count	     ]  always
3253 	 *
3254 	 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
3255 	 *
3256 	 * RBP - rbx_off   [ rbx value       ]  always
3257 	 *
3258 	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
3259 	 *
3260 	 *                     [ stack_argN ]  BPF_TRAMP_F_CALL_ORIG
3261 	 *                     [ ...        ]
3262 	 *                     [ stack_arg2 ]
3263 	 * RBP - arg_stack_off [ stack_arg1 ]
3264 	 * RSP                 [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX
3265 	 */
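	/*
	 * For illustration, take a traced function with two scalar args and
	 * BPF_TRAMP_F_CALL_ORIG set, and assume sizeof(struct
	 * bpf_tramp_run_ctx) rounds up to 24 here: the computation below then
	 * yields stack_size = 8 after save_ret, regs_off = 24, nregs_off = 32,
	 * ip_off = 32 (no F_IP_ARG), rbx_off = 40 and run_ctx_off = 64.
	 */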
3266 
3267 	/* room for return value of orig_call or fentry prog */
3268 	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
3269 	if (save_ret)
3270 		stack_size += 8;
3271 
3272 	stack_size += nr_regs * 8;
3273 	regs_off = stack_size;
3274 
3275 	/* regs count  */
3276 	stack_size += 8;
3277 	nregs_off = stack_size;
3278 
3279 	if (flags & BPF_TRAMP_F_IP_ARG)
3280 		stack_size += 8; /* room for IP address argument */
3281 
3282 	ip_off = stack_size;
3283 
3284 	stack_size += 8;
3285 	rbx_off = stack_size;
3286 
3287 	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
3288 	run_ctx_off = stack_size;
3289 
3290 	if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
3291 		/* the space used to pass arguments on the stack */
3292 		stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
3293 		/* make sure the stack pointer is 16-byte aligned if we
3294 		 * need to pass arguments on the stack, which means
3295 		 *  [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
3296 		 * should be 16-byte aligned. The following code depends on
3297 		 * stack_size already being 8-byte aligned.
3298 		 */
3299 		if (bpf_trampoline_use_jmp(flags)) {
3300 			/* no rip in the "jmp" case */
3301 			stack_size += (stack_size % 16) ? 8 : 0;
3302 		} else {
3303 			stack_size += (stack_size % 16) ? 0 : 8;
3304 		}
3305 	}
3306 
3307 	arg_stack_off = stack_size;
3308 
3309 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
3310 		/* skip patched call instruction and point orig_call to actual
3311 		 * body of the kernel function.
3312 		 */
3313 		if (is_endbr(orig_call))
3314 			orig_call += ENDBR_INSN_SIZE;
3315 		orig_call += X86_PATCH_SIZE;
3316 	}
3317 
3318 	prog = rw_image;
3319 
3320 	if (flags & BPF_TRAMP_F_INDIRECT) {
3321 		/*
3322 		 * Indirect call for bpf_struct_ops
3323 		 */
3324 		emit_cfi(&prog, image,
3325 			 cfi_get_func_hash(func_addr),
3326 			 cfi_get_func_arity(func_addr));
3327 	} else {
3328 		/*
3329 		 * Direct-call fentry stub, as such it needs accounting for the
3330 		 * __fentry__ call.
3331 		 */
3332 		x86_call_depth_emit_accounting(&prog, NULL, image);
3333 	}
3334 	EMIT1(0x55);		 /* push rbp */
3335 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
3336 	if (im)
3337 		im->ksym.fp_start = prog - (u8 *)rw_image;
3338 
3339 	if (!is_imm8(stack_size)) {
3340 		/* sub rsp, stack_size */
3341 		EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
3342 	} else {
3343 		/* sub rsp, stack_size */
3344 		EMIT4(0x48, 0x83, 0xEC, stack_size);
3345 	}
3346 	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
3347 		EMIT1(0x50);		/* push rax */
3348 	/* mov QWORD PTR [rbp - rbx_off], rbx */
3349 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
3350 
3351 	/* Store number of argument registers of the traced function:
3352 	 *   mov rax, nr_regs
3353 	 *   mov QWORD PTR [rbp - nregs_off], rax
3354 	 */
3355 	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
3356 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
3357 
3358 	if (flags & BPF_TRAMP_F_IP_ARG) {
3359 		/* Store IP address of the traced function:
3360 		 * movabsq rax, func_addr
3361 		 * mov QWORD PTR [rbp - ip_off], rax
3362 		 */
3363 		emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
3364 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
3365 	}
3366 
3367 	save_args(m, &prog, regs_off, false, flags);
3368 
3369 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
3370 		/* arg1: mov rdi, im */
3371 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
3372 		if (emit_rsb_call(&prog, __bpf_tramp_enter,
3373 				  image + (prog - (u8 *)rw_image))) {
3374 			ret = -EINVAL;
3375 			goto cleanup;
3376 		}
3377 	}
3378 
3379 	if (fentry->nr_links) {
3380 		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
3381 			       flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
3382 			return -EINVAL;
3383 	}
3384 
3385 	if (fmod_ret->nr_links) {
3386 		branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
3387 				   GFP_KERNEL);
3388 		if (!branches)
3389 			return -ENOMEM;
3390 
3391 		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
3392 				       run_ctx_off, branches, image, rw_image)) {
3393 			ret = -EINVAL;
3394 			goto cleanup;
3395 		}
3396 	}
3397 
3398 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
3399 		restore_regs(m, &prog, regs_off);
3400 		save_args(m, &prog, arg_stack_off, true, flags);
3401 
3402 		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3403 			/* Before calling the original function, load the
3404 			 * tail_call_cnt_ptr from stack to rax.
3405 			 */
3406 			LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
3407 		}
3408 
3409 		if (flags & BPF_TRAMP_F_ORIG_STACK) {
3410 			emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
3411 			EMIT2(0xff, 0xd3); /* call *rbx */
3412 		} else {
3413 			/* call original function */
3414 			if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
3415 				ret = -EINVAL;
3416 				goto cleanup;
3417 			}
3418 		}
3419 		/* remember return value in a stack for bpf prog to access */
3420 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
3421 		im->ip_after_call = image + (prog - (u8 *)rw_image);
3422 		emit_nops(&prog, X86_PATCH_SIZE);
3423 	}
3424 
3425 	if (fmod_ret->nr_links) {
3426 		/* From Intel 64 and IA-32 Architectures Optimization
3427 		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3428 		 * Coding Rule 11: All branch targets should be 16-byte
3429 		 * aligned.
3430 		 */
3431 		emit_align(&prog, 16);
3432 		/* Update the branches saved in invoke_bpf_mod_ret with the
3433 		 * aligned address of do_fexit.
3434 		 */
3435 		for (i = 0; i < fmod_ret->nr_links; i++) {
3436 			emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
3437 					    image + (branches[i] - (u8 *)rw_image), X86_JNE);
3438 		}
3439 	}
3440 
3441 	if (fexit->nr_links) {
3442 		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
3443 			       false, image, rw_image)) {
3444 			ret = -EINVAL;
3445 			goto cleanup;
3446 		}
3447 	}
3448 
3449 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
3450 		restore_regs(m, &prog, regs_off);
3451 
3452 	/* This needs to be done regardless. If there were fmod_ret programs,
3453 	 * the return value is only updated on the stack and still needs to be
3454 	 * restored to R0.
3455 	 */
3456 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
3457 		im->ip_epilogue = image + (prog - (u8 *)rw_image);
3458 		/* arg1: mov rdi, im */
3459 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
3460 		if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
3461 			ret = -EINVAL;
3462 			goto cleanup;
3463 		}
3464 	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3465 		/* Before running the original function, load the
3466 		 * tail_call_cnt_ptr from the stack into rax.
3467 		 */
3468 		LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
3469 	}
3470 
3471 	/* restore the return value of orig_call or the fentry prog back into RAX */
3472 	if (save_ret)
3473 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
3474 
3475 	emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
3476 
3477 	EMIT1(0xC9); /* leave */
3478 	if (im)
3479 		im->ksym.fp_end = prog - (u8 *)rw_image;
3480 
3481 	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
3482 		/* skip our return address and return to parent */
3483 		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
3484 	}
3485 	emit_return(&prog, image + (prog - (u8 *)rw_image));
3486 	/* Make sure the trampoline generation logic doesn't overflow */
3487 	if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
3488 		ret = -EFAULT;
3489 		goto cleanup;
3490 	}
3491 	ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;
3492 
3493 cleanup:
3494 	kfree(branches);
3495 	return ret;
3496 }
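/*
 * Editor's sketch (illustrative, not emitted verbatim): with
 * BPF_TRAMP_F_SKIP_FRAME set, the epilogue assembled above reduces to:
 *
 *	leave			// mov rsp, rbp; pop rbp
 *	add  rsp, 8		// drop the trampoline's own return address
 *	ret			// back to the traced function's caller
 *				// (emit_return may use a return thunk)
 */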
3497 
3498 void *arch_alloc_bpf_trampoline(unsigned int size)
3499 {
3500 	return bpf_prog_pack_alloc(size, jit_fill_hole);
3501 }
3502 
3503 void arch_free_bpf_trampoline(void *image, unsigned int size)
3504 {
3505 	bpf_prog_pack_free(image, size);
3506 }
3507 
3508 int arch_protect_bpf_trampoline(void *image, unsigned int size)
3509 {
3510 	return 0;
3511 }
3512 
3513 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
3514 				const struct btf_func_model *m, u32 flags,
3515 				struct bpf_tramp_links *tlinks,
3516 				void *func_addr)
3517 {
3518 	void *rw_image, *tmp;
3519 	int ret;
3520 	u32 size = image_end - image;
3521 
3522 	/* rw_image doesn't need to be in the module memory range, so we can
3523 	 * use kvmalloc.
3524 	 */
3525 	rw_image = kvmalloc(size, GFP_KERNEL);
3526 	if (!rw_image)
3527 		return -ENOMEM;
3528 
3529 	ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
3530 					    flags, tlinks, func_addr);
3531 	if (ret < 0)
3532 		goto out;
3533 
3534 	tmp = bpf_arch_text_copy(image, rw_image, size);
3535 	if (IS_ERR(tmp))
3536 		ret = PTR_ERR(tmp);
3537 out:
3538 	kvfree(rw_image);
3539 	return ret;
3540 }
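/*
 * Editor's note: the trampoline is assembled into the kvmalloc'ed
 * rw_image above and only then copied into the executable image with
 * bpf_arch_text_copy(), mirroring the W^X scheme that
 * bpf_jit_binary_pack_*() uses for regular programs.
 */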
3541 
3542 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
3543 			     struct bpf_tramp_links *tlinks, void *func_addr)
3544 {
3545 	struct bpf_tramp_image im;
3546 	void *image;
3547 	int ret;
3548 
3549 	/* Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
3550 	 * This will NOT cause fragmentation in the direct map, as we do not
3551 	 * call set_memory_*() on this buffer.
3552 	 *
3553 	 * We cannot use kvmalloc here, because we need the image to be in
3554 	 * the module memory range.
3555 	 */
3556 	image = bpf_jit_alloc_exec(PAGE_SIZE);
3557 	if (!image)
3558 		return -ENOMEM;
3559 
3560 	ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
3561 					    m, flags, tlinks, func_addr);
3562 	bpf_jit_free_exec(image);
3563 	return ret;
3564 }
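/*
 * Editor's note: this dry run reuses __arch_prepare_bpf_trampoline()
 * purely for its return value, which (see the end of that function)
 * is the number of bytes emitted plus BPF_INSN_SAFETY.
 */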
3565 
3566 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
3567 {
3568 	u8 *jg_reloc, *prog = *pprog;
3569 	int pivot, err, jg_bytes = 1;
3570 	s64 jg_offset;
3571 
3572 	if (a == b) {
3573 		/* Leaf node of recursion, i.e. not a range of indices
3574 		 * anymore.
3575 		 */
3576 		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
3577 		if (!is_simm32(progs[a]))
3578 			return -1;
3579 		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
3580 			    progs[a]);
3581 		err = emit_cond_near_jump(&prog,	/* je func */
3582 					  (void *)progs[a], image + (prog - buf),
3583 					  X86_JE);
3584 		if (err)
3585 			return err;
3586 
3587 		emit_indirect_jump(&prog, BPF_REG_3 /* R3 -> rdx */, image + (prog - buf));
3588 
3589 		*pprog = prog;
3590 		return 0;
3591 	}
3592 
3593 	/* Not a leaf node, so we pivot and recursively descend into
3594 	 * the lower and upper ranges.
3595 	 */
3596 	pivot = (b - a) / 2;
3597 	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
3598 	if (!is_simm32(progs[a + pivot]))
3599 		return -1;
3600 	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
3601 
3602 	if (pivot > 2) {				/* jg upper_part */
3603 		/* Require near jump. */
3604 		jg_bytes = 4;
3605 		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
3606 	} else {
3607 		EMIT2(X86_JG, 0);
3608 	}
3609 	jg_reloc = prog;
3610 
3611 	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
3612 				  progs, image, buf);
3613 	if (err)
3614 		return err;
3615 
3616 	/* From Intel 64 and IA-32 Architectures Optimization
3617 	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3618 	 * Coding Rule 11: All branch targets should be 16-byte
3619 	 * aligned.
3620 	 */
3621 	emit_align(&prog, 16);
3622 	jg_offset = prog - jg_reloc;
3623 	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
3624 
3625 	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
3626 				  b, progs, image, buf);
3627 	if (err)
3628 		return err;
3629 
3630 	*pprog = prog;
3631 	return 0;
3632 }
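/*
 * Editor's sketch of what emit_bpf_dispatcher() generates for four
 * sorted targets f0 < f1 < f2 < f3 (labels and addresses illustrative):
 *
 *	cmp rdx, f1		// pivot of [f0..f3]
 *	jg  2f
 *	cmp rdx, f0		// pivot of [f0..f1]
 *	jg  1f
 *	cmp rdx, f0		// leaf: f0
 *	je  f0
 *	jmp *rdx		// unknown target, indirect (retpoline-safe)
 * 1:	cmp rdx, f1		// leaf: f1
 *	je  f1
 *	jmp *rdx
 * 2:	...			// same shape for [f2..f3]
 */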
3633 
3634 static int cmp_ips(const void *a, const void *b)
3635 {
3636 	const s64 *ipa = a;
3637 	const s64 *ipb = b;
3638 
3639 	if (*ipa > *ipb)
3640 		return 1;
3641 	if (*ipa < *ipb)
3642 		return -1;
3643 	return 0;
3644 }
3645 
3646 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
3647 {
3648 	u8 *prog = buf;
3649 
3650 	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
3651 	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
3652 }
3653 
3654 static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size)
3655 {
3656 	int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
3657 	u64 *stack_ptr;
3658 
3659 	for_each_possible_cpu(cpu) {
3660 		stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
3661 		stack_ptr[0] = PRIV_STACK_GUARD_VAL;
3662 		stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL;
3663 	}
3664 }
3665 
3666 static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size,
3667 				   struct bpf_prog *prog)
3668 {
3669 	int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
3670 	u64 *stack_ptr;
3671 
3672 	for_each_possible_cpu(cpu) {
3673 		stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
3674 		if (stack_ptr[0] != PRIV_STACK_GUARD_VAL ||
3675 		    stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL) {
3676 			pr_err("BPF private stack overflow/underflow detected for prog %s\n",
3677 			       bpf_jit_get_prog_name(prog));
3678 			break;
3679 		}
3680 	}
3681 }
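/*
 * Editor's sketch of the per-CPU allocation the two helpers above
 * operate on; one magic u64 is written/checked at the start of each
 * guard region:
 *
 *	stack_ptr[0]			PRIV_STACK_GUARD_VAL  (overflow)
 *	... PRIV_STACK_GUARD_SZ bytes ...
 *	... round_up(stack_depth, 8) bytes of usable stack ...
 *	stack_ptr[underflow_idx]	PRIV_STACK_GUARD_VAL  (underflow)
 */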
3682 
3683 struct x64_jit_data {
3684 	struct bpf_binary_header *rw_header;
3685 	struct bpf_binary_header *header;
3686 	int *addrs;
3687 	u8 *image;
3688 	int proglen;
3689 	struct jit_context ctx;
3690 };
3691 
3692 #define MAX_PASSES 20
3693 #define PADDING_PASSES (MAX_PASSES - 5)
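/*
 * Editor's note: once the image has failed to converge within
 * PADDING_PASSES iterations, do_jit() is called with padding enabled,
 * so instructions that could shrink are padded with NOPs instead;
 * sizes then stop changing and the loop terminates within MAX_PASSES.
 */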
3694 
3695 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
3696 {
3697 	struct bpf_binary_header *rw_header = NULL;
3698 	struct bpf_binary_header *header = NULL;
3699 	struct bpf_prog *tmp, *orig_prog = prog;
3700 	void __percpu *priv_stack_ptr = NULL;
3701 	struct x64_jit_data *jit_data;
3702 	int priv_stack_alloc_sz;
3703 	int proglen, oldproglen = 0;
3704 	struct jit_context ctx = {};
3705 	bool tmp_blinded = false;
3706 	bool extra_pass = false;
3707 	bool padding = false;
3708 	u8 *rw_image = NULL;
3709 	u8 *image = NULL;
3710 	int *addrs;
3711 	int pass;
3712 	int i;
3713 
3714 	if (!prog->jit_requested)
3715 		return orig_prog;
3716 
3717 	tmp = bpf_jit_blind_constants(prog);
3718 	/*
3719 	 * If blinding was requested and we failed during blinding,
3720 	 * we must fall back to the interpreter.
3721 	 */
3722 	if (IS_ERR(tmp))
3723 		return orig_prog;
3724 	if (tmp != prog) {
3725 		tmp_blinded = true;
3726 		prog = tmp;
3727 	}
3728 
3729 	jit_data = prog->aux->jit_data;
3730 	if (!jit_data) {
3731 		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
3732 		if (!jit_data) {
3733 			prog = orig_prog;
3734 			goto out;
3735 		}
3736 		prog->aux->jit_data = jit_data;
3737 	}
3738 	priv_stack_ptr = prog->aux->priv_stack_ptr;
3739 	if (!priv_stack_ptr && prog->aux->jits_use_priv_stack) {
3740 		/* Allocate the actual private stack: the verifier-calculated
3741 		 * stack size plus two memory guards, one to catch overflow
3742 		 * and one to catch underflow.
3743 		 */
3744 		priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) +
3745 				      2 * PRIV_STACK_GUARD_SZ;
3746 		priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 8, GFP_KERNEL);
3747 		if (!priv_stack_ptr) {
3748 			prog = orig_prog;
3749 			goto out_priv_stack;
3750 		}
3751 
3752 		priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz);
3753 		prog->aux->priv_stack_ptr = priv_stack_ptr;
3754 	}
3755 	addrs = jit_data->addrs;
3756 	if (addrs) {
3757 		ctx = jit_data->ctx;
3758 		oldproglen = jit_data->proglen;
3759 		image = jit_data->image;
3760 		header = jit_data->header;
3761 		rw_header = jit_data->rw_header;
3762 		rw_image = (void *)rw_header + ((void *)image - (void *)header);
3763 		extra_pass = true;
3764 		padding = true;
3765 		goto skip_init_addrs;
3766 	}
3767 	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
3768 	if (!addrs) {
3769 		prog = orig_prog;
3770 		goto out_addrs;
3771 	}
3772 
3773 	/*
3774 	 * Before the first pass, make a rough estimate of addrs[]:
3775 	 * each BPF instruction is translated to fewer than 64 bytes.
3776 	 */
3777 	for (proglen = 0, i = 0; i <= prog->len; i++) {
3778 		proglen += 64;
3779 		addrs[i] = proglen;
3780 	}
3781 	ctx.cleanup_addr = proglen;
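	/*
	 * Editor's note: the seeding above makes addrs[i] = 64 * (i + 1),
	 * a deliberate over-estimate; offsets computed against it can only
	 * shrink on later passes, which is what lets the image converge.
	 */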
3782 skip_init_addrs:
3783 
3784 	/*
3785 	 * The JITed image shrinks with every pass and the loop iterates
3786 	 * until the image stops shrinking. Very large BPF programs
3787 	 * may converge only on the last pass; in that case, do one
3788 	 * more pass to emit the final image.
3789 	 */
3790 	for (pass = 0; pass < MAX_PASSES || image; pass++) {
3791 		if (!padding && pass >= PADDING_PASSES)
3792 			padding = true;
3793 		proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
3794 		if (proglen <= 0) {
3795 out_image:
3796 			image = NULL;
3797 			if (header) {
3798 				bpf_arch_text_copy(&header->size, &rw_header->size,
3799 						   sizeof(rw_header->size));
3800 				bpf_jit_binary_pack_free(header, rw_header);
3801 			}
3802 			/* Fall back to interpreter mode */
3803 			prog = orig_prog;
3804 			if (extra_pass) {
3805 				prog->bpf_func = NULL;
3806 				prog->jited = 0;
3807 				prog->jited_len = 0;
3808 			}
3809 			goto out_addrs;
3810 		}
3811 		if (image) {
3812 			if (proglen != oldproglen) {
3813 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
3814 				       proglen, oldproglen);
3815 				goto out_image;
3816 			}
3817 			break;
3818 		}
3819 		if (proglen == oldproglen) {
3820 			/*
3821 			 * The number of entries in extable is the number of BPF_LDX
3822 			 * insns that access kernel memory via "pointer to BTF type".
3823 			 * The verifier changed their opcode from LDX|MEM|size
3824 			 * to LDX|PROBE_MEM|size to make JITing easier.
3825 			 */
3826 			u32 align = __alignof__(struct exception_table_entry);
3827 			u32 extable_size = prog->aux->num_exentries *
3828 				sizeof(struct exception_table_entry);
3829 
3830 			/* allocate module memory for x86 insns and extable */
3831 			header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
3832 							   &image, align, &rw_header, &rw_image,
3833 							   jit_fill_hole);
3834 			if (!header) {
3835 				prog = orig_prog;
3836 				goto out_addrs;
3837 			}
3838 			prog->aux->extable = (void *) image + roundup(proglen, align);
3839 		}
3840 		oldproglen = proglen;
3841 		cond_resched();
3842 	}
3843 
3844 	if (bpf_jit_enable > 1)
3845 		bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
3846 
3847 	if (image) {
3848 		if (!prog->is_func || extra_pass) {
3849 			/*
3850 			 * bpf_jit_binary_pack_finalize fails in two scenarios:
3851 			 *   1) header is not pointing to proper module memory;
3852 			 *   2) the arch doesn't support bpf_arch_text_copy().
3853 			 *
3854 			 * Both cases are serious bugs and justify WARN_ON.
3855 			 */
3856 			if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) {
3857 				/* header has been freed */
3858 				header = NULL;
3859 				goto out_image;
3860 			}
3861 
3862 			bpf_tail_call_direct_fixup(prog);
3863 		} else {
3864 			jit_data->addrs = addrs;
3865 			jit_data->ctx = ctx;
3866 			jit_data->proglen = proglen;
3867 			jit_data->image = image;
3868 			jit_data->header = header;
3869 			jit_data->rw_header = rw_header;
3870 		}
3871 
3872 		/*
3873 		 * bpf_prog_update_insn_ptrs() expects addrs to point to the
3874 		 * first byte of each jitted instruction (unlike
3875 		 * bpf_prog_fill_jited_linfo() below, which, for historical
3876 		 * reasons, expects addrs to point to the next instruction).
3877 		 */
3878 		bpf_prog_update_insn_ptrs(prog, addrs, image);
3879 
3880 		/*
3881 		 * ctx.prog_offset is used when CFI preambles put code *before*
3882 		 * the function; see emit_cfi(). For FineIBT specifically, this
3883 		 * code can also be executed, and bpf_prog_kallsyms_add() will
3884 		 * generate an additional symbol to cover it, hence proglen is
3885 		 * also decremented.
3886 		 */
3887 		prog->bpf_func = (void *)image + cfi_get_offset();
3888 		prog->jited = 1;
3889 		prog->jited_len = proglen - cfi_get_offset();
3890 	} else {
3891 		prog = orig_prog;
3892 	}
3893 
3894 	if (!image || !prog->is_func || extra_pass) {
3895 		if (image)
3896 			bpf_prog_fill_jited_linfo(prog, addrs + 1);
3897 out_addrs:
3898 		kvfree(addrs);
3899 		if (!image && priv_stack_ptr) {
3900 			free_percpu(priv_stack_ptr);
3901 			prog->aux->priv_stack_ptr = NULL;
3902 		}
3903 out_priv_stack:
3904 		kfree(jit_data);
3905 		prog->aux->jit_data = NULL;
3906 	}
3907 out:
3908 	if (tmp_blinded)
3909 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
3910 					   tmp : orig_prog);
3911 	return prog;
3912 }
3913 
3914 bool bpf_jit_supports_kfunc_call(void)
3915 {
3916 	return true;
3917 }
3918 
3919 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
3920 {
3921 	if (text_poke_copy(dst, src, len) == NULL)
3922 		return ERR_PTR(-EINVAL);
3923 	return dst;
3924 }
3925 
3926 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
3927 bool bpf_jit_supports_subprog_tailcalls(void)
3928 {
3929 	return true;
3930 }
3931 
3932 bool bpf_jit_supports_percpu_insn(void)
3933 {
3934 	return true;
3935 }
3936 
3937 void bpf_jit_free(struct bpf_prog *prog)
3938 {
3939 	if (prog->jited) {
3940 		struct x64_jit_data *jit_data = prog->aux->jit_data;
3941 		struct bpf_binary_header *hdr;
3942 		void __percpu *priv_stack_ptr;
3943 		int priv_stack_alloc_sz;
3944 
3945 		/*
3946 		 * If we fail the final pass of JIT (from jit_subprogs),
3947 		 * the program may not be finalized yet. Call finalize here
3948 		 * before freeing it.
3949 		 */
3950 		if (jit_data) {
3951 			bpf_jit_binary_pack_finalize(jit_data->header,
3952 						     jit_data->rw_header);
3953 			kvfree(jit_data->addrs);
3954 			kfree(jit_data);
3955 		}
3956 		prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset();
3957 		hdr = bpf_jit_binary_pack_hdr(prog);
3958 		bpf_jit_binary_pack_free(hdr, NULL);
3959 		priv_stack_ptr = prog->aux->priv_stack_ptr;
3960 		if (priv_stack_ptr) {
3961 			priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) +
3962 					      2 * PRIV_STACK_GUARD_SZ;
3963 			priv_stack_check_guard(priv_stack_ptr, priv_stack_alloc_sz, prog);
3964 			free_percpu(prog->aux->priv_stack_ptr);
3965 		}
3966 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
3967 	}
3968 
3969 	bpf_prog_unlock_free(prog);
3970 }
3971 
3972 bool bpf_jit_supports_exceptions(void)
3973 {
3974 	/* We unwind through both kernel frames (starting from within the
3975 	 * bpf_throw call) and BPF frames. Therefore we require the ORC unwinder
3976 	 * to be enabled to walk kernel frames and reach BPF frames in the stack trace.
3977 	 */
3978 	return IS_ENABLED(CONFIG_UNWINDER_ORC);
3979 }
3980 
3981 bool bpf_jit_supports_private_stack(void)
3982 {
3983 	return true;
3984 }
3985 
3986 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3987 {
3988 #if defined(CONFIG_UNWINDER_ORC)
3989 	struct unwind_state state;
3990 	unsigned long addr;
3991 
3992 	for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
3993 	     unwind_next_frame(&state)) {
3994 		addr = unwind_get_return_address(&state);
3995 		if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
3996 			break;
3997 	}
3998 	return;
3999 #endif
4000 }
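/*
 * Editor's sketch of a consume_fn callback as arch_bpf_stack_walk()
 * expects it (dump_frame is hypothetical, not part of this file);
 * returning false stops the walk early:
 *
 *	static bool dump_frame(void *cookie, u64 ip, u64 sp, u64 bp)
 *	{
 *		pr_info("ip=%llx sp=%llx bp=%llx\n", ip, sp, bp);
 *		return true;	// keep walking
 *	}
 *
 *	arch_bpf_stack_walk(dump_frame, NULL);
 */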
4001 
4002 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
4003 			       struct bpf_prog *new, struct bpf_prog *old)
4004 {
4005 	u8 *old_addr, *new_addr, *old_bypass_addr;
4006 	enum bpf_text_poke_type t;
4007 	int ret;
4008 
4009 	old_bypass_addr = old ? NULL : poke->bypass_addr;
4010 	old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
4011 	new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
4012 
4013 	/*
4014 	 * On program loading or teardown, the program's kallsym entry
4015 	 * might not be in place, so we use __bpf_arch_text_poke to skip
4016 	 * the kallsyms check.
4017 	 */
4018 	if (new) {
4019 		t = old_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
4020 		ret = __bpf_arch_text_poke(poke->tailcall_target,
4021 					   t, BPF_MOD_JUMP,
4022 					   old_addr, new_addr);
4023 		BUG_ON(ret < 0);
4024 		if (!old) {
4025 			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
4026 						   BPF_MOD_JUMP, BPF_MOD_NOP,
4027 						   poke->bypass_addr,
4028 						   NULL);
4029 			BUG_ON(ret < 0);
4030 		}
4031 	} else {
4032 		t = old_bypass_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
4033 		ret = __bpf_arch_text_poke(poke->tailcall_bypass,
4034 					   t, BPF_MOD_JUMP, old_bypass_addr,
4035 					   poke->bypass_addr);
4036 		BUG_ON(ret < 0);
4037 		/* Let other CPUs finish executing the program so that none of
4038 		 * them can be exposed to an invalid intermediate state (nop,
4039 		 * stack unwind, nop).
4040 		 */
4041 		if (!ret)
4042 			synchronize_rcu();
4043 		t = old_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
4044 		ret = __bpf_arch_text_poke(poke->tailcall_target,
4045 					   t, BPF_MOD_NOP, old_addr, NULL);
4046 		BUG_ON(ret < 0);
4047 	}
4048 }
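/*
 * Editor's sketch of the two patch sites managed above (illustrative):
 *
 *	tailcall_bypass:  jmp out   <->  nop
 *	tailcall_target:  jmp prog  <->  nop
 *
 * Installing a program patches the target first and then nops the
 * bypass; removal re-installs the bypass jump, waits out other CPUs
 * with synchronize_rcu(), and only then nops the target.
 */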
4049 
4050 bool bpf_jit_supports_arena(void)
4051 {
4052 	return true;
4053 }
4054 
4055 bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
4056 {
4057 	if (!in_arena)
4058 		return true;
4059 	switch (insn->code) {
4060 	case BPF_STX | BPF_ATOMIC | BPF_W:
4061 	case BPF_STX | BPF_ATOMIC | BPF_DW:
4062 		if (insn->imm == (BPF_AND | BPF_FETCH) ||
4063 		    insn->imm == (BPF_OR | BPF_FETCH) ||
4064 		    insn->imm == (BPF_XOR | BPF_FETCH))
4065 			return false;
4066 	}
4067 	return true;
4068 }
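/*
 * Editor's note: the rejected cases above are the atomics x86 has no
 * single instruction for; an arena BPF_AND | BPF_FETCH, for instance,
 * would have to be JITed as a cmpxchg loop, which does not mesh with
 * arena fault handling (assumption based on the emitted code, not on
 * an explicit statement in this file).
 */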
4069 
4070 bool bpf_jit_supports_ptr_xchg(void)
4071 {
4072 	return true;
4073 }
4074 
4075 /* The x86-64 JIT emits its own code to filter user addresses, so return 0 here. */
4076 u64 bpf_arch_uaddress_limit(void)
4077 {
4078 	return 0;
4079 }
4080 
4081 bool bpf_jit_supports_timed_may_goto(void)
4082 {
4083 	return true;
4084 }
4085