xref: /linux/arch/x86/net/bpf_jit_comp.c (revision 8798902f2b8bcae6f90229a1a1496b48ddda2972)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/ftrace.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/unwind.h>
#include <asm/cfi.h>

static bool all_callee_regs_used[4] = {true, true, true, true};

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT5(b1, b2, b3, b4, b5) \
	do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

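/*
 * A quick sanity check of the byte packing above (illustrative note, not
 * part of the original source): EMIT2/EMIT3/EMIT4 pack instruction bytes
 * into a little-endian integer, so the first argument is always the lowest
 * byte written. For example, the prologue below emits
 *
 *	EMIT3(0x48, 0x89, 0xE5);
 *
 * which packs to 0x00E58948 and is stored via a 3-byte write, i.e. the
 * byte sequence 48 89 e5 = 'mov rbp, rsp' in memory.
 */
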
#ifdef CONFIG_X86_KERNEL_IBT
#define EMIT_ENDBR()		EMIT(gen_endbr(), 4)
#define EMIT_ENDBR_POISON()	EMIT(gen_endbr_poison(), 4)
#else
#define EMIT_ENDBR()
#define EMIT_ENDBR_POISON()
#endif

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

/*
 * Let us limit the positive offset to be <= 123.
 * This is to ensure eventual jit convergence for the following patterns:
 * ...
 * pass4, final_proglen=4391:
 *   ...
 *   20e:    48 85 ff                test   rdi,rdi
 *   211:    74 7d                   je     0x290
 *   213:    48 8b 77 00             mov    rsi,QWORD PTR [rdi+0x0]
 *   ...
 *   289:    48 85 ff                test   rdi,rdi
 *   28c:    74 17                   je     0x2a5
 *   28e:    e9 7f ff ff ff          jmp    0x212
 *   293:    bf 03 00 00 00          mov    edi,0x3
 * Note that the insn at 0x211 is a 2-byte cond jump insn with offset 0x7d
 * (+125) and the insn at 0x28e is a 5-byte jmp insn with offset -129.
 *
 * pass5, final_proglen=4392:
 *   ...
 *   20e:    48 85 ff                test   rdi,rdi
 *   211:    0f 84 80 00 00 00       je     0x297
 *   217:    48 8b 77 00             mov    rsi,QWORD PTR [rdi+0x0]
 *   ...
 *   28d:    48 85 ff                test   rdi,rdi
 *   290:    74 1a                   je     0x2ac
 *   292:    eb 84                   jmp    0x218
 *   294:    bf 03 00 00 00          mov    edi,0x3
 * Note that the insn at 0x211 is now a 6-byte cond jump insn, since its
 * offset became 0x80 based on the previous round (0x293 - 0x213 = 0x80).
 * At the same time, the insn at 0x292 is a 2-byte insn since its offset is
 * -124.
 *
 * pass6 will repeat the same code as in pass4 and this will prevent
 * eventual convergence.
 *
 * To fix this issue, we need to break the je (2->6 bytes) <-> jmp
 * (5->2 bytes) cycle above. In this example, a je offset <= 0x7c should
 * work.
 *
 * For other cases, je <-> je needs offset <= 0x7b to avoid the
 * non-convergence issue, while for jmp <-> je and jmp <-> jmp cases, a jmp
 * offset <= 0x7c avoids it.
 *
 * Overall, let us limit the positive offset for an 8bit cond/uncond jmp
 * insn to a maximum of 123 (0x7b). This way, the jit pass can eventually
 * converge.
 */
static bool is_imm8_jmp_offset(int value)
{
	return value <= 123 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jump opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
#define X86_REG_R12 (MAX_BPF_JIT_REG + 3)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is not mapped to any BPF register, since when used
 * as a base address register in load/store instructions it always needs an
 * extra byte of encoding and is callee saved; the JIT reserves it for its
 * own use instead (see X86_REG_R12 below, used e.g. as the arena base).
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by the
 * BPF trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
	[X86_REG_R12] = 4, /* R12 callee saved */
};

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15,
 * which need an extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding.
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(X86_REG_R12) |
			     BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access the x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need an extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(index))
		byte |= 2;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}

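/*
 * Worked example (illustrative, not in the original source): with the
 * reg2hex[] mapping above, EMIT_mov(BPF_REG_1, BPF_REG_2) expands to
 *
 *	EMIT3(add_2mod(0x48, BPF_REG_1, BPF_REG_2), 0x89,
 *	      add_2reg(0xC0, BPF_REG_1, BPF_REG_2));
 *
 * Neither register is an "ereg", so add_2mod() leaves the REX prefix as
 * plain REX.W = 0x48, and add_2reg() builds the ModRM byte
 * 0xC0 + 7 + (6 << 3) = 0xF7. The emitted bytes 48 89 f7 decode as
 * 'mov rdi, rsi'. For r8..r15, add_1mod()/add_2mod()/add_3mod() set the
 * REX.B/REX.X/REX.R bits (1/2/4) to supply the missing 4th register bit.
 */
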
/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,
	[BPF_SUB] = 0x29,
	[BPF_AND] = 0x21,
	[BPF_OR] = 0x09,
	[BPF_XOR] = 0x31,
	[BPF_LSH] = 0xE0,
	[BPF_RSH] = 0xE8,
	[BPF_ARSH] = 0xF8,
};

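/*
 * Note (illustrative): the BPF_ADD..BPF_XOR entries above are genuine
 * one-byte x86 opcodes (e.g. 0x01 = 'add r/m, reg'), while the
 * BPF_LSH/BPF_RSH/BPF_ARSH entries are not opcodes at all -- 0xE0/0xE8/0xF8
 * are ModRM bytes encoding the /4 (shl), /5 (shr) and /7 (sar) opcode
 * extensions of the 0xC1/0xD3 shift group, which the shift emission code
 * further down combines with the destination register.
 */
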
static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill the whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

int bpf_arch_text_invalidate(void *dst, size_t len)
{
	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */

	/*
	 * Program specific offsets of labels in the code; these rely on the
	 * JIT doing at least 2 passes, recording the position on the first
	 * pass, only to generate the correct offset on the second pass.
	 */
	int tail_call_direct_label;
	int tail_call_indirect_label;
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	(12 + ENDBR_INSN_SIZE)
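/*
 * A sketch of where the 12 bytes come from (illustrative; compare with
 * emit_prologue() below). The region a tail call skips is
 *
 *	endbr			(ENDBR_INSN_SIZE, IBT builds only)
 *	nop5			(X86_PATCH_SIZE, trampoline patch site)
 *	xor rax, rax / nop3	(3 bytes, tail_call_cnt init)
 *	push rbp		(1 byte)
 *	mov rbp, rsp		(3 bytes)
 *
 * i.e. 5 + 3 + 1 + 3 = 12 plus the leading ENDBR, so the jump lands on the
 * second ENDBR emitted at the "X86_TAIL_CALL_OFFSET is here" marker in
 * emit_prologue(), which serves as the IBT landing pad.
 */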

static void push_r9(u8 **pprog)
{
	u8 *prog = *pprog;

	EMIT2(0x41, 0x51);   /* push r9 */
	*pprog = prog;
}

static void pop_r9(u8 **pprog)
{
	u8 *prog = *pprog;

	EMIT2(0x41, 0x59);   /* pop r9 */
	*pprog = prog;
}

static void push_r12(u8 **pprog)
{
	u8 *prog = *pprog;

	EMIT2(0x41, 0x54);   /* push r12 */
	*pprog = prog;
}

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}

static void pop_r12(u8 **pprog)
{
	u8 *prog = *pprog;

	EMIT2(0x41, 0x5C);   /* pop r12 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}

static void emit_nops(u8 **pprog, int len)
{
	u8 *prog = *pprog;
	int i, noplen;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(x86_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

/*
 * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
 * in arch/x86/kernel/alternative.c
 */
static int emit_call(u8 **prog, void *func, void *ip);

static void emit_fineibt(u8 **pprog, u8 *ip, u32 hash, int arity)
{
	u8 *prog = *pprog;

	EMIT_ENDBR();
	EMIT1_off32(0x2d, hash);			/* subl $hash, %eax	*/
	if (cfi_bhi) {
		EMIT2(0x2e, 0x2e);			/* cs cs */
		emit_call(&prog, __bhi_args[arity], ip + 11);
	} else {
		EMIT3_off32(0x2e, 0x0f, 0x85, 3);	/* jne.d32,pn 3		*/
	}
	EMIT_ENDBR_POISON();

	*pprog = prog;
}

static void emit_kcfi(u8 **pprog, u32 hash)
{
	u8 *prog = *pprog;

	EMIT1_off32(0xb8, hash);			/* movl $hash, %eax	*/
#ifdef CONFIG_CALL_PADDING
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
	EMIT1(0x90);
#endif
	EMIT_ENDBR();

	*pprog = prog;
}

static void emit_cfi(u8 **pprog, u8 *ip, u32 hash, int arity)
{
	u8 *prog = *pprog;

	switch (cfi_mode) {
	case CFI_FINEIBT:
		emit_fineibt(&prog, ip, hash, arity);
		break;

	case CFI_KCFI:
		emit_kcfi(&prog, hash);
		break;

	default:
		EMIT_ENDBR();
		break;
	}

	*pprog = prog;
}

static void emit_prologue_tail_call(u8 **pprog, bool is_subprog)
{
	u8 *prog = *pprog;

	if (!is_subprog) {
		/* cmp rax, MAX_TAIL_CALL_CNT */
		EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT);
		EMIT2(X86_JA, 6);        /* ja 6 */
		/* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT.
		 * case1: entry of main prog.
		 * case2: tail callee of main prog.
		 */
		EMIT1(0x50);             /* push rax */
		/* Make rax as tail_call_cnt_ptr. */
		EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */
		EMIT2(0xEB, 1);          /* jmp 1 */
		/* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT.
		 * case: tail callee of subprog.
		 */
		EMIT1(0x50);             /* push rax */
		/* push tail_call_cnt_ptr */
		EMIT1(0x50);             /* push rax */
	} else { /* is_subprog */
		/* rax is tail_call_cnt_ptr. */
		EMIT1(0x50);             /* push rax */
		EMIT1(0x50);             /* push rax */
	}

	*pprog = prog;
}

/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u8 *ip, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog,
			  bool is_exception_cb)
{
	u8 *prog = *pprog;

	if (is_subprog) {
		emit_cfi(&prog, ip, cfi_bpf_subprog_hash, 5);
	} else {
		emit_cfi(&prog, ip, cfi_bpf_hash, 1);
	}
	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	emit_nops(&prog, X86_PATCH_SIZE);
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			/* When it's the entry of the whole tailcall context,
			 * zeroing rax means initialising tail_call_cnt.
			 */
			EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */
		else
			/* Keep the same instruction layout. */
			emit_nops(&prog, 3);     /* nop3 */
	}
	/* Exception callback receives FP as third parameter */
	if (is_exception_cb) {
		EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
		EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
		/* The main frame must have exception_boundary as true, so we
		 * first restore those callee-saved regs from stack, before
		 * reusing the stack frame.
		 */
		pop_callee_regs(&prog, all_callee_regs_used);
		pop_r12(&prog);
		/* Reset the stack frame. */
		EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
	} else {
		EMIT1(0x55);             /* push rbp */
		EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	}

	/* X86_TAIL_CALL_OFFSET is here */
	EMIT_ENDBR();

	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		emit_prologue_tail_call(&prog, is_subprog);
	*pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_rsb_call(u8 **pprog, void *func, void *ip)
{
	OPTIMIZER_HIDE_VAR(func);
	ip += x86_call_depth_emit_accounting(pprog, func, ip);
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}

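/*
 * Illustrative example: emit_patch() emits a 5-byte near call/jump whose
 * rel32 operand is relative to the end of the instruction, so for a call
 * at ip = 0x1000 to func = 0x2000 it computes 0x2000 - (0x1000 + 5) =
 * 0xffb and emits e8 fb 0f 00 00. The is_simm32() check rejects targets
 * more than +/-2G away, which a rel32 cannot reach.
 */
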
static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
				enum bpf_text_poke_type new_t,
				void *old_addr, void *new_addr)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_t != BPF_MOD_NOP && old_addr) {
		prog = old_insn;
		ret = old_t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_t != BPF_MOD_NOP && new_addr) {
		prog = new_insn;
		ret = new_t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		smp_text_poke_single(ip, new_insn, X86_PATCH_SIZE, NULL);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
		       enum bpf_text_poke_type new_t, void *old_addr,
		       void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	/*
	 * See emit_prologue(); for IBT builds the trampoline hook is preceded
	 * by an ENDBR instruction.
	 */
	if (is_endbr(ip))
		ip += ENDBR_INSN_SIZE;

	return __bpf_arch_text_poke(ip, old_t, new_t, old_addr, new_addr);
}

#define EMIT_LFENCE()	EMIT3(0x0F, 0xAE, 0xE8)

static void __emit_indirect_jump(u8 **pprog, int reg, bool ereg)
{
	u8 *prog = *pprog;

	if (ereg)
		EMIT1(0x41);

	EMIT2(0xFF, 0xE0 + reg);

	*pprog = prog;
}

static void emit_indirect_jump(u8 **pprog, int bpf_reg, u8 *ip)
{
	u8 *prog = *pprog;
	int reg = reg2hex[bpf_reg];
	bool ereg = is_ereg(bpf_reg);

	if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
		OPTIMIZER_HIDE_VAR(reg);
		emit_jump(&prog, its_static_thunk(reg + 8*ereg), ip);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		EMIT_LFENCE();
		__emit_indirect_jump(&prog, reg, ereg);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
		OPTIMIZER_HIDE_VAR(reg);
		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
			emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg + 8*ereg], ip);
		else
			emit_jump(&prog, &__x86_indirect_thunk_array[reg + 8*ereg], ip);
	} else {
		__emit_indirect_jump(&prog, reg, ereg);
		if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
			EMIT1(0xCC);		/* int3 */
	}

	*pprog = prog;
}

static void emit_return(u8 **pprog, u8 *ip)
{
	u8 *prog = *pprog;

	if (cpu_wants_rethunk()) {
		emit_jump(&prog, x86_return_thunk, ip);
	} else {
		EMIT1(0xC3);		/* ret */
		if (IS_ENABLED(CONFIG_MITIGATION_SLS))
			EMIT1(0xCC);	/* int3 */
	}

	*pprog = prog;
}

#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack)	(-16 - round_up(stack, 8))
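/*
 * Illustrative stack layout after emit_prologue() with a stack_depth-byte
 * BPF stack (assuming the non-subprog case in emit_prologue_tail_call()):
 *
 *	rbp - 0                    saved rbp
 *	rbp - stack_depth          bottom of the BPF stack
 *	rbp - stack_depth - 8      tail_call_cnt (or tcc_ptr for subprogs)
 *	rbp - stack_depth - 16     tail_call_cnt_ptr
 *
 * which is why the tcc pointer is reloaded from
 * [rbp - round_up(stack_depth, 8) - 16] below and in
 * LOAD_TAIL_CALL_CNT_PTR later in this file.
 */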

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
					u8 **pprog, bool *callee_regs_used,
					u32 stack_depth, u8 *ip,
					struct jit_context *ctx)
{
	int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JBE, offset);                   /* jbe out */

	/*
	 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
	EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                   /* jae out */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */

	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JE, offset);                    /* je out */

	/* Inc tail_call_cnt if the slot is populated. */
	EMIT4(0x48, 0x83, 0x00, 0x01);            /* add qword ptr [rax], 1 */

	if (bpf_prog->aux->exception_boundary) {
		pop_callee_regs(&prog, all_callee_regs_used);
		pop_r12(&prog);
	} else {
		pop_callee_regs(&prog, callee_regs_used);
		if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
			pop_r12(&prog);
	}

	/* Pop tail_call_cnt_ptr. */
	EMIT1(0x58);                              /* pop rax */
	/* Pop tail_call_cnt, if it's main prog.
	 * Pop tail_call_cnt_ptr, if it's subprog.
	 */
	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into the next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	emit_indirect_jump(&prog, BPF_REG_4 /* R4 -> rcx */, ip + (prog - start));

	/* out: */
	ctx->tail_call_indirect_label = prog - start;
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
				      struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, u8 *ip,
				      bool *callee_regs_used, u32 stack_depth,
				      struct jit_context *ctx)
{
	int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
	u8 *prog = *pprog, *start = *pprog;
	int offset;

	/*
	 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off);   /* mov rax, qword ptr [rbp - tcc_ptr_off] */
	EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT);   /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */

	offset = ctx->tail_call_direct_label - (prog + 2 - start);
	EMIT2(X86_JAE, offset);                       /* jae out */

	poke->tailcall_bypass = ip + (prog - start);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	/* Inc tail_call_cnt if the slot is populated. */
	EMIT4(0x48, 0x83, 0x00, 0x01);                /* add qword ptr [rax], 1 */

	if (bpf_prog->aux->exception_boundary) {
		pop_callee_regs(&prog, all_callee_regs_used);
		pop_r12(&prog);
	} else {
		pop_callee_regs(&prog, callee_regs_used);
		if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
			pop_r12(&prog);
	}

	/* Pop tail_call_cnt_ptr. */
	EMIT1(0x58);                                  /* pop rax */
	/* Pop tail_call_cnt, if it's main prog.
	 * Pop tail_call_cnt_ptr, if it's subprog.
	 */
	EMIT1(0x58);                                  /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	emit_nops(&prog, X86_PATCH_SIZE);

	/* out: */
	ctx->tail_call_direct_label = prog - start;

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		if (poke->aux && poke->aux != prog->aux)
			continue;

		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_NOP, BPF_MOD_JUMP,
						   NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP, BPF_MOD_NOP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo;
	u8 *prog = *pprog;

	if (is_uimm32(imm64)) {
		/*
		 * For emitting a plain u32, where the sign bit must not be
		 * propagated, LLVM tends to load imm64 over mov32
		 * directly, so we save a couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else if (is_simm32(imm64)) {
		emit_mov_imm32(&prog, true, dst_reg, imm32_lo);
	} else {
		/* movabsq rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

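/*
 * Illustrative size comparison for emit_mov_imm64(): loading 0x12345678
 * uses the 5-byte 'mov eax, 0x12345678' (the upper 32 bits are zeroed
 * implicitly), loading -1 uses the 7-byte sign-extending 'mov rax, -1'
 * (REX.W C7 /0 imm32), and only a constant that is neither a u32 nor an
 * s32, e.g. 0x1234567800000000, falls back to the 10-byte 'movabs' form
 * (REX.W B8+r imm64).
 */
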
static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
			   u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* movs[b,w,l]q dst, src */
		if (num_bits == 8)
			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
			      add_2reg(0xC0, src_reg, dst_reg));
		else if (num_bits == 16)
			EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
			      add_2reg(0xC0, src_reg, dst_reg));
		else if (num_bits == 32)
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
			      add_2reg(0xC0, src_reg, dst_reg));
	} else {
		/* movs[b,w]l dst, src */
		if (num_bits == 8) {
			EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
			      add_2reg(0xC0, src_reg, dst_reg));
		} else if (num_bits == 16) {
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, src_reg, dst_reg));
			EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
			      add_2reg(0xC0, src_reg, dst_reg));
		}
	}

	*pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		/* 1-byte signed displacement.
		 *
		 * If off == 0 we could skip this and save one extra byte, but
		 * the special case of x86 R13, which always needs an offset,
		 * is not worth the hassle.
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* 4-byte signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
	*pprog = prog;
}

static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
	} else {
		EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
	}
	*pprog = prog;
}

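/*
 * Illustrative encoding note: emit_insn_suffix() chooses between the
 * mod=01 form (0x40 | reg bits, one disp8 byte) and the mod=10 form
 * (0x80 | reg bits, four disp32 bytes), so e.g. a load at offset 8 costs
 * three bytes less than one at offset 0x1000. emit_insn_suffix_SIB()
 * instead sets rm=100 (the 0x44/0x84 bases) to request a SIB byte, which
 * encodes base + index with scale 1 for the arena [reg + r12 + off] forms.
 */
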
/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
	*pprog = prog;
}

/*
 * Similar version of maybe_emit_mod() for a single register
 */
static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_1mod(0x48, reg));
	else if (is_ereg(reg))
		EMIT1(add_1mod(0x40, reg));
	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}

/* LDSX: dst_reg = *(s8*)(src_reg + off) */
static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movsx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
		break;
	case BPF_H:
		/* Emit 'movsx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
		break;
	case BPF_W:
		/* Emit 'movsx rax, dword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}

static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* movzx rax, byte ptr [rax + r12 + off] */
		EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* movzx rax, word ptr [rax + r12 + off] */
		EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* mov eax, dword ptr [rax + r12 + off] */
		EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B);
		break;
	case BPF_DW:
		/* mov rax, qword ptr [rax + r12 + off] */
		EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B);
		break;
	}
	emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
	*pprog = prog;
}

static void emit_ldsx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* movsx rax, byte ptr [rax + r12 + off] */
		EMIT3(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x0F, 0xBE);
		break;
	case BPF_H:
		/* movsx rax, word ptr [rax + r12 + off] */
		EMIT3(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x0F, 0xBF);
		break;
	case BPF_W:
		/* movsx rax, dword ptr [rax + r12 + off] */
		EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x63);
		break;
	}
	emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
	*pprog = prog;
}

static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
}

static void emit_ldsx_r12(u8 **prog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	emit_ldsx_index(prog, size, dst_reg, src_reg, X86_REG_R12, off);
}

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	emit_insn_suffix(&prog, dst_reg, src_reg, off);
	*pprog = prog;
}

/* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */
static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* mov byte ptr [rax + r12 + off], al */
		EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88);
		break;
	case BPF_H:
		/* mov word ptr [rax + r12 + off], ax */
		EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
		break;
	case BPF_W:
		/* mov dword ptr [rax + r12 + off], eax */
		EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
		break;
	case BPF_DW:
		/* mov qword ptr [rax + r12 + off], rax */
		EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89);
		break;
	}
	emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
	*pprog = prog;
}

static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
}

/* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */
static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* mov byte ptr [rax + r12 + off], imm8 */
		EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6);
		break;
	case BPF_H:
		/* mov word ptr [rax + r12 + off], imm16 */
		EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
		break;
	case BPF_W:
		/* mov dword ptr [rax + r12 + off], imm32 */
		EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
		break;
	case BPF_DW:
		/* mov qword ptr [rax + r12 + off], imm32 */
		EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7);
		break;
	}
	emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off);
	EMIT(imm, bpf_size_to_x86_bytes(size));
	*pprog = prog;
}

static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
{
	emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
}

static void emit_store_stack_imm64(u8 **pprog, int reg, int stack_off, u64 imm64)
{
	/*
	 * mov reg, imm64
	 * mov QWORD PTR [rbp + stack_off], reg
	 */
	emit_mov_imm64(pprog, reg, imm64 >> 32, (u32) imm64);
	emit_stx(pprog, BPF_DW, BPF_REG_FP, reg, stack_off);
}

static int emit_atomic_rmw(u8 **pprog, u32 atomic_op,
			   u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
	u8 *prog = *pprog;

	if (atomic_op != BPF_XCHG)
		EMIT1(0xF0); /* lock prefix */
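	/*
	 * Illustrative note: XCHG with a memory operand is implicitly
	 * locked on x86, which is why the explicit lock prefix is skipped
	 * for BPF_XCHG here and in emit_atomic_rmw_index() below.
	 */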

	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}

	emit_insn_suffix(&prog, dst_reg, src_reg, off);

	*pprog = prog;
	return 0;
}

static int emit_atomic_rmw_index(u8 **pprog, u32 atomic_op, u32 size,
				 u32 dst_reg, u32 src_reg, u32 index_reg,
				 int off)
{
	u8 *prog = *pprog;

	if (atomic_op != BPF_XCHG)
		EMIT1(0xF0); /* lock prefix */

	switch (size) {
	case BPF_W:
		EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg));
		break;
	case BPF_DW:
		EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
		break;
	default:
		pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
		return -EFAULT;
	}

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}
	emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
	*pprog = prog;
	return 0;
}

static int emit_atomic_ld_st(u8 **pprog, u32 atomic_op, u32 dst_reg,
			     u32 src_reg, s16 off, u8 bpf_size)
{
	switch (atomic_op) {
	case BPF_LOAD_ACQ:
		/* dst_reg = smp_load_acquire(src_reg + off16) */
		emit_ldx(pprog, bpf_size, dst_reg, src_reg, off);
		break;
	case BPF_STORE_REL:
		/* smp_store_release(dst_reg + off16, src_reg) */
		emit_stx(pprog, bpf_size, dst_reg, src_reg, off);
		break;
	default:
		pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
		       atomic_op);
		return -EFAULT;
	}

	return 0;
}

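/*
 * Note (illustrative): x86-64 is strongly ordered (TSO), so a plain MOV
 * already provides load-acquire/store-release semantics; this is why
 * BPF_LOAD_ACQ and BPF_STORE_REL above and below compile down to the
 * ordinary emit_ldx()/emit_stx() forms with no extra fence.
 */
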
static int emit_atomic_ld_st_index(u8 **pprog, u32 atomic_op, u32 size,
				   u32 dst_reg, u32 src_reg, u32 index_reg,
				   int off)
{
	switch (atomic_op) {
	case BPF_LOAD_ACQ:
		/* dst_reg = smp_load_acquire(src_reg + idx_reg + off16) */
		emit_ldx_index(pprog, size, dst_reg, src_reg, index_reg, off);
		break;
	case BPF_STORE_REL:
		/* smp_store_release(dst_reg + idx_reg + off16, src_reg) */
		emit_stx_index(pprog, size, dst_reg, src_reg, index_reg, off);
		break;
	default:
		pr_err("bpf_jit: unknown atomic load/store opcode %02x\n",
		       atomic_op);
		return -EFAULT;
	}

	return 0;
}

/*
 * Metadata encoding for exception handling in JITed code.
 *
 * Format of `fixup` and `data` fields in `struct exception_table_entry`:
 *
 * Bit layout of `fixup` (32-bit):
 *
 * +-----------+--------+-----------+---------+----------+
 * | 31        | 30-24  |   23-16   |   15-8  |    7-0   |
 * |           |        |           |         |          |
 * | ARENA_ACC | Unused | ARENA_REG | DST_REG | INSN_LEN |
 * +-----------+--------+-----------+---------+----------+
 *
 * - INSN_LEN (8 bits): Length of faulting insn (max x86 insn = 15 bytes (fits in 8 bits)).
 * - DST_REG  (8 bits): Offset of dst_reg from reg2pt_regs[] (max offset = 112 (fits in 8 bits)).
 *                      This is set to DONT_CLEAR if the insn is a store.
 * - ARENA_REG (8 bits): Offset of the register that is used to calculate the
 *                       address for load/store when accessing the arena region.
 * - ARENA_ACCESS (1 bit): This bit is set when the faulting instruction accessed the arena region.
 *
 * Bit layout of `data` (32-bit):
 *
 * +--------------+--------+--------------+
 * |	31-16	  |  15-8  |     7-0      |
 * |              |	   |              |
 * | ARENA_OFFSET | Unused |  EX_TYPE_BPF |
 * +--------------+--------+--------------+
 *
 * - ARENA_OFFSET (16 bits): Offset used to calculate the address for load/store when
 *                           accessing the arena region.
 */

#define DONT_CLEAR 1
#define FIXUP_INSN_LEN_MASK	GENMASK(7, 0)
#define FIXUP_REG_MASK		GENMASK(15, 8)
#define FIXUP_ARENA_REG_MASK	GENMASK(23, 16)
#define FIXUP_ARENA_ACCESS	BIT(31)
#define DATA_ARENA_OFFSET_MASK	GENMASK(31, 16)

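/*
 * Worked example (illustrative; the actual encoding happens at the
 * load/store emission sites): for a 4-byte arena load into BPF R0 via
 * 'rax', the JIT would build something like
 *
 *	fixup = 4 |					   (INSN_LEN)
 *		(offsetof(struct pt_regs, ax) << 8) |	   (DST_REG)
 *		(offsetof(struct pt_regs, r12) << 16) |	   (ARENA_REG)
 *		FIXUP_ARENA_ACCESS;
 *	data  = EX_TYPE_BPF | (off << 16);		   (ARENA_OFFSET)
 *
 * and ex_handler_bpf() below then reports the arena violation, zeroes
 * regs->ax and skips the 4-byte instruction.
 */
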
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
	u32 reg = FIELD_GET(FIXUP_REG_MASK, x->fixup);
	u32 insn_len = FIELD_GET(FIXUP_INSN_LEN_MASK, x->fixup);
	bool is_arena = !!(x->fixup & FIXUP_ARENA_ACCESS);
	bool is_write = (reg == DONT_CLEAR);
	unsigned long addr;
	s16 off;
	u32 arena_reg;

	if (is_arena) {
		arena_reg = FIELD_GET(FIXUP_ARENA_REG_MASK, x->fixup);
		off = FIELD_GET(DATA_ARENA_OFFSET_MASK, x->data);
		addr = *(unsigned long *)((void *)regs + arena_reg) + off;
		bpf_prog_report_arena_violation(is_write, addr, regs->ip);
	}

	/* jump over faulting load and clear dest register */
	if (reg != DONT_CLEAR)
		*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += insn_len;

	return true;
}

static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
			     bool *regs_used)
{
	int i;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
			regs_used[0] = true;
		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
			regs_used[1] = true;
		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
			regs_used[2] = true;
		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
			regs_used[3] = true;
	}
}

/* emit the 3-byte VEX prefix
 *
 * r: same as rex.r, extra bit for ModRM reg field
 * x: same as rex.x, extra bit for SIB index field
 * b: same as rex.b, extra bit for ModRM r/m, or SIB base
 * m: opcode map select, encoding escape bytes e.g. 0x0f38
 * w: same as rex.w (32 bit or 64 bit) or opcode specific
 * src_reg2: additional source reg (encoded as BPF reg)
 * l: vector length (128 bit or 256 bit) or reserved
 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
 */
static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
		      bool w, u8 src_reg2, bool l, u8 pp)
{
	u8 *prog = *pprog;
	const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
	u8 b1, b2;
	u8 vvvv = reg2hex[src_reg2];

	/* reg2hex gives only the lower 3 bits of vvvv */
	if (is_ereg(src_reg2))
		vvvv |= 1 << 3;

	/*
	 * 2nd byte of 3-byte VEX prefix
	 * ~ means bit inverted encoding
	 *
	 *    7                           0
	 *  +---+---+---+---+---+---+---+---+
	 *  |~R |~X |~B |         m         |
	 *  +---+---+---+---+---+---+---+---+
	 */
	b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
	/*
	 * 3rd byte of 3-byte VEX prefix
	 *
	 *    7                           0
	 *  +---+---+---+---+---+---+---+---+
	 *  | W |     ~vvvv     | L |   pp  |
	 *  +---+---+---+---+---+---+---+---+
	 */
	b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);

	EMIT3(b0, b1, b2);
	*pprog = prog;
}

/* emit BMI2 shift instruction */
static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
{
	u8 *prog = *pprog;
	bool r = is_ereg(dst_reg);
	u8 m = 2; /* escape code 0f38 */

	emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
	EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
	*pprog = prog;
}

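/*
 * Illustrative example: emit_shiftx() builds a BMI2 shift such as
 * 'shlx rbx, rbx, rcx' as a 3-byte VEX prefix (map 0x0f38, W=1 for the
 * 64-bit form, vvvv = the shift-amount register, pp selecting
 * shlx/sarx/shrx) followed by opcode 0xf7 and a ModRM byte with dst_reg
 * as both reg and r/m -- which is why the 'op' argument here is really
 * the VEX pp field, not an opcode byte.
 */
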
static void emit_priv_frame_ptr(u8 **pprog, void __percpu *priv_frame_ptr)
{
	u8 *prog = *pprog;

	/* movabs r9, priv_frame_ptr */
	emit_mov_imm64(&prog, X86_REG_R9, (__force long) priv_frame_ptr >> 32,
		       (u32) (__force long) priv_frame_ptr);

#ifdef CONFIG_SMP
	/* add <r9>, gs:[<off>] */
	EMIT2(0x65, 0x4c);
	EMIT3(0x03, 0x0c, 0x25);
	EMIT((u32)(unsigned long)&this_cpu_off, 4);
#endif

	*pprog = prog;
}

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))

#define __LOAD_TCC_PTR(off)			\
	EMIT3_off32(0x48, 0x8B, 0x85, off)
/* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */
#define LOAD_TAIL_CALL_CNT_PTR(stack)				\
	__LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))

/* Memory size/value to protect private stack overflow/underflow */
#define PRIV_STACK_GUARD_SZ    8
#define PRIV_STACK_GUARD_VAL   0xEB9F12345678eb9fULL

static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip,
				    struct bpf_prog *bpf_prog)
{
	u8 *prog = *pprog;
	u8 *func;

	if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) {
		/* The clearing sequence clobbers eax and ecx. */
		EMIT1(0x50); /* push rax */
		EMIT1(0x51); /* push rcx */
		ip += 2;

		func = (u8 *)clear_bhb_loop;
		ip += x86_call_depth_emit_accounting(&prog, func, ip);

		if (emit_call(&prog, func, ip))
			return -EINVAL;
		EMIT1(0x59); /* pop rcx */
		EMIT1(0x58); /* pop rax */
	}
	/* Insert IBHF instruction */
	if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) &&
	     cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) ||
	    cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) {
		/*
		 * Add an Indirect Branch History Fence (IBHF). IBHF acts as a
		 * fence preventing branch history from before the fence from
		 * affecting indirect branches after the fence. This is
		 * specifically used in cBPF jitted code to prevent Intra-mode
		 * BHI attacks. The IBHF instruction is designed to be a NOP on
		 * hardware that doesn't need or support it.  The REP and REX.W
		 * prefixes are required by the microcode, and they also ensure
		 * that the NOP is unlikely to be used in existing code.
		 *
		 * IBHF is not a valid instruction in 32-bit mode.
		 */
		EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */
	}
	*pprog = prog;
	return 0;
}

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
	struct bpf_insn *insn = bpf_prog->insnsi;
	bool callee_regs_used[4] = {};
	int insn_cnt = bpf_prog->len;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	void __percpu *priv_frame_ptr = NULL;
	u64 arena_vm_start, user_vm_start;
	void __percpu *priv_stack_ptr;
	int i, excnt = 0;
	int ilen, proglen = 0;
	u8 *prog = temp;
	u32 stack_depth;
	int err;

	stack_depth = bpf_prog->aux->stack_depth;
	priv_stack_ptr = bpf_prog->aux->priv_stack_ptr;
	if (priv_stack_ptr) {
		priv_frame_ptr = priv_stack_ptr + PRIV_STACK_GUARD_SZ + round_up(stack_depth, 8);
		stack_depth = 0;
	}

	arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
	user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);

	detect_reg_usage(insn, insn_cnt, callee_regs_used);

	emit_prologue(&prog, image, stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);

	bpf_prog->aux->ksym.fp_start = prog - temp;

	/* Exception callback will clobber callee regs for its own use, and
	 * restore the original callee regs from main prog's stack frame.
	 */
	if (bpf_prog->aux->exception_boundary) {
		/* We also need to save r12, which is not mapped to any BPF
		 * register, as we throw after entry into the kernel, which may
		 * overwrite r12.
		 */
		push_r12(&prog);
		push_callee_regs(&prog, all_callee_regs_used);
	} else {
		if (arena_vm_start)
			push_r12(&prog);
		push_callee_regs(&prog, callee_regs_used);
	}
	if (arena_vm_start)
		emit_mov_imm64(&prog, X86_REG_R12,
			       arena_vm_start >> 32, (u32) arena_vm_start);

	if (priv_frame_ptr)
		emit_priv_frame_ptr(&prog, priv_frame_ptr);

	ilen = prog - temp;
	if (rw_image)
		memcpy(rw_image + proglen, temp, ilen);
	proglen += ilen;
	addrs[0] = proglen;
	prog = temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		u8 *start_of_ldx;
		s64 jmp_offset;
		s16 insn_off;
		u8 jmp_cond;
		u8 *func;
		int nops;

		if (priv_frame_ptr) {
			if (src_reg == BPF_REG_FP)
				src_reg = X86_REG_R9;

			if (dst_reg == BPF_REG_FP)
				dst_reg = X86_REG_R9;
		}

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);
			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
			if (insn_is_cast_user(insn)) {
				if (dst_reg != src_reg)
					/* 32-bit mov */
					emit_mov_reg(&prog, false, dst_reg, src_reg);
				/* shl dst_reg, 32 */
				maybe_emit_1mod(&prog, dst_reg, true);
				EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32);

				/* or dst_reg, user_vm_start */
				maybe_emit_1mod(&prog, dst_reg, true);
				if (is_axreg(dst_reg))
					EMIT1_off32(0x0D,  user_vm_start >> 32);
				else
					EMIT2_off32(0x81, add_1reg(0xC8, dst_reg),  user_vm_start >> 32);

				/* rol dst_reg, 32 */
				maybe_emit_1mod(&prog, dst_reg, true);
				EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32);

				/* xor r11, r11 */
				EMIT3(0x4D, 0x31, 0xDB);

				/* test dst_reg32, dst_reg32; check if the lower 32 bits are zero */
				maybe_emit_mod(&prog, dst_reg, dst_reg, false);
				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));

				/* cmove r11, dst_reg; if so, set dst_reg to zero */
				/* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */
				maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
				EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
				break;
			} else if (insn_is_mov_percpu_addr(insn)) {
				/* mov <dst>, <src> (if necessary) */
				EMIT_mov(dst_reg, src_reg);
#ifdef CONFIG_SMP
				/* add <dst>, gs:[<off>] */
				EMIT2(0x65, add_1mod(0x48, dst_reg));
				EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25);
				EMIT((u32)(unsigned long)&this_cpu_off, 4);
#endif
				break;
			}
			fallthrough;
		case BPF_ALU | BPF_MOV | BPF_X:
			if (insn->off == 0)
				emit_mov_reg(&prog,
					     BPF_CLASS(insn->code) == BPF_ALU64,
					     dst_reg, src_reg);
			else
				emit_movsx_reg(&prog, insn->off,
					       BPF_CLASS(insn->code) == BPF_ALU64,
					       dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

1827 		case BPF_ALU | BPF_ADD | BPF_K:
1828 		case BPF_ALU | BPF_SUB | BPF_K:
1829 		case BPF_ALU | BPF_AND | BPF_K:
1830 		case BPF_ALU | BPF_OR | BPF_K:
1831 		case BPF_ALU | BPF_XOR | BPF_K:
1832 		case BPF_ALU64 | BPF_ADD | BPF_K:
1833 		case BPF_ALU64 | BPF_SUB | BPF_K:
1834 		case BPF_ALU64 | BPF_AND | BPF_K:
1835 		case BPF_ALU64 | BPF_OR | BPF_K:
1836 		case BPF_ALU64 | BPF_XOR | BPF_K:
1837 			maybe_emit_1mod(&prog, dst_reg,
1838 					BPF_CLASS(insn->code) == BPF_ALU64);
1839 
1840 			/*
			 * b3 holds the 'normal' opcode; b2 is the short form,
			 * only valid when dst is eax/rax.
1843 			 */
1844 			switch (BPF_OP(insn->code)) {
1845 			case BPF_ADD:
1846 				b3 = 0xC0;
1847 				b2 = 0x05;
1848 				break;
1849 			case BPF_SUB:
1850 				b3 = 0xE8;
1851 				b2 = 0x2D;
1852 				break;
1853 			case BPF_AND:
1854 				b3 = 0xE0;
1855 				b2 = 0x25;
1856 				break;
1857 			case BPF_OR:
1858 				b3 = 0xC8;
1859 				b2 = 0x0D;
1860 				break;
1861 			case BPF_XOR:
1862 				b3 = 0xF0;
1863 				b2 = 0x35;
1864 				break;
1865 			}
1866 
1867 			if (is_imm8(imm32))
1868 				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1869 			else if (is_axreg(dst_reg))
1870 				EMIT1_off32(b2, imm32);
1871 			else
1872 				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1873 			break;
1874 
1875 		case BPF_ALU64 | BPF_MOV | BPF_K:
1876 		case BPF_ALU | BPF_MOV | BPF_K:
1877 			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1878 				       dst_reg, imm32);
1879 			break;
1880 
1881 		case BPF_LD | BPF_IMM | BPF_DW:
1882 			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
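			/* BPF_LD_IMM64 spans two BPF insns; consume the second half. */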
1883 			insn++;
1884 			i++;
1885 			break;
1886 
1887 			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1888 		case BPF_ALU | BPF_MOD | BPF_X:
1889 		case BPF_ALU | BPF_DIV | BPF_X:
1890 		case BPF_ALU | BPF_MOD | BPF_K:
1891 		case BPF_ALU | BPF_DIV | BPF_K:
1892 		case BPF_ALU64 | BPF_MOD | BPF_X:
1893 		case BPF_ALU64 | BPF_DIV | BPF_X:
1894 		case BPF_ALU64 | BPF_MOD | BPF_K:
1895 		case BPF_ALU64 | BPF_DIV | BPF_K: {
1896 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1897 
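			/*
			 * Illustrative emitted sequence for an unsigned 64-bit
			 * "r1 /= r2" (dst_reg in rdi, src_reg in rsi):
			 *
			 *   push rax
			 *   push rdx
			 *   mov  rax, rdi
			 *   xor  edx, edx
			 *   div  rsi
			 *   mov  rdi, rax
			 *   pop  rdx
			 *   pop  rax
			 */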
1898 			if (dst_reg != BPF_REG_0)
1899 				EMIT1(0x50); /* push rax */
1900 			if (dst_reg != BPF_REG_3)
1901 				EMIT1(0x52); /* push rdx */
1902 
1903 			if (BPF_SRC(insn->code) == BPF_X) {
1904 				if (src_reg == BPF_REG_0 ||
1905 				    src_reg == BPF_REG_3) {
1906 					/* mov r11, src_reg */
1907 					EMIT_mov(AUX_REG, src_reg);
1908 					src_reg = AUX_REG;
1909 				}
1910 			} else {
1911 				/* mov r11, imm32 */
1912 				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1913 				src_reg = AUX_REG;
1914 			}
1915 
1916 			if (dst_reg != BPF_REG_0)
1917 				/* mov rax, dst_reg */
1918 				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1919 
1920 			if (insn->off == 0) {
1921 				/*
1922 				 * xor edx, edx
1923 				 * equivalent to 'xor rdx, rdx', but one byte less
1924 				 */
1925 				EMIT2(0x31, 0xd2);
1926 
1927 				/* div src_reg */
1928 				maybe_emit_1mod(&prog, src_reg, is64);
1929 				EMIT2(0xF7, add_1reg(0xF0, src_reg));
1930 			} else {
1931 				if (BPF_CLASS(insn->code) == BPF_ALU)
1932 					EMIT1(0x99); /* cdq */
1933 				else
1934 					EMIT2(0x48, 0x99); /* cqo */
1935 
1936 				/* idiv src_reg */
1937 				maybe_emit_1mod(&prog, src_reg, is64);
1938 				EMIT2(0xF7, add_1reg(0xF8, src_reg));
1939 			}
1940 
1941 			if (BPF_OP(insn->code) == BPF_MOD &&
1942 			    dst_reg != BPF_REG_3)
1943 				/* mov dst_reg, rdx */
1944 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1945 			else if (BPF_OP(insn->code) == BPF_DIV &&
1946 				 dst_reg != BPF_REG_0)
1947 				/* mov dst_reg, rax */
1948 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1949 
1950 			if (dst_reg != BPF_REG_3)
1951 				EMIT1(0x5A); /* pop rdx */
1952 			if (dst_reg != BPF_REG_0)
1953 				EMIT1(0x58); /* pop rax */
1954 			break;
1955 		}
1956 
1957 		case BPF_ALU | BPF_MUL | BPF_K:
1958 		case BPF_ALU64 | BPF_MUL | BPF_K:
1959 			maybe_emit_mod(&prog, dst_reg, dst_reg,
1960 				       BPF_CLASS(insn->code) == BPF_ALU64);
1961 
1962 			if (is_imm8(imm32))
1963 				/* imul dst_reg, dst_reg, imm8 */
1964 				EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1965 				      imm32);
1966 			else
1967 				/* imul dst_reg, dst_reg, imm32 */
1968 				EMIT2_off32(0x69,
1969 					    add_2reg(0xC0, dst_reg, dst_reg),
1970 					    imm32);
1971 			break;
1972 
1973 		case BPF_ALU | BPF_MUL | BPF_X:
1974 		case BPF_ALU64 | BPF_MUL | BPF_X:
1975 			maybe_emit_mod(&prog, src_reg, dst_reg,
1976 				       BPF_CLASS(insn->code) == BPF_ALU64);
1977 
1978 			/* imul dst_reg, src_reg */
1979 			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1980 			break;
1981 
1982 			/* Shifts */
1983 		case BPF_ALU | BPF_LSH | BPF_K:
1984 		case BPF_ALU | BPF_RSH | BPF_K:
1985 		case BPF_ALU | BPF_ARSH | BPF_K:
1986 		case BPF_ALU64 | BPF_LSH | BPF_K:
1987 		case BPF_ALU64 | BPF_RSH | BPF_K:
1988 		case BPF_ALU64 | BPF_ARSH | BPF_K:
1989 			maybe_emit_1mod(&prog, dst_reg,
1990 					BPF_CLASS(insn->code) == BPF_ALU64);
1991 
1992 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1993 			if (imm32 == 1)
1994 				EMIT2(0xD1, add_1reg(b3, dst_reg));
1995 			else
1996 				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1997 			break;
1998 
1999 		case BPF_ALU | BPF_LSH | BPF_X:
2000 		case BPF_ALU | BPF_RSH | BPF_X:
2001 		case BPF_ALU | BPF_ARSH | BPF_X:
2002 		case BPF_ALU64 | BPF_LSH | BPF_X:
2003 		case BPF_ALU64 | BPF_RSH | BPF_X:
2004 		case BPF_ALU64 | BPF_ARSH | BPF_X:
2005 			/* BMI2 shifts aren't better when shift count is already in rcx */
2006 			if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
2007 				/* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
2008 				bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
2009 				u8 op;
2010 
2011 				switch (BPF_OP(insn->code)) {
2012 				case BPF_LSH:
2013 					op = 1; /* prefix 0x66 */
2014 					break;
2015 				case BPF_RSH:
2016 					op = 3; /* prefix 0xf2 */
2017 					break;
2018 				case BPF_ARSH:
2019 					op = 2; /* prefix 0xf3 */
2020 					break;
2021 				}
2022 
2023 				emit_shiftx(&prog, dst_reg, src_reg, w, op);
2024 
2025 				break;
2026 			}
2027 
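			/* Legacy path: the shift count must live in rcx for the
			 * shl/shr/sar %cl forms, so shuffle registers around the
			 * shift and restore them afterwards.
			 */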
2028 			if (src_reg != BPF_REG_4) { /* common case */
2029 				/* Check for bad case when dst_reg == rcx */
2030 				if (dst_reg == BPF_REG_4) {
2031 					/* mov r11, dst_reg */
2032 					EMIT_mov(AUX_REG, dst_reg);
2033 					dst_reg = AUX_REG;
2034 				} else {
2035 					EMIT1(0x51); /* push rcx */
2036 				}
2037 				/* mov rcx, src_reg */
2038 				EMIT_mov(BPF_REG_4, src_reg);
2039 			}
2040 
2041 			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
2042 			maybe_emit_1mod(&prog, dst_reg,
2043 					BPF_CLASS(insn->code) == BPF_ALU64);
2044 
2045 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
2046 			EMIT2(0xD3, add_1reg(b3, dst_reg));
2047 
2048 			if (src_reg != BPF_REG_4) {
2049 				if (insn->dst_reg == BPF_REG_4)
2050 					/* mov dst_reg, r11 */
2051 					EMIT_mov(insn->dst_reg, AUX_REG);
2052 				else
2053 					EMIT1(0x59); /* pop rcx */
2054 			}
2055 
2056 			break;
2057 
2058 		case BPF_ALU | BPF_END | BPF_FROM_BE:
2059 		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
2060 			switch (imm32) {
2061 			case 16:
2062 				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
2063 				EMIT1(0x66);
2064 				if (is_ereg(dst_reg))
2065 					EMIT1(0x41);
2066 				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
2067 
2068 				/* Emit 'movzwl eax, ax' */
2069 				if (is_ereg(dst_reg))
2070 					EMIT3(0x45, 0x0F, 0xB7);
2071 				else
2072 					EMIT2(0x0F, 0xB7);
2073 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
2074 				break;
2075 			case 32:
2076 				/* Emit 'bswap eax' to swap lower 4 bytes */
2077 				if (is_ereg(dst_reg))
2078 					EMIT2(0x41, 0x0F);
2079 				else
2080 					EMIT1(0x0F);
2081 				EMIT1(add_1reg(0xC8, dst_reg));
2082 				break;
2083 			case 64:
2084 				/* Emit 'bswap rax' to swap 8 bytes */
2085 				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
2086 				      add_1reg(0xC8, dst_reg));
2087 				break;
2088 			}
2089 			break;
2090 
2091 		case BPF_ALU | BPF_END | BPF_FROM_LE:
2092 			switch (imm32) {
2093 			case 16:
2094 				/*
				 * Emit 'movzwl eax, ax' to zero-extend the lower
				 * 16 bits into 64 bits
2097 				 */
2098 				if (is_ereg(dst_reg))
2099 					EMIT3(0x45, 0x0F, 0xB7);
2100 				else
2101 					EMIT2(0x0F, 0xB7);
2102 				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
2103 				break;
2104 			case 32:
				/* Emit 'mov eax, eax' to clear the upper 32 bits */
2106 				if (is_ereg(dst_reg))
2107 					EMIT1(0x45);
2108 				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
2109 				break;
2110 			case 64:
2111 				/* nop */
2112 				break;
2113 			}
2114 			break;
2115 
2116 			/* speculation barrier */
2117 		case BPF_ST | BPF_NOSPEC:
2118 			EMIT_LFENCE();
2119 			break;
2120 
2121 			/* ST: *(u8*)(dst_reg + off) = imm */
2122 		case BPF_ST | BPF_MEM | BPF_B:
2123 			if (is_ereg(dst_reg))
2124 				EMIT2(0x41, 0xC6);
2125 			else
2126 				EMIT1(0xC6);
2127 			goto st;
2128 		case BPF_ST | BPF_MEM | BPF_H:
2129 			if (is_ereg(dst_reg))
2130 				EMIT3(0x66, 0x41, 0xC7);
2131 			else
2132 				EMIT2(0x66, 0xC7);
2133 			goto st;
2134 		case BPF_ST | BPF_MEM | BPF_W:
2135 			if (is_ereg(dst_reg))
2136 				EMIT2(0x41, 0xC7);
2137 			else
2138 				EMIT1(0xC7);
2139 			goto st;
2140 		case BPF_ST | BPF_MEM | BPF_DW:
2141 			EMIT2(add_1mod(0x48, dst_reg), 0xC7);
2142 
2143 st:			if (is_imm8(insn->off))
2144 				EMIT2(add_1reg(0x40, dst_reg), insn->off);
2145 			else
2146 				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
2147 
2148 			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
2149 			break;
2150 
2151 			/* STX: *(u8*)(dst_reg + off) = src_reg */
2152 		case BPF_STX | BPF_MEM | BPF_B:
2153 		case BPF_STX | BPF_MEM | BPF_H:
2154 		case BPF_STX | BPF_MEM | BPF_W:
2155 		case BPF_STX | BPF_MEM | BPF_DW:
2156 			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2157 			break;
2158 
2159 		case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
2160 		case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
2161 		case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
2162 		case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
2163 			start_of_ldx = prog;
2164 			emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm);
2165 			goto populate_extable;
2166 
2167 			/* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */
2168 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
2169 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
2170 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
2171 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
2172 		case BPF_LDX | BPF_PROBE_MEM32SX | BPF_B:
2173 		case BPF_LDX | BPF_PROBE_MEM32SX | BPF_H:
2174 		case BPF_LDX | BPF_PROBE_MEM32SX | BPF_W:
2175 		case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
2176 		case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
2177 		case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
2178 		case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
2179 			start_of_ldx = prog;
2180 			if (BPF_CLASS(insn->code) == BPF_LDX) {
2181 				if (BPF_MODE(insn->code) == BPF_PROBE_MEM32SX)
2182 					emit_ldsx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2183 				else
2184 					emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2185 			} else {
2186 				emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2187 			}
2188 populate_extable:
2189 			{
2190 				struct exception_table_entry *ex;
2191 				u8 *_insn = image + proglen + (start_of_ldx - temp);
2192 				u32 arena_reg, fixup_reg;
2193 				s64 delta;
2194 
2195 				if (!bpf_prog->aux->extable)
2196 					break;
2197 
2198 				if (excnt >= bpf_prog->aux->num_exentries) {
2199 					pr_err("mem32 extable bug\n");
2200 					return -EFAULT;
2201 				}
2202 				ex = &bpf_prog->aux->extable[excnt++];
2203 
2204 				delta = _insn - (u8 *)&ex->insn;
2205 				/* switch ex to rw buffer for writes */
2206 				ex = (void *)rw_image + ((void *)ex - (void *)image);
2207 
2208 				ex->insn = delta;
2209 
2210 				ex->data = EX_TYPE_BPF;
2211 
2212 				/*
2213 				 * src_reg/dst_reg holds the address in the arena region with upper
2214 				 * 32-bits being zero because of a preceding addr_space_cast(r<n>,
2215 				 * 0x0, 0x1) instruction. This address is adjusted with the addition
2216 				 * of arena_vm_start (see the implementation of BPF_PROBE_MEM32 and
2217 				 * BPF_PROBE_ATOMIC) before being used for the memory access. Pass
2218 				 * the reg holding the unmodified 32-bit address to
2219 				 * ex_handler_bpf().
2220 				 */
2221 				if (BPF_CLASS(insn->code) == BPF_LDX) {
2222 					arena_reg = reg2pt_regs[src_reg];
2223 					fixup_reg = reg2pt_regs[dst_reg];
2224 				} else {
2225 					arena_reg = reg2pt_regs[dst_reg];
2226 					fixup_reg = DONT_CLEAR;
2227 				}
2228 
2229 				ex->fixup = FIELD_PREP(FIXUP_INSN_LEN_MASK, prog - start_of_ldx) |
2230 					    FIELD_PREP(FIXUP_ARENA_REG_MASK, arena_reg) |
2231 					    FIELD_PREP(FIXUP_REG_MASK, fixup_reg);
2232 				ex->fixup |= FIXUP_ARENA_ACCESS;
2233 
2234 				ex->data |= FIELD_PREP(DATA_ARENA_OFFSET_MASK, insn->off);
2235 			}
2236 			break;
2237 
2238 			/* LDX: dst_reg = *(u8*)(src_reg + off) */
2239 		case BPF_LDX | BPF_MEM | BPF_B:
2240 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
2241 		case BPF_LDX | BPF_MEM | BPF_H:
2242 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
2243 		case BPF_LDX | BPF_MEM | BPF_W:
2244 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
2245 		case BPF_LDX | BPF_MEM | BPF_DW:
2246 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
2247 			/* LDXS: dst_reg = *(s8*)(src_reg + off) */
2248 		case BPF_LDX | BPF_MEMSX | BPF_B:
2249 		case BPF_LDX | BPF_MEMSX | BPF_H:
2250 		case BPF_LDX | BPF_MEMSX | BPF_W:
2251 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
2252 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
2253 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
2254 			insn_off = insn->off;
2255 
2256 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
2257 			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
2258 				/* Conservatively check that src_reg + insn->off is a kernel address:
2259 				 *   src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
2260 				 *   and
2261 				 *   src_reg + insn->off < VSYSCALL_ADDR
2262 				 */
2263 
2264 				u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
2265 				u8 *end_of_jmp;
2266 
2267 				/* movabsq r10, VSYSCALL_ADDR */
2268 				emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
2269 					       (u32)(long)VSYSCALL_ADDR);
2270 
				/* mov r11, src_reg */
2272 				EMIT_mov(AUX_REG, src_reg);
2273 
2274 				if (insn->off) {
2275 					/* add r11, insn->off */
2276 					maybe_emit_1mod(&prog, AUX_REG, true);
2277 					EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
2278 				}
2279 
2280 				/* sub r11, r10 */
2281 				maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
2282 				EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
2283 
2284 				/* movabsq r10, limit */
2285 				emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
2286 					       (u32)(long)limit);
2287 
				/* cmp r11, r10 */
2289 				maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
2290 				EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
2291 
2292 				/* if unsigned '>', goto load */
2293 				EMIT2(X86_JA, 0);
2294 				end_of_jmp = prog;
2295 
2296 				/* xor dst_reg, dst_reg */
2297 				emit_mov_imm32(&prog, false, dst_reg, 0);
2298 				/* jmp byte_after_ldx */
2299 				EMIT2(0xEB, 0);
2300 
				/* populate jmp_offset for the JA above to jump to start_of_ldx */
2302 				start_of_ldx = prog;
2303 				end_of_jmp[-1] = start_of_ldx - end_of_jmp;
2304 			}
2305 			if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
2306 			    BPF_MODE(insn->code) == BPF_MEMSX)
2307 				emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
2308 			else
2309 				emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
2310 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
2311 			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
2312 				struct exception_table_entry *ex;
2313 				u8 *_insn = image + proglen + (start_of_ldx - temp);
2314 				s64 delta;
2315 
2316 				/* populate jmp_offset for JMP above */
2317 				start_of_ldx[-1] = prog - start_of_ldx;
2318 
2319 				if (!bpf_prog->aux->extable)
2320 					break;
2321 
2322 				if (excnt >= bpf_prog->aux->num_exentries) {
2323 					pr_err("ex gen bug\n");
2324 					return -EFAULT;
2325 				}
2326 				ex = &bpf_prog->aux->extable[excnt++];
2327 
2328 				delta = _insn - (u8 *)&ex->insn;
2329 				if (!is_simm32(delta)) {
2330 					pr_err("extable->insn doesn't fit into 32-bit\n");
2331 					return -EFAULT;
2332 				}
2333 				/* switch ex to rw buffer for writes */
2334 				ex = (void *)rw_image + ((void *)ex - (void *)image);
2335 
2336 				ex->insn = delta;
2337 
2338 				ex->data = EX_TYPE_BPF;
2339 
2340 				if (dst_reg > BPF_REG_9) {
2341 					pr_err("verifier error\n");
2342 					return -EFAULT;
2343 				}
2344 				/*
				 * Compute the size of the x86 insn and its destination
				 * x86 register. ex_handler_bpf() will use the lower 8
				 * bits to adjust pt_regs->ip to jump over this x86
				 * instruction, and the upper bits to figure out which
				 * pt_regs register to zero out. End result: the 4-byte
				 * x86 insn "mov rbx, qword ptr [rax+0x14]" is skipped
				 * and rbx is zero-initialized.
2351 				 */
2352 				ex->fixup = FIELD_PREP(FIXUP_INSN_LEN_MASK, prog - start_of_ldx) |
2353 					    FIELD_PREP(FIXUP_REG_MASK, reg2pt_regs[dst_reg]);
2354 			}
2355 			break;
2356 
2357 		case BPF_STX | BPF_ATOMIC | BPF_B:
2358 		case BPF_STX | BPF_ATOMIC | BPF_H:
2359 			if (!bpf_atomic_is_load_store(insn)) {
2360 				pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
2361 				return -EFAULT;
2362 			}
2363 			fallthrough;
2364 		case BPF_STX | BPF_ATOMIC | BPF_W:
2365 		case BPF_STX | BPF_ATOMIC | BPF_DW:
2366 			if (insn->imm == (BPF_AND | BPF_FETCH) ||
2367 			    insn->imm == (BPF_OR | BPF_FETCH) ||
2368 			    insn->imm == (BPF_XOR | BPF_FETCH)) {
2369 				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
2370 				u32 real_src_reg = src_reg;
2371 				u32 real_dst_reg = dst_reg;
2372 				u8 *branch_target;
2373 
2374 				/*
2375 				 * Can't be implemented with a single x86 insn.
2376 				 * Need to do a CMPXCHG loop.
2377 				 */
2378 
2379 				/* Will need RAX as a CMPXCHG operand so save R0 */
2380 				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
2381 				if (src_reg == BPF_REG_0)
2382 					real_src_reg = BPF_REG_AX;
2383 				if (dst_reg == BPF_REG_0)
2384 					real_dst_reg = BPF_REG_AX;
2385 
2386 				branch_target = prog;
2387 				/* Load old value */
2388 				emit_ldx(&prog, BPF_SIZE(insn->code),
2389 					 BPF_REG_0, real_dst_reg, insn->off);
2390 				/*
2391 				 * Perform the (commutative) operation locally,
2392 				 * put the result in the AUX_REG.
2393 				 */
2394 				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
2395 				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
2396 				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
2397 				      add_2reg(0xC0, AUX_REG, real_src_reg));
2398 				/* Attempt to swap in new value */
2399 				err = emit_atomic_rmw(&prog, BPF_CMPXCHG,
2400 						      real_dst_reg, AUX_REG,
2401 						      insn->off,
2402 						      BPF_SIZE(insn->code));
2403 				if (WARN_ON(err))
2404 					return err;
2405 				/*
2406 				 * ZF tells us whether we won the race. If it's
2407 				 * cleared we need to try again.
2408 				 */
2409 				EMIT2(X86_JNE, -(prog - branch_target) - 2);
2410 				/* Return the pre-modification value */
2411 				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
2412 				/* Restore R0 after clobbering RAX */
2413 				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
2414 				break;
2415 			}
2416 
2417 			if (bpf_atomic_is_load_store(insn))
2418 				err = emit_atomic_ld_st(&prog, insn->imm, dst_reg, src_reg,
2419 							insn->off, BPF_SIZE(insn->code));
2420 			else
2421 				err = emit_atomic_rmw(&prog, insn->imm, dst_reg, src_reg,
2422 						      insn->off, BPF_SIZE(insn->code));
2423 			if (err)
2424 				return err;
2425 			break;
2426 
2427 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_B:
2428 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_H:
2429 			if (!bpf_atomic_is_load_store(insn)) {
2430 				pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n");
2431 				return -EFAULT;
2432 			}
2433 			fallthrough;
2434 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
2435 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
2436 			start_of_ldx = prog;
2437 
2438 			if (bpf_atomic_is_load_store(insn))
2439 				err = emit_atomic_ld_st_index(&prog, insn->imm,
2440 							      BPF_SIZE(insn->code), dst_reg,
2441 							      src_reg, X86_REG_R12, insn->off);
2442 			else
2443 				err = emit_atomic_rmw_index(&prog, insn->imm, BPF_SIZE(insn->code),
2444 							    dst_reg, src_reg, X86_REG_R12,
2445 							    insn->off);
2446 			if (err)
2447 				return err;
2448 			goto populate_extable;
2449 
2450 			/* call */
2451 		case BPF_JMP | BPF_CALL: {
2452 			u8 *ip = image + addrs[i - 1];
2453 
2454 			func = (u8 *) __bpf_call_base + imm32;
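			/* Any bytes emitted before the call insn itself (the
			 * tail-call-cnt load, push r9) must be added to ip so
			 * that the rel32 displacement is computed from the
			 * actual call site.
			 */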
2455 			if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) {
2456 				LOAD_TAIL_CALL_CNT_PTR(stack_depth);
2457 				ip += 7;
2458 			}
2459 			if (!imm32)
2460 				return -EINVAL;
2461 			if (priv_frame_ptr) {
2462 				push_r9(&prog);
2463 				ip += 2;
2464 			}
2465 			ip += x86_call_depth_emit_accounting(&prog, func, ip);
2466 			if (emit_call(&prog, func, ip))
2467 				return -EINVAL;
2468 			if (priv_frame_ptr)
2469 				pop_r9(&prog);
2470 			break;
2471 		}
2472 
2473 		case BPF_JMP | BPF_TAIL_CALL:
2474 			if (imm32)
2475 				emit_bpf_tail_call_direct(bpf_prog,
2476 							  &bpf_prog->aux->poke_tab[imm32 - 1],
2477 							  &prog, image + addrs[i - 1],
2478 							  callee_regs_used,
2479 							  stack_depth,
2480 							  ctx);
2481 			else
2482 				emit_bpf_tail_call_indirect(bpf_prog,
2483 							    &prog,
2484 							    callee_regs_used,
2485 							    stack_depth,
2486 							    image + addrs[i - 1],
2487 							    ctx);
2488 			break;
2489 
2490 			/* cond jump */
2491 		case BPF_JMP | BPF_JEQ | BPF_X:
2492 		case BPF_JMP | BPF_JNE | BPF_X:
2493 		case BPF_JMP | BPF_JGT | BPF_X:
2494 		case BPF_JMP | BPF_JLT | BPF_X:
2495 		case BPF_JMP | BPF_JGE | BPF_X:
2496 		case BPF_JMP | BPF_JLE | BPF_X:
2497 		case BPF_JMP | BPF_JSGT | BPF_X:
2498 		case BPF_JMP | BPF_JSLT | BPF_X:
2499 		case BPF_JMP | BPF_JSGE | BPF_X:
2500 		case BPF_JMP | BPF_JSLE | BPF_X:
2501 		case BPF_JMP32 | BPF_JEQ | BPF_X:
2502 		case BPF_JMP32 | BPF_JNE | BPF_X:
2503 		case BPF_JMP32 | BPF_JGT | BPF_X:
2504 		case BPF_JMP32 | BPF_JLT | BPF_X:
2505 		case BPF_JMP32 | BPF_JGE | BPF_X:
2506 		case BPF_JMP32 | BPF_JLE | BPF_X:
2507 		case BPF_JMP32 | BPF_JSGT | BPF_X:
2508 		case BPF_JMP32 | BPF_JSLT | BPF_X:
2509 		case BPF_JMP32 | BPF_JSGE | BPF_X:
2510 		case BPF_JMP32 | BPF_JSLE | BPF_X:
2511 			/* cmp dst_reg, src_reg */
2512 			maybe_emit_mod(&prog, dst_reg, src_reg,
2513 				       BPF_CLASS(insn->code) == BPF_JMP);
2514 			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
2515 			goto emit_cond_jmp;
2516 
2517 		case BPF_JMP | BPF_JSET | BPF_X:
2518 		case BPF_JMP32 | BPF_JSET | BPF_X:
2519 			/* test dst_reg, src_reg */
2520 			maybe_emit_mod(&prog, dst_reg, src_reg,
2521 				       BPF_CLASS(insn->code) == BPF_JMP);
2522 			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
2523 			goto emit_cond_jmp;
2524 
2525 		case BPF_JMP | BPF_JSET | BPF_K:
2526 		case BPF_JMP32 | BPF_JSET | BPF_K:
2527 			/* test dst_reg, imm32 */
2528 			maybe_emit_1mod(&prog, dst_reg,
2529 					BPF_CLASS(insn->code) == BPF_JMP);
2530 			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
2531 			goto emit_cond_jmp;
2532 
2533 		case BPF_JMP | BPF_JEQ | BPF_K:
2534 		case BPF_JMP | BPF_JNE | BPF_K:
2535 		case BPF_JMP | BPF_JGT | BPF_K:
2536 		case BPF_JMP | BPF_JLT | BPF_K:
2537 		case BPF_JMP | BPF_JGE | BPF_K:
2538 		case BPF_JMP | BPF_JLE | BPF_K:
2539 		case BPF_JMP | BPF_JSGT | BPF_K:
2540 		case BPF_JMP | BPF_JSLT | BPF_K:
2541 		case BPF_JMP | BPF_JSGE | BPF_K:
2542 		case BPF_JMP | BPF_JSLE | BPF_K:
2543 		case BPF_JMP32 | BPF_JEQ | BPF_K:
2544 		case BPF_JMP32 | BPF_JNE | BPF_K:
2545 		case BPF_JMP32 | BPF_JGT | BPF_K:
2546 		case BPF_JMP32 | BPF_JLT | BPF_K:
2547 		case BPF_JMP32 | BPF_JGE | BPF_K:
2548 		case BPF_JMP32 | BPF_JLE | BPF_K:
2549 		case BPF_JMP32 | BPF_JSGT | BPF_K:
2550 		case BPF_JMP32 | BPF_JSLT | BPF_K:
2551 		case BPF_JMP32 | BPF_JSGE | BPF_K:
2552 		case BPF_JMP32 | BPF_JSLE | BPF_K:
2553 			/* test dst_reg, dst_reg to save one extra byte */
2554 			if (imm32 == 0) {
2555 				maybe_emit_mod(&prog, dst_reg, dst_reg,
2556 					       BPF_CLASS(insn->code) == BPF_JMP);
2557 				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
2558 				goto emit_cond_jmp;
2559 			}
2560 
2561 			/* cmp dst_reg, imm8/32 */
2562 			maybe_emit_1mod(&prog, dst_reg,
2563 					BPF_CLASS(insn->code) == BPF_JMP);
2564 
2565 			if (is_imm8(imm32))
2566 				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
2567 			else
2568 				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
2569 
2570 emit_cond_jmp:		/* Convert BPF opcode to x86 */
2571 			switch (BPF_OP(insn->code)) {
2572 			case BPF_JEQ:
2573 				jmp_cond = X86_JE;
2574 				break;
2575 			case BPF_JSET:
2576 			case BPF_JNE:
2577 				jmp_cond = X86_JNE;
2578 				break;
2579 			case BPF_JGT:
2580 				/* GT is unsigned '>', JA in x86 */
2581 				jmp_cond = X86_JA;
2582 				break;
2583 			case BPF_JLT:
2584 				/* LT is unsigned '<', JB in x86 */
2585 				jmp_cond = X86_JB;
2586 				break;
2587 			case BPF_JGE:
2588 				/* GE is unsigned '>=', JAE in x86 */
2589 				jmp_cond = X86_JAE;
2590 				break;
2591 			case BPF_JLE:
2592 				/* LE is unsigned '<=', JBE in x86 */
2593 				jmp_cond = X86_JBE;
2594 				break;
2595 			case BPF_JSGT:
2596 				/* Signed '>', GT in x86 */
2597 				jmp_cond = X86_JG;
2598 				break;
2599 			case BPF_JSLT:
2600 				/* Signed '<', LT in x86 */
2601 				jmp_cond = X86_JL;
2602 				break;
2603 			case BPF_JSGE:
2604 				/* Signed '>=', GE in x86 */
2605 				jmp_cond = X86_JGE;
2606 				break;
2607 			case BPF_JSLE:
2608 				/* Signed '<=', LE in x86 */
2609 				jmp_cond = X86_JLE;
2610 				break;
2611 			default: /* to silence GCC warning */
2612 				return -EFAULT;
2613 			}
2614 			jmp_offset = addrs[i + insn->off] - addrs[i];
2615 			if (is_imm8_jmp_offset(jmp_offset)) {
2616 				if (jmp_padding) {
2617 					/* To keep the jmp_offset valid, the extra bytes are
2618 					 * padded before the jump insn, so we subtract the
2619 					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
2620 					 *
2621 					 * If the previous pass already emits an imm8
2622 					 * jmp_cond, then this BPF insn won't shrink, so
2623 					 * "nops" is 0.
2624 					 *
2625 					 * On the other hand, if the previous pass emits an
					 * imm32 jmp_cond, the extra 4 bytes(*) are padded to
2627 					 * keep the image from shrinking further.
2628 					 *
2629 					 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
2630 					 *     is 2 bytes, so the size difference is 4 bytes.
2631 					 */
2632 					nops = INSN_SZ_DIFF - 2;
2633 					if (nops != 0 && nops != 4) {
2634 						pr_err("unexpected jmp_cond padding: %d bytes\n",
2635 						       nops);
2636 						return -EFAULT;
2637 					}
2638 					emit_nops(&prog, nops);
2639 				}
2640 				EMIT2(jmp_cond, jmp_offset);
2641 			} else if (is_simm32(jmp_offset)) {
2642 				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
2643 			} else {
2644 				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
2645 				return -EFAULT;
2646 			}
2647 
2648 			break;
2649 
2650 		case BPF_JMP | BPF_JA | BPF_X:
2651 			emit_indirect_jump(&prog, insn->dst_reg, image + addrs[i - 1]);
2652 			break;
2653 		case BPF_JMP | BPF_JA:
2654 		case BPF_JMP32 | BPF_JA:
2655 			if (BPF_CLASS(insn->code) == BPF_JMP) {
2656 				if (insn->off == -1)
2657 					/* -1 jmp instructions will always jump
2658 					 * backwards two bytes. Explicitly handling
2659 					 * this case avoids wasting too many passes
2660 					 * when there are long sequences of replaced
2661 					 * dead code.
2662 					 */
2663 					jmp_offset = -2;
2664 				else
2665 					jmp_offset = addrs[i + insn->off] - addrs[i];
2666 			} else {
2667 				if (insn->imm == -1)
2668 					jmp_offset = -2;
2669 				else
2670 					jmp_offset = addrs[i + insn->imm] - addrs[i];
2671 			}
2672 
2673 			if (!jmp_offset) {
2674 				/*
2675 				 * If jmp_padding is enabled, the extra nops will
2676 				 * be inserted. Otherwise, optimize out nop jumps.
2677 				 */
2678 				if (jmp_padding) {
2679 					/* There are 3 possible conditions.
2680 					 * (1) This BPF_JA is already optimized out in
2681 					 *     the previous run, so there is no need
2682 					 *     to pad any extra byte (0 byte).
2683 					 * (2) The previous pass emits an imm8 jmp,
2684 					 *     so we pad 2 bytes to match the previous
2685 					 *     insn size.
2686 					 * (3) Similarly, the previous pass emits an
					 *     imm32 jmp, and 5 bytes are padded.
2688 					 */
2689 					nops = INSN_SZ_DIFF;
2690 					if (nops != 0 && nops != 2 && nops != 5) {
2691 						pr_err("unexpected nop jump padding: %d bytes\n",
2692 						       nops);
2693 						return -EFAULT;
2694 					}
2695 					emit_nops(&prog, nops);
2696 				}
2697 				break;
2698 			}
2699 emit_jmp:
2700 			if (is_imm8_jmp_offset(jmp_offset)) {
2701 				if (jmp_padding) {
2702 					/* To avoid breaking jmp_offset, the extra bytes
2703 					 * are padded before the actual jmp insn, so
2704 					 * 2 bytes is subtracted from INSN_SZ_DIFF.
2705 					 *
2706 					 * If the previous pass already emits an imm8
2707 					 * jmp, there is nothing to pad (0 byte).
2708 					 *
2709 					 * If it emits an imm32 jmp (5 bytes) previously
2710 					 * and now an imm8 jmp (2 bytes), then we pad
2711 					 * (5 - 2 = 3) bytes to stop the image from
2712 					 * shrinking further.
2713 					 */
2714 					nops = INSN_SZ_DIFF - 2;
2715 					if (nops != 0 && nops != 3) {
2716 						pr_err("unexpected jump padding: %d bytes\n",
2717 						       nops);
2718 						return -EFAULT;
2719 					}
					emit_nops(&prog, nops);
2721 				}
2722 				EMIT2(0xEB, jmp_offset);
2723 			} else if (is_simm32(jmp_offset)) {
2724 				EMIT1_off32(0xE9, jmp_offset);
2725 			} else {
2726 				pr_err("jmp gen bug %llx\n", jmp_offset);
2727 				return -EFAULT;
2728 			}
2729 			break;
2730 
2731 		case BPF_JMP | BPF_EXIT:
2732 			if (seen_exit) {
2733 				jmp_offset = ctx->cleanup_addr - addrs[i];
2734 				goto emit_jmp;
2735 			}
2736 			seen_exit = true;
2737 			/* Update cleanup_addr */
2738 			ctx->cleanup_addr = proglen;
2739 			if (bpf_prog_was_classic(bpf_prog) &&
2740 			    !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {
2741 				u8 *ip = image + addrs[i - 1];
2742 
2743 				if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
2744 					return -EINVAL;
2745 			}
2746 			if (bpf_prog->aux->exception_boundary) {
2747 				pop_callee_regs(&prog, all_callee_regs_used);
2748 				pop_r12(&prog);
2749 			} else {
2750 				pop_callee_regs(&prog, callee_regs_used);
2751 				if (arena_vm_start)
2752 					pop_r12(&prog);
2753 			}
2754 			EMIT1(0xC9);         /* leave */
2755 			bpf_prog->aux->ksym.fp_end = prog - temp;
2756 
2757 			emit_return(&prog, image + addrs[i - 1] + (prog - temp));
2758 			break;
2759 
2760 		default:
2761 			/*
			 * By design the x86-64 JIT should support all BPF instructions.
			 * This error will be seen if a new instruction was added
2764 			 * to the interpreter, but not to the JIT, or if there is
2765 			 * junk in bpf_prog.
2766 			 */
2767 			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
2768 			return -EINVAL;
2769 		}
2770 
2771 		ilen = prog - temp;
2772 		if (ilen > BPF_MAX_INSN_SIZE) {
2773 			pr_err("bpf_jit: fatal insn size error\n");
2774 			return -EFAULT;
2775 		}
2776 
2777 		if (image) {
2778 			/*
2779 			 * When populating the image, assert that:
2780 			 *
2781 			 *  i) We do not write beyond the allocated space, and
2782 			 * ii) addrs[i] did not change from the prior run, in order
2783 			 *     to validate assumptions made for computing branch
2784 			 *     displacements.
2785 			 */
2786 			if (unlikely(proglen + ilen > oldproglen ||
2787 				     proglen + ilen != addrs[i])) {
2788 				pr_err("bpf_jit: fatal error\n");
2789 				return -EFAULT;
2790 			}
2791 			memcpy(rw_image + proglen, temp, ilen);
2792 		}
2793 		proglen += ilen;
2794 		addrs[i] = proglen;
2795 		prog = temp;
2796 	}
2797 
2798 	if (image && excnt != bpf_prog->aux->num_exentries) {
2799 		pr_err("extable is not populated\n");
2800 		return -EFAULT;
2801 	}
2802 	return proglen;
2803 }
2804 
2805 static void clean_stack_garbage(const struct btf_func_model *m,
2806 				u8 **pprog, int nr_stack_slots,
2807 				int stack_size)
2808 {
2809 	int arg_size, off;
2810 	u8 *prog;
2811 
	/* Generally speaking, the compiler passes on-stack arguments
	 * with a "push" instruction, which occupies 8 bytes on the
	 * stack. In that case there are no garbage values when we copy
	 * the arguments from the original stack frame to the current
	 * one as BPF_DW.
	 *
	 * However, sometimes the compiler allocates only 4 bytes on
	 * the stack for an argument. For now, this only happens when
	 * there is a single on-stack argument whose size is at most
	 * 4 bytes. In that case, the upper 4 bytes of the slot where
	 * we store the argument in the current stack frame hold
	 * garbage.
	 *
	 * argument on the original stack:
	 *
	 * stack_arg_1(4-byte) xxx(4-byte)
	 *
	 * what we copy:
	 *
	 * stack_arg_1(8-byte): stack_arg_1(original) xxx
	 *
	 * and xxx is the garbage that we clean here.
	 */
2835 	if (nr_stack_slots != 1)
2836 		return;
2837 
2838 	/* the size of the last argument */
2839 	arg_size = m->arg_size[m->nr_args - 1];
2840 	if (arg_size <= 4) {
2841 		off = -(stack_size - 4);
2842 		prog = *pprog;
2843 		/* mov DWORD PTR [rbp + off], 0 */
2844 		if (!is_imm8(off))
2845 			EMIT2_off32(0xC7, 0x85, off);
2846 		else
2847 			EMIT3(0xC7, 0x45, off);
2848 		EMIT(0, 4);
2849 		*pprog = prog;
2850 	}
2851 }
2852 
2853 /* get the count of the regs that are used to pass arguments */
2854 static int get_nr_used_regs(const struct btf_func_model *m)
2855 {
2856 	int i, arg_regs, nr_used_regs = 0;
2857 
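	/* e.g. for foo(char, char, char, char, char, struct { long a; int b; },
	 * char): args 1-5 take one reg each, the two-slot struct does not fit
	 * in the single remaining reg and goes on the stack, and arg7 takes
	 * the 6th reg, so 6 regs are counted.
	 */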
2858 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2859 		arg_regs = (m->arg_size[i] + 7) / 8;
2860 		if (nr_used_regs + arg_regs <= 6)
2861 			nr_used_regs += arg_regs;
2862 
2863 		if (nr_used_regs >= 6)
2864 			break;
2865 	}
2866 
2867 	return nr_used_regs;
2868 }
2869 
2870 static void save_args(const struct btf_func_model *m, u8 **prog,
2871 		      int stack_size, bool for_call_origin, u32 flags)
2872 {
2873 	int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
2874 	bool use_jmp = bpf_trampoline_use_jmp(flags);
2875 	int i, j;
2876 
2877 	/* Store function arguments to stack.
2878 	 * For a function that accepts two pointers the sequence will be:
2879 	 * mov QWORD PTR [rbp-0x10],rdi
2880 	 * mov QWORD PTR [rbp-0x8],rsi
2881 	 */
2882 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2883 		arg_regs = (m->arg_size[i] + 7) / 8;
2884 
		/* According to the research of Yonghong, the members of a
		 * struct argument are either all in registers or all on the
		 * stack. Meanwhile, the compiler passes an argument in regs
		 * only if the remaining regs can hold the whole argument.
		 *
		 * The args can therefore end up out of order. For example:
		 *
		 * struct foo_struct {
		 *     long a;
		 *     int b;
		 * };
		 * int foo(char, char, char, char, char, struct foo_struct,
		 *         char);
		 *
		 * args 1-5 and arg7 are passed in regs, while arg6 is passed
		 * on the stack.
2901 		 */
2902 		if (nr_regs + arg_regs > 6) {
			/* Copy function arguments from the original stack
			 * frame into the current stack frame.
			 *
			 * The on-stack arguments start at:
			 *   rbp + 8 (pushed rbp) +
			 *   8 (return addr of the origin call) +
			 *   8 (return addr of the caller)
			 * i.e. rbp + 24, or rbp + 16 when the trampoline is
			 * entered with "jmp" and one return address is absent.
2912 			 */
2913 			for (j = 0; j < arg_regs; j++) {
2914 				emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
2915 					 nr_stack_slots * 8 + 16 + (!use_jmp) * 8);
2916 				emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
2917 					 -stack_size);
2918 
2919 				if (!nr_stack_slots)
2920 					first_off = stack_size;
2921 				stack_size -= 8;
2922 				nr_stack_slots++;
2923 			}
2924 		} else {
			/* Only copy the on-stack arguments to the current
			 * 'stack_size' and ignore the reg args; this is used
			 * to prepare the on-stack arguments for the origin call.
2928 			 */
2929 			if (for_call_origin) {
2930 				nr_regs += arg_regs;
2931 				continue;
2932 			}
2933 
2934 			/* copy the arguments from regs into stack */
2935 			for (j = 0; j < arg_regs; j++) {
2936 				emit_stx(prog, BPF_DW, BPF_REG_FP,
2937 					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2938 					 -stack_size);
2939 				stack_size -= 8;
2940 				nr_regs++;
2941 			}
2942 		}
2943 	}
2944 
2945 	clean_stack_garbage(m, prog, nr_stack_slots, first_off);
2946 }
2947 
2948 static void restore_regs(const struct btf_func_model *m, u8 **prog,
2949 			 int stack_size)
2950 {
2951 	int i, j, arg_regs, nr_regs = 0;
2952 
2953 	/* Restore function arguments from stack.
2954 	 * For a function that accepts two pointers the sequence will be:
2955 	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
2956 	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
2957 	 *
2958 	 * The logic here is similar to what we do in save_args()
2959 	 */
2960 	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2961 		arg_regs = (m->arg_size[i] + 7) / 8;
2962 		if (nr_regs + arg_regs <= 6) {
2963 			for (j = 0; j < arg_regs; j++) {
2964 				emit_ldx(prog, BPF_DW,
2965 					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2966 					 BPF_REG_FP,
2967 					 -stack_size);
2968 				stack_size -= 8;
2969 				nr_regs++;
2970 			}
2971 		} else {
2972 			stack_size -= 8 * arg_regs;
2973 		}
2974 
2975 		if (nr_regs >= 6)
2976 			break;
2977 	}
2978 }
2979 
2980 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
2981 			   struct bpf_tramp_link *l, int stack_size,
2982 			   int run_ctx_off, bool save_ret,
2983 			   void *image, void *rw_image)
2984 {
2985 	u8 *prog = *pprog;
2986 	u8 *jmp_insn;
2987 	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
2988 	struct bpf_prog *p = l->link.prog;
2989 	u64 cookie = l->cookie;
2990 
2991 	/* mov rdi, cookie */
2992 	emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
2993 
2994 	/* Prepare struct bpf_tramp_run_ctx.
2995 	 *
2996 	 * bpf_tramp_run_ctx is already preserved by
2997 	 * arch_prepare_bpf_trampoline().
2998 	 *
2999 	 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
3000 	 */
3001 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
3002 
3003 	/* arg1: mov rdi, progs[i] */
3004 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: lea rsi, [rbp - run_ctx_off] */
3006 	if (!is_imm8(-run_ctx_off))
3007 		EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
3008 	else
3009 		EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
3010 
3011 	if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
3012 		return -EINVAL;
3013 	/* remember prog start time returned by __bpf_prog_enter */
3014 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
3015 
3016 	/* if (__bpf_prog_enter*(prog) == 0)
3017 	 *	goto skip_exec_of_prog;
3018 	 */
3019 	EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
3020 	/* emit 2 nops that will be replaced with JE insn */
3021 	jmp_insn = prog;
3022 	emit_nops(&prog, 2);
3023 
3024 	/* arg1: lea rdi, [rbp - stack_size] */
3025 	if (!is_imm8(-stack_size))
3026 		EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
3027 	else
3028 		EMIT4(0x48, 0x8D, 0x7D, -stack_size);
3029 	/* arg2: progs[i]->insnsi for interpreter */
3030 	if (!p->jited)
3031 		emit_mov_imm64(&prog, BPF_REG_2,
3032 			       (long) p->insnsi >> 32,
3033 			       (u32) (long) p->insnsi);
3034 	/* call JITed bpf program or interpreter */
3035 	if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
3036 		return -EINVAL;
3037 
3038 	/*
3039 	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
3040 	 * of the previous call which is then passed on the stack to
3041 	 * the next BPF program.
3042 	 *
3043 	 * BPF_TRAMP_FENTRY trampoline may need to return the return
3044 	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
3045 	 */
3046 	if (save_ret)
3047 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
3048 
3049 	/* replace 2 nops with JE insn, since jmp target is known */
3050 	jmp_insn[0] = X86_JE;
3051 	jmp_insn[1] = prog - jmp_insn - 2;
3052 
3053 	/* arg1: mov rdi, progs[i] */
3054 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
3055 	/* arg2: mov rsi, rbx <- start time in nsec */
3056 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
3057 	/* arg3: lea rdx, [rbp - run_ctx_off] */
3058 	if (!is_imm8(-run_ctx_off))
3059 		EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
3060 	else
3061 		EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
3062 	if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
3063 		return -EINVAL;
3064 
3065 	*pprog = prog;
3066 	return 0;
3067 }
3068 
3069 static void emit_align(u8 **pprog, u32 align)
3070 {
3071 	u8 *target, *prog = *pprog;
3072 
3073 	target = PTR_ALIGN(prog, align);
3074 	if (target != prog)
3075 		emit_nops(&prog, target - prog);
3076 
3077 	*pprog = prog;
3078 }
3079 
3080 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
3081 {
3082 	u8 *prog = *pprog;
3083 	s64 offset;
3084 
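	/* The rel32 of the 6-byte jcc (0x0F 0x8X + imm32) is relative to
	 * the end of the insn, hence ip + 2 + 4.
	 */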
3085 	offset = func - (ip + 2 + 4);
3086 	if (!is_simm32(offset)) {
3087 		pr_err("Target %p is out of range\n", func);
3088 		return -EINVAL;
3089 	}
3090 	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
3091 	*pprog = prog;
3092 	return 0;
3093 }
3094 
3095 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
3096 		      struct bpf_tramp_links *tl, int stack_size,
3097 		      int run_ctx_off, int func_meta_off, bool save_ret,
3098 		      void *image, void *rw_image, u64 func_meta,
3099 		      int cookie_off)
3100 {
3101 	int i, cur_cookie = (cookie_off - stack_size) / 8;
3102 	u8 *prog = *pprog;
3103 
3104 	for (i = 0; i < tl->nr_links; i++) {
3105 		if (tl->links[i]->link.prog->call_session_cookie) {
3106 			emit_store_stack_imm64(&prog, BPF_REG_0, -func_meta_off,
3107 				func_meta | (cur_cookie << BPF_TRAMP_COOKIE_INDEX_SHIFT));
3108 			cur_cookie--;
3109 		}
3110 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
3111 				    run_ctx_off, save_ret, image, rw_image))
3112 			return -EINVAL;
3113 	}
3114 	*pprog = prog;
3115 	return 0;
3116 }
3117 
3118 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
3119 			      struct bpf_tramp_links *tl, int stack_size,
3120 			      int run_ctx_off, u8 **branches,
3121 			      void *image, void *rw_image)
3122 {
3123 	u8 *prog = *pprog;
3124 	int i;
3125 
3126 	/* The first fmod_ret program will receive a garbage return value.
3127 	 * Set this to 0 to avoid confusing the program.
3128 	 */
3129 	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
3130 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
3131 	for (i = 0; i < tl->nr_links; i++) {
3132 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
3133 				    image, rw_image))
3134 			return -EINVAL;
3135 
3136 		/* mod_ret prog stored return value into [rbp - 8]. Emit:
3137 		 * if (*(u64 *)(rbp - 8) !=  0)
3138 		 *	goto do_fexit;
3139 		 */
3140 		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
3141 		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
3142 
		/* Save the location of the branch and generate 6 nops
		 * (2 bytes for the jump and 4 bytes for the offset). These nops
3145 		 * are replaced with a conditional jump once do_fexit (i.e. the
3146 		 * start of the fexit invocation) is finalized.
3147 		 */
3148 		branches[i] = prog;
3149 		emit_nops(&prog, 4 + 2);
3150 	}
3151 
3152 	*pprog = prog;
3153 	return 0;
3154 }
3155 
3156 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
3157 #define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack)	\
3158 	__LOAD_TCC_PTR(-round_up(stack, 8) - 8)
3159 
3160 /* Example:
3161  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will have nr_args=2.
3163  * The assembly code when eth_type_trans is executing after trampoline:
3164  *
3165  * push rbp
3166  * mov rbp, rsp
3167  * sub rsp, 16                     // space for skb and dev
3168  * push rbx                        // temp regs to pass start time
3169  * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
3170  * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
3171  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
3173  * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
3174  * call addr_of_jited_FENTRY_prog
3175  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
3176  * mov rsi, rbx                    // prog start time
3177  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
3178  * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
3179  * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
3180  * pop rbx
3181  * leave
3182  * ret
3183  *
 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
3185  * replaced with 'call generated_bpf_trampoline'. When it returns
3186  * eth_type_trans will continue executing with original skb and dev pointers.
3187  *
3188  * The assembly code when eth_type_trans is called from trampoline:
3189  *
3190  * push rbp
3191  * mov rbp, rsp
3192  * sub rsp, 24                     // space for skb, dev, return value
3193  * push rbx                        // temp regs to pass start time
3194  * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
3195  * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
3196  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
3197  * mov rbx, rax                    // remember start time if bpf stats are enabled
3198  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
3199  * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
3200  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
3201  * mov rsi, rbx                    // prog start time
3202  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
3203  * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
3204  * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
3205  * call eth_type_trans+5           // execute body of eth_type_trans
3206  * mov qword ptr [rbp - 8], rax    // save return value
3207  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
3209  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
3210  * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
3211  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
3212  * mov rsi, rbx                    // prog start time
3213  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
3214  * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
3215  * pop rbx
3216  * leave
3217  * add rsp, 8                      // skip eth_type_trans's frame
3218  * ret                             // return to its caller
3219  */
3220 static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
3221 					 void *rw_image_end, void *image,
3222 					 const struct btf_func_model *m, u32 flags,
3223 					 struct bpf_tramp_links *tlinks,
3224 					 void *func_addr)
3225 {
3226 	int i, ret, nr_regs = m->nr_args, stack_size = 0;
3227 	int regs_off, func_meta_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
3228 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
3229 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
3230 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
3231 	void *orig_call = func_addr;
3232 	int cookie_off, cookie_cnt;
3233 	u8 **branches = NULL;
3234 	u64 func_meta;
3235 	u8 *prog;
3236 	bool save_ret;
3237 
3238 	/*
	 * F_INDIRECT is only compatible with F_RET_FENTRY_RET; it is
	 * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG
	 * because of how @func_addr is used.
3242 	 */
3243 	WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
3244 		     (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));
3245 
3246 	/* extra registers for struct arguments */
3247 	for (i = 0; i < m->nr_args; i++) {
3248 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
3249 			nr_regs += (m->arg_size[i] + 7) / 8 - 1;
3250 	}
3251 
	/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments: args 1-6
	 * are passed in regs, the remainder on the stack.
3254 	 */
3255 	if (nr_regs > MAX_BPF_FUNC_ARGS)
3256 		return -ENOTSUPP;
3257 
3258 	/* Generated trampoline stack layout:
3259 	 *
3260 	 * RBP + 8         [ return address  ]
3261 	 * RBP + 0         [ RBP             ]
3262 	 *
3263 	 * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
3264 	 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
3265 	 *
3266 	 *                 [ reg_argN        ]  always
3267 	 *                 [ ...             ]
3268 	 * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
3269 	 *
3270 	 * RBP - func_meta_off [ regs count, etc ]  always
3271 	 *
3272 	 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
3273 	 *
3274 	 * RBP - rbx_off   [ rbx value       ]  always
3275 	 *
3276 	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
3277 	 *
3278 	 *                     [ stack_argN ]  BPF_TRAMP_F_CALL_ORIG
3279 	 *                     [ ...        ]
3280 	 *                     [ stack_arg2 ]
3281 	 * RBP - arg_stack_off [ stack_arg1 ]
3282 	 * RSP                 [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX
3283 	 */
3284 
3285 	/* room for return value of orig_call or fentry prog */
3286 	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
3287 	if (save_ret)
3288 		stack_size += 8;
3289 
3290 	stack_size += nr_regs * 8;
3291 	regs_off = stack_size;
3292 
	/* function metadata, such as the regs count */
3294 	stack_size += 8;
3295 	func_meta_off = stack_size;
3296 
3297 	if (flags & BPF_TRAMP_F_IP_ARG)
3298 		stack_size += 8; /* room for IP address argument */
3299 
3300 	ip_off = stack_size;
3301 
3302 	cookie_cnt = bpf_fsession_cookie_cnt(tlinks);
3303 	/* room for session cookies */
3304 	stack_size += cookie_cnt * 8;
3305 	cookie_off = stack_size;
3306 
3307 	stack_size += 8;
3308 	rbx_off = stack_size;
3309 
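	/* room for struct bpf_tramp_run_ctx, rounded up to 8 bytes */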
3310 	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
3311 	run_ctx_off = stack_size;
3312 
3313 	if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
		/* the space used to pass arguments on the stack */
		stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
		/* Make sure the stack pointer is 16-byte aligned if we
		 * need to pass arguments on the stack, which means
		 *  [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
		 * should be 16-byte aligned. The code below depends on
		 * stack_size already being 8-byte aligned.
3321 		 */
3322 		if (bpf_trampoline_use_jmp(flags)) {
3323 			/* no rip in the "jmp" case */
3324 			stack_size += (stack_size % 16) ? 8 : 0;
3325 		} else {
3326 			stack_size += (stack_size % 16) ? 0 : 8;
3327 		}
3328 	}
3329 
3330 	arg_stack_off = stack_size;
3331 
3332 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
3333 		/* skip patched call instruction and point orig_call to actual
3334 		 * body of the kernel function.
3335 		 */
3336 		if (is_endbr(orig_call))
3337 			orig_call += ENDBR_INSN_SIZE;
3338 		orig_call += X86_PATCH_SIZE;
3339 	}
3340 
3341 	prog = rw_image;
3342 
3343 	if (flags & BPF_TRAMP_F_INDIRECT) {
3344 		/*
3345 		 * Indirect call for bpf_struct_ops
3346 		 */
3347 		emit_cfi(&prog, image,
3348 			 cfi_get_func_hash(func_addr),
3349 			 cfi_get_func_arity(func_addr));
3350 	} else {
3351 		/*
		 * Direct-call fentry stub; as such it needs accounting for the
3353 		 * __fentry__ call.
3354 		 */
3355 		x86_call_depth_emit_accounting(&prog, NULL, image);
3356 	}
3357 	EMIT1(0x55);		 /* push rbp */
3358 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
3359 	if (im)
3360 		im->ksym.fp_start = prog - (u8 *)rw_image;
3361 
3362 	if (!is_imm8(stack_size)) {
3363 		/* sub rsp, stack_size */
3364 		EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
3365 	} else {
3366 		/* sub rsp, stack_size */
3367 		EMIT4(0x48, 0x83, 0xEC, stack_size);
3368 	}
3369 	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
3370 		EMIT1(0x50);		/* push rax */
3371 	/* mov QWORD PTR [rbp - rbx_off], rbx */
3372 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
3373 
3374 	func_meta = nr_regs;
3375 	/* Store number of argument registers of the traced function */
3376 	emit_store_stack_imm64(&prog, BPF_REG_0, -func_meta_off, func_meta);
3377 
3378 	if (flags & BPF_TRAMP_F_IP_ARG) {
3379 		/* Store IP address of the traced function */
3380 		emit_store_stack_imm64(&prog, BPF_REG_0, -ip_off, (long)func_addr);
3381 	}
3382 
3383 	save_args(m, &prog, regs_off, false, flags);
3384 
3385 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
3386 		/* arg1: mov rdi, im */
3387 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
3388 		if (emit_rsb_call(&prog, __bpf_tramp_enter,
3389 				  image + (prog - (u8 *)rw_image))) {
3390 			ret = -EINVAL;
3391 			goto cleanup;
3392 		}
3393 	}
3394 
3395 	if (bpf_fsession_cnt(tlinks)) {
		/* clear all the session cookie values */
3397 		for (int i = 0; i < cookie_cnt; i++)
3398 			emit_store_stack_imm64(&prog, BPF_REG_0, -cookie_off + 8 * i, 0);
		/* clear the return value to make sure fentry always gets 0 */
3400 		emit_store_stack_imm64(&prog, BPF_REG_0, -8, 0);
3401 	}
3402 
3403 	if (fentry->nr_links) {
3404 		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off, func_meta_off,
3405 			       flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image,
3406 			       func_meta, cookie_off))
3407 			return -EINVAL;
3408 	}
3409 
3410 	if (fmod_ret->nr_links) {
3411 		branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
3412 				   GFP_KERNEL);
3413 		if (!branches)
3414 			return -ENOMEM;
3415 
3416 		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
3417 				       run_ctx_off, branches, image, rw_image)) {
3418 			ret = -EINVAL;
3419 			goto cleanup;
3420 		}
3421 	}
3422 
3423 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
3424 		restore_regs(m, &prog, regs_off);
3425 		save_args(m, &prog, arg_stack_off, true, flags);
3426 
3427 		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3428 			/* Before calling the original function, load the
3429 			 * tail_call_cnt_ptr from stack to rax.
3430 			 */
3431 			LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
3432 		}
3433 
3434 		if (flags & BPF_TRAMP_F_ORIG_STACK) {
3435 			emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
3436 			EMIT2(0xff, 0xd3); /* call *rbx */
3437 		} else {
3438 			/* call original function */
3439 			if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
3440 				ret = -EINVAL;
3441 				goto cleanup;
3442 			}
3443 		}
		/* remember return value on the stack for the bpf prog to access */
3445 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
3446 		im->ip_after_call = image + (prog - (u8 *)rw_image);
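		/*
		 * These nops form a patch site: when the trampoline image is
		 * torn down, they are replaced with a jump to ip_epilogue so
		 * that the fexit programs below are skipped.
		 */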
3447 		emit_nops(&prog, X86_PATCH_SIZE);
3448 	}
3449 
3450 	if (fmod_ret->nr_links) {
3451 		/* From Intel 64 and IA-32 Architectures Optimization
3452 		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3453 		 * Coding Rule 11: All branch targets should be 16-byte
3454 		 * aligned.
3455 		 */
3456 		emit_align(&prog, 16);
3457 		/* Update the branches saved in invoke_bpf_mod_ret with the
3458 		 * aligned address of do_fexit.
3459 		 */
3460 		for (i = 0; i < fmod_ret->nr_links; i++) {
3461 			emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
3462 					    image + (branches[i] - (u8 *)rw_image), X86_JNE);
3463 		}
3464 	}
3465 
3466 	/* set the "is_return" flag for fsession */
3467 	func_meta |= (1ULL << BPF_TRAMP_IS_RETURN_SHIFT);
3468 	if (bpf_fsession_cnt(tlinks))
3469 		emit_store_stack_imm64(&prog, BPF_REG_0, -func_meta_off, func_meta);
3470 
3471 	if (fexit->nr_links) {
3472 		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, func_meta_off,
3473 			       false, image, rw_image, func_meta, cookie_off)) {
3474 			ret = -EINVAL;
3475 			goto cleanup;
3476 		}
3477 	}
3478 
3479 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
3480 		restore_regs(m, &prog, regs_off);
3481 
3482 	/* This needs to be done regardless. If there were fmod_ret programs,
3483 	 * the return value is only updated on the stack and still needs to be
3484 	 * restored to R0.
3485 	 */
3486 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
3487 		im->ip_epilogue = image + (prog - (u8 *)rw_image);
3488 		/* arg1: mov rdi, im */
3489 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
3490 		if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
3491 			ret = -EINVAL;
3492 			goto cleanup;
3493 		}
3494 	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3495 		/* Before running the original function, load the
3496 		 * tail_call_cnt_ptr from stack to rax.
3497 		 */
3498 		LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
3499 	}
3500 
3501 	/* restore return value of orig_call or fentry prog back into RAX */
3502 	if (save_ret)
3503 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
3504 
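	/* Restore the callee-saved rbx stashed in the prologue. */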
3505 	emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
3506 
3507 	EMIT1(0xC9); /* leave */
3508 	if (im)
3509 		im->ksym.fp_end = prog - (u8 *)rw_image;
3510 
3511 	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
3512 		/* skip our return address and return to parent */
3513 		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
3514 	}
3515 	emit_return(&prog, image + (prog - (u8 *)rw_image));
3516 	/* Make sure the trampoline generation logic doesn't overflow */
3517 	if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
3518 		ret = -EFAULT;
3519 		goto cleanup;
3520 	}
3521 	ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;
3522 
3523 cleanup:
3524 	kfree(branches);
3525 	return ret;
3526 }
3527 
3528 void *arch_alloc_bpf_trampoline(unsigned int size)
3529 {
3530 	return bpf_prog_pack_alloc(size, jit_fill_hole);
3531 }
3532 
3533 void arch_free_bpf_trampoline(void *image, unsigned int size)
3534 {
3535 	bpf_prog_pack_free(image, size);
3536 }
3537 
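/*
 * Trampoline images come from bpf_prog_pack (see arch_alloc_bpf_trampoline()
 * above), whose memory is already read-only and executable, so there is
 * nothing left to protect here.
 */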
3538 int arch_protect_bpf_trampoline(void *image, unsigned int size)
3539 {
3540 	return 0;
3541 }
3542 
3543 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
3544 				const struct btf_func_model *m, u32 flags,
3545 				struct bpf_tramp_links *tlinks,
3546 				void *func_addr)
3547 {
3548 	void *rw_image, *tmp;
3549 	int ret;
3550 	u32 size = image_end - image;
3551 
3552 	/* rw_image doesn't need to be in module memory range, so we can
3553 	 * use kvmalloc.
3554 	 */
3555 	rw_image = kvmalloc(size, GFP_KERNEL);
3556 	if (!rw_image)
3557 		return -ENOMEM;
3558 
3559 	ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
3560 					    flags, tlinks, func_addr);
3561 	if (ret < 0)
3562 		goto out;
3563 
3564 	tmp = bpf_arch_text_copy(image, rw_image, size);
3565 	if (IS_ERR(tmp))
3566 		ret = PTR_ERR(tmp);
3567 out:
3568 	kvfree(rw_image);
3569 	return ret;
3570 }
3571 
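/*
 * Size the trampoline by doing a throw-away generation pass into a temporary
 * buffer; the size returned by __arch_prepare_bpf_trampoline() includes
 * BPF_INSN_SAFETY bytes of slack.
 */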
3572 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
3573 			     struct bpf_tramp_links *tlinks, void *func_addr)
3574 {
3575 	struct bpf_tramp_image im;
3576 	void *image;
3577 	int ret;
3578 
3579 	/* Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
3580 	 * This will NOT cause fragmentation in direct map, as we do not
3581 	 * call set_memory_*() on this buffer.
3582 	 *
3583 	 * We cannot use kvmalloc here, because we need image to be in
3584 	 * module memory range.
3585 	 */
3586 	image = bpf_jit_alloc_exec(PAGE_SIZE);
3587 	if (!image)
3588 		return -ENOMEM;
3589 
3590 	ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
3591 					    m, flags, tlinks, func_addr);
3592 	bpf_jit_free_exec(image);
3593 	return ret;
3594 }
3595 
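/*
 * Emit a binary search over the sorted progs[] addresses, comparing against
 * the target address in rdx. As a rough sketch (addresses shown
 * symbolically), the code emitted for two programs looks like:
 *
 *	cmp  rdx, progs[0]	; pivot
 *	jg   upper
 *	cmp  rdx, progs[0]	; leaf [0, 0]
 *	je   progs[0]
 *	jmp  rdx		; miss: (retpoline-safe) indirect jump
 * upper:			; 16-byte aligned
 *	cmp  rdx, progs[1]	; leaf [1, 1]
 *	je   progs[1]
 *	jmp  rdx
 */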
3596 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
3597 {
3598 	u8 *jg_reloc, *prog = *pprog;
3599 	int pivot, err, jg_bytes = 1;
3600 	s64 jg_offset;
3601 
3602 	if (a == b) {
3603 		/* Leaf node of recursion, i.e. not a range of indices
3604 		 * anymore.
3605 		 */
3606 		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
3607 		if (!is_simm32(progs[a]))
3608 			return -1;
3609 		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
3610 			    progs[a]);
3611 		err = emit_cond_near_jump(&prog,	/* je func */
3612 					  (void *)progs[a], image + (prog - buf),
3613 					  X86_JE);
3614 		if (err)
3615 			return err;
3616 
3617 		emit_indirect_jump(&prog, BPF_REG_3 /* R3 -> rdx */, image + (prog - buf));
3618 
3619 		*pprog = prog;
3620 		return 0;
3621 	}
3622 
3623 	/* Not a leaf node, so we pivot, and recursively descend into
3624 	 * the lower and upper ranges.
3625 	 */
3626 	pivot = (b - a) / 2;
3627 	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
3628 	if (!is_simm32(progs[a + pivot]))
3629 		return -1;
3630 	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
3631 
3632 	if (pivot > 2) {				/* jg upper_part */
3633 		/* Require near jump. */
3634 		jg_bytes = 4;
3635 		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
3636 	} else {
3637 		EMIT2(X86_JG, 0);
3638 	}
3639 	jg_reloc = prog;
3640 
3641 	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
3642 				  progs, image, buf);
3643 	if (err)
3644 		return err;
3645 
3646 	/* From Intel 64 and IA-32 Architectures Optimization
3647 	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3648 	 * Coding Rule 11: All branch targets should be 16-byte
3649 	 * aligned.
3650 	 */
3651 	emit_align(&prog, 16);
3652 	jg_offset = prog - jg_reloc;
3653 	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
3654 
3655 	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
3656 				  b, progs, image, buf);
3657 	if (err)
3658 		return err;
3659 
3660 	*pprog = prog;
3661 	return 0;
3662 }
3663 
3664 static int cmp_ips(const void *a, const void *b)
3665 {
3666 	const s64 *ipa = a;
3667 	const s64 *ipb = b;
3668 
3669 	if (*ipa > *ipb)
3670 		return 1;
3671 	if (*ipa < *ipb)
3672 		return -1;
3673 	return 0;
3674 }
3675 
3676 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
3677 {
3678 	u8 *prog = buf;
3679 
3680 	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
3681 	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
3682 }
3683 
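/*
 * The per-cpu private stack is laid out as [guard | stack | guard], with
 * PRIV_STACK_GUARD_SZ bytes on each side. Only the first u64 of each guard
 * region is stamped here and checked in priv_stack_check_guard() below.
 */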
3684 static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size)
3685 {
3686 	int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
3687 	u64 *stack_ptr;
3688 
3689 	for_each_possible_cpu(cpu) {
3690 		stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
3691 		stack_ptr[0] = PRIV_STACK_GUARD_VAL;
3692 		stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL;
3693 	}
3694 }
3695 
3696 static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size,
3697 				   struct bpf_prog *prog)
3698 {
3699 	int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
3700 	u64 *stack_ptr;
3701 
3702 	for_each_possible_cpu(cpu) {
3703 		stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
3704 		if (stack_ptr[0] != PRIV_STACK_GUARD_VAL ||
3705 		    stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL) {
3706 			pr_err("BPF private stack overflow/underflow detected for prog %s\n",
3707 			       bpf_jit_get_prog_name(prog));
3708 			break;
3709 		}
3710 	}
3711 }
3712 
3713 struct x64_jit_data {
3714 	struct bpf_binary_header *rw_header;
3715 	struct bpf_binary_header *header;
3716 	int *addrs;
3717 	u8 *image;
3718 	int proglen;
3719 	struct jit_context ctx;
3720 };
3721 
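/*
 * Cap the number of convergence passes. From PADDING_PASSES onward, do_jit()
 * pads shrunk jumps with nops so that the image length can no longer
 * oscillate and is guaranteed to converge.
 */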
3722 #define MAX_PASSES 20
3723 #define PADDING_PASSES (MAX_PASSES - 5)
3724 
3725 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
3726 {
3727 	struct bpf_binary_header *rw_header = NULL;
3728 	struct bpf_binary_header *header = NULL;
3729 	struct bpf_prog *tmp, *orig_prog = prog;
3730 	void __percpu *priv_stack_ptr = NULL;
3731 	struct x64_jit_data *jit_data;
3732 	int priv_stack_alloc_sz;
3733 	int proglen, oldproglen = 0;
3734 	struct jit_context ctx = {};
3735 	bool tmp_blinded = false;
3736 	bool extra_pass = false;
3737 	bool padding = false;
3738 	u8 *rw_image = NULL;
3739 	u8 *image = NULL;
3740 	int *addrs;
3741 	int pass;
3742 	int i;
3743 
3744 	if (!prog->jit_requested)
3745 		return orig_prog;
3746 
3747 	tmp = bpf_jit_blind_constants(prog);
3748 	/*
3749 	 * If blinding was requested and we failed during blinding,
3750 	 * we must fall back to the interpreter.
3751 	 */
3752 	if (IS_ERR(tmp))
3753 		return orig_prog;
3754 	if (tmp != prog) {
3755 		tmp_blinded = true;
3756 		prog = tmp;
3757 	}
3758 
3759 	jit_data = prog->aux->jit_data;
3760 	if (!jit_data) {
3761 		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
3762 		if (!jit_data) {
3763 			prog = orig_prog;
3764 			goto out;
3765 		}
3766 		prog->aux->jit_data = jit_data;
3767 	}
3768 	priv_stack_ptr = prog->aux->priv_stack_ptr;
3769 	if (!priv_stack_ptr && prog->aux->jits_use_priv_stack) {
3770 		/* Allocate the actual private stack: the verifier-calculated
3771 		 * stack size plus two memory guards to catch overflow and
3772 		 * underflow.
3773 		 */
3774 		priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) +
3775 				      2 * PRIV_STACK_GUARD_SZ;
3776 		priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 8, GFP_KERNEL);
3777 		if (!priv_stack_ptr) {
3778 			prog = orig_prog;
3779 			goto out_priv_stack;
3780 		}
3781 
3782 		priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz);
3783 		prog->aux->priv_stack_ptr = priv_stack_ptr;
3784 	}
3785 	addrs = jit_data->addrs;
3786 	if (addrs) {
3787 		ctx = jit_data->ctx;
3788 		oldproglen = jit_data->proglen;
3789 		image = jit_data->image;
3790 		header = jit_data->header;
3791 		rw_header = jit_data->rw_header;
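		/*
		 * image sits at the same offset within header as rw_image
		 * does within rw_header, so the RW alias can be recomputed
		 * from the saved pointers.
		 */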
3792 		rw_image = (void *)rw_header + ((void *)image - (void *)header);
3793 		extra_pass = true;
3794 		padding = true;
3795 		goto skip_init_addrs;
3796 	}
3797 	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
3798 	if (!addrs) {
3799 		prog = orig_prog;
3800 		goto out_addrs;
3801 	}
3802 
3803 	/*
3804 	 * Before the first pass, make a rough estimate of addrs[]:
3805 	 * each BPF instruction is translated to fewer than 64 bytes.
3806 	 */
3807 	for (proglen = 0, i = 0; i <= prog->len; i++) {
3808 		proglen += 64;
3809 		addrs[i] = proglen;
3810 	}
3811 	ctx.cleanup_addr = proglen;
3812 skip_init_addrs:
3813 
3814 	/*
3815 	 * The JITed image shrinks with every pass, and the loop iterates
3816 	 * until the image stops shrinking. Very large BPF programs may
3817 	 * converge only on the last pass; in that case, do one more pass
3818 	 * to emit the final image.
3819 	 */
3820 	for (pass = 0; pass < MAX_PASSES || image; pass++) {
3821 		if (!padding && pass >= PADDING_PASSES)
3822 			padding = true;
3823 		proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
3824 		if (proglen <= 0) {
3825 out_image:
3826 			image = NULL;
3827 			if (header) {
3828 				bpf_arch_text_copy(&header->size, &rw_header->size,
3829 						   sizeof(rw_header->size));
3830 				bpf_jit_binary_pack_free(header, rw_header);
3831 			}
3832 			/* Fall back to interpreter mode */
3833 			prog = orig_prog;
3834 			if (extra_pass) {
3835 				prog->bpf_func = NULL;
3836 				prog->jited = 0;
3837 				prog->jited_len = 0;
3838 			}
3839 			goto out_addrs;
3840 		}
3841 		if (image) {
3842 			if (proglen != oldproglen) {
3843 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
3844 				       proglen, oldproglen);
3845 				goto out_image;
3846 			}
3847 			break;
3848 		}
3849 		if (proglen == oldproglen) {
3850 			/*
3851 			 * The number of entries in extable is the number of BPF_LDX
3852 			 * insns that access kernel memory via "pointer to BTF type".
3853 			 * The verifier changed their opcode from LDX|MEM|size
3854 			 * to LDX|PROBE_MEM|size to make JITing easier.
3855 			 */
3856 			u32 align = __alignof__(struct exception_table_entry);
3857 			u32 extable_size = prog->aux->num_exentries *
3858 				sizeof(struct exception_table_entry);
3859 
3860 			/* allocate module memory for x86 insns and extable */
3861 			header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
3862 							   &image, align, &rw_header, &rw_image,
3863 							   jit_fill_hole);
3864 			if (!header) {
3865 				prog = orig_prog;
3866 				goto out_addrs;
3867 			}
3868 			prog->aux->extable = (void *) image + roundup(proglen, align);
3869 		}
3870 		oldproglen = proglen;
3871 		cond_resched();
3872 	}
3873 
3874 	if (bpf_jit_enable > 1)
3875 		bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
3876 
3877 	if (image) {
3878 		if (!prog->is_func || extra_pass) {
3879 			/*
3880 			 * bpf_jit_binary_pack_finalize fails in two scenarios:
3881 			 *   1) header is not pointing to proper module memory;
3882 			 *   2) the arch doesn't support bpf_arch_text_copy().
3883 			 *
3884 			 * Both cases are serious bugs and justify WARN_ON.
3885 			 */
3886 			if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) {
3887 				/* header has been freed */
3888 				header = NULL;
3889 				goto out_image;
3890 			}
3891 
3892 			bpf_tail_call_direct_fixup(prog);
3893 		} else {
3894 			jit_data->addrs = addrs;
3895 			jit_data->ctx = ctx;
3896 			jit_data->proglen = proglen;
3897 			jit_data->image = image;
3898 			jit_data->header = header;
3899 			jit_data->rw_header = rw_header;
3900 		}
3901 
3902 		/*
3903 		 * bpf_prog_update_insn_ptrs() expects each addrs[] entry to
3904 		 * point to the first byte of the jitted instruction (unlike
3905 		 * bpf_prog_fill_jited_linfo() below, which, for historical
3906 		 * reasons, expects it to point to the next instruction).
3907 		 */
3908 		bpf_prog_update_insn_ptrs(prog, addrs, image);
3909 
3910 		/*
3911 		 * ctx.prog_offset is used when CFI preambles put code *before*
3912 		 * the function. See emit_cfi(). For FineIBT specifically this code
3913 		 * can also be executed and bpf_prog_kallsyms_add() will
3914 		 * generate an additional symbol to cover this; hence proglen
3915 		 * is decremented as well.
3916 		 */
3917 		prog->bpf_func = (void *)image + cfi_get_offset();
3918 		prog->jited = 1;
3919 		prog->jited_len = proglen - cfi_get_offset();
3920 	} else {
3921 		prog = orig_prog;
3922 	}
3923 
3924 	if (!image || !prog->is_func || extra_pass) {
3925 		if (image)
3926 			bpf_prog_fill_jited_linfo(prog, addrs + 1);
3927 out_addrs:
3928 		kvfree(addrs);
3929 		if (!image && priv_stack_ptr) {
3930 			free_percpu(priv_stack_ptr);
3931 			prog->aux->priv_stack_ptr = NULL;
3932 		}
3933 out_priv_stack:
3934 		kfree(jit_data);
3935 		prog->aux->jit_data = NULL;
3936 	}
3937 out:
3938 	if (tmp_blinded)
3939 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
3940 					   tmp : orig_prog);
3941 	return prog;
3942 }
3943 
3944 bool bpf_jit_supports_kfunc_call(void)
3945 {
3946 	return true;
3947 }
3948 
3949 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
3950 {
3951 	if (text_poke_copy(dst, src, len) == NULL)
3952 		return ERR_PTR(-EINVAL);
3953 	return dst;
3954 }
3955 
3956 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
3957 bool bpf_jit_supports_subprog_tailcalls(void)
3958 {
3959 	return true;
3960 }
3961 
3962 bool bpf_jit_supports_percpu_insn(void)
3963 {
3964 	return true;
3965 }
3966 
3967 void bpf_jit_free(struct bpf_prog *prog)
3968 {
3969 	if (prog->jited) {
3970 		struct x64_jit_data *jit_data = prog->aux->jit_data;
3971 		struct bpf_binary_header *hdr;
3972 		void __percpu *priv_stack_ptr;
3973 		int priv_stack_alloc_sz;
3974 
3975 		/*
3976 		 * If we fail the final pass of JIT (from jit_subprogs),
3977 		 * the program may not be finalized yet. Call finalize here
3978 		 * before freeing it.
3979 		 */
3980 		if (jit_data) {
3981 			bpf_jit_binary_pack_finalize(jit_data->header,
3982 						     jit_data->rw_header);
3983 			kvfree(jit_data->addrs);
3984 			kfree(jit_data);
3985 		}
3986 		prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset();
3987 		hdr = bpf_jit_binary_pack_hdr(prog);
3988 		bpf_jit_binary_pack_free(hdr, NULL);
3989 		priv_stack_ptr = prog->aux->priv_stack_ptr;
3990 		if (priv_stack_ptr) {
3991 			priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) +
3992 					      2 * PRIV_STACK_GUARD_SZ;
3993 			priv_stack_check_guard(priv_stack_ptr, priv_stack_alloc_sz, prog);
3994 			free_percpu(prog->aux->priv_stack_ptr);
3995 		}
3996 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
3997 	}
3998 
3999 	bpf_prog_unlock_free(prog);
4000 }
4001 
4002 bool bpf_jit_supports_exceptions(void)
4003 {
4004 	/* We unwind through both kernel frames (starting from within bpf_throw
4005 	 * call) and BPF frames. Therefore we require ORC unwinder to be enabled
4006 	 * to walk kernel frames and reach BPF frames in the stack trace.
4007 	 */
4008 	return IS_ENABLED(CONFIG_UNWINDER_ORC);
4009 }
4010 
4011 bool bpf_jit_supports_private_stack(void)
4012 {
4013 	return true;
4014 }
4015 
4016 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
4017 {
4018 #if defined(CONFIG_UNWINDER_ORC)
4019 	struct unwind_state state;
4020 	unsigned long addr;
4021 
4022 	for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
4023 	     unwind_next_frame(&state)) {
4024 		addr = unwind_get_return_address(&state);
4025 		if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
4026 			break;
4027 	}
4028 	return;
4029 #endif
4030 }
4031 
4032 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
4033 			       struct bpf_prog *new, struct bpf_prog *old)
4034 {
4035 	u8 *old_addr, *new_addr, *old_bypass_addr;
4036 	enum bpf_text_poke_type t;
4037 	int ret;
4038 
4039 	old_bypass_addr = old ? NULL : poke->bypass_addr;
4040 	old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
4041 	new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
4042 
4043 	/*
4044 	 * On program loading or teardown, the program's kallsym entry
4045 	 * might not be in place, so we use __bpf_arch_text_poke to skip
4046 	 * the kallsyms check.
4047 	 */
4048 	if (new) {
4049 		t = old_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
4050 		ret = __bpf_arch_text_poke(poke->tailcall_target,
4051 					   t, BPF_MOD_JUMP,
4052 					   old_addr, new_addr);
4053 		BUG_ON(ret < 0);
4054 		if (!old) {
4055 			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
4056 						   BPF_MOD_JUMP, BPF_MOD_NOP,
4057 						   poke->bypass_addr,
4058 						   NULL);
4059 			BUG_ON(ret < 0);
4060 		}
4061 	} else {
4062 		t = old_bypass_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
4063 		ret = __bpf_arch_text_poke(poke->tailcall_bypass,
4064 					   t, BPF_MOD_JUMP, old_bypass_addr,
4065 					   poke->bypass_addr);
4066 		BUG_ON(ret < 0);
4067 		/* Let other CPUs finish executing the program so that
4068 		 * they cannot be exposed to an invalid intermediate
4069 		 * nop / stack-unwind / nop state.
4070 		 */
4071 		if (!ret)
4072 			synchronize_rcu();
4073 		t = old_addr ? BPF_MOD_JUMP : BPF_MOD_NOP;
4074 		ret = __bpf_arch_text_poke(poke->tailcall_target,
4075 					   t, BPF_MOD_NOP, old_addr, NULL);
4076 		BUG_ON(ret < 0);
4077 	}
4078 }
4079 
4080 bool bpf_jit_supports_arena(void)
4081 {
4082 	return true;
4083 }
4084 
4085 bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
4086 {
4087 	if (!in_arena)
4088 		return true;
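	/*
	 * and/or/xor with BPF_FETCH have no single x86 instruction and are
	 * JITed as a cmpxchg loop, which presumably is why they are not
	 * supported for arena accesses.
	 */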
4089 	switch (insn->code) {
4090 	case BPF_STX | BPF_ATOMIC | BPF_W:
4091 	case BPF_STX | BPF_ATOMIC | BPF_DW:
4092 		if (insn->imm == (BPF_AND | BPF_FETCH) ||
4093 		    insn->imm == (BPF_OR | BPF_FETCH) ||
4094 		    insn->imm == (BPF_XOR | BPF_FETCH))
4095 			return false;
4096 	}
4097 	return true;
4098 }
4099 
4100 bool bpf_jit_supports_ptr_xchg(void)
4101 {
4102 	return true;
4103 }
4104 
4105 /* x86-64 JIT emits its own code to filter user addresses so return 0 here */
4106 u64 bpf_arch_uaddress_limit(void)
4107 {
4108 	return 0;
4109 }
4110 
4111 bool bpf_jit_supports_timed_may_goto(void)
4112 {
4113 	return true;
4114 }
4115 
4116 bool bpf_jit_supports_fsession(void)
4117 {
4118 	return true;
4119 }
4120