xref: /linux/arch/x86/net/bpf_jit_comp32.c (revision 1f2367a39f17bd553a75e179a747f9b257bc9478)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Just-In-Time compiler for eBPF filters on IA32 (32bit x86)
4  *
5  * Author: Wang YanQing (udknight@gmail.com)
6  * The code based on code and ideas from:
7  * Eric Dumazet (eric.dumazet@gmail.com)
8  * and from:
9  * Shubham Bansal <illusionist.neo@gmail.com>
10  */
11 
12 #include <linux/netdevice.h>
13 #include <linux/filter.h>
14 #include <linux/if_vlan.h>
15 #include <asm/cacheflush.h>
16 #include <asm/set_memory.h>
17 #include <asm/nospec-branch.h>
18 #include <linux/bpf.h>
19 
20 /*
21  * eBPF prog stack layout:
22  *
23  *                         high
24  * original ESP =>        +-----+
25  *                        |     | callee saved registers
26  *                        +-----+
27  *                        | ... | eBPF JIT scratch space
28  * BPF_FP,IA32_EBP  =>    +-----+
29  *                        | ... | eBPF prog stack
30  *                        +-----+
31  *                        |RSVD | JIT scratchpad
32  * current ESP =>         +-----+
33  *                        |     |
34  *                        | ... | Function call stack
35  *                        |     |
36  *                        +-----+
37  *                          low
38  *
39  * The callee saved registers:
40  *
41  *                                high
42  * original ESP =>        +------------------+ \
43  *                        |        ebp       | |
44  * current EBP =>         +------------------+ } callee saved registers
45  *                        |    ebx,esi,edi   | |
46  *                        +------------------+ /
47  *                                low
48  */
49 
50 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
51 {
52 	if (len == 1)
53 		*ptr = bytes;
54 	else if (len == 2)
55 		*(u16 *)ptr = bytes;
56 	else {
57 		*(u32 *)ptr = bytes;
58 		barrier();
59 	}
60 	return ptr + len;
61 }
62 
63 #define EMIT(bytes, len) \
64 	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
65 
66 #define EMIT1(b1)		EMIT(b1, 1)
67 #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
68 #define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
69 #define EMIT4(b1, b2, b3, b4)   \
70 	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
71 
72 #define EMIT1_off32(b1, off) \
73 	do { EMIT1(b1); EMIT(off, 4); } while (0)
74 #define EMIT2_off32(b1, b2, off) \
75 	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
76 #define EMIT3_off32(b1, b2, b3, off) \
77 	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
78 #define EMIT4_off32(b1, b2, b3, b4, off) \
79 	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
80 
81 #define jmp_label(label, jmp_insn_len) (label - cnt - jmp_insn_len)
82 
83 static bool is_imm8(int value)
84 {
85 	return value <= 127 && value >= -128;
86 }
87 
88 static bool is_simm32(s64 value)
89 {
90 	return value == (s64) (s32) value;
91 }
92 
93 #define STACK_OFFSET(k)	(k)
94 #define TCALL_CNT	(MAX_BPF_JIT_REG + 0)	/* Tail Call Count */
95 
96 #define IA32_EAX	(0x0)
97 #define IA32_EBX	(0x3)
98 #define IA32_ECX	(0x1)
99 #define IA32_EDX	(0x2)
100 #define IA32_ESI	(0x6)
101 #define IA32_EDI	(0x7)
102 #define IA32_EBP	(0x5)
103 #define IA32_ESP	(0x4)
104 
105 /*
106  * List of x86 cond jumps opcodes (. + s8)
107  * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
108  */
109 #define IA32_JB  0x72
110 #define IA32_JAE 0x73
111 #define IA32_JE  0x74
112 #define IA32_JNE 0x75
113 #define IA32_JBE 0x76
114 #define IA32_JA  0x77
115 #define IA32_JL  0x7C
116 #define IA32_JGE 0x7D
117 #define IA32_JLE 0x7E
118 #define IA32_JG  0x7F
119 
120 /*
121  * Map eBPF registers to IA32 32bit registers or stack scratch space.
122  *
123  * 1. All the registers, R0-R10, are mapped to scratch space on stack.
124  * 2. We need two 64 bit temp registers to do complex operations on eBPF
125  *    registers.
126  * 3. For performance reason, the BPF_REG_AX for blinding constant, is
127  *    mapped to real hardware register pair, IA32_ESI and IA32_EDI.
128  *
129  * As the eBPF registers are all 64 bit registers and IA32 has only 32 bit
130  * registers, we have to map each eBPF registers with two IA32 32 bit regs
131  * or scratch memory space and we have to build eBPF 64 bit register from those.
132  *
133  * We use IA32_EAX, IA32_EDX, IA32_ECX, IA32_EBX as temporary registers.
134  */
135 static const u8 bpf2ia32[][2] = {
136 	/* Return value from in-kernel function, and exit value from eBPF */
137 	[BPF_REG_0] = {STACK_OFFSET(0), STACK_OFFSET(4)},
138 
139 	/* The arguments from eBPF program to in-kernel function */
140 	/* Stored on stack scratch space */
141 	[BPF_REG_1] = {STACK_OFFSET(8), STACK_OFFSET(12)},
142 	[BPF_REG_2] = {STACK_OFFSET(16), STACK_OFFSET(20)},
143 	[BPF_REG_3] = {STACK_OFFSET(24), STACK_OFFSET(28)},
144 	[BPF_REG_4] = {STACK_OFFSET(32), STACK_OFFSET(36)},
145 	[BPF_REG_5] = {STACK_OFFSET(40), STACK_OFFSET(44)},
146 
147 	/* Callee saved registers that in-kernel function will preserve */
148 	/* Stored on stack scratch space */
149 	[BPF_REG_6] = {STACK_OFFSET(48), STACK_OFFSET(52)},
150 	[BPF_REG_7] = {STACK_OFFSET(56), STACK_OFFSET(60)},
151 	[BPF_REG_8] = {STACK_OFFSET(64), STACK_OFFSET(68)},
152 	[BPF_REG_9] = {STACK_OFFSET(72), STACK_OFFSET(76)},
153 
154 	/* Read only Frame Pointer to access Stack */
155 	[BPF_REG_FP] = {STACK_OFFSET(80), STACK_OFFSET(84)},
156 
157 	/* Temporary register for blinding constants. */
158 	[BPF_REG_AX] = {IA32_ESI, IA32_EDI},
159 
160 	/* Tail call count. Stored on stack scratch space. */
161 	[TCALL_CNT] = {STACK_OFFSET(88), STACK_OFFSET(92)},
162 };
163 
164 #define dst_lo	dst[0]
165 #define dst_hi	dst[1]
166 #define src_lo	src[0]
167 #define src_hi	src[1]
168 
169 #define STACK_ALIGNMENT	8
170 /*
171  * Stack space for BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4,
172  * BPF_REG_5, BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9,
173  * BPF_REG_FP, BPF_REG_AX and Tail call counts.
174  */
175 #define SCRATCH_SIZE 96
176 
177 /* Total stack size used in JITed code */
178 #define _STACK_SIZE	(stack_depth + SCRATCH_SIZE)
179 
180 #define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
181 
182 /* Get the offset of eBPF REGISTERs stored on scratch space. */
183 #define STACK_VAR(off) (off)
184 
185 /* Encode 'dst_reg' register into IA32 opcode 'byte' */
186 static u8 add_1reg(u8 byte, u32 dst_reg)
187 {
188 	return byte + dst_reg;
189 }
190 
191 /* Encode 'dst_reg' and 'src_reg' registers into IA32 opcode 'byte' */
192 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
193 {
194 	return byte + dst_reg + (src_reg << 3);
195 }
196 
197 static void jit_fill_hole(void *area, unsigned int size)
198 {
199 	/* Fill whole space with int3 instructions */
200 	memset(area, 0xcc, size);
201 }
202 
203 static inline void emit_ia32_mov_i(const u8 dst, const u32 val, bool dstk,
204 				   u8 **pprog)
205 {
206 	u8 *prog = *pprog;
207 	int cnt = 0;
208 
209 	if (dstk) {
210 		if (val == 0) {
211 			/* xor eax,eax */
212 			EMIT2(0x33, add_2reg(0xC0, IA32_EAX, IA32_EAX));
213 			/* mov dword ptr [ebp+off],eax */
214 			EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
215 			      STACK_VAR(dst));
216 		} else {
217 			EMIT3_off32(0xC7, add_1reg(0x40, IA32_EBP),
218 				    STACK_VAR(dst), val);
219 		}
220 	} else {
221 		if (val == 0)
222 			EMIT2(0x33, add_2reg(0xC0, dst, dst));
223 		else
224 			EMIT2_off32(0xC7, add_1reg(0xC0, dst),
225 				    val);
226 	}
227 	*pprog = prog;
228 }
229 
230 /* dst = imm (4 bytes)*/
231 static inline void emit_ia32_mov_r(const u8 dst, const u8 src, bool dstk,
232 				   bool sstk, u8 **pprog)
233 {
234 	u8 *prog = *pprog;
235 	int cnt = 0;
236 	u8 sreg = sstk ? IA32_EAX : src;
237 
238 	if (sstk)
239 		/* mov eax,dword ptr [ebp+off] */
240 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(src));
241 	if (dstk)
242 		/* mov dword ptr [ebp+off],eax */
243 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, sreg), STACK_VAR(dst));
244 	else
245 		/* mov dst,sreg */
246 		EMIT2(0x89, add_2reg(0xC0, dst, sreg));
247 
248 	*pprog = prog;
249 }
250 
251 /* dst = src */
252 static inline void emit_ia32_mov_r64(const bool is64, const u8 dst[],
253 				     const u8 src[], bool dstk,
254 				     bool sstk, u8 **pprog)
255 {
256 	emit_ia32_mov_r(dst_lo, src_lo, dstk, sstk, pprog);
257 	if (is64)
258 		/* complete 8 byte move */
259 		emit_ia32_mov_r(dst_hi, src_hi, dstk, sstk, pprog);
260 	else
261 		/* zero out high 4 bytes */
262 		emit_ia32_mov_i(dst_hi, 0, dstk, pprog);
263 }
264 
265 /* Sign extended move */
266 static inline void emit_ia32_mov_i64(const bool is64, const u8 dst[],
267 				     const u32 val, bool dstk, u8 **pprog)
268 {
269 	u32 hi = 0;
270 
271 	if (is64 && (val & (1<<31)))
272 		hi = (u32)~0;
273 	emit_ia32_mov_i(dst_lo, val, dstk, pprog);
274 	emit_ia32_mov_i(dst_hi, hi, dstk, pprog);
275 }
276 
277 /*
278  * ALU operation (32 bit)
279  * dst = dst * src
280  */
281 static inline void emit_ia32_mul_r(const u8 dst, const u8 src, bool dstk,
282 				   bool sstk, u8 **pprog)
283 {
284 	u8 *prog = *pprog;
285 	int cnt = 0;
286 	u8 sreg = sstk ? IA32_ECX : src;
287 
288 	if (sstk)
289 		/* mov ecx,dword ptr [ebp+off] */
290 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src));
291 
292 	if (dstk)
293 		/* mov eax,dword ptr [ebp+off] */
294 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst));
295 	else
296 		/* mov eax,dst */
297 		EMIT2(0x8B, add_2reg(0xC0, dst, IA32_EAX));
298 
299 
300 	EMIT2(0xF7, add_1reg(0xE0, sreg));
301 
302 	if (dstk)
303 		/* mov dword ptr [ebp+off],eax */
304 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
305 		      STACK_VAR(dst));
306 	else
307 		/* mov dst,eax */
308 		EMIT2(0x89, add_2reg(0xC0, dst, IA32_EAX));
309 
310 	*pprog = prog;
311 }
312 
313 static inline void emit_ia32_to_le_r64(const u8 dst[], s32 val,
314 					 bool dstk, u8 **pprog)
315 {
316 	u8 *prog = *pprog;
317 	int cnt = 0;
318 	u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
319 	u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
320 
321 	if (dstk && val != 64) {
322 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
323 		      STACK_VAR(dst_lo));
324 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
325 		      STACK_VAR(dst_hi));
326 	}
327 	switch (val) {
328 	case 16:
329 		/*
330 		 * Emit 'movzwl eax,ax' to zero extend 16-bit
331 		 * into 64 bit
332 		 */
333 		EMIT2(0x0F, 0xB7);
334 		EMIT1(add_2reg(0xC0, dreg_lo, dreg_lo));
335 		/* xor dreg_hi,dreg_hi */
336 		EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
337 		break;
338 	case 32:
339 		/* xor dreg_hi,dreg_hi */
340 		EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
341 		break;
342 	case 64:
343 		/* nop */
344 		break;
345 	}
346 
347 	if (dstk && val != 64) {
348 		/* mov dword ptr [ebp+off],dreg_lo */
349 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
350 		      STACK_VAR(dst_lo));
351 		/* mov dword ptr [ebp+off],dreg_hi */
352 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
353 		      STACK_VAR(dst_hi));
354 	}
355 	*pprog = prog;
356 }
357 
358 static inline void emit_ia32_to_be_r64(const u8 dst[], s32 val,
359 				       bool dstk, u8 **pprog)
360 {
361 	u8 *prog = *pprog;
362 	int cnt = 0;
363 	u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
364 	u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
365 
366 	if (dstk) {
367 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
368 		      STACK_VAR(dst_lo));
369 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
370 		      STACK_VAR(dst_hi));
371 	}
372 	switch (val) {
373 	case 16:
374 		/* Emit 'ror %ax, 8' to swap lower 2 bytes */
375 		EMIT1(0x66);
376 		EMIT3(0xC1, add_1reg(0xC8, dreg_lo), 8);
377 
378 		EMIT2(0x0F, 0xB7);
379 		EMIT1(add_2reg(0xC0, dreg_lo, dreg_lo));
380 
381 		/* xor dreg_hi,dreg_hi */
382 		EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
383 		break;
384 	case 32:
385 		/* Emit 'bswap eax' to swap lower 4 bytes */
386 		EMIT1(0x0F);
387 		EMIT1(add_1reg(0xC8, dreg_lo));
388 
389 		/* xor dreg_hi,dreg_hi */
390 		EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
391 		break;
392 	case 64:
393 		/* Emit 'bswap eax' to swap lower 4 bytes */
394 		EMIT1(0x0F);
395 		EMIT1(add_1reg(0xC8, dreg_lo));
396 
397 		/* Emit 'bswap edx' to swap lower 4 bytes */
398 		EMIT1(0x0F);
399 		EMIT1(add_1reg(0xC8, dreg_hi));
400 
401 		/* mov ecx,dreg_hi */
402 		EMIT2(0x89, add_2reg(0xC0, IA32_ECX, dreg_hi));
403 		/* mov dreg_hi,dreg_lo */
404 		EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
405 		/* mov dreg_lo,ecx */
406 		EMIT2(0x89, add_2reg(0xC0, dreg_lo, IA32_ECX));
407 
408 		break;
409 	}
410 	if (dstk) {
411 		/* mov dword ptr [ebp+off],dreg_lo */
412 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
413 		      STACK_VAR(dst_lo));
414 		/* mov dword ptr [ebp+off],dreg_hi */
415 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
416 		      STACK_VAR(dst_hi));
417 	}
418 	*pprog = prog;
419 }
420 
421 /*
422  * ALU operation (32 bit)
423  * dst = dst (div|mod) src
424  */
425 static inline void emit_ia32_div_mod_r(const u8 op, const u8 dst, const u8 src,
426 				       bool dstk, bool sstk, u8 **pprog)
427 {
428 	u8 *prog = *pprog;
429 	int cnt = 0;
430 
431 	if (sstk)
432 		/* mov ecx,dword ptr [ebp+off] */
433 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
434 		      STACK_VAR(src));
435 	else if (src != IA32_ECX)
436 		/* mov ecx,src */
437 		EMIT2(0x8B, add_2reg(0xC0, src, IA32_ECX));
438 
439 	if (dstk)
440 		/* mov eax,dword ptr [ebp+off] */
441 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
442 		      STACK_VAR(dst));
443 	else
444 		/* mov eax,dst */
445 		EMIT2(0x8B, add_2reg(0xC0, dst, IA32_EAX));
446 
447 	/* xor edx,edx */
448 	EMIT2(0x31, add_2reg(0xC0, IA32_EDX, IA32_EDX));
449 	/* div ecx */
450 	EMIT2(0xF7, add_1reg(0xF0, IA32_ECX));
451 
452 	if (op == BPF_MOD) {
453 		if (dstk)
454 			EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX),
455 			      STACK_VAR(dst));
456 		else
457 			EMIT2(0x89, add_2reg(0xC0, dst, IA32_EDX));
458 	} else {
459 		if (dstk)
460 			EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
461 			      STACK_VAR(dst));
462 		else
463 			EMIT2(0x89, add_2reg(0xC0, dst, IA32_EAX));
464 	}
465 	*pprog = prog;
466 }
467 
468 /*
469  * ALU operation (32 bit)
470  * dst = dst (shift) src
471  */
472 static inline void emit_ia32_shift_r(const u8 op, const u8 dst, const u8 src,
473 				     bool dstk, bool sstk, u8 **pprog)
474 {
475 	u8 *prog = *pprog;
476 	int cnt = 0;
477 	u8 dreg = dstk ? IA32_EAX : dst;
478 	u8 b2;
479 
480 	if (dstk)
481 		/* mov eax,dword ptr [ebp+off] */
482 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst));
483 
484 	if (sstk)
485 		/* mov ecx,dword ptr [ebp+off] */
486 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src));
487 	else if (src != IA32_ECX)
488 		/* mov ecx,src */
489 		EMIT2(0x8B, add_2reg(0xC0, src, IA32_ECX));
490 
491 	switch (op) {
492 	case BPF_LSH:
493 		b2 = 0xE0; break;
494 	case BPF_RSH:
495 		b2 = 0xE8; break;
496 	case BPF_ARSH:
497 		b2 = 0xF8; break;
498 	default:
499 		return;
500 	}
501 	EMIT2(0xD3, add_1reg(b2, dreg));
502 
503 	if (dstk)
504 		/* mov dword ptr [ebp+off],dreg */
505 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg), STACK_VAR(dst));
506 	*pprog = prog;
507 }
508 
509 /*
510  * ALU operation (32 bit)
511  * dst = dst (op) src
512  */
513 static inline void emit_ia32_alu_r(const bool is64, const bool hi, const u8 op,
514 				   const u8 dst, const u8 src, bool dstk,
515 				   bool sstk, u8 **pprog)
516 {
517 	u8 *prog = *pprog;
518 	int cnt = 0;
519 	u8 sreg = sstk ? IA32_EAX : src;
520 	u8 dreg = dstk ? IA32_EDX : dst;
521 
522 	if (sstk)
523 		/* mov eax,dword ptr [ebp+off] */
524 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(src));
525 
526 	if (dstk)
527 		/* mov eax,dword ptr [ebp+off] */
528 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(dst));
529 
530 	switch (BPF_OP(op)) {
531 	/* dst = dst + src */
532 	case BPF_ADD:
533 		if (hi && is64)
534 			EMIT2(0x11, add_2reg(0xC0, dreg, sreg));
535 		else
536 			EMIT2(0x01, add_2reg(0xC0, dreg, sreg));
537 		break;
538 	/* dst = dst - src */
539 	case BPF_SUB:
540 		if (hi && is64)
541 			EMIT2(0x19, add_2reg(0xC0, dreg, sreg));
542 		else
543 			EMIT2(0x29, add_2reg(0xC0, dreg, sreg));
544 		break;
545 	/* dst = dst | src */
546 	case BPF_OR:
547 		EMIT2(0x09, add_2reg(0xC0, dreg, sreg));
548 		break;
549 	/* dst = dst & src */
550 	case BPF_AND:
551 		EMIT2(0x21, add_2reg(0xC0, dreg, sreg));
552 		break;
553 	/* dst = dst ^ src */
554 	case BPF_XOR:
555 		EMIT2(0x31, add_2reg(0xC0, dreg, sreg));
556 		break;
557 	}
558 
559 	if (dstk)
560 		/* mov dword ptr [ebp+off],dreg */
561 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg),
562 		      STACK_VAR(dst));
563 	*pprog = prog;
564 }
565 
566 /* ALU operation (64 bit) */
567 static inline void emit_ia32_alu_r64(const bool is64, const u8 op,
568 				     const u8 dst[], const u8 src[],
569 				     bool dstk,  bool sstk,
570 				     u8 **pprog)
571 {
572 	u8 *prog = *pprog;
573 
574 	emit_ia32_alu_r(is64, false, op, dst_lo, src_lo, dstk, sstk, &prog);
575 	if (is64)
576 		emit_ia32_alu_r(is64, true, op, dst_hi, src_hi, dstk, sstk,
577 				&prog);
578 	else
579 		emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
580 	*pprog = prog;
581 }
582 
583 /*
584  * ALU operation (32 bit)
585  * dst = dst (op) val
586  */
587 static inline void emit_ia32_alu_i(const bool is64, const bool hi, const u8 op,
588 				   const u8 dst, const s32 val, bool dstk,
589 				   u8 **pprog)
590 {
591 	u8 *prog = *pprog;
592 	int cnt = 0;
593 	u8 dreg = dstk ? IA32_EAX : dst;
594 	u8 sreg = IA32_EDX;
595 
596 	if (dstk)
597 		/* mov eax,dword ptr [ebp+off] */
598 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst));
599 
600 	if (!is_imm8(val))
601 		/* mov edx,imm32*/
602 		EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EDX), val);
603 
604 	switch (op) {
605 	/* dst = dst + val */
606 	case BPF_ADD:
607 		if (hi && is64) {
608 			if (is_imm8(val))
609 				EMIT3(0x83, add_1reg(0xD0, dreg), val);
610 			else
611 				EMIT2(0x11, add_2reg(0xC0, dreg, sreg));
612 		} else {
613 			if (is_imm8(val))
614 				EMIT3(0x83, add_1reg(0xC0, dreg), val);
615 			else
616 				EMIT2(0x01, add_2reg(0xC0, dreg, sreg));
617 		}
618 		break;
619 	/* dst = dst - val */
620 	case BPF_SUB:
621 		if (hi && is64) {
622 			if (is_imm8(val))
623 				EMIT3(0x83, add_1reg(0xD8, dreg), val);
624 			else
625 				EMIT2(0x19, add_2reg(0xC0, dreg, sreg));
626 		} else {
627 			if (is_imm8(val))
628 				EMIT3(0x83, add_1reg(0xE8, dreg), val);
629 			else
630 				EMIT2(0x29, add_2reg(0xC0, dreg, sreg));
631 		}
632 		break;
633 	/* dst = dst | val */
634 	case BPF_OR:
635 		if (is_imm8(val))
636 			EMIT3(0x83, add_1reg(0xC8, dreg), val);
637 		else
638 			EMIT2(0x09, add_2reg(0xC0, dreg, sreg));
639 		break;
640 	/* dst = dst & val */
641 	case BPF_AND:
642 		if (is_imm8(val))
643 			EMIT3(0x83, add_1reg(0xE0, dreg), val);
644 		else
645 			EMIT2(0x21, add_2reg(0xC0, dreg, sreg));
646 		break;
647 	/* dst = dst ^ val */
648 	case BPF_XOR:
649 		if (is_imm8(val))
650 			EMIT3(0x83, add_1reg(0xF0, dreg), val);
651 		else
652 			EMIT2(0x31, add_2reg(0xC0, dreg, sreg));
653 		break;
654 	case BPF_NEG:
655 		EMIT2(0xF7, add_1reg(0xD8, dreg));
656 		break;
657 	}
658 
659 	if (dstk)
660 		/* mov dword ptr [ebp+off],dreg */
661 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg),
662 		      STACK_VAR(dst));
663 	*pprog = prog;
664 }
665 
666 /* ALU operation (64 bit) */
667 static inline void emit_ia32_alu_i64(const bool is64, const u8 op,
668 				     const u8 dst[], const u32 val,
669 				     bool dstk, u8 **pprog)
670 {
671 	u8 *prog = *pprog;
672 	u32 hi = 0;
673 
674 	if (is64 && (val & (1<<31)))
675 		hi = (u32)~0;
676 
677 	emit_ia32_alu_i(is64, false, op, dst_lo, val, dstk, &prog);
678 	if (is64)
679 		emit_ia32_alu_i(is64, true, op, dst_hi, hi, dstk, &prog);
680 	else
681 		emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
682 
683 	*pprog = prog;
684 }
685 
686 /* dst = ~dst (64 bit) */
687 static inline void emit_ia32_neg64(const u8 dst[], bool dstk, u8 **pprog)
688 {
689 	u8 *prog = *pprog;
690 	int cnt = 0;
691 	u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
692 	u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
693 
694 	if (dstk) {
695 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
696 		      STACK_VAR(dst_lo));
697 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
698 		      STACK_VAR(dst_hi));
699 	}
700 
701 	/* xor ecx,ecx */
702 	EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX));
703 	/* sub dreg_lo,ecx */
704 	EMIT2(0x2B, add_2reg(0xC0, dreg_lo, IA32_ECX));
705 	/* mov dreg_lo,ecx */
706 	EMIT2(0x89, add_2reg(0xC0, dreg_lo, IA32_ECX));
707 
708 	/* xor ecx,ecx */
709 	EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX));
710 	/* sbb dreg_hi,ecx */
711 	EMIT2(0x19, add_2reg(0xC0, dreg_hi, IA32_ECX));
712 	/* mov dreg_hi,ecx */
713 	EMIT2(0x89, add_2reg(0xC0, dreg_hi, IA32_ECX));
714 
715 	if (dstk) {
716 		/* mov dword ptr [ebp+off],dreg_lo */
717 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
718 		      STACK_VAR(dst_lo));
719 		/* mov dword ptr [ebp+off],dreg_hi */
720 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
721 		      STACK_VAR(dst_hi));
722 	}
723 	*pprog = prog;
724 }
725 
726 /* dst = dst << src */
727 static inline void emit_ia32_lsh_r64(const u8 dst[], const u8 src[],
728 				     bool dstk, bool sstk, u8 **pprog)
729 {
730 	u8 *prog = *pprog;
731 	int cnt = 0;
732 	static int jmp_label1 = -1;
733 	static int jmp_label2 = -1;
734 	static int jmp_label3 = -1;
735 	u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
736 	u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
737 
738 	if (dstk) {
739 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
740 		      STACK_VAR(dst_lo));
741 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
742 		      STACK_VAR(dst_hi));
743 	}
744 
745 	if (sstk)
746 		/* mov ecx,dword ptr [ebp+off] */
747 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
748 		      STACK_VAR(src_lo));
749 	else
750 		/* mov ecx,src_lo */
751 		EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
752 
753 	/* cmp ecx,32 */
754 	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
755 	/* Jumps when >= 32 */
756 	if (is_imm8(jmp_label(jmp_label1, 2)))
757 		EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
758 	else
759 		EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
760 
761 	/* < 32 */
762 	/* shl dreg_hi,cl */
763 	EMIT2(0xD3, add_1reg(0xE0, dreg_hi));
764 	/* mov ebx,dreg_lo */
765 	EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
766 	/* shl dreg_lo,cl */
767 	EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
768 
769 	/* IA32_ECX = -IA32_ECX + 32 */
770 	/* neg ecx */
771 	EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
772 	/* add ecx,32 */
773 	EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
774 
775 	/* shr ebx,cl */
776 	EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
777 	/* or dreg_hi,ebx */
778 	EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
779 
780 	/* goto out; */
781 	if (is_imm8(jmp_label(jmp_label3, 2)))
782 		EMIT2(0xEB, jmp_label(jmp_label3, 2));
783 	else
784 		EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
785 
786 	/* >= 32 */
787 	if (jmp_label1 == -1)
788 		jmp_label1 = cnt;
789 
790 	/* cmp ecx,64 */
791 	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
792 	/* Jumps when >= 64 */
793 	if (is_imm8(jmp_label(jmp_label2, 2)))
794 		EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
795 	else
796 		EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
797 
798 	/* >= 32 && < 64 */
799 	/* sub ecx,32 */
800 	EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
801 	/* shl dreg_lo,cl */
802 	EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
803 	/* mov dreg_hi,dreg_lo */
804 	EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
805 
806 	/* xor dreg_lo,dreg_lo */
807 	EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
808 
809 	/* goto out; */
810 	if (is_imm8(jmp_label(jmp_label3, 2)))
811 		EMIT2(0xEB, jmp_label(jmp_label3, 2));
812 	else
813 		EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
814 
815 	/* >= 64 */
816 	if (jmp_label2 == -1)
817 		jmp_label2 = cnt;
818 	/* xor dreg_lo,dreg_lo */
819 	EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
820 	/* xor dreg_hi,dreg_hi */
821 	EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
822 
823 	if (jmp_label3 == -1)
824 		jmp_label3 = cnt;
825 
826 	if (dstk) {
827 		/* mov dword ptr [ebp+off],dreg_lo */
828 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
829 		      STACK_VAR(dst_lo));
830 		/* mov dword ptr [ebp+off],dreg_hi */
831 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
832 		      STACK_VAR(dst_hi));
833 	}
834 	/* out: */
835 	*pprog = prog;
836 }
837 
838 /* dst = dst >> src (signed)*/
839 static inline void emit_ia32_arsh_r64(const u8 dst[], const u8 src[],
840 				      bool dstk, bool sstk, u8 **pprog)
841 {
842 	u8 *prog = *pprog;
843 	int cnt = 0;
844 	static int jmp_label1 = -1;
845 	static int jmp_label2 = -1;
846 	static int jmp_label3 = -1;
847 	u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
848 	u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
849 
850 	if (dstk) {
851 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
852 		      STACK_VAR(dst_lo));
853 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
854 		      STACK_VAR(dst_hi));
855 	}
856 
857 	if (sstk)
858 		/* mov ecx,dword ptr [ebp+off] */
859 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
860 		      STACK_VAR(src_lo));
861 	else
862 		/* mov ecx,src_lo */
863 		EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
864 
865 	/* cmp ecx,32 */
866 	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
867 	/* Jumps when >= 32 */
868 	if (is_imm8(jmp_label(jmp_label1, 2)))
869 		EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
870 	else
871 		EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
872 
873 	/* < 32 */
874 	/* lshr dreg_lo,cl */
875 	EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
876 	/* mov ebx,dreg_hi */
877 	EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
878 	/* ashr dreg_hi,cl */
879 	EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
880 
881 	/* IA32_ECX = -IA32_ECX + 32 */
882 	/* neg ecx */
883 	EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
884 	/* add ecx,32 */
885 	EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
886 
887 	/* shl ebx,cl */
888 	EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
889 	/* or dreg_lo,ebx */
890 	EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
891 
892 	/* goto out; */
893 	if (is_imm8(jmp_label(jmp_label3, 2)))
894 		EMIT2(0xEB, jmp_label(jmp_label3, 2));
895 	else
896 		EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
897 
898 	/* >= 32 */
899 	if (jmp_label1 == -1)
900 		jmp_label1 = cnt;
901 
902 	/* cmp ecx,64 */
903 	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
904 	/* Jumps when >= 64 */
905 	if (is_imm8(jmp_label(jmp_label2, 2)))
906 		EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
907 	else
908 		EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
909 
910 	/* >= 32 && < 64 */
911 	/* sub ecx,32 */
912 	EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
913 	/* ashr dreg_hi,cl */
914 	EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
915 	/* mov dreg_lo,dreg_hi */
916 	EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
917 
918 	/* ashr dreg_hi,imm8 */
919 	EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
920 
921 	/* goto out; */
922 	if (is_imm8(jmp_label(jmp_label3, 2)))
923 		EMIT2(0xEB, jmp_label(jmp_label3, 2));
924 	else
925 		EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
926 
927 	/* >= 64 */
928 	if (jmp_label2 == -1)
929 		jmp_label2 = cnt;
930 	/* ashr dreg_hi,imm8 */
931 	EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
932 	/* mov dreg_lo,dreg_hi */
933 	EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
934 
935 	if (jmp_label3 == -1)
936 		jmp_label3 = cnt;
937 
938 	if (dstk) {
939 		/* mov dword ptr [ebp+off],dreg_lo */
940 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
941 		      STACK_VAR(dst_lo));
942 		/* mov dword ptr [ebp+off],dreg_hi */
943 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
944 		      STACK_VAR(dst_hi));
945 	}
946 	/* out: */
947 	*pprog = prog;
948 }
949 
950 /* dst = dst >> src */
951 static inline void emit_ia32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
952 				     bool sstk, u8 **pprog)
953 {
954 	u8 *prog = *pprog;
955 	int cnt = 0;
956 	static int jmp_label1 = -1;
957 	static int jmp_label2 = -1;
958 	static int jmp_label3 = -1;
959 	u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
960 	u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
961 
962 	if (dstk) {
963 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
964 		      STACK_VAR(dst_lo));
965 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
966 		      STACK_VAR(dst_hi));
967 	}
968 
969 	if (sstk)
970 		/* mov ecx,dword ptr [ebp+off] */
971 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
972 		      STACK_VAR(src_lo));
973 	else
974 		/* mov ecx,src_lo */
975 		EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
976 
977 	/* cmp ecx,32 */
978 	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
979 	/* Jumps when >= 32 */
980 	if (is_imm8(jmp_label(jmp_label1, 2)))
981 		EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
982 	else
983 		EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
984 
985 	/* < 32 */
986 	/* lshr dreg_lo,cl */
987 	EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
988 	/* mov ebx,dreg_hi */
989 	EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
990 	/* shr dreg_hi,cl */
991 	EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
992 
993 	/* IA32_ECX = -IA32_ECX + 32 */
994 	/* neg ecx */
995 	EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
996 	/* add ecx,32 */
997 	EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
998 
999 	/* shl ebx,cl */
1000 	EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
1001 	/* or dreg_lo,ebx */
1002 	EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
1003 
1004 	/* goto out; */
1005 	if (is_imm8(jmp_label(jmp_label3, 2)))
1006 		EMIT2(0xEB, jmp_label(jmp_label3, 2));
1007 	else
1008 		EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
1009 
1010 	/* >= 32 */
1011 	if (jmp_label1 == -1)
1012 		jmp_label1 = cnt;
1013 	/* cmp ecx,64 */
1014 	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
1015 	/* Jumps when >= 64 */
1016 	if (is_imm8(jmp_label(jmp_label2, 2)))
1017 		EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
1018 	else
1019 		EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
1020 
1021 	/* >= 32 && < 64 */
1022 	/* sub ecx,32 */
1023 	EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
1024 	/* shr dreg_hi,cl */
1025 	EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
1026 	/* mov dreg_lo,dreg_hi */
1027 	EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
1028 	/* xor dreg_hi,dreg_hi */
1029 	EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
1030 
1031 	/* goto out; */
1032 	if (is_imm8(jmp_label(jmp_label3, 2)))
1033 		EMIT2(0xEB, jmp_label(jmp_label3, 2));
1034 	else
1035 		EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
1036 
1037 	/* >= 64 */
1038 	if (jmp_label2 == -1)
1039 		jmp_label2 = cnt;
1040 	/* xor dreg_lo,dreg_lo */
1041 	EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
1042 	/* xor dreg_hi,dreg_hi */
1043 	EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
1044 
1045 	if (jmp_label3 == -1)
1046 		jmp_label3 = cnt;
1047 
1048 	if (dstk) {
1049 		/* mov dword ptr [ebp+off],dreg_lo */
1050 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
1051 		      STACK_VAR(dst_lo));
1052 		/* mov dword ptr [ebp+off],dreg_hi */
1053 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
1054 		      STACK_VAR(dst_hi));
1055 	}
1056 	/* out: */
1057 	*pprog = prog;
1058 }
1059 
1060 /* dst = dst << val */
1061 static inline void emit_ia32_lsh_i64(const u8 dst[], const u32 val,
1062 				     bool dstk, u8 **pprog)
1063 {
1064 	u8 *prog = *pprog;
1065 	int cnt = 0;
1066 	u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
1067 	u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
1068 
1069 	if (dstk) {
1070 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1071 		      STACK_VAR(dst_lo));
1072 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
1073 		      STACK_VAR(dst_hi));
1074 	}
1075 	/* Do LSH operation */
1076 	if (val < 32) {
1077 		/* shl dreg_hi,imm8 */
1078 		EMIT3(0xC1, add_1reg(0xE0, dreg_hi), val);
1079 		/* mov ebx,dreg_lo */
1080 		EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
1081 		/* shl dreg_lo,imm8 */
1082 		EMIT3(0xC1, add_1reg(0xE0, dreg_lo), val);
1083 
1084 		/* IA32_ECX = 32 - val */
1085 		/* mov ecx,val */
1086 		EMIT2(0xB1, val);
1087 		/* movzx ecx,ecx */
1088 		EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
1089 		/* neg ecx */
1090 		EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
1091 		/* add ecx,32 */
1092 		EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
1093 
1094 		/* shr ebx,cl */
1095 		EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
1096 		/* or dreg_hi,ebx */
1097 		EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
1098 	} else if (val >= 32 && val < 64) {
1099 		u32 value = val - 32;
1100 
1101 		/* shl dreg_lo,imm8 */
1102 		EMIT3(0xC1, add_1reg(0xE0, dreg_lo), value);
1103 		/* mov dreg_hi,dreg_lo */
1104 		EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
1105 		/* xor dreg_lo,dreg_lo */
1106 		EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
1107 	} else {
1108 		/* xor dreg_lo,dreg_lo */
1109 		EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
1110 		/* xor dreg_hi,dreg_hi */
1111 		EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
1112 	}
1113 
1114 	if (dstk) {
1115 		/* mov dword ptr [ebp+off],dreg_lo */
1116 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
1117 		      STACK_VAR(dst_lo));
1118 		/* mov dword ptr [ebp+off],dreg_hi */
1119 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
1120 		      STACK_VAR(dst_hi));
1121 	}
1122 	*pprog = prog;
1123 }
1124 
1125 /* dst = dst >> val */
1126 static inline void emit_ia32_rsh_i64(const u8 dst[], const u32 val,
1127 				     bool dstk, u8 **pprog)
1128 {
1129 	u8 *prog = *pprog;
1130 	int cnt = 0;
1131 	u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
1132 	u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
1133 
1134 	if (dstk) {
1135 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1136 		      STACK_VAR(dst_lo));
1137 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
1138 		      STACK_VAR(dst_hi));
1139 	}
1140 
1141 	/* Do RSH operation */
1142 	if (val < 32) {
1143 		/* shr dreg_lo,imm8 */
1144 		EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
1145 		/* mov ebx,dreg_hi */
1146 		EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
1147 		/* shr dreg_hi,imm8 */
1148 		EMIT3(0xC1, add_1reg(0xE8, dreg_hi), val);
1149 
1150 		/* IA32_ECX = 32 - val */
1151 		/* mov ecx,val */
1152 		EMIT2(0xB1, val);
1153 		/* movzx ecx,ecx */
1154 		EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
1155 		/* neg ecx */
1156 		EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
1157 		/* add ecx,32 */
1158 		EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
1159 
1160 		/* shl ebx,cl */
1161 		EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
1162 		/* or dreg_lo,ebx */
1163 		EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
1164 	} else if (val >= 32 && val < 64) {
1165 		u32 value = val - 32;
1166 
1167 		/* shr dreg_hi,imm8 */
1168 		EMIT3(0xC1, add_1reg(0xE8, dreg_hi), value);
1169 		/* mov dreg_lo,dreg_hi */
1170 		EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
1171 		/* xor dreg_hi,dreg_hi */
1172 		EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
1173 	} else {
1174 		/* xor dreg_lo,dreg_lo */
1175 		EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
1176 		/* xor dreg_hi,dreg_hi */
1177 		EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
1178 	}
1179 
1180 	if (dstk) {
1181 		/* mov dword ptr [ebp+off],dreg_lo */
1182 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
1183 		      STACK_VAR(dst_lo));
1184 		/* mov dword ptr [ebp+off],dreg_hi */
1185 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
1186 		      STACK_VAR(dst_hi));
1187 	}
1188 	*pprog = prog;
1189 }
1190 
1191 /* dst = dst >> val (signed) */
1192 static inline void emit_ia32_arsh_i64(const u8 dst[], const u32 val,
1193 				      bool dstk, u8 **pprog)
1194 {
1195 	u8 *prog = *pprog;
1196 	int cnt = 0;
1197 	u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
1198 	u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
1199 
1200 	if (dstk) {
1201 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1202 		      STACK_VAR(dst_lo));
1203 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
1204 		      STACK_VAR(dst_hi));
1205 	}
1206 	/* Do RSH operation */
1207 	if (val < 32) {
1208 		/* shr dreg_lo,imm8 */
1209 		EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
1210 		/* mov ebx,dreg_hi */
1211 		EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
1212 		/* ashr dreg_hi,imm8 */
1213 		EMIT3(0xC1, add_1reg(0xF8, dreg_hi), val);
1214 
1215 		/* IA32_ECX = 32 - val */
1216 		/* mov ecx,val */
1217 		EMIT2(0xB1, val);
1218 		/* movzx ecx,ecx */
1219 		EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
1220 		/* neg ecx */
1221 		EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
1222 		/* add ecx,32 */
1223 		EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
1224 
1225 		/* shl ebx,cl */
1226 		EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
1227 		/* or dreg_lo,ebx */
1228 		EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
1229 	} else if (val >= 32 && val < 64) {
1230 		u32 value = val - 32;
1231 
1232 		/* ashr dreg_hi,imm8 */
1233 		EMIT3(0xC1, add_1reg(0xF8, dreg_hi), value);
1234 		/* mov dreg_lo,dreg_hi */
1235 		EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
1236 
1237 		/* ashr dreg_hi,imm8 */
1238 		EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
1239 	} else {
1240 		/* ashr dreg_hi,imm8 */
1241 		EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
1242 		/* mov dreg_lo,dreg_hi */
1243 		EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
1244 	}
1245 
1246 	if (dstk) {
1247 		/* mov dword ptr [ebp+off],dreg_lo */
1248 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
1249 		      STACK_VAR(dst_lo));
1250 		/* mov dword ptr [ebp+off],dreg_hi */
1251 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
1252 		      STACK_VAR(dst_hi));
1253 	}
1254 	*pprog = prog;
1255 }
1256 
1257 static inline void emit_ia32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
1258 				     bool sstk, u8 **pprog)
1259 {
1260 	u8 *prog = *pprog;
1261 	int cnt = 0;
1262 
1263 	if (dstk)
1264 		/* mov eax,dword ptr [ebp+off] */
1265 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1266 		      STACK_VAR(dst_hi));
1267 	else
1268 		/* mov eax,dst_hi */
1269 		EMIT2(0x8B, add_2reg(0xC0, dst_hi, IA32_EAX));
1270 
1271 	if (sstk)
1272 		/* mul dword ptr [ebp+off] */
1273 		EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(src_lo));
1274 	else
1275 		/* mul src_lo */
1276 		EMIT2(0xF7, add_1reg(0xE0, src_lo));
1277 
1278 	/* mov ecx,eax */
1279 	EMIT2(0x89, add_2reg(0xC0, IA32_ECX, IA32_EAX));
1280 
1281 	if (dstk)
1282 		/* mov eax,dword ptr [ebp+off] */
1283 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1284 		      STACK_VAR(dst_lo));
1285 	else
1286 		/* mov eax,dst_lo */
1287 		EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
1288 
1289 	if (sstk)
1290 		/* mul dword ptr [ebp+off] */
1291 		EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(src_hi));
1292 	else
1293 		/* mul src_hi */
1294 		EMIT2(0xF7, add_1reg(0xE0, src_hi));
1295 
1296 	/* add eax,eax */
1297 	EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EAX));
1298 
1299 	if (dstk)
1300 		/* mov eax,dword ptr [ebp+off] */
1301 		EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1302 		      STACK_VAR(dst_lo));
1303 	else
1304 		/* mov eax,dst_lo */
1305 		EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
1306 
1307 	if (sstk)
1308 		/* mul dword ptr [ebp+off] */
1309 		EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(src_lo));
1310 	else
1311 		/* mul src_lo */
1312 		EMIT2(0xF7, add_1reg(0xE0, src_lo));
1313 
1314 	/* add ecx,edx */
1315 	EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EDX));
1316 
1317 	if (dstk) {
1318 		/* mov dword ptr [ebp+off],eax */
1319 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
1320 		      STACK_VAR(dst_lo));
1321 		/* mov dword ptr [ebp+off],ecx */
1322 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_ECX),
1323 		      STACK_VAR(dst_hi));
1324 	} else {
1325 		/* mov dst_lo,eax */
1326 		EMIT2(0x89, add_2reg(0xC0, dst_lo, IA32_EAX));
1327 		/* mov dst_hi,ecx */
1328 		EMIT2(0x89, add_2reg(0xC0, dst_hi, IA32_ECX));
1329 	}
1330 
1331 	*pprog = prog;
1332 }
1333 
1334 static inline void emit_ia32_mul_i64(const u8 dst[], const u32 val,
1335 				     bool dstk, u8 **pprog)
1336 {
1337 	u8 *prog = *pprog;
1338 	int cnt = 0;
1339 	u32 hi;
1340 
1341 	hi = val & (1<<31) ? (u32)~0 : 0;
1342 	/* movl eax,imm32 */
1343 	EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EAX), val);
1344 	if (dstk)
1345 		/* mul dword ptr [ebp+off] */
1346 		EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(dst_hi));
1347 	else
1348 		/* mul dst_hi */
1349 		EMIT2(0xF7, add_1reg(0xE0, dst_hi));
1350 
1351 	/* mov ecx,eax */
1352 	EMIT2(0x89, add_2reg(0xC0, IA32_ECX, IA32_EAX));
1353 
1354 	/* movl eax,imm32 */
1355 	EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EAX), hi);
1356 	if (dstk)
1357 		/* mul dword ptr [ebp+off] */
1358 		EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(dst_lo));
1359 	else
1360 		/* mul dst_lo */
1361 		EMIT2(0xF7, add_1reg(0xE0, dst_lo));
1362 	/* add ecx,eax */
1363 	EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EAX));
1364 
1365 	/* movl eax,imm32 */
1366 	EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EAX), val);
1367 	if (dstk)
1368 		/* mul dword ptr [ebp+off] */
1369 		EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(dst_lo));
1370 	else
1371 		/* mul dst_lo */
1372 		EMIT2(0xF7, add_1reg(0xE0, dst_lo));
1373 
1374 	/* add ecx,edx */
1375 	EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EDX));
1376 
1377 	if (dstk) {
1378 		/* mov dword ptr [ebp+off],eax */
1379 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
1380 		      STACK_VAR(dst_lo));
1381 		/* mov dword ptr [ebp+off],ecx */
1382 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_ECX),
1383 		      STACK_VAR(dst_hi));
1384 	} else {
1385 		/* mov dword ptr [ebp+off],eax */
1386 		EMIT2(0x89, add_2reg(0xC0, dst_lo, IA32_EAX));
1387 		/* mov dword ptr [ebp+off],ecx */
1388 		EMIT2(0x89, add_2reg(0xC0, dst_hi, IA32_ECX));
1389 	}
1390 
1391 	*pprog = prog;
1392 }
1393 
1394 static int bpf_size_to_x86_bytes(int bpf_size)
1395 {
1396 	if (bpf_size == BPF_W)
1397 		return 4;
1398 	else if (bpf_size == BPF_H)
1399 		return 2;
1400 	else if (bpf_size == BPF_B)
1401 		return 1;
1402 	else if (bpf_size == BPF_DW)
1403 		return 4; /* imm32 */
1404 	else
1405 		return 0;
1406 }
1407 
1408 struct jit_context {
1409 	int cleanup_addr; /* Epilogue code offset */
1410 };
1411 
1412 /* Maximum number of bytes emitted while JITing one eBPF insn */
1413 #define BPF_MAX_INSN_SIZE	128
1414 #define BPF_INSN_SAFETY		64
1415 
1416 #define PROLOGUE_SIZE 35
1417 
1418 /*
1419  * Emit prologue code for BPF program and check it's size.
1420  * bpf_tail_call helper will skip it while jumping into another program.
1421  */
1422 static void emit_prologue(u8 **pprog, u32 stack_depth)
1423 {
1424 	u8 *prog = *pprog;
1425 	int cnt = 0;
1426 	const u8 *r1 = bpf2ia32[BPF_REG_1];
1427 	const u8 fplo = bpf2ia32[BPF_REG_FP][0];
1428 	const u8 fphi = bpf2ia32[BPF_REG_FP][1];
1429 	const u8 *tcc = bpf2ia32[TCALL_CNT];
1430 
1431 	/* push ebp */
1432 	EMIT1(0x55);
1433 	/* mov ebp,esp */
1434 	EMIT2(0x89, 0xE5);
1435 	/* push edi */
1436 	EMIT1(0x57);
1437 	/* push esi */
1438 	EMIT1(0x56);
1439 	/* push ebx */
1440 	EMIT1(0x53);
1441 
1442 	/* sub esp,STACK_SIZE */
1443 	EMIT2_off32(0x81, 0xEC, STACK_SIZE);
1444 	/* sub ebp,SCRATCH_SIZE+12*/
1445 	EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 12);
1446 	/* xor ebx,ebx */
1447 	EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX));
1448 
1449 	/* Set up BPF prog stack base register */
1450 	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBP), STACK_VAR(fplo));
1451 	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(fphi));
1452 
1453 	/* Move BPF_CTX (EAX) to BPF_REG_R1 */
1454 	/* mov dword ptr [ebp+off],eax */
1455 	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r1[0]));
1456 	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(r1[1]));
1457 
1458 	/* Initialize Tail Count */
1459 	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[0]));
1460 	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[1]));
1461 
1462 	BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
1463 	*pprog = prog;
1464 }
1465 
1466 /* Emit epilogue code for BPF program */
1467 static void emit_epilogue(u8 **pprog, u32 stack_depth)
1468 {
1469 	u8 *prog = *pprog;
1470 	const u8 *r0 = bpf2ia32[BPF_REG_0];
1471 	int cnt = 0;
1472 
1473 	/* mov eax,dword ptr [ebp+off]*/
1474 	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r0[0]));
1475 	/* mov edx,dword ptr [ebp+off]*/
1476 	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1]));
1477 
1478 	/* add ebp,SCRATCH_SIZE+12*/
1479 	EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 12);
1480 
1481 	/* mov ebx,dword ptr [ebp-12]*/
1482 	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12);
1483 	/* mov esi,dword ptr [ebp-8]*/
1484 	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ESI), -8);
1485 	/* mov edi,dword ptr [ebp-4]*/
1486 	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDI), -4);
1487 
1488 	EMIT1(0xC9); /* leave */
1489 	EMIT1(0xC3); /* ret */
1490 	*pprog = prog;
1491 }
1492 
1493 /*
1494  * Generate the following code:
1495  * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
1496  *   if (index >= array->map.max_entries)
1497  *     goto out;
1498  *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
1499  *     goto out;
1500  *   prog = array->ptrs[index];
1501  *   if (prog == NULL)
1502  *     goto out;
1503  *   goto *(prog->bpf_func + prologue_size);
1504  * out:
1505  */
1506 static void emit_bpf_tail_call(u8 **pprog)
1507 {
1508 	u8 *prog = *pprog;
1509 	int cnt = 0;
1510 	const u8 *r1 = bpf2ia32[BPF_REG_1];
1511 	const u8 *r2 = bpf2ia32[BPF_REG_2];
1512 	const u8 *r3 = bpf2ia32[BPF_REG_3];
1513 	const u8 *tcc = bpf2ia32[TCALL_CNT];
1514 	u32 lo, hi;
1515 	static int jmp_label1 = -1;
1516 
1517 	/*
1518 	 * if (index >= array->map.max_entries)
1519 	 *     goto out;
1520 	 */
1521 	/* mov eax,dword ptr [ebp+off] */
1522 	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r2[0]));
1523 	/* mov edx,dword ptr [ebp+off] */
1524 	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r3[0]));
1525 
1526 	/* cmp dword ptr [eax+off],edx */
1527 	EMIT3(0x39, add_2reg(0x40, IA32_EAX, IA32_EDX),
1528 	      offsetof(struct bpf_array, map.max_entries));
1529 	/* jbe out */
1530 	EMIT2(IA32_JBE, jmp_label(jmp_label1, 2));
1531 
1532 	/*
1533 	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
1534 	 *     goto out;
1535 	 */
1536 	lo = (u32)MAX_TAIL_CALL_CNT;
1537 	hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
1538 	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(tcc[0]));
1539 	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[1]));
1540 
1541 	/* cmp edx,hi */
1542 	EMIT3(0x83, add_1reg(0xF8, IA32_EBX), hi);
1543 	EMIT2(IA32_JNE, 3);
1544 	/* cmp ecx,lo */
1545 	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), lo);
1546 
1547 	/* ja out */
1548 	EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
1549 
1550 	/* add eax,0x1 */
1551 	EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 0x01);
1552 	/* adc ebx,0x0 */
1553 	EMIT3(0x83, add_1reg(0xD0, IA32_EBX), 0x00);
1554 
1555 	/* mov dword ptr [ebp+off],eax */
1556 	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(tcc[0]));
1557 	/* mov dword ptr [ebp+off],edx */
1558 	EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[1]));
1559 
1560 	/* prog = array->ptrs[index]; */
1561 	/* mov edx, [eax + edx * 4 + offsetof(...)] */
1562 	EMIT3_off32(0x8B, 0x94, 0x90, offsetof(struct bpf_array, ptrs));
1563 
1564 	/*
1565 	 * if (prog == NULL)
1566 	 *     goto out;
1567 	 */
1568 	/* test edx,edx */
1569 	EMIT2(0x85, add_2reg(0xC0, IA32_EDX, IA32_EDX));
1570 	/* je out */
1571 	EMIT2(IA32_JE, jmp_label(jmp_label1, 2));
1572 
1573 	/* goto *(prog->bpf_func + prologue_size); */
1574 	/* mov edx, dword ptr [edx + 32] */
1575 	EMIT3(0x8B, add_2reg(0x40, IA32_EDX, IA32_EDX),
1576 	      offsetof(struct bpf_prog, bpf_func));
1577 	/* add edx,prologue_size */
1578 	EMIT3(0x83, add_1reg(0xC0, IA32_EDX), PROLOGUE_SIZE);
1579 
1580 	/* mov eax,dword ptr [ebp+off] */
1581 	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r1[0]));
1582 
1583 	/*
1584 	 * Now we're ready to jump into next BPF program:
1585 	 * eax == ctx (1st arg)
1586 	 * edx == prog->bpf_func + prologue_size
1587 	 */
1588 	RETPOLINE_EDX_BPF_JIT();
1589 
1590 	if (jmp_label1 == -1)
1591 		jmp_label1 = cnt;
1592 
1593 	/* out: */
1594 	*pprog = prog;
1595 }
1596 
1597 /* Push the scratch stack register on top of the stack. */
1598 static inline void emit_push_r64(const u8 src[], u8 **pprog)
1599 {
1600 	u8 *prog = *pprog;
1601 	int cnt = 0;
1602 
1603 	/* mov ecx,dword ptr [ebp+off] */
1604 	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_hi));
1605 	/* push ecx */
1606 	EMIT1(0x51);
1607 
1608 	/* mov ecx,dword ptr [ebp+off] */
1609 	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_lo));
1610 	/* push ecx */
1611 	EMIT1(0x51);
1612 
1613 	*pprog = prog;
1614 }
1615 
1616 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
1617 		  int oldproglen, struct jit_context *ctx)
1618 {
1619 	struct bpf_insn *insn = bpf_prog->insnsi;
1620 	int insn_cnt = bpf_prog->len;
1621 	bool seen_exit = false;
1622 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
1623 	int i, cnt = 0;
1624 	int proglen = 0;
1625 	u8 *prog = temp;
1626 
1627 	emit_prologue(&prog, bpf_prog->aux->stack_depth);
1628 
1629 	for (i = 0; i < insn_cnt; i++, insn++) {
1630 		const s32 imm32 = insn->imm;
1631 		const bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1632 		const bool dstk = insn->dst_reg == BPF_REG_AX ? false : true;
1633 		const bool sstk = insn->src_reg == BPF_REG_AX ? false : true;
1634 		const u8 code = insn->code;
1635 		const u8 *dst = bpf2ia32[insn->dst_reg];
1636 		const u8 *src = bpf2ia32[insn->src_reg];
1637 		const u8 *r0 = bpf2ia32[BPF_REG_0];
1638 		s64 jmp_offset;
1639 		u8 jmp_cond;
1640 		int ilen;
1641 		u8 *func;
1642 
1643 		switch (code) {
1644 		/* ALU operations */
1645 		/* dst = src */
1646 		case BPF_ALU | BPF_MOV | BPF_K:
1647 		case BPF_ALU | BPF_MOV | BPF_X:
1648 		case BPF_ALU64 | BPF_MOV | BPF_K:
1649 		case BPF_ALU64 | BPF_MOV | BPF_X:
1650 			switch (BPF_SRC(code)) {
1651 			case BPF_X:
1652 				emit_ia32_mov_r64(is64, dst, src, dstk,
1653 						  sstk, &prog);
1654 				break;
1655 			case BPF_K:
1656 				/* Sign-extend immediate value to dst reg */
1657 				emit_ia32_mov_i64(is64, dst, imm32,
1658 						  dstk, &prog);
1659 				break;
1660 			}
1661 			break;
1662 		/* dst = dst + src/imm */
1663 		/* dst = dst - src/imm */
1664 		/* dst = dst | src/imm */
1665 		/* dst = dst & src/imm */
1666 		/* dst = dst ^ src/imm */
1667 		/* dst = dst * src/imm */
1668 		/* dst = dst << src */
1669 		/* dst = dst >> src */
1670 		case BPF_ALU | BPF_ADD | BPF_K:
1671 		case BPF_ALU | BPF_ADD | BPF_X:
1672 		case BPF_ALU | BPF_SUB | BPF_K:
1673 		case BPF_ALU | BPF_SUB | BPF_X:
1674 		case BPF_ALU | BPF_OR | BPF_K:
1675 		case BPF_ALU | BPF_OR | BPF_X:
1676 		case BPF_ALU | BPF_AND | BPF_K:
1677 		case BPF_ALU | BPF_AND | BPF_X:
1678 		case BPF_ALU | BPF_XOR | BPF_K:
1679 		case BPF_ALU | BPF_XOR | BPF_X:
1680 		case BPF_ALU64 | BPF_ADD | BPF_K:
1681 		case BPF_ALU64 | BPF_ADD | BPF_X:
1682 		case BPF_ALU64 | BPF_SUB | BPF_K:
1683 		case BPF_ALU64 | BPF_SUB | BPF_X:
1684 		case BPF_ALU64 | BPF_OR | BPF_K:
1685 		case BPF_ALU64 | BPF_OR | BPF_X:
1686 		case BPF_ALU64 | BPF_AND | BPF_K:
1687 		case BPF_ALU64 | BPF_AND | BPF_X:
1688 		case BPF_ALU64 | BPF_XOR | BPF_K:
1689 		case BPF_ALU64 | BPF_XOR | BPF_X:
1690 			switch (BPF_SRC(code)) {
1691 			case BPF_X:
1692 				emit_ia32_alu_r64(is64, BPF_OP(code), dst,
1693 						  src, dstk, sstk, &prog);
1694 				break;
1695 			case BPF_K:
1696 				emit_ia32_alu_i64(is64, BPF_OP(code), dst,
1697 						  imm32, dstk, &prog);
1698 				break;
1699 			}
1700 			break;
1701 		case BPF_ALU | BPF_MUL | BPF_K:
1702 		case BPF_ALU | BPF_MUL | BPF_X:
1703 			switch (BPF_SRC(code)) {
1704 			case BPF_X:
1705 				emit_ia32_mul_r(dst_lo, src_lo, dstk,
1706 						sstk, &prog);
1707 				break;
1708 			case BPF_K:
1709 				/* mov ecx,imm32*/
1710 				EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX),
1711 					    imm32);
1712 				emit_ia32_mul_r(dst_lo, IA32_ECX, dstk,
1713 						false, &prog);
1714 				break;
1715 			}
1716 			emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
1717 			break;
1718 		case BPF_ALU | BPF_LSH | BPF_X:
1719 		case BPF_ALU | BPF_RSH | BPF_X:
1720 		case BPF_ALU | BPF_ARSH | BPF_K:
1721 		case BPF_ALU | BPF_ARSH | BPF_X:
1722 			switch (BPF_SRC(code)) {
1723 			case BPF_X:
1724 				emit_ia32_shift_r(BPF_OP(code), dst_lo, src_lo,
1725 						  dstk, sstk, &prog);
1726 				break;
1727 			case BPF_K:
1728 				/* mov ecx,imm32*/
1729 				EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX),
1730 					    imm32);
1731 				emit_ia32_shift_r(BPF_OP(code), dst_lo,
1732 						  IA32_ECX, dstk, false,
1733 						  &prog);
1734 				break;
1735 			}
1736 			emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
1737 			break;
1738 		/* dst = dst / src(imm) */
1739 		/* dst = dst % src(imm) */
1740 		case BPF_ALU | BPF_DIV | BPF_K:
1741 		case BPF_ALU | BPF_DIV | BPF_X:
1742 		case BPF_ALU | BPF_MOD | BPF_K:
1743 		case BPF_ALU | BPF_MOD | BPF_X:
1744 			switch (BPF_SRC(code)) {
1745 			case BPF_X:
1746 				emit_ia32_div_mod_r(BPF_OP(code), dst_lo,
1747 						    src_lo, dstk, sstk, &prog);
1748 				break;
1749 			case BPF_K:
1750 				/* mov ecx,imm32*/
1751 				EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX),
1752 					    imm32);
1753 				emit_ia32_div_mod_r(BPF_OP(code), dst_lo,
1754 						    IA32_ECX, dstk, false,
1755 						    &prog);
1756 				break;
1757 			}
1758 			emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
1759 			break;
1760 		case BPF_ALU64 | BPF_DIV | BPF_K:
1761 		case BPF_ALU64 | BPF_DIV | BPF_X:
1762 		case BPF_ALU64 | BPF_MOD | BPF_K:
1763 		case BPF_ALU64 | BPF_MOD | BPF_X:
1764 			goto notyet;
1765 		/* dst = dst >> imm */
1766 		/* dst = dst << imm */
1767 		case BPF_ALU | BPF_RSH | BPF_K:
1768 		case BPF_ALU | BPF_LSH | BPF_K:
1769 			if (unlikely(imm32 > 31))
1770 				return -EINVAL;
1771 			/* mov ecx,imm32*/
1772 			EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
1773 			emit_ia32_shift_r(BPF_OP(code), dst_lo, IA32_ECX, dstk,
1774 					  false, &prog);
1775 			emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
1776 			break;
1777 		/* dst = dst << imm */
1778 		case BPF_ALU64 | BPF_LSH | BPF_K:
1779 			if (unlikely(imm32 > 63))
1780 				return -EINVAL;
1781 			emit_ia32_lsh_i64(dst, imm32, dstk, &prog);
1782 			break;
1783 		/* dst = dst >> imm */
1784 		case BPF_ALU64 | BPF_RSH | BPF_K:
1785 			if (unlikely(imm32 > 63))
1786 				return -EINVAL;
1787 			emit_ia32_rsh_i64(dst, imm32, dstk, &prog);
1788 			break;
1789 		/* dst = dst << src */
1790 		case BPF_ALU64 | BPF_LSH | BPF_X:
1791 			emit_ia32_lsh_r64(dst, src, dstk, sstk, &prog);
1792 			break;
1793 		/* dst = dst >> src */
1794 		case BPF_ALU64 | BPF_RSH | BPF_X:
1795 			emit_ia32_rsh_r64(dst, src, dstk, sstk, &prog);
1796 			break;
1797 		/* dst = dst >> src (signed) */
1798 		case BPF_ALU64 | BPF_ARSH | BPF_X:
1799 			emit_ia32_arsh_r64(dst, src, dstk, sstk, &prog);
1800 			break;
1801 		/* dst = dst >> imm (signed) */
1802 		case BPF_ALU64 | BPF_ARSH | BPF_K:
1803 			if (unlikely(imm32 > 63))
1804 				return -EINVAL;
1805 			emit_ia32_arsh_i64(dst, imm32, dstk, &prog);
1806 			break;
1807 		/* dst = ~dst */
1808 		case BPF_ALU | BPF_NEG:
1809 			emit_ia32_alu_i(is64, false, BPF_OP(code),
1810 					dst_lo, 0, dstk, &prog);
1811 			emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
1812 			break;
1813 		/* dst = ~dst (64 bit) */
1814 		case BPF_ALU64 | BPF_NEG:
1815 			emit_ia32_neg64(dst, dstk, &prog);
1816 			break;
1817 		/* dst = dst * src/imm */
1818 		case BPF_ALU64 | BPF_MUL | BPF_X:
1819 		case BPF_ALU64 | BPF_MUL | BPF_K:
1820 			switch (BPF_SRC(code)) {
1821 			case BPF_X:
1822 				emit_ia32_mul_r64(dst, src, dstk, sstk, &prog);
1823 				break;
1824 			case BPF_K:
1825 				emit_ia32_mul_i64(dst, imm32, dstk, &prog);
1826 				break;
1827 			}
1828 			break;
1829 		/* dst = htole(dst) */
1830 		case BPF_ALU | BPF_END | BPF_FROM_LE:
1831 			emit_ia32_to_le_r64(dst, imm32, dstk, &prog);
1832 			break;
1833 		/* dst = htobe(dst) */
1834 		case BPF_ALU | BPF_END | BPF_FROM_BE:
1835 			emit_ia32_to_be_r64(dst, imm32, dstk, &prog);
1836 			break;
1837 		/* dst = imm64 */
1838 		case BPF_LD | BPF_IMM | BPF_DW: {
1839 			s32 hi, lo = imm32;
1840 
1841 			hi = insn[1].imm;
1842 			emit_ia32_mov_i(dst_lo, lo, dstk, &prog);
1843 			emit_ia32_mov_i(dst_hi, hi, dstk, &prog);
1844 			insn++;
1845 			i++;
1846 			break;
1847 		}
1848 		/* ST: *(u8*)(dst_reg + off) = imm */
1849 		case BPF_ST | BPF_MEM | BPF_H:
1850 		case BPF_ST | BPF_MEM | BPF_B:
1851 		case BPF_ST | BPF_MEM | BPF_W:
1852 		case BPF_ST | BPF_MEM | BPF_DW:
1853 			if (dstk)
1854 				/* mov eax,dword ptr [ebp+off] */
1855 				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1856 				      STACK_VAR(dst_lo));
1857 			else
1858 				/* mov eax,dst_lo */
1859 				EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
1860 
1861 			switch (BPF_SIZE(code)) {
1862 			case BPF_B:
1863 				EMIT(0xC6, 1); break;
1864 			case BPF_H:
1865 				EMIT2(0x66, 0xC7); break;
1866 			case BPF_W:
1867 			case BPF_DW:
1868 				EMIT(0xC7, 1); break;
1869 			}
1870 
1871 			if (is_imm8(insn->off))
1872 				EMIT2(add_1reg(0x40, IA32_EAX), insn->off);
1873 			else
1874 				EMIT1_off32(add_1reg(0x80, IA32_EAX),
1875 					    insn->off);
1876 			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(code)));
1877 
1878 			if (BPF_SIZE(code) == BPF_DW) {
1879 				u32 hi;
1880 
1881 				hi = imm32 & (1<<31) ? (u32)~0 : 0;
1882 				EMIT2_off32(0xC7, add_1reg(0x80, IA32_EAX),
1883 					    insn->off + 4);
1884 				EMIT(hi, 4);
1885 			}
1886 			break;
1887 
1888 		/* STX: *(u8*)(dst_reg + off) = src_reg */
1889 		case BPF_STX | BPF_MEM | BPF_B:
1890 		case BPF_STX | BPF_MEM | BPF_H:
1891 		case BPF_STX | BPF_MEM | BPF_W:
1892 		case BPF_STX | BPF_MEM | BPF_DW:
1893 			if (dstk)
1894 				/* mov eax,dword ptr [ebp+off] */
1895 				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1896 				      STACK_VAR(dst_lo));
1897 			else
1898 				/* mov eax,dst_lo */
1899 				EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
1900 
1901 			if (sstk)
1902 				/* mov edx,dword ptr [ebp+off] */
1903 				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
1904 				      STACK_VAR(src_lo));
1905 			else
1906 				/* mov edx,src_lo */
1907 				EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_EDX));
1908 
1909 			switch (BPF_SIZE(code)) {
1910 			case BPF_B:
1911 				EMIT(0x88, 1); break;
1912 			case BPF_H:
1913 				EMIT2(0x66, 0x89); break;
1914 			case BPF_W:
1915 			case BPF_DW:
1916 				EMIT(0x89, 1); break;
1917 			}
1918 
1919 			if (is_imm8(insn->off))
1920 				EMIT2(add_2reg(0x40, IA32_EAX, IA32_EDX),
1921 				      insn->off);
1922 			else
1923 				EMIT1_off32(add_2reg(0x80, IA32_EAX, IA32_EDX),
1924 					    insn->off);
1925 
1926 			if (BPF_SIZE(code) == BPF_DW) {
1927 				if (sstk)
1928 					/* mov edi,dword ptr [ebp+off] */
1929 					EMIT3(0x8B, add_2reg(0x40, IA32_EBP,
1930 							     IA32_EDX),
1931 					      STACK_VAR(src_hi));
1932 				else
1933 					/* mov edi,src_hi */
1934 					EMIT2(0x8B, add_2reg(0xC0, src_hi,
1935 							     IA32_EDX));
1936 				EMIT1(0x89);
1937 				if (is_imm8(insn->off + 4)) {
1938 					EMIT2(add_2reg(0x40, IA32_EAX,
1939 						       IA32_EDX),
1940 					      insn->off + 4);
1941 				} else {
1942 					EMIT1(add_2reg(0x80, IA32_EAX,
1943 						       IA32_EDX));
1944 					EMIT(insn->off + 4, 4);
1945 				}
1946 			}
1947 			break;
1948 
1949 		/* LDX: dst_reg = *(u8*)(src_reg + off) */
1950 		case BPF_LDX | BPF_MEM | BPF_B:
1951 		case BPF_LDX | BPF_MEM | BPF_H:
1952 		case BPF_LDX | BPF_MEM | BPF_W:
1953 		case BPF_LDX | BPF_MEM | BPF_DW:
1954 			if (sstk)
1955 				/* mov eax,dword ptr [ebp+off] */
1956 				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1957 				      STACK_VAR(src_lo));
1958 			else
1959 				/* mov eax,dword ptr [ebp+off] */
1960 				EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_EAX));
1961 
1962 			switch (BPF_SIZE(code)) {
1963 			case BPF_B:
1964 				EMIT2(0x0F, 0xB6); break;
1965 			case BPF_H:
1966 				EMIT2(0x0F, 0xB7); break;
1967 			case BPF_W:
1968 			case BPF_DW:
1969 				EMIT(0x8B, 1); break;
1970 			}
1971 
1972 			if (is_imm8(insn->off))
1973 				EMIT2(add_2reg(0x40, IA32_EAX, IA32_EDX),
1974 				      insn->off);
1975 			else
1976 				EMIT1_off32(add_2reg(0x80, IA32_EAX, IA32_EDX),
1977 					    insn->off);
1978 
1979 			if (dstk)
1980 				/* mov dword ptr [ebp+off],edx */
1981 				EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX),
1982 				      STACK_VAR(dst_lo));
1983 			else
1984 				/* mov dst_lo,edx */
1985 				EMIT2(0x89, add_2reg(0xC0, dst_lo, IA32_EDX));
1986 			switch (BPF_SIZE(code)) {
1987 			case BPF_B:
1988 			case BPF_H:
1989 			case BPF_W:
1990 				if (dstk) {
1991 					EMIT3(0xC7, add_1reg(0x40, IA32_EBP),
1992 					      STACK_VAR(dst_hi));
1993 					EMIT(0x0, 4);
1994 				} else {
1995 					EMIT3(0xC7, add_1reg(0xC0, dst_hi), 0);
1996 				}
1997 				break;
1998 			case BPF_DW:
1999 				EMIT2_off32(0x8B,
2000 					    add_2reg(0x80, IA32_EAX, IA32_EDX),
2001 					    insn->off + 4);
2002 				if (dstk)
2003 					EMIT3(0x89,
2004 					      add_2reg(0x40, IA32_EBP,
2005 						       IA32_EDX),
2006 					      STACK_VAR(dst_hi));
2007 				else
2008 					EMIT2(0x89,
2009 					      add_2reg(0xC0, dst_hi, IA32_EDX));
2010 				break;
2011 			default:
2012 				break;
2013 			}
2014 			break;
2015 		/* call */
2016 		case BPF_JMP | BPF_CALL:
2017 		{
2018 			const u8 *r1 = bpf2ia32[BPF_REG_1];
2019 			const u8 *r2 = bpf2ia32[BPF_REG_2];
2020 			const u8 *r3 = bpf2ia32[BPF_REG_3];
2021 			const u8 *r4 = bpf2ia32[BPF_REG_4];
2022 			const u8 *r5 = bpf2ia32[BPF_REG_5];
2023 
2024 			if (insn->src_reg == BPF_PSEUDO_CALL)
2025 				goto notyet;
2026 
2027 			func = (u8 *) __bpf_call_base + imm32;
2028 			jmp_offset = func - (image + addrs[i]);
2029 
2030 			if (!imm32 || !is_simm32(jmp_offset)) {
2031 				pr_err("unsupported BPF func %d addr %p image %p\n",
2032 				       imm32, func, image);
2033 				return -EINVAL;
2034 			}
2035 
2036 			/* mov eax,dword ptr [ebp+off] */
2037 			EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
2038 			      STACK_VAR(r1[0]));
2039 			/* mov edx,dword ptr [ebp+off] */
2040 			EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
2041 			      STACK_VAR(r1[1]));
2042 
2043 			emit_push_r64(r5, &prog);
2044 			emit_push_r64(r4, &prog);
2045 			emit_push_r64(r3, &prog);
2046 			emit_push_r64(r2, &prog);
2047 
2048 			EMIT1_off32(0xE8, jmp_offset + 9);
2049 
2050 			/* mov dword ptr [ebp+off],eax */
2051 			EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
2052 			      STACK_VAR(r0[0]));
2053 			/* mov dword ptr [ebp+off],edx */
2054 			EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX),
2055 			      STACK_VAR(r0[1]));
2056 
2057 			/* add esp,32 */
2058 			EMIT3(0x83, add_1reg(0xC0, IA32_ESP), 32);
2059 			break;
2060 		}
2061 		case BPF_JMP | BPF_TAIL_CALL:
2062 			emit_bpf_tail_call(&prog);
2063 			break;
2064 
2065 		/* cond jump */
2066 		case BPF_JMP | BPF_JEQ | BPF_X:
2067 		case BPF_JMP | BPF_JNE | BPF_X:
2068 		case BPF_JMP | BPF_JGT | BPF_X:
2069 		case BPF_JMP | BPF_JLT | BPF_X:
2070 		case BPF_JMP | BPF_JGE | BPF_X:
2071 		case BPF_JMP | BPF_JLE | BPF_X:
2072 		case BPF_JMP | BPF_JSGT | BPF_X:
2073 		case BPF_JMP | BPF_JSLE | BPF_X:
2074 		case BPF_JMP | BPF_JSLT | BPF_X:
2075 		case BPF_JMP | BPF_JSGE | BPF_X:
2076 		case BPF_JMP32 | BPF_JEQ | BPF_X:
2077 		case BPF_JMP32 | BPF_JNE | BPF_X:
2078 		case BPF_JMP32 | BPF_JGT | BPF_X:
2079 		case BPF_JMP32 | BPF_JLT | BPF_X:
2080 		case BPF_JMP32 | BPF_JGE | BPF_X:
2081 		case BPF_JMP32 | BPF_JLE | BPF_X:
2082 		case BPF_JMP32 | BPF_JSGT | BPF_X:
2083 		case BPF_JMP32 | BPF_JSLE | BPF_X:
2084 		case BPF_JMP32 | BPF_JSLT | BPF_X:
2085 		case BPF_JMP32 | BPF_JSGE | BPF_X: {
2086 			bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
2087 			u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
2088 			u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
2089 			u8 sreg_lo = sstk ? IA32_ECX : src_lo;
2090 			u8 sreg_hi = sstk ? IA32_EBX : src_hi;
2091 
2092 			if (dstk) {
2093 				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
2094 				      STACK_VAR(dst_lo));
2095 				if (is_jmp64)
2096 					EMIT3(0x8B,
2097 					      add_2reg(0x40, IA32_EBP,
2098 						       IA32_EDX),
2099 					      STACK_VAR(dst_hi));
2100 			}
2101 
2102 			if (sstk) {
2103 				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
2104 				      STACK_VAR(src_lo));
2105 				if (is_jmp64)
2106 					EMIT3(0x8B,
2107 					      add_2reg(0x40, IA32_EBP,
2108 						       IA32_EBX),
2109 					      STACK_VAR(src_hi));
2110 			}
2111 
2112 			if (is_jmp64) {
2113 				/* cmp dreg_hi,sreg_hi */
2114 				EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
2115 				EMIT2(IA32_JNE, 2);
2116 			}
2117 			/* cmp dreg_lo,sreg_lo */
2118 			EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
2119 			goto emit_cond_jmp;
2120 		}
2121 		case BPF_JMP | BPF_JSET | BPF_X:
2122 		case BPF_JMP32 | BPF_JSET | BPF_X: {
2123 			bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
2124 			u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
2125 			u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
2126 			u8 sreg_lo = sstk ? IA32_ECX : src_lo;
2127 			u8 sreg_hi = sstk ? IA32_EBX : src_hi;
2128 
2129 			if (dstk) {
2130 				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
2131 				      STACK_VAR(dst_lo));
2132 				if (is_jmp64)
2133 					EMIT3(0x8B,
2134 					      add_2reg(0x40, IA32_EBP,
2135 						       IA32_EDX),
2136 					      STACK_VAR(dst_hi));
2137 			}
2138 
2139 			if (sstk) {
2140 				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
2141 				      STACK_VAR(src_lo));
2142 				if (is_jmp64)
2143 					EMIT3(0x8B,
2144 					      add_2reg(0x40, IA32_EBP,
2145 						       IA32_EBX),
2146 					      STACK_VAR(src_hi));
2147 			}
2148 			/* and dreg_lo,sreg_lo */
2149 			EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
2150 			/* and dreg_hi,sreg_hi */
2151 			EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
2152 			/* or dreg_lo,dreg_hi */
2153 			EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
2154 			goto emit_cond_jmp;
2155 		}
2156 		case BPF_JMP | BPF_JSET | BPF_K:
2157 		case BPF_JMP32 | BPF_JSET | BPF_K: {
2158 			bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
2159 			u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
2160 			u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
2161 			u8 sreg_lo = IA32_ECX;
2162 			u8 sreg_hi = IA32_EBX;
2163 			u32 hi;
2164 
2165 			if (dstk) {
2166 				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
2167 				      STACK_VAR(dst_lo));
2168 				if (is_jmp64)
2169 					EMIT3(0x8B,
2170 					      add_2reg(0x40, IA32_EBP,
2171 						       IA32_EDX),
2172 					      STACK_VAR(dst_hi));
2173 			}
2174 
2175 			/* mov ecx,imm32 */
2176 			EMIT2_off32(0xC7, add_1reg(0xC0, sreg_lo), imm32);
2177 
2178 			/* and dreg_lo,sreg_lo */
2179 			EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
2180 			if (is_jmp64) {
2181 				hi = imm32 & (1 << 31) ? (u32)~0 : 0;
2182 				/* mov ebx,imm32 */
2183 				EMIT2_off32(0xC7, add_1reg(0xC0, sreg_hi), hi);
2184 				/* and dreg_hi,sreg_hi */
2185 				EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
2186 				/* or dreg_lo,dreg_hi */
2187 				EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
2188 			}
2189 			goto emit_cond_jmp;
2190 		}
2191 		case BPF_JMP | BPF_JEQ | BPF_K:
2192 		case BPF_JMP | BPF_JNE | BPF_K:
2193 		case BPF_JMP | BPF_JGT | BPF_K:
2194 		case BPF_JMP | BPF_JLT | BPF_K:
2195 		case BPF_JMP | BPF_JGE | BPF_K:
2196 		case BPF_JMP | BPF_JLE | BPF_K:
2197 		case BPF_JMP | BPF_JSGT | BPF_K:
2198 		case BPF_JMP | BPF_JSLE | BPF_K:
2199 		case BPF_JMP | BPF_JSLT | BPF_K:
2200 		case BPF_JMP | BPF_JSGE | BPF_K:
2201 		case BPF_JMP32 | BPF_JEQ | BPF_K:
2202 		case BPF_JMP32 | BPF_JNE | BPF_K:
2203 		case BPF_JMP32 | BPF_JGT | BPF_K:
2204 		case BPF_JMP32 | BPF_JLT | BPF_K:
2205 		case BPF_JMP32 | BPF_JGE | BPF_K:
2206 		case BPF_JMP32 | BPF_JLE | BPF_K:
2207 		case BPF_JMP32 | BPF_JSGT | BPF_K:
2208 		case BPF_JMP32 | BPF_JSLE | BPF_K:
2209 		case BPF_JMP32 | BPF_JSLT | BPF_K:
2210 		case BPF_JMP32 | BPF_JSGE | BPF_K: {
2211 			bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
2212 			u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
2213 			u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
2214 			u8 sreg_lo = IA32_ECX;
2215 			u8 sreg_hi = IA32_EBX;
2216 			u32 hi;
2217 
2218 			if (dstk) {
2219 				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
2220 				      STACK_VAR(dst_lo));
2221 				if (is_jmp64)
2222 					EMIT3(0x8B,
2223 					      add_2reg(0x40, IA32_EBP,
2224 						       IA32_EDX),
2225 					      STACK_VAR(dst_hi));
2226 			}
2227 
2228 			/* mov ecx,imm32 */
2229 			EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
2230 			if (is_jmp64) {
2231 				hi = imm32 & (1 << 31) ? (u32)~0 : 0;
2232 				/* mov ebx,imm32 */
2233 				EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
2234 				/* cmp dreg_hi,sreg_hi */
2235 				EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
2236 				EMIT2(IA32_JNE, 2);
2237 			}
2238 			/* cmp dreg_lo,sreg_lo */
2239 			EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
2240 
2241 emit_cond_jmp:		/* Convert BPF opcode to x86 */
2242 			switch (BPF_OP(code)) {
2243 			case BPF_JEQ:
2244 				jmp_cond = IA32_JE;
2245 				break;
2246 			case BPF_JSET:
2247 			case BPF_JNE:
2248 				jmp_cond = IA32_JNE;
2249 				break;
2250 			case BPF_JGT:
2251 				/* GT is unsigned '>', JA in x86 */
2252 				jmp_cond = IA32_JA;
2253 				break;
2254 			case BPF_JLT:
2255 				/* LT is unsigned '<', JB in x86 */
2256 				jmp_cond = IA32_JB;
2257 				break;
2258 			case BPF_JGE:
2259 				/* GE is unsigned '>=', JAE in x86 */
2260 				jmp_cond = IA32_JAE;
2261 				break;
2262 			case BPF_JLE:
2263 				/* LE is unsigned '<=', JBE in x86 */
2264 				jmp_cond = IA32_JBE;
2265 				break;
2266 			case BPF_JSGT:
2267 				/* Signed '>', GT in x86 */
2268 				jmp_cond = IA32_JG;
2269 				break;
2270 			case BPF_JSLT:
2271 				/* Signed '<', LT in x86 */
2272 				jmp_cond = IA32_JL;
2273 				break;
2274 			case BPF_JSGE:
2275 				/* Signed '>=', GE in x86 */
2276 				jmp_cond = IA32_JGE;
2277 				break;
2278 			case BPF_JSLE:
2279 				/* Signed '<=', LE in x86 */
2280 				jmp_cond = IA32_JLE;
2281 				break;
2282 			default: /* to silence GCC warning */
2283 				return -EFAULT;
2284 			}
2285 			jmp_offset = addrs[i + insn->off] - addrs[i];
2286 			if (is_imm8(jmp_offset)) {
2287 				EMIT2(jmp_cond, jmp_offset);
2288 			} else if (is_simm32(jmp_offset)) {
2289 				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
2290 			} else {
2291 				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
2292 				return -EFAULT;
2293 			}
2294 
2295 			break;
2296 		}
2297 		case BPF_JMP | BPF_JA:
2298 			if (insn->off == -1)
2299 				/* -1 jmp instructions will always jump
2300 				 * backwards two bytes. Explicitly handling
2301 				 * this case avoids wasting too many passes
2302 				 * when there are long sequences of replaced
2303 				 * dead code.
2304 				 */
2305 				jmp_offset = -2;
2306 			else
2307 				jmp_offset = addrs[i + insn->off] - addrs[i];
2308 
2309 			if (!jmp_offset)
2310 				/* Optimize out nop jumps */
2311 				break;
2312 emit_jmp:
2313 			if (is_imm8(jmp_offset)) {
2314 				EMIT2(0xEB, jmp_offset);
2315 			} else if (is_simm32(jmp_offset)) {
2316 				EMIT1_off32(0xE9, jmp_offset);
2317 			} else {
2318 				pr_err("jmp gen bug %llx\n", jmp_offset);
2319 				return -EFAULT;
2320 			}
2321 			break;
2322 		/* STX XADD: lock *(u32 *)(dst + off) += src */
2323 		case BPF_STX | BPF_XADD | BPF_W:
2324 		/* STX XADD: lock *(u64 *)(dst + off) += src */
2325 		case BPF_STX | BPF_XADD | BPF_DW:
2326 			goto notyet;
2327 		case BPF_JMP | BPF_EXIT:
2328 			if (seen_exit) {
2329 				jmp_offset = ctx->cleanup_addr - addrs[i];
2330 				goto emit_jmp;
2331 			}
2332 			seen_exit = true;
2333 			/* Update cleanup_addr */
2334 			ctx->cleanup_addr = proglen;
2335 			emit_epilogue(&prog, bpf_prog->aux->stack_depth);
2336 			break;
2337 notyet:
2338 			pr_info_once("*** NOT YET: opcode %02x ***\n", code);
2339 			return -EFAULT;
2340 		default:
2341 			/*
2342 			 * This error will be seen if new instruction was added
2343 			 * to interpreter, but not to JIT or if there is junk in
2344 			 * bpf_prog
2345 			 */
2346 			pr_err("bpf_jit: unknown opcode %02x\n", code);
2347 			return -EINVAL;
2348 		}
2349 
2350 		ilen = prog - temp;
2351 		if (ilen > BPF_MAX_INSN_SIZE) {
2352 			pr_err("bpf_jit: fatal insn size error\n");
2353 			return -EFAULT;
2354 		}
2355 
2356 		if (image) {
2357 			if (unlikely(proglen + ilen > oldproglen)) {
2358 				pr_err("bpf_jit: fatal error\n");
2359 				return -EFAULT;
2360 			}
2361 			memcpy(image + proglen, temp, ilen);
2362 		}
2363 		proglen += ilen;
2364 		addrs[i] = proglen;
2365 		prog = temp;
2366 	}
2367 	return proglen;
2368 }
2369 
2370 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2371 {
2372 	struct bpf_binary_header *header = NULL;
2373 	struct bpf_prog *tmp, *orig_prog = prog;
2374 	int proglen, oldproglen = 0;
2375 	struct jit_context ctx = {};
2376 	bool tmp_blinded = false;
2377 	u8 *image = NULL;
2378 	int *addrs;
2379 	int pass;
2380 	int i;
2381 
2382 	if (!prog->jit_requested)
2383 		return orig_prog;
2384 
2385 	tmp = bpf_jit_blind_constants(prog);
2386 	/*
2387 	 * If blinding was requested and we failed during blinding,
2388 	 * we must fall back to the interpreter.
2389 	 */
2390 	if (IS_ERR(tmp))
2391 		return orig_prog;
2392 	if (tmp != prog) {
2393 		tmp_blinded = true;
2394 		prog = tmp;
2395 	}
2396 
2397 	addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL);
2398 	if (!addrs) {
2399 		prog = orig_prog;
2400 		goto out;
2401 	}
2402 
2403 	/*
2404 	 * Before first pass, make a rough estimation of addrs[]
2405 	 * each BPF instruction is translated to less than 64 bytes
2406 	 */
2407 	for (proglen = 0, i = 0; i < prog->len; i++) {
2408 		proglen += 64;
2409 		addrs[i] = proglen;
2410 	}
2411 	ctx.cleanup_addr = proglen;
2412 
2413 	/*
2414 	 * JITed image shrinks with every pass and the loop iterates
2415 	 * until the image stops shrinking. Very large BPF programs
2416 	 * may converge on the last pass. In such case do one more
2417 	 * pass to emit the final image.
2418 	 */
2419 	for (pass = 0; pass < 20 || image; pass++) {
2420 		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
2421 		if (proglen <= 0) {
2422 out_image:
2423 			image = NULL;
2424 			if (header)
2425 				bpf_jit_binary_free(header);
2426 			prog = orig_prog;
2427 			goto out_addrs;
2428 		}
2429 		if (image) {
2430 			if (proglen != oldproglen) {
2431 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2432 				       proglen, oldproglen);
2433 				goto out_image;
2434 			}
2435 			break;
2436 		}
2437 		if (proglen == oldproglen) {
2438 			header = bpf_jit_binary_alloc(proglen, &image,
2439 						      1, jit_fill_hole);
2440 			if (!header) {
2441 				prog = orig_prog;
2442 				goto out_addrs;
2443 			}
2444 		}
2445 		oldproglen = proglen;
2446 		cond_resched();
2447 	}
2448 
2449 	if (bpf_jit_enable > 1)
2450 		bpf_jit_dump(prog->len, proglen, pass + 1, image);
2451 
2452 	if (image) {
2453 		bpf_jit_binary_lock_ro(header);
2454 		prog->bpf_func = (void *)image;
2455 		prog->jited = 1;
2456 		prog->jited_len = proglen;
2457 	} else {
2458 		prog = orig_prog;
2459 	}
2460 
2461 out_addrs:
2462 	kfree(addrs);
2463 out:
2464 	if (tmp_blinded)
2465 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
2466 					   tmp : orig_prog);
2467 	return prog;
2468 }
2469