// SPDX-License-Identifier: GPL-2.0-only
/*
 * Just-In-Time compiler for eBPF filters on 32bit ARM
 *
 * Copyright (c) 2023 Puranjay Mohan <puranjay12@gmail.com>
 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/math64.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>
#include <asm/system_info.h>

#include "bpf_jit_32.h"

/*
 * eBPF prog stack layout:
 *
 *                         high
 * original ARM_SP =>     +-----+
 *                        |     | callee saved registers
 *                        +-----+ <= (BPF_FP + SCRATCH_SIZE)
 *                        | ... | eBPF JIT scratch space
 * eBPF fp register =>    +-----+
 *   (BPF_FP)             | ... | eBPF prog stack
 *                        +-----+
 *                        |RSVD | JIT scratchpad
 * current ARM_SP =>      +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
 *                        | ... | caller-saved registers
 *                        +-----+
 *                        | ... | arguments passed on stack
 * ARM_SP during call =>  +-----+
 *                        |     |
 *                        | ... | Function call stack
 *                        |     |
 *                        +-----+
 *                          low
 *
 * The callee saved registers depend on whether frame pointers are enabled.
 * With frame pointers (to be compliant with the ABI):
 *
 *                              high
 * original ARM_SP =>     +--------------+ \
 *                        |      pc      | |
 * current ARM_FP =>      +--------------+ } callee saved registers
 *                        |r4-r9,fp,ip,lr| |
 *                        +--------------+ /
 *                              low
 *
 * Without frame pointers:
 *
 *                              high
 * original ARM_SP =>     +--------------+
 *                        |  r4-r9,fp,lr | callee saved registers
 * current ARM_FP =>      +--------------+
 *                              low
 *
 * When popping registers off the stack at the end of a BPF function, we
 * reference them via the current ARM_FP register.
 *
 * Some eBPF operations are implemented via a call to a helper function.
 * Such calls are "invisible" in the eBPF code, so it is up to the calling
 * program to preserve any caller-saved ARM registers during the call. The
 * JIT emits code to push and pop those registers onto the stack, immediately
 * above the callee stack frame.
 */
#define CALLEE_MASK	(1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
			 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
			 1 << ARM_FP)
#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
#define CALLEE_POP_MASK  (CALLEE_MASK | 1 << ARM_PC)

#define CALLER_MASK	(1 << ARM_R0 | 1 << ARM_R1 | 1 << ARM_R2 | 1 << ARM_R3)

enum {
	/* Stack layout - these are offsets from (top of stack - 4) */
	BPF_R2_HI,
	BPF_R2_LO,
	BPF_R3_HI,
	BPF_R3_LO,
	BPF_R4_HI,
	BPF_R4_LO,
	BPF_R5_HI,
	BPF_R5_LO,
	BPF_R7_HI,
	BPF_R7_LO,
	BPF_R8_HI,
	BPF_R8_LO,
	BPF_R9_HI,
	BPF_R9_LO,
	BPF_FP_HI,
	BPF_FP_LO,
	BPF_TC_HI,
	BPF_TC_LO,
	BPF_AX_HI,
	BPF_AX_LO,
	/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
	 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
	 * BPF_REG_FP and the tail call count.
	 */
	BPF_JIT_SCRATCH_REGS,
};

/*
 * Negative "register" values indicate that the register is stored on the
 * stack and give its offset from the top of the eBPF JIT scratch space.
 */
#define STACK_OFFSET(k)	(-4 - (k) * 4)
#define SCRATCH_SIZE	(BPF_JIT_SCRATCH_REGS * 4)

#ifdef CONFIG_FRAME_POINTER
#define EBPF_SCRATCH_TO_ARM_FP(x) ((x) - 4 * hweight16(CALLEE_PUSH_MASK) - 4)
#else
#define EBPF_SCRATCH_TO_ARM_FP(x) (x)
#endif
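
/*
 * Worked example (illustrative only): BPF_R2_HI is enum value 0 and
 * BPF_R2_LO is 1, so the high word of BPF_REG_2 lives at STACK_OFFSET(0)
 * = -4 and the low word at STACK_OFFSET(1) = -8, counting down from the
 * top of the scratch space.  With CONFIG_FRAME_POINTER, CALLEE_PUSH_MASK
 * has 8 bits set, so EBPF_SCRATCH_TO_ARM_FP(-4) = -4 - 32 - 4 = -40: the
 * same slot re-based onto ARM_FP, which points into the 10-word register
 * save area pushed by the prologue.
 */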

#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)	/* TEMP Register 1 */
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)	/* TEMP Register 2 */
#define TCALL_CNT	(MAX_BPF_JIT_REG + 2)	/* Tail Call Count */

#define FLAG_IMM_OVERFLOW	(1 << 0)

/*
 * Map eBPF registers to ARM 32bit registers or stack scratch space.
 *
 * 1. The first argument is passed in ARM 32bit registers; the remaining
 * arguments are passed on the stack scratch space.
 * 2. The first callee-saved register is mapped to ARM 32bit registers; the
 * remaining ones are mapped to the stack scratch space.
 * 3. We need two 64 bit temp registers to do complex operations on eBPF
 * registers.
 *
 * As the eBPF registers are all 64 bit registers and ARM has only 32 bit
 * registers, we have to map each eBPF register to a pair of ARM 32 bit
 * registers or to scratch memory space, and build each eBPF 64 bit register
 * from those.
 */
static const s8 bpf2a32[][2] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = {ARM_R1, ARM_R0},
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = {ARM_R3, ARM_R2},
	/* Stored on stack scratch space */
	[BPF_REG_2] = {STACK_OFFSET(BPF_R2_HI), STACK_OFFSET(BPF_R2_LO)},
	[BPF_REG_3] = {STACK_OFFSET(BPF_R3_HI), STACK_OFFSET(BPF_R3_LO)},
	[BPF_REG_4] = {STACK_OFFSET(BPF_R4_HI), STACK_OFFSET(BPF_R4_LO)},
	[BPF_REG_5] = {STACK_OFFSET(BPF_R5_HI), STACK_OFFSET(BPF_R5_LO)},
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = {ARM_R5, ARM_R4},
	/* Stored on stack scratch space */
	[BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)},
	[BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
	[BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
	/* Read only Frame Pointer to access Stack */
	[BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)},
	/* Temporary Register for BPF JIT, can be used
	 * for constant blindings and others.
	 */
	[TMP_REG_1] = {ARM_R7, ARM_R6},
	[TMP_REG_2] = {ARM_R9, ARM_R8},
	/* Tail call count. Stored on stack scratch space. */
	[TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)},
	/* temporary register for blinding constants.
	 * Stored on stack scratch space.
	 */
	[BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
};

#define	dst_lo	dst[1]
#define dst_hi	dst[0]
#define src_lo	src[1]
#define src_hi	src[0]

/*
 * JIT Context:
 *
 * prog			:	bpf_prog
 * idx			:	index of the current last JITed instruction.
 * prologue_bytes	:	bytes used in prologue.
 * epilogue_offset	:	offset at which the epilogue starts.
 * offsets		:	array of eBPF instruction offsets in
 *				JITed code.
 * target		:	final JITed code.
 * epilogue_bytes	:	number of bytes used in epilogue.
 * imm_count		:	number of immediates used for global
 *				variables.
 * imms			:	array of global variable addresses.
 */

struct jit_ctx {
	const struct bpf_prog *prog;
	unsigned int idx;
	unsigned int prologue_bytes;
	unsigned int epilogue_offset;
	unsigned int cpu_architecture;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u32 stack_size;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};

/*
 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv32(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static u32 jit_mod32(u32 dividend, u32 divisor)
{
	return dividend % divisor;
}

static s32 jit_sdiv32(s32 dividend, s32 divisor)
{
	return dividend / divisor;
}

static s32 jit_smod32(s32 dividend, s32 divisor)
{
	return dividend % divisor;
}

/* Wrappers for 64-bit div/mod */
static u64 jit_udiv64(u64 dividend, u64 divisor)
{
	return div64_u64(dividend, divisor);
}

static u64 jit_mod64(u64 dividend, u64 divisor)
{
	u64 rem;

	div64_u64_rem(dividend, divisor, &rem);
	return rem;
}

static s64 jit_sdiv64(s64 dividend, s64 divisor)
{
	return div64_s64(dividend, divisor);
}

static s64 jit_smod64(s64 dividend, s64 divisor)
{
	u64 q;

	q = div64_s64(dividend, divisor);

	return dividend - q * divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}

/*
 * This is rather horrid, but necessary to convert an integer constant
 * to an immediate operand for the opcodes, and be able to detect at
 * build time whether the constant can't be converted (iow, usable in
 * BUILD_BUG_ON()).
 */
#define imm12val(v, s) (rol32(v, (s)) | (s) << 7)
#define const_imm8m(x)					\
	({ int r;					\
	   u32 v = (x);					\
	   if (!(v & ~0x000000ff))			\
		r = imm12val(v, 0);			\
	   else if (!(v & ~0xc000003f))			\
		r = imm12val(v, 2);			\
	   else if (!(v & ~0xf000000f))			\
		r = imm12val(v, 4);			\
	   else if (!(v & ~0xfc000003))			\
		r = imm12val(v, 6);			\
	   else if (!(v & ~0xff000000))			\
		r = imm12val(v, 8);			\
	   else if (!(v & ~0x3fc00000))			\
		r = imm12val(v, 10);			\
	   else if (!(v & ~0x0ff00000))			\
		r = imm12val(v, 12);			\
	   else if (!(v & ~0x03fc0000))			\
		r = imm12val(v, 14);			\
	   else if (!(v & ~0x00ff0000))			\
		r = imm12val(v, 16);			\
	   else if (!(v & ~0x003fc000))			\
		r = imm12val(v, 18);			\
	   else if (!(v & ~0x000ff000))			\
		r = imm12val(v, 20);			\
	   else if (!(v & ~0x0003fc00))			\
		r = imm12val(v, 22);			\
	   else if (!(v & ~0x0000ff00))			\
		r = imm12val(v, 24);			\
	   else if (!(v & ~0x00003fc0))			\
		r = imm12val(v, 26);			\
	   else if (!(v & ~0x00000ff0))			\
		r = imm12val(v, 28);			\
	   else if (!(v & ~0x000003fc))			\
		r = imm12val(v, 30);			\
	   else						\
		r = -1;					\
	   r; })

/*
 * Checks whether an immediate value can be encoded as an ARM imm12
 * operand (an 8-bit value rotated right by an even amount).
 */
static int imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);
	return -1;
}

#define imm8m(x) (__builtin_constant_p(x) ? const_imm8m(x) : imm8m(x))
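
/*
 * Worked example (illustrative): imm8m(0x3fc00000) succeeds with rot = 5,
 * because ror32(0xff, 10) == 0x3fc00000; the encoding returned is
 * rol32(0x3fc00000, 10) | (5 << 8) == 0x5ff, i.e. the 8-bit value 0xff
 * with a rotate-right-by-10 field.  imm8m(0x101) returns -1: no even
 * rotation of an 8-bit value can cover two set bits that far apart.
 */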

static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12)
{
	op |= rt << 12 | rn << 16;
	if (imm12 >= 0)
		op |= ARM_INST_LDST__U;
	else
		imm12 = -imm12;
	return op | (imm12 & ARM_INST_LDST__IMM12);
}

static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8)
{
	op |= rt << 12 | rn << 16;
	if (imm8 >= 0)
		op |= ARM_INST_LDST__U;
	else
		imm8 = -imm8;
	return op | (imm8 & 0xf0) << 4 | (imm8 & 0x0f);
}

#define ARM_LDR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off)
#define ARM_LDRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off)
#define ARM_LDRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off)
#define ARM_LDRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off)

#define ARM_LDRSH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRSH_I, rt, rn, off)
#define ARM_LDRSB_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRSB_I, rt, rn, off)

#define ARM_STR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off)
#define ARM_STRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off)
#define ARM_STRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off)
#define ARM_STRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off)
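
/*
 * Encoding note (illustrative): the halfword/doubleword forms carry an
 * 8-bit offset split across two nibbles, so arm_bpf_ldst_imm8() with
 * imm8 = -0x34 clears ARM_INST_LDST__U (subtract), then places 0x3 in
 * bits [11:8] and 0x4 in bits [3:0] of the instruction word.
 */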

/*
 * Initializes the JIT space with undefined instructions.
 */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
/* EABI requires the stack to be aligned to 64-bit boundaries */
#define STACK_ALIGNMENT	8
#else
/* Stack must be aligned to 32-bit boundaries */
#define STACK_ALIGNMENT	4
#endif

/* total stack size used in JITed code */
#define _STACK_SIZE	(ctx->prog->aux->stack_depth + SCRATCH_SIZE)
#define STACK_SIZE	ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
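
/*
 * Worked example (illustrative): with 20 scratch slots SCRATCH_SIZE is 80
 * bytes, so a program with stack_depth == 12 gives _STACK_SIZE == 92,
 * which ALIGN() rounds up to 96 under the EABI 8-byte STACK_ALIGNMENT
 * (it would stay 92 with 4-byte alignment).
 */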

#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned int i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset =  ctx->offsets[ctx->prog->len - 1] * 4;
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	if (imm & ~0xfff) {
		/*
		 * literal pool is too far, signal it into flags. we
		 * can only detect it on the second pass unfortunately.
		 */
		ctx->flags |= FLAG_IMM_OVERFLOW;
		return 0;
	}

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ */
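
/*
 * Worked example (illustrative): if the constant lands at byte offset
 * 0x120 in the image and the LDR referencing it is instruction index 8,
 * the PC-relative displacement is 0x120 - (8 + 8 * 4) = 0xf8, which fits
 * in the 12-bit LDR offset field; anything past PC + 0xfff overflows and
 * sets FLAG_IMM_OVERFLOW so the JIT can bail out after this pass.
 */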

static inline int bpf2a32_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx) {
	int to, from;

	if (ctx->target == NULL)
		return 0;
	to = ctx->offsets[bpf_to];
	from = ctx->offsets[bpf_from];

	return to - from - 1;
}

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}

static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
}

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
	emit_bx_r(tgt_reg, ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to, from;
	/* No need for 1st dummy run */
	if (ctx->target == NULL)
		return 0;
	to = ctx->epilogue_offset;
	from = ctx->idx;

	return to - from - 2;
}
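
/*
 * Note on the "- 2" here (and the "- 1" in bpf2a32_offset() above): an ARM
 * branch encodes its target in words relative to PC, and PC reads as the
 * branch's own address + 8, i.e. two instructions ahead, so a branch at
 * index `from` targeting index `to` needs a displacement of to - from - 2.
 * bpf2a32_offset() appears to subtract one less because ctx->offsets[]
 * records the JITed index just past each translated eBPF instruction.
 */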

static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op, u8 sign)
{
	const int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1);
	const s8 *tmp = bpf2a32[TMP_REG_1];
	u32 dst;

#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		if (op == BPF_DIV) {
			emit(sign ? ARM_SDIV(rd, rm, rn) : ARM_UDIV(rd, rm, rn), ctx);
		} else {
			emit(sign ? ARM_SDIV(ARM_IP, rm, rn) : ARM_UDIV(ARM_IP, rm, rn), ctx);
			emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx);
		}
		return;
	}
#endif

	/*
	 * For BPF_ALU | BPF_DIV | BPF_K instructions:
	 * as ARM_R1 and ARM_R0 hold the first argument of the bpf
	 * function, we need to save them on the caller side to keep
	 * them from being clobbered within the callee.
	 * After the return from the callee, we restore ARM_R0 and
	 * ARM_R1.
	 */
	if (rn != ARM_R1) {
		emit(ARM_MOV_R(tmp[0], ARM_R1), ctx);
		emit(ARM_MOV_R(ARM_R1, rn), ctx);
	}
	if (rm != ARM_R0) {
		emit(ARM_MOV_R(tmp[1], ARM_R0), ctx);
		emit(ARM_MOV_R(ARM_R0, rm), ctx);
	}

	/* Push caller-saved registers on stack */
	emit(ARM_PUSH(CALLER_MASK & ~exclude_mask), ctx);

	/* Call appropriate function */
	if (sign) {
		if (op == BPF_DIV)
			dst = (u32)jit_sdiv32;
		else
			dst = (u32)jit_smod32;
	} else {
		if (op == BPF_DIV)
			dst = (u32)jit_udiv32;
		else
			dst = (u32)jit_mod32;
	}

	emit_mov_i(ARM_IP, dst, ctx);
	emit_blx_r(ARM_IP, ctx);

	/* Restore caller-saved registers from stack */
	emit(ARM_POP(CALLER_MASK & ~exclude_mask), ctx);

	/* Save return value */
	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);

	/* Restore ARM_R0 and ARM_R1 */
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx);
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
}

static inline void emit_udivmod64(const s8 *rd, const s8 *rm, const s8 *rn, struct jit_ctx *ctx,
				  u8 op, u8 sign)
{
	u32 dst;

	/* Push caller-saved registers on stack */
	emit(ARM_PUSH(CALLER_MASK), ctx);

	/*
	 * As we are implementing 64-bit div/mod as function calls, we need to put the dividend in
	 * R0-R1 and the divisor in R2-R3. As we have already pushed these registers on the stack,
	 * we can recover them later after returning from the function call.
	 */
	if (rm[1] != ARM_R0 || rn[1] != ARM_R2) {
		/*
		 * Move Rm to {R1, R0} if it is not already there.
		 */
		if (rm[1] != ARM_R0) {
			if (rn[1] == ARM_R0)
				emit(ARM_PUSH(BIT(ARM_R0) | BIT(ARM_R1)), ctx);
			emit(ARM_MOV_R(ARM_R1, rm[0]), ctx);
			emit(ARM_MOV_R(ARM_R0, rm[1]), ctx);
			if (rn[1] == ARM_R0) {
				emit(ARM_POP(BIT(ARM_R2) | BIT(ARM_R3)), ctx);
				goto cont;
			}
		}
		/*
		 * Move Rn to {R3, R2} if it is not already there.
		 */
		if (rn[1] != ARM_R2) {
			emit(ARM_MOV_R(ARM_R3, rn[0]), ctx);
			emit(ARM_MOV_R(ARM_R2, rn[1]), ctx);
		}
	}

cont:

	/* Call appropriate function */
	if (sign) {
		if (op == BPF_DIV)
			dst = (u32)jit_sdiv64;
		else
			dst = (u32)jit_smod64;
	} else {
		if (op == BPF_DIV)
			dst = (u32)jit_udiv64;
		else
			dst = (u32)jit_mod64;
	}

	emit_mov_i(ARM_IP, dst, ctx);
	emit_blx_r(ARM_IP, ctx);

	/* Save return value */
	if (rd[1] != ARM_R0) {
		emit(ARM_MOV_R(rd[0], ARM_R1), ctx);
		emit(ARM_MOV_R(rd[1], ARM_R0), ctx);
	}

	/* Recover {R3, R2} and {R1, R0} from stack if they are not Rd */
	if (rd[1] != ARM_R0 && rd[1] != ARM_R2) {
		emit(ARM_POP(CALLER_MASK), ctx);
	} else if (rd[1] != ARM_R0) {
		emit(ARM_POP(BIT(ARM_R0) | BIT(ARM_R1)), ctx);
		emit(ARM_ADD_I(ARM_SP, ARM_SP, 8), ctx);
	} else {
		emit(ARM_ADD_I(ARM_SP, ARM_SP, 8), ctx);
		emit(ARM_POP(BIT(ARM_R2) | BIT(ARM_R3)), ctx);
	}
}

/* Is the translated BPF register on stack? */
static bool is_stacked(s8 reg)
{
	return reg < 0;
}

/* If a BPF register is on the stack (negative mapping), load it into the
 * supplied temporary register and return the temporary register
 * for subsequent operations, otherwise just use the CPU register.
 */
static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx)
{
	if (is_stacked(reg)) {
		emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
		reg = tmp;
	}
	return reg;
}

static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp,
				   struct jit_ctx *ctx)
{
	if (is_stacked(reg[1])) {
		if (__LINUX_ARM_ARCH__ >= 6 ||
		    ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
			emit(ARM_LDRD_I(tmp[1], ARM_FP,
					EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
		} else {
			emit(ARM_LDR_I(tmp[1], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
			emit(ARM_LDR_I(tmp[0], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
		}
		reg = tmp;
	}
	return reg;
}

/* If a BPF register is on the stack (negative mapping), save the register
 * back to the stack.  If the source register is not the same, then
 * move it into the correct register.
 */
static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx)
{
	if (is_stacked(reg))
		emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
	else if (reg != src)
		emit(ARM_MOV_R(reg, src), ctx);
}

static void arm_bpf_put_reg64(const s8 *reg, const s8 *src,
			      struct jit_ctx *ctx)
{
	if (is_stacked(reg[1])) {
		if (__LINUX_ARM_ARCH__ >= 6 ||
		    ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
			emit(ARM_STRD_I(src[1], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
		} else {
			emit(ARM_STR_I(src[1], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
			emit(ARM_STR_I(src[0], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
		}
	} else {
		if (reg[1] != src[1])
			emit(ARM_MOV_R(reg[1], src[1]), ctx);
		if (reg[0] != src[0])
			emit(ARM_MOV_R(reg[0], src[0]), ctx);
	}
}

static inline void emit_a32_mov_i(const s8 dst, const u32 val,
				  struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];

	if (is_stacked(dst)) {
		emit_mov_i(tmp[1], val, ctx);
		arm_bpf_put_reg32(dst, tmp[1], ctx);
	} else {
		emit_mov_i(dst, val, ctx);
	}
}

static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;

	emit_mov_i(rd[1], (u32)val, ctx);
	emit_mov_i(rd[0], val >> 32, ctx);

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* Sign extended move */
static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[],
				       const u32 val, struct jit_ctx *ctx) {
	u64 val64 = val;

	if (is64 && (val & (1<<31)))
		val64 |= 0xffffffff00000000ULL;
	emit_a32_mov_i64(dst, val64, ctx);
}
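
/*
 * Worked example (illustrative): for BPF_ALU64 with imm = -2 the 32-bit
 * value 0xfffffffe has bit 31 set, so val64 becomes 0xfffffffffffffffe and
 * both halves of the destination pair are written; for BPF_ALU (is64 ==
 * false) the high word is simply loaded with 0.
 */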

static inline void emit_a32_add_r(const u8 dst, const u8 src,
			      const bool is64, const bool hi,
			      struct jit_ctx *ctx) {
	/* 64 bit :
	 *	adds dst_lo, dst_lo, src_lo
	 *	adc dst_hi, dst_hi, src_hi
	 * 32 bit :
	 *	add dst_lo, dst_lo, src_lo
	 */
	if (!hi && is64)
		emit(ARM_ADDS_R(dst, dst, src), ctx);
	else if (hi && is64)
		emit(ARM_ADC_R(dst, dst, src), ctx);
	else
		emit(ARM_ADD_R(dst, dst, src), ctx);
}

static inline void emit_a32_sub_r(const u8 dst, const u8 src,
				  const bool is64, const bool hi,
				  struct jit_ctx *ctx) {
	/* 64 bit :
	 *	subs dst_lo, dst_lo, src_lo
	 *	sbc dst_hi, dst_hi, src_hi
	 * 32 bit :
	 *	sub dst_lo, dst_lo, src_lo
	 */
	if (!hi && is64)
		emit(ARM_SUBS_R(dst, dst, src), ctx);
	else if (hi && is64)
		emit(ARM_SBC_R(dst, dst, src), ctx);
	else
		emit(ARM_SUB_R(dst, dst, src), ctx);
}

static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
			      const bool hi, const u8 op, struct jit_ctx *ctx){
	switch (BPF_OP(op)) {
	/* dst = dst + src */
	case BPF_ADD:
		emit_a32_add_r(dst, src, is64, hi, ctx);
		break;
	/* dst = dst - src */
	case BPF_SUB:
		emit_a32_sub_r(dst, src, is64, hi, ctx);
		break;
	/* dst = dst | src */
	case BPF_OR:
		emit(ARM_ORR_R(dst, dst, src), ctx);
		break;
	/* dst = dst & src */
	case BPF_AND:
		emit(ARM_AND_R(dst, dst, src), ctx);
		break;
	/* dst = dst ^ src */
	case BPF_XOR:
		emit(ARM_EOR_R(dst, dst, src), ctx);
		break;
	/* dst = dst * src */
	case BPF_MUL:
		emit(ARM_MUL(dst, dst, src), ctx);
		break;
	/* dst = dst << src */
	case BPF_LSH:
		emit(ARM_LSL_R(dst, dst, src), ctx);
		break;
	/* dst = dst >> src */
	case BPF_RSH:
		emit(ARM_LSR_R(dst, dst, src), ctx);
		break;
	/* dst = dst >> src (signed) */
	case BPF_ARSH:
		emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx);
		break;
	}
}

/* ALU operation (64 bit) */
static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
				  const s8 src[], struct jit_ctx *ctx,
				  const u8 op) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	rd = arm_bpf_get_reg64(dst, tmp, ctx);
	if (is64) {
		const s8 *rs;

		rs = arm_bpf_get_reg64(src, tmp2, ctx);

		/* ALU operation */
		emit_alu_r(rd[1], rs[1], true, false, op, ctx);
		emit_alu_r(rd[0], rs[0], true, true, op, ctx);
	} else {
		s8 rs;

		rs = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);

		/* ALU operation */
		emit_alu_r(rd[1], rs, true, false, op, ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = src (4 bytes) */
static inline void emit_a32_mov_r(const s8 dst, const s8 src, struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rt;

	rt = arm_bpf_get_reg32(src, tmp[0], ctx);
	arm_bpf_put_reg32(dst, rt, ctx);
}

/* dst = src */
static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
				  const s8 src[],
				  struct jit_ctx *ctx) {
	if (!is64) {
		emit_a32_mov_r(dst_lo, src_lo, ctx);
		if (!ctx->prog->aux->verifier_zext)
			/* Zero out high 4 bytes */
			emit_a32_mov_i(dst_hi, 0, ctx);
	} else if (__LINUX_ARM_ARCH__ < 6 &&
		   ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
		/* complete 8 byte move */
		emit_a32_mov_r(dst_lo, src_lo, ctx);
		emit_a32_mov_r(dst_hi, src_hi, ctx);
	} else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
		const s8 *tmp = bpf2a32[TMP_REG_1];

		emit(ARM_LDRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
		emit(ARM_STRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
	} else if (is_stacked(src_lo)) {
		emit(ARM_LDRD_I(dst[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
	} else if (is_stacked(dst_lo)) {
		emit(ARM_STRD_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
	} else {
		emit(ARM_MOV_R(dst[0], src[0]), ctx);
		emit(ARM_MOV_R(dst[1], src[1]), ctx);
	}
}

/* dst = (signed)src */
static inline void emit_a32_movsx_r64(const bool is64, const u8 off, const s8 dst[], const s8 src[],
				      struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rs;
	s8 rd;

	if (is_stacked(dst_lo))
		rd = tmp[1];
	else
		rd = dst_lo;
	rs = arm_bpf_get_reg32(src_lo, rd, ctx);
	/* rs may be one of src[1], dst[1], or tmp[1] */

	/* Sign extend rs if needed. If off == 32, the lower 32 bits of src are
	 * moved to dst and sign extension only happens in the upper 32 bits.
	 */
	if (off != 32) {
		/* Sign extend rs into rd */
		emit(ARM_LSL_I(rd, rs, 32 - off), ctx);
		emit(ARM_ASR_I(rd, rd, 32 - off), ctx);
	} else {
		rd = rs;
	}

	/* Write rd to dst_lo
	 *
	 * Optimization:
	 * Assume:
	 * 1. dst == src and stacked.
	 * 2. off == 32
	 *
	 * In this case src_lo was loaded into rd(tmp[1]) but rd was not sign extended as off==32.
	 * So, we don't need to write rd back to dst_lo as they have the same value.
	 * This saves us one str instruction.
	 */
	if (dst_lo != src_lo || off != 32)
		arm_bpf_put_reg32(dst_lo, rd, ctx);

	if (!is64) {
		if (!ctx->prog->aux->verifier_zext)
			/* Zero out high 4 bytes */
			emit_a32_mov_i(dst_hi, 0, ctx);
	} else {
		if (is_stacked(dst_hi)) {
			emit(ARM_ASR_I(tmp[0], rd, 31), ctx);
			arm_bpf_put_reg32(dst_hi, tmp[0], ctx);
		} else {
			emit(ARM_ASR_I(dst_hi, rd, 31), ctx);
		}
	}
}
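
/*
 * Worked example (illustrative): a sign-extending move with off == 8 on
 * the value 0x80 emits LSL #24 then ASR #24, leaving 0xffffff80 in the
 * low word, and ASR #31 then fills the high word with 0xffffffff in the
 * 64-bit variant.
 */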

/* Shift and negate operations */
static inline void emit_a32_alu_i(const s8 dst, const u32 val,
				struct jit_ctx *ctx, const u8 op) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rd;

	rd = arm_bpf_get_reg32(dst, tmp[0], ctx);

	/* Do shift operation */
	switch (op) {
	case BPF_LSH:
		emit(ARM_LSL_I(rd, rd, val), ctx);
		break;
	case BPF_RSH:
		emit(ARM_LSR_I(rd, rd, val), ctx);
		break;
	case BPF_ARSH:
		emit(ARM_ASR_I(rd, rd, val), ctx);
		break;
	case BPF_NEG:
		emit(ARM_RSB_I(rd, rd, val), ctx);
		break;
	}

	arm_bpf_put_reg32(dst, rd, ctx);
}

/* dst = -dst (64 bit) */
static inline void emit_a32_neg64(const s8 dst[],
				struct jit_ctx *ctx){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd;

	/* Setup Operand */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do Negate Operation */
	emit(ARM_RSBS_I(rd[1], rd[1], 0), ctx);
	emit(ARM_RSC_I(rd[0], rd[0], 0), ctx);

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = dst << src */
static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSH operation */
	emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[1], SRTYPE_ASL, ARM_IP), ctx);
	emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd[1], SRTYPE_LSR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}
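
/*
 * How the composition above works (illustrative): ARM register-specified
 * shifts by 32..255 produce 0, so for a shift amount rt < 32 the new high
 * word is (hi << rt) | (lo >> (32 - rt)) while the (lo << (rt - 32)) term
 * is 0, and for rt >= 32 only the (lo << (rt - 32)) term survives.  E.g.
 * for rt == 40, the high word becomes lo << 8 and the low word (lo << 40)
 * is 0.
 */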

/* dst = dst >> src (signed) */
static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[],
				     struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do the ARSH operation */
	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
	_emit(ARM_COND_PL,
	      ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}

/* dst = dst >> src */
static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do RSH operation */
	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_LSR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}

/* dst = dst << val */
static inline void emit_a32_lsh_i64(const s8 dst[],
				    const u32 val, struct jit_ctx *ctx){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSH operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[0], rd[0], SRTYPE_ASL, val), ctx);
		emit(ARM_ORR_SI(rd[0], tmp2[0], rd[1], SRTYPE_LSR, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[1], rd[1], SRTYPE_ASL, val), ctx);
	} else {
		if (val == 32)
			emit(ARM_MOV_R(rd[0], rd[1]), ctx);
		else
			emit(ARM_MOV_SI(rd[0], rd[1], SRTYPE_ASL, val - 32), ctx);
		emit(ARM_EOR_R(rd[1], rd[1], rd[1]), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = dst >> val */
static inline void emit_a32_rsh_i64(const s8 dst[],
				    const u32 val, struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSR operation */
	if (val == 0) {
		/* An immediate value of 0 encodes a shift amount of 32
		 * for LSR. To shift by 0, don't do anything.
		 */
	} else if (val < 32) {
		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
	} else if (val == 32) {
		emit(ARM_MOV_R(rd[1], rd[0]), ctx);
		emit(ARM_MOV_I(rd[0], 0), ctx);
	} else {
		emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_LSR, val - 32), ctx);
		emit(ARM_MOV_I(rd[0], 0), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = dst >> val (signed) */
static inline void emit_a32_arsh_i64(const s8 dst[],
				     const u32 val, struct jit_ctx *ctx){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do ARSH operation */
	if (val == 0) {
		/* An immediate value of 0 encodes a shift amount of 32
		 * for ASR. To shift by 0, don't do anything.
		 */
	} else if (val < 32) {
		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
	} else if (val == 32) {
		emit(ARM_MOV_R(rd[1], rd[0]), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
	} else {
		emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_ASR, val - 32), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd, *rt;

	/* Setup operands for multiplication */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);
	rt = arm_bpf_get_reg64(src, tmp2, ctx);

	/* Do Multiplication */
	emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx);
	emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx);
	emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);

	emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx);
	emit(ARM_ADD_R(rd[0], ARM_LR, rd[0]), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_IP, ctx);
	arm_bpf_put_reg32(dst_hi, rd[0], ctx);
}
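
/*
 * The decomposition above (illustrative): for a 64x64 -> 64 multiply,
 * dst * src = ((dst_hi * src_lo + dst_lo * src_hi) << 32)
 *             + dst_lo * src_lo,
 * so the two cross products are summed into ARM_LR, UMULL produces the
 * full 64-bit dst_lo * src_lo, and ARM_LR is added into its high half.
 * Overflow out of bit 63 is simply discarded.
 */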

static bool is_ldst_imm(s16 off, const u8 size)
{
	s16 off_max = 0;

	switch (size) {
	case BPF_B:
	case BPF_W:
		off_max = 0xfff;
		break;
	case BPF_H:
		off_max = 0xff;
		break;
	case BPF_DW:
		/* Need to make sure off+4 does not overflow. */
		off_max = 0xfff - 4;
		break;
	}
	return -off_max <= off && off <= off_max;
}

static bool is_ldst_imm8(s16 off, const u8 size)
{
	s16 off_max = 0;

	switch (size) {
	case BPF_B:
		off_max = 0xff;
		break;
	case BPF_W:
		off_max = 0xfff;
		break;
	case BPF_H:
		off_max = 0xff;
		break;
	}
	return -off_max <= off && off <= off_max;
}

/* *(size *)(dst + off) = src */
static inline void emit_str_r(const s8 dst, const s8 src[],
			      s16 off, struct jit_ctx *ctx, const u8 sz){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rd;

	rd = arm_bpf_get_reg32(dst, tmp[1], ctx);

	if (!is_ldst_imm(off, sz)) {
		emit_a32_mov_i(tmp[0], off, ctx);
		emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
		rd = tmp[0];
		off = 0;
	}
	switch (sz) {
	case BPF_B:
		/* Store a Byte */
		emit(ARM_STRB_I(src_lo, rd, off), ctx);
		break;
	case BPF_H:
		/* Store a HalfWord */
		emit(ARM_STRH_I(src_lo, rd, off), ctx);
		break;
	case BPF_W:
		/* Store a Word */
		emit(ARM_STR_I(src_lo, rd, off), ctx);
		break;
	case BPF_DW:
		/* Store a Double Word */
		emit(ARM_STR_I(src_lo, rd, off), ctx);
		emit(ARM_STR_I(src_hi, rd, off + 4), ctx);
		break;
	}
}

/* dst = *(size*)(src + off) */
static inline void emit_ldx_r(const s8 dst[], const s8 src,
			      s16 off, struct jit_ctx *ctx, const u8 sz){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
	s8 rm = src;

	if (!is_ldst_imm(off, sz)) {
		emit_a32_mov_i(tmp[0], off, ctx);
		emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
		rm = tmp[0];
		off = 0;
	} else if (rd[1] == rm) {
		emit(ARM_MOV_R(tmp[0], rm), ctx);
		rm = tmp[0];
	}
	switch (sz) {
	case BPF_B:
		/* Load a Byte */
		emit(ARM_LDRB_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_H:
		/* Load a HalfWord */
		emit(ARM_LDRH_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_W:
		/* Load a Word */
		emit(ARM_LDR_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_DW:
		/* Load a Double Word */
		emit(ARM_LDR_I(rd[1], rm, off), ctx);
		emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
		break;
	}
	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = *(signed size*)(src + off) */
static inline void emit_ldsx_r(const s8 dst[], const s8 src,
			       s16 off, struct jit_ctx *ctx, const u8 sz){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
	s8 rm = src;
	int add_off;

	if (!is_ldst_imm8(off, sz)) {
		/*
		 * offset does not fit in the load/store immediate,
		 * construct an ADD instruction to apply the offset.
		 */
		add_off = imm8m(off);
		if (add_off > 0) {
			emit(ARM_ADD_I(tmp[0], src, add_off), ctx);
			rm = tmp[0];
		} else {
			emit_a32_mov_i(tmp[0], off, ctx);
			emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
			rm = tmp[0];
		}
		off = 0;
	}

	switch (sz) {
	case BPF_B:
		/* Load a Byte with sign extension */
		emit(ARM_LDRSB_I(rd[1], rm, off), ctx);
		break;
	case BPF_H:
		/* Load a HalfWord with sign extension */
		emit(ARM_LDRSH_I(rd[1], rm, off), ctx);
		break;
	case BPF_W:
		/* Load a Word */
		emit(ARM_LDR_I(rd[1], rm, off), ctx);
		break;
	}
	/* Carry the sign extension to upper 32 bits */
	emit(ARM_ASR_I(rd[0], rd[1], 31), ctx);
	arm_bpf_put_reg64(dst, rd, ctx);
}

/* Arithmetic operation */
static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
			     const u8 rn, struct jit_ctx *ctx, u8 op,
			     bool is_jmp64) {
	switch (op) {
	case BPF_JSET:
		if (is_jmp64) {
			emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
			emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
			emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
		} else {
			emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx);
		}
		break;
	case BPF_JEQ:
	case BPF_JNE:
	case BPF_JGT:
	case BPF_JGE:
	case BPF_JLE:
	case BPF_JLT:
		if (is_jmp64) {
			emit(ARM_CMP_R(rd, rm), ctx);
			/* Only compare the low halves if the high halves are equal. */
			_emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
		} else {
			emit(ARM_CMP_R(rt, rn), ctx);
		}
		break;
	case BPF_JSLE:
	case BPF_JSGT:
		emit(ARM_CMP_R(rn, rt), ctx);
		if (is_jmp64)
			emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
		break;
	case BPF_JSLT:
	case BPF_JSGE:
		emit(ARM_CMP_R(rt, rn), ctx);
		if (is_jmp64)
			emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
		break;
	}
}

static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{

	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const s8 *r2 = bpf2a32[BPF_REG_2];
	const s8 *r3 = bpf2a32[BPF_REG_3];
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *tcc = bpf2a32[TCALL_CNT];
	const s8 *tc;
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset) - 2)
	u32 lo, hi;
	s8 r_array, r_index;
	int off;

	/* if (index >= array->map.max_entries)
	 *	goto out;
	 */
	BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) >
		     ARM_INST_LDST__IMM12);
	off = offsetof(struct bpf_array, map.max_entries);
	r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx);
	/* index is 32-bit for arrays */
	r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx);
	/* array->map.max_entries */
	emit(ARM_LDR_I(tmp[1], r_array, off), ctx);
	/* index >= array->map.max_entries */
	emit(ARM_CMP_R(r_index, tmp[1]), ctx);
	_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);

	/* tmp2[0] = array, tmp2[1] = index */

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *	goto out;
	 * tail_call_cnt++;
	 */
	lo = (u32)MAX_TAIL_CALL_CNT;
	hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
	tc = arm_bpf_get_reg64(tcc, tmp, ctx);
	emit(ARM_CMP_I(tc[0], hi), ctx);
	_emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx);
	_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
	emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx);
	emit(ARM_ADC_I(tc[0], tc[0], 0), ctx);
	arm_bpf_put_reg64(tcc, tmp, ctx);

	/* prog = array->ptrs[index]
	 * if (prog == NULL)
	 *	goto out;
	 */
	BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0);
	off = imm8m(offsetof(struct bpf_array, ptrs));
	emit(ARM_ADD_I(tmp[1], r_array, off), ctx);
	emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx);
	emit(ARM_CMP_I(tmp[1], 0), ctx);
	_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_size); */
	BUILD_BUG_ON(offsetof(struct bpf_prog, bpf_func) >
		     ARM_INST_LDST__IMM12);
	off = offsetof(struct bpf_prog, bpf_func);
	emit(ARM_LDR_I(tmp[1], tmp[1], off), ctx);
	emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
	emit_bx_r(tmp[1], ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}

/* 0xabcd => 0xcdab */
static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
	const s8 *tmp2 = bpf2a32[TMP_REG_2];

	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
	emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx);
#else /* ARMv6+ */
	emit(ARM_REV16(rd, rn), ctx);
#endif
}

/* 0xabcdefgh => 0xghefcdab */
static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
	const s8 *tmp2 = bpf2a32[TMP_REG_2];

	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
	emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx);

	emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx);
	emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx);
	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx);
	emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx);
	emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx);

#else /* ARMv6+ */
	emit(ARM_REV(rd, rn), ctx);
#endif
}

// push the given BPF register pair onto the stack, via the scratch temporaries if it is stacked
static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx)
{
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rt;
	u16 reg_set = 0;

	rt = arm_bpf_get_reg64(src, tmp2, ctx);

	reg_set = (1 << rt[1]) | (1 << rt[0]);
	emit(ARM_PUSH(reg_set), ctx);
}

static void build_prologue(struct jit_ctx *ctx)
{
	const s8 arm_r0 = bpf2a32[BPF_REG_0][1];
	const s8 *bpf_r1 = bpf2a32[BPF_REG_1];
	const s8 *bpf_fp = bpf2a32[BPF_REG_FP];
	const s8 *tcc = bpf2a32[TCALL_CNT];

	/* Save callee saved registers. */
#ifdef CONFIG_FRAME_POINTER
	u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
	emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
#endif
	/* mov r3, #0 */
	/* sub r2, sp, #SCRATCH_SIZE */
	emit(ARM_MOV_I(bpf_r1[0], 0), ctx);
	emit(ARM_SUB_I(bpf_r1[1], ARM_SP, SCRATCH_SIZE), ctx);

	ctx->stack_size = imm8m(STACK_SIZE);

	/* Set up function call stack */
	emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);

	/* Set up BPF prog stack base register */
	emit_a32_mov_r64(true, bpf_fp, bpf_r1, ctx);

	/* Initialize Tail Count */
	emit(ARM_MOV_I(bpf_r1[1], 0), ctx);
	emit_a32_mov_r64(true, tcc, bpf_r1, ctx);

	/* Move BPF_CTX to BPF_R1 */
	emit(ARM_MOV_R(bpf_r1[1], arm_r0), ctx);

	/* end of prologue */
}

/* restore callee saved registers. */
static void build_epilogue(struct jit_ctx *ctx)
{
#ifdef CONFIG_FRAME_POINTER
	/* When using frame pointers, some additional registers need to
	 * be loaded. */
	u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
	emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	/* Restore callee saved registers. */
	emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
	emit(ARM_POP(CALLEE_POP_MASK), ctx);
#endif
}

/*
 * Convert an eBPF instruction to a native instruction, i.e.
 * JIT a single eBPF instruction.
 * Returns :
 *	0  - Successfully JITed an 8-byte eBPF instruction
 *	>0 - Successfully JITed a 16-byte eBPF instruction
 *	<0 - Failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const s8 *dst = bpf2a32[insn->dst_reg];
	const s8 *src = bpf2a32[insn->src_reg];
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	const s8 *rd, *rs;
	s8 rd_lo, rt, rm, rn;
	s32 jmp_offset;

#define check_imm(bits, imm) do {				\
	if ((imm) >= (1 << ((bits) - 1)) ||			\
	    (imm) < -(1 << ((bits) - 1))) {			\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm24(imm) check_imm(24, imm)

1614 	switch (code) {
1615 	/* ALU operations */
1616 
1617 	/* dst = src */
1618 	case BPF_ALU | BPF_MOV | BPF_K:
1619 	case BPF_ALU | BPF_MOV | BPF_X:
1620 	case BPF_ALU64 | BPF_MOV | BPF_K:
1621 	case BPF_ALU64 | BPF_MOV | BPF_X:
1622 		switch (BPF_SRC(code)) {
1623 		case BPF_X:
1624 			if (imm == 1) {
1625 				/* Special mov32 for zext */
1626 				emit_a32_mov_i(dst_hi, 0, ctx);
1627 				break;
1628 			}
1629 			if (insn->off)
1630 				emit_a32_movsx_r64(is64, insn->off, dst, src, ctx);
1631 			else
1632 				emit_a32_mov_r64(is64, dst, src, ctx);
1633 			break;
1634 		case BPF_K:
1635 			/* Sign-extend immediate value to destination reg */
1636 			emit_a32_mov_se_i64(is64, dst, imm, ctx);
1637 			break;
1638 		}
1639 		break;
1640 	/* dst = dst + src/imm */
1641 	/* dst = dst - src/imm */
1642 	/* dst = dst | src/imm */
1643 	/* dst = dst & src/imm */
1644 	/* dst = dst ^ src/imm */
1645 	/* dst = dst * src/imm */
1646 	/* dst = dst << src */
1647 	/* dst = dst >> src */
1648 	case BPF_ALU | BPF_ADD | BPF_K:
1649 	case BPF_ALU | BPF_ADD | BPF_X:
1650 	case BPF_ALU | BPF_SUB | BPF_K:
1651 	case BPF_ALU | BPF_SUB | BPF_X:
1652 	case BPF_ALU | BPF_OR | BPF_K:
1653 	case BPF_ALU | BPF_OR | BPF_X:
1654 	case BPF_ALU | BPF_AND | BPF_K:
1655 	case BPF_ALU | BPF_AND | BPF_X:
1656 	case BPF_ALU | BPF_XOR | BPF_K:
1657 	case BPF_ALU | BPF_XOR | BPF_X:
1658 	case BPF_ALU | BPF_MUL | BPF_K:
1659 	case BPF_ALU | BPF_MUL | BPF_X:
1660 	case BPF_ALU | BPF_LSH | BPF_X:
1661 	case BPF_ALU | BPF_RSH | BPF_X:
1662 	case BPF_ALU | BPF_ARSH | BPF_X:
1663 	case BPF_ALU64 | BPF_ADD | BPF_K:
1664 	case BPF_ALU64 | BPF_ADD | BPF_X:
1665 	case BPF_ALU64 | BPF_SUB | BPF_K:
1666 	case BPF_ALU64 | BPF_SUB | BPF_X:
1667 	case BPF_ALU64 | BPF_OR | BPF_K:
1668 	case BPF_ALU64 | BPF_OR | BPF_X:
1669 	case BPF_ALU64 | BPF_AND | BPF_K:
1670 	case BPF_ALU64 | BPF_AND | BPF_X:
1671 	case BPF_ALU64 | BPF_XOR | BPF_K:
1672 	case BPF_ALU64 | BPF_XOR | BPF_X:
1673 		switch (BPF_SRC(code)) {
1674 		case BPF_X:
1675 			emit_a32_alu_r64(is64, dst, src, ctx, BPF_OP(code));
1676 			break;
1677 		case BPF_K:
1678 			/* Move immediate value to the temporary register
1679 			 * and then do the ALU operation on the temporary
1680 			 * register as this will sign-extend the immediate
1681 			 * value into temporary reg and then it would be
1682 			 * safe to do the operation on it.
1683 			 */
1684 			emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
1685 			emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code));
1686 			break;
1687 		}
1688 		break;
1689 	/* dst = dst / src(imm) */
1690 	/* dst = dst % src(imm) */
1691 	case BPF_ALU | BPF_DIV | BPF_K:
1692 	case BPF_ALU | BPF_DIV | BPF_X:
1693 	case BPF_ALU | BPF_MOD | BPF_K:
1694 	case BPF_ALU | BPF_MOD | BPF_X:
1695 		rd_lo = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx);
1696 		switch (BPF_SRC(code)) {
1697 		case BPF_X:
1698 			rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx);
1699 			break;
1700 		case BPF_K:
1701 			rt = tmp2[0];
1702 			emit_a32_mov_i(rt, imm, ctx);
1703 			break;
1704 		default:
1705 			rt = src_lo;
1706 			break;
1707 		}
1708 		emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code), off);
1709 		arm_bpf_put_reg32(dst_lo, rd_lo, ctx);
1710 		if (!ctx->prog->aux->verifier_zext)
1711 			emit_a32_mov_i(dst_hi, 0, ctx);
1712 		break;
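	/*
	 * Sketch of the 32-bit lowering on cores with hardware divide
	 * (HWCAP_IDIVA); a call to a software division helper is emitted
	 * otherwise, and a non-zero insn->off selects the signed
	 * variants:
	 *
	 *   BPF_DIV:  udiv rd, rd, rt
	 *   BPF_MOD:  udiv ip, rd, rt
	 *             mls  rd, rt, ip, rd    @ rd = rd - rt * (rd / rt)
	 *
	 * Clearing dst_hi afterwards keeps the 64-bit view of the 32-bit
	 * result zero-extended.
	 */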
1713 	case BPF_ALU64 | BPF_DIV | BPF_K:
1714 	case BPF_ALU64 | BPF_DIV | BPF_X:
1715 	case BPF_ALU64 | BPF_MOD | BPF_K:
1716 	case BPF_ALU64 | BPF_MOD | BPF_X:
1717 		rd = arm_bpf_get_reg64(dst, tmp2, ctx);
1718 		switch (BPF_SRC(code)) {
1719 		case BPF_X:
1720 			rs = arm_bpf_get_reg64(src, tmp, ctx);
1721 			break;
1722 		case BPF_K:
1723 			rs = tmp;
1724 			emit_a32_mov_se_i64(is64, rs, imm, ctx);
1725 			break;
1726 		}
1727 		emit_udivmod64(rd, rd, rs, ctx, BPF_OP(code), off);
1728 		arm_bpf_put_reg64(dst, rd, ctx);
1729 		break;
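	/*
	 * No 32-bit ARM instruction can divide a 64-bit value, so
	 * emit_udivmod64() is expected to marshal the operand pairs into
	 * r0-r3 and call out to a 64-bit division helper; a sketch of
	 * the shape rather than the exact emitted sequence:
	 *
	 *   mov r0/r1, rd_pair
	 *   mov r2/r3, rs_pair
	 *   bl  <64-bit div/mod helper>
	 *   mov rd_pair, r0/r1
	 */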
1730 	/* dst = dst << imm */
1731 	/* dst = dst >> imm */
1732 	/* dst = dst >> imm (signed) */
1733 	case BPF_ALU | BPF_LSH | BPF_K:
1734 	case BPF_ALU | BPF_RSH | BPF_K:
1735 	case BPF_ALU | BPF_ARSH | BPF_K:
1736 		if (unlikely(imm > 31))
1737 			return -EINVAL;
1738 		if (imm)
1739 			emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code));
1740 		if (!ctx->prog->aux->verifier_zext)
1741 			emit_a32_mov_i(dst_hi, 0, ctx);
1742 		break;
1743 	/* dst = dst << imm */
1744 	case BPF_ALU64 | BPF_LSH | BPF_K:
1745 		if (unlikely(imm > 63))
1746 			return -EINVAL;
1747 		emit_a32_lsh_i64(dst, imm, ctx);
1748 		break;
1749 	/* dst = dst >> imm */
1750 	case BPF_ALU64 | BPF_RSH | BPF_K:
1751 		if (unlikely(imm > 63))
1752 			return -EINVAL;
1753 		emit_a32_rsh_i64(dst, imm, ctx);
1754 		break;
1755 	/* dst = dst << src */
1756 	case BPF_ALU64 | BPF_LSH | BPF_X:
1757 		emit_a32_lsh_r64(dst, src, ctx);
1758 		break;
1759 	/* dst = dst >> src */
1760 	case BPF_ALU64 | BPF_RSH | BPF_X:
1761 		emit_a32_rsh_r64(dst, src, ctx);
1762 		break;
1763 	/* dst = dst >> src (signed) */
1764 	case BPF_ALU64 | BPF_ARSH | BPF_X:
1765 		emit_a32_arsh_r64(dst, src, ctx);
1766 		break;
1767 	/* dst = dst >> imm (signed) */
1768 	case BPF_ALU64 | BPF_ARSH | BPF_K:
1769 		if (unlikely(imm > 63))
1770 			return -EINVAL;
1771 		emit_a32_arsh_i64(dst, imm, ctx);
1772 		break;
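	/*
	 * Sketch of what emit_a32_lsh_i64() produces for a constant
	 * shift 0 < imm < 32 (imm >= 32 is handled separately as a word
	 * move plus a single shift):
	 *
	 *   lsl rd_hi, rd_hi, #imm
	 *   orr rd_hi, rd_hi, rd_lo, lsr #(32 - imm)
	 *   lsl rd_lo, rd_lo, #imm
	 *
	 * The rsh/arsh helpers mirror this with lsr/asr and the bits
	 * carried across the pair in the other direction.
	 */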
1773 	/* dst = ~dst */
1774 	case BPF_ALU | BPF_NEG:
1775 		emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code));
1776 		if (!ctx->prog->aux->verifier_zext)
1777 			emit_a32_mov_i(dst_hi, 0, ctx);
1778 		break;
1779 	/* dst = ~dst (64 bit) */
1780 	case BPF_ALU64 | BPF_NEG:
1781 		emit_a32_neg64(dst, ctx);
1782 		break;
1783 	/* dst = dst * src/imm */
1784 	case BPF_ALU64 | BPF_MUL | BPF_X:
1785 	case BPF_ALU64 | BPF_MUL | BPF_K:
1786 		switch (BPF_SRC(code)) {
1787 		case BPF_X:
1788 			emit_a32_mul_r64(dst, src, ctx);
1789 			break;
1790 		case BPF_K:
1791 			/* Move the immediate value into the temporary
1792 			 * register pair first; this sign-extends the
1793 			 * immediate to 64 bits, after which the
1794 			 * multiplication can be performed safely on the
1795 			 * temporary register pair.
1796 			 */
1797 			emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
1798 			emit_a32_mul_r64(dst, tmp2, ctx);
1799 			break;
1800 		}
1801 		break;
1802 	/* dst = htole(dst) */
1803 	/* dst = htobe(dst) */
1804 	case BPF_ALU | BPF_END | BPF_FROM_LE: /* also BPF_TO_LE */
1805 	case BPF_ALU | BPF_END | BPF_FROM_BE: /* also BPF_TO_BE */
1806 	/* dst = bswap(dst) */
1807 	case BPF_ALU64 | BPF_END | BPF_FROM_LE: /* also BPF_TO_LE */
1808 		rd = arm_bpf_get_reg64(dst, tmp, ctx);
1809 		if (BPF_SRC(code) == BPF_FROM_LE && BPF_CLASS(code) != BPF_ALU64)
1810 			goto emit_bswap_uxt;
1811 		switch (imm) {
1812 		case 16:
1813 			emit_rev16(rd[1], rd[1], ctx);
1814 			goto emit_bswap_uxt;
1815 		case 32:
1816 			emit_rev32(rd[1], rd[1], ctx);
1817 			goto emit_bswap_uxt;
1818 		case 64:
1819 			emit_rev32(ARM_LR, rd[1], ctx);
1820 			emit_rev32(rd[1], rd[0], ctx);
1821 			emit(ARM_MOV_R(rd[0], ARM_LR), ctx);
1822 			break;
1823 		}
1824 		goto exit;
1825 emit_bswap_uxt:
1826 		switch (imm) {
1827 		case 16:
1828 			/* zero-extend 16 bits into 64 bits */
1829 #if __LINUX_ARM_ARCH__ < 6
1830 			emit_a32_mov_i(tmp2[1], 0xffff, ctx);
1831 			emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx);
1832 #else /* ARMv6+ */
1833 			emit(ARM_UXTH(rd[1], rd[1]), ctx);
1834 #endif
1835 			if (!ctx->prog->aux->verifier_zext)
1836 				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
1837 			break;
1838 		case 32:
1839 			/* zero-extend 32 bits into 64 bits */
1840 			if (!ctx->prog->aux->verifier_zext)
1841 				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
1842 			break;
1843 		case 64:
1844 			/* nop */
1845 			break;
1846 		}
1847 exit:
1848 		arm_bpf_put_reg64(dst, rd, ctx);
1849 		break;
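	/*
	 * Worked example, as a sketch: for a 16-bit byte swap
	 * (BPF_ALU | BPF_END | BPF_FROM_BE, imm = 16) on ARMv6+, the JIT
	 * emits
	 *
	 *   rev16 rd_lo, rd_lo        @ swap the two low bytes
	 *   uxth  rd_lo, rd_lo        @ drop bits 16-31
	 *   eor   rd_hi, rd_hi, rd_hi @ clear the high word, unless the
	 *                             @ verifier already proved zext
	 *
	 * while the 64-bit swap exchanges the two words and byte-reverses
	 * each one, using LR as scratch.
	 */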
1850 	/* dst = imm64 */
1851 	case BPF_LD | BPF_IMM | BPF_DW:
1852 	{
1853 		u64 val = (u32)imm | (u64)insn[1].imm << 32;
1854 
1855 		emit_a32_mov_i64(dst, val, ctx);
1856 
1857 		return 1;
1858 	}
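	/*
	 * BPF_LD | BPF_IMM | BPF_DW occupies two eBPF instruction slots:
	 * the low 32 bits come from insn[0].imm and the high 32 bits
	 * from insn[1].imm, e.g. loading 0x1122334455667788 means
	 * insn[0].imm = 0x55667788 and insn[1].imm = 0x11223344.
	 * Returning 1 tells build_body() to skip the second slot.
	 */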
1859 	/* LDX: dst = *(size *)(src + off) */
1860 	case BPF_LDX | BPF_MEM | BPF_W:
1861 	case BPF_LDX | BPF_MEM | BPF_H:
1862 	case BPF_LDX | BPF_MEM | BPF_B:
1863 	case BPF_LDX | BPF_MEM | BPF_DW:
1864 	/* LDSX: dst = *(signed size *)(src + off) */
1865 	case BPF_LDX | BPF_MEMSX | BPF_B:
1866 	case BPF_LDX | BPF_MEMSX | BPF_H:
1867 	case BPF_LDX | BPF_MEMSX | BPF_W:
1868 		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
1869 		if (BPF_MODE(insn->code) == BPF_MEMSX)
1870 			emit_ldsx_r(dst, rn, off, ctx, BPF_SIZE(code));
1871 		else
1872 			emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
1873 		break;
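	/*
	 * Sketch of the unsigned load lowering when the offset fits the
	 * addressing mode: a 32-bit BPF_LDX | BPF_MEM | BPF_W becomes
	 *
	 *   ldr rd_lo, [rn, #off]
	 *   mov rd_hi, #0          @ unless the verifier proved zext
	 *
	 * and the BPF_MEMSX forms use the sign-extending loads
	 * (ldrsb/ldrsh) plus an arithmetic shift of the low word to
	 * produce the high word.
	 */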
1874 	/* speculation barrier */
1875 	case BPF_ST | BPF_NOSPEC:
1876 		break;
1877 	/* ST: *(size *)(dst + off) = imm */
1878 	case BPF_ST | BPF_MEM | BPF_W:
1879 	case BPF_ST | BPF_MEM | BPF_H:
1880 	case BPF_ST | BPF_MEM | BPF_B:
1881 	case BPF_ST | BPF_MEM | BPF_DW:
1882 		switch (BPF_SIZE(code)) {
1883 		case BPF_DW:
1884 			/* Sign-extend immediate value into temp reg */
1885 			emit_a32_mov_se_i64(true, tmp2, imm, ctx);
1886 			break;
1887 		case BPF_W:
1888 		case BPF_H:
1889 		case BPF_B:
1890 			emit_a32_mov_i(tmp2[1], imm, ctx);
1891 			break;
1892 		}
1893 		emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
1894 		break;
1895 	/* Atomic ops */
1896 	case BPF_STX | BPF_ATOMIC | BPF_W:
1897 	case BPF_STX | BPF_ATOMIC | BPF_DW:
1898 		goto notyet;
1899 	/* STX: *(size *)(dst + off) = src */
1900 	case BPF_STX | BPF_MEM | BPF_W:
1901 	case BPF_STX | BPF_MEM | BPF_H:
1902 	case BPF_STX | BPF_MEM | BPF_B:
1903 	case BPF_STX | BPF_MEM | BPF_DW:
1904 		rs = arm_bpf_get_reg64(src, tmp2, ctx);
1905 		emit_str_r(dst_lo, rs, off, ctx, BPF_SIZE(code));
1906 		break;
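	/*
	 * Store sketch: both ST (immediate source, staged through tmp2)
	 * and STX (register source) funnel into emit_str_r(), which for
	 * BPF_DW issues two 32-bit stores, e.g.
	 *
	 *   str rs_lo, [rd, #off]
	 *   str rs_hi, [rd, #off + 4]
	 */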
1907 	/* PC += off if dst == src */
1908 	/* PC += off if dst > src */
1909 	/* PC += off if dst >= src */
1910 	/* PC += off if dst < src */
1911 	/* PC += off if dst <= src */
1912 	/* PC += off if dst != src */
1913 	/* PC += off if dst > src (signed) */
1914 	/* PC += off if dst >= src (signed) */
1915 	/* PC += off if dst < src (signed) */
1916 	/* PC += off if dst <= src (signed) */
1917 	/* PC += off if dst & src */
1918 	case BPF_JMP | BPF_JEQ | BPF_X:
1919 	case BPF_JMP | BPF_JGT | BPF_X:
1920 	case BPF_JMP | BPF_JGE | BPF_X:
1921 	case BPF_JMP | BPF_JNE | BPF_X:
1922 	case BPF_JMP | BPF_JSGT | BPF_X:
1923 	case BPF_JMP | BPF_JSGE | BPF_X:
1924 	case BPF_JMP | BPF_JSET | BPF_X:
1925 	case BPF_JMP | BPF_JLE | BPF_X:
1926 	case BPF_JMP | BPF_JLT | BPF_X:
1927 	case BPF_JMP | BPF_JSLT | BPF_X:
1928 	case BPF_JMP | BPF_JSLE | BPF_X:
1929 	case BPF_JMP32 | BPF_JEQ | BPF_X:
1930 	case BPF_JMP32 | BPF_JGT | BPF_X:
1931 	case BPF_JMP32 | BPF_JGE | BPF_X:
1932 	case BPF_JMP32 | BPF_JNE | BPF_X:
1933 	case BPF_JMP32 | BPF_JSGT | BPF_X:
1934 	case BPF_JMP32 | BPF_JSGE | BPF_X:
1935 	case BPF_JMP32 | BPF_JSET | BPF_X:
1936 	case BPF_JMP32 | BPF_JLE | BPF_X:
1937 	case BPF_JMP32 | BPF_JLT | BPF_X:
1938 	case BPF_JMP32 | BPF_JSLT | BPF_X:
1939 	case BPF_JMP32 | BPF_JSLE | BPF_X:
1940 		/* Setup source registers */
1941 		rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx);
1942 		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
1943 		goto go_jmp;
1944 	/* PC += off if dst == imm */
1945 	/* PC += off if dst > imm */
1946 	/* PC += off if dst >= imm */
1947 	/* PC += off if dst < imm */
1948 	/* PC += off if dst <= imm */
1949 	/* PC += off if dst != imm */
1950 	/* PC += off if dst > imm (signed) */
1951 	/* PC += off if dst >= imm (signed) */
1952 	/* PC += off if dst < imm (signed) */
1953 	/* PC += off if dst <= imm (signed) */
1954 	/* PC += off if dst & imm */
1955 	case BPF_JMP | BPF_JEQ | BPF_K:
1956 	case BPF_JMP | BPF_JGT | BPF_K:
1957 	case BPF_JMP | BPF_JGE | BPF_K:
1958 	case BPF_JMP | BPF_JNE | BPF_K:
1959 	case BPF_JMP | BPF_JSGT | BPF_K:
1960 	case BPF_JMP | BPF_JSGE | BPF_K:
1961 	case BPF_JMP | BPF_JSET | BPF_K:
1962 	case BPF_JMP | BPF_JLT | BPF_K:
1963 	case BPF_JMP | BPF_JLE | BPF_K:
1964 	case BPF_JMP | BPF_JSLT | BPF_K:
1965 	case BPF_JMP | BPF_JSLE | BPF_K:
1966 	case BPF_JMP32 | BPF_JEQ | BPF_K:
1967 	case BPF_JMP32 | BPF_JGT | BPF_K:
1968 	case BPF_JMP32 | BPF_JGE | BPF_K:
1969 	case BPF_JMP32 | BPF_JNE | BPF_K:
1970 	case BPF_JMP32 | BPF_JSGT | BPF_K:
1971 	case BPF_JMP32 | BPF_JSGE | BPF_K:
1972 	case BPF_JMP32 | BPF_JSET | BPF_K:
1973 	case BPF_JMP32 | BPF_JLT | BPF_K:
1974 	case BPF_JMP32 | BPF_JLE | BPF_K:
1975 	case BPF_JMP32 | BPF_JSLT | BPF_K:
1976 	case BPF_JMP32 | BPF_JSLE | BPF_K:
1977 		if (off == 0)
1978 			break;
1979 		rm = tmp2[0];
1980 		rn = tmp2[1];
1981 		/* Sign-extend immediate value */
1982 		emit_a32_mov_se_i64(true, tmp2, imm, ctx);
1983 go_jmp:
1984 		/* Setup destination register */
1985 		rd = arm_bpf_get_reg64(dst, tmp, ctx);
1986 
1987 		/* Check for the condition */
1988 		emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code),
1989 			  BPF_CLASS(code) == BPF_JMP);
1990 
1991 		/* Setup JUMP instruction */
1992 		jmp_offset = bpf2a32_offset(i+off, i, ctx);
1993 		switch (BPF_OP(code)) {
1994 		case BPF_JNE:
1995 		case BPF_JSET:
1996 			_emit(ARM_COND_NE, ARM_B(jmp_offset), ctx);
1997 			break;
1998 		case BPF_JEQ:
1999 			_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
2000 			break;
2001 		case BPF_JGT:
2002 			_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
2003 			break;
2004 		case BPF_JGE:
2005 			_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
2006 			break;
2007 		case BPF_JSGT:
2008 			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
2009 			break;
2010 		case BPF_JSGE:
2011 			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
2012 			break;
2013 		case BPF_JLE:
2014 			_emit(ARM_COND_LS, ARM_B(jmp_offset), ctx);
2015 			break;
2016 		case BPF_JLT:
2017 			_emit(ARM_COND_CC, ARM_B(jmp_offset), ctx);
2018 			break;
2019 		case BPF_JSLT:
2020 			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
2021 			break;
2022 		case BPF_JSLE:
2023 			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
2024 			break;
2025 		}
2026 		break;
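	/*
	 * The signed mappings above look inverted (JSGT branches on LT,
	 * JSLE on GE) because emit_ar_r() compares the operands in
	 * swapped order for those ops. A 64-bit signed compare is a cmp
	 * on the low words followed by sbcs on the high words, so as a
	 * sketch, "if dst s> src" is evaluated as "src - dst < 0":
	 *
	 *   cmp  src_lo, dst_lo
	 *   sbcs ip, src_hi, dst_hi
	 *   blt  <target>
	 */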
2027 	/* JMP OFF */
2028 	case BPF_JMP | BPF_JA:
2029 	case BPF_JMP32 | BPF_JA:
2030 	{
2031 		if (BPF_CLASS(code) == BPF_JMP32 && imm != 0)
2032 			jmp_offset = bpf2a32_offset(i + imm, i, ctx);
2033 		else if (BPF_CLASS(code) == BPF_JMP && off != 0)
2034 			jmp_offset = bpf2a32_offset(i + off, i, ctx);
2035 		else
2036 			break;
2037 
2038 		check_imm24(jmp_offset);
2039 		emit(ARM_B(jmp_offset), ctx);
2040 		break;
2041 	}
2042 	/* tail call */
2043 	case BPF_JMP | BPF_TAIL_CALL:
2044 		if (emit_bpf_tail_call(ctx))
2045 			return -EFAULT;
2046 		break;
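	/*
	 * emit_bpf_tail_call() (defined earlier in this file) open-codes
	 * the tail call contract: fall through to the next instruction
	 * if the index is out of bounds, if the tail-call count limit
	 * has been reached, or if the target prog entry is NULL;
	 * otherwise jump into the target program past its prologue.
	 */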
2047 	/* function call */
2048 	case BPF_JMP | BPF_CALL:
2049 	{
2050 		const s8 *r0 = bpf2a32[BPF_REG_0];
2051 		const s8 *r1 = bpf2a32[BPF_REG_1];
2052 		const s8 *r2 = bpf2a32[BPF_REG_2];
2053 		const s8 *r3 = bpf2a32[BPF_REG_3];
2054 		const s8 *r4 = bpf2a32[BPF_REG_4];
2055 		const s8 *r5 = bpf2a32[BPF_REG_5];
2056 		const u32 func = (u32)__bpf_call_base + (u32)imm;
2057 
2058 		emit_a32_mov_r64(true, r0, r1, ctx);
2059 		emit_a32_mov_r64(true, r1, r2, ctx);
2060 		emit_push_r64(r5, ctx);
2061 		emit_push_r64(r4, ctx);
2062 		emit_push_r64(r3, ctx);
2063 
2064 		emit_a32_mov_i(tmp[1], func, ctx);
2065 		emit_blx_r(tmp[1], ctx);
2066 
2067 		emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // caller cleans up stack args
2068 		break;
2069 	}
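	/*
	 * Call sketch: the 32-bit AAPCS passes only four 32-bit
	 * arguments in registers, so BPF R1 travels in r0:r1, R2 in
	 * r2:r3, and R3-R5 are pushed as three 64-bit stack slots that
	 * the "add sp, sp, #24" above pops again after the blx:
	 *
	 *   mov  r0:r1, R1        @ emit_a32_mov_r64
	 *   mov  r2:r3, R2
	 *   push {R5} ; push {R4} ; push {R3}
	 *   blx  <__bpf_call_base + imm>
	 *   add  sp, sp, #24
	 */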
2070 	/* function return */
2071 	case BPF_JMP | BPF_EXIT:
2072 		/* Optimization: when the last instruction is EXIT,
2073 		 * simply fall through to the epilogue.
2074 		 */
2075 		if (i == ctx->prog->len - 1)
2076 			break;
2077 		jmp_offset = epilogue_offset(ctx);
2078 		check_imm24(jmp_offset);
2079 		emit(ARM_B(jmp_offset), ctx);
2080 		break;
2081 notyet:
2082 		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
2083 		return -EFAULT;
2084 	default:
2085 		pr_err_once("unknown opcode %02x\n", code);
2086 		return -EINVAL;
2087 	}
2088 
2089 	if (ctx->flags & FLAG_IMM_OVERFLOW)
2090 		/*
2091 		 * this instruction generated an overflow when
2092 		 * trying to access the literal pool, so
2093 		 * delegate this filter to the kernel interpreter.
2094 		 */
2095 		return -1;
2096 	return 0;
2097 }
2098 
2099 static int build_body(struct jit_ctx *ctx)
2100 {
2101 	const struct bpf_prog *prog = ctx->prog;
2102 	unsigned int i;
2103 
2104 	for (i = 0; i < prog->len; i++) {
2105 		const struct bpf_insn *insn = &(prog->insnsi[i]);
2106 		int ret;
2107 
2108 		ret = build_insn(insn, ctx);
2109 
2110 		/* A positive return value means the 64-bit immediate load also consumed the next slot. */
2111 		if (ret > 0) {
2112 			i++;
2113 			if (ctx->target == NULL)
2114 				ctx->offsets[i] = ctx->idx;
2115 			continue;
2116 		}
2117 
2118 		if (ctx->target == NULL)
2119 			ctx->offsets[i] = ctx->idx;
2120 
2121 		/* If unsuccessful, return with the error code */
2122 		if (ret)
2123 			return ret;
2124 	}
2125 	return 0;
2126 }
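
/*
 * A sketch of how the two JIT passes cooperate: on the first pass
 * (ctx->target == NULL) nothing is written and only ctx->idx advances,
 * so offsets[i] records the ARM instruction index reached after eBPF
 * insn i. On the second pass, bpf2a32_offset() can then turn "jump from
 * insn i to insn i + off" into an ARM branch displacement, roughly
 * offsets[i + off] - offsets[i] adjusted for the ARM PC running ahead
 * of the executing instruction.
 */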
2127 
2128 static int validate_code(struct jit_ctx *ctx)
2129 {
2130 	int i;
2131 
2132 	for (i = 0; i < ctx->idx; i++) {
2133 		if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF))
2134 			return -1;
2135 	}
2136 
2137 	return 0;
2138 }
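
/*
 * The image is pre-filled with ARM_INST_UDF by jit_fill_hole(), so any
 * undefined instruction still present after the final pass indicates an
 * emission mismatch between the two passes; the program is then handed
 * back to the interpreter instead of being executed.
 */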
2139 
2140 bool bpf_jit_needs_zext(void)
2141 {
2142 	return true;
2143 }
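
/*
 * Returning true asks the verifier to insert explicit zero-extension
 * instructions after 32-bit operations, which is why the emitters above
 * skip clearing dst_hi whenever ctx->prog->aux->verifier_zext is set.
 */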
2144 
2145 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2146 {
2147 	struct bpf_prog *tmp, *orig_prog = prog;
2148 	struct bpf_binary_header *header;
2149 	bool tmp_blinded = false;
2150 	struct jit_ctx ctx;
2151 	unsigned int tmp_idx;
2152 	unsigned int image_size;
2153 	u8 *image_ptr;
2154 
2155 	/* If the BPF JIT was not enabled then we must fall back to
2156 	 * the interpreter.
2157 	 */
2158 	if (!prog->jit_requested)
2159 		return orig_prog;
2160 
2161 	/* If constant blinding was enabled and we failed during blinding
2162 	 * then we must fall back to the interpreter. Otherwise, we save
2163 	 * the new JITed code.
2164 	 */
2165 	tmp = bpf_jit_blind_constants(prog);
2166 
2167 	if (IS_ERR(tmp))
2168 		return orig_prog;
2169 	if (tmp != prog) {
2170 		tmp_blinded = true;
2171 		prog = tmp;
2172 	}
2173 
2174 	memset(&ctx, 0, sizeof(ctx));
2175 	ctx.prog = prog;
2176 	ctx.cpu_architecture = cpu_architecture();
2177 
2178 	/* If we cannot allocate memory for offsets[], then
2179 	 * we must fall back to the interpreter.
2180 	 */
2181 	ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
2182 	if (ctx.offsets == NULL) {
2183 		prog = orig_prog;
2184 		goto out;
2185 	}
2186 
2187 	/* 1) fake pass to find the length of the JITed code, to
2188 	 * compute ctx->offsets and the other context variables needed
2189 	 * to generate the final JITed code. Also, calculate the random
2190 	 * starting pointer of the JITed code, which is prefixed by a
2191 	 * random number of fault instructions.
2192 	 *
2193 	 * If the first pass fails then there is no chance of it
2194 	 * being successful in the second pass, so just fall back
2195 	 * to the interpreter.
2196 	 */
2197 	if (build_body(&ctx)) {
2198 		prog = orig_prog;
2199 		goto out_off;
2200 	}
2201 
2202 	tmp_idx = ctx.idx;
2203 	build_prologue(&ctx);
2204 	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
2205 
2206 	ctx.epilogue_offset = ctx.idx;
2207 
2208 #if __LINUX_ARM_ARCH__ < 7
2209 	tmp_idx = ctx.idx;
2210 	build_epilogue(&ctx);
2211 	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
2212 
2213 	ctx.idx += ctx.imm_count;
2214 	if (ctx.imm_count) {
2215 		ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL);
2216 		if (ctx.imms == NULL) {
2217 			prog = orig_prog;
2218 			goto out_off;
2219 		}
2220 	}
2221 #else
2222 	/* no literal pool handling is needed around the epilogue on ARMv7 */
2223 	build_epilogue(&ctx);
2224 #endif
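	/*
	 * Pre-ARMv7 cores lack movw/movt, so 32-bit constants that do
	 * not fit an ARM immediate are loaded PC-relative from a literal
	 * pool placed after the epilogue; imm_count from the first pass
	 * sizes that pool (the ctx.idx adjustment above) and ctx.imms
	 * collects the constant values on the second pass.
	 */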
2225 	/* Now we can get the actual image size of the JITed arm code.
2226 	 * Currently, the JIT does not emit THUMB-2 instructions, even
2227 	 * though doing so could decrease the size of the image.
2228 	 *
2229 	 * As each arm instruction is 32 bits long, we translate the
2230 	 * number of JITed instructions into the size required to store
2231 	 * this JITed code.
2232 	 */
2233 	image_size = sizeof(u32) * ctx.idx;
2234 
2235 	/* Now we know the size of the structure to make */
2236 	header = bpf_jit_binary_alloc(image_size, &image_ptr,
2237 				      sizeof(u32), jit_fill_hole);
2238 	/* If we cannot allocate memory for the image, then
2239 	 * we must fall back to the interpreter.
2240 	 */
2241 	if (header == NULL) {
2242 		prog = orig_prog;
2243 		goto out_imms;
2244 	}
2245 
2246 	/* 2) Actual pass to generate the final JIT code */
2247 	ctx.target = (u32 *) image_ptr;
2248 	ctx.idx = 0;
2249 
2250 	build_prologue(&ctx);
2251 
2252 	/* If building the body of the JITed code fails somehow,
2253 	 * we fall back to the interpreter.
2254 	 */
2255 	if (build_body(&ctx) < 0)
2256 		goto out_free;
2257 	build_epilogue(&ctx);
2258 
2259 	/* 3) Extra pass to validate the JITed code */
2260 	if (validate_code(&ctx))
2261 		goto out_free;
2262 	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
2263 
2264 	if (bpf_jit_enable > 1)
2265 		/* there are 2 passes here */
2266 		bpf_jit_dump(prog->len, image_size, 2, ctx.target);
2267 
2268 	if (bpf_jit_binary_lock_ro(header))
2269 		goto out_free;
2270 	prog->bpf_func = (void *)ctx.target;
2271 	prog->jited = 1;
2272 	prog->jited_len = image_size;
2273 
2274 out_imms:
2275 #if __LINUX_ARM_ARCH__ < 7
2276 	if (ctx.imm_count)
2277 		kfree(ctx.imms);
2278 #endif
2279 out_off:
2280 	kfree(ctx.offsets);
2281 out:
2282 	if (tmp_blinded)
2283 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
2284 					   tmp : orig_prog);
2285 	return prog;
2286 
2287 out_free:
2288 	image_ptr = NULL;
2289 	bpf_jit_binary_free(header);
2290 	prog = orig_prog;
2291 	goto out_imms;
2292 }
2293 
2294