xref: /linux/arch/arm64/net/bpf_jit_comp.c (revision 0883c2c06fb5bcf5b9e008270827e63c09a88c1e)
1 /*
2  * BPF JIT compiler for ARM64
3  *
4  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #define pr_fmt(fmt) "bpf_jit: " fmt
20 
21 #include <linux/filter.h>
22 #include <linux/printk.h>
23 #include <linux/skbuff.h>
24 #include <linux/slab.h>
25 
26 #include <asm/byteorder.h>
27 #include <asm/cacheflush.h>
28 #include <asm/debug-monitors.h>
29 
30 #include "bpf_jit.h"
31 
32 int bpf_jit_enable __read_mostly;
33 
34 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
35 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
36 
37 /* Map BPF registers to A64 registers */
38 static const int bpf2a64[] = {
39 	/* return value from in-kernel function, and exit value from eBPF */
40 	[BPF_REG_0] = A64_R(7),
41 	/* arguments from eBPF program to in-kernel function */
42 	[BPF_REG_1] = A64_R(0),
43 	[BPF_REG_2] = A64_R(1),
44 	[BPF_REG_3] = A64_R(2),
45 	[BPF_REG_4] = A64_R(3),
46 	[BPF_REG_5] = A64_R(4),
47 	/* callee saved registers that in-kernel function will preserve */
48 	[BPF_REG_6] = A64_R(19),
49 	[BPF_REG_7] = A64_R(20),
50 	[BPF_REG_8] = A64_R(21),
51 	[BPF_REG_9] = A64_R(22),
52 	/* read-only frame pointer to access stack */
53 	[BPF_REG_FP] = A64_R(25),
54 	/* temporary registers for internal BPF JIT */
55 	[TMP_REG_1] = A64_R(10),
56 	[TMP_REG_2] = A64_R(11),
57 	/* temporary register for blinding constants */
58 	[BPF_REG_AX] = A64_R(9),
59 };
60 
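/*
 * Per-compilation JIT state. The compiler runs two passes: the first with
 * image == NULL, where emit() only advances idx (to size the image) and
 * build_body() records per-BPF-instruction offsets in 'offset'; the second
 * pass writes the actual A64 instructions into 'image'.
 */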
61 struct jit_ctx {
62 	const struct bpf_prog *prog;
63 	int idx;
64 	int epilogue_offset;
65 	int *offset;
66 	u32 *image;
67 };
68 
69 static inline void emit(const u32 insn, struct jit_ctx *ctx)
70 {
71 	if (ctx->image != NULL)
72 		ctx->image[ctx->idx] = cpu_to_le32(insn);
73 
74 	ctx->idx++;
75 }
76 
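/*
 * Load an arbitrary 64-bit immediate: MOVZ sets the low 16 bits, then a
 * MOVK is emitted for each remaining non-zero 16-bit chunk.
 */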
77 static inline void emit_a64_mov_i64(const int reg, const u64 val,
78 				    struct jit_ctx *ctx)
79 {
80 	u64 tmp = val;
81 	int shift = 0;
82 
83 	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
84 	tmp >>= 16;
85 	shift += 16;
86 	while (tmp) {
87 		if (tmp & 0xffff)
88 			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
89 		tmp >>= 16;
90 		shift += 16;
91 	}
92 }
93 
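/*
 * Load a 32-bit immediate: negative values start from MOVN (which fills the
 * rest of the register with ones), positive values from MOVZ, with at most
 * one extra MOVK to patch in the other half-word.
 */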
94 static inline void emit_a64_mov_i(const int is64, const int reg,
95 				  const s32 val, struct jit_ctx *ctx)
96 {
97 	u16 hi = val >> 16;
98 	u16 lo = val & 0xffff;
99 
100 	if (hi & 0x8000) {
101 		if (hi == 0xffff) {
102 			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
103 		} else {
104 			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
105 			emit(A64_MOVK(is64, reg, lo, 0), ctx);
106 		}
107 	} else {
108 		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
109 		if (hi)
110 			emit(A64_MOVK(is64, reg, hi, 16), ctx);
111 	}
112 }
113 
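/*
 * A64 branch displacement, in instructions, from the branch emitted for
 * BPF insn 'bpf_from' to the target BPF insn 'bpf_to', based on the
 * ctx->offset table built during the first pass.
 */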
114 static inline int bpf2a64_offset(int bpf_to, int bpf_from,
115 				 const struct jit_ctx *ctx)
116 {
117 	int to = ctx->offset[bpf_to];
118 	/* -1 to account for the Branch instruction */
119 	int from = ctx->offset[bpf_from] - 1;
120 
121 	return to - from;
122 }
123 
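/*
 * Pre-fill the JIT buffer with break instructions so that any slot that is
 * never overwritten traps instead of executing leftover bytes.
 */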
124 static void jit_fill_hole(void *area, unsigned int size)
125 {
126 	u32 *ptr;
127 	/* We are guaranteed to have aligned memory. */
128 	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
129 		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
130 }
131 
132 static inline int epilogue_offset(const struct jit_ctx *ctx)
133 {
134 	int to = ctx->epilogue_offset;
135 	int from = ctx->idx;
136 
137 	return to - from;
138 }
139 
140 /* Stack size must be a multiple of 16 bytes */
141 #define STACK_ALIGN(sz) (((sz) + 15) & ~15)
142 
143 #define _STACK_SIZE \
144 	(MAX_BPF_STACK \
145 	 + 4 /* extra for skb_copy_bits buffer */)
146 
147 #define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
148 
149 static void build_prologue(struct jit_ctx *ctx)
150 {
151 	const u8 r6 = bpf2a64[BPF_REG_6];
152 	const u8 r7 = bpf2a64[BPF_REG_7];
153 	const u8 r8 = bpf2a64[BPF_REG_8];
154 	const u8 r9 = bpf2a64[BPF_REG_9];
155 	const u8 fp = bpf2a64[BPF_REG_FP];
156 
157 	/*
158 	 * BPF prog stack layout
159 	 *
160 	 *                         high
161 	 * original A64_SP =>   0:+-----+ BPF prologue
162 	 *                        |FP/LR|
163 	 * current A64_FP =>  -16:+-----+
164 	 *                        | ... | callee saved registers
165 	 *                        +-----+
166 	 *                        |     | x25/x26
167 	 * BPF fp register => -64:+-----+ <= (BPF_FP)
168 	 *                        |     |
169 	 *                        | ... | BPF prog stack
170 	 *                        |     |
171 	 *                        +-----+ <= (BPF_FP - MAX_BPF_STACK)
172 	 *                        |RSVD | JIT scratchpad
173 	 * current A64_SP =>      +-----+ <= (BPF_FP - STACK_SIZE)
174 	 *                        |     |
175 	 *                        | ... | Function call stack
176 	 *                        |     |
177 	 *                        +-----+
178 	 *                          low
179 	 *
180 	 */
181 
182 	/* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
183 	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
184 	emit(A64_MOV(1, A64_FP, A64_SP), ctx);
185 
186 	/* Save callee-saved registers */
187 	emit(A64_PUSH(r6, r7, A64_SP), ctx);
188 	emit(A64_PUSH(r8, r9, A64_SP), ctx);
189 
190 	/* Save fp (x25) and x26. SP requires 16-byte alignment */
191 	emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);
192 
193 	/* Set up BPF prog stack base register (x25) */
194 	emit(A64_MOV(1, fp, A64_SP), ctx);
195 
196 	/* Set up function call stack */
197 	emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
198 }
199 
200 static void build_epilogue(struct jit_ctx *ctx)
201 {
202 	const u8 r0 = bpf2a64[BPF_REG_0];
203 	const u8 r6 = bpf2a64[BPF_REG_6];
204 	const u8 r7 = bpf2a64[BPF_REG_7];
205 	const u8 r8 = bpf2a64[BPF_REG_8];
206 	const u8 r9 = bpf2a64[BPF_REG_9];
207 	const u8 fp = bpf2a64[BPF_REG_FP];
208 
209 	/* We're done with BPF stack */
210 	emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
211 
212 	/* Restore fp (x25) and x26 */
213 	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
214 
215 	/* Restore callee-saved registers */
216 	emit(A64_POP(r8, r9, A64_SP), ctx);
217 	emit(A64_POP(r6, r7, A64_SP), ctx);
218 
219 	/* Restore FP/LR registers */
220 	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
221 
222 	/* Set return value */
223 	emit(A64_MOV(1, A64_R(0), r0), ctx);
224 
225 	emit(A64_RET(A64_LR), ctx);
226 }
227 
228 /* JITs an eBPF instruction.
229  * Returns:
230  * 0  - successfully JITed an 8-byte eBPF instruction.
231  * >0 - successfully JITed a 16-byte eBPF instruction.
232  * <0 - failed to JIT.
233  */
234 static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
235 {
236 	const u8 code = insn->code;
237 	const u8 dst = bpf2a64[insn->dst_reg];
238 	const u8 src = bpf2a64[insn->src_reg];
239 	const u8 tmp = bpf2a64[TMP_REG_1];
240 	const u8 tmp2 = bpf2a64[TMP_REG_2];
241 	const s16 off = insn->off;
242 	const s32 imm = insn->imm;
243 	const int i = insn - ctx->prog->insnsi;
244 	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
245 	u8 jmp_cond;
246 	s32 jmp_offset;
247 
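/* Reject an immediate or branch offset that does not fit in the
 * instruction's immediate field.
 */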
248 #define check_imm(bits, imm) do {				\
249 	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
250 	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
251 		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
252 			i, imm, imm);				\
253 		return -EINVAL;					\
254 	}							\
255 } while (0)
256 #define check_imm19(imm) check_imm(19, imm)
257 #define check_imm26(imm) check_imm(26, imm)
258 
259 	switch (code) {
260 	/* dst = src */
261 	case BPF_ALU | BPF_MOV | BPF_X:
262 	case BPF_ALU64 | BPF_MOV | BPF_X:
263 		emit(A64_MOV(is64, dst, src), ctx);
264 		break;
265 	/* dst = dst OP src */
266 	case BPF_ALU | BPF_ADD | BPF_X:
267 	case BPF_ALU64 | BPF_ADD | BPF_X:
268 		emit(A64_ADD(is64, dst, dst, src), ctx);
269 		break;
270 	case BPF_ALU | BPF_SUB | BPF_X:
271 	case BPF_ALU64 | BPF_SUB | BPF_X:
272 		emit(A64_SUB(is64, dst, dst, src), ctx);
273 		break;
274 	case BPF_ALU | BPF_AND | BPF_X:
275 	case BPF_ALU64 | BPF_AND | BPF_X:
276 		emit(A64_AND(is64, dst, dst, src), ctx);
277 		break;
278 	case BPF_ALU | BPF_OR | BPF_X:
279 	case BPF_ALU64 | BPF_OR | BPF_X:
280 		emit(A64_ORR(is64, dst, dst, src), ctx);
281 		break;
282 	case BPF_ALU | BPF_XOR | BPF_X:
283 	case BPF_ALU64 | BPF_XOR | BPF_X:
284 		emit(A64_EOR(is64, dst, dst, src), ctx);
285 		break;
286 	case BPF_ALU | BPF_MUL | BPF_X:
287 	case BPF_ALU64 | BPF_MUL | BPF_X:
288 		emit(A64_MUL(is64, dst, dst, src), ctx);
289 		break;
290 	case BPF_ALU | BPF_DIV | BPF_X:
291 	case BPF_ALU64 | BPF_DIV | BPF_X:
292 	case BPF_ALU | BPF_MOD | BPF_X:
293 	case BPF_ALU64 | BPF_MOD | BPF_X:
294 	{
295 		const u8 r0 = bpf2a64[BPF_REG_0];
296 
297 		/* if (src == 0) return 0 */
298 		jmp_offset = 3; /* skip ahead to else path */
299 		check_imm19(jmp_offset);
300 		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
301 		emit(A64_MOVZ(1, r0, 0, 0), ctx);
302 		jmp_offset = epilogue_offset(ctx);
303 		check_imm26(jmp_offset);
304 		emit(A64_B(jmp_offset), ctx);
305 		/* else */
306 		switch (BPF_OP(code)) {
307 		case BPF_DIV:
308 			emit(A64_UDIV(is64, dst, dst, src), ctx);
309 			break;
310 		case BPF_MOD:
311 			emit(A64_UDIV(is64, tmp, dst, src), ctx);
312 			emit(A64_MUL(is64, tmp, tmp, src), ctx);
313 			emit(A64_SUB(is64, dst, dst, tmp), ctx);
314 			break;
315 		}
316 		break;
317 	}
318 	case BPF_ALU | BPF_LSH | BPF_X:
319 	case BPF_ALU64 | BPF_LSH | BPF_X:
320 		emit(A64_LSLV(is64, dst, dst, src), ctx);
321 		break;
322 	case BPF_ALU | BPF_RSH | BPF_X:
323 	case BPF_ALU64 | BPF_RSH | BPF_X:
324 		emit(A64_LSRV(is64, dst, dst, src), ctx);
325 		break;
326 	case BPF_ALU | BPF_ARSH | BPF_X:
327 	case BPF_ALU64 | BPF_ARSH | BPF_X:
328 		emit(A64_ASRV(is64, dst, dst, src), ctx);
329 		break;
330 	/* dst = -dst */
331 	case BPF_ALU | BPF_NEG:
332 	case BPF_ALU64 | BPF_NEG:
333 		emit(A64_NEG(is64, dst, dst), ctx);
334 		break;
335 	/* dst = BSWAP##imm(dst) */
336 	case BPF_ALU | BPF_END | BPF_FROM_LE:
337 	case BPF_ALU | BPF_END | BPF_FROM_BE:
338 #ifdef CONFIG_CPU_BIG_ENDIAN
339 		if (BPF_SRC(code) == BPF_FROM_BE)
340 			goto emit_bswap_uxt;
341 #else /* !CONFIG_CPU_BIG_ENDIAN */
342 		if (BPF_SRC(code) == BPF_FROM_LE)
343 			goto emit_bswap_uxt;
344 #endif
345 		switch (imm) {
346 		case 16:
347 			emit(A64_REV16(is64, dst, dst), ctx);
348 			/* zero-extend 16 bits into 64 bits */
349 			emit(A64_UXTH(is64, dst, dst), ctx);
350 			break;
351 		case 32:
352 			emit(A64_REV32(is64, dst, dst), ctx);
353 			/* upper 32 bits already cleared */
354 			break;
355 		case 64:
356 			emit(A64_REV64(dst, dst), ctx);
357 			break;
358 		}
359 		break;
360 emit_bswap_uxt:
361 		switch (imm) {
362 		case 16:
363 			/* zero-extend 16 bits into 64 bits */
364 			emit(A64_UXTH(is64, dst, dst), ctx);
365 			break;
366 		case 32:
367 			/* zero-extend 32 bits into 64 bits */
368 			emit(A64_UXTW(is64, dst, dst), ctx);
369 			break;
370 		case 64:
371 			/* nop */
372 			break;
373 		}
374 		break;
375 	/* dst = imm */
376 	case BPF_ALU | BPF_MOV | BPF_K:
377 	case BPF_ALU64 | BPF_MOV | BPF_K:
378 		emit_a64_mov_i(is64, dst, imm, ctx);
379 		break;
380 	/* dst = dst OP imm */
381 	case BPF_ALU | BPF_ADD | BPF_K:
382 	case BPF_ALU64 | BPF_ADD | BPF_K:
383 		emit_a64_mov_i(is64, tmp, imm, ctx);
384 		emit(A64_ADD(is64, dst, dst, tmp), ctx);
385 		break;
386 	case BPF_ALU | BPF_SUB | BPF_K:
387 	case BPF_ALU64 | BPF_SUB | BPF_K:
388 		emit_a64_mov_i(is64, tmp, imm, ctx);
389 		emit(A64_SUB(is64, dst, dst, tmp), ctx);
390 		break;
391 	case BPF_ALU | BPF_AND | BPF_K:
392 	case BPF_ALU64 | BPF_AND | BPF_K:
393 		emit_a64_mov_i(is64, tmp, imm, ctx);
394 		emit(A64_AND(is64, dst, dst, tmp), ctx);
395 		break;
396 	case BPF_ALU | BPF_OR | BPF_K:
397 	case BPF_ALU64 | BPF_OR | BPF_K:
398 		emit_a64_mov_i(is64, tmp, imm, ctx);
399 		emit(A64_ORR(is64, dst, dst, tmp), ctx);
400 		break;
401 	case BPF_ALU | BPF_XOR | BPF_K:
402 	case BPF_ALU64 | BPF_XOR | BPF_K:
403 		emit_a64_mov_i(is64, tmp, imm, ctx);
404 		emit(A64_EOR(is64, dst, dst, tmp), ctx);
405 		break;
406 	case BPF_ALU | BPF_MUL | BPF_K:
407 	case BPF_ALU64 | BPF_MUL | BPF_K:
408 		emit_a64_mov_i(is64, tmp, imm, ctx);
409 		emit(A64_MUL(is64, dst, dst, tmp), ctx);
410 		break;
411 	case BPF_ALU | BPF_DIV | BPF_K:
412 	case BPF_ALU64 | BPF_DIV | BPF_K:
413 		emit_a64_mov_i(is64, tmp, imm, ctx);
414 		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
415 		break;
416 	case BPF_ALU | BPF_MOD | BPF_K:
417 	case BPF_ALU64 | BPF_MOD | BPF_K:
418 		emit_a64_mov_i(is64, tmp2, imm, ctx);
419 		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
420 		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
421 		emit(A64_SUB(is64, dst, dst, tmp), ctx);
422 		break;
423 	case BPF_ALU | BPF_LSH | BPF_K:
424 	case BPF_ALU64 | BPF_LSH | BPF_K:
425 		emit(A64_LSL(is64, dst, dst, imm), ctx);
426 		break;
427 	case BPF_ALU | BPF_RSH | BPF_K:
428 	case BPF_ALU64 | BPF_RSH | BPF_K:
429 		emit(A64_LSR(is64, dst, dst, imm), ctx);
430 		break;
431 	case BPF_ALU | BPF_ARSH | BPF_K:
432 	case BPF_ALU64 | BPF_ARSH | BPF_K:
433 		emit(A64_ASR(is64, dst, dst, imm), ctx);
434 		break;
435 
436 	/* JUMP off */
437 	case BPF_JMP | BPF_JA:
438 		jmp_offset = bpf2a64_offset(i + off, i, ctx);
439 		check_imm26(jmp_offset);
440 		emit(A64_B(jmp_offset), ctx);
441 		break;
442 	/* IF (dst COND src) JUMP off */
443 	case BPF_JMP | BPF_JEQ | BPF_X:
444 	case BPF_JMP | BPF_JGT | BPF_X:
445 	case BPF_JMP | BPF_JGE | BPF_X:
446 	case BPF_JMP | BPF_JNE | BPF_X:
447 	case BPF_JMP | BPF_JSGT | BPF_X:
448 	case BPF_JMP | BPF_JSGE | BPF_X:
449 		emit(A64_CMP(1, dst, src), ctx);
450 emit_cond_jmp:
451 		jmp_offset = bpf2a64_offset(i + off, i, ctx);
452 		check_imm19(jmp_offset);
453 		switch (BPF_OP(code)) {
454 		case BPF_JEQ:
455 			jmp_cond = A64_COND_EQ;
456 			break;
457 		case BPF_JGT:
458 			jmp_cond = A64_COND_HI;
459 			break;
460 		case BPF_JGE:
461 			jmp_cond = A64_COND_CS;
462 			break;
463 		case BPF_JSET:
464 		case BPF_JNE:
465 			jmp_cond = A64_COND_NE;
466 			break;
467 		case BPF_JSGT:
468 			jmp_cond = A64_COND_GT;
469 			break;
470 		case BPF_JSGE:
471 			jmp_cond = A64_COND_GE;
472 			break;
473 		default:
474 			return -EFAULT;
475 		}
476 		emit(A64_B_(jmp_cond, jmp_offset), ctx);
477 		break;
478 	case BPF_JMP | BPF_JSET | BPF_X:
479 		emit(A64_TST(1, dst, src), ctx);
480 		goto emit_cond_jmp;
481 	/* IF (dst COND imm) JUMP off */
482 	case BPF_JMP | BPF_JEQ | BPF_K:
483 	case BPF_JMP | BPF_JGT | BPF_K:
484 	case BPF_JMP | BPF_JGE | BPF_K:
485 	case BPF_JMP | BPF_JNE | BPF_K:
486 	case BPF_JMP | BPF_JSGT | BPF_K:
487 	case BPF_JMP | BPF_JSGE | BPF_K:
488 		emit_a64_mov_i(1, tmp, imm, ctx);
489 		emit(A64_CMP(1, dst, tmp), ctx);
490 		goto emit_cond_jmp;
491 	case BPF_JMP | BPF_JSET | BPF_K:
492 		emit_a64_mov_i(1, tmp, imm, ctx);
493 		emit(A64_TST(1, dst, tmp), ctx);
494 		goto emit_cond_jmp;
495 	/* function call */
496 	case BPF_JMP | BPF_CALL:
497 	{
498 		const u8 r0 = bpf2a64[BPF_REG_0];
499 		const u64 func = (u64)__bpf_call_base + imm;
500 
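		/* The BPF core encodes the helper as an offset from
		 * __bpf_call_base; materialize the absolute address and
		 * call it, preserving FP/LR around the call.
		 */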
501 		emit_a64_mov_i64(tmp, func, ctx);
502 		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
503 		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
504 		emit(A64_BLR(tmp), ctx);
505 		emit(A64_MOV(1, r0, A64_R(0)), ctx);
506 		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
507 		break;
508 	}
509 	/* function return */
510 	case BPF_JMP | BPF_EXIT:
511 		/* Optimization: when the last instruction is EXIT,
512 		   simply fall through to the epilogue. */
513 		if (i == ctx->prog->len - 1)
514 			break;
515 		jmp_offset = epilogue_offset(ctx);
516 		check_imm26(jmp_offset);
517 		emit(A64_B(jmp_offset), ctx);
518 		break;
519 
520 	/* dst = imm64 */
521 	case BPF_LD | BPF_IMM | BPF_DW:
522 	{
523 		const struct bpf_insn insn1 = insn[1];
524 		u64 imm64;
525 
526 		if (insn1.code != 0 || insn1.src_reg != 0 ||
527 		    insn1.dst_reg != 0 || insn1.off != 0) {
528 			/* Note: verifier in BPF core must catch invalid
529 			 * instructions.
530 			 */
531 			pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
532 			return -EINVAL;
533 		}
534 
535 		imm64 = (u64)insn1.imm << 32 | (u32)imm;
536 		emit_a64_mov_i64(dst, imm64, ctx);
537 
538 		return 1;
539 	}
540 
541 	/* LDX: dst = *(size *)(src + off) */
542 	case BPF_LDX | BPF_MEM | BPF_W:
543 	case BPF_LDX | BPF_MEM | BPF_H:
544 	case BPF_LDX | BPF_MEM | BPF_B:
545 	case BPF_LDX | BPF_MEM | BPF_DW:
546 		emit_a64_mov_i(1, tmp, off, ctx);
547 		switch (BPF_SIZE(code)) {
548 		case BPF_W:
549 			emit(A64_LDR32(dst, src, tmp), ctx);
550 			break;
551 		case BPF_H:
552 			emit(A64_LDRH(dst, src, tmp), ctx);
553 			break;
554 		case BPF_B:
555 			emit(A64_LDRB(dst, src, tmp), ctx);
556 			break;
557 		case BPF_DW:
558 			emit(A64_LDR64(dst, src, tmp), ctx);
559 			break;
560 		}
561 		break;
562 
563 	/* ST: *(size *)(dst + off) = imm */
564 	case BPF_ST | BPF_MEM | BPF_W:
565 	case BPF_ST | BPF_MEM | BPF_H:
566 	case BPF_ST | BPF_MEM | BPF_B:
567 	case BPF_ST | BPF_MEM | BPF_DW:
568 		/* Load imm to a register then store it */
569 		emit_a64_mov_i(1, tmp2, off, ctx);
570 		emit_a64_mov_i(1, tmp, imm, ctx);
571 		switch (BPF_SIZE(code)) {
572 		case BPF_W:
573 			emit(A64_STR32(tmp, dst, tmp2), ctx);
574 			break;
575 		case BPF_H:
576 			emit(A64_STRH(tmp, dst, tmp2), ctx);
577 			break;
578 		case BPF_B:
579 			emit(A64_STRB(tmp, dst, tmp2), ctx);
580 			break;
581 		case BPF_DW:
582 			emit(A64_STR64(tmp, dst, tmp2), ctx);
583 			break;
584 		}
585 		break;
586 
587 	/* STX: *(size *)(dst + off) = src */
588 	case BPF_STX | BPF_MEM | BPF_W:
589 	case BPF_STX | BPF_MEM | BPF_H:
590 	case BPF_STX | BPF_MEM | BPF_B:
591 	case BPF_STX | BPF_MEM | BPF_DW:
592 		emit_a64_mov_i(1, tmp, off, ctx);
593 		switch (BPF_SIZE(code)) {
594 		case BPF_W:
595 			emit(A64_STR32(src, dst, tmp), ctx);
596 			break;
597 		case BPF_H:
598 			emit(A64_STRH(src, dst, tmp), ctx);
599 			break;
600 		case BPF_B:
601 			emit(A64_STRB(src, dst, tmp), ctx);
602 			break;
603 		case BPF_DW:
604 			emit(A64_STR64(src, dst, tmp), ctx);
605 			break;
606 		}
607 		break;
608 	/* STX XADD: lock *(u32 *)(dst + off) += src */
609 	case BPF_STX | BPF_XADD | BPF_W:
610 	/* STX XADD: lock *(u64 *)(dst + off) += src */
611 	case BPF_STX | BPF_XADD | BPF_DW:
612 		goto notyet;
613 
614 	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
615 	case BPF_LD | BPF_ABS | BPF_W:
616 	case BPF_LD | BPF_ABS | BPF_H:
617 	case BPF_LD | BPF_ABS | BPF_B:
618 	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
619 	case BPF_LD | BPF_IND | BPF_W:
620 	case BPF_LD | BPF_IND | BPF_H:
621 	case BPF_LD | BPF_IND | BPF_B:
622 	{
623 		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
624 		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
625 		const u8 fp = bpf2a64[BPF_REG_FP];
626 		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
627 		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
628 		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
629 		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
630 		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
631 		int size;
632 
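		/* Call bpf_load_pointer(skb, k, size, buffer); a NULL result
		 * makes the program return 0 via the epilogue.
		 */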
633 		emit(A64_MOV(1, r1, r6), ctx);
634 		emit_a64_mov_i(0, r2, imm, ctx);
635 		if (BPF_MODE(code) == BPF_IND)
636 			emit(A64_ADD(0, r2, r2, src), ctx);
637 		switch (BPF_SIZE(code)) {
638 		case BPF_W:
639 			size = 4;
640 			break;
641 		case BPF_H:
642 			size = 2;
643 			break;
644 		case BPF_B:
645 			size = 1;
646 			break;
647 		default:
648 			return -EINVAL;
649 		}
650 		emit_a64_mov_i64(r3, size, ctx);
651 		emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
652 		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
653 		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
654 		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
655 		emit(A64_BLR(r5), ctx);
656 		emit(A64_MOV(1, r0, A64_R(0)), ctx);
657 		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
658 
659 		jmp_offset = epilogue_offset(ctx);
660 		check_imm19(jmp_offset);
661 		emit(A64_CBZ(1, r0, jmp_offset), ctx);
662 		emit(A64_MOV(1, r5, r0), ctx);
663 		switch (BPF_SIZE(code)) {
664 		case BPF_W:
665 			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
666 #ifndef CONFIG_CPU_BIG_ENDIAN
667 			emit(A64_REV32(0, r0, r0), ctx);
668 #endif
669 			break;
670 		case BPF_H:
671 			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
672 #ifndef CONFIG_CPU_BIG_ENDIAN
673 			emit(A64_REV16(0, r0, r0), ctx);
674 #endif
675 			break;
676 		case BPF_B:
677 			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
678 			break;
679 		}
680 		break;
681 	}
682 notyet:
683 		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
684 		return -EFAULT;
685 
686 	default:
687 		pr_err_once("unknown opcode %02x\n", code);
688 		return -EINVAL;
689 	}
690 
691 	return 0;
692 }
693 
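/*
 * Translate every BPF instruction. On the first pass (ctx->image == NULL)
 * only ctx->offset[] and ctx->idx are updated; a return value > 0 from
 * build_insn() marks a 16-byte instruction (BPF_LD | BPF_IMM | BPF_DW), so
 * its second half is skipped.
 */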
694 static int build_body(struct jit_ctx *ctx)
695 {
696 	const struct bpf_prog *prog = ctx->prog;
697 	int i;
698 
699 	for (i = 0; i < prog->len; i++) {
700 		const struct bpf_insn *insn = &prog->insnsi[i];
701 		int ret;
702 
703 		ret = build_insn(insn, ctx);
704 
705 		if (ctx->image == NULL)
706 			ctx->offset[i] = ctx->idx;
707 
708 		if (ret > 0) {
709 			i++;
710 			continue;
711 		}
712 		if (ret)
713 			return ret;
714 	}
715 
716 	return 0;
717 }
718 
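/*
 * The image was pre-filled with AARCH64_BREAK_FAULT by jit_fill_hole();
 * any such word left over means an instruction slot was never written.
 */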
719 static int validate_code(struct jit_ctx *ctx)
720 {
721 	int i;
722 
723 	for (i = 0; i < ctx->idx; i++) {
724 		u32 a64_insn = le32_to_cpu(ctx->image[i]);
725 
726 		if (a64_insn == AARCH64_BREAK_FAULT)
727 			return -1;
728 	}
729 
730 	return 0;
731 }
732 
733 static inline void bpf_flush_icache(void *start, void *end)
734 {
735 	flush_icache_range((unsigned long)start, (unsigned long)end);
736 }
737 
738 void bpf_jit_compile(struct bpf_prog *prog)
739 {
740 	/* Nothing to do here; eBPF (internal BPF) programs are JITed via bpf_int_jit_compile(). */
741 }
742 
743 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
744 {
745 	struct bpf_prog *tmp, *orig_prog = prog;
746 	struct bpf_binary_header *header;
747 	bool tmp_blinded = false;
748 	struct jit_ctx ctx;
749 	int image_size;
750 	u8 *image_ptr;
751 
752 	if (!bpf_jit_enable)
753 		return orig_prog;
754 
755 	tmp = bpf_jit_blind_constants(prog);
756 	/* If blinding was requested and we failed during blinding,
757 	 * we must fall back to the interpreter.
758 	 */
759 	if (IS_ERR(tmp))
760 		return orig_prog;
761 	if (tmp != prog) {
762 		tmp_blinded = true;
763 		prog = tmp;
764 	}
765 
766 	memset(&ctx, 0, sizeof(ctx));
767 	ctx.prog = prog;
768 
769 	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
770 	if (ctx.offset == NULL) {
771 		prog = orig_prog;
772 		goto out;
773 	}
774 
775 	/* 1. Initial fake pass to compute ctx->idx. */
776 
777 	/* The same fake pass also fills in ctx->offset for branch resolution. */
778 	if (build_body(&ctx)) {
779 		prog = orig_prog;
780 		goto out_off;
781 	}
782 
783 	build_prologue(&ctx);
784 
785 	ctx.epilogue_offset = ctx.idx;
786 	build_epilogue(&ctx);
787 
788 	/* Now we know the actual image size. */
789 	image_size = sizeof(u32) * ctx.idx;
790 	header = bpf_jit_binary_alloc(image_size, &image_ptr,
791 				      sizeof(u32), jit_fill_hole);
792 	if (header == NULL) {
793 		prog = orig_prog;
794 		goto out_off;
795 	}
796 
797 	/* 2. Now, the actual pass. */
798 
799 	ctx.image = (u32 *)image_ptr;
800 	ctx.idx = 0;
801 
802 	build_prologue(&ctx);
803 
804 	if (build_body(&ctx)) {
805 		bpf_jit_binary_free(header);
806 		prog = orig_prog;
807 		goto out_off;
808 	}
809 
810 	build_epilogue(&ctx);
811 
812 	/* 3. Extra pass to validate JITed code. */
813 	if (validate_code(&ctx)) {
814 		bpf_jit_binary_free(header);
815 		prog = orig_prog;
816 		goto out_off;
817 	}
818 
819 	/* And we're done. */
820 	if (bpf_jit_enable > 1)
821 		bpf_jit_dump(prog->len, image_size, 2, ctx.image);
822 
823 	bpf_flush_icache(header, ctx.image + ctx.idx);
824 
825 	set_memory_ro((unsigned long)header, header->pages);
826 	prog->bpf_func = (void *)ctx.image;
827 	prog->jited = 1;
828 
829 out_off:
830 	kfree(ctx.offset);
831 out:
832 	if (tmp_blinded)
833 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
834 					   tmp : orig_prog);
835 	return prog;
836 }
837 
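/*
 * Make the image writable again (it was marked read-only after JIT) before
 * returning it to the allocator.
 */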
838 void bpf_jit_free(struct bpf_prog *prog)
839 {
840 	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
841 	struct bpf_binary_header *header = (void *)addr;
842 
843 	if (!prog->jited)
844 		goto free_filter;
845 
846 	set_memory_rw(addr, header->pages);
847 	bpf_jit_binary_free(header);
848 
849 free_filter:
850 	bpf_prog_unlock_free(prog);
851 }
852