xref: /linux/arch/arm64/net/bpf_jit_comp.c (revision b6459415b384cb829f0b2a4268f211c789f6cf0b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * BPF JIT compiler for ARM64
4  *
5  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
6  */
7 
8 #define pr_fmt(fmt) "bpf_jit: " fmt
9 
10 #include <linux/bitfield.h>
11 #include <linux/bpf.h>
12 #include <linux/filter.h>
13 #include <linux/printk.h>
14 #include <linux/slab.h>
15 
16 #include <asm/asm-extable.h>
17 #include <asm/byteorder.h>
18 #include <asm/cacheflush.h>
19 #include <asm/debug-monitors.h>
20 #include <asm/insn.h>
21 #include <asm/set_memory.h>
22 
23 #include "bpf_jit.h"
24 
25 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
26 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
27 #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
28 #define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
29 
30 /* Map BPF registers to A64 registers */
31 static const int bpf2a64[] = {
32 	/* return value from in-kernel function, and exit value from eBPF */
33 	[BPF_REG_0] = A64_R(7),
34 	/* arguments from eBPF program to in-kernel function */
35 	[BPF_REG_1] = A64_R(0),
36 	[BPF_REG_2] = A64_R(1),
37 	[BPF_REG_3] = A64_R(2),
38 	[BPF_REG_4] = A64_R(3),
39 	[BPF_REG_5] = A64_R(4),
40 	/* callee saved registers that in-kernel function will preserve */
41 	[BPF_REG_6] = A64_R(19),
42 	[BPF_REG_7] = A64_R(20),
43 	[BPF_REG_8] = A64_R(21),
44 	[BPF_REG_9] = A64_R(22),
45 	/* read-only frame pointer to access stack */
46 	[BPF_REG_FP] = A64_R(25),
47 	/* temporary registers for BPF JIT */
48 	[TMP_REG_1] = A64_R(10),
49 	[TMP_REG_2] = A64_R(11),
50 	[TMP_REG_3] = A64_R(12),
51 	/* tail_call_cnt */
52 	[TCALL_CNT] = A64_R(26),
53 	/* temporary register for blinding constants */
54 	[BPF_REG_AX] = A64_R(9),
55 };
56 
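/*
 * JIT context, threaded through the code-generation passes:
 * @prog:            the BPF program being JITed
 * @idx:             index of the next A64 instruction to emit
 * @epilogue_offset: A64 instruction index where the epilogue starts
 * @offset:          per-BPF-instruction A64 offsets (prog->len + 1 entries)
 * @exentry_idx:     next free slot in prog->aux->extable
 * @image:           JIT image, NULL during the initial sizing pass
 * @stack_size:      BPF stack depth rounded up to a 16-byte multiple
 */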
57 struct jit_ctx {
58 	const struct bpf_prog *prog;
59 	int idx;
60 	int epilogue_offset;
61 	int *offset;
62 	int exentry_idx;
63 	__le32 *image;
64 	u32 stack_size;
65 };
66 
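/*
 * Emit one A64 instruction. During the initial sizing pass ctx->image is
 * NULL, so only ctx->idx is advanced; on the real pass the little-endian
 * encoding is also written into the image.
 */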
67 static inline void emit(const u32 insn, struct jit_ctx *ctx)
68 {
69 	if (ctx->image != NULL)
70 		ctx->image[ctx->idx] = cpu_to_le32(insn);
71 
72 	ctx->idx++;
73 }
74 
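/*
 * Load a 32-bit immediate. When the top bit of the upper halfword is set,
 * build the value from its bitwise inverse with MOVN (a single instruction
 * when the upper halfword is all ones), otherwise MOVZ the low halfword and
 * MOVK the high one. For example, val = 0xffff8000 becomes
 * A64_MOVN(is64, reg, 0x7fff, 0), while val = 0x12345678 becomes
 * A64_MOVZ(is64, reg, 0x5678, 0) followed by A64_MOVK(is64, reg, 0x1234, 16).
 */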
75 static inline void emit_a64_mov_i(const int is64, const int reg,
76 				  const s32 val, struct jit_ctx *ctx)
77 {
78 	u16 hi = val >> 16;
79 	u16 lo = val & 0xffff;
80 
81 	if (hi & 0x8000) {
82 		if (hi == 0xffff) {
83 			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
84 		} else {
85 			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
86 			if (lo != 0xffff)
87 				emit(A64_MOVK(is64, reg, lo, 0), ctx);
88 		}
89 	} else {
90 		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
91 		if (hi)
92 			emit(A64_MOVK(is64, reg, hi, 16), ctx);
93 	}
94 }
95 
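/*
 * Count the 16-bit halfwords of @val that differ from the background value
 * (0x0000 when building with MOVZ/MOVK, 0xffff when building with MOVN/MOVK).
 * emit_a64_mov_i64() uses this to pick whichever form needs fewer
 * instructions.
 */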
96 static int i64_i16_blocks(const u64 val, bool inverse)
97 {
98 	return (((val >>  0) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
99 	       (((val >> 16) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
100 	       (((val >> 32) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
101 	       (((val >> 48) & 0xffff) != (inverse ? 0xffff : 0x0000));
102 }
103 
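/*
 * Load a 64-bit immediate. Values that fit in 32 bits are delegated to
 * emit_a64_mov_i(). Otherwise the shorter of a MOVZ- or MOVN-based sequence
 * is chosen and only the halfwords that differ from the background value are
 * patched with MOVK. For example, val = 0xffffffff00001234 is emitted as
 * A64_MOVN(1, reg, 0xffff, 16) followed by A64_MOVK(1, reg, 0x1234, 0).
 */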
104 static inline void emit_a64_mov_i64(const int reg, const u64 val,
105 				    struct jit_ctx *ctx)
106 {
107 	u64 nrm_tmp = val, rev_tmp = ~val;
108 	bool inverse;
109 	int shift;
110 
111 	if (!(nrm_tmp >> 32))
112 		return emit_a64_mov_i(0, reg, (u32)val, ctx);
113 
114 	inverse = i64_i16_blocks(nrm_tmp, true) < i64_i16_blocks(nrm_tmp, false);
115 	shift = max(round_down((inverse ? (fls64(rev_tmp) - 1) :
116 					  (fls64(nrm_tmp) - 1)), 16), 0);
117 	if (inverse)
118 		emit(A64_MOVN(1, reg, (rev_tmp >> shift) & 0xffff, shift), ctx);
119 	else
120 		emit(A64_MOVZ(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
121 	shift -= 16;
122 	while (shift >= 0) {
123 		if (((nrm_tmp >> shift) & 0xffff) != (inverse ? 0xffff : 0x0000))
124 			emit(A64_MOVK(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
125 		shift -= 16;
126 	}
127 }
128 
129 /*
130  * Kernel addresses in the vmalloc space use at most 48 bits, and the
131  * remaining upper bits are guaranteed to be all ones. So we can compose
132  * the address with a fixed-length movn/movk/movk sequence.
133  */
134 static inline void emit_addr_mov_i64(const int reg, const u64 val,
135 				     struct jit_ctx *ctx)
136 {
137 	u64 tmp = val;
138 	int shift = 0;
139 
140 	emit(A64_MOVN(1, reg, ~tmp & 0xffff, shift), ctx);
141 	while (shift < 32) {
142 		tmp >>= 16;
143 		shift += 16;
144 		emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
145 	}
146 }
147 
148 static inline int bpf2a64_offset(int bpf_insn, int off,
149 				 const struct jit_ctx *ctx)
150 {
151 	/* BPF JMP offset is relative to the next instruction */
152 	bpf_insn++;
153 	/*
154 	 * arm64 branch instructions, on the other hand, encode the offset
155 	 * from the branch itself, so we must subtract 1 from the
156 	 * instruction offset.
157 	 */
158 	return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
159 }
160 
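/*
 * Fill unused space in the JIT image with AARCH64_BREAK_FAULT (a BRK
 * encoding), so that any stray execution of unpatched words traps instead of
 * running as code. Passed to bpf_jit_binary_alloc() below.
 */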
161 static void jit_fill_hole(void *area, unsigned int size)
162 {
163 	__le32 *ptr;
164 	/* We are guaranteed to have aligned memory. */
165 	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
166 		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
167 }
168 
169 static inline int epilogue_offset(const struct jit_ctx *ctx)
170 {
171 	int to = ctx->epilogue_offset;
172 	int from = ctx->idx;
173 
174 	return to - from;
175 }
176 
177 static bool is_addsub_imm(u32 imm)
178 {
179 	/* Either imm12 or shifted imm12. */
180 	return !(imm & ~0xfff) || !(imm & ~0xfff000);
181 }
182 
183 /* Tail call offset to jump into */
184 #if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
185 #define PROLOGUE_OFFSET 8
186 #else
187 #define PROLOGUE_OFFSET 7
188 #endif
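/*
 * PROLOGUE_OFFSET counts the A64 instructions that build_prologue() emits
 * before the tail-call entry point: FP/LR push, frame-pointer setup, three
 * callee-saved pushes, BPF frame-pointer setup and tail_call_cnt init (seven
 * instructions), plus one BTI C landing pad when BTI is enabled. A tail call
 * branches to prog->bpf_func + PROLOGUE_OFFSET * 4, skipping that prologue.
 */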
189 
190 static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
191 {
192 	const struct bpf_prog *prog = ctx->prog;
193 	const u8 r6 = bpf2a64[BPF_REG_6];
194 	const u8 r7 = bpf2a64[BPF_REG_7];
195 	const u8 r8 = bpf2a64[BPF_REG_8];
196 	const u8 r9 = bpf2a64[BPF_REG_9];
197 	const u8 fp = bpf2a64[BPF_REG_FP];
198 	const u8 tcc = bpf2a64[TCALL_CNT];
199 	const int idx0 = ctx->idx;
200 	int cur_offset;
201 
202 	/*
203 	 * BPF prog stack layout
204 	 *
205 	 *                         high
206 	 * original A64_SP =>   0:+-----+ BPF prologue
207 	 *                        |FP/LR|
208 	 * current A64_FP =>  -16:+-----+
209 	 *                        | ... | callee saved registers
210 	 * BPF fp register => -64:+-----+ <= (BPF_FP)
211 	 *                        |     |
212 	 *                        | ... | BPF prog stack
213 	 *                        |     |
214 	 *                        +-----+ <= (BPF_FP - prog->aux->stack_depth)
215 	 *                        |RSVD | padding
216 	 * current A64_SP =>      +-----+ <= (BPF_FP - ctx->stack_size)
217 	 *                        |     |
218 	 *                        | ... | Function call stack
219 	 *                        |     |
220 	 *                        +-----+
221 	 *                          low
222 	 *
223 	 */
224 
225 	/* BTI landing pad */
226 	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
227 		emit(A64_BTI_C, ctx);
228 
229 	/* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
230 	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
231 	emit(A64_MOV(1, A64_FP, A64_SP), ctx);
232 
233 	/* Save callee-saved registers */
234 	emit(A64_PUSH(r6, r7, A64_SP), ctx);
235 	emit(A64_PUSH(r8, r9, A64_SP), ctx);
236 	emit(A64_PUSH(fp, tcc, A64_SP), ctx);
237 
238 	/* Set up BPF prog stack base register */
239 	emit(A64_MOV(1, fp, A64_SP), ctx);
240 
241 	if (!ebpf_from_cbpf) {
242 		/* Initialize tail_call_cnt */
243 		emit(A64_MOVZ(1, tcc, 0, 0), ctx);
244 
245 		cur_offset = ctx->idx - idx0;
246 		if (cur_offset != PROLOGUE_OFFSET) {
247 			pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
248 				    cur_offset, PROLOGUE_OFFSET);
249 			return -1;
250 		}
251 
252 		/* BTI landing pad for the tail call, done with a BR */
253 		if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
254 			emit(A64_BTI_J, ctx);
255 	}
256 
257 	/* The stack size must be a multiple of 16 bytes */
258 	ctx->stack_size = round_up(prog->aux->stack_depth, 16);
259 
260 	/* Set up function call stack */
261 	emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
262 	return 0;
263 }
264 
265 static int out_offset = -1; /* initialized on the first pass of build_body() */
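/*
 * Emit the tail-call sequence: bounds-check the index against
 * array->map.max_entries, enforce MAX_TAIL_CALL_CNT, load array->ptrs[index]
 * and bail out to "out:" if it is NULL; otherwise unwind the BPF stack and
 * branch to prog->bpf_func + PROLOGUE_OFFSET * 4, re-using the current
 * frame. out_offset records where "out:" lands and must be identical on
 * every pass, since jmp_offset is consumed before "out:" is reached.
 */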
266 static int emit_bpf_tail_call(struct jit_ctx *ctx)
267 {
268 	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
269 	const u8 r2 = bpf2a64[BPF_REG_2];
270 	const u8 r3 = bpf2a64[BPF_REG_3];
271 
272 	const u8 tmp = bpf2a64[TMP_REG_1];
273 	const u8 prg = bpf2a64[TMP_REG_2];
274 	const u8 tcc = bpf2a64[TCALL_CNT];
275 	const int idx0 = ctx->idx;
276 #define cur_offset (ctx->idx - idx0)
277 #define jmp_offset (out_offset - (cur_offset))
278 	size_t off;
279 
280 	/* if (index >= array->map.max_entries)
281 	 *     goto out;
282 	 */
283 	off = offsetof(struct bpf_array, map.max_entries);
284 	emit_a64_mov_i64(tmp, off, ctx);
285 	emit(A64_LDR32(tmp, r2, tmp), ctx);
286 	emit(A64_MOV(0, r3, r3), ctx);
287 	emit(A64_CMP(0, r3, tmp), ctx);
288 	emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
289 
290 	/*
291 	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
292 	 *     goto out;
293 	 * tail_call_cnt++;
294 	 */
295 	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
296 	emit(A64_CMP(1, tcc, tmp), ctx);
297 	emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
298 	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
299 
300 	/* prog = array->ptrs[index];
301 	 * if (prog == NULL)
302 	 *     goto out;
303 	 */
304 	off = offsetof(struct bpf_array, ptrs);
305 	emit_a64_mov_i64(tmp, off, ctx);
306 	emit(A64_ADD(1, tmp, r2, tmp), ctx);
307 	emit(A64_LSL(1, prg, r3, 3), ctx);
308 	emit(A64_LDR64(prg, tmp, prg), ctx);
309 	emit(A64_CBZ(1, prg, jmp_offset), ctx);
310 
311 	/* goto *(prog->bpf_func + prologue_offset); */
312 	off = offsetof(struct bpf_prog, bpf_func);
313 	emit_a64_mov_i64(tmp, off, ctx);
314 	emit(A64_LDR64(tmp, prg, tmp), ctx);
315 	emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
316 	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
317 	emit(A64_BR(tmp), ctx);
318 
319 	/* out: */
320 	if (out_offset == -1)
321 		out_offset = cur_offset;
322 	if (cur_offset != out_offset) {
323 		pr_err_once("tail_call out_offset = %d, expected %d!\n",
324 			    cur_offset, out_offset);
325 		return -1;
326 	}
327 	return 0;
328 #undef cur_offset
329 #undef jmp_offset
330 }
331 
332 static void build_epilogue(struct jit_ctx *ctx)
333 {
334 	const u8 r0 = bpf2a64[BPF_REG_0];
335 	const u8 r6 = bpf2a64[BPF_REG_6];
336 	const u8 r7 = bpf2a64[BPF_REG_7];
337 	const u8 r8 = bpf2a64[BPF_REG_8];
338 	const u8 r9 = bpf2a64[BPF_REG_9];
339 	const u8 fp = bpf2a64[BPF_REG_FP];
340 
341 	/* We're done with BPF stack */
342 	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
343 
344 	/* Restore fp (x25) and x26 */
345 	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
346 
347 	/* Restore callee-saved registers */
348 	emit(A64_POP(r8, r9, A64_SP), ctx);
349 	emit(A64_POP(r6, r7, A64_SP), ctx);
350 
351 	/* Restore FP/LR registers */
352 	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
353 
354 	/* Set return value */
355 	emit(A64_MOV(1, A64_R(0), r0), ctx);
356 
357 	emit(A64_RET(A64_LR), ctx);
358 }
359 
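/*
 * Exception-table fixups for BPF_PROBE_MEM loads: the lower 27 bits of
 * ex->fixup hold the (positive) distance from the instruction following the
 * faulting load to the fixup field itself, and the upper 5 bits hold the A64
 * destination register. On a fault, ex_handler_bpf() zeroes that register
 * and resumes at the instruction after the load.
 */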
360 #define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
361 #define BPF_FIXUP_REG_MASK	GENMASK(31, 27)
362 
363 bool ex_handler_bpf(const struct exception_table_entry *ex,
364 		    struct pt_regs *regs)
365 {
366 	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
367 	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
368 
369 	regs->regs[dst_reg] = 0;
370 	regs->pc = (unsigned long)&ex->fixup - offset;
371 	return true;
372 }
373 
374 /* For accesses to BTF pointers, add an entry to the exception table */
375 static int add_exception_handler(const struct bpf_insn *insn,
376 				 struct jit_ctx *ctx,
377 				 int dst_reg)
378 {
379 	off_t offset;
380 	unsigned long pc;
381 	struct exception_table_entry *ex;
382 
383 	if (!ctx->image)
384 		/* First pass */
385 		return 0;
386 
387 	if (BPF_MODE(insn->code) != BPF_PROBE_MEM)
388 		return 0;
389 
390 	if (!ctx->prog->aux->extable ||
391 	    WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries))
392 		return -EINVAL;
393 
394 	ex = &ctx->prog->aux->extable[ctx->exentry_idx];
395 	pc = (unsigned long)&ctx->image[ctx->idx - 1];
396 
397 	offset = pc - (long)&ex->insn;
398 	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
399 		return -ERANGE;
400 	ex->insn = offset;
401 
402 	/*
403 	 * Since the extable follows the program, the fixup offset is always
404 	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
405 	 * to keep things simple, and put the destination register in the upper
406 	 * bits. We don't need to worry about buildtime or runtime sort
407 	 * modifying the upper bits because the table is already sorted, and
408 	 * isn't part of the main exception table.
409 	 */
410 	offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
411 	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
412 		return -ERANGE;
413 
414 	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
415 		    FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
416 
417 	ex->type = EX_TYPE_BPF;
418 
419 	ctx->exentry_idx++;
420 	return 0;
421 }
422 
423 /* JITs an eBPF instruction.
424  * Returns:
425  * 0  - successfully JITed an 8-byte eBPF instruction.
426  * >0 - successfully JITed a 16-byte eBPF instruction.
427  * <0 - failed to JIT.
428  */
429 static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
430 		      bool extra_pass)
431 {
432 	const u8 code = insn->code;
433 	const u8 dst = bpf2a64[insn->dst_reg];
434 	const u8 src = bpf2a64[insn->src_reg];
435 	const u8 tmp = bpf2a64[TMP_REG_1];
436 	const u8 tmp2 = bpf2a64[TMP_REG_2];
437 	const u8 tmp3 = bpf2a64[TMP_REG_3];
438 	const s16 off = insn->off;
439 	const s32 imm = insn->imm;
440 	const int i = insn - ctx->prog->insnsi;
441 	const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
442 			  BPF_CLASS(code) == BPF_JMP;
443 	const bool isdw = BPF_SIZE(code) == BPF_DW;
444 	u8 jmp_cond, reg;
445 	s32 jmp_offset;
446 	u32 a64_insn;
447 	int ret;
448 
449 #define check_imm(bits, imm) do {				\
450 	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
451 	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
452 		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
453 			i, imm, imm);				\
454 		return -EINVAL;					\
455 	}							\
456 } while (0)
457 #define check_imm19(imm) check_imm(19, imm)
458 #define check_imm26(imm) check_imm(26, imm)
459 
460 	switch (code) {
461 	/* dst = src */
462 	case BPF_ALU | BPF_MOV | BPF_X:
463 	case BPF_ALU64 | BPF_MOV | BPF_X:
464 		emit(A64_MOV(is64, dst, src), ctx);
465 		break;
466 	/* dst = dst OP src */
467 	case BPF_ALU | BPF_ADD | BPF_X:
468 	case BPF_ALU64 | BPF_ADD | BPF_X:
469 		emit(A64_ADD(is64, dst, dst, src), ctx);
470 		break;
471 	case BPF_ALU | BPF_SUB | BPF_X:
472 	case BPF_ALU64 | BPF_SUB | BPF_X:
473 		emit(A64_SUB(is64, dst, dst, src), ctx);
474 		break;
475 	case BPF_ALU | BPF_AND | BPF_X:
476 	case BPF_ALU64 | BPF_AND | BPF_X:
477 		emit(A64_AND(is64, dst, dst, src), ctx);
478 		break;
479 	case BPF_ALU | BPF_OR | BPF_X:
480 	case BPF_ALU64 | BPF_OR | BPF_X:
481 		emit(A64_ORR(is64, dst, dst, src), ctx);
482 		break;
483 	case BPF_ALU | BPF_XOR | BPF_X:
484 	case BPF_ALU64 | BPF_XOR | BPF_X:
485 		emit(A64_EOR(is64, dst, dst, src), ctx);
486 		break;
487 	case BPF_ALU | BPF_MUL | BPF_X:
488 	case BPF_ALU64 | BPF_MUL | BPF_X:
489 		emit(A64_MUL(is64, dst, dst, src), ctx);
490 		break;
491 	case BPF_ALU | BPF_DIV | BPF_X:
492 	case BPF_ALU64 | BPF_DIV | BPF_X:
493 		emit(A64_UDIV(is64, dst, dst, src), ctx);
494 		break;
495 	case BPF_ALU | BPF_MOD | BPF_X:
496 	case BPF_ALU64 | BPF_MOD | BPF_X:
497 		emit(A64_UDIV(is64, tmp, dst, src), ctx);
498 		emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
499 		break;
500 	case BPF_ALU | BPF_LSH | BPF_X:
501 	case BPF_ALU64 | BPF_LSH | BPF_X:
502 		emit(A64_LSLV(is64, dst, dst, src), ctx);
503 		break;
504 	case BPF_ALU | BPF_RSH | BPF_X:
505 	case BPF_ALU64 | BPF_RSH | BPF_X:
506 		emit(A64_LSRV(is64, dst, dst, src), ctx);
507 		break;
508 	case BPF_ALU | BPF_ARSH | BPF_X:
509 	case BPF_ALU64 | BPF_ARSH | BPF_X:
510 		emit(A64_ASRV(is64, dst, dst, src), ctx);
511 		break;
512 	/* dst = -dst */
513 	case BPF_ALU | BPF_NEG:
514 	case BPF_ALU64 | BPF_NEG:
515 		emit(A64_NEG(is64, dst, dst), ctx);
516 		break;
517 	/* dst = BSWAP##imm(dst) */
518 	case BPF_ALU | BPF_END | BPF_FROM_LE:
519 	case BPF_ALU | BPF_END | BPF_FROM_BE:
520 #ifdef CONFIG_CPU_BIG_ENDIAN
521 		if (BPF_SRC(code) == BPF_FROM_BE)
522 			goto emit_bswap_uxt;
523 #else /* !CONFIG_CPU_BIG_ENDIAN */
524 		if (BPF_SRC(code) == BPF_FROM_LE)
525 			goto emit_bswap_uxt;
526 #endif
527 		switch (imm) {
528 		case 16:
529 			emit(A64_REV16(is64, dst, dst), ctx);
530 			/* zero-extend 16 bits into 64 bits */
531 			emit(A64_UXTH(is64, dst, dst), ctx);
532 			break;
533 		case 32:
534 			emit(A64_REV32(is64, dst, dst), ctx);
535 			/* upper 32 bits already cleared */
536 			break;
537 		case 64:
538 			emit(A64_REV64(dst, dst), ctx);
539 			break;
540 		}
541 		break;
542 emit_bswap_uxt:
543 		switch (imm) {
544 		case 16:
545 			/* zero-extend 16 bits into 64 bits */
546 			emit(A64_UXTH(is64, dst, dst), ctx);
547 			break;
548 		case 32:
549 			/* zero-extend 32 bits into 64 bits */
550 			emit(A64_UXTW(is64, dst, dst), ctx);
551 			break;
552 		case 64:
553 			/* nop */
554 			break;
555 		}
556 		break;
557 	/* dst = imm */
558 	case BPF_ALU | BPF_MOV | BPF_K:
559 	case BPF_ALU64 | BPF_MOV | BPF_K:
560 		emit_a64_mov_i(is64, dst, imm, ctx);
561 		break;
562 	/* dst = dst OP imm */
563 	case BPF_ALU | BPF_ADD | BPF_K:
564 	case BPF_ALU64 | BPF_ADD | BPF_K:
565 		if (is_addsub_imm(imm)) {
566 			emit(A64_ADD_I(is64, dst, dst, imm), ctx);
567 		} else if (is_addsub_imm(-imm)) {
568 			emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
569 		} else {
570 			emit_a64_mov_i(is64, tmp, imm, ctx);
571 			emit(A64_ADD(is64, dst, dst, tmp), ctx);
572 		}
573 		break;
574 	case BPF_ALU | BPF_SUB | BPF_K:
575 	case BPF_ALU64 | BPF_SUB | BPF_K:
576 		if (is_addsub_imm(imm)) {
577 			emit(A64_SUB_I(is64, dst, dst, imm), ctx);
578 		} else if (is_addsub_imm(-imm)) {
579 			emit(A64_ADD_I(is64, dst, dst, -imm), ctx);
580 		} else {
581 			emit_a64_mov_i(is64, tmp, imm, ctx);
582 			emit(A64_SUB(is64, dst, dst, tmp), ctx);
583 		}
584 		break;
585 	case BPF_ALU | BPF_AND | BPF_K:
586 	case BPF_ALU64 | BPF_AND | BPF_K:
587 		a64_insn = A64_AND_I(is64, dst, dst, imm);
588 		if (a64_insn != AARCH64_BREAK_FAULT) {
589 			emit(a64_insn, ctx);
590 		} else {
591 			emit_a64_mov_i(is64, tmp, imm, ctx);
592 			emit(A64_AND(is64, dst, dst, tmp), ctx);
593 		}
594 		break;
595 	case BPF_ALU | BPF_OR | BPF_K:
596 	case BPF_ALU64 | BPF_OR | BPF_K:
597 		a64_insn = A64_ORR_I(is64, dst, dst, imm);
598 		if (a64_insn != AARCH64_BREAK_FAULT) {
599 			emit(a64_insn, ctx);
600 		} else {
601 			emit_a64_mov_i(is64, tmp, imm, ctx);
602 			emit(A64_ORR(is64, dst, dst, tmp), ctx);
603 		}
604 		break;
605 	case BPF_ALU | BPF_XOR | BPF_K:
606 	case BPF_ALU64 | BPF_XOR | BPF_K:
607 		a64_insn = A64_EOR_I(is64, dst, dst, imm);
608 		if (a64_insn != AARCH64_BREAK_FAULT) {
609 			emit(a64_insn, ctx);
610 		} else {
611 			emit_a64_mov_i(is64, tmp, imm, ctx);
612 			emit(A64_EOR(is64, dst, dst, tmp), ctx);
613 		}
614 		break;
615 	case BPF_ALU | BPF_MUL | BPF_K:
616 	case BPF_ALU64 | BPF_MUL | BPF_K:
617 		emit_a64_mov_i(is64, tmp, imm, ctx);
618 		emit(A64_MUL(is64, dst, dst, tmp), ctx);
619 		break;
620 	case BPF_ALU | BPF_DIV | BPF_K:
621 	case BPF_ALU64 | BPF_DIV | BPF_K:
622 		emit_a64_mov_i(is64, tmp, imm, ctx);
623 		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
624 		break;
625 	case BPF_ALU | BPF_MOD | BPF_K:
626 	case BPF_ALU64 | BPF_MOD | BPF_K:
627 		emit_a64_mov_i(is64, tmp2, imm, ctx);
628 		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
629 		emit(A64_MSUB(is64, dst, dst, tmp, tmp2), ctx);
630 		break;
631 	case BPF_ALU | BPF_LSH | BPF_K:
632 	case BPF_ALU64 | BPF_LSH | BPF_K:
633 		emit(A64_LSL(is64, dst, dst, imm), ctx);
634 		break;
635 	case BPF_ALU | BPF_RSH | BPF_K:
636 	case BPF_ALU64 | BPF_RSH | BPF_K:
637 		emit(A64_LSR(is64, dst, dst, imm), ctx);
638 		break;
639 	case BPF_ALU | BPF_ARSH | BPF_K:
640 	case BPF_ALU64 | BPF_ARSH | BPF_K:
641 		emit(A64_ASR(is64, dst, dst, imm), ctx);
642 		break;
643 
644 	/* JUMP off */
645 	case BPF_JMP | BPF_JA:
646 		jmp_offset = bpf2a64_offset(i, off, ctx);
647 		check_imm26(jmp_offset);
648 		emit(A64_B(jmp_offset), ctx);
649 		break;
650 	/* IF (dst COND src) JUMP off */
651 	case BPF_JMP | BPF_JEQ | BPF_X:
652 	case BPF_JMP | BPF_JGT | BPF_X:
653 	case BPF_JMP | BPF_JLT | BPF_X:
654 	case BPF_JMP | BPF_JGE | BPF_X:
655 	case BPF_JMP | BPF_JLE | BPF_X:
656 	case BPF_JMP | BPF_JNE | BPF_X:
657 	case BPF_JMP | BPF_JSGT | BPF_X:
658 	case BPF_JMP | BPF_JSLT | BPF_X:
659 	case BPF_JMP | BPF_JSGE | BPF_X:
660 	case BPF_JMP | BPF_JSLE | BPF_X:
661 	case BPF_JMP32 | BPF_JEQ | BPF_X:
662 	case BPF_JMP32 | BPF_JGT | BPF_X:
663 	case BPF_JMP32 | BPF_JLT | BPF_X:
664 	case BPF_JMP32 | BPF_JGE | BPF_X:
665 	case BPF_JMP32 | BPF_JLE | BPF_X:
666 	case BPF_JMP32 | BPF_JNE | BPF_X:
667 	case BPF_JMP32 | BPF_JSGT | BPF_X:
668 	case BPF_JMP32 | BPF_JSLT | BPF_X:
669 	case BPF_JMP32 | BPF_JSGE | BPF_X:
670 	case BPF_JMP32 | BPF_JSLE | BPF_X:
671 		emit(A64_CMP(is64, dst, src), ctx);
672 emit_cond_jmp:
673 		jmp_offset = bpf2a64_offset(i, off, ctx);
674 		check_imm19(jmp_offset);
675 		switch (BPF_OP(code)) {
676 		case BPF_JEQ:
677 			jmp_cond = A64_COND_EQ;
678 			break;
679 		case BPF_JGT:
680 			jmp_cond = A64_COND_HI;
681 			break;
682 		case BPF_JLT:
683 			jmp_cond = A64_COND_CC;
684 			break;
685 		case BPF_JGE:
686 			jmp_cond = A64_COND_CS;
687 			break;
688 		case BPF_JLE:
689 			jmp_cond = A64_COND_LS;
690 			break;
691 		case BPF_JSET:
692 		case BPF_JNE:
693 			jmp_cond = A64_COND_NE;
694 			break;
695 		case BPF_JSGT:
696 			jmp_cond = A64_COND_GT;
697 			break;
698 		case BPF_JSLT:
699 			jmp_cond = A64_COND_LT;
700 			break;
701 		case BPF_JSGE:
702 			jmp_cond = A64_COND_GE;
703 			break;
704 		case BPF_JSLE:
705 			jmp_cond = A64_COND_LE;
706 			break;
707 		default:
708 			return -EFAULT;
709 		}
710 		emit(A64_B_(jmp_cond, jmp_offset), ctx);
711 		break;
712 	case BPF_JMP | BPF_JSET | BPF_X:
713 	case BPF_JMP32 | BPF_JSET | BPF_X:
714 		emit(A64_TST(is64, dst, src), ctx);
715 		goto emit_cond_jmp;
716 	/* IF (dst COND imm) JUMP off */
717 	case BPF_JMP | BPF_JEQ | BPF_K:
718 	case BPF_JMP | BPF_JGT | BPF_K:
719 	case BPF_JMP | BPF_JLT | BPF_K:
720 	case BPF_JMP | BPF_JGE | BPF_K:
721 	case BPF_JMP | BPF_JLE | BPF_K:
722 	case BPF_JMP | BPF_JNE | BPF_K:
723 	case BPF_JMP | BPF_JSGT | BPF_K:
724 	case BPF_JMP | BPF_JSLT | BPF_K:
725 	case BPF_JMP | BPF_JSGE | BPF_K:
726 	case BPF_JMP | BPF_JSLE | BPF_K:
727 	case BPF_JMP32 | BPF_JEQ | BPF_K:
728 	case BPF_JMP32 | BPF_JGT | BPF_K:
729 	case BPF_JMP32 | BPF_JLT | BPF_K:
730 	case BPF_JMP32 | BPF_JGE | BPF_K:
731 	case BPF_JMP32 | BPF_JLE | BPF_K:
732 	case BPF_JMP32 | BPF_JNE | BPF_K:
733 	case BPF_JMP32 | BPF_JSGT | BPF_K:
734 	case BPF_JMP32 | BPF_JSLT | BPF_K:
735 	case BPF_JMP32 | BPF_JSGE | BPF_K:
736 	case BPF_JMP32 | BPF_JSLE | BPF_K:
737 		if (is_addsub_imm(imm)) {
738 			emit(A64_CMP_I(is64, dst, imm), ctx);
739 		} else if (is_addsub_imm(-imm)) {
740 			emit(A64_CMN_I(is64, dst, -imm), ctx);
741 		} else {
742 			emit_a64_mov_i(is64, tmp, imm, ctx);
743 			emit(A64_CMP(is64, dst, tmp), ctx);
744 		}
745 		goto emit_cond_jmp;
746 	case BPF_JMP | BPF_JSET | BPF_K:
747 	case BPF_JMP32 | BPF_JSET | BPF_K:
748 		a64_insn = A64_TST_I(is64, dst, imm);
749 		if (a64_insn != AARCH64_BREAK_FAULT) {
750 			emit(a64_insn, ctx);
751 		} else {
752 			emit_a64_mov_i(is64, tmp, imm, ctx);
753 			emit(A64_TST(is64, dst, tmp), ctx);
754 		}
755 		goto emit_cond_jmp;
756 	/* function call */
757 	case BPF_JMP | BPF_CALL:
758 	{
759 		const u8 r0 = bpf2a64[BPF_REG_0];
760 		bool func_addr_fixed;
761 		u64 func_addr;
762 
763 		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
764 					    &func_addr, &func_addr_fixed);
765 		if (ret < 0)
766 			return ret;
767 		emit_addr_mov_i64(tmp, func_addr, ctx);
768 		emit(A64_BLR(tmp), ctx);
769 		emit(A64_MOV(1, r0, A64_R(0)), ctx);
770 		break;
771 	}
772 	/* tail call */
773 	case BPF_JMP | BPF_TAIL_CALL:
774 		if (emit_bpf_tail_call(ctx))
775 			return -EFAULT;
776 		break;
777 	/* function return */
778 	case BPF_JMP | BPF_EXIT:
779 		/* Optimization: when the last instruction is EXIT,
780 		   simply fall through to the epilogue. */
781 		if (i == ctx->prog->len - 1)
782 			break;
783 		jmp_offset = epilogue_offset(ctx);
784 		check_imm26(jmp_offset);
785 		emit(A64_B(jmp_offset), ctx);
786 		break;
787 
788 	/* dst = imm64 */
789 	case BPF_LD | BPF_IMM | BPF_DW:
790 	{
791 		const struct bpf_insn insn1 = insn[1];
792 		u64 imm64;
793 
794 		imm64 = (u64)insn1.imm << 32 | (u32)imm;
795 		emit_a64_mov_i64(dst, imm64, ctx);
796 
797 		return 1;
798 	}
799 
800 	/* LDX: dst = *(size *)(src + off) */
801 	case BPF_LDX | BPF_MEM | BPF_W:
802 	case BPF_LDX | BPF_MEM | BPF_H:
803 	case BPF_LDX | BPF_MEM | BPF_B:
804 	case BPF_LDX | BPF_MEM | BPF_DW:
805 	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
806 	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
807 	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
808 	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
809 		emit_a64_mov_i(1, tmp, off, ctx);
810 		switch (BPF_SIZE(code)) {
811 		case BPF_W:
812 			emit(A64_LDR32(dst, src, tmp), ctx);
813 			break;
814 		case BPF_H:
815 			emit(A64_LDRH(dst, src, tmp), ctx);
816 			break;
817 		case BPF_B:
818 			emit(A64_LDRB(dst, src, tmp), ctx);
819 			break;
820 		case BPF_DW:
821 			emit(A64_LDR64(dst, src, tmp), ctx);
822 			break;
823 		}
824 
825 		ret = add_exception_handler(insn, ctx, dst);
826 		if (ret)
827 			return ret;
828 		break;
829 
830 	/* speculation barrier */
831 	case BPF_ST | BPF_NOSPEC:
832 		/*
833 		 * Nothing required here.
834 		 *
835 		 * In case of arm64, we rely on the firmware mitigation of
836 		 * Speculative Store Bypass as controlled via the ssbd kernel
837 		 * parameter. Whenever the mitigation is enabled, it works
838 		 * for all of the kernel code with no need to provide any
839 		 * additional instructions.
840 		 */
841 		break;
842 
843 	/* ST: *(size *)(dst + off) = imm */
844 	case BPF_ST | BPF_MEM | BPF_W:
845 	case BPF_ST | BPF_MEM | BPF_H:
846 	case BPF_ST | BPF_MEM | BPF_B:
847 	case BPF_ST | BPF_MEM | BPF_DW:
848 		/* Load imm to a register then store it */
849 		emit_a64_mov_i(1, tmp2, off, ctx);
850 		emit_a64_mov_i(1, tmp, imm, ctx);
851 		switch (BPF_SIZE(code)) {
852 		case BPF_W:
853 			emit(A64_STR32(tmp, dst, tmp2), ctx);
854 			break;
855 		case BPF_H:
856 			emit(A64_STRH(tmp, dst, tmp2), ctx);
857 			break;
858 		case BPF_B:
859 			emit(A64_STRB(tmp, dst, tmp2), ctx);
860 			break;
861 		case BPF_DW:
862 			emit(A64_STR64(tmp, dst, tmp2), ctx);
863 			break;
864 		}
865 		break;
866 
867 	/* STX: *(size *)(dst + off) = src */
868 	case BPF_STX | BPF_MEM | BPF_W:
869 	case BPF_STX | BPF_MEM | BPF_H:
870 	case BPF_STX | BPF_MEM | BPF_B:
871 	case BPF_STX | BPF_MEM | BPF_DW:
872 		emit_a64_mov_i(1, tmp, off, ctx);
873 		switch (BPF_SIZE(code)) {
874 		case BPF_W:
875 			emit(A64_STR32(src, dst, tmp), ctx);
876 			break;
877 		case BPF_H:
878 			emit(A64_STRH(src, dst, tmp), ctx);
879 			break;
880 		case BPF_B:
881 			emit(A64_STRB(src, dst, tmp), ctx);
882 			break;
883 		case BPF_DW:
884 			emit(A64_STR64(src, dst, tmp), ctx);
885 			break;
886 		}
887 		break;
888 
889 	case BPF_STX | BPF_ATOMIC | BPF_W:
890 	case BPF_STX | BPF_ATOMIC | BPF_DW:
891 		if (insn->imm != BPF_ADD) {
892 			pr_err_once("unknown atomic op code %02x\n", insn->imm);
893 			return -EINVAL;
894 		}
895 
896 		/* STX XADD: lock *(u32 *)(dst + off) += src
897 		 * and
898 		 * STX XADD: lock *(u64 *)(dst + off) += src
899 		 */
900 
901 		if (!off) {
902 			reg = dst;
903 		} else {
904 			emit_a64_mov_i(1, tmp, off, ctx);
905 			emit(A64_ADD(1, tmp, tmp, dst), ctx);
906 			reg = tmp;
907 		}
908 		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
909 			emit(A64_STADD(isdw, reg, src), ctx);
910 		} else {
911 			emit(A64_LDXR(isdw, tmp2, reg), ctx);
912 			emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
913 			emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
914 			jmp_offset = -3;
915 			check_imm19(jmp_offset);
916 			emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
917 		}
918 		break;
919 
920 	default:
921 		pr_err_once("unknown opcode %02x\n", code);
922 		return -EINVAL;
923 	}
924 
925 	return 0;
926 }
927 
928 static int build_body(struct jit_ctx *ctx, bool extra_pass)
929 {
930 	const struct bpf_prog *prog = ctx->prog;
931 	int i;
932 
933 	/*
934 	 * - offset[0] - offset of the end of the prologue,
935 	 *   i.e. the start of the 1st instruction
936 	 * - offset[1] - offset of the end of the 1st instruction,
937 	 *   i.e. the start of the 2nd instruction
938 	 * [....]
939 	 * - offset[3] - offset of the end of the 3rd instruction,
940 	 *   i.e. the start of the 4th instruction
941 	 */
942 	for (i = 0; i < prog->len; i++) {
943 		const struct bpf_insn *insn = &prog->insnsi[i];
944 		int ret;
945 
946 		if (ctx->image == NULL)
947 			ctx->offset[i] = ctx->idx;
948 		ret = build_insn(insn, ctx, extra_pass);
949 		if (ret > 0) {
950 			i++;
951 			if (ctx->image == NULL)
952 				ctx->offset[i] = ctx->idx;
953 			continue;
954 		}
955 		if (ret)
956 			return ret;
957 	}
958 	/*
959 	 * offset is allocated with prog->len + 1 so fill in
960 	 * the last element with the offset after the last
961 	 * instruction (end of program)
962 	 */
963 	if (ctx->image == NULL)
964 		ctx->offset[i] = ctx->idx;
965 
966 	return 0;
967 }
968 
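/*
 * Sanity-check the generated image: any word still equal to
 * AARCH64_BREAK_FAULT means an instruction failed to encode (or a hole was
 * never filled), and the number of exception-table entries written must
 * match what the verifier accounted for.
 */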
969 static int validate_code(struct jit_ctx *ctx)
970 {
971 	int i;
972 
973 	for (i = 0; i < ctx->idx; i++) {
974 		u32 a64_insn = le32_to_cpu(ctx->image[i]);
975 
976 		if (a64_insn == AARCH64_BREAK_FAULT)
977 			return -1;
978 	}
979 
980 	if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries))
981 		return -1;
982 
983 	return 0;
984 }
985 
986 static inline void bpf_flush_icache(void *start, void *end)
987 {
988 	flush_icache_range((unsigned long)start, (unsigned long)end);
989 }
990 
991 struct arm64_jit_data {
992 	struct bpf_binary_header *header;
993 	u8 *image;
994 	struct jit_ctx ctx;
995 };
996 
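/*
 * JIT entry point. The program is translated in multiple passes: a first
 * pass with ctx.image == NULL sizes the image and records per-instruction
 * offsets, the binary image is then allocated and a second pass emits the
 * actual instructions, and validate_code() checks the result. For programs
 * with subprograms (prog->is_func), the context is stashed in jit_data so a
 * later extra pass can re-emit the code once call addresses are known.
 */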
997 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
998 {
999 	int image_size, prog_size, extable_size;
1000 	struct bpf_prog *tmp, *orig_prog = prog;
1001 	struct bpf_binary_header *header;
1002 	struct arm64_jit_data *jit_data;
1003 	bool was_classic = bpf_prog_was_classic(prog);
1004 	bool tmp_blinded = false;
1005 	bool extra_pass = false;
1006 	struct jit_ctx ctx;
1007 	u8 *image_ptr;
1008 
1009 	if (!prog->jit_requested)
1010 		return orig_prog;
1011 
1012 	tmp = bpf_jit_blind_constants(prog);
1013 	/* If blinding was requested and we failed during blinding,
1014 	 * we must fall back to the interpreter.
1015 	 */
1016 	if (IS_ERR(tmp))
1017 		return orig_prog;
1018 	if (tmp != prog) {
1019 		tmp_blinded = true;
1020 		prog = tmp;
1021 	}
1022 
1023 	jit_data = prog->aux->jit_data;
1024 	if (!jit_data) {
1025 		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
1026 		if (!jit_data) {
1027 			prog = orig_prog;
1028 			goto out;
1029 		}
1030 		prog->aux->jit_data = jit_data;
1031 	}
1032 	if (jit_data->ctx.offset) {
1033 		ctx = jit_data->ctx;
1034 		image_ptr = jit_data->image;
1035 		header = jit_data->header;
1036 		extra_pass = true;
1037 		prog_size = sizeof(u32) * ctx.idx;
1038 		goto skip_init_ctx;
1039 	}
1040 	memset(&ctx, 0, sizeof(ctx));
1041 	ctx.prog = prog;
1042 
1043 	ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
1044 	if (ctx.offset == NULL) {
1045 		prog = orig_prog;
1046 		goto out_off;
1047 	}
1048 
1049 	/* 1. Initial fake pass to compute ctx->idx. */
1050 
1051 	/* Fake pass to fill in ctx->offset. */
1052 	if (build_body(&ctx, extra_pass)) {
1053 		prog = orig_prog;
1054 		goto out_off;
1055 	}
1056 
1057 	if (build_prologue(&ctx, was_classic)) {
1058 		prog = orig_prog;
1059 		goto out_off;
1060 	}
1061 
1062 	ctx.epilogue_offset = ctx.idx;
1063 	build_epilogue(&ctx);
1064 
1065 	extable_size = prog->aux->num_exentries *
1066 		sizeof(struct exception_table_entry);
1067 
1068 	/* Now we know the actual image size. */
1069 	prog_size = sizeof(u32) * ctx.idx;
1070 	image_size = prog_size + extable_size;
1071 	header = bpf_jit_binary_alloc(image_size, &image_ptr,
1072 				      sizeof(u32), jit_fill_hole);
1073 	if (header == NULL) {
1074 		prog = orig_prog;
1075 		goto out_off;
1076 	}
1077 
1078 	/* 2. Now, the actual pass. */
1079 
1080 	ctx.image = (__le32 *)image_ptr;
1081 	if (extable_size)
1082 		prog->aux->extable = (void *)image_ptr + prog_size;
1083 skip_init_ctx:
1084 	ctx.idx = 0;
1085 	ctx.exentry_idx = 0;
1086 
1087 	build_prologue(&ctx, was_classic);
1088 
1089 	if (build_body(&ctx, extra_pass)) {
1090 		bpf_jit_binary_free(header);
1091 		prog = orig_prog;
1092 		goto out_off;
1093 	}
1094 
1095 	build_epilogue(&ctx);
1096 
1097 	/* 3. Extra pass to validate JITed code. */
1098 	if (validate_code(&ctx)) {
1099 		bpf_jit_binary_free(header);
1100 		prog = orig_prog;
1101 		goto out_off;
1102 	}
1103 
1104 	/* And we're done. */
1105 	if (bpf_jit_enable > 1)
1106 		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
1107 
1108 	bpf_flush_icache(header, ctx.image + ctx.idx);
1109 
1110 	if (!prog->is_func || extra_pass) {
1111 		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
1112 			pr_err_once("multi-func JIT bug %d != %d\n",
1113 				    ctx.idx, jit_data->ctx.idx);
1114 			bpf_jit_binary_free(header);
1115 			prog->bpf_func = NULL;
1116 			prog->jited = 0;
1117 			goto out_off;
1118 		}
1119 		bpf_jit_binary_lock_ro(header);
1120 	} else {
1121 		jit_data->ctx = ctx;
1122 		jit_data->image = image_ptr;
1123 		jit_data->header = header;
1124 	}
1125 	prog->bpf_func = (void *)ctx.image;
1126 	prog->jited = 1;
1127 	prog->jited_len = prog_size;
1128 
1129 	if (!prog->is_func || extra_pass) {
1130 		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
1131 out_off:
1132 		kfree(ctx.offset);
1133 		kfree(jit_data);
1134 		prog->aux->jit_data = NULL;
1135 	}
1136 out:
1137 	if (tmp_blinded)
1138 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
1139 					   tmp : orig_prog);
1140 	return prog;
1141 }
1142 
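/*
 * BPF JIT images are allocated from the vmalloc area, so the executable
 * allocation limit is simply the size of that region.
 */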
1143 u64 bpf_jit_alloc_exec_limit(void)
1144 {
1145 	return VMALLOC_END - VMALLOC_START;
1146 }
1147 
1148 void *bpf_jit_alloc_exec(unsigned long size)
1149 {
1150 	return vmalloc(size);
1151 }
1152 
1153 void bpf_jit_free_exec(void *addr)
1154 {
1155 	return vfree(addr);
1156 }
1157