// SPDX-License-Identifier: GPL-2.0
/*
 * The back-end-agnostic part of Just-In-Time compiler for eBPF bytecode.
 *
 * Copyright (c) 2024 Synopsys Inc.
 * Author: Shahab Vahedi <shahab@synopsys.com>
 */
#include <linux/bug.h>
#include "bpf_jit.h"

/*
 * Check for the return value. A pattern used often in this file.
 * There must be a "ret" variable of type "int" in the scope.
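 *
 * Illustrative use:
 *
 *	int ret;
 *
 *	CHECK_RET(jit_buffer_check(ctx));
 *	(execution reaches here only if "ret" is not negative)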
 */
#define CHECK_RET(cmd)			\
	do {				\
		ret = (cmd);		\
		if (ret < 0)		\
			return ret;	\
	} while (0)

#ifdef ARC_BPF_JIT_DEBUG
/* Dumps bytes in /var/log/messages at KERN_INFO level (6). */
static void dump_bytes(const u8 *buf, u32 len, const char *header)
{
	u8 line[64];
	size_t i, j;

	pr_info("-----------------[ %s ]-----------------\n", header);

	for (i = 0, j = 0; i < len; i++) {
		/* Last input byte? */
		if (i == len - 1) {
			j += scnprintf(line + j, 64 - j, "0x%02x", buf[i]);
			pr_info("%s\n", line);
			break;
		}
		/* End of line? */
		else if (i % 8 == 7) {
			j += scnprintf(line + j, 64 - j, "0x%02x", buf[i]);
			pr_info("%s\n", line);
			j = 0;
		} else {
			j += scnprintf(line + j, 64 - j, "0x%02x, ", buf[i]);
		}
	}
}
#endif /* ARC_BPF_JIT_DEBUG */

/********************* JIT context ***********************/

/*
 * buf:		Translated instructions end up here.
 * len:		The length of the whole block in bytes.
 * index:	The offset at which the _next_ instruction may be put.
 */
struct jit_buffer {
	u8	*buf;
	u32	len;
	u32	index;
};

/*
 * This is a subset of "struct jit_context" whose information is deemed
 * necessary for the upcoming extra pass.
 *
 * bpf_header:	Needed to finally lock the region.
 * bpf2insn:	Used to find the translation for instructions of interest.
 *
 * Things like "jit.buf" and "jit.len" can be retrieved respectively from
 * "prog->bpf_func" and "prog->jited_len".
 */
struct arc_jit_data {
	struct bpf_binary_header *bpf_header;
	u32                      *bpf2insn;
};

/*
 * The JIT pertinent context that is used by different functions.
 *
 * prog:		The current eBPF program being handled.
 * orig_prog:		The original eBPF program before any possible change.
 * jit:			The JIT buffer and its length.
 * bpf_header:		The JITed program header. "jit.buf" points inside it.
 * emit:		If set, opcodes are written to memory; else, a dry-run.
 * do_zext:		If true, 32-bit sub-regs must be zero extended.
 * bpf2insn:		Maps BPF insn indices to their counterparts in jit.buf.
 * bpf2insn_valid:	Indicates if "bpf2insn" is populated with the mappings.
 * jit_data:		A piece of memory to transfer data to the next pass.
 * arc_regs_clobbered:	Each bit indicates whether that ARC reg is clobbered.
 * save_blink:		Whether ARC's "blink" register needs to be saved.
 * frame_size:		Derived from "prog->aux->stack_depth".
 * epilogue_offset:	Used by early "return"s in the code to jump here.
 * need_extra_pass:	A forecast of whether an "extra_pass" will occur.
 * is_extra_pass:	Indicates if the current pass is an extra pass.
 * user_bpf_prog:	True if the VM opcodes come from a real program.
 * blinded:		True if the "constant blinding" step returned a new "prog".
 * success:		Indicates if the whole JIT went OK.
 */
struct jit_context {
	struct bpf_prog			*prog;
	struct bpf_prog			*orig_prog;
	struct jit_buffer		jit;
	struct bpf_binary_header	*bpf_header;
	bool				emit;
	bool				do_zext;
	u32				*bpf2insn;
	bool				bpf2insn_valid;
	struct arc_jit_data		*jit_data;
	u32				arc_regs_clobbered;
	bool				save_blink;
	u16				frame_size;
	u32				epilogue_offset;
	bool				need_extra_pass;
	bool				is_extra_pass;
	bool				user_bpf_prog;
	bool				blinded;
	bool				success;
};

/*
 * If we're in ARC_BPF_JIT_DEBUG mode and the debug level is right, dump the
 * input BPF stream. "bpf_jit_dump()" is not fully suited for this purpose.
 */
static void vm_dump(const struct bpf_prog *prog)
{
#ifdef ARC_BPF_JIT_DEBUG
	if (bpf_jit_enable > 1)
		dump_bytes((u8 *)prog->insns, 8 * prog->len, " VM  ");
#endif
}

/*
 * If the right level of debug is set, dump the bytes. There are two variants
 * of this function:
 *
 * 1. Use the standard bpf_jit_dump(), which is meant only for JITed code.
 * 2. Use dump_bytes() to match the output format of vm_dump().
 */
static void jit_dump(const struct jit_context *ctx)
{
#ifdef ARC_BPF_JIT_DEBUG
	u8 header[8];
#endif
	const int pass = ctx->is_extra_pass ? 2 : 1;

	if (bpf_jit_enable <= 1 || !ctx->prog->jited)
		return;

#ifdef ARC_BPF_JIT_DEBUG
	scnprintf(header, sizeof(header), "JIT:%d", pass);
	dump_bytes(ctx->jit.buf, ctx->jit.len, header);
	pr_info("\n");
#else
	bpf_jit_dump(ctx->prog->len, ctx->jit.len, pass, ctx->jit.buf);
#endif
}

/* Initialise the context so there's no garbage. */
static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
{
	memset(ctx, 0, sizeof(*ctx));

	ctx->orig_prog = prog;

	/* If constant blinding was requested but failed, scram. */
	ctx->prog = bpf_jit_blind_constants(prog);
	if (IS_ERR(ctx->prog))
		return PTR_ERR(ctx->prog);
	ctx->blinded = (ctx->prog != ctx->orig_prog);

	/* If the verifier doesn't zero-extend, then we have to do it. */
	ctx->do_zext = !ctx->prog->aux->verifier_zext;

	ctx->is_extra_pass = ctx->prog->jited;
	ctx->user_bpf_prog = ctx->prog->is_func;

	return 0;
}

/*
 * Only after the first iteration of the normal pass (the dry-run) does
 * the ctx->bpf2insn array hold valid offsets.
 */
static inline bool offsets_available(const struct jit_context *ctx)
{
	return ctx->bpf2insn_valid;
}

/*
 * "*mem" should be freed when there is no "extra pass" to come,
 * or when the compilation has terminated abruptly. Examples of such
 * memory allocations are: ctx->jit_data and ctx->bpf2insn.
 */
static inline void maybe_free(struct jit_context *ctx, void **mem)
{
	if (*mem) {
		if (!ctx->success || !ctx->need_extra_pass) {
			kfree(*mem);
			*mem = NULL;
		}
	}
}

/*
 * Free memory based on the status of the context.
 *
 * A note about "bpf_header": On successful runs, "bpf_header" is
 * not freed, because "jit.buf", a sub-array of it, is returned as
 * the "bpf_func". However, "bpf_header" is lost and nothing points
 * to it. This should not cause a leak, because "bpf_header" can be
 * revived through "bpf_jit_binary_hdr()". This is how
 * "bpf_jit_free()" in "kernel/bpf/core.c" releases the memory.
 */
static void jit_ctx_cleanup(struct jit_context *ctx)
{
	if (ctx->blinded) {
		/* If all went well, release the orig_prog. */
		if (ctx->success)
			bpf_jit_prog_release_other(ctx->prog, ctx->orig_prog);
		else
			bpf_jit_prog_release_other(ctx->orig_prog, ctx->prog);
	}

	maybe_free(ctx, (void **)&ctx->bpf2insn);
	maybe_free(ctx, (void **)&ctx->jit_data);

	if (!ctx->bpf2insn)
		ctx->bpf2insn_valid = false;

	/* Freeing "bpf_header" is enough. "jit.buf" is a sub-array of it. */
	if (!ctx->success && ctx->bpf_header) {
		bpf_jit_binary_free(ctx->bpf_header);
		ctx->bpf_header = NULL;
		ctx->jit.buf    = NULL;
		ctx->jit.index  = 0;
		ctx->jit.len    = 0;
	}

	ctx->emit = false;
	ctx->do_zext = false;
}

/*
 * Analyse the register usage and record the frame size.
 * The register usage is determined by consulting the back-end.
 */
static void analyze_reg_usage(struct jit_context *ctx)
{
	size_t i;
	u32 usage = 0;
	const struct bpf_insn *insn = ctx->prog->insnsi;

	for (i = 0; i < ctx->prog->len; i++) {
		u8 bpf_reg;
		bool call;

		bpf_reg = insn[i].dst_reg;
		call = (insn[i].code == (BPF_JMP | BPF_CALL));
		usage |= mask_for_used_regs(bpf_reg, call);
	}

	ctx->arc_regs_clobbered = usage;
	ctx->frame_size = ctx->prog->aux->stack_depth;
}

/* Verify that no instruction will be emitted when there is no buffer. */
static inline int jit_buffer_check(const struct jit_context *ctx)
{
	if (ctx->emit) {
		if (!ctx->jit.buf) {
			pr_err("bpf-jit: inconsistent state; no buffer to emit instructions.\n");
			return -EINVAL;
		} else if (ctx->jit.index > ctx->jit.len) {
			pr_err("bpf-jit: estimated JIT length is less than the emitted instructions.\n");
			return -EFAULT;
		}
	}
	return 0;
}

/* On a dry-run (emit=false), "jit.len" grows gradually. */
static inline void jit_buffer_update(struct jit_context *ctx, u32 n)
{
	if (!ctx->emit)
		ctx->jit.len += n;
	else
		ctx->jit.index += n;
}

/* Based on "emit", determine the address where instructions are emitted. */
static inline u8 *effective_jit_buf(const struct jit_context *ctx)
{
	return ctx->emit ? (ctx->jit.buf + ctx->jit.index) : NULL;
}
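
/*
 * A sketch of how the helpers above combine (illustrative only; the
 * actual sequencing is done by the callers, e.g. jit_prepare()):
 *
 *	ctx->emit = false;	   dry-run: effective_jit_buf() is NULL
 *	handle_prologue(ctx);	   only "jit.len" grows
 *	  ...allocate "jit.buf" based on the final "jit.len"...
 *	ctx->emit = true;	   real pass: bytes land in "jit.buf"
 *	handle_prologue(ctx);	   now "jit.index" advances instead
 *
 * Either way, the back-end emitters return the length they (would)
 * emit, even when handed a NULL buffer.
 */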

/* Prologue based on context variables set by "analyze_reg_usage()". */
static int handle_prologue(struct jit_context *ctx)
{
	int ret;
	u8 *buf = effective_jit_buf(ctx);
	u32 len = 0;

	CHECK_RET(jit_buffer_check(ctx));

	len = arc_prologue(buf, ctx->arc_regs_clobbered, ctx->frame_size);
	jit_buffer_update(ctx, len);

	return 0;
}

/* The counterpart of "handle_prologue()". */
static int handle_epilogue(struct jit_context *ctx)
{
	int ret;
	u8 *buf = effective_jit_buf(ctx);
	u32 len = 0;

	CHECK_RET(jit_buffer_check(ctx));

	len = arc_epilogue(buf, ctx->arc_regs_clobbered, ctx->frame_size);
	jit_buffer_update(ctx, len);

	return 0;
}

/* Tell the index of the BPF instruction we are dealing with. */
static inline s32 get_index_for_insn(const struct jit_context *ctx,
				     const struct bpf_insn *insn)
{
	return (insn - ctx->prog->insnsi);
}

/*
 * In most of the cases, the "offset" is read from "insn->off". However,
 * if it is an unconditional BPF_JMP32, then it comes from "insn->imm".
 *
 * (Courtesy of "cpu=v4" support)
 */
static inline s32 get_offset(const struct bpf_insn *insn)
{
	if ((BPF_CLASS(insn->code) == BPF_JMP32) &&
	    (BPF_OP(insn->code) == BPF_JA))
		return insn->imm;
	else
		return insn->off;
}

/*
 * Determine the index of the BPF instruction that we're jumping to.
 *
 * The "offset" is interpreted as the "number" of BPF instructions
 * from the _next_ BPF instruction. e.g.:
 *
 *  4 means 4 instructions after  the next insn
 *  0 means 0 instructions after  the next insn -> fallthrough.
 * -1 means 1 instruction  before the next insn -> jmp to current insn.
 *
 *  Another way to look at this: "offset" is the number of instructions
 *  that exist between the current instruction and the target instruction.
 *
 *  It is worth noting that a "mov r,i64", which is 16 bytes long, is
 *  treated as two instructions long, therefore "offset" needn't be
 *  treated specially for those. Everything is uniform.
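 *
 *  e.g. an insn at index 5 with offset -1 targets (5 + 1) + (-1) = 5,
 *  i.e. itself.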
 */
static inline s32 get_target_index_for_insn(const struct jit_context *ctx,
					    const struct bpf_insn *insn)
{
	return (get_index_for_insn(ctx, insn) + 1) + get_offset(insn);
}

/* Is there an immediate operand encoded in the "insn"? */
static inline bool has_imm(const struct bpf_insn *insn)
{
	return BPF_SRC(insn->code) == BPF_K;
}

/* Is this the last BPF instruction? */
static inline bool is_last_insn(const struct bpf_prog *prog, u32 idx)
{
	return idx == (prog->len - 1);
}

/*
 * Invocation of this function conditionally signals the need for
 * an extra pass. The conditions that must be met are:
 *
 * 1. The current pass itself shouldn't be an extra pass.
 * 2. The stream of bytes being JITed must come from a user program.
 */
static inline void set_need_for_extra_pass(struct jit_context *ctx)
{
	if (!ctx->is_extra_pass)
		ctx->need_extra_pass = ctx->user_bpf_prog;
}

/*
 * Check if the "size" is valid and then transfer the control to
 * the back-end for the swap.
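 *
 * e.g. a BPF_ALU | BPF_END | BPF_FROM_BE instruction with imm=16 asks
 * for "dst = be16(dst)": it reaches here as size=16 (from "imm") and
 * endian=BPF_FROM_BE.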
 */
static int handle_swap(u8 *buf, u8 rd, u8 size, u8 endian,
		       bool force, bool do_zext, u8 *len)
{
	/* Sanity check on the size. */
	switch (size) {
	case 16:
	case 32:
	case 64:
		break;
	default:
		pr_err("bpf-jit: invalid size for swap.\n");
		return -EINVAL;
	}

	*len = gen_swap(buf, rd, size, endian, force, do_zext);

	return 0;
}

/* Checks if the (instruction) index is in valid range. */
static inline bool check_insn_idx_valid(const struct jit_context *ctx,
					const s32 idx)
{
	return (idx >= 0 && idx < ctx->prog->len);
}

/*
 * Decouple the back-end from BPF by converting BPF conditions
 * to internal enum. ARC_CC_* start from 0 and are used as index
 * to an array. BPF_J* usage must end after this conversion.
 */
static int bpf_cond_to_arc(const u8 op, u8 *arc_cc)
{
	switch (op) {
	case BPF_JA:
		*arc_cc = ARC_CC_AL;
		break;
	case BPF_JEQ:
		*arc_cc = ARC_CC_EQ;
		break;
	case BPF_JGT:
		*arc_cc = ARC_CC_UGT;
		break;
	case BPF_JGE:
		*arc_cc = ARC_CC_UGE;
		break;
	case BPF_JSET:
		*arc_cc = ARC_CC_SET;
		break;
	case BPF_JNE:
		*arc_cc = ARC_CC_NE;
		break;
	case BPF_JSGT:
		*arc_cc = ARC_CC_SGT;
		break;
	case BPF_JSGE:
		*arc_cc = ARC_CC_SGE;
		break;
	case BPF_JLT:
		*arc_cc = ARC_CC_ULT;
		break;
	case BPF_JLE:
		*arc_cc = ARC_CC_ULE;
		break;
	case BPF_JSLT:
		*arc_cc = ARC_CC_SLT;
		break;
	case BPF_JSLE:
		*arc_cc = ARC_CC_SLE;
		break;
	default:
		pr_err("bpf-jit: can't handle condition 0x%02X\n", op);
		return -EINVAL;
	}
	return 0;
}

/*
 * Check a few things for a supposedly "jump" instruction:
 *
 * 0. "insn" is a "jump" instruction, but not the "call/exit" variant.
 * 1. The current "insn" index is in valid range.
 * 2. The index of the target instruction is in valid range.
 */
static int check_bpf_jump(const struct jit_context *ctx,
			  const struct bpf_insn *insn)
{
	const u8 class = BPF_CLASS(insn->code);
	const u8 op = BPF_OP(insn->code);

	/* Must be a jmp(32) instruction that is not a "call/exit". */
	if ((class != BPF_JMP && class != BPF_JMP32) ||
	    (op == BPF_CALL || op == BPF_EXIT)) {
		pr_err("bpf-jit: not a jump instruction.\n");
		return -EINVAL;
	}

	if (!check_insn_idx_valid(ctx, get_index_for_insn(ctx, insn))) {
		pr_err("bpf-jit: the bpf jump insn is not in prog.\n");
		return -EINVAL;
	}

	if (!check_insn_idx_valid(ctx, get_target_index_for_insn(ctx, insn))) {
		pr_err("bpf-jit: bpf jump label is out of range.\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * Based on the input "insn", consult "ctx->bpf2insn" to get the
 * related index (offset) of the translation in the JIT stream.
 */
static u32 get_curr_jit_off(const struct jit_context *ctx,
			    const struct bpf_insn *insn)
{
	const s32 idx = get_index_for_insn(ctx, insn);
#ifdef ARC_BPF_JIT_DEBUG
	BUG_ON(!offsets_available(ctx) || !check_insn_idx_valid(ctx, idx));
#endif
	return ctx->bpf2insn[idx];
}

/*
 * The input "insn" must be a jump instruction.
 *
 * Based on the input "insn", consult "ctx->bpf2insn" to get the
 * related JIT index (offset) of the "target instruction" that
 * "insn" would jump to.
 */
static u32 get_targ_jit_off(const struct jit_context *ctx,
			    const struct bpf_insn *insn)
{
	const s32 tidx = get_target_index_for_insn(ctx, insn);
#ifdef ARC_BPF_JIT_DEBUG
	BUG_ON(!offsets_available(ctx) || !check_insn_idx_valid(ctx, tidx));
#endif
	return ctx->bpf2insn[tidx];
}

/*
 * This function returns 0 for a feasible jump.
 *
 * Consult the back-end to check if it finds it feasible to emit
 * the necessary instructions based on "cond" and the displacement
 * between "from_off" and "to_off".
 */
static int feasible_jit_jump(u32 from_off, u32 to_off, u8 cond, bool j32)
{
	int ret = 0;

	if (j32) {
		if (!check_jmp_32(from_off, to_off, cond))
			ret = -EFAULT;
	} else {
		if (!check_jmp_64(from_off, to_off, cond))
			ret = -EFAULT;
	}

	if (ret != 0)
		pr_err("bpf-jit: the JIT displacement is not OK.\n");

	return ret;
}

/*
 * This jump handler performs the following steps:
 *
 * 1. Compute ARC's internal condition code from BPF's
 * 2. Determine the bitness of the operation (32 vs. 64)
 * 3. Sanity check on the BPF stream
 * 4. Sanity check on what is supposed to be JIT's displacement
 * 5. And finally, emit the necessary instructions
 *
 * The last two steps are performed through the back-end.
 * The values of steps 1 and 2 are necessary inputs for the back-end.
 */
static int handle_jumps(const struct jit_context *ctx,
			const struct bpf_insn *insn,
			u8 *len)
{
	u8 cond;
	int ret = 0;
	u8 *buf = effective_jit_buf(ctx);
	const bool j32 = (BPF_CLASS(insn->code) == BPF_JMP32);
	const u8 rd = insn->dst_reg;
	u8 rs = insn->src_reg;
	u32 curr_off = 0, targ_off = 0;

	*len = 0;

	/* Map the BPF condition to the internal enum. */
	CHECK_RET(bpf_cond_to_arc(BPF_OP(insn->code), &cond));

	/* Sanity check on the BPF byte stream. */
	CHECK_RET(check_bpf_jump(ctx, insn));

	/*
	 * Move the immediate into a temporary register _now_ for 2 reasons:
	 *
	 * 1. "gen_jmp_{32,64}()" deal with operands in registers.
	 *
	 * 2. The "len" parameter will grow so that the current jit offset
	 *    (curr_off) will have increased to a point where the necessary
	 *    instructions can be inserted by "gen_jmp_{32,64}()".
	 */
	if (has_imm(insn) && cond != ARC_CC_AL) {
		if (j32) {
			*len += mov_r32_i32(BUF(buf, *len), JIT_REG_TMP,
					    insn->imm);
		} else {
			*len += mov_r64_i32(BUF(buf, *len), JIT_REG_TMP,
					    insn->imm);
		}
		rs = JIT_REG_TMP;
	}

	/* If the offsets are known, check if the branch can occur. */
	if (offsets_available(ctx)) {
		curr_off = get_curr_jit_off(ctx, insn) + *len;
		targ_off = get_targ_jit_off(ctx, insn);

		/* Sanity check on the back-end side. */
		CHECK_RET(feasible_jit_jump(curr_off, targ_off, cond, j32));
	}

	if (j32) {
		*len += gen_jmp_32(BUF(buf, *len), rd, rs, cond,
				   curr_off, targ_off);
	} else {
		*len += gen_jmp_64(BUF(buf, *len), rd, rs, cond,
				   curr_off, targ_off);
	}

	return ret;
}

/* Jump to the translated epilogue address. */
static int handle_jmp_epilogue(struct jit_context *ctx,
			       const struct bpf_insn *insn, u8 *len)
{
	u8 *buf = effective_jit_buf(ctx);
	u32 curr_off = 0, epi_off = 0;

	/* Check the offset only if the data is available. */
	if (offsets_available(ctx)) {
		curr_off = get_curr_jit_off(ctx, insn);
		epi_off = ctx->epilogue_offset;

		if (!check_jmp_64(curr_off, epi_off, ARC_CC_AL)) {
			pr_err("bpf-jit: epilogue offset is not valid.\n");
			return -EINVAL;
		}
	}

	/* Jump to the "epilogue offset" (rd and rs don't matter). */
	*len = gen_jmp_64(buf, 0, 0, ARC_CC_AL, curr_off, epi_off);

	return 0;
}

/* Try to get the resolved address and generate the instructions. */
static int handle_call(struct jit_context *ctx,
		       const struct bpf_insn *insn,
		       u8 *len)
{
	int  ret;
	bool in_kernel_func, fixed = false;
	u64  addr = 0;
	u8  *buf = effective_jit_buf(ctx);

	ret = bpf_jit_get_func_addr(ctx->prog, insn, ctx->is_extra_pass,
				    &addr, &fixed);
	if (ret < 0) {
		pr_err("bpf-jit: can't get the address for call.\n");
		return ret;
	}
	in_kernel_func = fixed;

	/* No valuable address retrieved (yet). */
	if (!fixed && !addr)
		set_need_for_extra_pass(ctx);

	*len = gen_func_call(buf, (ARC_ADDR)addr, in_kernel_func);

	if (insn->src_reg != BPF_PSEUDO_CALL) {
		/* Assign the ABI's return reg to the JIT's return reg. */
		*len += arc_to_bpf_return(BUF(buf, *len));
	}

	return 0;
}

/*
 * Try to generate instructions for loading a 64-bit immediate.
 * This sort of instruction is usually associated with the 64-bit
 * relocations: R_BPF_64_64. Therefore, signal the need for an extra
 * pass if the circumstances are right.
 */
static int handle_ld_imm64(struct jit_context *ctx,
			   const struct bpf_insn *insn,
			   u8 *len)
{
	const s32 idx = get_index_for_insn(ctx, insn);
	u8 *buf = effective_jit_buf(ctx);

	/* We're about to consume 2 VM instructions. */
	if (is_last_insn(ctx->prog, idx)) {
		pr_err("bpf-jit: need more data for 64-bit immediate.\n");
		return -EINVAL;
	}

	*len = mov_r64_i64(buf, insn->dst_reg, insn->imm, (insn + 1)->imm);

	if (bpf_pseudo_func(insn))
		set_need_for_extra_pass(ctx);

	return 0;
}

/*
 * Handles one eBPF instruction at a time. To keep this function fast,
 * it does not call "jit_buffer_check()"; otherwise, the check would run
 * for every instruction. As a result, it should not be invoked directly.
 * Only "handle_body()", which has already executed the check, may call
 * this function.
 *
 * If the "ret" value is negative, something has gone wrong. Otherwise,
 * it is mostly 0 and rarely 1. A value of 1 signals the loop in
 * "handle_body()" to skip the next instruction, because it has been
 * consumed as part of a 64-bit immediate value.
 */
static int handle_insn(struct jit_context *ctx, u32 idx)
{
	const struct bpf_insn *insn = &ctx->prog->insnsi[idx];
	const u8  code = insn->code;
	const u8  dst  = insn->dst_reg;
	const u8  src  = insn->src_reg;
	const s16 off  = insn->off;
	const s32 imm  = insn->imm;
	u8 *buf = effective_jit_buf(ctx);
	u8  len = 0;
	int ret = 0;

	switch (code) {
	/* dst += src (32-bit) */
	case BPF_ALU | BPF_ADD | BPF_X:
		len = add_r32(buf, dst, src);
		break;
	/* dst += imm (32-bit) */
	case BPF_ALU | BPF_ADD | BPF_K:
		len = add_r32_i32(buf, dst, imm);
		break;
	/* dst -= src (32-bit) */
	case BPF_ALU | BPF_SUB | BPF_X:
		len = sub_r32(buf, dst, src);
		break;
	/* dst -= imm (32-bit) */
	case BPF_ALU | BPF_SUB | BPF_K:
		len = sub_r32_i32(buf, dst, imm);
		break;
	/* dst = -dst (32-bit) */
	case BPF_ALU | BPF_NEG:
		len = neg_r32(buf, dst);
		break;
	/* dst *= src (32-bit) */
	case BPF_ALU | BPF_MUL | BPF_X:
		len = mul_r32(buf, dst, src);
		break;
	/* dst *= imm (32-bit) */
	case BPF_ALU | BPF_MUL | BPF_K:
		len = mul_r32_i32(buf, dst, imm);
		break;
	/* dst /= src (32-bit) */
	case BPF_ALU | BPF_DIV | BPF_X:
		len = div_r32(buf, dst, src, off == 1);
		break;
	/* dst /= imm (32-bit) */
	case BPF_ALU | BPF_DIV | BPF_K:
		len = div_r32_i32(buf, dst, imm, off == 1);
		break;
	/* dst %= src (32-bit) */
	case BPF_ALU | BPF_MOD | BPF_X:
		len = mod_r32(buf, dst, src, off == 1);
		break;
	/* dst %= imm (32-bit) */
	case BPF_ALU | BPF_MOD | BPF_K:
		len = mod_r32_i32(buf, dst, imm, off == 1);
		break;
	/* dst &= src (32-bit) */
	case BPF_ALU | BPF_AND | BPF_X:
		len = and_r32(buf, dst, src);
		break;
	/* dst &= imm (32-bit) */
	case BPF_ALU | BPF_AND | BPF_K:
		len = and_r32_i32(buf, dst, imm);
		break;
	/* dst |= src (32-bit) */
	case BPF_ALU | BPF_OR | BPF_X:
		len = or_r32(buf, dst, src);
		break;
	/* dst |= imm (32-bit) */
	case BPF_ALU | BPF_OR | BPF_K:
		len = or_r32_i32(buf, dst, imm);
		break;
	/* dst ^= src (32-bit) */
	case BPF_ALU | BPF_XOR | BPF_X:
		len = xor_r32(buf, dst, src);
		break;
	/* dst ^= imm (32-bit) */
	case BPF_ALU | BPF_XOR | BPF_K:
		len = xor_r32_i32(buf, dst, imm);
		break;
	/* dst <<= src (32-bit) */
	case BPF_ALU | BPF_LSH | BPF_X:
		len = lsh_r32(buf, dst, src);
		break;
	/* dst <<= imm (32-bit) */
	case BPF_ALU | BPF_LSH | BPF_K:
		len = lsh_r32_i32(buf, dst, imm);
		break;
	/* dst >>= src (32-bit) [unsigned] */
	case BPF_ALU | BPF_RSH | BPF_X:
		len = rsh_r32(buf, dst, src);
		break;
	/* dst >>= imm (32-bit) [unsigned] */
	case BPF_ALU | BPF_RSH | BPF_K:
		len = rsh_r32_i32(buf, dst, imm);
		break;
	/* dst >>= src (32-bit) [signed] */
	case BPF_ALU | BPF_ARSH | BPF_X:
		len = arsh_r32(buf, dst, src);
		break;
	/* dst >>= imm (32-bit) [signed] */
	case BPF_ALU | BPF_ARSH | BPF_K:
		len = arsh_r32_i32(buf, dst, imm);
		break;
	/* dst = src (32-bit) */
	case BPF_ALU | BPF_MOV | BPF_X:
		len = mov_r32(buf, dst, src, (u8)off);
		break;
	/* dst = imm32 (32-bit) */
	case BPF_ALU | BPF_MOV | BPF_K:
		len = mov_r32_i32(buf, dst, imm);
		break;
	/* dst = swap(dst) */
	case BPF_ALU   | BPF_END | BPF_FROM_LE:
	case BPF_ALU   | BPF_END | BPF_FROM_BE:
	case BPF_ALU64 | BPF_END | BPF_FROM_LE: {
		CHECK_RET(handle_swap(buf, dst, imm, BPF_SRC(code),
				      BPF_CLASS(code) == BPF_ALU64,
				      ctx->do_zext, &len));
		break;
	}
	/* dst += src (64-bit) */
	case BPF_ALU64 | BPF_ADD | BPF_X:
		len = add_r64(buf, dst, src);
		break;
	/* dst += imm32 (64-bit) */
	case BPF_ALU64 | BPF_ADD | BPF_K:
		len = add_r64_i32(buf, dst, imm);
		break;
	/* dst -= src (64-bit) */
	case BPF_ALU64 | BPF_SUB | BPF_X:
		len = sub_r64(buf, dst, src);
		break;
	/* dst -= imm32 (64-bit) */
	case BPF_ALU64 | BPF_SUB | BPF_K:
		len = sub_r64_i32(buf, dst, imm);
		break;
	/* dst = -dst (64-bit) */
	case BPF_ALU64 | BPF_NEG:
		len = neg_r64(buf, dst);
		break;
	/* dst *= src (64-bit) */
	case BPF_ALU64 | BPF_MUL | BPF_X:
		len = mul_r64(buf, dst, src);
		break;
	/* dst *= imm32 (64-bit) */
	case BPF_ALU64 | BPF_MUL | BPF_K:
		len = mul_r64_i32(buf, dst, imm);
		break;
	/* dst &= src (64-bit) */
	case BPF_ALU64 | BPF_AND | BPF_X:
		len = and_r64(buf, dst, src);
		break;
	/* dst &= imm32 (64-bit) */
	case BPF_ALU64 | BPF_AND | BPF_K:
		len = and_r64_i32(buf, dst, imm);
		break;
	/* dst |= src (64-bit) */
	case BPF_ALU64 | BPF_OR | BPF_X:
		len = or_r64(buf, dst, src);
		break;
	/* dst |= imm32 (64-bit) */
	case BPF_ALU64 | BPF_OR | BPF_K:
		len = or_r64_i32(buf, dst, imm);
		break;
	/* dst ^= src (64-bit) */
	case BPF_ALU64 | BPF_XOR | BPF_X:
		len = xor_r64(buf, dst, src);
		break;
	/* dst ^= imm32 (64-bit) */
	case BPF_ALU64 | BPF_XOR | BPF_K:
		len = xor_r64_i32(buf, dst, imm);
		break;
	/* dst <<= src (64-bit) */
	case BPF_ALU64 | BPF_LSH | BPF_X:
		len = lsh_r64(buf, dst, src);
		break;
	/* dst <<= imm32 (64-bit) */
	case BPF_ALU64 | BPF_LSH | BPF_K:
		len = lsh_r64_i32(buf, dst, imm);
		break;
	/* dst >>= src (64-bit) [unsigned] */
	case BPF_ALU64 | BPF_RSH | BPF_X:
		len = rsh_r64(buf, dst, src);
		break;
	/* dst >>= imm32 (64-bit) [unsigned] */
	case BPF_ALU64 | BPF_RSH | BPF_K:
		len = rsh_r64_i32(buf, dst, imm);
		break;
	/* dst >>= src (64-bit) [signed] */
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		len = arsh_r64(buf, dst, src);
		break;
	/* dst >>= imm32 (64-bit) [signed] */
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		len = arsh_r64_i32(buf, dst, imm);
		break;
	/* dst = src (64-bit) */
	case BPF_ALU64 | BPF_MOV | BPF_X:
		len = mov_r64(buf, dst, src, (u8)off);
		break;
	/* dst = imm32 (sign extend to 64-bit) */
	case BPF_ALU64 | BPF_MOV | BPF_K:
		len = mov_r64_i32(buf, dst, imm);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_DW | BPF_IMM:
		CHECK_RET(handle_ld_imm64(ctx, insn, &len));
		/* Tell the loop to skip the next instruction. */
		ret = 1;
		break;
	/* dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		len = load_r(buf, dst, src, off, BPF_SIZE(code), false);
		break;
	case BPF_LDX | BPF_MEMSX | BPF_W:
	case BPF_LDX | BPF_MEMSX | BPF_H:
	case BPF_LDX | BPF_MEMSX | BPF_B:
		len = load_r(buf, dst, src, off, BPF_SIZE(code), true);
		break;
	/* *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		len = store_r(buf, src, dst, off, BPF_SIZE(code));
		break;
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		len = store_i(buf, imm, dst, off, BPF_SIZE(code));
		break;
	case BPF_JMP   | BPF_JA:
	case BPF_JMP   | BPF_JEQ  | BPF_X:
	case BPF_JMP   | BPF_JEQ  | BPF_K:
	case BPF_JMP   | BPF_JNE  | BPF_X:
	case BPF_JMP   | BPF_JNE  | BPF_K:
	case BPF_JMP   | BPF_JSET | BPF_X:
	case BPF_JMP   | BPF_JSET | BPF_K:
	case BPF_JMP   | BPF_JGT  | BPF_X:
	case BPF_JMP   | BPF_JGT  | BPF_K:
	case BPF_JMP   | BPF_JGE  | BPF_X:
	case BPF_JMP   | BPF_JGE  | BPF_K:
	case BPF_JMP   | BPF_JSGT | BPF_X:
	case BPF_JMP   | BPF_JSGT | BPF_K:
	case BPF_JMP   | BPF_JSGE | BPF_X:
	case BPF_JMP   | BPF_JSGE | BPF_K:
	case BPF_JMP   | BPF_JLT  | BPF_X:
	case BPF_JMP   | BPF_JLT  | BPF_K:
	case BPF_JMP   | BPF_JLE  | BPF_X:
	case BPF_JMP   | BPF_JLE  | BPF_K:
	case BPF_JMP   | BPF_JSLT | BPF_X:
	case BPF_JMP   | BPF_JSLT | BPF_K:
	case BPF_JMP   | BPF_JSLE | BPF_X:
	case BPF_JMP   | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JA:
	case BPF_JMP32 | BPF_JEQ  | BPF_X:
	case BPF_JMP32 | BPF_JEQ  | BPF_K:
	case BPF_JMP32 | BPF_JNE  | BPF_X:
	case BPF_JMP32 | BPF_JNE  | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JGT  | BPF_X:
	case BPF_JMP32 | BPF_JGT  | BPF_K:
	case BPF_JMP32 | BPF_JGE  | BPF_X:
	case BPF_JMP32 | BPF_JGE  | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JLT  | BPF_X:
	case BPF_JMP32 | BPF_JLT  | BPF_K:
	case BPF_JMP32 | BPF_JLE  | BPF_X:
	case BPF_JMP32 | BPF_JLE  | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		CHECK_RET(handle_jumps(ctx, insn, &len));
		break;
	case BPF_JMP | BPF_CALL:
		CHECK_RET(handle_call(ctx, insn, &len));
		break;

	case BPF_JMP | BPF_EXIT:
		/* If this is the last instruction, epilogue will follow. */
		if (is_last_insn(ctx->prog, idx))
			break;
		CHECK_RET(handle_jmp_epilogue(ctx, insn, &len));
		break;
	default:
		pr_err("bpf-jit: can't handle instruction code 0x%02X\n", code);
		return -EOPNOTSUPP;
	}

	if (BPF_CLASS(code) == BPF_ALU) {
		/*
		 * Skip the "swap" instructions. Even 64-bit swaps are of type
		 * BPF_ALU (and not BPF_ALU64). Therefore, for the swaps, one
		 * has to look at the "size" of the operations rather than the
		 * ALU type. "gen_swap()" specifically takes care of that.
		 */
		if (BPF_OP(code) != BPF_END && ctx->do_zext)
			len += zext(BUF(buf, len), dst);
	}

	jit_buffer_update(ctx, len);

	return ret;
}

1052f122668dSShahab Vahedi static int handle_body(struct jit_context *ctx)
1053f122668dSShahab Vahedi {
1054f122668dSShahab Vahedi 	int ret;
1055f122668dSShahab Vahedi 	bool populate_bpf2insn = false;
1056f122668dSShahab Vahedi 	const struct bpf_prog *prog = ctx->prog;
1057f122668dSShahab Vahedi 
1058f122668dSShahab Vahedi 	CHECK_RET(jit_buffer_check(ctx));
1059f122668dSShahab Vahedi 
1060f122668dSShahab Vahedi 	/*
1061f122668dSShahab Vahedi 	 * Record the mapping for the instructions during the dry-run.
1062f122668dSShahab Vahedi 	 * Doing it this way allows us to have the mapping ready for
1063f122668dSShahab Vahedi 	 * the jump instructions during the real compilation phase.
1064f122668dSShahab Vahedi 	 */
1065f122668dSShahab Vahedi 	if (!ctx->emit)
1066f122668dSShahab Vahedi 		populate_bpf2insn = true;
1067f122668dSShahab Vahedi 
1068f122668dSShahab Vahedi 	for (u32 i = 0; i < prog->len; i++) {
1069f122668dSShahab Vahedi 		/* During the dry-run, jit.len grows gradually per BPF insn. */
1070f122668dSShahab Vahedi 		if (populate_bpf2insn)
1071f122668dSShahab Vahedi 			ctx->bpf2insn[i] = ctx->jit.len;
1072f122668dSShahab Vahedi 
1073f122668dSShahab Vahedi 		CHECK_RET(handle_insn(ctx, i));
1074f122668dSShahab Vahedi 		if (ret > 0) {
1075f122668dSShahab Vahedi 			/* "ret" is 1 if two (64-bit) chunks were consumed. */
1076f122668dSShahab Vahedi 			ctx->bpf2insn[i + 1] = ctx->bpf2insn[i];
1077f122668dSShahab Vahedi 			i++;
1078f122668dSShahab Vahedi 		}
1079f122668dSShahab Vahedi 	}
1080f122668dSShahab Vahedi 
1081f122668dSShahab Vahedi 	/* If bpf2insn had to be populated, then it is done at this point. */
1082f122668dSShahab Vahedi 	if (populate_bpf2insn)
1083f122668dSShahab Vahedi 		ctx->bpf2insn_valid = true;
1084f122668dSShahab Vahedi 
1085f122668dSShahab Vahedi 	return 0;
1086f122668dSShahab Vahedi }
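/*
 * Example of the mapping that handle_body() builds during the dry run
 * (illustrative, the JIT offsets are made up): a 64-bit immediate load
 * occupies two 8-byte BPF slots, so both slots share one offset thanks
 * to the "ret > 0" branch above:
 *
 *	BPF insn 0: r1 = 0x1122334455667788 ll	bpf2insn[0] = 0x10
 *	BPF insn 1: (2nd half of ld_imm64)	bpf2insn[1] = 0x10
 *	BPF insn 2: r0 = r1			bpf2insn[2] = 0x1c
 */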
1087f122668dSShahab Vahedi 
1088f122668dSShahab Vahedi /*
1089f122668dSShahab Vahedi  * Initialize the memory with "unimp_s", the mnemonic for the
1090f122668dSShahab Vahedi  * "unimplemented" instruction, which always raises an exception.
1091f122668dSShahab Vahedi  *
1092f122668dSShahab Vahedi  * The instruction is 2 bytes. If "size" is odd, there is not much
1093f122668dSShahab Vahedi  * that can be done about the last byte in "area", because the CPU
1094f122668dSShahab Vahedi  * always fetches instructions two bytes at a time. If that byte is
1095f122668dSShahab Vahedi  * ever fetched, it is paired with whatever byte lies just beyond
1096f122668dSShahab Vahedi  * the area. In the most likely case of a little-endian system,
1097f122668dSShahab Vahedi  * that out-of-area byte ends up as the major opcode, and we have
1098f122668dSShahab Vahedi  * no control over its initialization.
1099f122668dSShahab Vahedi  */
1100f122668dSShahab Vahedi static void fill_ill_insn(void *area, unsigned int size)
1101f122668dSShahab Vahedi {
1102f122668dSShahab Vahedi 	const u16 unimp_s = 0x79e0;
1103f122668dSShahab Vahedi 
1104f122668dSShahab Vahedi 	if (size & 1) {
1105f122668dSShahab Vahedi 		*((u8 *)area + (size - 1)) = 0xff;
1106f122668dSShahab Vahedi 		size -= 1;
1107f122668dSShahab Vahedi 	}
1108f122668dSShahab Vahedi 
1109f122668dSShahab Vahedi 	memset16(area, unimp_s, size >> 1);
1110f122668dSShahab Vahedi }
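/*
 * Resulting layout for an odd "size" (illustrative): with size = 5 on
 * a little-endian ARC, the 0x79e0 pattern is stored byte-swapped and
 * the stray trailing byte is set to 0xff:
 *
 *	e0 79 e0 79 ff
 *	\___/ \___/  ^-- odd trailing byte
 *	unimp_s x 2
 */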
1111f122668dSShahab Vahedi 
1112f122668dSShahab Vahedi /* Piece of memory that can be allocated at the beginning of jit_prepare(). */
1113f122668dSShahab Vahedi static int jit_prepare_early_mem_alloc(struct jit_context *ctx)
1114f122668dSShahab Vahedi {
1115f122668dSShahab Vahedi 	ctx->bpf2insn = kcalloc(ctx->prog->len, sizeof(ctx->jit.len),
1116f122668dSShahab Vahedi 				GFP_KERNEL);
1117f122668dSShahab Vahedi 
1118f122668dSShahab Vahedi 	if (!ctx->bpf2insn) {
1119f122668dSShahab Vahedi 		pr_err("bpf-jit: could not allocate memory for mapping of the instructions.\n");
1121f122668dSShahab Vahedi 		return -ENOMEM;
1122f122668dSShahab Vahedi 	}
1123f122668dSShahab Vahedi 
1124f122668dSShahab Vahedi 	return 0;
1125f122668dSShahab Vahedi }
1126f122668dSShahab Vahedi 
1127f122668dSShahab Vahedi /*
1128f122668dSShahab Vahedi  * Memory allocations that rely on parameters known at the end of
1129f122668dSShahab Vahedi  * jit_prepare().
1130f122668dSShahab Vahedi  */
1131f122668dSShahab Vahedi static int jit_prepare_final_mem_alloc(struct jit_context *ctx)
1132f122668dSShahab Vahedi {
1133f122668dSShahab Vahedi 	const size_t alignment = sizeof(u32);
1134f122668dSShahab Vahedi 
1135f122668dSShahab Vahedi 	ctx->bpf_header = bpf_jit_binary_alloc(ctx->jit.len, &ctx->jit.buf,
1136f122668dSShahab Vahedi 					       alignment, fill_ill_insn);
1137f122668dSShahab Vahedi 	if (!ctx->bpf_header) {
1138f122668dSShahab Vahedi 		pr_err("bpf-jit: could not allocate memory for translation.\n");
1139f122668dSShahab Vahedi 		return -ENOMEM;
1140f122668dSShahab Vahedi 	}
1141f122668dSShahab Vahedi 
1142f122668dSShahab Vahedi 	if (ctx->need_extra_pass) {
1143f122668dSShahab Vahedi 		ctx->jit_data = kzalloc(sizeof(*ctx->jit_data), GFP_KERNEL);
1144f122668dSShahab Vahedi 		if (!ctx->jit_data)
1145f122668dSShahab Vahedi 			return -ENOMEM;
1146f122668dSShahab Vahedi 	}
1147f122668dSShahab Vahedi 
1148f122668dSShahab Vahedi 	return 0;
1149f122668dSShahab Vahedi }
1150f122668dSShahab Vahedi 
1151f122668dSShahab Vahedi /*
1152f122668dSShahab Vahedi  * The first phase of the translation without actually emitting any
1153f122668dSShahab Vahedi  * instruction. It helps in getting a forecast on some aspects, such
1154f122668dSShahab Vahedi  * as the length of the whole program or where the epilogue starts.
1155f122668dSShahab Vahedi  *
1156f122668dSShahab Vahedi  * Whenever the necessary parameters are known, memories are allocated.
1157f122668dSShahab Vahedi  */
1158f122668dSShahab Vahedi static int jit_prepare(struct jit_context *ctx)
1159f122668dSShahab Vahedi {
1160f122668dSShahab Vahedi 	int ret;
1161f122668dSShahab Vahedi 
1162f122668dSShahab Vahedi 	/* Dry run. */
1163f122668dSShahab Vahedi 	ctx->emit = false;
1164f122668dSShahab Vahedi 
1165f122668dSShahab Vahedi 	CHECK_RET(jit_prepare_early_mem_alloc(ctx));
1166f122668dSShahab Vahedi 
1167f122668dSShahab Vahedi 	/* Get the length of prologue section after some register analysis. */
1168f122668dSShahab Vahedi 	analyze_reg_usage(ctx);
1169f122668dSShahab Vahedi 	CHECK_RET(handle_prologue(ctx));
1170f122668dSShahab Vahedi 
1171f122668dSShahab Vahedi 	CHECK_RET(handle_body(ctx));
1172f122668dSShahab Vahedi 
1173f122668dSShahab Vahedi 	/* Record at which offset epilogue begins. */
1174f122668dSShahab Vahedi 	ctx->epilogue_offset = ctx->jit.len;
1175f122668dSShahab Vahedi 
1176f122668dSShahab Vahedi 	/* Process the epilogue section now. */
1177f122668dSShahab Vahedi 	CHECK_RET(handle_epilogue(ctx));
1178f122668dSShahab Vahedi 
1179f122668dSShahab Vahedi 	CHECK_RET(jit_prepare_final_mem_alloc(ctx));
1180f122668dSShahab Vahedi 
1181f122668dSShahab Vahedi 	return 0;
1182f122668dSShahab Vahedi }
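/*
 * A minimal sketch of the contract that makes this two-phase scheme
 * work (hypothetical helper, not part of this file): an emitter must
 * report the same length whether or not ctx->emit is set, so nothing
 * that affects instruction size may depend on the emission flag:
 *
 *	static u8 emit_example(struct jit_context *ctx)
 *	{
 *		const u8 len = 4;
 *
 *		if (ctx->emit)
 *			emit_insn_bytes(BUF(ctx->jit.buf, ctx->jit.index));
 *		return len;
 *	}
 *
 * Otherwise jit_compile() diverges from jit_prepare() and bails out.
 */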
1183f122668dSShahab Vahedi 
1184f122668dSShahab Vahedi /*
1185*dd6a4037SShahab Vahedi  * jit_compile() is the real compilation phase. jit_prepare() is
1186*dd6a4037SShahab Vahedi  * invoked before jit_compile() as a dry run to make sure the
1187*dd6a4037SShahab Vahedi  * translation will succeed and to allocate the necessary memory.
1188*dd6a4037SShahab Vahedi  *
1189*dd6a4037SShahab Vahedi  * In the end, jit_compile() checks whether it has produced the same
1190*dd6a4037SShahab Vahedi  * number of bytes as jit_prepare() predicted.
1191f122668dSShahab Vahedi  */
1192f122668dSShahab Vahedi static int jit_compile(struct jit_context *ctx)
1193f122668dSShahab Vahedi {
1194f122668dSShahab Vahedi 	int ret;
1195f122668dSShahab Vahedi 
1196f122668dSShahab Vahedi 	/* Let there be code. */
1197f122668dSShahab Vahedi 	ctx->emit = true;
1198f122668dSShahab Vahedi 
1199f122668dSShahab Vahedi 	CHECK_RET(handle_prologue(ctx));
1200f122668dSShahab Vahedi 
1201f122668dSShahab Vahedi 	CHECK_RET(handle_body(ctx));
1202f122668dSShahab Vahedi 
1203f122668dSShahab Vahedi 	CHECK_RET(handle_epilogue(ctx));
1204f122668dSShahab Vahedi 
1205f122668dSShahab Vahedi 	if (ctx->jit.index != ctx->jit.len) {
1206f122668dSShahab Vahedi 		pr_err("bpf-jit: divergence between the phases; %u vs. %u (bytes).\n",
1207f122668dSShahab Vahedi 		       ctx->jit.len, ctx->jit.index);
1209f122668dSShahab Vahedi 		return -EFAULT;
1210f122668dSShahab Vahedi 	}
1211f122668dSShahab Vahedi 
1212f122668dSShahab Vahedi 	return 0;
1213f122668dSShahab Vahedi }
1214f122668dSShahab Vahedi 
1215f122668dSShahab Vahedi /*
1216f122668dSShahab Vahedi  * Calling this function implies a successful JIT. A successful
1217f122668dSShahab Vahedi  * translation is signaled by setting the right parameters:
1218f122668dSShahab Vahedi  *
1219f122668dSShahab Vahedi  * prog->jited=1, prog->jited_len=..., prog->bpf_func=...
1220f122668dSShahab Vahedi  */
1221f122668dSShahab Vahedi static int jit_finalize(struct jit_context *ctx)
1222f122668dSShahab Vahedi {
1223f122668dSShahab Vahedi 	struct bpf_prog *prog = ctx->prog;
1224f122668dSShahab Vahedi 
1225f122668dSShahab Vahedi 	/* We're going to need this information for the "do_extra_pass()". */
1226f122668dSShahab Vahedi 	if (ctx->need_extra_pass) {
1227f122668dSShahab Vahedi 		ctx->jit_data->bpf_header = ctx->bpf_header;
1228f122668dSShahab Vahedi 		ctx->jit_data->bpf2insn = ctx->bpf2insn;
1229f122668dSShahab Vahedi 		prog->aux->jit_data = (void *)ctx->jit_data;
1230f122668dSShahab Vahedi 	} else {
1231f122668dSShahab Vahedi 		/*
1232f122668dSShahab Vahedi 		 * If no extra pass is pending, mark the JITed memory
1233f122668dSShahab Vahedi 		 * as R-X and flush the caches for it.
1234f122668dSShahab Vahedi 		 */
1235f122668dSShahab Vahedi 		if (bpf_jit_binary_lock_ro(ctx->bpf_header)) {
1236f122668dSShahab Vahedi 			pr_err("bpf-jit: Could not lock the JIT memory.\n");
1237f122668dSShahab Vahedi 			return -EFAULT;
1238f122668dSShahab Vahedi 		}
1239f122668dSShahab Vahedi 		flush_icache_range((unsigned long)ctx->bpf_header,
1240f122668dSShahab Vahedi 				   (unsigned long)
1241f122668dSShahab Vahedi 				   BUF(ctx->jit.buf, ctx->jit.len));
1242f122668dSShahab Vahedi 		prog->aux->jit_data = NULL;
1243f122668dSShahab Vahedi 		bpf_prog_fill_jited_linfo(prog, ctx->bpf2insn);
1244f122668dSShahab Vahedi 	}
1245f122668dSShahab Vahedi 
1246f122668dSShahab Vahedi 	ctx->success = true;
1247f122668dSShahab Vahedi 	prog->bpf_func = (void *)ctx->jit.buf;
1248f122668dSShahab Vahedi 	prog->jited_len = ctx->jit.len;
1249f122668dSShahab Vahedi 	prog->jited = 1;
1250f122668dSShahab Vahedi 
1251f122668dSShahab Vahedi 	jit_ctx_cleanup(ctx);
1252f122668dSShahab Vahedi 	jit_dump(ctx);
1253f122668dSShahab Vahedi 
1254f122668dSShahab Vahedi 	return 0;
1255f122668dSShahab Vahedi }
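/*
 * Once jit_finalize() has run, the BPF core can dispatch straight into
 * the translation; conceptually (hypothetical caller, not this file):
 *
 *	u32 res = prog->bpf_func(ctx_ptr, prog->insnsi);
 *
 * with "bpf_func" pointing at the "ctx->jit.buf" installed above.
 */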
1256f122668dSShahab Vahedi 
1257f122668dSShahab Vahedi /*
1258f122668dSShahab Vahedi  * A lenient verification for the existence of JIT context in "prog".
1259f122668dSShahab Vahedi  * Apparently the JIT internals, namely jit_subprogs() in bpf/verifier.c,
1260f122668dSShahab Vahedi  * may request a second compilation although nothing needs to be done.
1261f122668dSShahab Vahedi  */
1262f122668dSShahab Vahedi static inline int check_jit_context(const struct bpf_prog *prog)
1263f122668dSShahab Vahedi {
1264f122668dSShahab Vahedi 	if (!prog->aux->jit_data) {
1265f122668dSShahab Vahedi 		pr_notice("bpf-jit: no jit data for the extra pass.\n");
1266f122668dSShahab Vahedi 		return 1;
1267f122668dSShahab Vahedi 	}
1268f122668dSShahab Vahedi 
1269f122668dSShahab Vahedi 	return 0;
1270f122668dSShahab Vahedi }
1271f122668dSShahab Vahedi 
1272f122668dSShahab Vahedi /* Reuse the previous pass's data. */
1273f122668dSShahab Vahedi static int jit_resume_context(struct jit_context *ctx)
1274f122668dSShahab Vahedi {
1275f122668dSShahab Vahedi 	struct arc_jit_data *jdata =
1276f122668dSShahab Vahedi 		(struct arc_jit_data *)ctx->prog->aux->jit_data;
1277f122668dSShahab Vahedi 
1278f122668dSShahab Vahedi 	if (!jdata) {
1279f122668dSShahab Vahedi 		pr_err("bpf-jit: no jit data for the extra pass.\n");
1280f122668dSShahab Vahedi 		return -EINVAL;
1281f122668dSShahab Vahedi 	}
1282f122668dSShahab Vahedi 
1283f122668dSShahab Vahedi 	ctx->jit.buf = (u8 *)ctx->prog->bpf_func;
1284f122668dSShahab Vahedi 	ctx->jit.len = ctx->prog->jited_len;
1285f122668dSShahab Vahedi 	ctx->bpf_header = jdata->bpf_header;
1286f122668dSShahab Vahedi 	ctx->bpf2insn = (u32 *)jdata->bpf2insn;
1287f122668dSShahab Vahedi 	ctx->bpf2insn_valid = !!ctx->bpf2insn;
1288f122668dSShahab Vahedi 	ctx->jit_data = jdata;
1289f122668dSShahab Vahedi 
1290f122668dSShahab Vahedi 	return 0;
1291f122668dSShahab Vahedi }
1292f122668dSShahab Vahedi 
1293f122668dSShahab Vahedi /*
1294f122668dSShahab Vahedi  * Patch in the new addresses. The instructions of interest are:
1295f122668dSShahab Vahedi  *
1296f122668dSShahab Vahedi  * - call
1297f122668dSShahab Vahedi  * - ld r64, imm64
1298f122668dSShahab Vahedi  *
1299f122668dSShahab Vahedi  * For "call"s, it resolves the addresses one more time through
1300f122668dSShahab Vahedi  * handle_call().
1301f122668dSShahab Vahedi  *
1302f122668dSShahab Vahedi  * For 64-bit immediate loads, it simply retranslates them, because the
1303f122668dSShahab Vahedi  * BPF core might have changed the value since the normal pass.
1304f122668dSShahab Vahedi  */
1305f122668dSShahab Vahedi static int jit_patch_relocations(struct jit_context *ctx)
1306f122668dSShahab Vahedi {
1307f122668dSShahab Vahedi 	const u8 bpf_opc_call = BPF_JMP | BPF_CALL;
1308f122668dSShahab Vahedi 	const u8 bpf_opc_ldi64 = BPF_LD | BPF_DW | BPF_IMM;
1309f122668dSShahab Vahedi 	const struct bpf_prog *prog = ctx->prog;
1310f122668dSShahab Vahedi 	int ret;
1311f122668dSShahab Vahedi 
1312f122668dSShahab Vahedi 	ctx->emit = true;
1313f122668dSShahab Vahedi 	for (u32 i = 0; i < prog->len; i++) {
1314f122668dSShahab Vahedi 		const struct bpf_insn *insn = &prog->insnsi[i];
1315f122668dSShahab Vahedi 		u8 dummy;
1316f122668dSShahab Vahedi 		/*
1317f122668dSShahab Vahedi 		 * Adjust "ctx.jit.index", so "gen_*()" functions below
1318f122668dSShahab Vahedi 		 * can use it for their output addresses.
1319f122668dSShahab Vahedi 		 */
1320f122668dSShahab Vahedi 		ctx->jit.index = ctx->bpf2insn[i];
1321f122668dSShahab Vahedi 
1322f122668dSShahab Vahedi 		if (insn->code == bpf_opc_call) {
1323f122668dSShahab Vahedi 			CHECK_RET(handle_call(ctx, insn, &dummy));
1324f122668dSShahab Vahedi 		} else if (insn->code == bpf_opc_ldi64) {
1325f122668dSShahab Vahedi 			CHECK_RET(handle_ld_imm64(ctx, insn, &dummy));
1326f122668dSShahab Vahedi 			/* Skip the next instruction. */
1327f122668dSShahab Vahedi 			++i;
1328f122668dSShahab Vahedi 		}
1329f122668dSShahab Vahedi 	}
1330f122668dSShahab Vahedi 	return 0;
1331f122668dSShahab Vahedi }
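/*
 * Example (illustrative): a 64-bit immediate load spans two 8-byte BPF
 * slots, with the upper 32 bits living in the "imm" field of the second
 * slot. That is why the loop above skips one extra slot after
 * handle_ld_imm64():
 *
 *	insn[i]  : code = BPF_LD | BPF_DW | BPF_IMM, imm = lower 32 bits
 *	insn[i+1]: code = 0,                         imm = upper 32 bits
 */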
1332f122668dSShahab Vahedi 
1333f122668dSShahab Vahedi /*
1334f122668dSShahab Vahedi  * A normal pass that involves a "dry-run" phase, jit_prepare(),
1335f122668dSShahab Vahedi  * to get the necessary data for the real compilation phase,
1336f122668dSShahab Vahedi  * jit_compile().
1337f122668dSShahab Vahedi  */
1338f122668dSShahab Vahedi static struct bpf_prog *do_normal_pass(struct bpf_prog *prog)
1339f122668dSShahab Vahedi {
1340f122668dSShahab Vahedi 	struct jit_context ctx;
1341f122668dSShahab Vahedi 
1342f122668dSShahab Vahedi 	/* Bail out if JIT is disabled. */
1343f122668dSShahab Vahedi 	if (!prog->jit_requested)
1344f122668dSShahab Vahedi 		return prog;
1345f122668dSShahab Vahedi 
1346f122668dSShahab Vahedi 	if (jit_ctx_init(&ctx, prog)) {
1347f122668dSShahab Vahedi 		jit_ctx_cleanup(&ctx);
1348f122668dSShahab Vahedi 		return prog;
1349f122668dSShahab Vahedi 	}
1350f122668dSShahab Vahedi 
1351f122668dSShahab Vahedi 	/* Get the lengths and allocate buffer. */
1352f122668dSShahab Vahedi 	if (jit_prepare(&ctx)) {
1353f122668dSShahab Vahedi 		jit_ctx_cleanup(&ctx);
1354f122668dSShahab Vahedi 		return prog;
1355f122668dSShahab Vahedi 	}
1356f122668dSShahab Vahedi 
1357f122668dSShahab Vahedi 	if (jit_compile(&ctx)) {
1358f122668dSShahab Vahedi 		jit_ctx_cleanup(&ctx);
1359f122668dSShahab Vahedi 		return prog;
1360f122668dSShahab Vahedi 	}
1361f122668dSShahab Vahedi 
1362f122668dSShahab Vahedi 	if (jit_finalize(&ctx)) {
1363f122668dSShahab Vahedi 		jit_ctx_cleanup(&ctx);
1364f122668dSShahab Vahedi 		return prog;
1365f122668dSShahab Vahedi 	}
1366f122668dSShahab Vahedi 
1367f122668dSShahab Vahedi 	return ctx.prog;
1368f122668dSShahab Vahedi }
1369f122668dSShahab Vahedi 
1370f122668dSShahab Vahedi /*
1371f122668dSShahab Vahedi  * If there are multi-function BPF programs that call each other,
1372f122668dSShahab Vahedi  * their translated addresses are not known all at once. Therefore,
1373f122668dSShahab Vahedi  * an extra pass is needed to consult bpf_jit_get_func_addr()
1374f122668dSShahab Vahedi  * again to get the newly translated addresses in order to resolve
1375f122668dSShahab Vahedi  * the "call"s.
1376f122668dSShahab Vahedi  */
1377f122668dSShahab Vahedi static struct bpf_prog *do_extra_pass(struct bpf_prog *prog)
1378f122668dSShahab Vahedi {
1379f122668dSShahab Vahedi 	struct jit_context ctx;
1380f122668dSShahab Vahedi 
1381f122668dSShahab Vahedi 	/* Skip if there's no context to resume from. */
1382f122668dSShahab Vahedi 	if (check_jit_context(prog))
1383f122668dSShahab Vahedi 		return prog;
1384f122668dSShahab Vahedi 
1385f122668dSShahab Vahedi 	if (jit_ctx_init(&ctx, prog)) {
1386f122668dSShahab Vahedi 		jit_ctx_cleanup(&ctx);
1387f122668dSShahab Vahedi 		return prog;
1388f122668dSShahab Vahedi 	}
1389f122668dSShahab Vahedi 
1390f122668dSShahab Vahedi 	if (jit_resume_context(&ctx)) {
1391f122668dSShahab Vahedi 		jit_ctx_cleanup(&ctx);
1392f122668dSShahab Vahedi 		return prog;
1393f122668dSShahab Vahedi 	}
1394f122668dSShahab Vahedi 
1395f122668dSShahab Vahedi 	if (jit_patch_relocations(&ctx)) {
1396f122668dSShahab Vahedi 		jit_ctx_cleanup(&ctx);
1397f122668dSShahab Vahedi 		return prog;
1398f122668dSShahab Vahedi 	}
1399f122668dSShahab Vahedi 
1400f122668dSShahab Vahedi 	if (jit_finalize(&ctx)) {
1401f122668dSShahab Vahedi 		jit_ctx_cleanup(&ctx);
1402f122668dSShahab Vahedi 		return prog;
1403f122668dSShahab Vahedi 	}
1404f122668dSShahab Vahedi 
1405f122668dSShahab Vahedi 	return ctx.prog;
1406f122668dSShahab Vahedi }
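/*
 * Example flow (illustrative): for a program with two subfunctions
 * where F() calls G(), G's final address is unknown while F is being
 * translated for the first time. The extra pass over F then runs:
 *
 *	check_jit_context()     -> jit data exists, proceed
 *	jit_resume_context()    -> reload buf/len/bpf2insn from jit_data
 *	jit_patch_relocations() -> handle_call() now sees G's address
 *	jit_finalize()          -> lock the region and set prog fields
 */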
1407f122668dSShahab Vahedi 
1408f122668dSShahab Vahedi /*
1409f122668dSShahab Vahedi  * This function may be invoked twice for the same stream of BPF
1410*dd6a4037SShahab Vahedi  * instructions. The "extra pass" happens when relocations are
1411*dd6a4037SShahab Vahedi  * involved whose addresses are not known during the first run.
1413f122668dSShahab Vahedi  */
1414f122668dSShahab Vahedi struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1415f122668dSShahab Vahedi {
1416f122668dSShahab Vahedi 	vm_dump(prog);
1417f122668dSShahab Vahedi 
1418f122668dSShahab Vahedi 	/* Was this program already translated? */
1419f122668dSShahab Vahedi 	if (!prog->jited)
1420f122668dSShahab Vahedi 		return do_normal_pass(prog);
1421f122668dSShahab Vahedi 
1422f122668dSShahab Vahedi 	return do_extra_pass(prog);
1425f122668dSShahab Vahedi }
1426