xref: /linux/arch/powerpc/net/bpf_jit_comp64.c (revision f9320c49993ca3c0ec0f9a7026b313735306bb8b)
1b886d83cSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2156d0e29SNaveen N. Rao /*
3156d0e29SNaveen N. Rao  * bpf_jit_comp64.c: eBPF JIT compiler
4156d0e29SNaveen N. Rao  *
5156d0e29SNaveen N. Rao  * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
6156d0e29SNaveen N. Rao  *		  IBM Corporation
7156d0e29SNaveen N. Rao  *
8156d0e29SNaveen N. Rao  * Based on the powerpc classic BPF JIT compiler by Matt Evans
9156d0e29SNaveen N. Rao  */
10156d0e29SNaveen N. Rao #include <linux/moduleloader.h>
11156d0e29SNaveen N. Rao #include <asm/cacheflush.h>
12ec0c464cSChristophe Leroy #include <asm/asm-compat.h>
13156d0e29SNaveen N. Rao #include <linux/netdevice.h>
14156d0e29SNaveen N. Rao #include <linux/filter.h>
15156d0e29SNaveen N. Rao #include <linux/if_vlan.h>
16156d0e29SNaveen N. Rao #include <asm/kprobes.h>
17ce076141SNaveen N. Rao #include <linux/bpf.h>
18b7540d62SNaveen N. Rao #include <asm/security_features.h>
19156d0e29SNaveen N. Rao 
20156d0e29SNaveen N. Rao #include "bpf_jit64.h"
21156d0e29SNaveen N. Rao 
22156d0e29SNaveen N. Rao static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
23156d0e29SNaveen N. Rao {
24156d0e29SNaveen N. Rao 	/*
25156d0e29SNaveen N. Rao 	 * We only need a stack frame if:
26156d0e29SNaveen N. Rao 	 * - we call other functions (kernel helpers), or
27156d0e29SNaveen N. Rao 	 * - the bpf program uses its stack area
28156d0e29SNaveen N. Rao 	 * The latter condition is deduced from the usage of BPF_REG_FP
29156d0e29SNaveen N. Rao 	 */
30ed573b57SChristophe Leroy 	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, b2p[BPF_REG_FP]);
31156d0e29SNaveen N. Rao }
32156d0e29SNaveen N. Rao 
337b847f52SNaveen N. Rao /*
347b847f52SNaveen N. Rao  * When not setting up our own stackframe, the redzone usage is:
357b847f52SNaveen N. Rao  *
367b847f52SNaveen N. Rao  *		[	prev sp		] <-------------
377b847f52SNaveen N. Rao  *		[	  ...       	] 		|
387b847f52SNaveen N. Rao  * sp (r1) --->	[    stack pointer	] --------------
39b7540d62SNaveen N. Rao  *		[   nv gpr save area	] 5*8
407b847f52SNaveen N. Rao  *		[    tail_call_cnt	] 8
41b7540d62SNaveen N. Rao  *		[    local_tmp_var	] 16
427b847f52SNaveen N. Rao  *		[   unused red zone	] 224 bytes protected
437b847f52SNaveen N. Rao  */
447b847f52SNaveen N. Rao static int bpf_jit_stack_local(struct codegen_context *ctx)
457b847f52SNaveen N. Rao {
467b847f52SNaveen N. Rao 	if (bpf_has_stack_frame(ctx))
47ac0761ebSSandipan Das 		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
487b847f52SNaveen N. Rao 	else
49b7540d62SNaveen N. Rao 		return -(BPF_PPC_STACK_SAVE + 24);
507b847f52SNaveen N. Rao }
517b847f52SNaveen N. Rao 
52ce076141SNaveen N. Rao static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
53ce076141SNaveen N. Rao {
54b7540d62SNaveen N. Rao 	return bpf_jit_stack_local(ctx) + 16;
55ce076141SNaveen N. Rao }
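/*
 * Worked example (a sketch, assuming BPF_PPC_STACK_SAVE is the 5*8 = 40
 * byte nv gpr save area shown in the diagram above): without a stack
 * frame, bpf_jit_stack_local() returns -(40 + 24) = -64, so the 16 byte
 * local_tmp_var area occupies -64(r1)..-49(r1), and
 * bpf_jit_stack_tailcallcnt() returns -64 + 16 = -48, which matches the
 * prologue's store of the initial tail_call_cnt at
 * -(BPF_PPC_STACK_SAVE + 8) below r1.
 */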
56ce076141SNaveen N. Rao 
577b847f52SNaveen N. Rao static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
587b847f52SNaveen N. Rao {
597b847f52SNaveen N. Rao 	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
60ac0761ebSSandipan Das 		return (bpf_has_stack_frame(ctx) ?
61ac0761ebSSandipan Das 			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
627b847f52SNaveen N. Rao 				- (8 * (32 - reg));
637b847f52SNaveen N. Rao 
647b847f52SNaveen N. Rao 	pr_err("BPF JIT is asking about unknown registers");
657b847f52SNaveen N. Rao 	BUG();
667b847f52SNaveen N. Rao }
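/*
 * For example, assuming the b2p[] mapping in bpf_jit64.h places
 * BPF_REG_6..BPF_REG_10 in the non-volatile GPRs r27..r31: r31 is saved
 * 8 bytes below the top of our frame, i.e. at
 * (BPF_PPC_STACKFRAME + ctx->stack_size) - 8 with a stack frame, or at
 * plain -8(r1) without one -- the first slot of the nv gpr save area in
 * the redzone diagram above.
 */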
677b847f52SNaveen N. Rao 
6840272035SChristophe Leroy void bpf_jit_realloc_regs(struct codegen_context *ctx)
6940272035SChristophe Leroy {
7040272035SChristophe Leroy }
7140272035SChristophe Leroy 
724ea76e90SChristophe Leroy void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
73156d0e29SNaveen N. Rao {
74156d0e29SNaveen N. Rao 	int i;
75156d0e29SNaveen N. Rao 
76ce076141SNaveen N. Rao 	/*
77ce076141SNaveen N. Rao 	 * Initialize tail_call_cnt if we do tail calls.
78ce076141SNaveen N. Rao 	 * Otherwise, put in NOPs so that it can be skipped when we are
79ce076141SNaveen N. Rao 	 * invoked through a tail call.
80ce076141SNaveen N. Rao 	 */
81ce076141SNaveen N. Rao 	if (ctx->seen & SEEN_TAILCALL) {
823a181237SBalamuruhan S 		EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
83ce076141SNaveen N. Rao 		/* this goes in the redzone */
84ce076141SNaveen N. Rao 		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
85ce076141SNaveen N. Rao 	} else {
863a181237SBalamuruhan S 		EMIT(PPC_RAW_NOP());
873a181237SBalamuruhan S 		EMIT(PPC_RAW_NOP());
88ce076141SNaveen N. Rao 	}
89ce076141SNaveen N. Rao 
90ce076141SNaveen N. Rao #define BPF_TAILCALL_PROLOGUE_SIZE	8
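/*
 * Note: the li + std pair (or the two nops) emitted above is 8 bytes of
 * code, which is what BPF_TAILCALL_PROLOGUE_SIZE accounts for;
 * bpf_jit_emit_tail_call() branches to bpf_func + prologue size so that
 * a tail-called program skips this tail_call_cnt (re)initialization.
 */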
91ce076141SNaveen N. Rao 
927b847f52SNaveen N. Rao 	if (bpf_has_stack_frame(ctx)) {
93156d0e29SNaveen N. Rao 		/*
94156d0e29SNaveen N. Rao 		 * We need a stack frame, but we don't necessarily need to
95156d0e29SNaveen N. Rao 		 * save/restore LR unless we call other functions
96156d0e29SNaveen N. Rao 		 */
97156d0e29SNaveen N. Rao 		if (ctx->seen & SEEN_FUNC) {
98e08021f8SChristophe Leroy 			EMIT(PPC_RAW_MFLR(_R0));
99156d0e29SNaveen N. Rao 			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
100156d0e29SNaveen N. Rao 		}
101156d0e29SNaveen N. Rao 
102ac0761ebSSandipan Das 		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
103156d0e29SNaveen N. Rao 	}
104156d0e29SNaveen N. Rao 
105156d0e29SNaveen N. Rao 	/*
106156d0e29SNaveen N. Rao 	 * Back up non-volatile regs -- BPF registers 6-10
107156d0e29SNaveen N. Rao 	 * If we haven't created our own stack frame, we save these
108156d0e29SNaveen N. Rao 	 * in the protected zone below the previous stack frame
109156d0e29SNaveen N. Rao 	 */
110156d0e29SNaveen N. Rao 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
111ed573b57SChristophe Leroy 		if (bpf_is_seen_register(ctx, b2p[i]))
1127b847f52SNaveen N. Rao 			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
113156d0e29SNaveen N. Rao 
114156d0e29SNaveen N. Rao 	/* Setup frame pointer to point to the bpf stack area */
115ed573b57SChristophe Leroy 	if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP]))
1163a181237SBalamuruhan S 		EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
1173a181237SBalamuruhan S 				STACK_FRAME_MIN_SIZE + ctx->stack_size));
118156d0e29SNaveen N. Rao }
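/*
 * For illustration only (a sketch; register numbers assume the b2p[]
 * mapping in bpf_jit64.h, with TMP_REG_1 = r9 and BPF_REG_FP = r31), a
 * program that uses tail calls, calls helpers and uses its stack area
 * would get a prologue roughly like:
 *
 *	li	r9, 0
 *	std	r9, -48(r1)		# tail_call_cnt, in the redzone
 *	mflr	r0
 *	std	r0, 16(r1)		# save LR (PPC_LR_STKOFF)
 *	stdu	r1, -(BPF_PPC_STACKFRAME + stack_size)(r1)
 *	std	r27..r31, ...		# seen non-volatile BPF regs
 *	addi	r31, r1, STACK_FRAME_MIN_SIZE + stack_size
 */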
119156d0e29SNaveen N. Rao 
120ce076141SNaveen N. Rao static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
121156d0e29SNaveen N. Rao {
122156d0e29SNaveen N. Rao 	int i;
123156d0e29SNaveen N. Rao 
124156d0e29SNaveen N. Rao 	/* Restore NVRs */
125156d0e29SNaveen N. Rao 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
126ed573b57SChristophe Leroy 		if (bpf_is_seen_register(ctx, b2p[i]))
1277b847f52SNaveen N. Rao 			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
128156d0e29SNaveen N. Rao 
129156d0e29SNaveen N. Rao 	/* Tear down our stack frame */
1307b847f52SNaveen N. Rao 	if (bpf_has_stack_frame(ctx)) {
1313a181237SBalamuruhan S 		EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
132156d0e29SNaveen N. Rao 		if (ctx->seen & SEEN_FUNC) {
133156d0e29SNaveen N. Rao 			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
1343a181237SBalamuruhan S 			EMIT(PPC_RAW_MTLR(0));
135156d0e29SNaveen N. Rao 		}
136156d0e29SNaveen N. Rao 	}
137ce076141SNaveen N. Rao }
138ce076141SNaveen N. Rao 
1394ea76e90SChristophe Leroy void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
140ce076141SNaveen N. Rao {
141ce076141SNaveen N. Rao 	bpf_jit_emit_common_epilogue(image, ctx);
142ce076141SNaveen N. Rao 
143ce076141SNaveen N. Rao 	/* Move result to r3 */
1443a181237SBalamuruhan S 	EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));
145156d0e29SNaveen N. Rao 
1463a181237SBalamuruhan S 	EMIT(PPC_RAW_BLR());
147156d0e29SNaveen N. Rao }
148156d0e29SNaveen N. Rao 
149e2c95a61SDaniel Borkmann static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
150e2c95a61SDaniel Borkmann 				       u64 func)
151e2c95a61SDaniel Borkmann {
152e2c95a61SDaniel Borkmann #ifdef PPC64_ELF_ABI_v1
153e2c95a61SDaniel Borkmann 	/* func points to the function descriptor */
154e2c95a61SDaniel Borkmann 	PPC_LI64(b2p[TMP_REG_2], func);
155e2c95a61SDaniel Borkmann 	/* Load actual entry point from function descriptor */
156e2c95a61SDaniel Borkmann 	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
15720ccb004SNaveen N. Rao 	/* ... and move it to CTR */
15820ccb004SNaveen N. Rao 	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
159e2c95a61SDaniel Borkmann 	/*
160e2c95a61SDaniel Borkmann 	 * Load TOC from function descriptor at offset 8.
161e2c95a61SDaniel Borkmann 	 * We can clobber r2 since we get called through a
162e2c95a61SDaniel Borkmann 	 * function pointer (so caller will save/restore r2)
163e2c95a61SDaniel Borkmann 	 * and since we don't use a TOC ourselves.
164e2c95a61SDaniel Borkmann 	 */
165e2c95a61SDaniel Borkmann 	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
166e2c95a61SDaniel Borkmann #else
167e2c95a61SDaniel Borkmann 	/* We can clobber r12 */
168e2c95a61SDaniel Borkmann 	PPC_FUNC_ADDR(12, func);
16920ccb004SNaveen N. Rao 	EMIT(PPC_RAW_MTCTR(12));
170e2c95a61SDaniel Borkmann #endif
17120ccb004SNaveen N. Rao 	EMIT(PPC_RAW_BCTRL());
172e2c95a61SDaniel Borkmann }
173e2c95a61SDaniel Borkmann 
1744ea76e90SChristophe Leroy void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
175ce076141SNaveen N. Rao {
1764ea69b2fSSandipan Das 	unsigned int i, ctx_idx = ctx->idx;
1774ea69b2fSSandipan Das 
1784ea69b2fSSandipan Das 	/* Load function address into r12 */
1794ea69b2fSSandipan Das 	PPC_LI64(12, func);
1804ea69b2fSSandipan Das 
1814ea69b2fSSandipan Das 	/* For bpf-to-bpf function calls, the callee's address is unknown
1824ea69b2fSSandipan Das 	 * until the last extra pass. As seen above, we use PPC_LI64() to
1834ea69b2fSSandipan Das 	 * load the callee's address, but PPC_LI64() may emit fewer
1844ea69b2fSSandipan Das 	 * instructions depending on the value being loaded.
1854ea69b2fSSandipan Das 	 *
1864ea69b2fSSandipan Das 	 * Since we don't want the number of instructions emitted to change,
1874ea69b2fSSandipan Das 	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
1884ea69b2fSSandipan Das 	 * we always have a five-instruction sequence, which is the maximum
1894ea69b2fSSandipan Das 	 * that PPC_LI64() can emit.
1904ea69b2fSSandipan Das 	 */
1914ea69b2fSSandipan Das 	for (i = ctx->idx - ctx_idx; i < 5; i++)
1923a181237SBalamuruhan S 		EMIT(PPC_RAW_NOP());
1934ea69b2fSSandipan Das 
194ce076141SNaveen N. Rao #ifdef PPC64_ELF_ABI_v1
195ce076141SNaveen N. Rao 	/*
196ce076141SNaveen N. Rao 	 * Load TOC from function descriptor at offset 8.
197ce076141SNaveen N. Rao 	 * We can clobber r2 since we get called through a
198ce076141SNaveen N. Rao 	 * function pointer (so caller will save/restore r2)
199ce076141SNaveen N. Rao 	 * and since we don't use a TOC ourselves.
200ce076141SNaveen N. Rao 	 */
2014ea69b2fSSandipan Das 	PPC_BPF_LL(2, 12, 8);
2024ea69b2fSSandipan Das 	/* Load actual entry point from function descriptor */
2034ea69b2fSSandipan Das 	PPC_BPF_LL(12, 12, 0);
204ce076141SNaveen N. Rao #endif
2054ea69b2fSSandipan Das 
20620ccb004SNaveen N. Rao 	EMIT(PPC_RAW_MTCTR(12));
20720ccb004SNaveen N. Rao 	EMIT(PPC_RAW_BCTRL());
208ce076141SNaveen N. Rao }
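/*
 * Example of the padding above: if, in this pass, the address fits in
 * 32 bits, PPC_LI64() might emit only two instructions (e.g. lis + ori),
 * and three nops are then added so that the load always occupies five
 * instructions and can be re-patched with a full 64-bit address in the
 * extra pass without shifting any subsequent offsets.
 */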
209ce076141SNaveen N. Rao 
2103832ba4eSNaveen N. Rao static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
211ce076141SNaveen N. Rao {
212ce076141SNaveen N. Rao 	/*
213ce076141SNaveen N. Rao 	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
214ce076141SNaveen N. Rao 	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
215ce076141SNaveen N. Rao 	 * r4/BPF_REG_2 - pointer to bpf_array
216ce076141SNaveen N. Rao 	 * r5/BPF_REG_3 - index in bpf_array
217ce076141SNaveen N. Rao 	 */
218ce076141SNaveen N. Rao 	int b2p_bpf_array = b2p[BPF_REG_2];
219ce076141SNaveen N. Rao 	int b2p_index = b2p[BPF_REG_3];
220ce076141SNaveen N. Rao 
221ce076141SNaveen N. Rao 	/*
222ce076141SNaveen N. Rao 	 * if (index >= array->map.max_entries)
223ce076141SNaveen N. Rao 	 *   goto out;
224ce076141SNaveen N. Rao 	 */
22506541865SBalamuruhan S 	EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
2263a181237SBalamuruhan S 	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
2273a181237SBalamuruhan S 	EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1]));
228ce076141SNaveen N. Rao 	PPC_BCC(COND_GE, out);
229ce076141SNaveen N. Rao 
230ce076141SNaveen N. Rao 	/*
231ebf7f6f0STiezhu Yang 	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
232ce076141SNaveen N. Rao 	 *   goto out;
233ce076141SNaveen N. Rao 	 */
23486be36f6SNaveen N. Rao 	PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
2353a181237SBalamuruhan S 	EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
236ebf7f6f0STiezhu Yang 	PPC_BCC(COND_GE, out);
237ce076141SNaveen N. Rao 
238ce076141SNaveen N. Rao 	/*
239ce076141SNaveen N. Rao 	 * tail_call_cnt++;
240ce076141SNaveen N. Rao 	 */
2413a181237SBalamuruhan S 	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
242ce076141SNaveen N. Rao 	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
243ce076141SNaveen N. Rao 
244ce076141SNaveen N. Rao 	/* prog = array->ptrs[index]; */
2453a181237SBalamuruhan S 	EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
24606541865SBalamuruhan S 	EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array));
24786be36f6SNaveen N. Rao 	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
248ce076141SNaveen N. Rao 
249ce076141SNaveen N. Rao 	/*
250ce076141SNaveen N. Rao 	 * if (prog == NULL)
251ce076141SNaveen N. Rao 	 *   goto out;
252ce076141SNaveen N. Rao 	 */
2533a181237SBalamuruhan S 	EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0));
254ce076141SNaveen N. Rao 	PPC_BCC(COND_EQ, out);
255ce076141SNaveen N. Rao 
256ce076141SNaveen N. Rao 	/* goto *(prog->bpf_func + prologue_size); */
25786be36f6SNaveen N. Rao 	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
258ce076141SNaveen N. Rao #ifdef PPC64_ELF_ABI_v1
259ce076141SNaveen N. Rao 	/* skip past the function descriptor */
2603a181237SBalamuruhan S 	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
2613a181237SBalamuruhan S 			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE));
262ce076141SNaveen N. Rao #else
2633a181237SBalamuruhan S 	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE));
264ce076141SNaveen N. Rao #endif
2653a181237SBalamuruhan S 	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
266ce076141SNaveen N. Rao 
267ce076141SNaveen N. Rao 	/* tear down stack, restore NVRs, ... */
268ce076141SNaveen N. Rao 	bpf_jit_emit_common_epilogue(image, ctx);
269ce076141SNaveen N. Rao 
2703a181237SBalamuruhan S 	EMIT(PPC_RAW_BCTR());
2713832ba4eSNaveen N. Rao 
272ce076141SNaveen N. Rao 	/* out: */
2733832ba4eSNaveen N. Rao 	return 0;
274ce076141SNaveen N. Rao }
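/*
 * Note that the branch above enters the target program past its
 * prologue's first two instructions (BPF_TAILCALL_PROLOGUE_SIZE), so the
 * callee does not re-zero tail_call_cnt; the count therefore accumulates
 * across the whole tail-call chain and MAX_TAIL_CALL_CNT bounds the
 * chain as a whole.
 */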
275ce076141SNaveen N. Rao 
276b7540d62SNaveen N. Rao /*
277b7540d62SNaveen N. Rao  * We spill into the redzone always, even if the bpf program has its own stackframe.
278b7540d62SNaveen N. Rao  * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
279b7540d62SNaveen N. Rao  */
280b7540d62SNaveen N. Rao void bpf_stf_barrier(void);
281b7540d62SNaveen N. Rao 
282b7540d62SNaveen N. Rao asm (
283b7540d62SNaveen N. Rao "		.global bpf_stf_barrier		;"
284b7540d62SNaveen N. Rao "	bpf_stf_barrier:			;"
285b7540d62SNaveen N. Rao "		std	21,-64(1)		;"
286b7540d62SNaveen N. Rao "		std	22,-56(1)		;"
287b7540d62SNaveen N. Rao "		sync				;"
288b7540d62SNaveen N. Rao "		ld	21,-64(1)		;"
289b7540d62SNaveen N. Rao "		ld	22,-56(1)		;"
290b7540d62SNaveen N. Rao "		ori	31,31,0			;"
291b7540d62SNaveen N. Rao "		.rept 14			;"
292b7540d62SNaveen N. Rao "		b	1f			;"
293b7540d62SNaveen N. Rao "	1:					;"
294b7540d62SNaveen N. Rao "		.endr				;"
295b7540d62SNaveen N. Rao "		blr				;"
296b7540d62SNaveen N. Rao );
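/*
 * The fallback sequence above spills r21/r22 into the local_tmp_var area
 * of the redzone (-64(r1)/-56(r1), per the layout comment near the top of
 * this file), issues a sync, reloads them, and pads with ori 31,31,0 plus
 * 14 taken branches -- mirroring the kernel's fallback store-forwarding
 * (stf) barrier. The BPF_ST | BPF_NOSPEC case below calls it via CTR when
 * STF_BARRIER_FALLBACK is in effect.
 */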
297b7540d62SNaveen N. Rao 
298156d0e29SNaveen N. Rao /* Assemble the body code between the prologue & epilogue */
2994ea76e90SChristophe Leroy int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
300983bdc02SRavi Bangoria 		       u32 *addrs, int pass)
301156d0e29SNaveen N. Rao {
302b7540d62SNaveen N. Rao 	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
303156d0e29SNaveen N. Rao 	const struct bpf_insn *insn = fp->insnsi;
304156d0e29SNaveen N. Rao 	int flen = fp->len;
305e2c95a61SDaniel Borkmann 	int i, ret;
306156d0e29SNaveen N. Rao 
307156d0e29SNaveen N. Rao 	/* Start of epilogue code - will only be valid 2nd pass onwards */
308156d0e29SNaveen N. Rao 	u32 exit_addr = addrs[flen];
309156d0e29SNaveen N. Rao 
310156d0e29SNaveen N. Rao 	for (i = 0; i < flen; i++) {
311156d0e29SNaveen N. Rao 		u32 code = insn[i].code;
312156d0e29SNaveen N. Rao 		u32 dst_reg = b2p[insn[i].dst_reg];
313156d0e29SNaveen N. Rao 		u32 src_reg = b2p[insn[i].src_reg];
314efa95f03SHari Bathini 		u32 size = BPF_SIZE(code);
315156d0e29SNaveen N. Rao 		s16 off = insn[i].off;
316156d0e29SNaveen N. Rao 		s32 imm = insn[i].imm;
317e2c95a61SDaniel Borkmann 		bool func_addr_fixed;
318e2c95a61SDaniel Borkmann 		u64 func_addr;
319156d0e29SNaveen N. Rao 		u64 imm64;
320156d0e29SNaveen N. Rao 		u32 true_cond;
321b9c1e60eSDaniel Borkmann 		u32 tmp_idx;
322*f9320c49SNaveen N. Rao 		int j;
323156d0e29SNaveen N. Rao 
324156d0e29SNaveen N. Rao 		/*
325156d0e29SNaveen N. Rao 		 * addrs[] maps a BPF bytecode address into a real offset from
326156d0e29SNaveen N. Rao 		 * the start of the body code.
327156d0e29SNaveen N. Rao 		 */
328156d0e29SNaveen N. Rao 		addrs[i] = ctx->idx * 4;
329156d0e29SNaveen N. Rao 
330156d0e29SNaveen N. Rao 		/*
331156d0e29SNaveen N. Rao 		 * As an optimization, we note down which non-volatile registers
332156d0e29SNaveen N. Rao 		 * are used so that we can only save/restore those in our
333156d0e29SNaveen N. Rao 		 * prologue and epilogue. We do this here regardless of whether
334156d0e29SNaveen N. Rao 		 * the actual BPF instruction uses src/dst registers or not
335156d0e29SNaveen N. Rao 		 * (for instance, BPF_CALL does not use them). The expectation
336156d0e29SNaveen N. Rao 		 * is that those instructions will have src_reg/dst_reg set to
337156d0e29SNaveen N. Rao 		 * 0. Even otherwise, we just lose some prologue/epilogue
338156d0e29SNaveen N. Rao 		 * optimization but everything else should work without
339156d0e29SNaveen N. Rao 		 * any issues.
340156d0e29SNaveen N. Rao 		 */
3417b847f52SNaveen N. Rao 		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
342ed573b57SChristophe Leroy 			bpf_set_seen_register(ctx, dst_reg);
3437b847f52SNaveen N. Rao 		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
344ed573b57SChristophe Leroy 			bpf_set_seen_register(ctx, src_reg);
345156d0e29SNaveen N. Rao 
346156d0e29SNaveen N. Rao 		switch (code) {
347156d0e29SNaveen N. Rao 		/*
348156d0e29SNaveen N. Rao 		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
349156d0e29SNaveen N. Rao 		 */
350156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
351156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
35206541865SBalamuruhan S 			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
353156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
354156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
355156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
3563a181237SBalamuruhan S 			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
357156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
358156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
359156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
3605855c4c1SNaveen N. Rao 			if (!imm) {
3615855c4c1SNaveen N. Rao 				goto bpf_alu32_trunc;
3625855c4c1SNaveen N. Rao 			} else if (imm >= -32768 && imm < 32768) {
3633a181237SBalamuruhan S 				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
3645855c4c1SNaveen N. Rao 			} else {
365156d0e29SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
36606541865SBalamuruhan S 				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
367156d0e29SNaveen N. Rao 			}
3685855c4c1SNaveen N. Rao 			goto bpf_alu32_trunc;
3695855c4c1SNaveen N. Rao 		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
3705855c4c1SNaveen N. Rao 		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
3715855c4c1SNaveen N. Rao 			if (!imm) {
3725855c4c1SNaveen N. Rao 				goto bpf_alu32_trunc;
3735855c4c1SNaveen N. Rao 			} else if (imm > -32768 && imm <= 32768) {
3745855c4c1SNaveen N. Rao 				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
3755855c4c1SNaveen N. Rao 			} else {
3765855c4c1SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
3775855c4c1SNaveen N. Rao 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
378156d0e29SNaveen N. Rao 			}
379156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
380156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
381156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
382156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ALU)
3833a181237SBalamuruhan S 				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
384156d0e29SNaveen N. Rao 			else
3853a181237SBalamuruhan S 				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
386156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
387156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
388156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
389156d0e29SNaveen N. Rao 			if (imm >= -32768 && imm < 32768)
3903a181237SBalamuruhan S 				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
391156d0e29SNaveen N. Rao 			else {
392156d0e29SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
393156d0e29SNaveen N. Rao 				if (BPF_CLASS(code) == BPF_ALU)
3943a181237SBalamuruhan S 					EMIT(PPC_RAW_MULW(dst_reg, dst_reg,
3953a181237SBalamuruhan S 							b2p[TMP_REG_1]));
396156d0e29SNaveen N. Rao 				else
3973a181237SBalamuruhan S 					EMIT(PPC_RAW_MULD(dst_reg, dst_reg,
3983a181237SBalamuruhan S 							b2p[TMP_REG_1]));
399156d0e29SNaveen N. Rao 			}
400156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
401156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
402156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
403156d0e29SNaveen N. Rao 			if (BPF_OP(code) == BPF_MOD) {
4043a181237SBalamuruhan S 				EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg));
4053a181237SBalamuruhan S 				EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], src_reg,
4063a181237SBalamuruhan S 						b2p[TMP_REG_1]));
4073a181237SBalamuruhan S 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
408156d0e29SNaveen N. Rao 			} else
4093a181237SBalamuruhan S 				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
410156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
411156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
412156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
413156d0e29SNaveen N. Rao 			if (BPF_OP(code) == BPF_MOD) {
4143a181237SBalamuruhan S 				EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg));
4153a181237SBalamuruhan S 				EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], src_reg,
4163a181237SBalamuruhan S 						b2p[TMP_REG_1]));
4173a181237SBalamuruhan S 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
418156d0e29SNaveen N. Rao 			} else
4193a181237SBalamuruhan S 				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
420156d0e29SNaveen N. Rao 			break;
421156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
422156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
423156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
424156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
425156d0e29SNaveen N. Rao 			if (imm == 0)
426156d0e29SNaveen N. Rao 				return -EINVAL;
4278bbc9d82SNaveen N. Rao 			if (imm == 1) {
4288bbc9d82SNaveen N. Rao 				if (BPF_OP(code) == BPF_DIV) {
429156d0e29SNaveen N. Rao 					goto bpf_alu32_trunc;
4308bbc9d82SNaveen N. Rao 				} else {
4318bbc9d82SNaveen N. Rao 					EMIT(PPC_RAW_LI(dst_reg, 0));
4328bbc9d82SNaveen N. Rao 					break;
4338bbc9d82SNaveen N. Rao 				}
4348bbc9d82SNaveen N. Rao 			}
435156d0e29SNaveen N. Rao 
436156d0e29SNaveen N. Rao 			PPC_LI32(b2p[TMP_REG_1], imm);
437156d0e29SNaveen N. Rao 			switch (BPF_CLASS(code)) {
438156d0e29SNaveen N. Rao 			case BPF_ALU:
439156d0e29SNaveen N. Rao 				if (BPF_OP(code) == BPF_MOD) {
4403a181237SBalamuruhan S 					EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_2],
4413a181237SBalamuruhan S 							dst_reg,
4423a181237SBalamuruhan S 							b2p[TMP_REG_1]));
4433a181237SBalamuruhan S 					EMIT(PPC_RAW_MULW(b2p[TMP_REG_1],
444156d0e29SNaveen N. Rao 							b2p[TMP_REG_1],
4453a181237SBalamuruhan S 							b2p[TMP_REG_2]));
4463a181237SBalamuruhan S 					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
4473a181237SBalamuruhan S 							b2p[TMP_REG_1]));
448156d0e29SNaveen N. Rao 				} else
4493a181237SBalamuruhan S 					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg,
4503a181237SBalamuruhan S 							b2p[TMP_REG_1]));
451156d0e29SNaveen N. Rao 				break;
452156d0e29SNaveen N. Rao 			case BPF_ALU64:
453156d0e29SNaveen N. Rao 				if (BPF_OP(code) == BPF_MOD) {
4543a181237SBalamuruhan S 					EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_2],
4553a181237SBalamuruhan S 							dst_reg,
4563a181237SBalamuruhan S 							b2p[TMP_REG_1]));
4573a181237SBalamuruhan S 					EMIT(PPC_RAW_MULD(b2p[TMP_REG_1],
458156d0e29SNaveen N. Rao 							b2p[TMP_REG_1],
4593a181237SBalamuruhan S 							b2p[TMP_REG_2]));
4603a181237SBalamuruhan S 					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
4613a181237SBalamuruhan S 							b2p[TMP_REG_1]));
462156d0e29SNaveen N. Rao 				} else
4633a181237SBalamuruhan S 					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg,
4643a181237SBalamuruhan S 							b2p[TMP_REG_1]));
465156d0e29SNaveen N. Rao 				break;
466156d0e29SNaveen N. Rao 			}
467156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
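			/*
			 * The DIV/MOD-by-constant path above has no hardware modulo to
			 * lean on: for example, a 64-bit "dst %= imm" first loads imm
			 * into TMP_REG_1 and is then emitted roughly as
			 *   divdu tmp2, dst, tmp1; mulld tmp1, tmp1, tmp2; sub dst, dst, tmp1
			 * i.e. dst - (dst / imm) * imm, with the divisions unsigned
			 * (divwu/divdu) as BPF semantics require.
			 */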
468156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
469156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
4703a181237SBalamuruhan S 			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
471156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
472156d0e29SNaveen N. Rao 
473156d0e29SNaveen N. Rao 		/*
474156d0e29SNaveen N. Rao 		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
475156d0e29SNaveen N. Rao 		 */
476156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
477156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
4783a181237SBalamuruhan S 			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
479156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
480156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
481156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
482156d0e29SNaveen N. Rao 			if (!IMM_H(imm))
4833a181237SBalamuruhan S 				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
484156d0e29SNaveen N. Rao 			else {
485156d0e29SNaveen N. Rao 				/* Sign-extended */
486156d0e29SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
4873a181237SBalamuruhan S 				EMIT(PPC_RAW_AND(dst_reg, dst_reg, b2p[TMP_REG_1]));
488156d0e29SNaveen N. Rao 			}
489156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
490156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
491156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
4923a181237SBalamuruhan S 			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
493156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
494156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
495156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
496156d0e29SNaveen N. Rao 			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
497156d0e29SNaveen N. Rao 				/* Sign-extended */
498156d0e29SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
4993a181237SBalamuruhan S 				EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_1]));
500156d0e29SNaveen N. Rao 			} else {
501156d0e29SNaveen N. Rao 				if (IMM_L(imm))
5023a181237SBalamuruhan S 					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
503156d0e29SNaveen N. Rao 				if (IMM_H(imm))
5043a181237SBalamuruhan S 					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
505156d0e29SNaveen N. Rao 			}
506156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
507156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
508156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
5093a181237SBalamuruhan S 			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
510156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
511156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
512156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
513156d0e29SNaveen N. Rao 			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
514156d0e29SNaveen N. Rao 				/* Sign-extended */
515156d0e29SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
5163a181237SBalamuruhan S 				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]));
517156d0e29SNaveen N. Rao 			} else {
518156d0e29SNaveen N. Rao 				if (IMM_L(imm))
5193a181237SBalamuruhan S 					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
520156d0e29SNaveen N. Rao 				if (IMM_H(imm))
5213a181237SBalamuruhan S 					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
522156d0e29SNaveen N. Rao 			}
523156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
524156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
525156d0e29SNaveen N. Rao 			/* slw clears top 32 bits */
5263a181237SBalamuruhan S 			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
527a4c92773SJiong Wang 			/* skip zero extension move, but set address map. */
528a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
529a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
530156d0e29SNaveen N. Rao 			break;
531156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
5323a181237SBalamuruhan S 			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
533156d0e29SNaveen N. Rao 			break;
534156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
535156d0e29SNaveen N. Rao 			/* with imm 0, we still need to clear top 32 bits */
5363a181237SBalamuruhan S 			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
537a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
538a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
539156d0e29SNaveen N. Rao 			break;
540156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
541156d0e29SNaveen N. Rao 			if (imm != 0)
5423a181237SBalamuruhan S 				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
543156d0e29SNaveen N. Rao 			break;
544156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
5453a181237SBalamuruhan S 			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
546a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
547a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
548156d0e29SNaveen N. Rao 			break;
549156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
5503a181237SBalamuruhan S 			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
551156d0e29SNaveen N. Rao 			break;
552156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
5533a181237SBalamuruhan S 			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
554a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
555a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
556156d0e29SNaveen N. Rao 			break;
557156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
558156d0e29SNaveen N. Rao 			if (imm != 0)
5593a181237SBalamuruhan S 				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
560156d0e29SNaveen N. Rao 			break;
56144cf43c0SJiong Wang 		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
5623a181237SBalamuruhan S 			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
56344cf43c0SJiong Wang 			goto bpf_alu32_trunc;
564156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
5653a181237SBalamuruhan S 			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
566156d0e29SNaveen N. Rao 			break;
56744cf43c0SJiong Wang 		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
5683a181237SBalamuruhan S 			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
56944cf43c0SJiong Wang 			goto bpf_alu32_trunc;
570156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
571156d0e29SNaveen N. Rao 			if (imm != 0)
5723a181237SBalamuruhan S 				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
573156d0e29SNaveen N. Rao 			break;
574156d0e29SNaveen N. Rao 
575156d0e29SNaveen N. Rao 		/*
576156d0e29SNaveen N. Rao 		 * MOV
577156d0e29SNaveen N. Rao 		 */
578156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
579156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
580a4c92773SJiong Wang 			if (imm == 1) {
581a4c92773SJiong Wang 				/* special mov32 for zext */
5823a181237SBalamuruhan S 				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
583a4c92773SJiong Wang 				break;
584a4c92773SJiong Wang 			}
5853a181237SBalamuruhan S 			EMIT(PPC_RAW_MR(dst_reg, src_reg));
586156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
587156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
588156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
589156d0e29SNaveen N. Rao 			PPC_LI32(dst_reg, imm);
590156d0e29SNaveen N. Rao 			if (imm < 0)
591156d0e29SNaveen N. Rao 				goto bpf_alu32_trunc;
592a4c92773SJiong Wang 			else if (insn_is_zext(&insn[i + 1]))
593a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
594156d0e29SNaveen N. Rao 			break;
595156d0e29SNaveen N. Rao 
596156d0e29SNaveen N. Rao bpf_alu32_trunc:
597156d0e29SNaveen N. Rao 		/* Truncate to 32-bits */
598a4c92773SJiong Wang 		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
5993a181237SBalamuruhan S 			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
600156d0e29SNaveen N. Rao 		break;
601156d0e29SNaveen N. Rao 
602156d0e29SNaveen N. Rao 		/*
603156d0e29SNaveen N. Rao 		 * BPF_FROM_BE/LE
604156d0e29SNaveen N. Rao 		 */
605156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_END | BPF_FROM_LE:
606156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_END | BPF_FROM_BE:
607156d0e29SNaveen N. Rao #ifdef __BIG_ENDIAN__
608156d0e29SNaveen N. Rao 			if (BPF_SRC(code) == BPF_FROM_BE)
609156d0e29SNaveen N. Rao 				goto emit_clear;
610156d0e29SNaveen N. Rao #else /* !__BIG_ENDIAN__ */
611156d0e29SNaveen N. Rao 			if (BPF_SRC(code) == BPF_FROM_LE)
612156d0e29SNaveen N. Rao 				goto emit_clear;
613156d0e29SNaveen N. Rao #endif
614156d0e29SNaveen N. Rao 			switch (imm) {
615156d0e29SNaveen N. Rao 			case 16:
616156d0e29SNaveen N. Rao 				/* Rotate 8 bits left & mask with 0x0000ff00 */
6173a181237SBalamuruhan S 				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23));
618156d0e29SNaveen N. Rao 				/* Rotate 8 bits right & insert LSB to reg */
6193a181237SBalamuruhan S 				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31));
620156d0e29SNaveen N. Rao 				/* Move result back to dst_reg */
6213a181237SBalamuruhan S 				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
622156d0e29SNaveen N. Rao 				break;
623156d0e29SNaveen N. Rao 			case 32:
624156d0e29SNaveen N. Rao 				/*
625156d0e29SNaveen N. Rao 				 * Rotate word left by 8 bits:
626156d0e29SNaveen N. Rao 				 * 2 bytes are already in their final position
627156d0e29SNaveen N. Rao 				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
628156d0e29SNaveen N. Rao 				 */
6293a181237SBalamuruhan S 				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31));
630156d0e29SNaveen N. Rao 				/* Rotate 24 bits and insert byte 1 */
6313a181237SBalamuruhan S 				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7));
632156d0e29SNaveen N. Rao 				/* Rotate 24 bits and insert byte 3 */
6333a181237SBalamuruhan S 				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23));
6343a181237SBalamuruhan S 				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
635156d0e29SNaveen N. Rao 				break;
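				/*
				 * Concretely, for the 32-bit swap above: with dst bytes
				 * B0 B1 B2 B3 (MSB first), the rlwinm leaves B1 B2 B3 B0
				 * in the temp (bytes 2 and 4 already final), the first
				 * rlwimi patches in B3 as byte 1 and the second patches
				 * in B1 as byte 3, giving B3 B2 B1 B0.
				 */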
636156d0e29SNaveen N. Rao 			case 64:
637156d0e29SNaveen N. Rao 				/*
638156d0e29SNaveen N. Rao 				 * Way easier and faster(?) to store the value
639156d0e29SNaveen N. Rao 				 * into stack and then use ldbrx
640156d0e29SNaveen N. Rao 				 *
641156d0e29SNaveen N. Rao 				 * ctx->seen will be reliable in pass2, but
642156d0e29SNaveen N. Rao 				 * the instructions generated will remain the
643156d0e29SNaveen N. Rao 				 * same across all passes
644156d0e29SNaveen N. Rao 				 */
64586be36f6SNaveen N. Rao 				PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
6463a181237SBalamuruhan S 				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
6473a181237SBalamuruhan S 				EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
648156d0e29SNaveen N. Rao 				break;
649156d0e29SNaveen N. Rao 			}
650156d0e29SNaveen N. Rao 			break;
651156d0e29SNaveen N. Rao 
652156d0e29SNaveen N. Rao emit_clear:
653156d0e29SNaveen N. Rao 			switch (imm) {
654156d0e29SNaveen N. Rao 			case 16:
655156d0e29SNaveen N. Rao 				/* zero-extend 16 bits into 64 bits */
6563a181237SBalamuruhan S 				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
657a4c92773SJiong Wang 				if (insn_is_zext(&insn[i + 1]))
658a4c92773SJiong Wang 					addrs[++i] = ctx->idx * 4;
659156d0e29SNaveen N. Rao 				break;
660156d0e29SNaveen N. Rao 			case 32:
661a4c92773SJiong Wang 				if (!fp->aux->verifier_zext)
662156d0e29SNaveen N. Rao 					/* zero-extend 32 bits into 64 bits */
6633a181237SBalamuruhan S 					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
664156d0e29SNaveen N. Rao 				break;
665156d0e29SNaveen N. Rao 			case 64:
666156d0e29SNaveen N. Rao 				/* nop */
667156d0e29SNaveen N. Rao 				break;
668156d0e29SNaveen N. Rao 			}
669156d0e29SNaveen N. Rao 			break;
670156d0e29SNaveen N. Rao 
671156d0e29SNaveen N. Rao 		/*
672f5e81d11SDaniel Borkmann 		 * BPF_ST NOSPEC (speculation barrier)
673f5e81d11SDaniel Borkmann 		 */
674f5e81d11SDaniel Borkmann 		case BPF_ST | BPF_NOSPEC:
675b7540d62SNaveen N. Rao 			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
676b7540d62SNaveen N. Rao 					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
677b7540d62SNaveen N. Rao 				break;
678b7540d62SNaveen N. Rao 
679b7540d62SNaveen N. Rao 			switch (stf_barrier) {
680b7540d62SNaveen N. Rao 			case STF_BARRIER_EIEIO:
681b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_EIEIO() | 0x02000000);
682b7540d62SNaveen N. Rao 				break;
683b7540d62SNaveen N. Rao 			case STF_BARRIER_SYNC_ORI:
684b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_SYNC());
685b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
686b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
687b7540d62SNaveen N. Rao 				break;
688b7540d62SNaveen N. Rao 			case STF_BARRIER_FALLBACK:
689b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
690b7540d62SNaveen N. Rao 				PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
691b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_MTCTR(12));
692b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_BCTRL());
693b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
694b7540d62SNaveen N. Rao 				break;
695b7540d62SNaveen N. Rao 			case STF_BARRIER_NONE:
696b7540d62SNaveen N. Rao 				break;
697b7540d62SNaveen N. Rao 			}
698f5e81d11SDaniel Borkmann 			break;
699f5e81d11SDaniel Borkmann 
700f5e81d11SDaniel Borkmann 		/*
701156d0e29SNaveen N. Rao 		 * BPF_ST(X)
702156d0e29SNaveen N. Rao 		 */
703156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
704156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
705156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
7063a181237SBalamuruhan S 				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
707156d0e29SNaveen N. Rao 				src_reg = b2p[TMP_REG_1];
708156d0e29SNaveen N. Rao 			}
7093a181237SBalamuruhan S 			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
710156d0e29SNaveen N. Rao 			break;
711156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
712156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
713156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
7143a181237SBalamuruhan S 				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
715156d0e29SNaveen N. Rao 				src_reg = b2p[TMP_REG_1];
716156d0e29SNaveen N. Rao 			}
7173a181237SBalamuruhan S 			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
718156d0e29SNaveen N. Rao 			break;
719156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
720156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
721156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
722156d0e29SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
723156d0e29SNaveen N. Rao 				src_reg = b2p[TMP_REG_1];
724156d0e29SNaveen N. Rao 			}
7253a181237SBalamuruhan S 			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
726156d0e29SNaveen N. Rao 			break;
727156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
728156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
729156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
730156d0e29SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
731156d0e29SNaveen N. Rao 				src_reg = b2p[TMP_REG_1];
732156d0e29SNaveen N. Rao 			}
73386be36f6SNaveen N. Rao 			PPC_BPF_STL(src_reg, dst_reg, off);
734156d0e29SNaveen N. Rao 			break;
735156d0e29SNaveen N. Rao 
736156d0e29SNaveen N. Rao 		/*
73791c960b0SBrendan Jackman 		 * BPF_STX ATOMIC (atomic ops)
738156d0e29SNaveen N. Rao 		 */
73991c960b0SBrendan Jackman 		case BPF_STX | BPF_ATOMIC | BPF_W:
740419ac821SNaveen N. Rao 			if (imm != BPF_ADD) {
74191c960b0SBrendan Jackman 				pr_err_ratelimited(
74291c960b0SBrendan Jackman 					"eBPF filter atomic op code %02x (@%d) unsupported\n",
74391c960b0SBrendan Jackman 					code, i);
74491c960b0SBrendan Jackman 				return -ENOTSUPP;
74591c960b0SBrendan Jackman 			}
74691c960b0SBrendan Jackman 
747156d0e29SNaveen N. Rao 			/* *(u32 *)(dst + off) += src */
74891c960b0SBrendan Jackman 
749156d0e29SNaveen N. Rao 			/* Get EA into TMP_REG_1 */
7503a181237SBalamuruhan S 			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
751b9c1e60eSDaniel Borkmann 			tmp_idx = ctx->idx * 4;
752156d0e29SNaveen N. Rao 			/* load value from memory into TMP_REG_2 */
75306541865SBalamuruhan S 			EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
754156d0e29SNaveen N. Rao 			/* add value from src_reg into this */
75506541865SBalamuruhan S 			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
756156d0e29SNaveen N. Rao 			/* store result back */
7573a181237SBalamuruhan S 			EMIT(PPC_RAW_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
758156d0e29SNaveen N. Rao 			/* we're done if this succeeded */
759b9c1e60eSDaniel Borkmann 			PPC_BCC_SHORT(COND_NE, tmp_idx);
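			/*
			 * If the stwcx. lost the reservation (CR0[EQ] clear), branch
			 * back to the lwarx at tmp_idx and retry -- the usual
			 * larx/stcx. loop for an atomic read-modify-write.
			 */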
760156d0e29SNaveen N. Rao 			break;
76191c960b0SBrendan Jackman 		case BPF_STX | BPF_ATOMIC | BPF_DW:
762419ac821SNaveen N. Rao 			if (imm != BPF_ADD) {
76391c960b0SBrendan Jackman 				pr_err_ratelimited(
76491c960b0SBrendan Jackman 					"eBPF filter atomic op code %02x (@%d) unsupported\n",
76591c960b0SBrendan Jackman 					code, i);
76691c960b0SBrendan Jackman 				return -ENOTSUPP;
76791c960b0SBrendan Jackman 			}
768156d0e29SNaveen N. Rao 			/* *(u64 *)(dst + off) += src */
76991c960b0SBrendan Jackman 
7703a181237SBalamuruhan S 			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
771b9c1e60eSDaniel Borkmann 			tmp_idx = ctx->idx * 4;
77206541865SBalamuruhan S 			EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
77306541865SBalamuruhan S 			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
77406541865SBalamuruhan S 			EMIT(PPC_RAW_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
775b9c1e60eSDaniel Borkmann 			PPC_BCC_SHORT(COND_NE, tmp_idx);
776156d0e29SNaveen N. Rao 			break;
777156d0e29SNaveen N. Rao 
778156d0e29SNaveen N. Rao 		/*
779156d0e29SNaveen N. Rao 		 * BPF_LDX
780156d0e29SNaveen N. Rao 		 */
781156d0e29SNaveen N. Rao 		/* dst = *(u8 *)(ul) (src + off) */
782156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_B:
783983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
784156d0e29SNaveen N. Rao 		/* dst = *(u16 *)(ul) (src + off) */
785156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_H:
786983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
787156d0e29SNaveen N. Rao 		/* dst = *(u32 *)(ul) (src + off) */
788156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_W:
789983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
790156d0e29SNaveen N. Rao 		/* dst = *(u64 *)(ul) (src + off) */
791156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_DW:
792983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
7939c70c714SRavi Bangoria 			/*
7949c70c714SRavi Bangoria 			 * A PTR_TO_BTF_ID pointer used with BPF_PROBE_MEM can be a valid
7959c70c714SRavi Bangoria 			 * kernel pointer or NULL, but never a userspace address, so only
7969c70c714SRavi Bangoria 			 * execute the load if the address is a kernel address (see
7979c70c714SRavi Bangoria 			 * is_kernel_addr()); otherwise set dst_reg = 0 and move on.
7989c70c714SRavi Bangoria 			 */
7999c70c714SRavi Bangoria 			if (BPF_MODE(code) == BPF_PROBE_MEM) {
8009c70c714SRavi Bangoria 				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], src_reg, off));
8019c70c714SRavi Bangoria 				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
8029c70c714SRavi Bangoria 					PPC_LI64(b2p[TMP_REG_2], 0x8000000000000000ul);
8039c70c714SRavi Bangoria 				else /* BOOK3S_64 */
8049c70c714SRavi Bangoria 					PPC_LI64(b2p[TMP_REG_2], PAGE_OFFSET);
8059c70c714SRavi Bangoria 				EMIT(PPC_RAW_CMPLD(b2p[TMP_REG_1], b2p[TMP_REG_2]));
8069c70c714SRavi Bangoria 				PPC_BCC(COND_GT, (ctx->idx + 4) * 4);
8079c70c714SRavi Bangoria 				EMIT(PPC_RAW_LI(dst_reg, 0));
8089c70c714SRavi Bangoria 				/*
8099c70c714SRavi Bangoria 				 * Check if 'off' is word aligned because PPC_BPF_LL()
8109c70c714SRavi Bangoria 				 * (BPF_DW case) generates two instructions if 'off' is not
8119c70c714SRavi Bangoria 				 * word-aligned and one instruction otherwise.
8129c70c714SRavi Bangoria 				 */
8139c70c714SRavi Bangoria 				if (BPF_SIZE(code) == BPF_DW && (off & 3))
8149c70c714SRavi Bangoria 					PPC_JMP((ctx->idx + 3) * 4);
8159c70c714SRavi Bangoria 				else
8169c70c714SRavi Bangoria 					PPC_JMP((ctx->idx + 2) * 4);
8179c70c714SRavi Bangoria 			}
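			/*
			 * Branch-target arithmetic above, as a sketch: PPC_BCC() expands
			 * to two instructions here, so (ctx->idx + 4) * 4 is the load
			 * emitted below (skipping the "li dst_reg, 0" and the PPC_JMP);
			 * the PPC_JMP target in turn skips past that load, which is two
			 * instructions for an unaligned BPF_DW (hence +3) and one
			 * otherwise (+2).
			 */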
8189c70c714SRavi Bangoria 
819efa95f03SHari Bathini 			switch (size) {
820efa95f03SHari Bathini 			case BPF_B:
821efa95f03SHari Bathini 				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
822efa95f03SHari Bathini 				break;
823efa95f03SHari Bathini 			case BPF_H:
824efa95f03SHari Bathini 				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
825efa95f03SHari Bathini 				break;
826efa95f03SHari Bathini 			case BPF_W:
827efa95f03SHari Bathini 				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
828efa95f03SHari Bathini 				break;
829efa95f03SHari Bathini 			case BPF_DW:
83086be36f6SNaveen N. Rao 				PPC_BPF_LL(dst_reg, src_reg, off);
831156d0e29SNaveen N. Rao 				break;
832efa95f03SHari Bathini 			}
833efa95f03SHari Bathini 
834efa95f03SHari Bathini 			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
835efa95f03SHari Bathini 				addrs[++i] = ctx->idx * 4;
836983bdc02SRavi Bangoria 
837983bdc02SRavi Bangoria 			if (BPF_MODE(code) == BPF_PROBE_MEM) {
838983bdc02SRavi Bangoria 				ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
839983bdc02SRavi Bangoria 							    4, dst_reg);
840983bdc02SRavi Bangoria 				if (ret)
841983bdc02SRavi Bangoria 					return ret;
842983bdc02SRavi Bangoria 			}
843efa95f03SHari Bathini 			break;
844156d0e29SNaveen N. Rao 
845156d0e29SNaveen N. Rao 		/*
846156d0e29SNaveen N. Rao 		 * Doubleword load
847156d0e29SNaveen N. Rao 		 * 16 byte instruction that uses two 'struct bpf_insn'
848156d0e29SNaveen N. Rao 		 */
849156d0e29SNaveen N. Rao 		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
850156d0e29SNaveen N. Rao 			imm64 = ((u64)(u32) insn[i].imm) |
851156d0e29SNaveen N. Rao 				    (((u64)(u32) insn[i+1].imm) << 32);
852*f9320c49SNaveen N. Rao 			tmp_idx = ctx->idx;
853*f9320c49SNaveen N. Rao 			PPC_LI64(dst_reg, imm64);
854*f9320c49SNaveen N. Rao 			/* padding to allow full 5 instructions for later patching */
855*f9320c49SNaveen N. Rao 			for (j = ctx->idx - tmp_idx; j < 5; j++)
856*f9320c49SNaveen N. Rao 				EMIT(PPC_RAW_NOP());
857156d0e29SNaveen N. Rao 			/* Adjust for two bpf instructions */
858156d0e29SNaveen N. Rao 			addrs[++i] = ctx->idx * 4;
859156d0e29SNaveen N. Rao 			break;
860156d0e29SNaveen N. Rao 
861156d0e29SNaveen N. Rao 		/*
862156d0e29SNaveen N. Rao 		 * Return/Exit
863156d0e29SNaveen N. Rao 		 */
864156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_EXIT:
865156d0e29SNaveen N. Rao 			/*
866156d0e29SNaveen N. Rao 			 * If this isn't the very last instruction, branch to
867156d0e29SNaveen N. Rao 			 * the epilogue. If we _are_ the last instruction,
868156d0e29SNaveen N. Rao 			 * we'll just fall through to the epilogue.
869156d0e29SNaveen N. Rao 			 */
870156d0e29SNaveen N. Rao 			if (i != flen - 1)
871156d0e29SNaveen N. Rao 				PPC_JMP(exit_addr);
872156d0e29SNaveen N. Rao 			/* else fall through to the epilogue */
873156d0e29SNaveen N. Rao 			break;
874156d0e29SNaveen N. Rao 
875156d0e29SNaveen N. Rao 		/*
8768484ce83SSandipan Das 		 * Call kernel helper or bpf function
877156d0e29SNaveen N. Rao 		 */
878156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_CALL:
879156d0e29SNaveen N. Rao 			ctx->seen |= SEEN_FUNC;
8808484ce83SSandipan Das 
88104c04205SRavi Bangoria 			ret = bpf_jit_get_func_addr(fp, &insn[i], false,
882e2c95a61SDaniel Borkmann 						    &func_addr, &func_addr_fixed);
883e2c95a61SDaniel Borkmann 			if (ret < 0)
884e2c95a61SDaniel Borkmann 				return ret;
885156d0e29SNaveen N. Rao 
886e2c95a61SDaniel Borkmann 			if (func_addr_fixed)
887e2c95a61SDaniel Borkmann 				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
888e2c95a61SDaniel Borkmann 			else
889e2c95a61SDaniel Borkmann 				bpf_jit_emit_func_call_rel(image, ctx, func_addr);
890156d0e29SNaveen N. Rao 			/* move return value from r3 to BPF_REG_0 */
8913a181237SBalamuruhan S 			EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
892156d0e29SNaveen N. Rao 			break;
893156d0e29SNaveen N. Rao 
894156d0e29SNaveen N. Rao 		/*
895156d0e29SNaveen N. Rao 		 * Jumps and branches
896156d0e29SNaveen N. Rao 		 */
897156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JA:
898156d0e29SNaveen N. Rao 			PPC_JMP(addrs[i + 1 + off]);
899156d0e29SNaveen N. Rao 			break;
900156d0e29SNaveen N. Rao 
901156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGT | BPF_K:
902156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGT | BPF_X:
903156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGT | BPF_K:
904156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGT | BPF_X:
9055f645996SJiong Wang 		case BPF_JMP32 | BPF_JGT | BPF_K:
9065f645996SJiong Wang 		case BPF_JMP32 | BPF_JGT | BPF_X:
9075f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGT | BPF_K:
9085f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGT | BPF_X:
909156d0e29SNaveen N. Rao 			true_cond = COND_GT;
910156d0e29SNaveen N. Rao 			goto cond_branch;
91120dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLT | BPF_K:
91220dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLT | BPF_X:
91320dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLT | BPF_K:
91420dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLT | BPF_X:
9155f645996SJiong Wang 		case BPF_JMP32 | BPF_JLT | BPF_K:
9165f645996SJiong Wang 		case BPF_JMP32 | BPF_JLT | BPF_X:
9175f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLT | BPF_K:
9185f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLT | BPF_X:
91920dbf5ccSDaniel Borkmann 			true_cond = COND_LT;
92020dbf5ccSDaniel Borkmann 			goto cond_branch;
921156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGE | BPF_K:
922156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGE | BPF_X:
923156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGE | BPF_K:
924156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGE | BPF_X:
9255f645996SJiong Wang 		case BPF_JMP32 | BPF_JGE | BPF_K:
9265f645996SJiong Wang 		case BPF_JMP32 | BPF_JGE | BPF_X:
9275f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGE | BPF_K:
9285f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGE | BPF_X:
929156d0e29SNaveen N. Rao 			true_cond = COND_GE;
930156d0e29SNaveen N. Rao 			goto cond_branch;
93120dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLE | BPF_K:
93220dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLE | BPF_X:
93320dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLE | BPF_K:
93420dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLE | BPF_X:
9355f645996SJiong Wang 		case BPF_JMP32 | BPF_JLE | BPF_K:
9365f645996SJiong Wang 		case BPF_JMP32 | BPF_JLE | BPF_X:
9375f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLE | BPF_K:
9385f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLE | BPF_X:
93920dbf5ccSDaniel Borkmann 			true_cond = COND_LE;
94020dbf5ccSDaniel Borkmann 			goto cond_branch;
941156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JEQ | BPF_K:
942156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JEQ | BPF_X:
9435f645996SJiong Wang 		case BPF_JMP32 | BPF_JEQ | BPF_K:
9445f645996SJiong Wang 		case BPF_JMP32 | BPF_JEQ | BPF_X:
945156d0e29SNaveen N. Rao 			true_cond = COND_EQ;
946156d0e29SNaveen N. Rao 			goto cond_branch;
947156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JNE | BPF_K:
948156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JNE | BPF_X:
9495f645996SJiong Wang 		case BPF_JMP32 | BPF_JNE | BPF_K:
9505f645996SJiong Wang 		case BPF_JMP32 | BPF_JNE | BPF_X:
951156d0e29SNaveen N. Rao 			true_cond = COND_NE;
952156d0e29SNaveen N. Rao 			goto cond_branch;
953156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSET | BPF_K:
954156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSET | BPF_X:
9555f645996SJiong Wang 		case BPF_JMP32 | BPF_JSET | BPF_K:
9565f645996SJiong Wang 		case BPF_JMP32 | BPF_JSET | BPF_X:
957156d0e29SNaveen N. Rao 			true_cond = COND_NE;
958156d0e29SNaveen N. Rao 			/* Fall through */
959156d0e29SNaveen N. Rao 
960156d0e29SNaveen N. Rao cond_branch:
961156d0e29SNaveen N. Rao 			switch (code) {
962156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGT | BPF_X:
96320dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLT | BPF_X:
964156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGE | BPF_X:
96520dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLE | BPF_X:
966156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JEQ | BPF_X:
967156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JNE | BPF_X:
9685f645996SJiong Wang 			case BPF_JMP32 | BPF_JGT | BPF_X:
9695f645996SJiong Wang 			case BPF_JMP32 | BPF_JLT | BPF_X:
9705f645996SJiong Wang 			case BPF_JMP32 | BPF_JGE | BPF_X:
9715f645996SJiong Wang 			case BPF_JMP32 | BPF_JLE | BPF_X:
9725f645996SJiong Wang 			case BPF_JMP32 | BPF_JEQ | BPF_X:
9735f645996SJiong Wang 			case BPF_JMP32 | BPF_JNE | BPF_X:
974156d0e29SNaveen N. Rao 				/* unsigned comparison */
9755f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP32)
9763a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
9775f645996SJiong Wang 				else
9783a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
979156d0e29SNaveen N. Rao 				break;
980156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGT | BPF_X:
98120dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLT | BPF_X:
982156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGE | BPF_X:
98320dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLE | BPF_X:
9845f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGT | BPF_X:
9855f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLT | BPF_X:
9865f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGE | BPF_X:
9875f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLE | BPF_X:
988156d0e29SNaveen N. Rao 				/* signed comparison */
9895f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP32)
9903a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
9915f645996SJiong Wang 				else
9923a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
993156d0e29SNaveen N. Rao 				break;
994156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSET | BPF_X:
9955f645996SJiong Wang 			case BPF_JMP32 | BPF_JSET | BPF_X:
9965f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP) {
9973a181237SBalamuruhan S 					EMIT(PPC_RAW_AND_DOT(b2p[TMP_REG_1], dst_reg,
9983a181237SBalamuruhan S 						    src_reg));
9995f645996SJiong Wang 				} else {
10005f645996SJiong Wang 					int tmp_reg = b2p[TMP_REG_1];
10015f645996SJiong Wang 
10023a181237SBalamuruhan S 					EMIT(PPC_RAW_AND(tmp_reg, dst_reg, src_reg));
10033a181237SBalamuruhan S 					EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
10043a181237SBalamuruhan S 						       31));
10055f645996SJiong Wang 				}
1006156d0e29SNaveen N. Rao 				break;
1007156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JNE | BPF_K:
1008156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JEQ | BPF_K:
1009156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGT | BPF_K:
101020dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLT | BPF_K:
1011156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGE | BPF_K:
101220dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLE | BPF_K:
10135f645996SJiong Wang 			case BPF_JMP32 | BPF_JNE | BPF_K:
10145f645996SJiong Wang 			case BPF_JMP32 | BPF_JEQ | BPF_K:
10155f645996SJiong Wang 			case BPF_JMP32 | BPF_JGT | BPF_K:
10165f645996SJiong Wang 			case BPF_JMP32 | BPF_JLT | BPF_K:
10175f645996SJiong Wang 			case BPF_JMP32 | BPF_JGE | BPF_K:
10185f645996SJiong Wang 			case BPF_JMP32 | BPF_JLE | BPF_K:
10195f645996SJiong Wang 			{
10205f645996SJiong Wang 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
10215f645996SJiong Wang 
1022156d0e29SNaveen N. Rao 				/*
1023156d0e29SNaveen N. Rao 				 * Need sign-extended load, so only positive
1024156d0e29SNaveen N. Rao 				 * values can be used as imm in cmpldi
1025156d0e29SNaveen N. Rao 				 */
10265f645996SJiong Wang 				if (imm >= 0 && imm < 32768) {
10275f645996SJiong Wang 					if (is_jmp32)
10283a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
10295f645996SJiong Wang 					else
10303a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
10315f645996SJiong Wang 				} else {
1032156d0e29SNaveen N. Rao 					/* sign-extending load */
1033156d0e29SNaveen N. Rao 					PPC_LI32(b2p[TMP_REG_1], imm);
1034156d0e29SNaveen N. Rao 					/* ... but unsigned comparison */
10355f645996SJiong Wang 					if (is_jmp32)
10363a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPLW(dst_reg,
10373a181237SBalamuruhan S 							  b2p[TMP_REG_1]));
10385f645996SJiong Wang 					else
10393a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPLD(dst_reg,
10403a181237SBalamuruhan S 							  b2p[TMP_REG_1]));
1041156d0e29SNaveen N. Rao 				}
1042156d0e29SNaveen N. Rao 				break;
10435f645996SJiong Wang 			}
1044156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGT | BPF_K:
104520dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLT | BPF_K:
1046156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGE | BPF_K:
104720dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLE | BPF_K:
10485f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGT | BPF_K:
10495f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLT | BPF_K:
10505f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGE | BPF_K:
10515f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLE | BPF_K:
10525f645996SJiong Wang 			{
10535f645996SJiong Wang 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
10545f645996SJiong Wang 
1055156d0e29SNaveen N. Rao 				/*
1056156d0e29SNaveen N. Rao 				 * signed comparison, so any 16-bit value
1057156d0e29SNaveen N. Rao 				 * can be used in cmpdi
1058156d0e29SNaveen N. Rao 				 */
10595f645996SJiong Wang 				if (imm >= -32768 && imm < 32768) {
10605f645996SJiong Wang 					if (is_jmp32)
10613a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
10625f645996SJiong Wang 					else
10633a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
10645f645996SJiong Wang 				} else {
1065156d0e29SNaveen N. Rao 					PPC_LI32(b2p[TMP_REG_1], imm);
10665f645996SJiong Wang 					if (is_jmp32)
10673a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPW(dst_reg,
10683a181237SBalamuruhan S 							 b2p[TMP_REG_1]));
10695f645996SJiong Wang 					else
10703a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPD(dst_reg,
10713a181237SBalamuruhan S 							 b2p[TMP_REG_1]));
1072156d0e29SNaveen N. Rao 				}
1073156d0e29SNaveen N. Rao 				break;
10745f645996SJiong Wang 			}
1075156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSET | BPF_K:
10765f645996SJiong Wang 			case BPF_JMP32 | BPF_JSET | BPF_K:
1077156d0e29SNaveen N. Rao 				/* andi does not sign-extend the immediate */
1078156d0e29SNaveen N. Rao 				if (imm >= 0 && imm < 32768)
1079156d0e29SNaveen N. Rao 					/* PPC_ANDI is _only/always_ dot-form */
10803a181237SBalamuruhan S 					EMIT(PPC_RAW_ANDI(b2p[TMP_REG_1], dst_reg, imm));
1081156d0e29SNaveen N. Rao 				else {
10825f645996SJiong Wang 					int tmp_reg = b2p[TMP_REG_1];
10835f645996SJiong Wang 
10845f645996SJiong Wang 					PPC_LI32(tmp_reg, imm);
10855f645996SJiong Wang 					if (BPF_CLASS(code) == BPF_JMP) {
10863a181237SBalamuruhan S 						EMIT(PPC_RAW_AND_DOT(tmp_reg, dst_reg,
10873a181237SBalamuruhan S 							    tmp_reg));
10885f645996SJiong Wang 					} else {
10893a181237SBalamuruhan S 						EMIT(PPC_RAW_AND(tmp_reg, dst_reg,
10903a181237SBalamuruhan S 							tmp_reg));
10913a181237SBalamuruhan S 						EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg,
10923a181237SBalamuruhan S 							       0, 0, 31));
10935f645996SJiong Wang 					}
1094156d0e29SNaveen N. Rao 				}
1095156d0e29SNaveen N. Rao 				break;
1096156d0e29SNaveen N. Rao 			}
1097156d0e29SNaveen N. Rao 			PPC_BCC(true_cond, addrs[i + 1 + off]);
1098156d0e29SNaveen N. Rao 			break;
1099156d0e29SNaveen N. Rao 
1100156d0e29SNaveen N. Rao 		/*
1101ce076141SNaveen N. Rao 		 * Tail call
1102156d0e29SNaveen N. Rao 		 */
110371189fa9SAlexei Starovoitov 		case BPF_JMP | BPF_TAIL_CALL:
1104ce076141SNaveen N. Rao 			ctx->seen |= SEEN_TAILCALL;
11053832ba4eSNaveen N. Rao 			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
11063832ba4eSNaveen N. Rao 			if (ret < 0)
11073832ba4eSNaveen N. Rao 				return ret;
1108ce076141SNaveen N. Rao 			break;
1109156d0e29SNaveen N. Rao 
1110156d0e29SNaveen N. Rao 		default:
1111156d0e29SNaveen N. Rao 			/*
1112156d0e29SNaveen N. Rao 			 * The filter contains something cruel & unusual.
1113156d0e29SNaveen N. Rao 			 * We don't handle it, but also there shouldn't be
1114156d0e29SNaveen N. Rao 			 * anything missing from our list.
1115156d0e29SNaveen N. Rao 			 */
1116156d0e29SNaveen N. Rao 			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1117156d0e29SNaveen N. Rao 					code, i);
1118156d0e29SNaveen N. Rao 			return -ENOTSUPP;
1119156d0e29SNaveen N. Rao 		}
1120156d0e29SNaveen N. Rao 	}
1121156d0e29SNaveen N. Rao 
1122156d0e29SNaveen N. Rao 	/* Set end-of-body-code address for exit. */
1123156d0e29SNaveen N. Rao 	addrs[i] = ctx->idx * 4;
1124156d0e29SNaveen N. Rao 
1125156d0e29SNaveen N. Rao 	return 0;
1126156d0e29SNaveen N. Rao }