xref: /linux/arch/powerpc/net/bpf_jit_comp64.c (revision 3f5f766d5f7f95a69a630da3544a1a0cee1cdddf)
1b886d83cSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2156d0e29SNaveen N. Rao /*
3156d0e29SNaveen N. Rao  * bpf_jit_comp64.c: eBPF JIT compiler
4156d0e29SNaveen N. Rao  *
5156d0e29SNaveen N. Rao  * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
6156d0e29SNaveen N. Rao  *		  IBM Corporation
7156d0e29SNaveen N. Rao  *
8156d0e29SNaveen N. Rao  * Based on the powerpc classic BPF JIT compiler by Matt Evans
9156d0e29SNaveen N. Rao  */
10156d0e29SNaveen N. Rao #include <linux/moduleloader.h>
11156d0e29SNaveen N. Rao #include <asm/cacheflush.h>
12ec0c464cSChristophe Leroy #include <asm/asm-compat.h>
13156d0e29SNaveen N. Rao #include <linux/netdevice.h>
14156d0e29SNaveen N. Rao #include <linux/filter.h>
15156d0e29SNaveen N. Rao #include <linux/if_vlan.h>
16156d0e29SNaveen N. Rao #include <asm/kprobes.h>
17ce076141SNaveen N. Rao #include <linux/bpf.h>
18b7540d62SNaveen N. Rao #include <asm/security_features.h>
19156d0e29SNaveen N. Rao 
20156d0e29SNaveen N. Rao #include "bpf_jit64.h"
21156d0e29SNaveen N. Rao 
22156d0e29SNaveen N. Rao static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
23156d0e29SNaveen N. Rao {
24156d0e29SNaveen N. Rao 	/*
25156d0e29SNaveen N. Rao 	 * We only need a stack frame if:
26156d0e29SNaveen N. Rao 	 * - we call other functions (kernel helpers), or
27156d0e29SNaveen N. Rao 	 * - the bpf program uses its stack area
28156d0e29SNaveen N. Rao 	 * The latter condition is deduced from the usage of BPF_REG_FP
29156d0e29SNaveen N. Rao 	 */
30ed573b57SChristophe Leroy 	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, b2p[BPF_REG_FP]);
31156d0e29SNaveen N. Rao }
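/*
 * For instance, a program that calls a kernel helper (SEEN_FUNC) or that
 * reads/writes its stack area through BPF_REG_FP needs a frame, while a
 * leaf program that only touches R0-R5 runs entirely out of the redzone.
 */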
32156d0e29SNaveen N. Rao 
337b847f52SNaveen N. Rao /*
347b847f52SNaveen N. Rao  * When not setting up our own stackframe, the redzone usage is:
357b847f52SNaveen N. Rao  *
367b847f52SNaveen N. Rao  *		[	prev sp		] <-------------
377b847f52SNaveen N. Rao  *		[	  ...       	] 		|
387b847f52SNaveen N. Rao  * sp (r1) --->	[    stack pointer	] --------------
39b7540d62SNaveen N. Rao  *		[   nv gpr save area	] 5*8
407b847f52SNaveen N. Rao  *		[    tail_call_cnt	] 8
41b7540d62SNaveen N. Rao  *		[    local_tmp_var	] 16
427b847f52SNaveen N. Rao  *		[   unused red zone	] 224 bytes protected
437b847f52SNaveen N. Rao  */
447b847f52SNaveen N. Rao static int bpf_jit_stack_local(struct codegen_context *ctx)
457b847f52SNaveen N. Rao {
467b847f52SNaveen N. Rao 	if (bpf_has_stack_frame(ctx))
47ac0761ebSSandipan Das 		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
487b847f52SNaveen N. Rao 	else
49b7540d62SNaveen N. Rao 		return -(BPF_PPC_STACK_SAVE + 24);
507b847f52SNaveen N. Rao }
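/*
 * Worked example for the no-stack-frame case, assuming the 5*8 byte NVR
 * save area from the layout above: bpf_jit_stack_local() returns -64, so
 * the 16-byte local_tmp_var sits at r1-64, tail_call_cnt at r1-48, and
 * r27..r31 are saved between r1-40 and r1-8, all inside the redzone.
 */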
517b847f52SNaveen N. Rao 
52ce076141SNaveen N. Rao static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
53ce076141SNaveen N. Rao {
54b7540d62SNaveen N. Rao 	return bpf_jit_stack_local(ctx) + 16;
55ce076141SNaveen N. Rao }
56ce076141SNaveen N. Rao 
577b847f52SNaveen N. Rao static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
587b847f52SNaveen N. Rao {
597b847f52SNaveen N. Rao 	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
60ac0761ebSSandipan Das 		return (bpf_has_stack_frame(ctx) ?
61ac0761ebSSandipan Das 			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
627b847f52SNaveen N. Rao 				- (8 * (32 - reg));
637b847f52SNaveen N. Rao 
647b847f52SNaveen N. Rao 	pr_err("BPF JIT is asking about unknown registers");
657b847f52SNaveen N. Rao 	BUG();
667b847f52SNaveen N. Rao }
677b847f52SNaveen N. Rao 
6840272035SChristophe Leroy void bpf_jit_realloc_regs(struct codegen_context *ctx)
6940272035SChristophe Leroy {
7040272035SChristophe Leroy }
7140272035SChristophe Leroy 
724ea76e90SChristophe Leroy void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
73156d0e29SNaveen N. Rao {
74156d0e29SNaveen N. Rao 	int i;
75156d0e29SNaveen N. Rao 
76ce076141SNaveen N. Rao 	/*
77ce076141SNaveen N. Rao 	 * Initialize tail_call_cnt if we do tail calls.
78ce076141SNaveen N. Rao 	 * Otherwise, put in NOPs so that it can be skipped when we are
79ce076141SNaveen N. Rao 	 * invoked through a tail call.
80ce076141SNaveen N. Rao 	 */
81ce076141SNaveen N. Rao 	if (ctx->seen & SEEN_TAILCALL) {
823a181237SBalamuruhan S 		EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
83ce076141SNaveen N. Rao 		/* this goes in the redzone */
84ce076141SNaveen N. Rao 		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
85ce076141SNaveen N. Rao 	} else {
863a181237SBalamuruhan S 		EMIT(PPC_RAW_NOP());
873a181237SBalamuruhan S 		EMIT(PPC_RAW_NOP());
88ce076141SNaveen N. Rao 	}
89ce076141SNaveen N. Rao 
90ce076141SNaveen N. Rao #define BPF_TAILCALL_PROLOGUE_SIZE	8
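	/*
	 * The two instructions above (li + std, or two nops) are exactly what
	 * a tail call skips: bpf_jit_emit_tail_call() branches to bpf_func +
	 * BPF_TAILCALL_PROLOGUE_SIZE so the callee keeps using the caller's
	 * tail_call_cnt instead of re-initializing it.
	 */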
91ce076141SNaveen N. Rao 
927b847f52SNaveen N. Rao 	if (bpf_has_stack_frame(ctx)) {
93156d0e29SNaveen N. Rao 		/*
94156d0e29SNaveen N. Rao 		 * We need a stack frame, but we don't necessarily need to
95156d0e29SNaveen N. Rao 		 * save/restore LR unless we call other functions
96156d0e29SNaveen N. Rao 		 */
97156d0e29SNaveen N. Rao 		if (ctx->seen & SEEN_FUNC) {
98e08021f8SChristophe Leroy 			EMIT(PPC_RAW_MFLR(_R0));
99156d0e29SNaveen N. Rao 			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
100156d0e29SNaveen N. Rao 		}
101156d0e29SNaveen N. Rao 
102ac0761ebSSandipan Das 		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
103156d0e29SNaveen N. Rao 	}
104156d0e29SNaveen N. Rao 
105156d0e29SNaveen N. Rao 	/*
106156d0e29SNaveen N. Rao 	 * Back up non-volatile regs -- BPF registers 6-10
107156d0e29SNaveen N. Rao 	 * If we haven't created our own stack frame, we save these
108156d0e29SNaveen N. Rao 	 * in the protected zone below the previous stack frame
109156d0e29SNaveen N. Rao 	 */
110156d0e29SNaveen N. Rao 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
111ed573b57SChristophe Leroy 		if (bpf_is_seen_register(ctx, b2p[i]))
1127b847f52SNaveen N. Rao 			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
113156d0e29SNaveen N. Rao 
114156d0e29SNaveen N. Rao 	/* Set up frame pointer to point to the bpf stack area */
115ed573b57SChristophe Leroy 	if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP]))
1163a181237SBalamuruhan S 		EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
1173a181237SBalamuruhan S 				STACK_FRAME_MIN_SIZE + ctx->stack_size));
118156d0e29SNaveen N. Rao }
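/*
 * For illustration, a program that calls a helper, uses a few non-volatile
 * BPF registers and touches its stack gets roughly this prologue (offsets
 * as used above):
 *
 *	<2 insns>	; tail_call_cnt init, or nops
 *	mflr	r0
 *	std	r0, PPC_LR_STKOFF(r1)
 *	stdu	r1, -(BPF_PPC_STACKFRAME + stack_size)(r1)
 *	std	<used NVRs>, bpf_jit_stack_offsetof(...)(r1)
 *	addi	r31, r1, STACK_FRAME_MIN_SIZE + stack_size
 */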
119156d0e29SNaveen N. Rao 
120ce076141SNaveen N. Rao static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
121156d0e29SNaveen N. Rao {
122156d0e29SNaveen N. Rao 	int i;
123156d0e29SNaveen N. Rao 
124156d0e29SNaveen N. Rao 	/* Restore NVRs */
125156d0e29SNaveen N. Rao 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
126ed573b57SChristophe Leroy 		if (bpf_is_seen_register(ctx, b2p[i]))
1277b847f52SNaveen N. Rao 			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
128156d0e29SNaveen N. Rao 
129156d0e29SNaveen N. Rao 	/* Tear down our stack frame */
1307b847f52SNaveen N. Rao 	if (bpf_has_stack_frame(ctx)) {
1313a181237SBalamuruhan S 		EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
132156d0e29SNaveen N. Rao 		if (ctx->seen & SEEN_FUNC) {
133156d0e29SNaveen N. Rao 			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
1343a181237SBalamuruhan S 			EMIT(PPC_RAW_MTLR(0));
135156d0e29SNaveen N. Rao 		}
136156d0e29SNaveen N. Rao 	}
137ce076141SNaveen N. Rao }
138ce076141SNaveen N. Rao 
1394ea76e90SChristophe Leroy void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
140ce076141SNaveen N. Rao {
141ce076141SNaveen N. Rao 	bpf_jit_emit_common_epilogue(image, ctx);
142ce076141SNaveen N. Rao 
143ce076141SNaveen N. Rao 	/* Move result to r3 */
1443a181237SBalamuruhan S 	EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));
145156d0e29SNaveen N. Rao 
1463a181237SBalamuruhan S 	EMIT(PPC_RAW_BLR());
147156d0e29SNaveen N. Rao }
148156d0e29SNaveen N. Rao 
149e2c95a61SDaniel Borkmann static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
150e2c95a61SDaniel Borkmann 				       u64 func)
151e2c95a61SDaniel Borkmann {
152e2c95a61SDaniel Borkmann #ifdef PPC64_ELF_ABI_v1
153e2c95a61SDaniel Borkmann 	/* func points to the function descriptor */
154e2c95a61SDaniel Borkmann 	PPC_LI64(b2p[TMP_REG_2], func);
155e2c95a61SDaniel Borkmann 	/* Load actual entry point from function descriptor */
156e2c95a61SDaniel Borkmann 	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
15720ccb004SNaveen N. Rao 	/* ... and move it to CTR */
15820ccb004SNaveen N. Rao 	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
159e2c95a61SDaniel Borkmann 	/*
160e2c95a61SDaniel Borkmann 	 * Load TOC from function descriptor at offset 8.
161e2c95a61SDaniel Borkmann 	 * We can clobber r2 since we get called through a
162e2c95a61SDaniel Borkmann 	 * function pointer (so caller will save/restore r2)
163e2c95a61SDaniel Borkmann 	 * and since we don't use a TOC ourself.
164e2c95a61SDaniel Borkmann 	 */
165e2c95a61SDaniel Borkmann 	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
166e2c95a61SDaniel Borkmann #else
167e2c95a61SDaniel Borkmann 	/* We can clobber r12 */
168e2c95a61SDaniel Borkmann 	PPC_FUNC_ADDR(12, func);
16920ccb004SNaveen N. Rao 	EMIT(PPC_RAW_MTCTR(12));
170e2c95a61SDaniel Borkmann #endif
17120ccb004SNaveen N. Rao 	EMIT(PPC_RAW_BCTRL());
172e2c95a61SDaniel Borkmann }
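/*
 * Note: with ELF ABI v1, 'func' is the address of a function descriptor
 * (entry point at offset 0, TOC pointer at offset 8), hence the extra loads
 * above. With ABI v2 the address is the entry point itself and is moved via
 * r12, which is also the register a v2 callee expects to hold its own entry
 * address at the global entry point.
 */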
173e2c95a61SDaniel Borkmann 
1744ea76e90SChristophe Leroy void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
175ce076141SNaveen N. Rao {
1764ea69b2fSSandipan Das 	unsigned int i, ctx_idx = ctx->idx;
1774ea69b2fSSandipan Das 
1784ea69b2fSSandipan Das 	/* Load function address into r12 */
1794ea69b2fSSandipan Das 	PPC_LI64(12, func);
1804ea69b2fSSandipan Das 
1814ea69b2fSSandipan Das 	/* For bpf-to-bpf function calls, the callee's address is unknown
1824ea69b2fSSandipan Das 	 * until the last extra pass. As seen above, we use PPC_LI64() to
1834ea69b2fSSandipan Das 	 * load the callee's address, but the number of instructions it
1844ea69b2fSSandipan Das 	 * emits depends on the value being loaded.
1854ea69b2fSSandipan Das 	 *
1864ea69b2fSSandipan Das 	 * Since we don't want the number of instructions emitted to change,
1874ea69b2fSSandipan Das 	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
1884ea69b2fSSandipan Das 	 * we always have a five-instruction sequence, which is the maximum
1894ea69b2fSSandipan Das 	 * that PPC_LI64() can emit.
1904ea69b2fSSandipan Das 	 */
1914ea69b2fSSandipan Das 	for (i = ctx->idx - ctx_idx; i < 5; i++)
1923a181237SBalamuruhan S 		EMIT(PPC_RAW_NOP());
1934ea69b2fSSandipan Das 
194ce076141SNaveen N. Rao #ifdef PPC64_ELF_ABI_v1
195ce076141SNaveen N. Rao 	/*
196ce076141SNaveen N. Rao 	 * Load TOC from function descriptor at offset 8.
197ce076141SNaveen N. Rao 	 * We can clobber r2 since we get called through a
198ce076141SNaveen N. Rao 	 * function pointer (so caller will save/restore r2)
199ce076141SNaveen N. Rao 	 * and since we don't use a TOC ourself.
200ce076141SNaveen N. Rao 	 */
2014ea69b2fSSandipan Das 	PPC_BPF_LL(2, 12, 8);
2024ea69b2fSSandipan Das 	/* Load actual entry point from function descriptor */
2034ea69b2fSSandipan Das 	PPC_BPF_LL(12, 12, 0);
204ce076141SNaveen N. Rao #endif
2054ea69b2fSSandipan Das 
20620ccb004SNaveen N. Rao 	EMIT(PPC_RAW_MTCTR(12));
20720ccb004SNaveen N. Rao 	EMIT(PPC_RAW_BCTRL());
208ce076141SNaveen N. Rao }
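/*
 * For example, loading an arbitrary 64-bit address here expands to the
 * five-instruction worst case that the nop padding accounts for, roughly:
 *
 *	lis	r12, addr[63:48]
 *	ori	r12, r12, addr[47:32]
 *	sldi	r12, r12, 32
 *	oris	r12, r12, addr[31:16]
 *	ori	r12, r12, addr[15:0]
 */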
209ce076141SNaveen N. Rao 
2103832ba4eSNaveen N. Rao static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
211ce076141SNaveen N. Rao {
212ce076141SNaveen N. Rao 	/*
213ce076141SNaveen N. Rao 	 * By now, the eBPF program has already set up parameters in r3, r4 and r5
214ce076141SNaveen N. Rao 	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
215ce076141SNaveen N. Rao 	 * r4/BPF_REG_2 - pointer to bpf_array
216ce076141SNaveen N. Rao 	 * r5/BPF_REG_3 - index in bpf_array
217ce076141SNaveen N. Rao 	 */
218ce076141SNaveen N. Rao 	int b2p_bpf_array = b2p[BPF_REG_2];
219ce076141SNaveen N. Rao 	int b2p_index = b2p[BPF_REG_3];
220ce076141SNaveen N. Rao 
221ce076141SNaveen N. Rao 	/*
222ce076141SNaveen N. Rao 	 * if (index >= array->map.max_entries)
223ce076141SNaveen N. Rao 	 *   goto out;
224ce076141SNaveen N. Rao 	 */
22506541865SBalamuruhan S 	EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
2263a181237SBalamuruhan S 	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
2273a181237SBalamuruhan S 	EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1]));
228ce076141SNaveen N. Rao 	PPC_BCC(COND_GE, out);
229ce076141SNaveen N. Rao 
230ce076141SNaveen N. Rao 	/*
231ebf7f6f0STiezhu Yang 	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
232ce076141SNaveen N. Rao 	 *   goto out;
233ce076141SNaveen N. Rao 	 */
23486be36f6SNaveen N. Rao 	PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
2353a181237SBalamuruhan S 	EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
236ebf7f6f0STiezhu Yang 	PPC_BCC(COND_GE, out);
237ce076141SNaveen N. Rao 
238ce076141SNaveen N. Rao 	/*
239ce076141SNaveen N. Rao 	 * tail_call_cnt++;
240ce076141SNaveen N. Rao 	 */
2413a181237SBalamuruhan S 	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
242ce076141SNaveen N. Rao 	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
243ce076141SNaveen N. Rao 
244ce076141SNaveen N. Rao 	/* prog = array->ptrs[index]; */
2453a181237SBalamuruhan S 	EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
24606541865SBalamuruhan S 	EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array));
24786be36f6SNaveen N. Rao 	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
248ce076141SNaveen N. Rao 
249ce076141SNaveen N. Rao 	/*
250ce076141SNaveen N. Rao 	 * if (prog == NULL)
251ce076141SNaveen N. Rao 	 *   goto out;
252ce076141SNaveen N. Rao 	 */
2533a181237SBalamuruhan S 	EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0));
254ce076141SNaveen N. Rao 	PPC_BCC(COND_EQ, out);
255ce076141SNaveen N. Rao 
256ce076141SNaveen N. Rao 	/* goto *(prog->bpf_func + prologue_size); */
25786be36f6SNaveen N. Rao 	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
258ce076141SNaveen N. Rao #ifdef PPC64_ELF_ABI_v1
259ce076141SNaveen N. Rao 	/* skip past the function descriptor */
2603a181237SBalamuruhan S 	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
2613a181237SBalamuruhan S 			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE));
262ce076141SNaveen N. Rao #else
2633a181237SBalamuruhan S 	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE));
264ce076141SNaveen N. Rao #endif
2653a181237SBalamuruhan S 	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
266ce076141SNaveen N. Rao 
267ce076141SNaveen N. Rao 	/* tear down stack, restore NVRs, ... */
268ce076141SNaveen N. Rao 	bpf_jit_emit_common_epilogue(image, ctx);
269ce076141SNaveen N. Rao 
2703a181237SBalamuruhan S 	EMIT(PPC_RAW_BCTR());
2713832ba4eSNaveen N. Rao 
272ce076141SNaveen N. Rao 	/* out: */
2733832ba4eSNaveen N. Rao 	return 0;
274ce076141SNaveen N. Rao }
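/*
 * Note that the final bctr (not bctrl) never updates LR, so the target
 * program returns straight to our caller, and because the branch lands past
 * the callee's BPF_TAILCALL_PROLOGUE_SIZE bytes, the running tail_call_cnt
 * is carried across the whole chain of tail calls.
 */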
275ce076141SNaveen N. Rao 
276b7540d62SNaveen N. Rao /*
277b7540d62SNaveen N. Rao  * We always spill into the redzone, even if the bpf program has its own stackframe.
278b7540d62SNaveen N. Rao  * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
279b7540d62SNaveen N. Rao  */
280b7540d62SNaveen N. Rao void bpf_stf_barrier(void);
281b7540d62SNaveen N. Rao 
282b7540d62SNaveen N. Rao asm (
283b7540d62SNaveen N. Rao "		.global bpf_stf_barrier		;"
284b7540d62SNaveen N. Rao "	bpf_stf_barrier:			;"
285b7540d62SNaveen N. Rao "		std	21,-64(1)		;"
286b7540d62SNaveen N. Rao "		std	22,-56(1)		;"
287b7540d62SNaveen N. Rao "		sync				;"
288b7540d62SNaveen N. Rao "		ld	21,-64(1)		;"
289b7540d62SNaveen N. Rao "		ld	22,-56(1)		;"
290b7540d62SNaveen N. Rao "		ori	31,31,0			;"
291b7540d62SNaveen N. Rao "		.rept 14			;"
292b7540d62SNaveen N. Rao "		b	1f			;"
293b7540d62SNaveen N. Rao "	1:					;"
294b7540d62SNaveen N. Rao "		.endr				;"
295b7540d62SNaveen N. Rao "		blr				;"
296b7540d62SNaveen N. Rao );
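/*
 * The fallback above spills two non-volatile GPRs into the redzone slots
 * that double as the JIT's local_tmp_var area when there is no stack frame
 * (-64/-56, see bpf_jit_stack_local()), orders the stores with sync,
 * reloads them, and then issues the same ori 31,31,0 barrier used by the
 * SYNC_ORI variant, followed by a run of taken branches.
 */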
297b7540d62SNaveen N. Rao 
298156d0e29SNaveen N. Rao /* Assemble the body code between the prologue & epilogue */
2994ea76e90SChristophe Leroy int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
300983bdc02SRavi Bangoria 		       u32 *addrs, int pass)
301156d0e29SNaveen N. Rao {
302b7540d62SNaveen N. Rao 	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
303156d0e29SNaveen N. Rao 	const struct bpf_insn *insn = fp->insnsi;
304156d0e29SNaveen N. Rao 	int flen = fp->len;
305e2c95a61SDaniel Borkmann 	int i, ret;
306156d0e29SNaveen N. Rao 
307156d0e29SNaveen N. Rao 	/* Start of epilogue code - will only be valid 2nd pass onwards */
308156d0e29SNaveen N. Rao 	u32 exit_addr = addrs[flen];
309156d0e29SNaveen N. Rao 
310156d0e29SNaveen N. Rao 	for (i = 0; i < flen; i++) {
311156d0e29SNaveen N. Rao 		u32 code = insn[i].code;
312156d0e29SNaveen N. Rao 		u32 dst_reg = b2p[insn[i].dst_reg];
313156d0e29SNaveen N. Rao 		u32 src_reg = b2p[insn[i].src_reg];
314efa95f03SHari Bathini 		u32 size = BPF_SIZE(code);
315156d0e29SNaveen N. Rao 		s16 off = insn[i].off;
316156d0e29SNaveen N. Rao 		s32 imm = insn[i].imm;
317e2c95a61SDaniel Borkmann 		bool func_addr_fixed;
318e2c95a61SDaniel Borkmann 		u64 func_addr;
319156d0e29SNaveen N. Rao 		u64 imm64;
320156d0e29SNaveen N. Rao 		u32 true_cond;
321b9c1e60eSDaniel Borkmann 		u32 tmp_idx;
322f9320c49SNaveen N. Rao 		int j;
323156d0e29SNaveen N. Rao 
324156d0e29SNaveen N. Rao 		/*
325156d0e29SNaveen N. Rao 		 * addrs[] maps a BPF bytecode address into a real offset from
326156d0e29SNaveen N. Rao 		 * the start of the body code.
327156d0e29SNaveen N. Rao 		 */
328156d0e29SNaveen N. Rao 		addrs[i] = ctx->idx * 4;
329156d0e29SNaveen N. Rao 
330156d0e29SNaveen N. Rao 		/*
331156d0e29SNaveen N. Rao 		 * As an optimization, we note down which non-volatile registers
332156d0e29SNaveen N. Rao 		 * are used so that we can only save/restore those in our
333156d0e29SNaveen N. Rao 		 * prologue and epilogue. We do this here regardless of whether
334156d0e29SNaveen N. Rao 		 * the actual BPF instruction uses src/dst registers or not
335156d0e29SNaveen N. Rao 		 * (for instance, BPF_CALL does not use them). The expectation
336156d0e29SNaveen N. Rao 		 * is that those instructions will have src_reg/dst_reg set to
337156d0e29SNaveen N. Rao 		 * 0. Even otherwise, we just lose some prologue/epilogue
338156d0e29SNaveen N. Rao 		 * optimization but everything else should work without
339156d0e29SNaveen N. Rao 		 * any issues.
340156d0e29SNaveen N. Rao 		 */
3417b847f52SNaveen N. Rao 		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
342ed573b57SChristophe Leroy 			bpf_set_seen_register(ctx, dst_reg);
3437b847f52SNaveen N. Rao 		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
344ed573b57SChristophe Leroy 			bpf_set_seen_register(ctx, src_reg);
345156d0e29SNaveen N. Rao 
346156d0e29SNaveen N. Rao 		switch (code) {
347156d0e29SNaveen N. Rao 		/*
348156d0e29SNaveen N. Rao 		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
349156d0e29SNaveen N. Rao 		 */
350156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
351156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
35206541865SBalamuruhan S 			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
353156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
354156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
355156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
3563a181237SBalamuruhan S 			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
357156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
358156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
359156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
3605855c4c1SNaveen N. Rao 			if (!imm) {
3615855c4c1SNaveen N. Rao 				goto bpf_alu32_trunc;
3625855c4c1SNaveen N. Rao 			} else if (imm >= -32768 && imm < 32768) {
3633a181237SBalamuruhan S 				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
3645855c4c1SNaveen N. Rao 			} else {
365156d0e29SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
36606541865SBalamuruhan S 				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
367156d0e29SNaveen N. Rao 			}
3685855c4c1SNaveen N. Rao 			goto bpf_alu32_trunc;
3695855c4c1SNaveen N. Rao 		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
3705855c4c1SNaveen N. Rao 		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
3715855c4c1SNaveen N. Rao 			if (!imm) {
3725855c4c1SNaveen N. Rao 				goto bpf_alu32_trunc;
3735855c4c1SNaveen N. Rao 			} else if (imm > -32768 && imm <= 32768) {
3745855c4c1SNaveen N. Rao 				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
3755855c4c1SNaveen N. Rao 			} else {
3765855c4c1SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
3775855c4c1SNaveen N. Rao 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
378156d0e29SNaveen N. Rao 			}
379156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
380156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
381156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
382156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ALU)
3833a181237SBalamuruhan S 				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
384156d0e29SNaveen N. Rao 			else
3853a181237SBalamuruhan S 				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
386156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
387156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
388156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
389156d0e29SNaveen N. Rao 			if (imm >= -32768 && imm < 32768)
3903a181237SBalamuruhan S 				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
391156d0e29SNaveen N. Rao 			else {
392156d0e29SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
393156d0e29SNaveen N. Rao 				if (BPF_CLASS(code) == BPF_ALU)
3943a181237SBalamuruhan S 					EMIT(PPC_RAW_MULW(dst_reg, dst_reg,
3953a181237SBalamuruhan S 							b2p[TMP_REG_1]));
396156d0e29SNaveen N. Rao 				else
3973a181237SBalamuruhan S 					EMIT(PPC_RAW_MULD(dst_reg, dst_reg,
3983a181237SBalamuruhan S 							b2p[TMP_REG_1]));
399156d0e29SNaveen N. Rao 			}
400156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
401156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
402156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
403156d0e29SNaveen N. Rao 			if (BPF_OP(code) == BPF_MOD) {
4043a181237SBalamuruhan S 				EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg));
4053a181237SBalamuruhan S 				EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], src_reg,
4063a181237SBalamuruhan S 						b2p[TMP_REG_1]));
4073a181237SBalamuruhan S 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
408156d0e29SNaveen N. Rao 			} else
4093a181237SBalamuruhan S 				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
410156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
411156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
412156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
413156d0e29SNaveen N. Rao 			if (BPF_OP(code) == BPF_MOD) {
4143a181237SBalamuruhan S 				EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg));
4153a181237SBalamuruhan S 				EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], src_reg,
4163a181237SBalamuruhan S 						b2p[TMP_REG_1]));
4173a181237SBalamuruhan S 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
418156d0e29SNaveen N. Rao 			} else
4193a181237SBalamuruhan S 				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
420156d0e29SNaveen N. Rao 			break;
421156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
422156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
423156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
424156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
425156d0e29SNaveen N. Rao 			if (imm == 0)
426156d0e29SNaveen N. Rao 				return -EINVAL;
4278bbc9d82SNaveen N. Rao 			if (imm == 1) {
4288bbc9d82SNaveen N. Rao 				if (BPF_OP(code) == BPF_DIV) {
429156d0e29SNaveen N. Rao 					goto bpf_alu32_trunc;
4308bbc9d82SNaveen N. Rao 				} else {
4318bbc9d82SNaveen N. Rao 					EMIT(PPC_RAW_LI(dst_reg, 0));
4328bbc9d82SNaveen N. Rao 					break;
4338bbc9d82SNaveen N. Rao 				}
4348bbc9d82SNaveen N. Rao 			}
435156d0e29SNaveen N. Rao 
436156d0e29SNaveen N. Rao 			PPC_LI32(b2p[TMP_REG_1], imm);
437156d0e29SNaveen N. Rao 			switch (BPF_CLASS(code)) {
438156d0e29SNaveen N. Rao 			case BPF_ALU:
439156d0e29SNaveen N. Rao 				if (BPF_OP(code) == BPF_MOD) {
4403a181237SBalamuruhan S 					EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_2],
4413a181237SBalamuruhan S 							dst_reg,
4423a181237SBalamuruhan S 							b2p[TMP_REG_1]));
4433a181237SBalamuruhan S 					EMIT(PPC_RAW_MULW(b2p[TMP_REG_1],
444156d0e29SNaveen N. Rao 							b2p[TMP_REG_1],
4453a181237SBalamuruhan S 							b2p[TMP_REG_2]));
4463a181237SBalamuruhan S 					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
4473a181237SBalamuruhan S 							b2p[TMP_REG_1]));
448156d0e29SNaveen N. Rao 				} else
4493a181237SBalamuruhan S 					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg,
4503a181237SBalamuruhan S 							b2p[TMP_REG_1]));
451156d0e29SNaveen N. Rao 				break;
452156d0e29SNaveen N. Rao 			case BPF_ALU64:
453156d0e29SNaveen N. Rao 				if (BPF_OP(code) == BPF_MOD) {
4543a181237SBalamuruhan S 					EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_2],
4553a181237SBalamuruhan S 							dst_reg,
4563a181237SBalamuruhan S 							b2p[TMP_REG_1]));
4573a181237SBalamuruhan S 					EMIT(PPC_RAW_MULD(b2p[TMP_REG_1],
458156d0e29SNaveen N. Rao 							b2p[TMP_REG_1],
4593a181237SBalamuruhan S 							b2p[TMP_REG_2]));
4603a181237SBalamuruhan S 					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
4613a181237SBalamuruhan S 							b2p[TMP_REG_1]));
462156d0e29SNaveen N. Rao 				} else
4633a181237SBalamuruhan S 					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg,
4643a181237SBalamuruhan S 							b2p[TMP_REG_1]));
465156d0e29SNaveen N. Rao 				break;
466156d0e29SNaveen N. Rao 			}
467156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
468156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
469156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
4703a181237SBalamuruhan S 			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
471156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
472156d0e29SNaveen N. Rao 
473156d0e29SNaveen N. Rao 		/*
474156d0e29SNaveen N. Rao 		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
475156d0e29SNaveen N. Rao 		 */
476156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
477156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
4783a181237SBalamuruhan S 			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
479156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
480156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
481156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
482156d0e29SNaveen N. Rao 			if (!IMM_H(imm))
4833a181237SBalamuruhan S 				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
484156d0e29SNaveen N. Rao 			else {
485156d0e29SNaveen N. Rao 				/* Sign-extended */
486156d0e29SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
4873a181237SBalamuruhan S 				EMIT(PPC_RAW_AND(dst_reg, dst_reg, b2p[TMP_REG_1]));
488156d0e29SNaveen N. Rao 			}
489156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
490156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
491156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
4923a181237SBalamuruhan S 			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
493156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
494156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
495156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
496156d0e29SNaveen N. Rao 			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
497156d0e29SNaveen N. Rao 				/* Sign-extended */
498156d0e29SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
4993a181237SBalamuruhan S 				EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_1]));
500156d0e29SNaveen N. Rao 			} else {
501156d0e29SNaveen N. Rao 				if (IMM_L(imm))
5023a181237SBalamuruhan S 					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
503156d0e29SNaveen N. Rao 				if (IMM_H(imm))
5043a181237SBalamuruhan S 					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
505156d0e29SNaveen N. Rao 			}
506156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
507156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
508156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
5093a181237SBalamuruhan S 			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
510156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
511156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
512156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
513156d0e29SNaveen N. Rao 			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
514156d0e29SNaveen N. Rao 				/* Sign-extended */
515156d0e29SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
5163a181237SBalamuruhan S 				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]));
517156d0e29SNaveen N. Rao 			} else {
518156d0e29SNaveen N. Rao 				if (IMM_L(imm))
5193a181237SBalamuruhan S 					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
520156d0e29SNaveen N. Rao 				if (IMM_H(imm))
5213a181237SBalamuruhan S 					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
522156d0e29SNaveen N. Rao 			}
523156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
524156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
525156d0e29SNaveen N. Rao 			/* slw clears top 32 bits */
5263a181237SBalamuruhan S 			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
527a4c92773SJiong Wang 			/* skip zero extension move, but set address map. */
528a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
529a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
530156d0e29SNaveen N. Rao 			break;
531156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
5323a181237SBalamuruhan S 			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
533156d0e29SNaveen N. Rao 			break;
534156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
535156d0e29SNaveen N. Rao 			/* with imm 0, we still need to clear top 32 bits */
5363a181237SBalamuruhan S 			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
537a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
538a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
539156d0e29SNaveen N. Rao 			break;
540156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
541156d0e29SNaveen N. Rao 			if (imm != 0)
5423a181237SBalamuruhan S 				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
543156d0e29SNaveen N. Rao 			break;
544156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
5453a181237SBalamuruhan S 			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
546a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
547a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
548156d0e29SNaveen N. Rao 			break;
549156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
5503a181237SBalamuruhan S 			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
551156d0e29SNaveen N. Rao 			break;
552156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
5533a181237SBalamuruhan S 			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
554a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
555a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
556156d0e29SNaveen N. Rao 			break;
557156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
558156d0e29SNaveen N. Rao 			if (imm != 0)
5593a181237SBalamuruhan S 				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
560156d0e29SNaveen N. Rao 			break;
56144cf43c0SJiong Wang 		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
5623a181237SBalamuruhan S 			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
56344cf43c0SJiong Wang 			goto bpf_alu32_trunc;
564156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
5653a181237SBalamuruhan S 			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
566156d0e29SNaveen N. Rao 			break;
56744cf43c0SJiong Wang 		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
5683a181237SBalamuruhan S 			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
56944cf43c0SJiong Wang 			goto bpf_alu32_trunc;
570156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
571156d0e29SNaveen N. Rao 			if (imm != 0)
5723a181237SBalamuruhan S 				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
573156d0e29SNaveen N. Rao 			break;
574156d0e29SNaveen N. Rao 
575156d0e29SNaveen N. Rao 		/*
576156d0e29SNaveen N. Rao 		 * MOV
577156d0e29SNaveen N. Rao 		 */
578156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
579156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
580a4c92773SJiong Wang 			if (imm == 1) {
581a4c92773SJiong Wang 				/* special mov32 for zext */
5823a181237SBalamuruhan S 				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
583a4c92773SJiong Wang 				break;
584a4c92773SJiong Wang 			}
5853a181237SBalamuruhan S 			EMIT(PPC_RAW_MR(dst_reg, src_reg));
586156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
587156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
588156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
589156d0e29SNaveen N. Rao 			PPC_LI32(dst_reg, imm);
590156d0e29SNaveen N. Rao 			if (imm < 0)
591156d0e29SNaveen N. Rao 				goto bpf_alu32_trunc;
592a4c92773SJiong Wang 			else if (insn_is_zext(&insn[i + 1]))
593a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
594156d0e29SNaveen N. Rao 			break;
595156d0e29SNaveen N. Rao 
596156d0e29SNaveen N. Rao bpf_alu32_trunc:
597156d0e29SNaveen N. Rao 		/* Truncate to 32-bits */
598a4c92773SJiong Wang 		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
5993a181237SBalamuruhan S 			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
600156d0e29SNaveen N. Rao 		break;
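		/*
		 * e.g. a 32-bit ADD of 0xffffffff and 1 must leave dst as 0
		 * rather than 0x100000000, hence the rlwinm above (keep the
		 * low word, clear the top 32 bits) whenever the verifier has
		 * not already guaranteed zero extension.
		 */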
601156d0e29SNaveen N. Rao 
602156d0e29SNaveen N. Rao 		/*
603156d0e29SNaveen N. Rao 		 * BPF_FROM_BE/LE
604156d0e29SNaveen N. Rao 		 */
605156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_END | BPF_FROM_LE:
606156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_END | BPF_FROM_BE:
607156d0e29SNaveen N. Rao #ifdef __BIG_ENDIAN__
608156d0e29SNaveen N. Rao 			if (BPF_SRC(code) == BPF_FROM_BE)
609156d0e29SNaveen N. Rao 				goto emit_clear;
610156d0e29SNaveen N. Rao #else /* !__BIG_ENDIAN__ */
611156d0e29SNaveen N. Rao 			if (BPF_SRC(code) == BPF_FROM_LE)
612156d0e29SNaveen N. Rao 				goto emit_clear;
613156d0e29SNaveen N. Rao #endif
614156d0e29SNaveen N. Rao 			switch (imm) {
615156d0e29SNaveen N. Rao 			case 16:
616156d0e29SNaveen N. Rao 				/* Rotate 8 bits left & mask with 0x0000ff00 */
6173a181237SBalamuruhan S 				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23));
618156d0e29SNaveen N. Rao 				/* Rotate 8 bits right & insert LSB to reg */
6193a181237SBalamuruhan S 				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31));
620156d0e29SNaveen N. Rao 				/* Move result back to dst_reg */
6213a181237SBalamuruhan S 				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
622156d0e29SNaveen N. Rao 				break;
623156d0e29SNaveen N. Rao 			case 32:
624156d0e29SNaveen N. Rao 				/*
625156d0e29SNaveen N. Rao 				 * Rotate word left by 8 bits:
626156d0e29SNaveen N. Rao 				 * 2 bytes are already in their final position
627156d0e29SNaveen N. Rao 				 * -- bytes 2 and 4 (of bytes 1, 2, 3 and 4)
628156d0e29SNaveen N. Rao 				 */
6293a181237SBalamuruhan S 				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31));
630156d0e29SNaveen N. Rao 				/* Rotate 24 bits and insert byte 1 */
6313a181237SBalamuruhan S 				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7));
632156d0e29SNaveen N. Rao 				/* Rotate 24 bits and insert byte 3 */
6333a181237SBalamuruhan S 				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23));
6343a181237SBalamuruhan S 				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
635156d0e29SNaveen N. Rao 				break;
636156d0e29SNaveen N. Rao 			case 64:
637*3f5f766dSNaveen N. Rao 				/* Store the value to stack and then use byte-reverse loads */
63886be36f6SNaveen N. Rao 				PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
6393a181237SBalamuruhan S 				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
640*3f5f766dSNaveen N. Rao 				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
6413a181237SBalamuruhan S 					EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
642*3f5f766dSNaveen N. Rao 				} else {
643*3f5f766dSNaveen N. Rao 					EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1]));
644*3f5f766dSNaveen N. Rao 					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
645*3f5f766dSNaveen N. Rao 						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
646*3f5f766dSNaveen N. Rao 					EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4));
647*3f5f766dSNaveen N. Rao 					EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1]));
648*3f5f766dSNaveen N. Rao 					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
649*3f5f766dSNaveen N. Rao 						EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32));
650*3f5f766dSNaveen N. Rao 					EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2]));
651*3f5f766dSNaveen N. Rao 				}
652156d0e29SNaveen N. Rao 				break;
653156d0e29SNaveen N. Rao 			}
654156d0e29SNaveen N. Rao 			break;
655156d0e29SNaveen N. Rao 
656156d0e29SNaveen N. Rao emit_clear:
657156d0e29SNaveen N. Rao 			switch (imm) {
658156d0e29SNaveen N. Rao 			case 16:
659156d0e29SNaveen N. Rao 				/* zero-extend 16 bits into 64 bits */
6603a181237SBalamuruhan S 				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
661a4c92773SJiong Wang 				if (insn_is_zext(&insn[i + 1]))
662a4c92773SJiong Wang 					addrs[++i] = ctx->idx * 4;
663156d0e29SNaveen N. Rao 				break;
664156d0e29SNaveen N. Rao 			case 32:
665a4c92773SJiong Wang 				if (!fp->aux->verifier_zext)
666156d0e29SNaveen N. Rao 					/* zero-extend 32 bits into 64 bits */
6673a181237SBalamuruhan S 					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
668156d0e29SNaveen N. Rao 				break;
669156d0e29SNaveen N. Rao 			case 64:
670156d0e29SNaveen N. Rao 				/* nop */
671156d0e29SNaveen N. Rao 				break;
672156d0e29SNaveen N. Rao 			}
673156d0e29SNaveen N. Rao 			break;
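		/*
		 * In the byte swap cases above, the 64-bit variant spills the
		 * value to the JIT's local stack slot and reads it back with
		 * byte-reversed loads: one ldbrx on ISA 2.06 (POWER7) and
		 * later, or two lwbrx loads combined with sldi/or otherwise.
		 * Conversions that already match the host endianness fall
		 * through to emit_clear and only zero-extend.
		 */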
674156d0e29SNaveen N. Rao 
675156d0e29SNaveen N. Rao 		/*
676f5e81d11SDaniel Borkmann 		 * BPF_ST NOSPEC (speculation barrier)
677f5e81d11SDaniel Borkmann 		 */
678f5e81d11SDaniel Borkmann 		case BPF_ST | BPF_NOSPEC:
679b7540d62SNaveen N. Rao 			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
680b7540d62SNaveen N. Rao 					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
681b7540d62SNaveen N. Rao 				break;
682b7540d62SNaveen N. Rao 
683b7540d62SNaveen N. Rao 			switch (stf_barrier) {
684b7540d62SNaveen N. Rao 			case STF_BARRIER_EIEIO:
685b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_EIEIO() | 0x02000000);
686b7540d62SNaveen N. Rao 				break;
687b7540d62SNaveen N. Rao 			case STF_BARRIER_SYNC_ORI:
688b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_SYNC());
689b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
690b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
691b7540d62SNaveen N. Rao 				break;
692b7540d62SNaveen N. Rao 			case STF_BARRIER_FALLBACK:
693b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
694b7540d62SNaveen N. Rao 				PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
695b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_MTCTR(12));
696b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_BCTRL());
697b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
698b7540d62SNaveen N. Rao 				break;
699b7540d62SNaveen N. Rao 			case STF_BARRIER_NONE:
700b7540d62SNaveen N. Rao 				break;
701b7540d62SNaveen N. Rao 			}
702f5e81d11SDaniel Borkmann 			break;
703f5e81d11SDaniel Borkmann 
704f5e81d11SDaniel Borkmann 		/*
705156d0e29SNaveen N. Rao 		 * BPF_ST(X)
706156d0e29SNaveen N. Rao 		 */
707156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
708156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
709156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
7103a181237SBalamuruhan S 				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
711156d0e29SNaveen N. Rao 				src_reg = b2p[TMP_REG_1];
712156d0e29SNaveen N. Rao 			}
7133a181237SBalamuruhan S 			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
714156d0e29SNaveen N. Rao 			break;
715156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
716156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
717156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
7183a181237SBalamuruhan S 				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
719156d0e29SNaveen N. Rao 				src_reg = b2p[TMP_REG_1];
720156d0e29SNaveen N. Rao 			}
7213a181237SBalamuruhan S 			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
722156d0e29SNaveen N. Rao 			break;
723156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
724156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
725156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
726156d0e29SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
727156d0e29SNaveen N. Rao 				src_reg = b2p[TMP_REG_1];
728156d0e29SNaveen N. Rao 			}
7293a181237SBalamuruhan S 			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
730156d0e29SNaveen N. Rao 			break;
731156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
732156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
733156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
734156d0e29SNaveen N. Rao 				PPC_LI32(b2p[TMP_REG_1], imm);
735156d0e29SNaveen N. Rao 				src_reg = b2p[TMP_REG_1];
736156d0e29SNaveen N. Rao 			}
73786be36f6SNaveen N. Rao 			PPC_BPF_STL(src_reg, dst_reg, off);
738156d0e29SNaveen N. Rao 			break;
739156d0e29SNaveen N. Rao 
740156d0e29SNaveen N. Rao 		/*
74191c960b0SBrendan Jackman 		 * BPF_STX ATOMIC (atomic ops)
742156d0e29SNaveen N. Rao 		 */
74391c960b0SBrendan Jackman 		case BPF_STX | BPF_ATOMIC | BPF_W:
744419ac821SNaveen N. Rao 			if (imm != BPF_ADD) {
74591c960b0SBrendan Jackman 				pr_err_ratelimited(
74691c960b0SBrendan Jackman 					"eBPF filter atomic op code %02x (@%d) unsupported\n",
74791c960b0SBrendan Jackman 					code, i);
74891c960b0SBrendan Jackman 				return -ENOTSUPP;
74991c960b0SBrendan Jackman 			}
75091c960b0SBrendan Jackman 
751156d0e29SNaveen N. Rao 			/* *(u32 *)(dst + off) += src */
75291c960b0SBrendan Jackman 
753156d0e29SNaveen N. Rao 			/* Get EA into TMP_REG_1 */
7543a181237SBalamuruhan S 			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
755b9c1e60eSDaniel Borkmann 			tmp_idx = ctx->idx * 4;
756156d0e29SNaveen N. Rao 			/* load value from memory into TMP_REG_2 */
75706541865SBalamuruhan S 			EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
758156d0e29SNaveen N. Rao 			/* add value from src_reg into this */
75906541865SBalamuruhan S 			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
760156d0e29SNaveen N. Rao 			/* store result back */
7613a181237SBalamuruhan S 			EMIT(PPC_RAW_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
762156d0e29SNaveen N. Rao 			/* we're done if this succeeded */
763b9c1e60eSDaniel Borkmann 			PPC_BCC_SHORT(COND_NE, tmp_idx);
764156d0e29SNaveen N. Rao 			break;
76591c960b0SBrendan Jackman 		case BPF_STX | BPF_ATOMIC | BPF_DW:
766419ac821SNaveen N. Rao 			if (imm != BPF_ADD) {
76791c960b0SBrendan Jackman 				pr_err_ratelimited(
76891c960b0SBrendan Jackman 					"eBPF filter atomic op code %02x (@%d) unsupported\n",
76991c960b0SBrendan Jackman 					code, i);
77091c960b0SBrendan Jackman 				return -ENOTSUPP;
77191c960b0SBrendan Jackman 			}
772156d0e29SNaveen N. Rao 			/* *(u64 *)(dst + off) += src */
77391c960b0SBrendan Jackman 
7743a181237SBalamuruhan S 			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
775b9c1e60eSDaniel Borkmann 			tmp_idx = ctx->idx * 4;
77606541865SBalamuruhan S 			EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
77706541865SBalamuruhan S 			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
77806541865SBalamuruhan S 			EMIT(PPC_RAW_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
779b9c1e60eSDaniel Borkmann 			PPC_BCC_SHORT(COND_NE, tmp_idx);
780156d0e29SNaveen N. Rao 			break;
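		/*
		 * Both atomic cases emit a classic larx/stcx. retry loop: the
		 * bne back to tmp_idx repeats the load/add/store-conditional
		 * sequence until the reservation is not lost, which is what
		 * makes the add atomic.
		 */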
781156d0e29SNaveen N. Rao 
782156d0e29SNaveen N. Rao 		/*
783156d0e29SNaveen N. Rao 		 * BPF_LDX
784156d0e29SNaveen N. Rao 		 */
785156d0e29SNaveen N. Rao 		/* dst = *(u8 *)(ul) (src + off) */
786156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_B:
787983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
788156d0e29SNaveen N. Rao 		/* dst = *(u16 *)(ul) (src + off) */
789156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_H:
790983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
791156d0e29SNaveen N. Rao 		/* dst = *(u32 *)(ul) (src + off) */
792156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_W:
793983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
794156d0e29SNaveen N. Rao 		/* dst = *(u64 *)(ul) (src + off) */
795156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_DW:
796983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
7979c70c714SRavi Bangoria 			/*
7989c70c714SRavi Bangoria 			 * A PTR_TO_BTF_ID pointer accessed with BPF_PROBE_MEM can be a valid
7999c70c714SRavi Bangoria 			 * kernel pointer or NULL, but never a userspace address, so execute the
8009c70c714SRavi Bangoria 			 * load only if addr is a kernel address (see is_kernel_addr());
8019c70c714SRavi Bangoria 			 * otherwise set dst_reg=0 and move on.
8029c70c714SRavi Bangoria 			 */
8039c70c714SRavi Bangoria 			if (BPF_MODE(code) == BPF_PROBE_MEM) {
8049c70c714SRavi Bangoria 				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], src_reg, off));
8059c70c714SRavi Bangoria 				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
8069c70c714SRavi Bangoria 					PPC_LI64(b2p[TMP_REG_2], 0x8000000000000000ul);
8079c70c714SRavi Bangoria 				else /* BOOK3S_64 */
8089c70c714SRavi Bangoria 					PPC_LI64(b2p[TMP_REG_2], PAGE_OFFSET);
8099c70c714SRavi Bangoria 				EMIT(PPC_RAW_CMPLD(b2p[TMP_REG_1], b2p[TMP_REG_2]));
8109c70c714SRavi Bangoria 				PPC_BCC(COND_GT, (ctx->idx + 4) * 4);
8119c70c714SRavi Bangoria 				EMIT(PPC_RAW_LI(dst_reg, 0));
8129c70c714SRavi Bangoria 				/*
8139c70c714SRavi Bangoria 				 * Check if 'off' is word aligned because PPC_BPF_LL()
8149c70c714SRavi Bangoria 				 * (BPF_DW case) generates two instructions if 'off' is not
8159c70c714SRavi Bangoria 				 * word-aligned and one instruction otherwise.
8169c70c714SRavi Bangoria 				 */
8179c70c714SRavi Bangoria 				if (BPF_SIZE(code) == BPF_DW && (off & 3))
8189c70c714SRavi Bangoria 					PPC_JMP((ctx->idx + 3) * 4);
8199c70c714SRavi Bangoria 				else
8209c70c714SRavi Bangoria 					PPC_JMP((ctx->idx + 2) * 4);
8219c70c714SRavi Bangoria 			}
8229c70c714SRavi Bangoria 
823efa95f03SHari Bathini 			switch (size) {
824efa95f03SHari Bathini 			case BPF_B:
825efa95f03SHari Bathini 				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
826efa95f03SHari Bathini 				break;
827efa95f03SHari Bathini 			case BPF_H:
828efa95f03SHari Bathini 				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
829efa95f03SHari Bathini 				break;
830efa95f03SHari Bathini 			case BPF_W:
831efa95f03SHari Bathini 				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
832efa95f03SHari Bathini 				break;
833efa95f03SHari Bathini 			case BPF_DW:
83486be36f6SNaveen N. Rao 				PPC_BPF_LL(dst_reg, src_reg, off);
835156d0e29SNaveen N. Rao 				break;
836efa95f03SHari Bathini 			}
837efa95f03SHari Bathini 
838efa95f03SHari Bathini 			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
839efa95f03SHari Bathini 				addrs[++i] = ctx->idx * 4;
840983bdc02SRavi Bangoria 
841983bdc02SRavi Bangoria 			if (BPF_MODE(code) == BPF_PROBE_MEM) {
842983bdc02SRavi Bangoria 				ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
843983bdc02SRavi Bangoria 							    4, dst_reg);
844983bdc02SRavi Bangoria 				if (ret)
845983bdc02SRavi Bangoria 					return ret;
846983bdc02SRavi Bangoria 			}
847efa95f03SHari Bathini 			break;
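		/*
		 * For BPF_PROBE_MEM, the range check above skips the load and
		 * leaves dst_reg = 0 for non-kernel addresses; kernel
		 * addresses that still fault are caught via the extable entry
		 * added by bpf_add_extable_entry(), which records dst_reg so
		 * the fixup can clear it and resume past the load.
		 */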
848156d0e29SNaveen N. Rao 
849156d0e29SNaveen N. Rao 		/*
850156d0e29SNaveen N. Rao 		 * Doubleword load
851156d0e29SNaveen N. Rao 		 * 16-byte instruction that uses two 'struct bpf_insn'
852156d0e29SNaveen N. Rao 		 */
853156d0e29SNaveen N. Rao 		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
854156d0e29SNaveen N. Rao 			imm64 = ((u64)(u32) insn[i].imm) |
855156d0e29SNaveen N. Rao 				    (((u64)(u32) insn[i+1].imm) << 32);
856f9320c49SNaveen N. Rao 			tmp_idx = ctx->idx;
857f9320c49SNaveen N. Rao 			PPC_LI64(dst_reg, imm64);
858f9320c49SNaveen N. Rao 			/* padding to allow full 5 instructions for later patching */
859f9320c49SNaveen N. Rao 			for (j = ctx->idx - tmp_idx; j < 5; j++)
860f9320c49SNaveen N. Rao 				EMIT(PPC_RAW_NOP());
861156d0e29SNaveen N. Rao 			/* Adjust for two bpf instructions */
862156d0e29SNaveen N. Rao 			addrs[++i] = ctx->idx * 4;
863156d0e29SNaveen N. Rao 			break;
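		/*
		 * A 64-bit immediate load spans two struct bpf_insn slots (low
		 * word in insn[i].imm, high word in insn[i + 1].imm), and the
		 * constant always gets a full five-instruction slot so that a
		 * later pass can patch in a different value without shifting
		 * any of the generated offsets.
		 */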
864156d0e29SNaveen N. Rao 
865156d0e29SNaveen N. Rao 		/*
866156d0e29SNaveen N. Rao 		 * Return/Exit
867156d0e29SNaveen N. Rao 		 */
868156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_EXIT:
869156d0e29SNaveen N. Rao 			/*
870156d0e29SNaveen N. Rao 			 * If this isn't the very last instruction, branch to
871156d0e29SNaveen N. Rao 			 * the epilogue. If we _are_ the last instruction,
872156d0e29SNaveen N. Rao 			 * we'll just fall through to the epilogue.
873156d0e29SNaveen N. Rao 			 */
874156d0e29SNaveen N. Rao 			if (i != flen - 1)
875156d0e29SNaveen N. Rao 				PPC_JMP(exit_addr);
876156d0e29SNaveen N. Rao 			/* else fall through to the epilogue */
877156d0e29SNaveen N. Rao 			break;
878156d0e29SNaveen N. Rao 
879156d0e29SNaveen N. Rao 		/*
8808484ce83SSandipan Das 		 * Call kernel helper or bpf function
881156d0e29SNaveen N. Rao 		 */
882156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_CALL:
883156d0e29SNaveen N. Rao 			ctx->seen |= SEEN_FUNC;
8848484ce83SSandipan Das 
88504c04205SRavi Bangoria 			ret = bpf_jit_get_func_addr(fp, &insn[i], false,
886e2c95a61SDaniel Borkmann 						    &func_addr, &func_addr_fixed);
887e2c95a61SDaniel Borkmann 			if (ret < 0)
888e2c95a61SDaniel Borkmann 				return ret;
889156d0e29SNaveen N. Rao 
890e2c95a61SDaniel Borkmann 			if (func_addr_fixed)
891e2c95a61SDaniel Borkmann 				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
892e2c95a61SDaniel Borkmann 			else
893e2c95a61SDaniel Borkmann 				bpf_jit_emit_func_call_rel(image, ctx, func_addr);
894156d0e29SNaveen N. Rao 			/* move return value from r3 to BPF_REG_0 */
8953a181237SBalamuruhan S 			EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
896156d0e29SNaveen N. Rao 			break;
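		/*
		 * With the register mapping used here, BPF_REG_1..BPF_REG_5
		 * already live in r3-r7, matching the ELF ABI argument
		 * registers, so no argument shuffling is needed before the
		 * call; only the return value is copied from r3 back into
		 * BPF_REG_0's register.
		 */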
897156d0e29SNaveen N. Rao 
898156d0e29SNaveen N. Rao 		/*
899156d0e29SNaveen N. Rao 		 * Jumps and branches
900156d0e29SNaveen N. Rao 		 */
901156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JA:
902156d0e29SNaveen N. Rao 			PPC_JMP(addrs[i + 1 + off]);
903156d0e29SNaveen N. Rao 			break;
904156d0e29SNaveen N. Rao 
905156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGT | BPF_K:
906156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGT | BPF_X:
907156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGT | BPF_K:
908156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGT | BPF_X:
9095f645996SJiong Wang 		case BPF_JMP32 | BPF_JGT | BPF_K:
9105f645996SJiong Wang 		case BPF_JMP32 | BPF_JGT | BPF_X:
9115f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGT | BPF_K:
9125f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGT | BPF_X:
913156d0e29SNaveen N. Rao 			true_cond = COND_GT;
914156d0e29SNaveen N. Rao 			goto cond_branch;
91520dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLT | BPF_K:
91620dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLT | BPF_X:
91720dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLT | BPF_K:
91820dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLT | BPF_X:
9195f645996SJiong Wang 		case BPF_JMP32 | BPF_JLT | BPF_K:
9205f645996SJiong Wang 		case BPF_JMP32 | BPF_JLT | BPF_X:
9215f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLT | BPF_K:
9225f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLT | BPF_X:
92320dbf5ccSDaniel Borkmann 			true_cond = COND_LT;
92420dbf5ccSDaniel Borkmann 			goto cond_branch;
925156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGE | BPF_K:
926156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGE | BPF_X:
927156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGE | BPF_K:
928156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGE | BPF_X:
9295f645996SJiong Wang 		case BPF_JMP32 | BPF_JGE | BPF_K:
9305f645996SJiong Wang 		case BPF_JMP32 | BPF_JGE | BPF_X:
9315f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGE | BPF_K:
9325f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGE | BPF_X:
933156d0e29SNaveen N. Rao 			true_cond = COND_GE;
934156d0e29SNaveen N. Rao 			goto cond_branch;
93520dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLE | BPF_K:
93620dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLE | BPF_X:
93720dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLE | BPF_K:
93820dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLE | BPF_X:
9395f645996SJiong Wang 		case BPF_JMP32 | BPF_JLE | BPF_K:
9405f645996SJiong Wang 		case BPF_JMP32 | BPF_JLE | BPF_X:
9415f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLE | BPF_K:
9425f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLE | BPF_X:
94320dbf5ccSDaniel Borkmann 			true_cond = COND_LE;
94420dbf5ccSDaniel Borkmann 			goto cond_branch;
945156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JEQ | BPF_K:
946156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JEQ | BPF_X:
9475f645996SJiong Wang 		case BPF_JMP32 | BPF_JEQ | BPF_K:
9485f645996SJiong Wang 		case BPF_JMP32 | BPF_JEQ | BPF_X:
949156d0e29SNaveen N. Rao 			true_cond = COND_EQ;
950156d0e29SNaveen N. Rao 			goto cond_branch;
951156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JNE | BPF_K:
952156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JNE | BPF_X:
9535f645996SJiong Wang 		case BPF_JMP32 | BPF_JNE | BPF_K:
9545f645996SJiong Wang 		case BPF_JMP32 | BPF_JNE | BPF_X:
955156d0e29SNaveen N. Rao 			true_cond = COND_NE;
956156d0e29SNaveen N. Rao 			goto cond_branch;
957156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSET | BPF_K:
958156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSET | BPF_X:
9595f645996SJiong Wang 		case BPF_JMP32 | BPF_JSET | BPF_K:
9605f645996SJiong Wang 		case BPF_JMP32 | BPF_JSET | BPF_X:
961156d0e29SNaveen N. Rao 			true_cond = COND_NE;
962156d0e29SNaveen N. Rao 			/* Fall through */
963156d0e29SNaveen N. Rao 
964156d0e29SNaveen N. Rao cond_branch:
965156d0e29SNaveen N. Rao 			switch (code) {
966156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGT | BPF_X:
96720dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLT | BPF_X:
968156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGE | BPF_X:
96920dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLE | BPF_X:
970156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JEQ | BPF_X:
971156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JNE | BPF_X:
9725f645996SJiong Wang 			case BPF_JMP32 | BPF_JGT | BPF_X:
9735f645996SJiong Wang 			case BPF_JMP32 | BPF_JLT | BPF_X:
9745f645996SJiong Wang 			case BPF_JMP32 | BPF_JGE | BPF_X:
9755f645996SJiong Wang 			case BPF_JMP32 | BPF_JLE | BPF_X:
9765f645996SJiong Wang 			case BPF_JMP32 | BPF_JEQ | BPF_X:
9775f645996SJiong Wang 			case BPF_JMP32 | BPF_JNE | BPF_X:
978156d0e29SNaveen N. Rao 				/* unsigned comparison */
9795f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP32)
9803a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
9815f645996SJiong Wang 				else
9823a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
983156d0e29SNaveen N. Rao 				break;
984156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGT | BPF_X:
98520dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLT | BPF_X:
986156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGE | BPF_X:
98720dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLE | BPF_X:
9885f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGT | BPF_X:
9895f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLT | BPF_X:
9905f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGE | BPF_X:
9915f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLE | BPF_X:
992156d0e29SNaveen N. Rao 				/* signed comparison */
9935f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP32)
9943a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
9955f645996SJiong Wang 				else
9963a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
997156d0e29SNaveen N. Rao 				break;
998156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSET | BPF_X:
9995f645996SJiong Wang 			case BPF_JMP32 | BPF_JSET | BPF_X:
10005f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP) {
10013a181237SBalamuruhan S 					EMIT(PPC_RAW_AND_DOT(b2p[TMP_REG_1], dst_reg,
10023a181237SBalamuruhan S 						    src_reg));
10035f645996SJiong Wang 				} else {
10045f645996SJiong Wang 					int tmp_reg = b2p[TMP_REG_1];
10055f645996SJiong Wang 
10063a181237SBalamuruhan S 					EMIT(PPC_RAW_AND(tmp_reg, dst_reg, src_reg));
10073a181237SBalamuruhan S 					EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
10083a181237SBalamuruhan S 						       31));
10095f645996SJiong Wang 				}
1010156d0e29SNaveen N. Rao 				break;
1011156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JNE | BPF_K:
1012156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JEQ | BPF_K:
1013156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGT | BPF_K:
101420dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLT | BPF_K:
1015156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGE | BPF_K:
101620dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLE | BPF_K:
10175f645996SJiong Wang 			case BPF_JMP32 | BPF_JNE | BPF_K:
10185f645996SJiong Wang 			case BPF_JMP32 | BPF_JEQ | BPF_K:
10195f645996SJiong Wang 			case BPF_JMP32 | BPF_JGT | BPF_K:
10205f645996SJiong Wang 			case BPF_JMP32 | BPF_JLT | BPF_K:
10215f645996SJiong Wang 			case BPF_JMP32 | BPF_JGE | BPF_K:
10225f645996SJiong Wang 			case BPF_JMP32 | BPF_JLE | BPF_K:
10235f645996SJiong Wang 			{
10245f645996SJiong Wang 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
10255f645996SJiong Wang 
1026156d0e29SNaveen N. Rao 				/*
1027156d0e29SNaveen N. Rao 				 * Need sign-extended load, so only positive
1028156d0e29SNaveen N. Rao 				 * values can be used as imm in cmpldi
1029156d0e29SNaveen N. Rao 				 */
10305f645996SJiong Wang 				if (imm >= 0 && imm < 32768) {
10315f645996SJiong Wang 					if (is_jmp32)
10323a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
10335f645996SJiong Wang 					else
10343a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
10355f645996SJiong Wang 				} else {
1036156d0e29SNaveen N. Rao 					/* sign-extending load */
1037156d0e29SNaveen N. Rao 					PPC_LI32(b2p[TMP_REG_1], imm);
1038156d0e29SNaveen N. Rao 					/* ... but unsigned comparison */
10395f645996SJiong Wang 					if (is_jmp32)
10403a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPLW(dst_reg,
10413a181237SBalamuruhan S 							  b2p[TMP_REG_1]));
10425f645996SJiong Wang 					else
10433a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPLD(dst_reg,
10443a181237SBalamuruhan S 							  b2p[TMP_REG_1]));
1045156d0e29SNaveen N. Rao 				}
1046156d0e29SNaveen N. Rao 				break;
10475f645996SJiong Wang 			}
1048156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGT | BPF_K:
104920dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLT | BPF_K:
1050156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGE | BPF_K:
105120dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLE | BPF_K:
10525f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGT | BPF_K:
10535f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLT | BPF_K:
10545f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGE | BPF_K:
10555f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLE | BPF_K:
10565f645996SJiong Wang 			{
10575f645996SJiong Wang 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
10585f645996SJiong Wang 
1059156d0e29SNaveen N. Rao 				/*
1060156d0e29SNaveen N. Rao 				 * signed comparison, so any 16-bit value
1061156d0e29SNaveen N. Rao 				 * can be used in cmpdi
1062156d0e29SNaveen N. Rao 				 */
10635f645996SJiong Wang 				if (imm >= -32768 && imm < 32768) {
10645f645996SJiong Wang 					if (is_jmp32)
10653a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
10665f645996SJiong Wang 					else
10673a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
10685f645996SJiong Wang 				} else {
1069156d0e29SNaveen N. Rao 					PPC_LI32(b2p[TMP_REG_1], imm);
10705f645996SJiong Wang 					if (is_jmp32)
10713a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPW(dst_reg,
10723a181237SBalamuruhan S 							 b2p[TMP_REG_1]));
10735f645996SJiong Wang 					else
10743a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPD(dst_reg,
10753a181237SBalamuruhan S 							 b2p[TMP_REG_1]));
1076156d0e29SNaveen N. Rao 				}
1077156d0e29SNaveen N. Rao 				break;
10785f645996SJiong Wang 			}
1079156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSET | BPF_K:
10805f645996SJiong Wang 			case BPF_JMP32 | BPF_JSET | BPF_K:
1081156d0e29SNaveen N. Rao 				/* andi does not sign-extend the immediate */
1082156d0e29SNaveen N. Rao 				if (imm >= 0 && imm < 32768)
1083156d0e29SNaveen N. Rao 					/* PPC_ANDI is _only/always_ dot-form */
10843a181237SBalamuruhan S 					EMIT(PPC_RAW_ANDI(b2p[TMP_REG_1], dst_reg, imm));
1085156d0e29SNaveen N. Rao 				else {
10865f645996SJiong Wang 					int tmp_reg = b2p[TMP_REG_1];
10875f645996SJiong Wang 
10885f645996SJiong Wang 					PPC_LI32(tmp_reg, imm);
10895f645996SJiong Wang 					if (BPF_CLASS(code) == BPF_JMP) {
10903a181237SBalamuruhan S 						EMIT(PPC_RAW_AND_DOT(tmp_reg, dst_reg,
10913a181237SBalamuruhan S 							    tmp_reg));
10925f645996SJiong Wang 					} else {
10933a181237SBalamuruhan S 						EMIT(PPC_RAW_AND(tmp_reg, dst_reg,
10943a181237SBalamuruhan S 							tmp_reg));
10953a181237SBalamuruhan S 						EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg,
10963a181237SBalamuruhan S 							       0, 0, 31));
10975f645996SJiong Wang 					}
1098156d0e29SNaveen N. Rao 				}
1099156d0e29SNaveen N. Rao 				break;
1100156d0e29SNaveen N. Rao 			}
1101156d0e29SNaveen N. Rao 			PPC_BCC(true_cond, addrs[i + 1 + off]);
1102156d0e29SNaveen N. Rao 			break;
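		/*
		 * All of the branches above only set cr0 (cmpld/cmplw for
		 * unsigned, cmpd/cmpw for signed, a dot-form and/rlwinm for
		 * JSET); PPC_BCC() then emits the conditional branch on
		 * true_cond, falling back to a branch-around sequence when
		 * the target is outside conditional-branch range.
		 */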
1103156d0e29SNaveen N. Rao 
1104156d0e29SNaveen N. Rao 		/*
1105ce076141SNaveen N. Rao 		 * Tail call
1106156d0e29SNaveen N. Rao 		 */
110771189fa9SAlexei Starovoitov 		case BPF_JMP | BPF_TAIL_CALL:
1108ce076141SNaveen N. Rao 			ctx->seen |= SEEN_TAILCALL;
11093832ba4eSNaveen N. Rao 			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
11103832ba4eSNaveen N. Rao 			if (ret < 0)
11113832ba4eSNaveen N. Rao 				return ret;
1112ce076141SNaveen N. Rao 			break;
1113156d0e29SNaveen N. Rao 
1114156d0e29SNaveen N. Rao 		default:
1115156d0e29SNaveen N. Rao 			/*
1116156d0e29SNaveen N. Rao 			 * The filter contains something cruel & unusual.
1117156d0e29SNaveen N. Rao 			 * We don't handle it, but also there shouldn't be
1118156d0e29SNaveen N. Rao 			 * anything missing from our list.
1119156d0e29SNaveen N. Rao 			 */
1120156d0e29SNaveen N. Rao 			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1121156d0e29SNaveen N. Rao 					code, i);
1122156d0e29SNaveen N. Rao 			return -ENOTSUPP;
1123156d0e29SNaveen N. Rao 		}
1124156d0e29SNaveen N. Rao 	}
1125156d0e29SNaveen N. Rao 
1126156d0e29SNaveen N. Rao 	/* Set end-of-body-code address for exit. */
1127156d0e29SNaveen N. Rao 	addrs[i] = ctx->idx * 4;
1128156d0e29SNaveen N. Rao 
1129156d0e29SNaveen N. Rao 	return 0;
1130156d0e29SNaveen N. Rao }
1131