// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit.h"

/*
 * Stack layout:
 * Ensure the top half (up to local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 5*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 16		|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */

/* for gpr non-volatile registers BPF_REG_6 to 10 */
#define BPF_PPC_STACK_SAVE	(5*8)
/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS	24
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
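/*
 * Worked example (illustrative, not ABI documentation): with the ELFv2
 * ABI's 32-byte minimum frame, BPF_PPC_STACKFRAME comes to
 * 32 + 24 + 40 = 96 bytes, preserving the quadword alignment noted above.
 */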

/* BPF register usage */
#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)

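/*
 * Note: the two temporaries are numbered past MAX_BPF_JIT_REG so that the
 * b2p[] mapping table below can carry them alongside the architectural BPF
 * registers without any index collision.
 */
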
/* BPF to ppc register mappings */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
	/* function return value */
	ctx->b2p[BPF_REG_0] = _R8;
	/* function arguments */
	ctx->b2p[BPF_REG_1] = _R3;
	ctx->b2p[BPF_REG_2] = _R4;
	ctx->b2p[BPF_REG_3] = _R5;
	ctx->b2p[BPF_REG_4] = _R6;
	ctx->b2p[BPF_REG_5] = _R7;
	/* non volatile registers */
	ctx->b2p[BPF_REG_6] = _R27;
	ctx->b2p[BPF_REG_7] = _R28;
	ctx->b2p[BPF_REG_8] = _R29;
	ctx->b2p[BPF_REG_9] = _R30;
	/* frame pointer aka BPF_REG_10 */
	ctx->b2p[BPF_REG_FP] = _R31;
	/* eBPF jit internal registers */
	ctx->b2p[BPF_REG_AX] = _R12;
	ctx->b2p[TMP_REG_1] = _R9;
	ctx->b2p[TMP_REG_2] = _R10;
}
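
/*
 * Rationale (inferred from the ELF ABI): BPF argument registers map
 * straight onto the ABI argument GPRs r3-r7, so helper calls need no
 * argument shuffling, while BPF_REG_6-9 and the frame pointer live in
 * non-volatile r27-r31 so their contents survive across those calls.
 */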

/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN		_R27

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 5*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 16
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 24);
}
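
/*
 * Sanity check on the redzone constant above: BPF_PPC_STACK_SAVE + 24 = 64,
 * i.e. local_tmp_var starts at -64(r1), with tail_call_cnt and the NVR save
 * area stacked above it, matching the diagram.
 */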

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 16;
}
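
/* tail_call_cnt sits immediately above the 16-byte local_tmp_var slot. */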

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}
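
/*
 * For illustration: with a stack frame, r31 is saved at frame_size - 8 and
 * r27 at frame_size - 40; without one, the same expression yields -8(r1)
 * down to -40(r1) in the redzone.
 */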

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
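	/*
	 * Intentionally empty on ppc64: the 32-bit JIT uses this hook to remap
	 * BPF registers onto volatile GPRs when no stack frame is needed, but
	 * the 64-bit mapping above is already final (our reading of the hook;
	 * compare bpf_jit_comp32.c).
	 */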
}

void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

#ifndef CONFIG_PPC_KERNEL_PCREL
	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
#endif

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
		/* this goes in the redzone */
		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
	} else {
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
		}

		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size));
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
			EMIT(PPC_RAW_MTLR(_R0));
		}
	}
}

void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));

	EMIT(PPC_RAW_BLR());
}

static int
bpf_jit_emit_func_call_hlp(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
	long reladdr;

	if (WARN_ON_ONCE(!kernel_text_address(func_addr)))
		return -EINVAL;

#ifdef CONFIG_PPC_KERNEL_PCREL
	reladdr = func_addr - local_paca->kernelbase;

	if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
		EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
		/* Align for subsequent prefix instruction */
		if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
			EMIT(PPC_RAW_NOP());
		/* paddi r12,r12,addr */
		EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
		EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
	} else {
		unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx);
		bool alignment_needed = !IS_ALIGNED(pc, 8);

		reladdr = func_addr - (alignment_needed ? pc + 4 : pc);

		if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
			if (alignment_needed)
				EMIT(PPC_RAW_NOP());
			/* pla r12,addr */
			EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
			EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
		} else {
			/* We can clobber r12 */
			PPC_LI64(_R12, func);
		}
	}
	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());
#else
	if (core_kernel_text(func_addr)) {
		reladdr = func_addr - kernel_toc_addr();
		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
			pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
			return -ERANGE;
		}

		EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
		EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
		EMIT(PPC_RAW_MTCTR(_R12));
		EMIT(PPC_RAW_BCTRL());
	} else {
		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) {
			/* func points to the function descriptor */
			PPC_LI64(bpf_to_ppc(TMP_REG_2), func);
			/* Load actual entry point from function descriptor */
			EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
			/* ... and move it to CTR */
			EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
			/*
			 * Load TOC from function descriptor at offset 8.
			 * We can clobber r2 since we get called through a
			 * function pointer (so caller will save/restore r2).
			 */
			EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
		} else {
			PPC_LI64(_R12, func);
			EMIT(PPC_RAW_MTCTR(_R12));
		}
		EMIT(PPC_RAW_BCTRL());
		/*
		 * Load r2 with kernel TOC as kernel TOC is used if function address falls
		 * within core kernel text.
		 */
		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
	}
#endif

	return 0;
}

int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	if (WARN_ON_ONCE(func && is_module_text_address(func)))
		return -EINVAL;

	/* skip past descriptor if elf v1 */
	func += FUNCTION_DESCR_SIZE;

	/* Load function address into r12 */
	PPC_LI64(_R12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to increase,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
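	/* e.g. a worst-case PPC_LI64() expansion: lis; ori; sldi; oris; ori */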
	if (!image)
		for (i = ctx->idx - ctx_idx; i < 5; i++)
			EMIT(PPC_RAW_NOP());

	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());

	return 0;
}

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
	int b2p_index = bpf_to_ppc(BPF_REG_3);
	int bpf_tailcall_prologue_size = 8;

	if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		bpf_tailcall_prologue_size += 4; /* skip past the toc load */
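	/*
	 * The 8 bytes skipped are the two tail_call_cnt setup instructions
	 * (or their NOP stand-ins) at the top of bpf_jit_build_prologue();
	 * the extra 4 bytes on ELFv2 skip the kernel TOC load, since r2 is
	 * already valid when we arrive here via a tail call.
	 */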

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * tail_call_cnt++;
	 */
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
	PPC_BCC_SHORT(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}

/*
 * We spill into the redzone always, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
"		.global bpf_stf_barrier		;"
"	bpf_stf_barrier:			;"
"		std	21,-64(1)		;"
"		std	22,-56(1)		;"
"		sync				;"
"		ld	21,-64(1)		;"
"		ld	22,-56(1)		;"
"		ori	31,31,0			;"
"		.rept 14			;"
"		b	1f			;"
"	1:					;"
"		.endr				;"
"		blr				;"
);
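
/*
 * A reading of the sequence above: the stores spill r21/r22 into the
 * local_tmp_var slots at -64(r1)/-56(r1), 'sync' plus the reloads and the
 * 'ori 31,31,0' speculation barrier drain prior stores, and the fourteen
 * branch-to-next pairs pad out the speculation window, mirroring the
 * kernel's generic STF fallback barrier.
 */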

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
		       u32 *addrs, int pass, bool extra_pass)
{
	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
		u32 size = BPF_SIZE(code);
		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
		u32 save_reg, ret_reg;
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u64 imm64;
		u32 true_cond;
		u32 tmp_idx;
		int j;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			} else {
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm > -32768 && imm <= 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
			} else {
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			else
				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				PPC_LI32(tmp1_reg, imm);
				if (BPF_CLASS(code) == BPF_ALU)
					EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
				else
					EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else
				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else
				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			if (imm == 1) {
				if (BPF_OP(code) == BPF_DIV) {
					goto bpf_alu32_trunc;
				} else {
					EMIT(PPC_RAW_LI(dst_reg, 0));
					break;
				}
			}

			PPC_LI32(tmp1_reg, imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
				} else
					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
				} else
					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			/* skip zero extension move, but set address map. */
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (imm == 1) {
				/* special mov32 for zext */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
				break;
			}
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			else if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
				/* Rotate 8 bits right & insert LSB to reg */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
				/* Move result back to dst_reg */
				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
				break;
			case 64:
				/* Store the value to stack and then use byte-reverse loads */
				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
				EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
					EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
				} else {
					EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
					EMIT(PPC_RAW_LI(tmp2_reg, 4));
					EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
						EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
					EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
				}
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
				if (insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
				break;
			case 32:
				if (!fp->aux->verifier_zext)
					/* zero-extend 32 bits into 64 bits */
					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
				break;

			switch (stf_barrier) {
			case STF_BARRIER_EIEIO:
				EMIT(PPC_RAW_EIEIO() | 0x02000000);
				break;
			case STF_BARRIER_SYNC_ORI:
				EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
				break;
			case STF_BARRIER_FALLBACK:
				ctx->seen |= SEEN_FUNC;
				PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
				EMIT(PPC_RAW_MTCTR(_R12));
				EMIT(PPC_RAW_BCTRL());
				break;
			case STF_BARRIER_NONE:
				break;
			}
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(tmp1_reg, imm));
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(tmp1_reg, imm));
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(tmp1_reg, imm);
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(tmp1_reg, imm);
				src_reg = tmp1_reg;
			}
			if (off % 4) {
				EMIT(PPC_RAW_LI(tmp2_reg, off));
				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
			} else {
				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
			}
			break;

		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			save_reg = tmp2_reg;
			ret_reg = src_reg;

			/* Get offset into TMP_REG_1 */
			EMIT(PPC_RAW_LI(tmp1_reg, off));
			/*
			 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
			 * before and after the operation.
			 *
			 * This is a requirement in the Linux Kernel Memory Model.
			 * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
			 */
			if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
				EMIT(PPC_RAW_SYNC());
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			if (size == BPF_DW)
				EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
			else
				EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));

			/* Save old value in _R0 */
			if (imm & BPF_FETCH)
				EMIT(PPC_RAW_MR(_R0, tmp2_reg));

			switch (imm) {
			case BPF_ADD:
			case BPF_ADD | BPF_FETCH:
				EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_AND:
			case BPF_AND | BPF_FETCH:
				EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_OR:
			case BPF_OR | BPF_FETCH:
				EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_XOR:
			case BPF_XOR | BPF_FETCH:
				EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_CMPXCHG:
				/*
				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
				 * in src_reg for other cases.
				 */
				ret_reg = bpf_to_ppc(BPF_REG_0);

				/* Compare with old value in BPF_R0 */
				if (size == BPF_DW)
					EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
				else
					EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
				/* Don't set if different from old value */
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
				fallthrough;
			case BPF_XCHG:
				save_reg = src_reg;
				break;
			default:
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -EOPNOTSUPP;
			}

			/* store new value */
			if (size == BPF_DW)
				EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
			else
				EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);

			if (imm & BPF_FETCH) {
				/* Emit 'sync' to enforce full ordering */
				if (IS_ENABLED(CONFIG_SMP))
					EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_MR(ret_reg, _R0));
				/*
				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
				 * For context, see commit 39491867ace5.
				 */
				if (size != BPF_DW && imm == BPF_CMPXCHG &&
				    insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
			}
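			/*
			 * Net effect (a reading of the loop above): the
			 * larx/stcx. pair retries from tmp_idx until the
			 * store-conditional succeeds, which is what makes
			 * the read-modify-write atomic.
			 */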
			break;

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
					PPC_LI64(tmp2_reg, 0x8000000000000000ul);
				else /* BOOK3S_64 */
					PPC_LI64(tmp2_reg, PAGE_OFFSET);
				EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
				PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * Check if 'off' is word aligned for BPF_DW, because
				 * we might generate two instructions.
				 */
				if (BPF_SIZE(code) == BPF_DW && (off & 3))
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}
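			/*
			 * Reading the branch targets above: a kernel address
			 * branches past the 'li dst_reg, 0' and the jump so
			 * the real load below executes; anything else clears
			 * dst_reg and jumps past that load (one instruction,
			 * or two for a misaligned BPF_DW).
			 */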
9739c70c714SRavi Bangoria 
974efa95f03SHari Bathini 			switch (size) {
975efa95f03SHari Bathini 			case BPF_B:
976efa95f03SHari Bathini 				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
977efa95f03SHari Bathini 				break;
978efa95f03SHari Bathini 			case BPF_H:
979efa95f03SHari Bathini 				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
980efa95f03SHari Bathini 				break;
981efa95f03SHari Bathini 			case BPF_W:
982efa95f03SHari Bathini 				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
983efa95f03SHari Bathini 				break;
984efa95f03SHari Bathini 			case BPF_DW:
985794abc08SNaveen N. Rao 				if (off % 4) {
9863a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_LI(tmp1_reg, off));
9873a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
988794abc08SNaveen N. Rao 				} else {
989794abc08SNaveen N. Rao 					EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
990794abc08SNaveen N. Rao 				}
991156d0e29SNaveen N. Rao 				break;
992efa95f03SHari Bathini 			}
993efa95f03SHari Bathini 
994efa95f03SHari Bathini 			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
995efa95f03SHari Bathini 				addrs[++i] = ctx->idx * 4;
996983bdc02SRavi Bangoria 
997983bdc02SRavi Bangoria 			if (BPF_MODE(code) == BPF_PROBE_MEM) {
99890d862f3SHari Bathini 				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
99990d862f3SHari Bathini 							    ctx->idx - 1, 4, dst_reg);
1000983bdc02SRavi Bangoria 				if (ret)
1001983bdc02SRavi Bangoria 					return ret;
1002983bdc02SRavi Bangoria 			}
1003efa95f03SHari Bathini 			break;
1004156d0e29SNaveen N. Rao 
1005156d0e29SNaveen N. Rao 		/*
1006156d0e29SNaveen N. Rao 		 * Doubleword load
1007156d0e29SNaveen N. Rao 		 * A 16-byte instruction that occupies two 'struct bpf_insn' slots
1008156d0e29SNaveen N. Rao 		 */
1009156d0e29SNaveen N. Rao 		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
1010156d0e29SNaveen N. Rao 			imm64 = ((u64)(u32) insn[i].imm) |
1011156d0e29SNaveen N. Rao 				    (((u64)(u32) insn[i+1].imm) << 32);
1012f9320c49SNaveen N. Rao 			tmp_idx = ctx->idx;
1013f9320c49SNaveen N. Rao 			PPC_LI64(dst_reg, imm64);
1014f9320c49SNaveen N. Rao 			/* padding to allow full 5 instructions for later patching */
1015d3921cbbSChristophe Leroy 			if (!image)
1016f9320c49SNaveen N. Rao 				for (j = ctx->idx - tmp_idx; j < 5; j++)
1017f9320c49SNaveen N. Rao 					EMIT(PPC_RAW_NOP());
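			/*
			 * Rough worst case of what PPC_LI64() expands to (see bpf_jit.h)
			 * when every 16-bit chunk of imm64 is non-zero:
			 *
			 *   lis   rD, imm64[63:48]
			 *   ori   rD, rD, imm64[47:32]
			 *   sldi  rD, rD, 32
			 *   oris  rD, rD, imm64[31:16]
			 *   ori   rD, rD, imm64[15:0]
			 *
			 * Smaller values need fewer instructions, so the sizing pass
			 * (!image) pads to the full 5 slots to keep offsets stable for
			 * later patching.
			 */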
1018156d0e29SNaveen N. Rao 			/* Adjust for two bpf instructions */
1019156d0e29SNaveen N. Rao 			addrs[++i] = ctx->idx * 4;
1020156d0e29SNaveen N. Rao 			break;
1021156d0e29SNaveen N. Rao 
1022156d0e29SNaveen N. Rao 		/*
1023156d0e29SNaveen N. Rao 		 * Return/Exit
1024156d0e29SNaveen N. Rao 		 */
1025156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_EXIT:
1026156d0e29SNaveen N. Rao 			/*
1027156d0e29SNaveen N. Rao 			 * If this isn't the very last instruction, branch to
1028156d0e29SNaveen N. Rao 			 * the epilogue. If we _are_ the last instruction,
1029156d0e29SNaveen N. Rao 			 * we'll just fall through to the epilogue.
1030156d0e29SNaveen N. Rao 			 */
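			/*
			 * Roughly, bpf_jit_emit_exit_insn() (bpf_jit_comp.c) emits a
			 * branch to the common epilogue at exit_addr; for programs so
			 * large that the epilogue is out of branch range, it falls back
			 * to generating an alternate exit sequence inline.
			 */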
10310ffdbce6SNaveen N. Rao 			if (i != flen - 1) {
10323a3fc9bfSJordan Niethe 				ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
10330ffdbce6SNaveen N. Rao 				if (ret)
10340ffdbce6SNaveen N. Rao 					return ret;
10350ffdbce6SNaveen N. Rao 			}
1036156d0e29SNaveen N. Rao 			/* else fall through to the epilogue */
1037156d0e29SNaveen N. Rao 			break;
1038156d0e29SNaveen N. Rao 
1039156d0e29SNaveen N. Rao 		/*
10408484ce83SSandipan Das 		 * Call kernel helper or bpf function
1041156d0e29SNaveen N. Rao 		 */
1042156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_CALL:
1043156d0e29SNaveen N. Rao 			ctx->seen |= SEEN_FUNC;
10448484ce83SSandipan Das 
104585e03115SChristophe Leroy 			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
1046e2c95a61SDaniel Borkmann 						    &func_addr, &func_addr_fixed);
1047e2c95a61SDaniel Borkmann 			if (ret < 0)
1048e2c95a61SDaniel Borkmann 				return ret;
1049156d0e29SNaveen N. Rao 
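			/*
			 * Roughly: a helper with an address that is known and fixed at
			 * JIT time (func_addr_fixed) is called through its fixed entry
			 * point, while a bpf-to-bpf call target may only resolve on the
			 * extra pass and is emitted as a patchable relative call.
			 */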
1050e2c95a61SDaniel Borkmann 			if (func_addr_fixed)
10512ecfe59cSHari Bathini 				ret = bpf_jit_emit_func_call_hlp(image, fimage, ctx, func_addr);
1052e2c95a61SDaniel Borkmann 			else
105390d862f3SHari Bathini 				ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
105443d636f8SNaveen N. Rao 
105543d636f8SNaveen N. Rao 			if (ret)
105643d636f8SNaveen N. Rao 				return ret;
105743d636f8SNaveen N. Rao 
1058156d0e29SNaveen N. Rao 			/* move return value from r3 to BPF_REG_0 */
105949c3af43SNaveen N. Rao 			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
1060156d0e29SNaveen N. Rao 			break;
1061156d0e29SNaveen N. Rao 
1062156d0e29SNaveen N. Rao 		/*
1063156d0e29SNaveen N. Rao 		 * Jumps and branches
1064156d0e29SNaveen N. Rao 		 */
1065156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JA:
1066156d0e29SNaveen N. Rao 			PPC_JMP(addrs[i + 1 + off]);
1067156d0e29SNaveen N. Rao 			break;
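		/*
		 * BPF_JMP32 | BPF_JA ("gotol") carries its displacement in the
		 * 32-bit imm field rather than the 16-bit off field, allowing
		 * branch targets beyond +/-32K BPF instructions.
		 */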
1068*3c086ce2SArtem Savkov 		case BPF_JMP32 | BPF_JA:
1069*3c086ce2SArtem Savkov 			PPC_JMP(addrs[i + 1 + imm]);
1070*3c086ce2SArtem Savkov 			break;
1071156d0e29SNaveen N. Rao 
1072156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGT | BPF_K:
1073156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGT | BPF_X:
1074156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGT | BPF_K:
1075156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGT | BPF_X:
10765f645996SJiong Wang 		case BPF_JMP32 | BPF_JGT | BPF_K:
10775f645996SJiong Wang 		case BPF_JMP32 | BPF_JGT | BPF_X:
10785f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGT | BPF_K:
10795f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGT | BPF_X:
1080156d0e29SNaveen N. Rao 			true_cond = COND_GT;
1081156d0e29SNaveen N. Rao 			goto cond_branch;
108220dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLT | BPF_K:
108320dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLT | BPF_X:
108420dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLT | BPF_K:
108520dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLT | BPF_X:
10865f645996SJiong Wang 		case BPF_JMP32 | BPF_JLT | BPF_K:
10875f645996SJiong Wang 		case BPF_JMP32 | BPF_JLT | BPF_X:
10885f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLT | BPF_K:
10895f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLT | BPF_X:
109020dbf5ccSDaniel Borkmann 			true_cond = COND_LT;
109120dbf5ccSDaniel Borkmann 			goto cond_branch;
1092156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGE | BPF_K:
1093156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGE | BPF_X:
1094156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGE | BPF_K:
1095156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGE | BPF_X:
10965f645996SJiong Wang 		case BPF_JMP32 | BPF_JGE | BPF_K:
10975f645996SJiong Wang 		case BPF_JMP32 | BPF_JGE | BPF_X:
10985f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGE | BPF_K:
10995f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGE | BPF_X:
1100156d0e29SNaveen N. Rao 			true_cond = COND_GE;
1101156d0e29SNaveen N. Rao 			goto cond_branch;
110220dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLE | BPF_K:
110320dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLE | BPF_X:
110420dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLE | BPF_K:
110520dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLE | BPF_X:
11065f645996SJiong Wang 		case BPF_JMP32 | BPF_JLE | BPF_K:
11075f645996SJiong Wang 		case BPF_JMP32 | BPF_JLE | BPF_X:
11085f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLE | BPF_K:
11095f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLE | BPF_X:
111020dbf5ccSDaniel Borkmann 			true_cond = COND_LE;
111120dbf5ccSDaniel Borkmann 			goto cond_branch;
1112156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JEQ | BPF_K:
1113156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JEQ | BPF_X:
11145f645996SJiong Wang 		case BPF_JMP32 | BPF_JEQ | BPF_K:
11155f645996SJiong Wang 		case BPF_JMP32 | BPF_JEQ | BPF_X:
1116156d0e29SNaveen N. Rao 			true_cond = COND_EQ;
1117156d0e29SNaveen N. Rao 			goto cond_branch;
1118156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JNE | BPF_K:
1119156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JNE | BPF_X:
11205f645996SJiong Wang 		case BPF_JMP32 | BPF_JNE | BPF_K:
11215f645996SJiong Wang 		case BPF_JMP32 | BPF_JNE | BPF_X:
1122156d0e29SNaveen N. Rao 			true_cond = COND_NE;
1123156d0e29SNaveen N. Rao 			goto cond_branch;
1124156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSET | BPF_K:
1125156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSET | BPF_X:
11265f645996SJiong Wang 		case BPF_JMP32 | BPF_JSET | BPF_K:
11275f645996SJiong Wang 		case BPF_JMP32 | BPF_JSET | BPF_X:
1128156d0e29SNaveen N. Rao 			true_cond = COND_NE;
1129156d0e29SNaveen N. Rao 			/* Fall through */
1130156d0e29SNaveen N. Rao 
1131156d0e29SNaveen N. Rao cond_branch:
1132156d0e29SNaveen N. Rao 			switch (code) {
1133156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGT | BPF_X:
113420dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLT | BPF_X:
1135156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGE | BPF_X:
113620dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLE | BPF_X:
1137156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JEQ | BPF_X:
1138156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JNE | BPF_X:
11395f645996SJiong Wang 			case BPF_JMP32 | BPF_JGT | BPF_X:
11405f645996SJiong Wang 			case BPF_JMP32 | BPF_JLT | BPF_X:
11415f645996SJiong Wang 			case BPF_JMP32 | BPF_JGE | BPF_X:
11425f645996SJiong Wang 			case BPF_JMP32 | BPF_JLE | BPF_X:
11435f645996SJiong Wang 			case BPF_JMP32 | BPF_JEQ | BPF_X:
11445f645996SJiong Wang 			case BPF_JMP32 | BPF_JNE | BPF_X:
1145156d0e29SNaveen N. Rao 				/* unsigned comparison */
11465f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP32)
11473a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
11485f645996SJiong Wang 				else
11493a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
1150156d0e29SNaveen N. Rao 				break;
1151156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGT | BPF_X:
115220dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLT | BPF_X:
1153156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGE | BPF_X:
115420dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLE | BPF_X:
11555f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGT | BPF_X:
11565f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLT | BPF_X:
11575f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGE | BPF_X:
11585f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLE | BPF_X:
1159156d0e29SNaveen N. Rao 				/* signed comparison */
11605f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP32)
11613a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
11625f645996SJiong Wang 				else
11633a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
1164156d0e29SNaveen N. Rao 				break;
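			/*
			 * For the JMP32 variant below, a plain and. would set CR0 from
			 * all 64 bits; rlwinm. with sh=0, mb=0, me=31 keeps just the
			 * low 32 bits (zero-extended) and its record form sets CR0 from
			 * that 32-bit result.
			 */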
1165156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSET | BPF_X:
11665f645996SJiong Wang 			case BPF_JMP32 | BPF_JSET | BPF_X:
11675f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP) {
11683a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
11695f645996SJiong Wang 				} else {
11703a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
11713a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
11725f645996SJiong Wang 				}
1173156d0e29SNaveen N. Rao 				break;
1174156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JNE | BPF_K:
1175156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JEQ | BPF_K:
1176156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGT | BPF_K:
117720dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLT | BPF_K:
1178156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGE | BPF_K:
117920dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLE | BPF_K:
11805f645996SJiong Wang 			case BPF_JMP32 | BPF_JNE | BPF_K:
11815f645996SJiong Wang 			case BPF_JMP32 | BPF_JEQ | BPF_K:
11825f645996SJiong Wang 			case BPF_JMP32 | BPF_JGT | BPF_K:
11835f645996SJiong Wang 			case BPF_JMP32 | BPF_JLT | BPF_K:
11845f645996SJiong Wang 			case BPF_JMP32 | BPF_JGE | BPF_K:
11855f645996SJiong Wang 			case BPF_JMP32 | BPF_JLE | BPF_K:
11865f645996SJiong Wang 			{
11875f645996SJiong Wang 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
11885f645996SJiong Wang 
1189156d0e29SNaveen N. Rao 				/*
1190156d0e29SNaveen N. Rao 				 * The BPF imm is signed but cmpldi zero-extends its 16-bit
1191156d0e29SNaveen N. Rao 				 * field, so only positive values can be used as imm in cmpldi
1192156d0e29SNaveen N. Rao 				 */
11935f645996SJiong Wang 				if (imm >= 0 && imm < 32768) {
11945f645996SJiong Wang 					if (is_jmp32)
11953a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
11965f645996SJiong Wang 					else
11973a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
11985f645996SJiong Wang 				} else {
1199156d0e29SNaveen N. Rao 					/* sign-extending load */
12003a3fc9bfSJordan Niethe 					PPC_LI32(tmp1_reg, imm);
1201156d0e29SNaveen N. Rao 					/* ... but unsigned comparison */
12025f645996SJiong Wang 					if (is_jmp32)
12033a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
12045f645996SJiong Wang 					else
12053a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
1206156d0e29SNaveen N. Rao 				}
1207156d0e29SNaveen N. Rao 				break;
12085f645996SJiong Wang 			}
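			/*
			 * By contrast with cmpldi above, cmpdi sign-extends its 16-bit
			 * immediate, so the signed cases below can encode the whole
			 * [-32768, 32767] range directly.
			 */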
1209156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGT | BPF_K:
121020dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLT | BPF_K:
1211156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGE | BPF_K:
121220dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLE | BPF_K:
12135f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGT | BPF_K:
12145f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLT | BPF_K:
12155f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGE | BPF_K:
12165f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLE | BPF_K:
12175f645996SJiong Wang 			{
12185f645996SJiong Wang 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
12195f645996SJiong Wang 
1220156d0e29SNaveen N. Rao 				/*
1221156d0e29SNaveen N. Rao 				 * signed comparison: cmpdi sign-extends its immediate,
1222156d0e29SNaveen N. Rao 				 * so any 16-bit signed value can be used directly
1223156d0e29SNaveen N. Rao 				 */
12245f645996SJiong Wang 				if (imm >= -32768 && imm < 32768) {
12255f645996SJiong Wang 					if (is_jmp32)
12263a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
12275f645996SJiong Wang 					else
12283a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
12295f645996SJiong Wang 				} else {
12303a3fc9bfSJordan Niethe 					PPC_LI32(tmp1_reg, imm);
12315f645996SJiong Wang 					if (is_jmp32)
12323a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
12335f645996SJiong Wang 					else
12343a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
1235156d0e29SNaveen N. Rao 				}
1236156d0e29SNaveen N. Rao 				break;
12375f645996SJiong Wang 			}
1238156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSET | BPF_K:
12395f645996SJiong Wang 			case BPF_JMP32 | BPF_JSET | BPF_K:
1240156d0e29SNaveen N. Rao 				/* andi does not sign-extend the immediate */
1241156d0e29SNaveen N. Rao 				if (imm >= 0 && imm < 32768)
1242156d0e29SNaveen N. Rao 					/* PPC_ANDI is _only/always_ dot-form */
12433a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
1244156d0e29SNaveen N. Rao 				else {
12453a3fc9bfSJordan Niethe 					PPC_LI32(tmp1_reg, imm);
12465f645996SJiong Wang 					if (BPF_CLASS(code) == BPF_JMP) {
12473a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
12483a3fc9bfSJordan Niethe 								     tmp1_reg));
12495f645996SJiong Wang 					} else {
12503a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
12513a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
12523a181237SBalamuruhan S 									0, 0, 31));
12535f645996SJiong Wang 					}
1254156d0e29SNaveen N. Rao 				}
1255156d0e29SNaveen N. Rao 				break;
1256156d0e29SNaveen N. Rao 			}
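			/*
			 * PPC_BCC() (bpf_jit.h) copes with the short reach of conditional
			 * branches: roughly, a target within the +/-32KB 'bc' displacement
			 * gets a direct conditional branch, otherwise the condition is
			 * inverted to hop over an unconditional branch with +/-32MB reach.
			 */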
1257156d0e29SNaveen N. Rao 			PPC_BCC(true_cond, addrs[i + 1 + off]);
1258156d0e29SNaveen N. Rao 			break;
1259156d0e29SNaveen N. Rao 
1260156d0e29SNaveen N. Rao 		/*
1261ce076141SNaveen N. Rao 		 * Tail call
1262156d0e29SNaveen N. Rao 		 */
126371189fa9SAlexei Starovoitov 		case BPF_JMP | BPF_TAIL_CALL:
1264ce076141SNaveen N. Rao 			ctx->seen |= SEEN_TAILCALL;
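			/*
			 * Sketch of the semantics: the emitted sequence bounds-checks the
			 * prog_array index, enforces the tail-call count limit kept on the
			 * stack frame, and on success jumps into the target program past
			 * its prologue; on failure it falls through to addrs[i + 1].
			 */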
12653832ba4eSNaveen N. Rao 			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
12663832ba4eSNaveen N. Rao 			if (ret < 0)
12673832ba4eSNaveen N. Rao 				return ret;
1268ce076141SNaveen N. Rao 			break;
1269156d0e29SNaveen N. Rao 
1270156d0e29SNaveen N. Rao 		default:
1271156d0e29SNaveen N. Rao 			/*
1272156d0e29SNaveen N. Rao 			 * The filter contains something cruel & unusual.
1273156d0e29SNaveen N. Rao 			 * We don't handle it, but also there shouldn't be
1274156d0e29SNaveen N. Rao 			 * anything missing from our list.
1275156d0e29SNaveen N. Rao 			 */
1276156d0e29SNaveen N. Rao 			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1277156d0e29SNaveen N. Rao 					code, i);
1278156d0e29SNaveen N. Rao 			return -ENOTSUPP;
1279156d0e29SNaveen N. Rao 		}
1280156d0e29SNaveen N. Rao 	}
1281156d0e29SNaveen N. Rao 
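	/*
	 * addrs[] maps each BPF instruction index to the byte offset of its
	 * generated code; this extra slot makes branches targeting "one past
	 * the last instruction" resolve to the epilogue.
	 */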
1282156d0e29SNaveen N. Rao 	/* Set end-of-body-code address for exit. */
1283156d0e29SNaveen N. Rao 	addrs[i] = ctx->idx * 4;
1284156d0e29SNaveen N. Rao 
1285156d0e29SNaveen N. Rao 	return 0;
1286156d0e29SNaveen N. Rao }
1287