xref: /linux/arch/powerpc/net/bpf_jit_comp64.c (revision 717756c9c8ddad9f28389185bfb161d4d88e01a4)
1b886d83cSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2156d0e29SNaveen N. Rao /*
3156d0e29SNaveen N. Rao  * bpf_jit_comp64.c: eBPF JIT compiler
4156d0e29SNaveen N. Rao  *
5156d0e29SNaveen N. Rao  * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
6156d0e29SNaveen N. Rao  *		  IBM Corporation
7156d0e29SNaveen N. Rao  *
8156d0e29SNaveen N. Rao  * Based on the powerpc classic BPF JIT compiler by Matt Evans
9156d0e29SNaveen N. Rao  */
10156d0e29SNaveen N. Rao #include <linux/moduleloader.h>
11156d0e29SNaveen N. Rao #include <asm/cacheflush.h>
12ec0c464cSChristophe Leroy #include <asm/asm-compat.h>
13156d0e29SNaveen N. Rao #include <linux/netdevice.h>
14156d0e29SNaveen N. Rao #include <linux/filter.h>
15156d0e29SNaveen N. Rao #include <linux/if_vlan.h>
16156d0e29SNaveen N. Rao #include <asm/kprobes.h>
17ce076141SNaveen N. Rao #include <linux/bpf.h>
18b7540d62SNaveen N. Rao #include <asm/security_features.h>
19156d0e29SNaveen N. Rao 
20576a6c3aSNaveen N. Rao #include "bpf_jit.h"
21576a6c3aSNaveen N. Rao 
22576a6c3aSNaveen N. Rao /*
23576a6c3aSNaveen N. Rao  * Stack layout:
24576a6c3aSNaveen N. Rao  * Ensure the top half (up to local_tmp_var) stays consistent
25576a6c3aSNaveen N. Rao  * with our redzone usage.
26576a6c3aSNaveen N. Rao  *
27576a6c3aSNaveen N. Rao  *		[	prev sp		] <-------------
28576a6c3aSNaveen N. Rao  *		[   nv gpr save area	] 5*8		|
29576a6c3aSNaveen N. Rao  *		[    tail_call_cnt	] 8		|
30576a6c3aSNaveen N. Rao  *		[    local_tmp_var	] 16		|
31576a6c3aSNaveen N. Rao  * fp (r31) -->	[   ebpf stack space	] upto 512	|
32576a6c3aSNaveen N. Rao  *		[     frame header	] 32/112	|
33576a6c3aSNaveen N. Rao  * sp (r1) --->	[    stack pointer	] --------------
34576a6c3aSNaveen N. Rao  */
35576a6c3aSNaveen N. Rao 
36576a6c3aSNaveen N. Rao /* for gpr non volatile registers BPF_REG_6 to 10 */
37576a6c3aSNaveen N. Rao #define BPF_PPC_STACK_SAVE	(5*8)
38576a6c3aSNaveen N. Rao /* for bpf JIT code internal usage: 16 bytes local_tmp_var + 8 bytes tail_call_cnt */
39576a6c3aSNaveen N. Rao #define BPF_PPC_STACK_LOCALS	24
40576a6c3aSNaveen N. Rao /* stack frame excluding BPF stack, ensure this is quadword aligned */
41576a6c3aSNaveen N. Rao #define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
42576a6c3aSNaveen N. Rao 				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
43576a6c3aSNaveen N. Rao 
44576a6c3aSNaveen N. Rao /* BPF register usage: JIT-internal scratch registers beyond the BPF register set */
45576a6c3aSNaveen N. Rao #define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
46576a6c3aSNaveen N. Rao #define TMP_REG_2	(MAX_BPF_JIT_REG + 1)
47576a6c3aSNaveen N. Rao 
48576a6c3aSNaveen N. Rao /* BPF to ppc register mappings */
4949c3af43SNaveen N. Rao void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
5049c3af43SNaveen N. Rao {
51576a6c3aSNaveen N. Rao 	/* function return value */
5249c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_0] = _R8;
53576a6c3aSNaveen N. Rao 	/* function arguments */
5449c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_1] = _R3;
5549c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_2] = _R4;
5649c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_3] = _R5;
5749c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_4] = _R6;
5849c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_5] = _R7;
59576a6c3aSNaveen N. Rao 	/* non volatile registers */
6049c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_6] = _R27;
6149c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_7] = _R28;
6249c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_8] = _R29;
6349c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_9] = _R30;
64576a6c3aSNaveen N. Rao 	/* frame pointer aka BPF_REG_10 */
6549c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_FP] = _R31;
66576a6c3aSNaveen N. Rao 	/* eBPF jit internal registers */
6749c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_AX] = _R12;
6849c3af43SNaveen N. Rao 	ctx->b2p[TMP_REG_1] = _R9;
6949c3af43SNaveen N. Rao 	ctx->b2p[TMP_REG_2] = _R10;
7049c3af43SNaveen N. Rao }
71576a6c3aSNaveen N. Rao 
72576a6c3aSNaveen N. Rao /* PPC NVR range (lowest non-volatile GPR the JIT saves/restores) -- update this if we ever use NVRs below r27 */
73036d559cSNaveen N. Rao #define BPF_PPC_NVR_MIN		_R27
74156d0e29SNaveen N. Rao 
75156d0e29SNaveen N. Rao static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
76156d0e29SNaveen N. Rao {
77156d0e29SNaveen N. Rao 	/*
78156d0e29SNaveen N. Rao 	 * We only need a stack frame if:
79156d0e29SNaveen N. Rao 	 * - we call other functions (kernel helpers), or
80156d0e29SNaveen N. Rao 	 * - the bpf program uses its stack area
81156d0e29SNaveen N. Rao 	 * The latter condition is deduced from the usage of BPF_REG_FP
82156d0e29SNaveen N. Rao 	 */
8349c3af43SNaveen N. Rao 	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
84156d0e29SNaveen N. Rao }
85156d0e29SNaveen N. Rao 
867b847f52SNaveen N. Rao /*
877b847f52SNaveen N. Rao  * When not setting up our own stackframe, the redzone usage is:
887b847f52SNaveen N. Rao  *
897b847f52SNaveen N. Rao  *		[	prev sp		] <-------------
907b847f52SNaveen N. Rao  *		[	  ...       	] 		|
917b847f52SNaveen N. Rao  * sp (r1) --->	[    stack pointer	] --------------
92b7540d62SNaveen N. Rao  *		[   nv gpr save area	] 5*8
937b847f52SNaveen N. Rao  *		[    tail_call_cnt	] 8
94b7540d62SNaveen N. Rao  *		[    local_tmp_var	] 16
957b847f52SNaveen N. Rao  *		[   unused red zone	] 208 bytes protected
967b847f52SNaveen N. Rao  */
977b847f52SNaveen N. Rao static int bpf_jit_stack_local(struct codegen_context *ctx)
987b847f52SNaveen N. Rao {
997b847f52SNaveen N. Rao 	if (bpf_has_stack_frame(ctx))
100ac0761ebSSandipan Das 		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
1017b847f52SNaveen N. Rao 	else
102b7540d62SNaveen N. Rao 		return -(BPF_PPC_STACK_SAVE + 24);
1037b847f52SNaveen N. Rao }
1047b847f52SNaveen N. Rao 
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	/* tail_call_cnt lives 16 bytes (local_tmp_var) above the locals area */
	int locals = bpf_jit_stack_local(ctx);

	return locals + 16;
}
109ce076141SNaveen N. Rao 
1107b847f52SNaveen N. Rao static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
1117b847f52SNaveen N. Rao {
1127b847f52SNaveen N. Rao 	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
113ac0761ebSSandipan Das 		return (bpf_has_stack_frame(ctx) ?
114ac0761ebSSandipan Das 			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
1157b847f52SNaveen N. Rao 				- (8 * (32 - reg));
1167b847f52SNaveen N. Rao 
1177b847f52SNaveen N. Rao 	pr_err("BPF JIT is asking about unknown registers");
1187b847f52SNaveen N. Rao 	BUG();
1197b847f52SNaveen N. Rao }
1207b847f52SNaveen N. Rao 
/*
 * Intentionally a no-op on ppc64: the register mapping set up by
 * bpf_jit_init_reg_mapping() is used as-is, with no remapping pass.
 * (Presumably the 32-bit JIT uses this hook to remap registers --
 * see the ppc32 counterpart to confirm.)
 */
12140272035SChristophe Leroy void bpf_jit_realloc_regs(struct codegen_context *ctx)
12240272035SChristophe Leroy {
12340272035SChristophe Leroy }
12440272035SChristophe Leroy 
/*
 * Emit the JITed program's prologue. Instruction order matters: tail calls
 * enter past the first instructions (see bpf_tailcall_prologue_size in
 * bpf_jit_emit_tail_call()), so the TOC load and tail_call_cnt setup must
 * stay at fixed positions. Steps:
 *  1. reload kernel TOC into r2 (ELFv2 only; skipped under PCREL),
 *  2. zero tail_call_cnt in the redzone, or emit two NOPs so a tail call
 *     can skip the initialization,
 *  3. set up a stack frame if needed (saving LR only when helpers are
 *     called),
 *  4. save the used non-volatile GPRs and set up the BPF frame pointer.
 */
1254ea76e90SChristophe Leroy void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
126156d0e29SNaveen N. Rao {
127156d0e29SNaveen N. Rao 	int i;
128156d0e29SNaveen N. Rao 
1297e3a68beSNicholas Piggin #ifndef CONFIG_PPC_KERNEL_PCREL
1305b89492cSChristophe Leroy 	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
131391c271fSNaveen N. Rao 		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
1327e3a68beSNicholas Piggin #endif
133b10cb163SNaveen N. Rao 
134ce076141SNaveen N. Rao 	/*
135ce076141SNaveen N. Rao 	 * Initialize tail_call_cnt if we do tail calls.
136ce076141SNaveen N. Rao 	 * Otherwise, put in NOPs so that it can be skipped when we are
137ce076141SNaveen N. Rao 	 * invoked through a tail call.
138ce076141SNaveen N. Rao 	 */
139ce076141SNaveen N. Rao 	if (ctx->seen & SEEN_TAILCALL) {
14049c3af43SNaveen N. Rao 		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
141ce076141SNaveen N. Rao 		/* this goes in the redzone */
14249c3af43SNaveen N. Rao 		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
143ce076141SNaveen N. Rao 	} else {
1443a181237SBalamuruhan S 		EMIT(PPC_RAW_NOP());
1453a181237SBalamuruhan S 		EMIT(PPC_RAW_NOP());
146ce076141SNaveen N. Rao 	}
147ce076141SNaveen N. Rao 
1487b847f52SNaveen N. Rao 	if (bpf_has_stack_frame(ctx)) {
149156d0e29SNaveen N. Rao 		/*
150156d0e29SNaveen N. Rao 		 * We need a stack frame, but we don't necessarily need to
151156d0e29SNaveen N. Rao 		 * save/restore LR unless we call other functions
152156d0e29SNaveen N. Rao 		 */
153156d0e29SNaveen N. Rao 		if (ctx->seen & SEEN_FUNC) {
154e08021f8SChristophe Leroy 			EMIT(PPC_RAW_MFLR(_R0));
155036d559cSNaveen N. Rao 			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
156156d0e29SNaveen N. Rao 		}
157156d0e29SNaveen N. Rao 
158036d559cSNaveen N. Rao 		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
159156d0e29SNaveen N. Rao 	}
160156d0e29SNaveen N. Rao 
161156d0e29SNaveen N. Rao 	/*
162156d0e29SNaveen N. Rao 	 * Back up non-volatile regs -- BPF registers 6-10
163156d0e29SNaveen N. Rao 	 * If we haven't created our own stack frame, we save these
164156d0e29SNaveen N. Rao 	 * in the protected zone below the previous stack frame
165156d0e29SNaveen N. Rao 	 */
166156d0e29SNaveen N. Rao 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
16749c3af43SNaveen N. Rao 		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
16849c3af43SNaveen N. Rao 			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
169156d0e29SNaveen N. Rao 
170156d0e29SNaveen N. Rao 	/* Setup frame pointer to point to the bpf stack area */
17149c3af43SNaveen N. Rao 	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
17249c3af43SNaveen N. Rao 		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
1733a181237SBalamuruhan S 				STACK_FRAME_MIN_SIZE + ctx->stack_size));
174156d0e29SNaveen N. Rao }
175156d0e29SNaveen N. Rao 
/*
 * Emit the epilogue code shared by normal exit and tail calls: restore
 * the saved non-volatile GPRs, then tear down the stack frame (restoring
 * LR only if it was saved in the prologue). Does NOT emit the return --
 * callers follow up with blr (bpf_jit_build_epilogue) or bctr (tail call).
 */
176ce076141SNaveen N. Rao static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
177156d0e29SNaveen N. Rao {
178156d0e29SNaveen N. Rao 	int i;
179156d0e29SNaveen N. Rao 
180156d0e29SNaveen N. Rao 	/* Restore NVRs */
181156d0e29SNaveen N. Rao 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
18249c3af43SNaveen N. Rao 		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
18349c3af43SNaveen N. Rao 			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
184156d0e29SNaveen N. Rao 
185156d0e29SNaveen N. Rao 	/* Tear down our stack frame */
1867b847f52SNaveen N. Rao 	if (bpf_has_stack_frame(ctx)) {
187036d559cSNaveen N. Rao 		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
188156d0e29SNaveen N. Rao 		if (ctx->seen & SEEN_FUNC) {
189036d559cSNaveen N. Rao 			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
190036d559cSNaveen N. Rao 			EMIT(PPC_RAW_MTLR(_R0));
191156d0e29SNaveen N. Rao 		}
192156d0e29SNaveen N. Rao 	}
193ce076141SNaveen N. Rao }
194ce076141SNaveen N. Rao 
/*
 * Emit the program's final exit: common epilogue (NVR restore + frame
 * teardown), move the BPF return value (BPF_REG_0/r8) into r3 per the
 * ppc64 calling convention, then return.
 */
1954ea76e90SChristophe Leroy void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
196ce076141SNaveen N. Rao {
197ce076141SNaveen N. Rao 	bpf_jit_emit_common_epilogue(image, ctx);
198ce076141SNaveen N. Rao 
199ce076141SNaveen N. Rao 	/* Move result to r3 */
20049c3af43SNaveen N. Rao 	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
201156d0e29SNaveen N. Rao 
2023a181237SBalamuruhan S 	EMIT(PPC_RAW_BLR());
203156d0e29SNaveen N. Rao }
204156d0e29SNaveen N. Rao 
/*
 * Emit a call to a kernel helper whose address is known at JIT time and
 * lies within kernel text. Materializes the entry point in r12 (expected
 * by the ELFv2 ABI for indirect calls), then mtctr + bctrl.
 *
 * Under PCREL, prefixed paddi/pla sequences are used where the target is
 * within +/-8GB; prefixed instructions must not cross a 64-byte boundary,
 * hence the NOP padding to 8-byte-align them. Without PCREL, a TOC-relative
 * addis/addi pair is used for core kernel text, falling back to the
 * descriptor-based (ELFv1) or absolute-address path otherwise.
 *
 * Returns 0 on success, -EINVAL if the target is not kernel text,
 * -ERANGE if it is out of TOC range.
 */
2052ecfe59cSHari Bathini static int
2062ecfe59cSHari Bathini bpf_jit_emit_func_call_hlp(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
207e2c95a61SDaniel Borkmann {
20843d636f8SNaveen N. Rao 	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
209feb63072SNaveen N. Rao 	long reladdr;
21043d636f8SNaveen N. Rao 
21161688a82SHari Bathini 	if (WARN_ON_ONCE(!kernel_text_address(func_addr)))
21243d636f8SNaveen N. Rao 		return -EINVAL;
21343d636f8SNaveen N. Rao 
21461688a82SHari Bathini #ifdef CONFIG_PPC_KERNEL_PCREL
2152ecfe59cSHari Bathini 	reladdr = func_addr - local_paca->kernelbase;
2167e3a68beSNicholas Piggin 
21761688a82SHari Bathini 	if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
2182ecfe59cSHari Bathini 		EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
2192ecfe59cSHari Bathini 		/* Align for subsequent prefix instruction */
2202ecfe59cSHari Bathini 		if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
2212ecfe59cSHari Bathini 			EMIT(PPC_RAW_NOP());
2222ecfe59cSHari Bathini 		/* paddi r12,r12,addr */
2232ecfe59cSHari Bathini 		EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
2242ecfe59cSHari Bathini 		EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
2257e3a68beSNicholas Piggin 	} else {
22661688a82SHari Bathini 		unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx);
22761688a82SHari Bathini 		bool alignment_needed = !IS_ALIGNED(pc, 8);
22861688a82SHari Bathini 
22961688a82SHari Bathini 		reladdr = func_addr - (alignment_needed ? pc + 4 :  pc);
23061688a82SHari Bathini 
23161688a82SHari Bathini 		if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
23261688a82SHari Bathini 			if (alignment_needed)
23361688a82SHari Bathini 				EMIT(PPC_RAW_NOP());
23461688a82SHari Bathini 			/* pla r12,addr */
23561688a82SHari Bathini 			EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
23661688a82SHari Bathini 			EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
23761688a82SHari Bathini 		} else {
23861688a82SHari Bathini 			/* We can clobber r12 */
23961688a82SHari Bathini 			PPC_LI64(_R12, func);
24061688a82SHari Bathini 		}
24161688a82SHari Bathini 	}
24261688a82SHari Bathini 	EMIT(PPC_RAW_MTCTR(_R12));
24361688a82SHari Bathini 	EMIT(PPC_RAW_BCTRL());
24461688a82SHari Bathini #else
24561688a82SHari Bathini 	if (core_kernel_text(func_addr)) {
246feb63072SNaveen N. Rao 		reladdr = func_addr - kernel_toc_addr();
247feb63072SNaveen N. Rao 		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
248feb63072SNaveen N. Rao 			pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
249feb63072SNaveen N. Rao 			return -ERANGE;
250feb63072SNaveen N. Rao 		}
251feb63072SNaveen N. Rao 
252feb63072SNaveen N. Rao 		EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
253feb63072SNaveen N. Rao 		EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
254feb63072SNaveen N. Rao 		EMIT(PPC_RAW_MTCTR(_R12));
25520ccb004SNaveen N. Rao 		EMIT(PPC_RAW_BCTRL());
25661688a82SHari Bathini 	} else {
25761688a82SHari Bathini 		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) {
25861688a82SHari Bathini 			/* func points to the function descriptor */
25961688a82SHari Bathini 			PPC_LI64(bpf_to_ppc(TMP_REG_2), func);
26061688a82SHari Bathini 			/* Load actual entry point from function descriptor */
26161688a82SHari Bathini 			EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
26261688a82SHari Bathini 			/* ... and move it to CTR */
26361688a82SHari Bathini 			EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
26461688a82SHari Bathini 			/*
26561688a82SHari Bathini 			 * Load TOC from function descriptor at offset 8.
26661688a82SHari Bathini 			 * We can clobber r2 since we get called through a
26761688a82SHari Bathini 			 * function pointer (so caller will save/restore r2).
26861688a82SHari Bathini 			 */
26961688a82SHari Bathini 			EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
27061688a82SHari Bathini 		} else {
27161688a82SHari Bathini 			PPC_LI64(_R12, func);
27261688a82SHari Bathini 			EMIT(PPC_RAW_MTCTR(_R12));
27361688a82SHari Bathini 		}
27461688a82SHari Bathini 		EMIT(PPC_RAW_BCTRL());
27561688a82SHari Bathini 		/*
27661688a82SHari Bathini 		 * Load r2 with kernel TOC as kernel TOC is used if function address falls
27761688a82SHari Bathini 		 * within core kernel text.
27861688a82SHari Bathini 		 */
27961688a82SHari Bathini 		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
28061688a82SHari Bathini 	}
28161688a82SHari Bathini #endif
28243d636f8SNaveen N. Rao 
28343d636f8SNaveen N. Rao 	return 0;
284e2c95a61SDaniel Borkmann }
285e2c95a61SDaniel Borkmann 
/*
 * Emit a call whose target may only be known on the final pass (e.g.
 * bpf-to-bpf calls). On sizing passes (!image) the PPC_LI64() sequence is
 * padded with NOPs to the 5-instruction maximum so the emitted length is
 * identical on every pass. Module addresses are rejected since they would
 * need a TOC switch (handled by bpf_jit_emit_func_call_hlp() instead --
 * TODO confirm caller split).
 */
28690d862f3SHari Bathini int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
287ce076141SNaveen N. Rao {
2884ea69b2fSSandipan Das 	unsigned int i, ctx_idx = ctx->idx;
2894ea69b2fSSandipan Das 
29043d636f8SNaveen N. Rao 	if (WARN_ON_ONCE(func && is_module_text_address(func)))
29143d636f8SNaveen N. Rao 		return -EINVAL;
29243d636f8SNaveen N. Rao 
293feb63072SNaveen N. Rao 	/* skip past descriptor if elf v1 */
294feb63072SNaveen N. Rao 	func += FUNCTION_DESCR_SIZE;
295feb63072SNaveen N. Rao 
2964ea69b2fSSandipan Das 	/* Load function address into r12 */
297036d559cSNaveen N. Rao 	PPC_LI64(_R12, func);
2984ea69b2fSSandipan Das 
2994ea69b2fSSandipan Das 	/* For bpf-to-bpf function calls, the callee's address is unknown
3004ea69b2fSSandipan Das 	 * until the last extra pass. As seen above, we use PPC_LI64() to
3014ea69b2fSSandipan Das 	 * load the callee's address, but this may optimize the number of
3024ea69b2fSSandipan Das 	 * instructions required based on the nature of the address.
3034ea69b2fSSandipan Das 	 *
304d3921cbbSChristophe Leroy 	 * Since we don't want the number of instructions emitted to increase,
3054ea69b2fSSandipan Das 	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
3064ea69b2fSSandipan Das 	 * we always have a five-instruction sequence, which is the maximum
3074ea69b2fSSandipan Das 	 * that PPC_LI64() can emit.
3084ea69b2fSSandipan Das 	 */
309d3921cbbSChristophe Leroy 	if (!image)
3104ea69b2fSSandipan Das 		for (i = ctx->idx - ctx_idx; i < 5; i++)
3113a181237SBalamuruhan S 			EMIT(PPC_RAW_NOP());
3124ea69b2fSSandipan Das 
313036d559cSNaveen N. Rao 	EMIT(PPC_RAW_MTCTR(_R12));
31420ccb004SNaveen N. Rao 	EMIT(PPC_RAW_BCTRL());
31543d636f8SNaveen N. Rao 
31643d636f8SNaveen N. Rao 	return 0;
317ce076141SNaveen N. Rao }
318ce076141SNaveen N. Rao 
/*
 * Emit the tail-call sequence: bounds-check the index against
 * map.max_entries, enforce the MAX_TAIL_CALL_CNT limit, load
 * array->ptrs[index], and -- if non-NULL -- tear down our frame and jump
 * into the callee just past the part of its prologue that a tail call
 * must skip (TOC load + tail_call_cnt init). Every failed check branches
 * to 'out' (the instruction following this sequence).
 */
3193832ba4eSNaveen N. Rao static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
320ce076141SNaveen N. Rao {
321ce076141SNaveen N. Rao 	/*
322ce076141SNaveen N. Rao 	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
323ce076141SNaveen N. Rao 	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
324ce076141SNaveen N. Rao 	 * r4/BPF_REG_2 - pointer to bpf_array
325ce076141SNaveen N. Rao 	 * r5/BPF_REG_3 - index in bpf_array
326ce076141SNaveen N. Rao 	 */
32749c3af43SNaveen N. Rao 	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
32849c3af43SNaveen N. Rao 	int b2p_index = bpf_to_ppc(BPF_REG_3);
329b10cb163SNaveen N. Rao 	int bpf_tailcall_prologue_size = 8;
330b10cb163SNaveen N. Rao 
3312ecfe59cSHari Bathini 	if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
332b10cb163SNaveen N. Rao 		bpf_tailcall_prologue_size += 4; /* skip past the toc load */
333ce076141SNaveen N. Rao 
334ce076141SNaveen N. Rao 	/*
335ce076141SNaveen N. Rao 	 * if (index >= array->map.max_entries)
336ce076141SNaveen N. Rao 	 *   goto out;
337ce076141SNaveen N. Rao 	 */
33849c3af43SNaveen N. Rao 	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
3393a181237SBalamuruhan S 	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
34049c3af43SNaveen N. Rao 	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
341bafb5898SNaveen N. Rao 	PPC_BCC_SHORT(COND_GE, out);
342ce076141SNaveen N. Rao 
343ce076141SNaveen N. Rao 	/*
344ebf7f6f0STiezhu Yang 	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
345ce076141SNaveen N. Rao 	 *   goto out;
346ce076141SNaveen N. Rao 	 */
34749c3af43SNaveen N. Rao 	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
34849c3af43SNaveen N. Rao 	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
349bafb5898SNaveen N. Rao 	PPC_BCC_SHORT(COND_GE, out);
350ce076141SNaveen N. Rao 
351ce076141SNaveen N. Rao 	/*
352ce076141SNaveen N. Rao 	 * tail_call_cnt++;
353ce076141SNaveen N. Rao 	 */
35449c3af43SNaveen N. Rao 	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
35549c3af43SNaveen N. Rao 	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
356ce076141SNaveen N. Rao 
357ce076141SNaveen N. Rao 	/* prog = array->ptrs[index]; */
35849c3af43SNaveen N. Rao 	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
35949c3af43SNaveen N. Rao 	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
36049c3af43SNaveen N. Rao 	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
361ce076141SNaveen N. Rao 
362ce076141SNaveen N. Rao 	/*
363ce076141SNaveen N. Rao 	 * if (prog == NULL)
364ce076141SNaveen N. Rao 	 *   goto out;
365ce076141SNaveen N. Rao 	 */
36649c3af43SNaveen N. Rao 	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
367bafb5898SNaveen N. Rao 	PPC_BCC_SHORT(COND_EQ, out);
368ce076141SNaveen N. Rao 
369ce076141SNaveen N. Rao 	/* goto *(prog->bpf_func + prologue_size); */
37049c3af43SNaveen N. Rao 	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
37149c3af43SNaveen N. Rao 	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
372b10cb163SNaveen N. Rao 			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
37349c3af43SNaveen N. Rao 	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
374ce076141SNaveen N. Rao 
375ce076141SNaveen N. Rao 	/* tear down stack, restore NVRs, ... */
376ce076141SNaveen N. Rao 	bpf_jit_emit_common_epilogue(image, ctx);
377ce076141SNaveen N. Rao 
3783a181237SBalamuruhan S 	EMIT(PPC_RAW_BCTR());
3793832ba4eSNaveen N. Rao 
380ce076141SNaveen N. Rao 	/* out: */
3813832ba4eSNaveen N. Rao 	return 0;
382ce076141SNaveen N. Rao }
383ce076141SNaveen N. Rao 
/*
 * Hand-written store-to-load-forwarding barrier sequence, called from
 * JITed code (NOTE(review): presumably for the BPF_NOSPEC /
 * STF_BARRIER_FALLBACK case handled in bpf_jit_build_body() -- confirm
 * against stf_barrier_type_get() usage there). It spills two registers to
 * the redzone, executes sync, reloads them, then runs the ori31 +
 * branch-loop mitigation pattern.
 */
384b7540d62SNaveen N. Rao /*
385b7540d62SNaveen N. Rao  * We spill into the redzone always, even if the bpf program has its own stackframe.
386b7540d62SNaveen N. Rao  * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
387b7540d62SNaveen N. Rao  */
388b7540d62SNaveen N. Rao void bpf_stf_barrier(void);
389b7540d62SNaveen N. Rao 
390b7540d62SNaveen N. Rao asm (
391b7540d62SNaveen N. Rao "		.global bpf_stf_barrier		;"
392b7540d62SNaveen N. Rao "	bpf_stf_barrier:			;"
393b7540d62SNaveen N. Rao "		std	21,-64(1)		;"
394b7540d62SNaveen N. Rao "		std	22,-56(1)		;"
395b7540d62SNaveen N. Rao "		sync				;"
396b7540d62SNaveen N. Rao "		ld	21,-64(1)		;"
397b7540d62SNaveen N. Rao "		ld	22,-56(1)		;"
398b7540d62SNaveen N. Rao "		ori	31,31,0			;"
399b7540d62SNaveen N. Rao "		.rept 14			;"
400b7540d62SNaveen N. Rao "		b	1f			;"
401b7540d62SNaveen N. Rao "	1:					;"
402b7540d62SNaveen N. Rao "		.endr				;"
403b7540d62SNaveen N. Rao "		blr				;"
404b7540d62SNaveen N. Rao );
405b7540d62SNaveen N. Rao 
406156d0e29SNaveen N. Rao /* Assemble the body code between the prologue & epilogue */
40790d862f3SHari Bathini int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
40885e03115SChristophe Leroy 		       u32 *addrs, int pass, bool extra_pass)
409156d0e29SNaveen N. Rao {
410b7540d62SNaveen N. Rao 	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
411156d0e29SNaveen N. Rao 	const struct bpf_insn *insn = fp->insnsi;
412156d0e29SNaveen N. Rao 	int flen = fp->len;
413e2c95a61SDaniel Borkmann 	int i, ret;
414156d0e29SNaveen N. Rao 
415156d0e29SNaveen N. Rao 	/* Start of epilogue code - will only be valid 2nd pass onwards */
416156d0e29SNaveen N. Rao 	u32 exit_addr = addrs[flen];
417156d0e29SNaveen N. Rao 
418156d0e29SNaveen N. Rao 	for (i = 0; i < flen; i++) {
419156d0e29SNaveen N. Rao 		u32 code = insn[i].code;
42049c3af43SNaveen N. Rao 		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
42149c3af43SNaveen N. Rao 		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
422efa95f03SHari Bathini 		u32 size = BPF_SIZE(code);
42349c3af43SNaveen N. Rao 		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
42449c3af43SNaveen N. Rao 		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
4251e82dfaaSHari Bathini 		u32 save_reg, ret_reg;
426156d0e29SNaveen N. Rao 		s16 off = insn[i].off;
427156d0e29SNaveen N. Rao 		s32 imm = insn[i].imm;
428e2c95a61SDaniel Borkmann 		bool func_addr_fixed;
429e2c95a61SDaniel Borkmann 		u64 func_addr;
430156d0e29SNaveen N. Rao 		u64 imm64;
431156d0e29SNaveen N. Rao 		u32 true_cond;
432b9c1e60eSDaniel Borkmann 		u32 tmp_idx;
433f9320c49SNaveen N. Rao 		int j;
434156d0e29SNaveen N. Rao 
435156d0e29SNaveen N. Rao 		/*
436156d0e29SNaveen N. Rao 		 * addrs[] maps a BPF bytecode address into a real offset from
437156d0e29SNaveen N. Rao 		 * the start of the body code.
438156d0e29SNaveen N. Rao 		 */
439156d0e29SNaveen N. Rao 		addrs[i] = ctx->idx * 4;
440156d0e29SNaveen N. Rao 
441156d0e29SNaveen N. Rao 		/*
442156d0e29SNaveen N. Rao 		 * As an optimization, we note down which non-volatile registers
443156d0e29SNaveen N. Rao 		 * are used so that we can only save/restore those in our
444156d0e29SNaveen N. Rao 		 * prologue and epilogue. We do this here regardless of whether
445156d0e29SNaveen N. Rao 		 * the actual BPF instruction uses src/dst registers or not
446156d0e29SNaveen N. Rao 		 * (for instance, BPF_CALL does not use them). The expectation
447156d0e29SNaveen N. Rao 		 * is that those instructions will have src_reg/dst_reg set to
448156d0e29SNaveen N. Rao 		 * 0. Even otherwise, we just lose some prologue/epilogue
449156d0e29SNaveen N. Rao 		 * optimization but everything else should work without
450156d0e29SNaveen N. Rao 		 * any issues.
451156d0e29SNaveen N. Rao 		 */
4527b847f52SNaveen N. Rao 		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
453ed573b57SChristophe Leroy 			bpf_set_seen_register(ctx, dst_reg);
4547b847f52SNaveen N. Rao 		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
455ed573b57SChristophe Leroy 			bpf_set_seen_register(ctx, src_reg);
456156d0e29SNaveen N. Rao 
457156d0e29SNaveen N. Rao 		switch (code) {
458156d0e29SNaveen N. Rao 		/*
459156d0e29SNaveen N. Rao 		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
460156d0e29SNaveen N. Rao 		 */
461156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
462156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
46306541865SBalamuruhan S 			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
464156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
465156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
466156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
4673a181237SBalamuruhan S 			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
468156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
469156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
470156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
4715855c4c1SNaveen N. Rao 			if (!imm) {
4725855c4c1SNaveen N. Rao 				goto bpf_alu32_trunc;
4735855c4c1SNaveen N. Rao 			} else if (imm >= -32768 && imm < 32768) {
4743a181237SBalamuruhan S 				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
4755855c4c1SNaveen N. Rao 			} else {
4763a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
4773a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
478156d0e29SNaveen N. Rao 			}
4795855c4c1SNaveen N. Rao 			goto bpf_alu32_trunc;
4805855c4c1SNaveen N. Rao 		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
4815855c4c1SNaveen N. Rao 		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
4825855c4c1SNaveen N. Rao 			if (!imm) {
4835855c4c1SNaveen N. Rao 				goto bpf_alu32_trunc;
4845855c4c1SNaveen N. Rao 			} else if (imm > -32768 && imm <= 32768) {
4855855c4c1SNaveen N. Rao 				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
4865855c4c1SNaveen N. Rao 			} else {
4873a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
4883a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
489156d0e29SNaveen N. Rao 			}
490156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
491156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
492156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
493156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ALU)
4943a181237SBalamuruhan S 				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
495156d0e29SNaveen N. Rao 			else
4963a181237SBalamuruhan S 				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
497156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
498156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
499156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
500156d0e29SNaveen N. Rao 			if (imm >= -32768 && imm < 32768)
5013a181237SBalamuruhan S 				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
502156d0e29SNaveen N. Rao 			else {
5033a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
504156d0e29SNaveen N. Rao 				if (BPF_CLASS(code) == BPF_ALU)
5053a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
506156d0e29SNaveen N. Rao 				else
5073a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
508156d0e29SNaveen N. Rao 			}
509156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
510156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
511156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
512156d0e29SNaveen N. Rao 			if (BPF_OP(code) == BPF_MOD) {
5133a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
5143a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
5153a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
516156d0e29SNaveen N. Rao 			} else
5173a181237SBalamuruhan S 				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
518156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
519156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
520156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
521156d0e29SNaveen N. Rao 			if (BPF_OP(code) == BPF_MOD) {
5223a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
5233a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
5243a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
525156d0e29SNaveen N. Rao 			} else
5263a181237SBalamuruhan S 				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
527156d0e29SNaveen N. Rao 			break;
528156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
529156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
530156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
531156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
532156d0e29SNaveen N. Rao 			if (imm == 0)
533156d0e29SNaveen N. Rao 				return -EINVAL;
5348bbc9d82SNaveen N. Rao 			if (imm == 1) {
5358bbc9d82SNaveen N. Rao 				if (BPF_OP(code) == BPF_DIV) {
536156d0e29SNaveen N. Rao 					goto bpf_alu32_trunc;
5378bbc9d82SNaveen N. Rao 				} else {
5388bbc9d82SNaveen N. Rao 					EMIT(PPC_RAW_LI(dst_reg, 0));
5398bbc9d82SNaveen N. Rao 					break;
5408bbc9d82SNaveen N. Rao 				}
5418bbc9d82SNaveen N. Rao 			}
542156d0e29SNaveen N. Rao 
5433a3fc9bfSJordan Niethe 			PPC_LI32(tmp1_reg, imm);
544156d0e29SNaveen N. Rao 			switch (BPF_CLASS(code)) {
545156d0e29SNaveen N. Rao 			case BPF_ALU:
546156d0e29SNaveen N. Rao 				if (BPF_OP(code) == BPF_MOD) {
5473a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
5483a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
5493a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
550156d0e29SNaveen N. Rao 				} else
5513a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
552156d0e29SNaveen N. Rao 				break;
553156d0e29SNaveen N. Rao 			case BPF_ALU64:
554156d0e29SNaveen N. Rao 				if (BPF_OP(code) == BPF_MOD) {
5553a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
5563a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
5573a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
558156d0e29SNaveen N. Rao 				} else
5593a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
560156d0e29SNaveen N. Rao 				break;
561156d0e29SNaveen N. Rao 			}
562156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
563156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
564156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
5653a181237SBalamuruhan S 			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
566156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
567156d0e29SNaveen N. Rao 
568156d0e29SNaveen N. Rao 		/*
569156d0e29SNaveen N. Rao 		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
570156d0e29SNaveen N. Rao 		 */
571156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
572156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
5733a181237SBalamuruhan S 			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
574156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
575156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
576156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
577156d0e29SNaveen N. Rao 			if (!IMM_H(imm))
5783a181237SBalamuruhan S 				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
579156d0e29SNaveen N. Rao 			else {
580156d0e29SNaveen N. Rao 				/* Sign-extended */
5813a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
5823a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
583156d0e29SNaveen N. Rao 			}
584156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
585156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
586156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
5873a181237SBalamuruhan S 			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
588156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
589156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
590156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
591156d0e29SNaveen N. Rao 			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
592156d0e29SNaveen N. Rao 				/* Sign-extended */
5933a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
5943a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
595156d0e29SNaveen N. Rao 			} else {
596156d0e29SNaveen N. Rao 				if (IMM_L(imm))
5973a181237SBalamuruhan S 					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
598156d0e29SNaveen N. Rao 				if (IMM_H(imm))
5993a181237SBalamuruhan S 					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
600156d0e29SNaveen N. Rao 			}
601156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
602156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
603156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
6043a181237SBalamuruhan S 			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
605156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
606156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
607156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
608156d0e29SNaveen N. Rao 			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
609156d0e29SNaveen N. Rao 				/* Sign-extended */
6103a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
6113a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
612156d0e29SNaveen N. Rao 			} else {
613156d0e29SNaveen N. Rao 				if (IMM_L(imm))
6143a181237SBalamuruhan S 					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
615156d0e29SNaveen N. Rao 				if (IMM_H(imm))
6163a181237SBalamuruhan S 					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
617156d0e29SNaveen N. Rao 			}
618156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
619156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
620156d0e29SNaveen N. Rao 			/* slw clears top 32 bits */
6213a181237SBalamuruhan S 			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
622a4c92773SJiong Wang 			/* skip zero extension move, but set address map. */
623a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
624a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
625156d0e29SNaveen N. Rao 			break;
626156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
6273a181237SBalamuruhan S 			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
628156d0e29SNaveen N. Rao 			break;
629156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<== (u32) imm */
630156d0e29SNaveen N. Rao 			/* with imm 0, we still need to clear top 32 bits */
6313a181237SBalamuruhan S 			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
632a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
633a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
634156d0e29SNaveen N. Rao 			break;
635156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<== imm */
636156d0e29SNaveen N. Rao 			if (imm != 0)
6373a181237SBalamuruhan S 				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
638156d0e29SNaveen N. Rao 			break;
639156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
6403a181237SBalamuruhan S 			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
641a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
642a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
643156d0e29SNaveen N. Rao 			break;
644156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
6453a181237SBalamuruhan S 			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
646156d0e29SNaveen N. Rao 			break;
647156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
6483a181237SBalamuruhan S 			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
649a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
650a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
651156d0e29SNaveen N. Rao 			break;
652156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
653156d0e29SNaveen N. Rao 			if (imm != 0)
6543a181237SBalamuruhan S 				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
655156d0e29SNaveen N. Rao 			break;
65644cf43c0SJiong Wang 		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
6573a181237SBalamuruhan S 			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
65844cf43c0SJiong Wang 			goto bpf_alu32_trunc;
659156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
6603a181237SBalamuruhan S 			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
661156d0e29SNaveen N. Rao 			break;
66244cf43c0SJiong Wang 		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
6633a181237SBalamuruhan S 			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
66444cf43c0SJiong Wang 			goto bpf_alu32_trunc;
665156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
666156d0e29SNaveen N. Rao 			if (imm != 0)
6673a181237SBalamuruhan S 				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
668156d0e29SNaveen N. Rao 			break;
669156d0e29SNaveen N. Rao 
670156d0e29SNaveen N. Rao 		/*
671156d0e29SNaveen N. Rao 		 * MOV
672156d0e29SNaveen N. Rao 		 */
673156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
674156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
675a4c92773SJiong Wang 			if (imm == 1) {
676a4c92773SJiong Wang 				/* special mov32 for zext */
6773a181237SBalamuruhan S 				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
678a4c92773SJiong Wang 				break;
679a4c92773SJiong Wang 			}
6803a181237SBalamuruhan S 			EMIT(PPC_RAW_MR(dst_reg, src_reg));
681156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
682156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
683156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
684156d0e29SNaveen N. Rao 			PPC_LI32(dst_reg, imm);
685156d0e29SNaveen N. Rao 			if (imm < 0)
686156d0e29SNaveen N. Rao 				goto bpf_alu32_trunc;
687a4c92773SJiong Wang 			else if (insn_is_zext(&insn[i + 1]))
688a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
689156d0e29SNaveen N. Rao 			break;
690156d0e29SNaveen N. Rao 
691156d0e29SNaveen N. Rao bpf_alu32_trunc:
692156d0e29SNaveen N. Rao 		/* Truncate to 32-bits */
693a4c92773SJiong Wang 		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
6943a181237SBalamuruhan S 			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
695156d0e29SNaveen N. Rao 		break;
696156d0e29SNaveen N. Rao 
697156d0e29SNaveen N. Rao 		/*
698156d0e29SNaveen N. Rao 		 * BPF_FROM_BE/LE
699156d0e29SNaveen N. Rao 		 */
700156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_END | BPF_FROM_LE:
701156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_END | BPF_FROM_BE:
702a71c0b09SArtem Savkov 		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
703156d0e29SNaveen N. Rao #ifdef __BIG_ENDIAN__
704156d0e29SNaveen N. Rao 			if (BPF_SRC(code) == BPF_FROM_BE)
705156d0e29SNaveen N. Rao 				goto emit_clear;
706156d0e29SNaveen N. Rao #else /* !__BIG_ENDIAN__ */
707a71c0b09SArtem Savkov 			if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE)
708156d0e29SNaveen N. Rao 				goto emit_clear;
709156d0e29SNaveen N. Rao #endif
710156d0e29SNaveen N. Rao 			switch (imm) {
711156d0e29SNaveen N. Rao 			case 16:
712156d0e29SNaveen N. Rao 				/* Rotate 8 bits left & mask with 0x0000ff00 */
7133a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
714156d0e29SNaveen N. Rao 				/* Rotate 8 bits right & insert LSB to reg */
7153a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
716156d0e29SNaveen N. Rao 				/* Move result back to dst_reg */
7173a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
718156d0e29SNaveen N. Rao 				break;
719156d0e29SNaveen N. Rao 			case 32:
720156d0e29SNaveen N. Rao 				/*
721156d0e29SNaveen N. Rao 				 * Rotate word left by 8 bits:
722156d0e29SNaveen N. Rao 				 * 2 bytes are already in their final position
723156d0e29SNaveen N. Rao 				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
724156d0e29SNaveen N. Rao 				 */
7253a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
726156d0e29SNaveen N. Rao 				/* Rotate 24 bits and insert byte 1 */
7273a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
728156d0e29SNaveen N. Rao 				/* Rotate 24 bits and insert byte 3 */
7293a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
7303a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
731156d0e29SNaveen N. Rao 				break;
732156d0e29SNaveen N. Rao 			case 64:
7333f5f766dSNaveen N. Rao 				/* Store the value to stack and then use byte-reverse loads */
734036d559cSNaveen N. Rao 				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
7353a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
7363f5f766dSNaveen N. Rao 				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
7373a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
7383f5f766dSNaveen N. Rao 				} else {
7393a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
7403f5f766dSNaveen N. Rao 					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
7413f5f766dSNaveen N. Rao 						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
7423a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_LI(tmp2_reg, 4));
7433a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
7443f5f766dSNaveen N. Rao 					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
7453a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
7463a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
7473f5f766dSNaveen N. Rao 				}
748156d0e29SNaveen N. Rao 				break;
749156d0e29SNaveen N. Rao 			}
750156d0e29SNaveen N. Rao 			break;
751156d0e29SNaveen N. Rao 
752156d0e29SNaveen N. Rao emit_clear:
753156d0e29SNaveen N. Rao 			switch (imm) {
754156d0e29SNaveen N. Rao 			case 16:
755156d0e29SNaveen N. Rao 				/* zero-extend 16 bits into 64 bits */
7563a181237SBalamuruhan S 				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
757a4c92773SJiong Wang 				if (insn_is_zext(&insn[i + 1]))
758a4c92773SJiong Wang 					addrs[++i] = ctx->idx * 4;
759156d0e29SNaveen N. Rao 				break;
760156d0e29SNaveen N. Rao 			case 32:
761a4c92773SJiong Wang 				if (!fp->aux->verifier_zext)
762156d0e29SNaveen N. Rao 					/* zero-extend 32 bits into 64 bits */
7633a181237SBalamuruhan S 					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
764156d0e29SNaveen N. Rao 				break;
765156d0e29SNaveen N. Rao 			case 64:
766156d0e29SNaveen N. Rao 				/* nop */
767156d0e29SNaveen N. Rao 				break;
768156d0e29SNaveen N. Rao 			}
769156d0e29SNaveen N. Rao 			break;
770156d0e29SNaveen N. Rao 
771156d0e29SNaveen N. Rao 		/*
772f5e81d11SDaniel Borkmann 		 * BPF_ST NOSPEC (speculation barrier)
773f5e81d11SDaniel Borkmann 		 */
774f5e81d11SDaniel Borkmann 		case BPF_ST | BPF_NOSPEC:
775b7540d62SNaveen N. Rao 			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
776b7540d62SNaveen N. Rao 					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
777b7540d62SNaveen N. Rao 				break;
778b7540d62SNaveen N. Rao 
779b7540d62SNaveen N. Rao 			switch (stf_barrier) {
780b7540d62SNaveen N. Rao 			case STF_BARRIER_EIEIO:
781b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_EIEIO() | 0x02000000);
782b7540d62SNaveen N. Rao 				break;
783b7540d62SNaveen N. Rao 			case STF_BARRIER_SYNC_ORI:
784b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_SYNC());
7853a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
786b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
787b7540d62SNaveen N. Rao 				break;
788b7540d62SNaveen N. Rao 			case STF_BARRIER_FALLBACK:
789c2067f7fSNaveen N. Rao 				ctx->seen |= SEEN_FUNC;
790036d559cSNaveen N. Rao 				PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
791036d559cSNaveen N. Rao 				EMIT(PPC_RAW_MTCTR(_R12));
792b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_BCTRL());
793b7540d62SNaveen N. Rao 				break;
794b7540d62SNaveen N. Rao 			case STF_BARRIER_NONE:
795b7540d62SNaveen N. Rao 				break;
796b7540d62SNaveen N. Rao 			}
797f5e81d11SDaniel Borkmann 			break;
798f5e81d11SDaniel Borkmann 
799f5e81d11SDaniel Borkmann 		/*
800156d0e29SNaveen N. Rao 		 * BPF_ST(X)
801156d0e29SNaveen N. Rao 		 */
802156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
803156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
804156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
8053a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_LI(tmp1_reg, imm));
8063a3fc9bfSJordan Niethe 				src_reg = tmp1_reg;
807156d0e29SNaveen N. Rao 			}
8083a181237SBalamuruhan S 			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
809156d0e29SNaveen N. Rao 			break;
810156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
811156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
812156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
8133a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_LI(tmp1_reg, imm));
8143a3fc9bfSJordan Niethe 				src_reg = tmp1_reg;
815156d0e29SNaveen N. Rao 			}
8163a181237SBalamuruhan S 			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
817156d0e29SNaveen N. Rao 			break;
818156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
819156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
820156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
8213a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
8223a3fc9bfSJordan Niethe 				src_reg = tmp1_reg;
823156d0e29SNaveen N. Rao 			}
8243a181237SBalamuruhan S 			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
825156d0e29SNaveen N. Rao 			break;
826156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
827156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
828156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
8293a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
8303a3fc9bfSJordan Niethe 				src_reg = tmp1_reg;
831156d0e29SNaveen N. Rao 			}
832794abc08SNaveen N. Rao 			if (off % 4) {
8333a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_LI(tmp2_reg, off));
8343a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
835794abc08SNaveen N. Rao 			} else {
836794abc08SNaveen N. Rao 				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
837794abc08SNaveen N. Rao 			}
838156d0e29SNaveen N. Rao 			break;
839156d0e29SNaveen N. Rao 
840156d0e29SNaveen N. Rao 		/*
84191c960b0SBrendan Jackman 		 * BPF_STX ATOMIC (atomic ops)
842156d0e29SNaveen N. Rao 		 */
84391c960b0SBrendan Jackman 		case BPF_STX | BPF_ATOMIC | BPF_W:
84465112709SHari Bathini 		case BPF_STX | BPF_ATOMIC | BPF_DW:
8451e82dfaaSHari Bathini 			save_reg = tmp2_reg;
8461e82dfaaSHari Bathini 			ret_reg = src_reg;
8471e82dfaaSHari Bathini 
84865112709SHari Bathini 			/* Get offset into TMP_REG_1 */
84965112709SHari Bathini 			EMIT(PPC_RAW_LI(tmp1_reg, off));
850b1e7cee9SPuranjay Mohan 			/*
851b1e7cee9SPuranjay Mohan 			 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
852b1e7cee9SPuranjay Mohan 			 * before and after the operation.
853b1e7cee9SPuranjay Mohan 			 *
854b1e7cee9SPuranjay Mohan 			 * This is a requirement in the Linux Kernel Memory Model.
855b1e7cee9SPuranjay Mohan 			 * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
856b1e7cee9SPuranjay Mohan 			 */
857b1e7cee9SPuranjay Mohan 			if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
858b1e7cee9SPuranjay Mohan 				EMIT(PPC_RAW_SYNC());
859b9c1e60eSDaniel Borkmann 			tmp_idx = ctx->idx * 4;
860156d0e29SNaveen N. Rao 			/* load value from memory into TMP_REG_2 */
86165112709SHari Bathini 			if (size == BPF_DW)
86265112709SHari Bathini 				EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
86365112709SHari Bathini 			else
86465112709SHari Bathini 				EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
86565112709SHari Bathini 
866dbe6e245SHari Bathini 			/* Save old value in _R0 */
867dbe6e245SHari Bathini 			if (imm & BPF_FETCH)
868dbe6e245SHari Bathini 				EMIT(PPC_RAW_MR(_R0, tmp2_reg));
869dbe6e245SHari Bathini 
87065112709SHari Bathini 			switch (imm) {
87165112709SHari Bathini 			case BPF_ADD:
872dbe6e245SHari Bathini 			case BPF_ADD | BPF_FETCH:
8733a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
874156d0e29SNaveen N. Rao 				break;
87565112709SHari Bathini 			case BPF_AND:
876dbe6e245SHari Bathini 			case BPF_AND | BPF_FETCH:
87765112709SHari Bathini 				EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
87865112709SHari Bathini 				break;
87965112709SHari Bathini 			case BPF_OR:
880dbe6e245SHari Bathini 			case BPF_OR | BPF_FETCH:
88165112709SHari Bathini 				EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
88265112709SHari Bathini 				break;
88365112709SHari Bathini 			case BPF_XOR:
884dbe6e245SHari Bathini 			case BPF_XOR | BPF_FETCH:
88565112709SHari Bathini 				EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
88665112709SHari Bathini 				break;
8871e82dfaaSHari Bathini 			case BPF_CMPXCHG:
8881e82dfaaSHari Bathini 				/*
8891e82dfaaSHari Bathini 				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
8901e82dfaaSHari Bathini 				 * in src_reg for other cases.
8911e82dfaaSHari Bathini 				 */
8921e82dfaaSHari Bathini 				ret_reg = bpf_to_ppc(BPF_REG_0);
8931e82dfaaSHari Bathini 
8941e82dfaaSHari Bathini 				/* Compare with old value in BPF_R0 */
8951e82dfaaSHari Bathini 				if (size == BPF_DW)
8961e82dfaaSHari Bathini 					EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
8971e82dfaaSHari Bathini 				else
8981e82dfaaSHari Bathini 					EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
8991e82dfaaSHari Bathini 				/* Don't set if different from old value */
9001e82dfaaSHari Bathini 				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
9011e82dfaaSHari Bathini 				fallthrough;
9021e82dfaaSHari Bathini 			case BPF_XCHG:
9031e82dfaaSHari Bathini 				save_reg = src_reg;
9041e82dfaaSHari Bathini 				break;
90565112709SHari Bathini 			default:
90691c960b0SBrendan Jackman 				pr_err_ratelimited(
90791c960b0SBrendan Jackman 					"eBPF filter atomic op code %02x (@%d) unsupported\n",
90891c960b0SBrendan Jackman 					code, i);
90965112709SHari Bathini 				return -EOPNOTSUPP;
91091c960b0SBrendan Jackman 			}
91191c960b0SBrendan Jackman 
912dbe6e245SHari Bathini 			/* store new value */
91365112709SHari Bathini 			if (size == BPF_DW)
9141e82dfaaSHari Bathini 				EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
91565112709SHari Bathini 			else
9161e82dfaaSHari Bathini 				EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
91765112709SHari Bathini 			/* we're done if this succeeded */
918b9c1e60eSDaniel Borkmann 			PPC_BCC_SHORT(COND_NE, tmp_idx);
919dbe6e245SHari Bathini 
9201e82dfaaSHari Bathini 			if (imm & BPF_FETCH) {
921b1e7cee9SPuranjay Mohan 				/* Emit 'sync' to enforce full ordering */
922b1e7cee9SPuranjay Mohan 				if (IS_ENABLED(CONFIG_SMP))
923b1e7cee9SPuranjay Mohan 					EMIT(PPC_RAW_SYNC());
9241e82dfaaSHari Bathini 				EMIT(PPC_RAW_MR(ret_reg, _R0));
9251e82dfaaSHari Bathini 				/*
9261e82dfaaSHari Bathini 				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
9271e82dfaaSHari Bathini 				 * For context, see commit 39491867ace5.
9281e82dfaaSHari Bathini 				 */
9291e82dfaaSHari Bathini 				if (size != BPF_DW && imm == BPF_CMPXCHG &&
9301e82dfaaSHari Bathini 				    insn_is_zext(&insn[i + 1]))
9311e82dfaaSHari Bathini 					addrs[++i] = ctx->idx * 4;
9321e82dfaaSHari Bathini 			}
933156d0e29SNaveen N. Rao 			break;
934156d0e29SNaveen N. Rao 
935156d0e29SNaveen N. Rao 		/*
936156d0e29SNaveen N. Rao 		 * BPF_LDX
937156d0e29SNaveen N. Rao 		 */
938156d0e29SNaveen N. Rao 		/* dst = *(u8 *)(ul) (src + off) */
939156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_B:
940*717756c9SArtem Savkov 		case BPF_LDX | BPF_MEMSX | BPF_B:
941983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
942*717756c9SArtem Savkov 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
943156d0e29SNaveen N. Rao 		/* dst = *(u16 *)(ul) (src + off) */
944156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_H:
945*717756c9SArtem Savkov 		case BPF_LDX | BPF_MEMSX | BPF_H:
946983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
947*717756c9SArtem Savkov 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
948156d0e29SNaveen N. Rao 		/* dst = *(u32 *)(ul) (src + off) */
949156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_W:
950*717756c9SArtem Savkov 		case BPF_LDX | BPF_MEMSX | BPF_W:
951983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
952*717756c9SArtem Savkov 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
953156d0e29SNaveen N. Rao 		/* dst = *(u64 *)(ul) (src + off) */
954156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_DW:
955983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
9569c70c714SRavi Bangoria 			/*
9579c70c714SRavi Bangoria 			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
9589c70c714SRavi Bangoria 			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
9599c70c714SRavi Bangoria 			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
9609c70c714SRavi Bangoria 			 * set dst_reg=0 and move on.
9619c70c714SRavi Bangoria 			 */
962*717756c9SArtem Savkov 			if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
9633a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
9649c70c714SRavi Bangoria 				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
9653a3fc9bfSJordan Niethe 					PPC_LI64(tmp2_reg, 0x8000000000000000ul);
9669c70c714SRavi Bangoria 				else /* BOOK3S_64 */
9673a3fc9bfSJordan Niethe 					PPC_LI64(tmp2_reg, PAGE_OFFSET);
9683a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
969bafb5898SNaveen N. Rao 				PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
9709c70c714SRavi Bangoria 				EMIT(PPC_RAW_LI(dst_reg, 0));
9719c70c714SRavi Bangoria 				/*
972794abc08SNaveen N. Rao 				 * Check if 'off' is word aligned for BPF_DW, because
973794abc08SNaveen N. Rao 				 * we might generate two instructions.
9749c70c714SRavi Bangoria 				 */
975*717756c9SArtem Savkov 				if ((BPF_SIZE(code) == BPF_DW ||
976*717756c9SArtem Savkov 				    (BPF_SIZE(code) == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX)) &&
977*717756c9SArtem Savkov 						(off & 3))
9789c70c714SRavi Bangoria 					PPC_JMP((ctx->idx + 3) * 4);
9799c70c714SRavi Bangoria 				else
9809c70c714SRavi Bangoria 					PPC_JMP((ctx->idx + 2) * 4);
9819c70c714SRavi Bangoria 			}
9829c70c714SRavi Bangoria 
983*717756c9SArtem Savkov 			if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
984*717756c9SArtem Savkov 				switch (size) {
985*717756c9SArtem Savkov 				case BPF_B:
986*717756c9SArtem Savkov 					EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
987*717756c9SArtem Savkov 					EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
988*717756c9SArtem Savkov 					break;
989*717756c9SArtem Savkov 				case BPF_H:
990*717756c9SArtem Savkov 					EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
991*717756c9SArtem Savkov 					break;
992*717756c9SArtem Savkov 				case BPF_W:
993*717756c9SArtem Savkov 					EMIT(PPC_RAW_LWA(dst_reg, src_reg, off));
994*717756c9SArtem Savkov 					break;
995*717756c9SArtem Savkov 				}
996*717756c9SArtem Savkov 			} else {
997efa95f03SHari Bathini 				switch (size) {
998efa95f03SHari Bathini 				case BPF_B:
999efa95f03SHari Bathini 					EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
1000efa95f03SHari Bathini 					break;
1001efa95f03SHari Bathini 				case BPF_H:
1002efa95f03SHari Bathini 					EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
1003efa95f03SHari Bathini 					break;
1004efa95f03SHari Bathini 				case BPF_W:
1005efa95f03SHari Bathini 					EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
1006efa95f03SHari Bathini 					break;
1007efa95f03SHari Bathini 				case BPF_DW:
1008794abc08SNaveen N. Rao 					if (off % 4) {
10093a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_LI(tmp1_reg, off));
10103a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
1011794abc08SNaveen N. Rao 					} else {
1012794abc08SNaveen N. Rao 						EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
1013794abc08SNaveen N. Rao 					}
1014156d0e29SNaveen N. Rao 					break;
1015efa95f03SHari Bathini 				}
1016*717756c9SArtem Savkov 			}
1017efa95f03SHari Bathini 
1018efa95f03SHari Bathini 			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
1019efa95f03SHari Bathini 				addrs[++i] = ctx->idx * 4;
1020983bdc02SRavi Bangoria 
1021983bdc02SRavi Bangoria 			if (BPF_MODE(code) == BPF_PROBE_MEM) {
102290d862f3SHari Bathini 				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
102390d862f3SHari Bathini 							    ctx->idx - 1, 4, dst_reg);
1024983bdc02SRavi Bangoria 				if (ret)
1025983bdc02SRavi Bangoria 					return ret;
1026983bdc02SRavi Bangoria 			}
1027efa95f03SHari Bathini 			break;
1028156d0e29SNaveen N. Rao 
1029156d0e29SNaveen N. Rao 		/*
1030156d0e29SNaveen N. Rao 		 * Doubleword load
1031156d0e29SNaveen N. Rao 		 * 16 byte instruction that uses two 'struct bpf_insn'
1032156d0e29SNaveen N. Rao 		 */
1033156d0e29SNaveen N. Rao 		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
1034156d0e29SNaveen N. Rao 			imm64 = ((u64)(u32) insn[i].imm) |
1035156d0e29SNaveen N. Rao 				    (((u64)(u32) insn[i+1].imm) << 32);
1036f9320c49SNaveen N. Rao 			tmp_idx = ctx->idx;
1037f9320c49SNaveen N. Rao 			PPC_LI64(dst_reg, imm64);
1038f9320c49SNaveen N. Rao 			/* padding to allow full 5 instructions for later patching */
1039d3921cbbSChristophe Leroy 			if (!image)
1040f9320c49SNaveen N. Rao 				for (j = ctx->idx - tmp_idx; j < 5; j++)
1041f9320c49SNaveen N. Rao 					EMIT(PPC_RAW_NOP());
1042156d0e29SNaveen N. Rao 			/* Adjust for two bpf instructions */
1043156d0e29SNaveen N. Rao 			addrs[++i] = ctx->idx * 4;
1044156d0e29SNaveen N. Rao 			break;
1045156d0e29SNaveen N. Rao 
1046156d0e29SNaveen N. Rao 		/*
1047156d0e29SNaveen N. Rao 		 * Return/Exit
1048156d0e29SNaveen N. Rao 		 */
1049156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_EXIT:
1050156d0e29SNaveen N. Rao 			/*
1051156d0e29SNaveen N. Rao 			 * If this isn't the very last instruction, branch to
1052156d0e29SNaveen N. Rao 			 * the epilogue. If we _are_ the last instruction,
1053156d0e29SNaveen N. Rao 			 * we'll just fall through to the epilogue.
1054156d0e29SNaveen N. Rao 			 */
10550ffdbce6SNaveen N. Rao 			if (i != flen - 1) {
10563a3fc9bfSJordan Niethe 				ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
10570ffdbce6SNaveen N. Rao 				if (ret)
10580ffdbce6SNaveen N. Rao 					return ret;
10590ffdbce6SNaveen N. Rao 			}
1060156d0e29SNaveen N. Rao 			/* else fall through to the epilogue */
1061156d0e29SNaveen N. Rao 			break;
1062156d0e29SNaveen N. Rao 
1063156d0e29SNaveen N. Rao 		/*
10648484ce83SSandipan Das 		 * Call kernel helper or bpf function
1065156d0e29SNaveen N. Rao 		 */
1066156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_CALL:
1067156d0e29SNaveen N. Rao 			ctx->seen |= SEEN_FUNC;
10688484ce83SSandipan Das 
106985e03115SChristophe Leroy 			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
1070e2c95a61SDaniel Borkmann 						    &func_addr, &func_addr_fixed);
1071e2c95a61SDaniel Borkmann 			if (ret < 0)
1072e2c95a61SDaniel Borkmann 				return ret;
1073156d0e29SNaveen N. Rao 
1074e2c95a61SDaniel Borkmann 			if (func_addr_fixed)
10752ecfe59cSHari Bathini 				ret = bpf_jit_emit_func_call_hlp(image, fimage, ctx, func_addr);
1076e2c95a61SDaniel Borkmann 			else
107790d862f3SHari Bathini 				ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
107843d636f8SNaveen N. Rao 
107943d636f8SNaveen N. Rao 			if (ret)
108043d636f8SNaveen N. Rao 				return ret;
108143d636f8SNaveen N. Rao 
1082156d0e29SNaveen N. Rao 			/* move return value from r3 to BPF_REG_0 */
108349c3af43SNaveen N. Rao 			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
1084156d0e29SNaveen N. Rao 			break;
1085156d0e29SNaveen N. Rao 
1086156d0e29SNaveen N. Rao 		/*
1087156d0e29SNaveen N. Rao 		 * Jumps and branches
1088156d0e29SNaveen N. Rao 		 */
1089156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JA:
1090156d0e29SNaveen N. Rao 			PPC_JMP(addrs[i + 1 + off]);
1091156d0e29SNaveen N. Rao 			break;
10923c086ce2SArtem Savkov 		case BPF_JMP32 | BPF_JA:
10933c086ce2SArtem Savkov 			PPC_JMP(addrs[i + 1 + imm]);
10943c086ce2SArtem Savkov 			break;
1095156d0e29SNaveen N. Rao 
1096156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGT | BPF_K:
1097156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGT | BPF_X:
1098156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGT | BPF_K:
1099156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGT | BPF_X:
11005f645996SJiong Wang 		case BPF_JMP32 | BPF_JGT | BPF_K:
11015f645996SJiong Wang 		case BPF_JMP32 | BPF_JGT | BPF_X:
11025f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGT | BPF_K:
11035f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGT | BPF_X:
1104156d0e29SNaveen N. Rao 			true_cond = COND_GT;
1105156d0e29SNaveen N. Rao 			goto cond_branch;
110620dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLT | BPF_K:
110720dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLT | BPF_X:
110820dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLT | BPF_K:
110920dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLT | BPF_X:
11105f645996SJiong Wang 		case BPF_JMP32 | BPF_JLT | BPF_K:
11115f645996SJiong Wang 		case BPF_JMP32 | BPF_JLT | BPF_X:
11125f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLT | BPF_K:
11135f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLT | BPF_X:
111420dbf5ccSDaniel Borkmann 			true_cond = COND_LT;
111520dbf5ccSDaniel Borkmann 			goto cond_branch;
1116156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGE | BPF_K:
1117156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGE | BPF_X:
1118156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGE | BPF_K:
1119156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGE | BPF_X:
11205f645996SJiong Wang 		case BPF_JMP32 | BPF_JGE | BPF_K:
11215f645996SJiong Wang 		case BPF_JMP32 | BPF_JGE | BPF_X:
11225f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGE | BPF_K:
11235f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGE | BPF_X:
1124156d0e29SNaveen N. Rao 			true_cond = COND_GE;
1125156d0e29SNaveen N. Rao 			goto cond_branch;
112620dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLE | BPF_K:
112720dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLE | BPF_X:
112820dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLE | BPF_K:
112920dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLE | BPF_X:
11305f645996SJiong Wang 		case BPF_JMP32 | BPF_JLE | BPF_K:
11315f645996SJiong Wang 		case BPF_JMP32 | BPF_JLE | BPF_X:
11325f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLE | BPF_K:
11335f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLE | BPF_X:
113420dbf5ccSDaniel Borkmann 			true_cond = COND_LE;
113520dbf5ccSDaniel Borkmann 			goto cond_branch;
1136156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JEQ | BPF_K:
1137156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JEQ | BPF_X:
11385f645996SJiong Wang 		case BPF_JMP32 | BPF_JEQ | BPF_K:
11395f645996SJiong Wang 		case BPF_JMP32 | BPF_JEQ | BPF_X:
1140156d0e29SNaveen N. Rao 			true_cond = COND_EQ;
1141156d0e29SNaveen N. Rao 			goto cond_branch;
1142156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JNE | BPF_K:
1143156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JNE | BPF_X:
11445f645996SJiong Wang 		case BPF_JMP32 | BPF_JNE | BPF_K:
11455f645996SJiong Wang 		case BPF_JMP32 | BPF_JNE | BPF_X:
1146156d0e29SNaveen N. Rao 			true_cond = COND_NE;
1147156d0e29SNaveen N. Rao 			goto cond_branch;
1148156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSET | BPF_K:
1149156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSET | BPF_X:
11505f645996SJiong Wang 		case BPF_JMP32 | BPF_JSET | BPF_K:
11515f645996SJiong Wang 		case BPF_JMP32 | BPF_JSET | BPF_X:
1152156d0e29SNaveen N. Rao 			true_cond = COND_NE;
1153156d0e29SNaveen N. Rao 			/* Fall through */
1154156d0e29SNaveen N. Rao 
1155156d0e29SNaveen N. Rao cond_branch:
1156156d0e29SNaveen N. Rao 			switch (code) {
1157156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGT | BPF_X:
115820dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLT | BPF_X:
1159156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGE | BPF_X:
116020dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLE | BPF_X:
1161156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JEQ | BPF_X:
1162156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JNE | BPF_X:
11635f645996SJiong Wang 			case BPF_JMP32 | BPF_JGT | BPF_X:
11645f645996SJiong Wang 			case BPF_JMP32 | BPF_JLT | BPF_X:
11655f645996SJiong Wang 			case BPF_JMP32 | BPF_JGE | BPF_X:
11665f645996SJiong Wang 			case BPF_JMP32 | BPF_JLE | BPF_X:
11675f645996SJiong Wang 			case BPF_JMP32 | BPF_JEQ | BPF_X:
11685f645996SJiong Wang 			case BPF_JMP32 | BPF_JNE | BPF_X:
1169156d0e29SNaveen N. Rao 				/* unsigned comparison */
11705f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP32)
11713a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
11725f645996SJiong Wang 				else
11733a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
1174156d0e29SNaveen N. Rao 				break;
1175156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGT | BPF_X:
117620dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLT | BPF_X:
1177156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGE | BPF_X:
117820dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLE | BPF_X:
11795f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGT | BPF_X:
11805f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLT | BPF_X:
11815f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGE | BPF_X:
11825f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLE | BPF_X:
1183156d0e29SNaveen N. Rao 				/* signed comparison */
11845f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP32)
11853a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
11865f645996SJiong Wang 				else
11873a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
1188156d0e29SNaveen N. Rao 				break;
1189156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSET | BPF_X:
11905f645996SJiong Wang 			case BPF_JMP32 | BPF_JSET | BPF_X:
11915f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP) {
11923a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
11935f645996SJiong Wang 				} else {
11943a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
11953a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
11965f645996SJiong Wang 				}
1197156d0e29SNaveen N. Rao 				break;
1198156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JNE | BPF_K:
1199156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JEQ | BPF_K:
1200156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGT | BPF_K:
120120dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLT | BPF_K:
1202156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGE | BPF_K:
120320dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLE | BPF_K:
12045f645996SJiong Wang 			case BPF_JMP32 | BPF_JNE | BPF_K:
12055f645996SJiong Wang 			case BPF_JMP32 | BPF_JEQ | BPF_K:
12065f645996SJiong Wang 			case BPF_JMP32 | BPF_JGT | BPF_K:
12075f645996SJiong Wang 			case BPF_JMP32 | BPF_JLT | BPF_K:
12085f645996SJiong Wang 			case BPF_JMP32 | BPF_JGE | BPF_K:
12095f645996SJiong Wang 			case BPF_JMP32 | BPF_JLE | BPF_K:
12105f645996SJiong Wang 			{
12115f645996SJiong Wang 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
12125f645996SJiong Wang 
1213156d0e29SNaveen N. Rao 				/*
1214156d0e29SNaveen N. Rao 				 * Need sign-extended load, so only positive
1215156d0e29SNaveen N. Rao 				 * values can be used as imm in cmpldi
1216156d0e29SNaveen N. Rao 				 */
12175f645996SJiong Wang 				if (imm >= 0 && imm < 32768) {
12185f645996SJiong Wang 					if (is_jmp32)
12193a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
12205f645996SJiong Wang 					else
12213a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
12225f645996SJiong Wang 				} else {
1223156d0e29SNaveen N. Rao 					/* sign-extending load */
12243a3fc9bfSJordan Niethe 					PPC_LI32(tmp1_reg, imm);
1225156d0e29SNaveen N. Rao 					/* ... but unsigned comparison */
12265f645996SJiong Wang 					if (is_jmp32)
12273a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
12285f645996SJiong Wang 					else
12293a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
1230156d0e29SNaveen N. Rao 				}
1231156d0e29SNaveen N. Rao 				break;
12325f645996SJiong Wang 			}
1233156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGT | BPF_K:
123420dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLT | BPF_K:
1235156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGE | BPF_K:
123620dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLE | BPF_K:
12375f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGT | BPF_K:
12385f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLT | BPF_K:
12395f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGE | BPF_K:
12405f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLE | BPF_K:
12415f645996SJiong Wang 			{
12425f645996SJiong Wang 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
12435f645996SJiong Wang 
1244156d0e29SNaveen N. Rao 				/*
1245156d0e29SNaveen N. Rao 				 * signed comparison, so any 16-bit value
1246156d0e29SNaveen N. Rao 				 * can be used in cmpdi
1247156d0e29SNaveen N. Rao 				 */
12485f645996SJiong Wang 				if (imm >= -32768 && imm < 32768) {
12495f645996SJiong Wang 					if (is_jmp32)
12503a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
12515f645996SJiong Wang 					else
12523a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
12535f645996SJiong Wang 				} else {
12543a3fc9bfSJordan Niethe 					PPC_LI32(tmp1_reg, imm);
12555f645996SJiong Wang 					if (is_jmp32)
12563a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
12575f645996SJiong Wang 					else
12583a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
1259156d0e29SNaveen N. Rao 				}
1260156d0e29SNaveen N. Rao 				break;
12615f645996SJiong Wang 			}
1262156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSET | BPF_K:
12635f645996SJiong Wang 			case BPF_JMP32 | BPF_JSET | BPF_K:
1264156d0e29SNaveen N. Rao 				/* andi does not sign-extend the immediate */
1265156d0e29SNaveen N. Rao 				if (imm >= 0 && imm < 32768)
1266156d0e29SNaveen N. Rao 					/* PPC_ANDI is _only/always_ dot-form */
12673a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
1268156d0e29SNaveen N. Rao 				else {
12693a3fc9bfSJordan Niethe 					PPC_LI32(tmp1_reg, imm);
12705f645996SJiong Wang 					if (BPF_CLASS(code) == BPF_JMP) {
12713a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
12723a3fc9bfSJordan Niethe 								     tmp1_reg));
12735f645996SJiong Wang 					} else {
12743a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
12753a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
12763a181237SBalamuruhan S 									0, 0, 31));
12775f645996SJiong Wang 					}
1278156d0e29SNaveen N. Rao 				}
1279156d0e29SNaveen N. Rao 				break;
1280156d0e29SNaveen N. Rao 			}
1281156d0e29SNaveen N. Rao 			PPC_BCC(true_cond, addrs[i + 1 + off]);
1282156d0e29SNaveen N. Rao 			break;
1283156d0e29SNaveen N. Rao 
1284156d0e29SNaveen N. Rao 		/*
1285ce076141SNaveen N. Rao 		 * Tail call
1286156d0e29SNaveen N. Rao 		 */
128771189fa9SAlexei Starovoitov 		case BPF_JMP | BPF_TAIL_CALL:
1288ce076141SNaveen N. Rao 			ctx->seen |= SEEN_TAILCALL;
12893832ba4eSNaveen N. Rao 			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
12903832ba4eSNaveen N. Rao 			if (ret < 0)
12913832ba4eSNaveen N. Rao 				return ret;
1292ce076141SNaveen N. Rao 			break;
1293156d0e29SNaveen N. Rao 
1294156d0e29SNaveen N. Rao 		default:
1295156d0e29SNaveen N. Rao 			/*
1296156d0e29SNaveen N. Rao 			 * The filter contains something cruel & unusual.
1297156d0e29SNaveen N. Rao 			 * We don't handle it, but also there shouldn't be
1298156d0e29SNaveen N. Rao 			 * anything missing from our list.
1299156d0e29SNaveen N. Rao 			 */
1300156d0e29SNaveen N. Rao 			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1301156d0e29SNaveen N. Rao 					code, i);
1302156d0e29SNaveen N. Rao 			return -ENOTSUPP;
1303156d0e29SNaveen N. Rao 		}
1304156d0e29SNaveen N. Rao 	}
1305156d0e29SNaveen N. Rao 
1306156d0e29SNaveen N. Rao 	/* Set end-of-body-code address for exit. */
1307156d0e29SNaveen N. Rao 	addrs[i] = ctx->idx * 4;
1308156d0e29SNaveen N. Rao 
1309156d0e29SNaveen N. Rao 	return 0;
1310156d0e29SNaveen N. Rao }
1311