xref: /linux/arch/powerpc/net/bpf_jit_comp64.c (revision 90d862f370b6e9de1b5d607843c5a2f9823990f3)
1b886d83cSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2156d0e29SNaveen N. Rao /*
3156d0e29SNaveen N. Rao  * bpf_jit_comp64.c: eBPF JIT compiler
4156d0e29SNaveen N. Rao  *
5156d0e29SNaveen N. Rao  * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
6156d0e29SNaveen N. Rao  *		  IBM Corporation
7156d0e29SNaveen N. Rao  *
8156d0e29SNaveen N. Rao  * Based on the powerpc classic BPF JIT compiler by Matt Evans
9156d0e29SNaveen N. Rao  */
10156d0e29SNaveen N. Rao #include <linux/moduleloader.h>
11156d0e29SNaveen N. Rao #include <asm/cacheflush.h>
12ec0c464cSChristophe Leroy #include <asm/asm-compat.h>
13156d0e29SNaveen N. Rao #include <linux/netdevice.h>
14156d0e29SNaveen N. Rao #include <linux/filter.h>
15156d0e29SNaveen N. Rao #include <linux/if_vlan.h>
16156d0e29SNaveen N. Rao #include <asm/kprobes.h>
17ce076141SNaveen N. Rao #include <linux/bpf.h>
18b7540d62SNaveen N. Rao #include <asm/security_features.h>
19156d0e29SNaveen N. Rao 
20576a6c3aSNaveen N. Rao #include "bpf_jit.h"
21576a6c3aSNaveen N. Rao 
22576a6c3aSNaveen N. Rao /*
23576a6c3aSNaveen N. Rao  * Stack layout:
24576a6c3aSNaveen N. Rao  * Ensure the top half (upto local_tmp_var) stays consistent
25576a6c3aSNaveen N. Rao  * with our redzone usage.
26576a6c3aSNaveen N. Rao  *
27576a6c3aSNaveen N. Rao  *		[	prev sp		] <-------------
28576a6c3aSNaveen N. Rao  *		[   nv gpr save area	] 5*8		|
29576a6c3aSNaveen N. Rao  *		[    tail_call_cnt	] 8		|
30576a6c3aSNaveen N. Rao  *		[    local_tmp_var	] 16		|
31576a6c3aSNaveen N. Rao  * fp (r31) -->	[   ebpf stack space	] upto 512	|
32576a6c3aSNaveen N. Rao  *		[     frame header	] 32/112	|
33576a6c3aSNaveen N. Rao  * sp (r1) --->	[    stack pointer	] --------------
34576a6c3aSNaveen N. Rao  */
35576a6c3aSNaveen N. Rao 
/* for gpr non volatile registers BPF_REG_6 to 10 */
37576a6c3aSNaveen N. Rao #define BPF_PPC_STACK_SAVE	(5*8)
38576a6c3aSNaveen N. Rao /* for bpf JIT code internal usage */
39576a6c3aSNaveen N. Rao #define BPF_PPC_STACK_LOCALS	24
40576a6c3aSNaveen N. Rao /* stack frame excluding BPF stack, ensure this is quadword aligned */
41576a6c3aSNaveen N. Rao #define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
42576a6c3aSNaveen N. Rao 				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
43576a6c3aSNaveen N. Rao 
44576a6c3aSNaveen N. Rao /* BPF register usage */
45576a6c3aSNaveen N. Rao #define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
46576a6c3aSNaveen N. Rao #define TMP_REG_2	(MAX_BPF_JIT_REG + 1)
47576a6c3aSNaveen N. Rao 
48576a6c3aSNaveen N. Rao /* BPF to ppc register mappings */
4949c3af43SNaveen N. Rao void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
5049c3af43SNaveen N. Rao {
51576a6c3aSNaveen N. Rao 	/* function return value */
5249c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_0] = _R8;
53576a6c3aSNaveen N. Rao 	/* function arguments */
5449c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_1] = _R3;
5549c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_2] = _R4;
5649c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_3] = _R5;
5749c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_4] = _R6;
5849c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_5] = _R7;
59576a6c3aSNaveen N. Rao 	/* non volatile registers */
6049c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_6] = _R27;
6149c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_7] = _R28;
6249c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_8] = _R29;
6349c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_9] = _R30;
64576a6c3aSNaveen N. Rao 	/* frame pointer aka BPF_REG_10 */
6549c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_FP] = _R31;
66576a6c3aSNaveen N. Rao 	/* eBPF jit internal registers */
6749c3af43SNaveen N. Rao 	ctx->b2p[BPF_REG_AX] = _R12;
6849c3af43SNaveen N. Rao 	ctx->b2p[TMP_REG_1] = _R9;
6949c3af43SNaveen N. Rao 	ctx->b2p[TMP_REG_2] = _R10;
7049c3af43SNaveen N. Rao }
71576a6c3aSNaveen N. Rao 
72576a6c3aSNaveen N. Rao /* PPC NVR range -- update this if we ever use NVRs below r27 */
73036d559cSNaveen N. Rao #define BPF_PPC_NVR_MIN		_R27
74156d0e29SNaveen N. Rao 
75156d0e29SNaveen N. Rao static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
76156d0e29SNaveen N. Rao {
77156d0e29SNaveen N. Rao 	/*
78156d0e29SNaveen N. Rao 	 * We only need a stack frame if:
79156d0e29SNaveen N. Rao 	 * - we call other functions (kernel helpers), or
80156d0e29SNaveen N. Rao 	 * - the bpf program uses its stack area
81156d0e29SNaveen N. Rao 	 * The latter condition is deduced from the usage of BPF_REG_FP
82156d0e29SNaveen N. Rao 	 */
8349c3af43SNaveen N. Rao 	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
84156d0e29SNaveen N. Rao }
85156d0e29SNaveen N. Rao 
867b847f52SNaveen N. Rao /*
877b847f52SNaveen N. Rao  * When not setting up our own stackframe, the redzone usage is:
887b847f52SNaveen N. Rao  *
897b847f52SNaveen N. Rao  *		[	prev sp		] <-------------
907b847f52SNaveen N. Rao  *		[	  ...       	] 		|
917b847f52SNaveen N. Rao  * sp (r1) --->	[    stack pointer	] --------------
92b7540d62SNaveen N. Rao  *		[   nv gpr save area	] 5*8
937b847f52SNaveen N. Rao  *		[    tail_call_cnt	] 8
94b7540d62SNaveen N. Rao  *		[    local_tmp_var	] 16
957b847f52SNaveen N. Rao  *		[   unused red zone	] 208 bytes protected
967b847f52SNaveen N. Rao  */
977b847f52SNaveen N. Rao static int bpf_jit_stack_local(struct codegen_context *ctx)
987b847f52SNaveen N. Rao {
997b847f52SNaveen N. Rao 	if (bpf_has_stack_frame(ctx))
100ac0761ebSSandipan Das 		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
1017b847f52SNaveen N. Rao 	else
102b7540d62SNaveen N. Rao 		return -(BPF_PPC_STACK_SAVE + 24);
1037b847f52SNaveen N. Rao }
1047b847f52SNaveen N. Rao 
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	/* tail_call_cnt sits above the 16-byte local_tmp_var in the locals area */
	int locals = bpf_jit_stack_local(ctx);

	return locals + 16;
}
109ce076141SNaveen N. Rao 
1107b847f52SNaveen N. Rao static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
1117b847f52SNaveen N. Rao {
1127b847f52SNaveen N. Rao 	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
113ac0761ebSSandipan Das 		return (bpf_has_stack_frame(ctx) ?
114ac0761ebSSandipan Das 			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
1157b847f52SNaveen N. Rao 				- (8 * (32 - reg));
1167b847f52SNaveen N. Rao 
1177b847f52SNaveen N. Rao 	pr_err("BPF JIT is asking about unknown registers");
1187b847f52SNaveen N. Rao 	BUG();
1197b847f52SNaveen N. Rao }
1207b847f52SNaveen N. Rao 
/*
 * No register reallocation is done on ppc64 -- the static mapping set up in
 * bpf_jit_init_reg_mapping() is used as-is.  This stub satisfies the
 * interface declared in bpf_jit.h (presumably shared with the 32-bit JIT,
 * which does remap registers -- confirm against bpf_jit_comp32.c).
 */
void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}
12440272035SChristophe Leroy 
/*
 * Emit the program prologue: (optionally) reload the kernel TOC, set up
 * tail_call_cnt, establish a stack frame if needed, save the seen NVRs
 * and initialize the frame pointer.
 *
 * NOTE: the instruction count and ordering here is load-bearing --
 * bpf_jit_emit_tail_call() branches into a target program at a fixed
 * offset (bpf_tailcall_prologue_size) past its entry to skip the TOC
 * load and the tail_call_cnt initialization.
 */
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* For TOC-based (non-pcrel) ELFv2 kernels, reload the kernel TOC
	 * pointer into r2 from the paca.
	 */
#ifndef CONFIG_PPC_KERNEL_PCREL
	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
#endif

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
		/* this goes in the redzone */
		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
	} else {
		/* Two NOPs keep the prologue length constant either way */
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
		}

		/* Atomically push the frame and update r1 */
		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
175156d0e29SNaveen N. Rao 
/*
 * Restore saved NVRs and tear down the stack frame, without emitting the
 * final branch -- shared by the regular epilogue (which appends blr) and
 * the tail-call path (which appends bctr into the next program).
 */
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			/* LR was saved by the prologue; reload it after popping the frame */
			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
			EMIT(PPC_RAW_MTLR(_R0));
		}
	}
}
194ce076141SNaveen N. Rao 
/*
 * Emit the standard program epilogue: restore NVRs/stack, move the BPF
 * return value (BPF_REG_0) into r3 per the ppc64 calling convention,
 * and return to the caller.
 */
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));

	EMIT(PPC_RAW_BLR());
}
204156d0e29SNaveen N. Rao 
20543d636f8SNaveen N. Rao static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
206e2c95a61SDaniel Borkmann {
20743d636f8SNaveen N. Rao 	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
208feb63072SNaveen N. Rao 	long reladdr;
20943d636f8SNaveen N. Rao 
21043d636f8SNaveen N. Rao 	if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
21143d636f8SNaveen N. Rao 		return -EINVAL;
21243d636f8SNaveen N. Rao 
2137e3a68beSNicholas Piggin 	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
2147e3a68beSNicholas Piggin 		reladdr = func_addr - CTX_NIA(ctx);
2157e3a68beSNicholas Piggin 
2167e3a68beSNicholas Piggin 		if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
2177e3a68beSNicholas Piggin 			pr_err("eBPF: address of %ps out of range of pcrel address.\n",
2187e3a68beSNicholas Piggin 				(void *)func);
2197e3a68beSNicholas Piggin 			return -ERANGE;
2207e3a68beSNicholas Piggin 		}
2217e3a68beSNicholas Piggin 		/* pla r12,addr */
2227e3a68beSNicholas Piggin 		EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
2237e3a68beSNicholas Piggin 		EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
2247e3a68beSNicholas Piggin 		EMIT(PPC_RAW_MTCTR(_R12));
2257e3a68beSNicholas Piggin 		EMIT(PPC_RAW_BCTR());
2267e3a68beSNicholas Piggin 
2277e3a68beSNicholas Piggin 	} else {
228feb63072SNaveen N. Rao 		reladdr = func_addr - kernel_toc_addr();
229feb63072SNaveen N. Rao 		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
230feb63072SNaveen N. Rao 			pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
231feb63072SNaveen N. Rao 			return -ERANGE;
232feb63072SNaveen N. Rao 		}
233feb63072SNaveen N. Rao 
234feb63072SNaveen N. Rao 		EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
235feb63072SNaveen N. Rao 		EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
236feb63072SNaveen N. Rao 		EMIT(PPC_RAW_MTCTR(_R12));
23720ccb004SNaveen N. Rao 		EMIT(PPC_RAW_BCTRL());
2387e3a68beSNicholas Piggin 	}
23943d636f8SNaveen N. Rao 
24043d636f8SNaveen N. Rao 	return 0;
241e2c95a61SDaniel Borkmann }
242e2c95a61SDaniel Borkmann 
/*
 * Emit a call to an address that may not be final until the extra pass
 * (e.g. bpf-to-bpf subprog calls).  Always occupies a fixed number of
 * instructions (5-insn load + mtctr + bctrl) so image offsets stay
 * stable across JIT passes.  Returns 0, or -EINVAL for module text.
 */
int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	/* Module addresses are not expected here -- reject them loudly */
	if (WARN_ON_ONCE(func && is_module_text_address(func)))
		return -EINVAL;

	/* skip past descriptor if elf v1 */
	func += FUNCTION_DESCR_SIZE;

	/* Load function address into r12 */
	PPC_LI64(_R12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to increase,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	if (!image)
		for (i = ctx->idx - ctx_idx; i < 5; i++)
			EMIT(PPC_RAW_NOP());

	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());

	return 0;
}
275ce076141SNaveen N. Rao 
/*
 * Emit a BPF tail call: bounds-check the index against the map, enforce
 * MAX_TAIL_CALL_CNT, look up the target prog and jump into it past its
 * prologue (re-using the current stack frame).  'out' is the branch
 * target for the fall-through ("tail call failed") path.
 */
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
	int b2p_index = bpf_to_ppc(BPF_REG_3);
	/* 8 = the two tail_call_cnt init instructions in the target's prologue */
	int bpf_tailcall_prologue_size = 8;

	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		bpf_tailcall_prologue_size += 4; /* skip past the toc load */

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	/* zero-extend the 32-bit index before the unsigned compare */
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * tail_call_cnt++;
	 */
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
	PPC_BCC_SHORT(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	/* bctr (no link): the next program returns directly to our caller */
	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}
340ce076141SNaveen N. Rao 
341b7540d62SNaveen N. Rao /*
342b7540d62SNaveen N. Rao  * We spill into the redzone always, even if the bpf program has its own stackframe.
343b7540d62SNaveen N. Rao  * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
344b7540d62SNaveen N. Rao  */
void bpf_stf_barrier(void);

/*
 * Fallback STF barrier sequence.  Spills r21/r22 into the redzone at
 * -64(r1)/-56(r1) -- offsets that must stay in sync with the redzone
 * layout described above bpf_jit_stack_local() -- then orders with sync
 * and dependent reloads, followed by a run of taken branches (mitigation
 * filler pattern -- NOTE(review): presumably matches the firmware-advised
 * fallback sequence; confirm against security_features handling).
 */
asm (
"		.global bpf_stf_barrier		;"
"	bpf_stf_barrier:			;"
"		std	21,-64(1)		;"
"		std	22,-56(1)		;"
"		sync				;"
"		ld	21,-64(1)		;"
"		ld	22,-56(1)		;"
"		ori	31,31,0			;"
"		.rept 14			;"
"		b	1f			;"
"	1:					;"
"		.endr				;"
"		blr				;"
);
362b7540d62SNaveen N. Rao 
363156d0e29SNaveen N. Rao /* Assemble the body code between the prologue & epilogue */
364*90d862f3SHari Bathini int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
36585e03115SChristophe Leroy 		       u32 *addrs, int pass, bool extra_pass)
366156d0e29SNaveen N. Rao {
367b7540d62SNaveen N. Rao 	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
368156d0e29SNaveen N. Rao 	const struct bpf_insn *insn = fp->insnsi;
369156d0e29SNaveen N. Rao 	int flen = fp->len;
370e2c95a61SDaniel Borkmann 	int i, ret;
371156d0e29SNaveen N. Rao 
372156d0e29SNaveen N. Rao 	/* Start of epilogue code - will only be valid 2nd pass onwards */
373156d0e29SNaveen N. Rao 	u32 exit_addr = addrs[flen];
374156d0e29SNaveen N. Rao 
375156d0e29SNaveen N. Rao 	for (i = 0; i < flen; i++) {
376156d0e29SNaveen N. Rao 		u32 code = insn[i].code;
37749c3af43SNaveen N. Rao 		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
37849c3af43SNaveen N. Rao 		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
379efa95f03SHari Bathini 		u32 size = BPF_SIZE(code);
38049c3af43SNaveen N. Rao 		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
38149c3af43SNaveen N. Rao 		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
3821e82dfaaSHari Bathini 		u32 save_reg, ret_reg;
383156d0e29SNaveen N. Rao 		s16 off = insn[i].off;
384156d0e29SNaveen N. Rao 		s32 imm = insn[i].imm;
385e2c95a61SDaniel Borkmann 		bool func_addr_fixed;
386e2c95a61SDaniel Borkmann 		u64 func_addr;
387156d0e29SNaveen N. Rao 		u64 imm64;
388156d0e29SNaveen N. Rao 		u32 true_cond;
389b9c1e60eSDaniel Borkmann 		u32 tmp_idx;
390f9320c49SNaveen N. Rao 		int j;
391156d0e29SNaveen N. Rao 
392156d0e29SNaveen N. Rao 		/*
393156d0e29SNaveen N. Rao 		 * addrs[] maps a BPF bytecode address into a real offset from
394156d0e29SNaveen N. Rao 		 * the start of the body code.
395156d0e29SNaveen N. Rao 		 */
396156d0e29SNaveen N. Rao 		addrs[i] = ctx->idx * 4;
397156d0e29SNaveen N. Rao 
398156d0e29SNaveen N. Rao 		/*
399156d0e29SNaveen N. Rao 		 * As an optimization, we note down which non-volatile registers
400156d0e29SNaveen N. Rao 		 * are used so that we can only save/restore those in our
401156d0e29SNaveen N. Rao 		 * prologue and epilogue. We do this here regardless of whether
402156d0e29SNaveen N. Rao 		 * the actual BPF instruction uses src/dst registers or not
403156d0e29SNaveen N. Rao 		 * (for instance, BPF_CALL does not use them). The expectation
404156d0e29SNaveen N. Rao 		 * is that those instructions will have src_reg/dst_reg set to
405156d0e29SNaveen N. Rao 		 * 0. Even otherwise, we just lose some prologue/epilogue
406156d0e29SNaveen N. Rao 		 * optimization but everything else should work without
407156d0e29SNaveen N. Rao 		 * any issues.
408156d0e29SNaveen N. Rao 		 */
4097b847f52SNaveen N. Rao 		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
410ed573b57SChristophe Leroy 			bpf_set_seen_register(ctx, dst_reg);
4117b847f52SNaveen N. Rao 		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
412ed573b57SChristophe Leroy 			bpf_set_seen_register(ctx, src_reg);
413156d0e29SNaveen N. Rao 
414156d0e29SNaveen N. Rao 		switch (code) {
415156d0e29SNaveen N. Rao 		/*
416156d0e29SNaveen N. Rao 		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
417156d0e29SNaveen N. Rao 		 */
418156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
419156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
42006541865SBalamuruhan S 			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
421156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
422156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
423156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
4243a181237SBalamuruhan S 			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
425156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
426156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
427156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
4285855c4c1SNaveen N. Rao 			if (!imm) {
4295855c4c1SNaveen N. Rao 				goto bpf_alu32_trunc;
4305855c4c1SNaveen N. Rao 			} else if (imm >= -32768 && imm < 32768) {
4313a181237SBalamuruhan S 				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
4325855c4c1SNaveen N. Rao 			} else {
4333a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
4343a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
435156d0e29SNaveen N. Rao 			}
4365855c4c1SNaveen N. Rao 			goto bpf_alu32_trunc;
4375855c4c1SNaveen N. Rao 		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
4385855c4c1SNaveen N. Rao 		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
4395855c4c1SNaveen N. Rao 			if (!imm) {
4405855c4c1SNaveen N. Rao 				goto bpf_alu32_trunc;
4415855c4c1SNaveen N. Rao 			} else if (imm > -32768 && imm <= 32768) {
4425855c4c1SNaveen N. Rao 				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
4435855c4c1SNaveen N. Rao 			} else {
4443a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
4453a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
446156d0e29SNaveen N. Rao 			}
447156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
448156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
449156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
450156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ALU)
4513a181237SBalamuruhan S 				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
452156d0e29SNaveen N. Rao 			else
4533a181237SBalamuruhan S 				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
454156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
455156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
456156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
457156d0e29SNaveen N. Rao 			if (imm >= -32768 && imm < 32768)
4583a181237SBalamuruhan S 				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
459156d0e29SNaveen N. Rao 			else {
4603a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
461156d0e29SNaveen N. Rao 				if (BPF_CLASS(code) == BPF_ALU)
4623a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
463156d0e29SNaveen N. Rao 				else
4643a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
465156d0e29SNaveen N. Rao 			}
466156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
467156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
468156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
469156d0e29SNaveen N. Rao 			if (BPF_OP(code) == BPF_MOD) {
4703a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
4713a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
4723a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
473156d0e29SNaveen N. Rao 			} else
4743a181237SBalamuruhan S 				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
475156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
476156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
477156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
478156d0e29SNaveen N. Rao 			if (BPF_OP(code) == BPF_MOD) {
4793a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
4803a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
4813a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
482156d0e29SNaveen N. Rao 			} else
4833a181237SBalamuruhan S 				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
484156d0e29SNaveen N. Rao 			break;
485156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
486156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
487156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
488156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
489156d0e29SNaveen N. Rao 			if (imm == 0)
490156d0e29SNaveen N. Rao 				return -EINVAL;
4918bbc9d82SNaveen N. Rao 			if (imm == 1) {
4928bbc9d82SNaveen N. Rao 				if (BPF_OP(code) == BPF_DIV) {
493156d0e29SNaveen N. Rao 					goto bpf_alu32_trunc;
4948bbc9d82SNaveen N. Rao 				} else {
4958bbc9d82SNaveen N. Rao 					EMIT(PPC_RAW_LI(dst_reg, 0));
4968bbc9d82SNaveen N. Rao 					break;
4978bbc9d82SNaveen N. Rao 				}
4988bbc9d82SNaveen N. Rao 			}
499156d0e29SNaveen N. Rao 
5003a3fc9bfSJordan Niethe 			PPC_LI32(tmp1_reg, imm);
501156d0e29SNaveen N. Rao 			switch (BPF_CLASS(code)) {
502156d0e29SNaveen N. Rao 			case BPF_ALU:
503156d0e29SNaveen N. Rao 				if (BPF_OP(code) == BPF_MOD) {
5043a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
5053a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
5063a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
507156d0e29SNaveen N. Rao 				} else
5083a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
509156d0e29SNaveen N. Rao 				break;
510156d0e29SNaveen N. Rao 			case BPF_ALU64:
511156d0e29SNaveen N. Rao 				if (BPF_OP(code) == BPF_MOD) {
5123a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
5133a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
5143a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
515156d0e29SNaveen N. Rao 				} else
5163a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
517156d0e29SNaveen N. Rao 				break;
518156d0e29SNaveen N. Rao 			}
519156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
520156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
521156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
5223a181237SBalamuruhan S 			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
523156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
524156d0e29SNaveen N. Rao 
525156d0e29SNaveen N. Rao 		/*
526156d0e29SNaveen N. Rao 		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
527156d0e29SNaveen N. Rao 		 */
528156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
529156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
5303a181237SBalamuruhan S 			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
531156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
532156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
533156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
534156d0e29SNaveen N. Rao 			if (!IMM_H(imm))
5353a181237SBalamuruhan S 				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
536156d0e29SNaveen N. Rao 			else {
537156d0e29SNaveen N. Rao 				/* Sign-extended */
5383a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
5393a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
540156d0e29SNaveen N. Rao 			}
541156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
542156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
543156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
5443a181237SBalamuruhan S 			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
545156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
546156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
547156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
548156d0e29SNaveen N. Rao 			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
549156d0e29SNaveen N. Rao 				/* Sign-extended */
5503a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
5513a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
552156d0e29SNaveen N. Rao 			} else {
553156d0e29SNaveen N. Rao 				if (IMM_L(imm))
5543a181237SBalamuruhan S 					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
555156d0e29SNaveen N. Rao 				if (IMM_H(imm))
5563a181237SBalamuruhan S 					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
557156d0e29SNaveen N. Rao 			}
558156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
559156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
560156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
5613a181237SBalamuruhan S 			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
562156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
563156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
564156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
565156d0e29SNaveen N. Rao 			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
566156d0e29SNaveen N. Rao 				/* Sign-extended */
5673a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
5683a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
569156d0e29SNaveen N. Rao 			} else {
570156d0e29SNaveen N. Rao 				if (IMM_L(imm))
5713a181237SBalamuruhan S 					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
572156d0e29SNaveen N. Rao 				if (IMM_H(imm))
5733a181237SBalamuruhan S 					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
574156d0e29SNaveen N. Rao 			}
575156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
576156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
577156d0e29SNaveen N. Rao 			/* slw clears top 32 bits */
5783a181237SBalamuruhan S 			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
579a4c92773SJiong Wang 			/* skip zero extension move, but set address map. */
580a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
581a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
582156d0e29SNaveen N. Rao 			break;
583156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
5843a181237SBalamuruhan S 			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
585156d0e29SNaveen N. Rao 			break;
586156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
587156d0e29SNaveen N. Rao 			/* with imm 0, we still need to clear top 32 bits */
5883a181237SBalamuruhan S 			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
589a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
590a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
591156d0e29SNaveen N. Rao 			break;
592156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
593156d0e29SNaveen N. Rao 			if (imm != 0)
5943a181237SBalamuruhan S 				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
595156d0e29SNaveen N. Rao 			break;
596156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
5973a181237SBalamuruhan S 			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
598a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
599a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
600156d0e29SNaveen N. Rao 			break;
601156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
6023a181237SBalamuruhan S 			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
603156d0e29SNaveen N. Rao 			break;
604156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
6053a181237SBalamuruhan S 			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
606a4c92773SJiong Wang 			if (insn_is_zext(&insn[i + 1]))
607a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
608156d0e29SNaveen N. Rao 			break;
609156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
610156d0e29SNaveen N. Rao 			if (imm != 0)
6113a181237SBalamuruhan S 				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
612156d0e29SNaveen N. Rao 			break;
61344cf43c0SJiong Wang 		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
6143a181237SBalamuruhan S 			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
61544cf43c0SJiong Wang 			goto bpf_alu32_trunc;
616156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
6173a181237SBalamuruhan S 			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
618156d0e29SNaveen N. Rao 			break;
61944cf43c0SJiong Wang 		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
6203a181237SBalamuruhan S 			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
62144cf43c0SJiong Wang 			goto bpf_alu32_trunc;
622156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
623156d0e29SNaveen N. Rao 			if (imm != 0)
6243a181237SBalamuruhan S 				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
625156d0e29SNaveen N. Rao 			break;
626156d0e29SNaveen N. Rao 
627156d0e29SNaveen N. Rao 		/*
628156d0e29SNaveen N. Rao 		 * MOV
629156d0e29SNaveen N. Rao 		 */
630156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
631156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
632a4c92773SJiong Wang 			if (imm == 1) {
633a4c92773SJiong Wang 				/* special mov32 for zext */
6343a181237SBalamuruhan S 				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
635a4c92773SJiong Wang 				break;
636a4c92773SJiong Wang 			}
6373a181237SBalamuruhan S 			EMIT(PPC_RAW_MR(dst_reg, src_reg));
638156d0e29SNaveen N. Rao 			goto bpf_alu32_trunc;
639156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
640156d0e29SNaveen N. Rao 		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
641156d0e29SNaveen N. Rao 			PPC_LI32(dst_reg, imm);
642156d0e29SNaveen N. Rao 			if (imm < 0)
643156d0e29SNaveen N. Rao 				goto bpf_alu32_trunc;
644a4c92773SJiong Wang 			else if (insn_is_zext(&insn[i + 1]))
645a4c92773SJiong Wang 				addrs[++i] = ctx->idx * 4;
646156d0e29SNaveen N. Rao 			break;
647156d0e29SNaveen N. Rao 
648156d0e29SNaveen N. Rao bpf_alu32_trunc:
649156d0e29SNaveen N. Rao 		/* Truncate to 32-bits */
650a4c92773SJiong Wang 		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
6513a181237SBalamuruhan S 			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
652156d0e29SNaveen N. Rao 		break;
653156d0e29SNaveen N. Rao 
654156d0e29SNaveen N. Rao 		/*
655156d0e29SNaveen N. Rao 		 * BPF_FROM_BE/LE
656156d0e29SNaveen N. Rao 		 */
657156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_END | BPF_FROM_LE:
658156d0e29SNaveen N. Rao 		case BPF_ALU | BPF_END | BPF_FROM_BE:
659156d0e29SNaveen N. Rao #ifdef __BIG_ENDIAN__
660156d0e29SNaveen N. Rao 			if (BPF_SRC(code) == BPF_FROM_BE)
661156d0e29SNaveen N. Rao 				goto emit_clear;
662156d0e29SNaveen N. Rao #else /* !__BIG_ENDIAN__ */
663156d0e29SNaveen N. Rao 			if (BPF_SRC(code) == BPF_FROM_LE)
664156d0e29SNaveen N. Rao 				goto emit_clear;
665156d0e29SNaveen N. Rao #endif
666156d0e29SNaveen N. Rao 			switch (imm) {
667156d0e29SNaveen N. Rao 			case 16:
668156d0e29SNaveen N. Rao 				/* Rotate 8 bits left & mask with 0x0000ff00 */
6693a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
670156d0e29SNaveen N. Rao 				/* Rotate 8 bits right & insert LSB to reg */
6713a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
672156d0e29SNaveen N. Rao 				/* Move result back to dst_reg */
6733a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
674156d0e29SNaveen N. Rao 				break;
675156d0e29SNaveen N. Rao 			case 32:
676156d0e29SNaveen N. Rao 				/*
677156d0e29SNaveen N. Rao 				 * Rotate word left by 8 bits:
678156d0e29SNaveen N. Rao 				 * 2 bytes are already in their final position
679156d0e29SNaveen N. Rao 				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
680156d0e29SNaveen N. Rao 				 */
6813a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
682156d0e29SNaveen N. Rao 				/* Rotate 24 bits and insert byte 1 */
6833a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
684156d0e29SNaveen N. Rao 				/* Rotate 24 bits and insert byte 3 */
6853a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
6863a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
687156d0e29SNaveen N. Rao 				break;
688156d0e29SNaveen N. Rao 			case 64:
6893f5f766dSNaveen N. Rao 				/* Store the value to stack and then use byte-reverse loads */
690036d559cSNaveen N. Rao 				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
6913a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
6923f5f766dSNaveen N. Rao 				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
6933a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
6943f5f766dSNaveen N. Rao 				} else {
6953a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
6963f5f766dSNaveen N. Rao 					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
6973f5f766dSNaveen N. Rao 						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
6983a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_LI(tmp2_reg, 4));
6993a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
7003f5f766dSNaveen N. Rao 					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
7013a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
7023a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
7033f5f766dSNaveen N. Rao 				}
704156d0e29SNaveen N. Rao 				break;
705156d0e29SNaveen N. Rao 			}
706156d0e29SNaveen N. Rao 			break;
707156d0e29SNaveen N. Rao 
708156d0e29SNaveen N. Rao emit_clear:
709156d0e29SNaveen N. Rao 			switch (imm) {
710156d0e29SNaveen N. Rao 			case 16:
711156d0e29SNaveen N. Rao 				/* zero-extend 16 bits into 64 bits */
7123a181237SBalamuruhan S 				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
713a4c92773SJiong Wang 				if (insn_is_zext(&insn[i + 1]))
714a4c92773SJiong Wang 					addrs[++i] = ctx->idx * 4;
715156d0e29SNaveen N. Rao 				break;
716156d0e29SNaveen N. Rao 			case 32:
717a4c92773SJiong Wang 				if (!fp->aux->verifier_zext)
718156d0e29SNaveen N. Rao 					/* zero-extend 32 bits into 64 bits */
7193a181237SBalamuruhan S 					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
720156d0e29SNaveen N. Rao 				break;
721156d0e29SNaveen N. Rao 			case 64:
722156d0e29SNaveen N. Rao 				/* nop */
723156d0e29SNaveen N. Rao 				break;
724156d0e29SNaveen N. Rao 			}
725156d0e29SNaveen N. Rao 			break;
726156d0e29SNaveen N. Rao 
727156d0e29SNaveen N. Rao 		/*
728f5e81d11SDaniel Borkmann 		 * BPF_ST NOSPEC (speculation barrier)
729f5e81d11SDaniel Borkmann 		 */
730f5e81d11SDaniel Borkmann 		case BPF_ST | BPF_NOSPEC:
731b7540d62SNaveen N. Rao 			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
732b7540d62SNaveen N. Rao 					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
733b7540d62SNaveen N. Rao 				break;
734b7540d62SNaveen N. Rao 
735b7540d62SNaveen N. Rao 			switch (stf_barrier) {
736b7540d62SNaveen N. Rao 			case STF_BARRIER_EIEIO:
737b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_EIEIO() | 0x02000000);
738b7540d62SNaveen N. Rao 				break;
739b7540d62SNaveen N. Rao 			case STF_BARRIER_SYNC_ORI:
740b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_SYNC());
7413a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
742b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
743b7540d62SNaveen N. Rao 				break;
744b7540d62SNaveen N. Rao 			case STF_BARRIER_FALLBACK:
745c2067f7fSNaveen N. Rao 				ctx->seen |= SEEN_FUNC;
746036d559cSNaveen N. Rao 				PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
747036d559cSNaveen N. Rao 				EMIT(PPC_RAW_MTCTR(_R12));
748b7540d62SNaveen N. Rao 				EMIT(PPC_RAW_BCTRL());
749b7540d62SNaveen N. Rao 				break;
750b7540d62SNaveen N. Rao 			case STF_BARRIER_NONE:
751b7540d62SNaveen N. Rao 				break;
752b7540d62SNaveen N. Rao 			}
753f5e81d11SDaniel Borkmann 			break;
754f5e81d11SDaniel Borkmann 
755f5e81d11SDaniel Borkmann 		/*
756156d0e29SNaveen N. Rao 		 * BPF_ST(X)
757156d0e29SNaveen N. Rao 		 */
758156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
759156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
760156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
7613a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_LI(tmp1_reg, imm));
7623a3fc9bfSJordan Niethe 				src_reg = tmp1_reg;
763156d0e29SNaveen N. Rao 			}
7643a181237SBalamuruhan S 			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
765156d0e29SNaveen N. Rao 			break;
766156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
767156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
768156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
7693a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_LI(tmp1_reg, imm));
7703a3fc9bfSJordan Niethe 				src_reg = tmp1_reg;
771156d0e29SNaveen N. Rao 			}
7723a181237SBalamuruhan S 			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
773156d0e29SNaveen N. Rao 			break;
774156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
775156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
776156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
7773a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
7783a3fc9bfSJordan Niethe 				src_reg = tmp1_reg;
779156d0e29SNaveen N. Rao 			}
7803a181237SBalamuruhan S 			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
781156d0e29SNaveen N. Rao 			break;
782156d0e29SNaveen N. Rao 		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
783156d0e29SNaveen N. Rao 		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
784156d0e29SNaveen N. Rao 			if (BPF_CLASS(code) == BPF_ST) {
7853a3fc9bfSJordan Niethe 				PPC_LI32(tmp1_reg, imm);
7863a3fc9bfSJordan Niethe 				src_reg = tmp1_reg;
787156d0e29SNaveen N. Rao 			}
788794abc08SNaveen N. Rao 			if (off % 4) {
7893a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_LI(tmp2_reg, off));
7903a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
791794abc08SNaveen N. Rao 			} else {
792794abc08SNaveen N. Rao 				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
793794abc08SNaveen N. Rao 			}
794156d0e29SNaveen N. Rao 			break;
795156d0e29SNaveen N. Rao 
796156d0e29SNaveen N. Rao 		/*
79791c960b0SBrendan Jackman 		 * BPF_STX ATOMIC (atomic ops)
798156d0e29SNaveen N. Rao 		 */
79991c960b0SBrendan Jackman 		case BPF_STX | BPF_ATOMIC | BPF_W:
80065112709SHari Bathini 		case BPF_STX | BPF_ATOMIC | BPF_DW:
8011e82dfaaSHari Bathini 			save_reg = tmp2_reg;
8021e82dfaaSHari Bathini 			ret_reg = src_reg;
8031e82dfaaSHari Bathini 
80465112709SHari Bathini 			/* Get offset into TMP_REG_1 */
80565112709SHari Bathini 			EMIT(PPC_RAW_LI(tmp1_reg, off));
806b9c1e60eSDaniel Borkmann 			tmp_idx = ctx->idx * 4;
807156d0e29SNaveen N. Rao 			/* load value from memory into TMP_REG_2 */
80865112709SHari Bathini 			if (size == BPF_DW)
80965112709SHari Bathini 				EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
81065112709SHari Bathini 			else
81165112709SHari Bathini 				EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
81265112709SHari Bathini 
813dbe6e245SHari Bathini 			/* Save old value in _R0 */
814dbe6e245SHari Bathini 			if (imm & BPF_FETCH)
815dbe6e245SHari Bathini 				EMIT(PPC_RAW_MR(_R0, tmp2_reg));
816dbe6e245SHari Bathini 
81765112709SHari Bathini 			switch (imm) {
81865112709SHari Bathini 			case BPF_ADD:
819dbe6e245SHari Bathini 			case BPF_ADD | BPF_FETCH:
8203a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
821156d0e29SNaveen N. Rao 				break;
82265112709SHari Bathini 			case BPF_AND:
823dbe6e245SHari Bathini 			case BPF_AND | BPF_FETCH:
82465112709SHari Bathini 				EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
82565112709SHari Bathini 				break;
82665112709SHari Bathini 			case BPF_OR:
827dbe6e245SHari Bathini 			case BPF_OR | BPF_FETCH:
82865112709SHari Bathini 				EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
82965112709SHari Bathini 				break;
83065112709SHari Bathini 			case BPF_XOR:
831dbe6e245SHari Bathini 			case BPF_XOR | BPF_FETCH:
83265112709SHari Bathini 				EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
83365112709SHari Bathini 				break;
8341e82dfaaSHari Bathini 			case BPF_CMPXCHG:
8351e82dfaaSHari Bathini 				/*
8361e82dfaaSHari Bathini 				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
8371e82dfaaSHari Bathini 				 * in src_reg for other cases.
8381e82dfaaSHari Bathini 				 */
8391e82dfaaSHari Bathini 				ret_reg = bpf_to_ppc(BPF_REG_0);
8401e82dfaaSHari Bathini 
8411e82dfaaSHari Bathini 				/* Compare with old value in BPF_R0 */
8421e82dfaaSHari Bathini 				if (size == BPF_DW)
8431e82dfaaSHari Bathini 					EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
8441e82dfaaSHari Bathini 				else
8451e82dfaaSHari Bathini 					EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
8461e82dfaaSHari Bathini 				/* Don't set if different from old value */
8471e82dfaaSHari Bathini 				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
8481e82dfaaSHari Bathini 				fallthrough;
8491e82dfaaSHari Bathini 			case BPF_XCHG:
8501e82dfaaSHari Bathini 				save_reg = src_reg;
8511e82dfaaSHari Bathini 				break;
85265112709SHari Bathini 			default:
85391c960b0SBrendan Jackman 				pr_err_ratelimited(
85491c960b0SBrendan Jackman 					"eBPF filter atomic op code %02x (@%d) unsupported\n",
85591c960b0SBrendan Jackman 					code, i);
85665112709SHari Bathini 				return -EOPNOTSUPP;
85791c960b0SBrendan Jackman 			}
85891c960b0SBrendan Jackman 
859dbe6e245SHari Bathini 			/* store new value */
86065112709SHari Bathini 			if (size == BPF_DW)
8611e82dfaaSHari Bathini 				EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
86265112709SHari Bathini 			else
8631e82dfaaSHari Bathini 				EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
86465112709SHari Bathini 			/* we're done if this succeeded */
865b9c1e60eSDaniel Borkmann 			PPC_BCC_SHORT(COND_NE, tmp_idx);
866dbe6e245SHari Bathini 
8671e82dfaaSHari Bathini 			if (imm & BPF_FETCH) {
8681e82dfaaSHari Bathini 				EMIT(PPC_RAW_MR(ret_reg, _R0));
8691e82dfaaSHari Bathini 				/*
8701e82dfaaSHari Bathini 				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
8711e82dfaaSHari Bathini 				 * For context, see commit 39491867ace5.
8721e82dfaaSHari Bathini 				 */
8731e82dfaaSHari Bathini 				if (size != BPF_DW && imm == BPF_CMPXCHG &&
8741e82dfaaSHari Bathini 				    insn_is_zext(&insn[i + 1]))
8751e82dfaaSHari Bathini 					addrs[++i] = ctx->idx * 4;
8761e82dfaaSHari Bathini 			}
877156d0e29SNaveen N. Rao 			break;
878156d0e29SNaveen N. Rao 
879156d0e29SNaveen N. Rao 		/*
880156d0e29SNaveen N. Rao 		 * BPF_LDX
881156d0e29SNaveen N. Rao 		 */
882156d0e29SNaveen N. Rao 		/* dst = *(u8 *)(ul) (src + off) */
883156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_B:
884983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
885156d0e29SNaveen N. Rao 		/* dst = *(u16 *)(ul) (src + off) */
886156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_H:
887983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
888156d0e29SNaveen N. Rao 		/* dst = *(u32 *)(ul) (src + off) */
889156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_W:
890983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
891156d0e29SNaveen N. Rao 		/* dst = *(u64 *)(ul) (src + off) */
892156d0e29SNaveen N. Rao 		case BPF_LDX | BPF_MEM | BPF_DW:
893983bdc02SRavi Bangoria 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
8949c70c714SRavi Bangoria 			/*
8959c70c714SRavi Bangoria 			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
8969c70c714SRavi Bangoria 			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
8979c70c714SRavi Bangoria 			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
8989c70c714SRavi Bangoria 			 * set dst_reg=0 and move on.
8999c70c714SRavi Bangoria 			 */
9009c70c714SRavi Bangoria 			if (BPF_MODE(code) == BPF_PROBE_MEM) {
9013a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
9029c70c714SRavi Bangoria 				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
9033a3fc9bfSJordan Niethe 					PPC_LI64(tmp2_reg, 0x8000000000000000ul);
9049c70c714SRavi Bangoria 				else /* BOOK3S_64 */
9053a3fc9bfSJordan Niethe 					PPC_LI64(tmp2_reg, PAGE_OFFSET);
9063a3fc9bfSJordan Niethe 				EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
907bafb5898SNaveen N. Rao 				PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
9089c70c714SRavi Bangoria 				EMIT(PPC_RAW_LI(dst_reg, 0));
9099c70c714SRavi Bangoria 				/*
910794abc08SNaveen N. Rao 				 * Check if 'off' is word aligned for BPF_DW, because
911794abc08SNaveen N. Rao 				 * we might generate two instructions.
9129c70c714SRavi Bangoria 				 */
9139c70c714SRavi Bangoria 				if (BPF_SIZE(code) == BPF_DW && (off & 3))
9149c70c714SRavi Bangoria 					PPC_JMP((ctx->idx + 3) * 4);
9159c70c714SRavi Bangoria 				else
9169c70c714SRavi Bangoria 					PPC_JMP((ctx->idx + 2) * 4);
9179c70c714SRavi Bangoria 			}
9189c70c714SRavi Bangoria 
919efa95f03SHari Bathini 			switch (size) {
920efa95f03SHari Bathini 			case BPF_B:
921efa95f03SHari Bathini 				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
922efa95f03SHari Bathini 				break;
923efa95f03SHari Bathini 			case BPF_H:
924efa95f03SHari Bathini 				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
925efa95f03SHari Bathini 				break;
926efa95f03SHari Bathini 			case BPF_W:
927efa95f03SHari Bathini 				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
928efa95f03SHari Bathini 				break;
929efa95f03SHari Bathini 			case BPF_DW:
930794abc08SNaveen N. Rao 				if (off % 4) {
9313a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_LI(tmp1_reg, off));
9323a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
933794abc08SNaveen N. Rao 				} else {
934794abc08SNaveen N. Rao 					EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
935794abc08SNaveen N. Rao 				}
936156d0e29SNaveen N. Rao 				break;
937efa95f03SHari Bathini 			}
938efa95f03SHari Bathini 
939efa95f03SHari Bathini 			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
940efa95f03SHari Bathini 				addrs[++i] = ctx->idx * 4;
941983bdc02SRavi Bangoria 
942983bdc02SRavi Bangoria 			if (BPF_MODE(code) == BPF_PROBE_MEM) {
943*90d862f3SHari Bathini 				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
944*90d862f3SHari Bathini 							    ctx->idx - 1, 4, dst_reg);
945983bdc02SRavi Bangoria 				if (ret)
946983bdc02SRavi Bangoria 					return ret;
947983bdc02SRavi Bangoria 			}
948efa95f03SHari Bathini 			break;
949156d0e29SNaveen N. Rao 
950156d0e29SNaveen N. Rao 		/*
951156d0e29SNaveen N. Rao 		 * Doubleword load
952156d0e29SNaveen N. Rao 		 * 16 byte instruction that uses two 'struct bpf_insn'
953156d0e29SNaveen N. Rao 		 */
954156d0e29SNaveen N. Rao 		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
955156d0e29SNaveen N. Rao 			imm64 = ((u64)(u32) insn[i].imm) |
956156d0e29SNaveen N. Rao 				    (((u64)(u32) insn[i+1].imm) << 32);
957f9320c49SNaveen N. Rao 			tmp_idx = ctx->idx;
958f9320c49SNaveen N. Rao 			PPC_LI64(dst_reg, imm64);
959f9320c49SNaveen N. Rao 			/* padding to allow full 5 instructions for later patching */
960d3921cbbSChristophe Leroy 			if (!image)
961f9320c49SNaveen N. Rao 				for (j = ctx->idx - tmp_idx; j < 5; j++)
962f9320c49SNaveen N. Rao 					EMIT(PPC_RAW_NOP());
963156d0e29SNaveen N. Rao 			/* Adjust for two bpf instructions */
964156d0e29SNaveen N. Rao 			addrs[++i] = ctx->idx * 4;
965156d0e29SNaveen N. Rao 			break;
966156d0e29SNaveen N. Rao 
967156d0e29SNaveen N. Rao 		/*
968156d0e29SNaveen N. Rao 		 * Return/Exit
969156d0e29SNaveen N. Rao 		 */
970156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_EXIT:
971156d0e29SNaveen N. Rao 			/*
972156d0e29SNaveen N. Rao 			 * If this isn't the very last instruction, branch to
973156d0e29SNaveen N. Rao 			 * the epilogue. If we _are_ the last instruction,
974156d0e29SNaveen N. Rao 			 * we'll just fall through to the epilogue.
975156d0e29SNaveen N. Rao 			 */
9760ffdbce6SNaveen N. Rao 			if (i != flen - 1) {
9773a3fc9bfSJordan Niethe 				ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
9780ffdbce6SNaveen N. Rao 				if (ret)
9790ffdbce6SNaveen N. Rao 					return ret;
9800ffdbce6SNaveen N. Rao 			}
981156d0e29SNaveen N. Rao 			/* else fall through to the epilogue */
982156d0e29SNaveen N. Rao 			break;
983156d0e29SNaveen N. Rao 
984156d0e29SNaveen N. Rao 		/*
9858484ce83SSandipan Das 		 * Call kernel helper or bpf function
986156d0e29SNaveen N. Rao 		 */
987156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_CALL:
988156d0e29SNaveen N. Rao 			ctx->seen |= SEEN_FUNC;
9898484ce83SSandipan Das 
99085e03115SChristophe Leroy 			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
991e2c95a61SDaniel Borkmann 						    &func_addr, &func_addr_fixed);
992e2c95a61SDaniel Borkmann 			if (ret < 0)
993e2c95a61SDaniel Borkmann 				return ret;
994156d0e29SNaveen N. Rao 
995e2c95a61SDaniel Borkmann 			if (func_addr_fixed)
99643d636f8SNaveen N. Rao 				ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
997e2c95a61SDaniel Borkmann 			else
998*90d862f3SHari Bathini 				ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
99943d636f8SNaveen N. Rao 
100043d636f8SNaveen N. Rao 			if (ret)
100143d636f8SNaveen N. Rao 				return ret;
100243d636f8SNaveen N. Rao 
1003156d0e29SNaveen N. Rao 			/* move return value from r3 to BPF_REG_0 */
100449c3af43SNaveen N. Rao 			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
1005156d0e29SNaveen N. Rao 			break;
1006156d0e29SNaveen N. Rao 
1007156d0e29SNaveen N. Rao 		/*
1008156d0e29SNaveen N. Rao 		 * Jumps and branches
1009156d0e29SNaveen N. Rao 		 */
1010156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JA:
1011156d0e29SNaveen N. Rao 			PPC_JMP(addrs[i + 1 + off]);
1012156d0e29SNaveen N. Rao 			break;
1013156d0e29SNaveen N. Rao 
1014156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGT | BPF_K:
1015156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGT | BPF_X:
1016156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGT | BPF_K:
1017156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGT | BPF_X:
10185f645996SJiong Wang 		case BPF_JMP32 | BPF_JGT | BPF_K:
10195f645996SJiong Wang 		case BPF_JMP32 | BPF_JGT | BPF_X:
10205f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGT | BPF_K:
10215f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGT | BPF_X:
1022156d0e29SNaveen N. Rao 			true_cond = COND_GT;
1023156d0e29SNaveen N. Rao 			goto cond_branch;
102420dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLT | BPF_K:
102520dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLT | BPF_X:
102620dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLT | BPF_K:
102720dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLT | BPF_X:
10285f645996SJiong Wang 		case BPF_JMP32 | BPF_JLT | BPF_K:
10295f645996SJiong Wang 		case BPF_JMP32 | BPF_JLT | BPF_X:
10305f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLT | BPF_K:
10315f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLT | BPF_X:
103220dbf5ccSDaniel Borkmann 			true_cond = COND_LT;
103320dbf5ccSDaniel Borkmann 			goto cond_branch;
1034156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGE | BPF_K:
1035156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JGE | BPF_X:
1036156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGE | BPF_K:
1037156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSGE | BPF_X:
10385f645996SJiong Wang 		case BPF_JMP32 | BPF_JGE | BPF_K:
10395f645996SJiong Wang 		case BPF_JMP32 | BPF_JGE | BPF_X:
10405f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGE | BPF_K:
10415f645996SJiong Wang 		case BPF_JMP32 | BPF_JSGE | BPF_X:
1042156d0e29SNaveen N. Rao 			true_cond = COND_GE;
1043156d0e29SNaveen N. Rao 			goto cond_branch;
104420dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLE | BPF_K:
104520dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JLE | BPF_X:
104620dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLE | BPF_K:
104720dbf5ccSDaniel Borkmann 		case BPF_JMP | BPF_JSLE | BPF_X:
10485f645996SJiong Wang 		case BPF_JMP32 | BPF_JLE | BPF_K:
10495f645996SJiong Wang 		case BPF_JMP32 | BPF_JLE | BPF_X:
10505f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLE | BPF_K:
10515f645996SJiong Wang 		case BPF_JMP32 | BPF_JSLE | BPF_X:
105220dbf5ccSDaniel Borkmann 			true_cond = COND_LE;
105320dbf5ccSDaniel Borkmann 			goto cond_branch;
1054156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JEQ | BPF_K:
1055156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JEQ | BPF_X:
10565f645996SJiong Wang 		case BPF_JMP32 | BPF_JEQ | BPF_K:
10575f645996SJiong Wang 		case BPF_JMP32 | BPF_JEQ | BPF_X:
1058156d0e29SNaveen N. Rao 			true_cond = COND_EQ;
1059156d0e29SNaveen N. Rao 			goto cond_branch;
1060156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JNE | BPF_K:
1061156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JNE | BPF_X:
10625f645996SJiong Wang 		case BPF_JMP32 | BPF_JNE | BPF_K:
10635f645996SJiong Wang 		case BPF_JMP32 | BPF_JNE | BPF_X:
1064156d0e29SNaveen N. Rao 			true_cond = COND_NE;
1065156d0e29SNaveen N. Rao 			goto cond_branch;
1066156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSET | BPF_K:
1067156d0e29SNaveen N. Rao 		case BPF_JMP | BPF_JSET | BPF_X:
10685f645996SJiong Wang 		case BPF_JMP32 | BPF_JSET | BPF_K:
10695f645996SJiong Wang 		case BPF_JMP32 | BPF_JSET | BPF_X:
1070156d0e29SNaveen N. Rao 			true_cond = COND_NE;
1071156d0e29SNaveen N. Rao 			/* Fall through */
1072156d0e29SNaveen N. Rao 
1073156d0e29SNaveen N. Rao cond_branch:
1074156d0e29SNaveen N. Rao 			switch (code) {
1075156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGT | BPF_X:
107620dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLT | BPF_X:
1077156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGE | BPF_X:
107820dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLE | BPF_X:
1079156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JEQ | BPF_X:
1080156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JNE | BPF_X:
10815f645996SJiong Wang 			case BPF_JMP32 | BPF_JGT | BPF_X:
10825f645996SJiong Wang 			case BPF_JMP32 | BPF_JLT | BPF_X:
10835f645996SJiong Wang 			case BPF_JMP32 | BPF_JGE | BPF_X:
10845f645996SJiong Wang 			case BPF_JMP32 | BPF_JLE | BPF_X:
10855f645996SJiong Wang 			case BPF_JMP32 | BPF_JEQ | BPF_X:
10865f645996SJiong Wang 			case BPF_JMP32 | BPF_JNE | BPF_X:
1087156d0e29SNaveen N. Rao 				/* unsigned comparison */
10885f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP32)
10893a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
10905f645996SJiong Wang 				else
10913a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
1092156d0e29SNaveen N. Rao 				break;
1093156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGT | BPF_X:
109420dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLT | BPF_X:
1095156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGE | BPF_X:
109620dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLE | BPF_X:
10975f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGT | BPF_X:
10985f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLT | BPF_X:
10995f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGE | BPF_X:
11005f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLE | BPF_X:
1101156d0e29SNaveen N. Rao 				/* signed comparison */
11025f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP32)
11033a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
11045f645996SJiong Wang 				else
11053a181237SBalamuruhan S 					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
1106156d0e29SNaveen N. Rao 				break;
			/*
			 * JSET: branch if (dst & src) != 0. The record ("dot")
			 * forms set CR0 from the AND result. For JMP32, the
			 * 64-bit AND result is first truncated to its low 32
			 * bits by a recording rlwinm (mask bits 0..31) so that
			 * only the 32-bit subregister is tested.
			 */
1107156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSET | BPF_X:
11085f645996SJiong Wang 			case BPF_JMP32 | BPF_JSET | BPF_X:
11095f645996SJiong Wang 				if (BPF_CLASS(code) == BPF_JMP) {
11103a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
11115f645996SJiong Wang 				} else {
11123a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
11133a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
11145f645996SJiong Wang 				}
1115156d0e29SNaveen N. Rao 				break;
			/* Unsigned comparison of dst_reg against an immediate */
1116156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JNE | BPF_K:
1117156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JEQ | BPF_K:
1118156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGT | BPF_K:
111920dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLT | BPF_K:
1120156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JGE | BPF_K:
112120dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JLE | BPF_K:
11225f645996SJiong Wang 			case BPF_JMP32 | BPF_JNE | BPF_K:
11235f645996SJiong Wang 			case BPF_JMP32 | BPF_JEQ | BPF_K:
11245f645996SJiong Wang 			case BPF_JMP32 | BPF_JGT | BPF_K:
11255f645996SJiong Wang 			case BPF_JMP32 | BPF_JLT | BPF_K:
11265f645996SJiong Wang 			case BPF_JMP32 | BPF_JGE | BPF_K:
11275f645996SJiong Wang 			case BPF_JMP32 | BPF_JLE | BPF_K:
11285f645996SJiong Wang 			{
11295f645996SJiong Wang 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
11305f645996SJiong Wang 
1131156d0e29SNaveen N. Rao 				/*
1132156d0e29SNaveen N. Rao 				 * Need sign-extended load, so only positive
1133156d0e29SNaveen N. Rao 				 * values can be used as imm in cmpldi
1134156d0e29SNaveen N. Rao 				 */
11355f645996SJiong Wang 				if (imm >= 0 && imm < 32768) {
11365f645996SJiong Wang 					if (is_jmp32)
11373a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
11385f645996SJiong Wang 					else
11393a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
11405f645996SJiong Wang 				} else {
1141156d0e29SNaveen N. Rao 					/* sign-extending load */
11423a3fc9bfSJordan Niethe 					PPC_LI32(tmp1_reg, imm);
1143156d0e29SNaveen N. Rao 					/* ... but unsigned comparison */
11445f645996SJiong Wang 					if (is_jmp32)
11453a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
11465f645996SJiong Wang 					else
11473a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
1148156d0e29SNaveen N. Rao 				}
1149156d0e29SNaveen N. Rao 				break;
11505f645996SJiong Wang 			}
			/* Signed comparison of dst_reg against an immediate */
1151156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGT | BPF_K:
115220dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLT | BPF_K:
1153156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSGE | BPF_K:
115420dbf5ccSDaniel Borkmann 			case BPF_JMP | BPF_JSLE | BPF_K:
11555f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGT | BPF_K:
11565f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLT | BPF_K:
11575f645996SJiong Wang 			case BPF_JMP32 | BPF_JSGE | BPF_K:
11585f645996SJiong Wang 			case BPF_JMP32 | BPF_JSLE | BPF_K:
11595f645996SJiong Wang 			{
11605f645996SJiong Wang 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
11615f645996SJiong Wang 
1162156d0e29SNaveen N. Rao 				/*
1163156d0e29SNaveen N. Rao 				 * signed comparison, so any 16-bit value
1164156d0e29SNaveen N. Rao 				 * can be used in cmpdi
1165156d0e29SNaveen N. Rao 				 */
11665f645996SJiong Wang 				if (imm >= -32768 && imm < 32768) {
11675f645996SJiong Wang 					if (is_jmp32)
11683a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
11695f645996SJiong Wang 					else
11703a181237SBalamuruhan S 						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
11715f645996SJiong Wang 				} else {
					/* imm does not fit the si field: load it first */
11723a3fc9bfSJordan Niethe 					PPC_LI32(tmp1_reg, imm);
11735f645996SJiong Wang 					if (is_jmp32)
11743a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
11755f645996SJiong Wang 					else
11763a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
1177156d0e29SNaveen N. Rao 				}
1178156d0e29SNaveen N. Rao 				break;
11795f645996SJiong Wang 			}
			/* JSET with immediate: test (dst & imm) != 0 via CR0 */
1180156d0e29SNaveen N. Rao 			case BPF_JMP | BPF_JSET | BPF_K:
11815f645996SJiong Wang 			case BPF_JMP32 | BPF_JSET | BPF_K:
1182156d0e29SNaveen N. Rao 				/* andi does not sign-extend the immediate */
1183156d0e29SNaveen N. Rao 				if (imm >= 0 && imm < 32768)
1184156d0e29SNaveen N. Rao 					/* PPC_ANDI is _only/always_ dot-form */
11853a3fc9bfSJordan Niethe 					EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
1186156d0e29SNaveen N. Rao 				else {
11873a3fc9bfSJordan Niethe 					PPC_LI32(tmp1_reg, imm);
11885f645996SJiong Wang 					if (BPF_CLASS(code) == BPF_JMP) {
11893a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
11903a3fc9bfSJordan Niethe 								     tmp1_reg));
11915f645996SJiong Wang 					} else {
						/* JMP32: test only the low 32 bits of the result */
11923a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
11933a3fc9bfSJordan Niethe 						EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
11943a181237SBalamuruhan S 									0, 0, 31));
11955f645996SJiong Wang 					}
1196156d0e29SNaveen N. Rao 				}
1197156d0e29SNaveen N. Rao 				break;
1198156d0e29SNaveen N. Rao 			}
			/*
			 * The condition is now in CR0: branch to the JITed
			 * address of BPF insn (i + 1 + off) as recorded in
			 * addrs[]. true_cond is the branch condition selected
			 * for this opcode before this point.
			 */
1199156d0e29SNaveen N. Rao 			PPC_BCC(true_cond, addrs[i + 1 + off]);
1200156d0e29SNaveen N. Rao 			break;
1201156d0e29SNaveen N. Rao 
1202156d0e29SNaveen N. Rao 		/*
1203ce076141SNaveen N. Rao 		 * Tail call
1204156d0e29SNaveen N. Rao 		 */
120571189fa9SAlexei Starovoitov 		case BPF_JMP | BPF_TAIL_CALL:
			/*
			 * NOTE(review): SEEN_TAILCALL presumably makes the
			 * prologue set up the tail-call count slot shown in the
			 * stack layout above — confirm in the prologue emitter.
			 */
1206ce076141SNaveen N. Rao 			ctx->seen |= SEEN_TAILCALL;
			/* Fall through to the next insn if the tail call is not taken */
12073832ba4eSNaveen N. Rao 			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
12083832ba4eSNaveen N. Rao 			if (ret < 0)
12093832ba4eSNaveen N. Rao 				return ret;
1210ce076141SNaveen N. Rao 			break;
1211156d0e29SNaveen N. Rao 
1212156d0e29SNaveen N. Rao 		default:
1213156d0e29SNaveen N. Rao 			/*
1214156d0e29SNaveen N. Rao 			 * The filter contains something cruel & unusual.
1215156d0e29SNaveen N. Rao 			 * We don't handle it, but also there shouldn't be
1216156d0e29SNaveen N. Rao 			 * anything missing from our list.
1217156d0e29SNaveen N. Rao 			 */
1218156d0e29SNaveen N. Rao 			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1219156d0e29SNaveen N. Rao 					code, i);
1220156d0e29SNaveen N. Rao 			return -ENOTSUPP;
1221156d0e29SNaveen N. Rao 		}
1222156d0e29SNaveen N. Rao 	}
1223156d0e29SNaveen N. Rao 
1224156d0e29SNaveen N. Rao 	/* Set end-of-body-code address for exit. */
	/*
	 * Records the one-past-the-last insn's offset as well, so branches
	 * past the body (to the exit sequence) resolve through addrs[].
	 */
1225156d0e29SNaveen N. Rao 	addrs[i] = ctx->idx * 4;
1226156d0e29SNaveen N. Rao 
1227156d0e29SNaveen N. Rao 	return 0;
1228156d0e29SNaveen N. Rao }
1229