xref: /linux/arch/powerpc/net/bpf_jit_comp64.c (revision 417552999d0b6681ac30e117ae890828ca7e46b3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * bpf_jit_comp64.c: eBPF JIT compiler
4  *
5  * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
6  *		  IBM Corporation
7  *
8  * Based on the powerpc classic BPF JIT compiler by Matt Evans
9  */
10 #include <linux/moduleloader.h>
11 #include <asm/cacheflush.h>
12 #include <asm/asm-compat.h>
13 #include <linux/netdevice.h>
14 #include <linux/filter.h>
15 #include <linux/if_vlan.h>
16 #include <asm/kprobes.h>
17 #include <linux/bpf.h>
18 #include <asm/security_features.h>
19 
20 #include "bpf_jit.h"
21 
22 /*
23  * Stack layout:
 24  * Ensure the top half (up to local_tmp_var) stays consistent
25  * with our redzone usage.
26  *
27  *		[	prev sp		] <-------------
28  *		[   nv gpr save area	] 6*8		|
29  *		[    tail_call_cnt	] 8		|
30  *		[    local_tmp_var	] 24		|
 31  * fp (r31) -->	[   ebpf stack space	] up to 512	|
32  *		[     frame header	] 32/112	|
 33  * sp (r1) --->	[    stack pointer	] --------------
34  */
35 
 36 /* for gpr non volatile registers BPF_REG_6 to 10 */
37 #define BPF_PPC_STACK_SAVE	(6*8)
38 /* for bpf JIT code internal usage */
39 #define BPF_PPC_STACK_LOCALS	32
40 /* stack frame excluding BPF stack, ensure this is quadword aligned */
41 #define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
42 				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
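
/*
 * Worked example (illustrative): the "frame header 32/112" entry in the
 * diagram above is STACK_FRAME_MIN_SIZE -- 32 bytes under ELFv2, 112 under
 * ELFv1. The fixed frame is then 32 + 32 + 48 = 112 bytes (ELFv2) or
 * 112 + 32 + 48 = 192 bytes (ELFv1), both multiples of 16 as required.
 * ctx->stack_size (the ebpf stack space, up to 512 bytes) is added on top
 * of this when the prologue is emitted.
 */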
43 
44 /* BPF register usage */
45 #define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
46 #define TMP_REG_2	(MAX_BPF_JIT_REG + 1)
47 #define ARENA_VM_START  (MAX_BPF_JIT_REG + 2)
48 
49 /* BPF to ppc register mappings */
 50 void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
51 {
52 	/* function return value */
53 	ctx->b2p[BPF_REG_0] = _R8;
54 	/* function arguments */
55 	ctx->b2p[BPF_REG_1] = _R3;
56 	ctx->b2p[BPF_REG_2] = _R4;
57 	ctx->b2p[BPF_REG_3] = _R5;
58 	ctx->b2p[BPF_REG_4] = _R6;
59 	ctx->b2p[BPF_REG_5] = _R7;
60 	/* non volatile registers */
61 	ctx->b2p[BPF_REG_6] = _R27;
62 	ctx->b2p[BPF_REG_7] = _R28;
63 	ctx->b2p[BPF_REG_8] = _R29;
64 	ctx->b2p[BPF_REG_9] = _R30;
65 	/* frame pointer aka BPF_REG_10 */
66 	ctx->b2p[BPF_REG_FP] = _R31;
67 	/* eBPF jit internal registers */
68 	ctx->b2p[BPF_REG_AX] = _R12;
69 	ctx->b2p[TMP_REG_1] = _R9;
70 	ctx->b2p[TMP_REG_2] = _R10;
71 	/* non volatile register for kern_vm_start address */
72 	ctx->b2p[ARENA_VM_START] = _R26;
73 }
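
/*
 * Note: BPF_REG_1 to BPF_REG_5 deliberately map onto r3-r7, the first five
 * ELF ABI argument registers, so a helper call needs no argument shuffling;
 * only the return value is moved back with a single "mr r8, r3" (see the
 * BPF_JMP | BPF_CALL handling in bpf_jit_build_body() below).
 */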
74 
75 /* PPC NVR range -- update this if we ever use NVRs below r26 */
76 #define BPF_PPC_NVR_MIN		_R26
77 
 78 static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
79 {
80 	/*
81 	 * We only need a stack frame if:
82 	 * - we call other functions (kernel helpers), or
83 	 * - the bpf program uses its stack area
84 	 * The latter condition is deduced from the usage of BPF_REG_FP
85 	 */
86 	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
87 }
88 
89 /*
 90  * When not setting up our own stack frame, the redzone (288 bytes) usage is:
 91  *
 92  *		[	prev sp		] <-------------
 93  *		[	  ...       	] 		|
 94  * sp (r1) --->	[    stack pointer	] --------------
 95  *		[   nv gpr save area	] 6*8
 96  *		[    tail_call_cnt	] 8
 97  *		[    local_tmp_var	] 24
 98  *		[   unused red zone	] 208
99  */
100 static int bpf_jit_stack_local(struct codegen_context *ctx)
101 {
102 	if (bpf_has_stack_frame(ctx))
103 		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
104 	else
105 		return -(BPF_PPC_STACK_SAVE + 32);
106 }
107 
108 static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
109 {
110 	return bpf_jit_stack_local(ctx) + 24;
111 }
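
/*
 * For illustration, in the no-stack-frame case the offsets work out to:
 *	local_tmp_var  at -80(r1)	(-(BPF_PPC_STACK_SAVE + 32))
 *	tail_call_cnt  at -56(r1)	(-80 + 24)
 * The latter matches the -(BPF_PPC_STACK_SAVE + 8) slot initialized in
 * bpf_jit_build_prologue() and the redzone diagram above.
 */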
112 
113 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
114 {
115 	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
116 		return (bpf_has_stack_frame(ctx) ?
117 			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
118 				- (8 * (32 - reg));
119 
120 	pr_err("BPF JIT is asking about unknown registers");
121 	BUG();
122 }
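
/*
 * E.g. r31 is saved at (BPF_PPC_STACKFRAME + ctx->stack_size) - 8 when a
 * frame exists, or at -8(r1) in the redzone otherwise; r26 (BPF_PPC_NVR_MIN)
 * ends up lowest at -48(r1) -- the "nv gpr save area" in the diagrams above.
 */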
123 
124 void bpf_jit_realloc_regs(struct codegen_context *ctx)
125 {
126 }
127 
128 void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
129 {
130 	int i;
131 
132 	/* Instruction for trampoline attach */
133 	EMIT(PPC_RAW_NOP());
134 
135 #ifndef CONFIG_PPC_KERNEL_PCREL
136 	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
137 		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
138 #endif
139 
140 	/*
141 	 * Initialize tail_call_cnt if we do tail calls.
142 	 * Otherwise, put in NOPs so that it can be skipped when we are
143 	 * invoked through a tail call.
144 	 */
145 	if (ctx->seen & SEEN_TAILCALL) {
146 		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
147 		/* this goes in the redzone */
148 		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
149 	} else {
150 		EMIT(PPC_RAW_NOP());
151 		EMIT(PPC_RAW_NOP());
152 	}
153 
154 	if (bpf_has_stack_frame(ctx)) {
155 		/*
156 		 * We need a stack frame, but we don't necessarily need to
157 		 * save/restore LR unless we call other functions
158 		 */
159 		if (ctx->seen & SEEN_FUNC) {
160 			EMIT(PPC_RAW_MFLR(_R0));
161 			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
162 		}
163 
164 		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
165 	}
166 
167 	/*
168 	 * Back up non-volatile regs -- BPF registers 6-10
169 	 * If we haven't created our own stack frame, we save these
170 	 * in the protected zone below the previous stack frame
171 	 */
172 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
173 		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
174 			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
175 
176 	if (ctx->arena_vm_start)
177 		EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
178 				 bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
179 
180 	/* Setup frame pointer to point to the bpf stack area */
181 	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
182 		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
183 				STACK_FRAME_MIN_SIZE + ctx->stack_size));
184 
185 	if (ctx->arena_vm_start)
186 		PPC_LI64(bpf_to_ppc(ARENA_VM_START), ctx->arena_vm_start);
187 }
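
/*
 * Roughly, for a program that calls helpers, does tail calls and uses its
 * stack, the ELFv2 prologue looks like (illustrative sketch, not emitted
 * verbatim):
 *
 *	nop					# trampoline attach point
 *	ld	r2, kernel_toc(r13)		# absent with PCREL
 *	li	r9, 0				# two nops if no tail calls
 *	std	r9, -56(r1)			# zero tail_call_cnt
 *	mflr	r0
 *	std	r0, 16(r1)			# PPC_LR_STKOFF
 *	stdu	r1, -(BPF_PPC_STACKFRAME + stack_size)(r1)
 *	std	r27..r31, ...			# only NVRs actually used
 *	addi	r31, r1, STACK_FRAME_MIN_SIZE + stack_size
 */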
188 
189 static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
190 {
191 	int i;
192 
193 	/* Restore NVRs */
194 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
195 		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
196 			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
197 
198 	if (ctx->arena_vm_start)
199 		EMIT(PPC_RAW_LD(bpf_to_ppc(ARENA_VM_START), _R1,
200 				bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
201 
202 	/* Tear down our stack frame */
203 	if (bpf_has_stack_frame(ctx)) {
204 		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
205 		if (ctx->seen & SEEN_FUNC) {
206 			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
207 			EMIT(PPC_RAW_MTLR(_R0));
208 		}
209 	}
210 }
211 
212 void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
213 {
214 	bpf_jit_emit_common_epilogue(image, ctx);
215 
216 	/* Move result to r3 */
217 	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
218 
219 	EMIT(PPC_RAW_BLR());
220 
221 	bpf_jit_build_fentry_stubs(image, ctx);
222 }
223 
224 int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
225 {
226 	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
227 	long reladdr;
228 
229 	/* bpf to bpf call, func is not known in the initial pass. Emit 5 nops as a placeholder */
230 	if (!func) {
231 		for (int i = 0; i < 5; i++)
232 			EMIT(PPC_RAW_NOP());
233 		/* elfv1 needs an additional instruction to load addr from descriptor */
234 		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
235 			EMIT(PPC_RAW_NOP());
236 		EMIT(PPC_RAW_MTCTR(_R12));
237 		EMIT(PPC_RAW_BCTRL());
238 		return 0;
239 	}
240 
241 #ifdef CONFIG_PPC_KERNEL_PCREL
242 	reladdr = func_addr - local_paca->kernelbase;
243 
244 	/*
245 	 * If fimage is NULL (the initial pass to find image size),
246 	 * account for the maximum no. of instructions possible.
247 	 */
248 	if (!fimage) {
249 		ctx->idx += 7;
250 		return 0;
251 	} else if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
252 		EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
253 		/* Align for subsequent prefix instruction */
254 		if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
255 			EMIT(PPC_RAW_NOP());
256 		/* paddi r12,r12,addr */
257 		EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
258 		EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
259 	} else {
260 		unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx);
261 		bool alignment_needed = !IS_ALIGNED(pc, 8);
262 
263 		reladdr = func_addr - (alignment_needed ? pc + 4 : pc);
264 
265 		if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
266 			if (alignment_needed)
267 				EMIT(PPC_RAW_NOP());
268 			/* pla r12,addr */
269 			EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
270 			EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
271 		} else {
272 			/* We can clobber r12 */
273 			PPC_LI64(_R12, func);
274 		}
275 	}
276 	EMIT(PPC_RAW_MTCTR(_R12));
277 	EMIT(PPC_RAW_BCTRL());
278 #else
279 	if (core_kernel_text(func_addr)) {
280 		reladdr = func_addr - kernel_toc_addr();
281 		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
282 			pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
283 			return -ERANGE;
284 		}
285 
286 		EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
287 		EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
288 		EMIT(PPC_RAW_MTCTR(_R12));
289 		EMIT(PPC_RAW_BCTRL());
290 	} else {
291 		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) {
292 			/* func points to the function descriptor */
293 			PPC_LI64(bpf_to_ppc(TMP_REG_2), func);
294 			/* Load actual entry point from function descriptor */
295 			EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
296 			/* ... and move it to CTR */
297 			EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
298 			/*
299 			 * Load TOC from function descriptor at offset 8.
300 			 * We can clobber r2 since we get called through a
301 			 * function pointer (so caller will save/restore r2).
302 			 */
303 			if (is_module_text_address(func_addr))
304 				EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
305 		} else {
306 			PPC_LI64(_R12, func);
307 			EMIT(PPC_RAW_MTCTR(_R12));
308 		}
309 		EMIT(PPC_RAW_BCTRL());
310 		/*
311 		 * Load r2 with kernel TOC as kernel TOC is used if function address falls
312 		 * within core kernel text.
313 		 */
314 		if (is_module_text_address(func_addr))
315 			EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
316 	}
317 #endif
318 
319 	return 0;
320 }
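
/*
 * In the TOC-relative case above, func_addr is rebuilt as r2 + reladdr in
 * two instructions. Since addi sign-extends its immediate, PPC_HA() rounds
 * the high half up when the low half has its top bit set. Illustrative
 * example: reladdr = 0x1234abcd emits "addis r12,r2,0x1235" then
 * "addi r12,r12,-0x5433", which sums back to 0x1234abcd.
 */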
321 
322 static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
323 {
324 	/*
325 	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
326 	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
327 	 * r4/BPF_REG_2 - pointer to bpf_array
328 	 * r5/BPF_REG_3 - index in bpf_array
329 	 */
330 	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
331 	int b2p_index = bpf_to_ppc(BPF_REG_3);
332 	int bpf_tailcall_prologue_size = 12;
333 
334 	if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
335 		bpf_tailcall_prologue_size += 4; /* skip past the toc load */
336 
337 	/*
338 	 * if (index >= array->map.max_entries)
339 	 *   goto out;
340 	 */
341 	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
342 	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
343 	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
344 	PPC_BCC_SHORT(COND_GE, out);
345 
346 	/*
347 	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
348 	 *   goto out;
349 	 */
350 	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
351 	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
352 	PPC_BCC_SHORT(COND_GE, out);
353 
354 	/*
355 	 * tail_call_cnt++;
356 	 */
357 	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
358 	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
359 
360 	/* prog = array->ptrs[index]; */
361 	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
362 	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
363 	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
364 
365 	/*
366 	 * if (prog == NULL)
367 	 *   goto out;
368 	 */
369 	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
370 	PPC_BCC_SHORT(COND_EQ, out);
371 
372 	/* goto *(prog->bpf_func + prologue_size); */
373 	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
374 	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
375 			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
376 	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
377 
378 	/* tear down stack, restore NVRs, ... */
379 	bpf_jit_emit_common_epilogue(image, ctx);
380 
381 	EMIT(PPC_RAW_BCTR());
382 
383 	/* out: */
384 	return 0;
385 }
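
/*
 * The bpf_tailcall_prologue_size bytes skipped above cover the attach nop
 * and the two tail_call_cnt setup instructions in the target's prologue
 * (plus the TOC load, when present), so the count accumulated so far is
 * preserved across the tail call instead of being re-zeroed.
 */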
386 
387 bool bpf_jit_bypass_spec_v1(void)
388 {
389 #if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
390 	return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
391 		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR));
392 #else
393 	return true;
394 #endif
395 }
396 
397 bool bpf_jit_bypass_spec_v4(void)
398 {
399 	return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
400 		 security_ftr_enabled(SEC_FTR_STF_BARRIER) &&
401 		 stf_barrier_type_get() != STF_BARRIER_NONE);
402 }
403 
404 /*
405  * We always spill into the redzone, even when the bpf program has its own stack frame.
406  * Offsets are hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local().
407  */
408 void bpf_stf_barrier(void);
409 
410 asm (
411 "		.global bpf_stf_barrier		;"
412 "	bpf_stf_barrier:			;"
413 "		std	21,-80(1)		;"
414 "		std	22,-72(1)		;"
415 "		sync				;"
416 "		ld	21,-80(1)		;"
417 "		ld	22,-72(1)		;"
418 "		ori	31,31,0			;"
419 "		.rept 14			;"
420 "		b	1f			;"
421 "	1:					;"
422 "		.endr				;"
423 "		blr				;"
424 );
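
/*
 * Sanity check on the hardcoded offsets: -80(r1) is bpf_jit_stack_local()
 * for the no-frame case (-(BPF_PPC_STACK_SAVE + 32)), so r21/r22 spill into
 * local_tmp_var within the redzone. The sync, the 14 taken branches and the
 * trailing "ori 31,31,0" approximate the kernel's fallback STF barrier
 * sequence.
 */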
425 
426 static int bpf_jit_emit_atomic_ops(u32 *image, struct codegen_context *ctx,
427 				   const struct bpf_insn *insn, u32 *jmp_off,
428 				   u32 *tmp_idx, u32 *addrp)
429 {
430 	u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
431 	u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
432 	u32 size = BPF_SIZE(insn->code);
433 	u32 src_reg = bpf_to_ppc(insn->src_reg);
434 	u32 dst_reg = bpf_to_ppc(insn->dst_reg);
435 	s32 imm = insn->imm;
436 
437 	u32 save_reg = tmp2_reg;
438 	u32 ret_reg = src_reg;
439 	u32 fixup_idx;
440 
441 	/* Get offset into TMP_REG_1 */
442 	EMIT(PPC_RAW_LI(tmp1_reg, insn->off));
443        /*
444 	* Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
445 	* before and after the operation.
446 	*
447 	* This is a requirement in the Linux Kernel Memory Model.
448 	* See __cmpxchg_u64() in asm/cmpxchg.h as an example.
449 	*/
450 	if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
451 		EMIT(PPC_RAW_SYNC());
452 
453 	*tmp_idx = ctx->idx;
454 
455 	/* load value from memory into TMP_REG_2 */
456 	if (size == BPF_DW)
457 		EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
458 	else
459 		EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
460 	/* Save old value in _R0 */
461 	if (imm & BPF_FETCH)
462 		EMIT(PPC_RAW_MR(_R0, tmp2_reg));
463 
464 	switch (imm) {
465 	case BPF_ADD:
466 	case BPF_ADD | BPF_FETCH:
467 		EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
468 		break;
469 	case BPF_AND:
470 	case BPF_AND | BPF_FETCH:
471 		EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
472 		break;
473 	case BPF_OR:
474 	case BPF_OR | BPF_FETCH:
475 		EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
476 		break;
477 	case BPF_XOR:
478 	case BPF_XOR | BPF_FETCH:
479 		EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
480 		break;
481 	case BPF_CMPXCHG:
482 	       /*
483 		* Return old value in BPF_REG_0 for BPF_CMPXCHG &
484 		* in src_reg for other cases.
485 		*/
486 		ret_reg = bpf_to_ppc(BPF_REG_0);
487 
488 		/* Compare with old value in BPF_R0 */
489 		if (size == BPF_DW)
490 			EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
491 		else
492 			EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
493 		/* Don't set if different from old value */
494 		PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
495 		fallthrough;
496 	case BPF_XCHG:
497 		save_reg = src_reg;
498 		break;
499 	default:
500 		return -EOPNOTSUPP;
501 	}
502 
503 	/* store new value */
504 	if (size == BPF_DW)
505 		EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
506 	else
507 		EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
508 	/* we're done if this succeeded */
509 	PPC_BCC_SHORT(COND_NE, *tmp_idx * 4);
510 	fixup_idx = ctx->idx;
511 
512 	if (imm & BPF_FETCH) {
513 		/* Emit 'sync' to enforce full ordering */
514 		if (IS_ENABLED(CONFIG_SMP))
515 			EMIT(PPC_RAW_SYNC());
516 		EMIT(PPC_RAW_MR(ret_reg, _R0));
517 		/*
518 		 * Skip unnecessary zero-extension for 32-bit cmpxchg.
519 		 * For context, see commit 39491867ace5.
520 		 */
521 		if (size != BPF_DW && imm == BPF_CMPXCHG &&
522 		    insn_is_zext(insn + 1))
523 			*addrp = ctx->idx * 4;
524 	}
525 
526 	*jmp_off = (fixup_idx - *tmp_idx) * 4;
527 
528 	return 0;
529 }
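
/*
 * Roughly, for BPF_ADD | BPF_FETCH on a BPF_DW operand, the sequence above
 * expands to (illustrative sketch, with tmp1 = r9, tmp2 = r10):
 *
 *	li	r9, off
 *	sync				# only with BPF_FETCH, on SMP
 * 0:	ldarx	r10, r9, dst		# load-reserve old value
 *	mr	r0, r10			# stash it for the fetch
 *	add	r10, r10, src
 *	stdcx.	r10, r9, dst		# store-conditional
 *	bne-	0b			# lost reservation: retry
 *	sync
 *	mr	src, r0			# return old value
 */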
530 
531 static int bpf_jit_emit_probe_mem_store(struct codegen_context *ctx, u32 src_reg, s16 off,
532 					u32 code, u32 *image)
533 {
534 	u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
535 	u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
536 
537 	switch (BPF_SIZE(code)) {
538 	case BPF_B:
539 		EMIT(PPC_RAW_STB(src_reg, tmp1_reg, off));
540 		break;
541 	case BPF_H:
542 		EMIT(PPC_RAW_STH(src_reg, tmp1_reg, off));
543 		break;
544 	case BPF_W:
545 		EMIT(PPC_RAW_STW(src_reg, tmp1_reg, off));
546 		break;
547 	case BPF_DW:
548 		if (off % 4) {
549 			EMIT(PPC_RAW_LI(tmp2_reg, off));
550 			EMIT(PPC_RAW_STDX(src_reg, tmp1_reg, tmp2_reg));
551 		} else {
552 			EMIT(PPC_RAW_STD(src_reg, tmp1_reg, off));
553 		}
554 		break;
555 	default:
556 		return -EINVAL;
557 	}
558 	return 0;
559 }
560 
561 static int emit_atomic_ld_st(const struct bpf_insn insn, struct codegen_context *ctx, u32 *image)
562 {
563 	u32 code = insn.code;
564 	u32 dst_reg = bpf_to_ppc(insn.dst_reg);
565 	u32 src_reg = bpf_to_ppc(insn.src_reg);
566 	u32 size = BPF_SIZE(code);
567 	u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
568 	u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
569 	s16 off = insn.off;
570 	s32 imm = insn.imm;
571 
572 	switch (imm) {
573 	case BPF_LOAD_ACQ:
574 		switch (size) {
575 		case BPF_B:
576 			EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
577 			break;
578 		case BPF_H:
579 			EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
580 			break;
581 		case BPF_W:
582 			EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
583 			break;
584 		case BPF_DW:
585 			if (off % 4) {
586 				EMIT(PPC_RAW_LI(tmp1_reg, off));
587 				EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
588 			} else {
589 				EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
590 			}
591 			break;
592 		}
593 		EMIT(PPC_RAW_LWSYNC());
594 		break;
595 	case BPF_STORE_REL:
596 		EMIT(PPC_RAW_LWSYNC());
597 		switch (size) {
598 		case BPF_B:
599 			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
600 			break;
601 		case BPF_H:
602 			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
603 			break;
604 		case BPF_W:
605 			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
606 			break;
607 		case BPF_DW:
608 			if (off % 4) {
609 				EMIT(PPC_RAW_LI(tmp2_reg, off));
610 				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
611 			} else {
612 				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
613 			}
614 			break;
615 		}
616 		break;
617 	default:
618 		pr_err_ratelimited("unexpected atomic load/store op code %02x\n",
619 				   imm);
620 		return -EINVAL;
621 	}
622 
623 	return 0;
624 }
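
/*
 * This follows the usual powerpc acquire/release mapping: plain load then
 * lwsync for load-acquire, lwsync then plain store for store-release, as
 * with smp_load_acquire()/smp_store_release().
 */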
625 
626 /* Assemble the body code between the prologue & epilogue */
627 int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
628 		       u32 *addrs, int pass, bool extra_pass)
629 {
630 	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
631 	bool sync_emitted, ori31_emitted;
632 	const struct bpf_insn *insn = fp->insnsi;
633 	int flen = fp->len;
634 	int i, ret;
635 
636 	/* Start of epilogue code - will only be valid 2nd pass onwards */
637 	u32 exit_addr = addrs[flen];
638 
639 	for (i = 0; i < flen; i++) {
640 		u32 code = insn[i].code;
641 		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
642 		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
643 		u32 size = BPF_SIZE(code);
644 		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
645 		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
646 		s16 off = insn[i].off;
647 		s32 imm = insn[i].imm;
648 		bool func_addr_fixed;
649 		u64 func_addr;
650 		u64 imm64;
651 		u32 true_cond;
652 		u32 tmp_idx;
653 		u32 jmp_off;
654 
655 		/*
656 		 * addrs[] maps a BPF bytecode address into a real offset from
657 		 * the start of the body code.
658 		 */
659 		addrs[i] = ctx->idx * 4;
660 
661 		/*
662 		 * As an optimization, we note down which non-volatile registers
663 		 * are used so that we can only save/restore those in our
664 		 * prologue and epilogue. We do this here regardless of whether
665 		 * the actual BPF instruction uses src/dst registers or not
666 		 * (for instance, BPF_CALL does not use them). The expectation
667 		 * is that those instructions will have src_reg/dst_reg set to
668 		 * 0. Even otherwise, we just lose some prologue/epilogue
669 		 * optimization but everything else should work without
670 		 * any issues.
671 		 */
672 		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
673 			bpf_set_seen_register(ctx, dst_reg);
674 		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
675 			bpf_set_seen_register(ctx, src_reg);
676 
677 		switch (code) {
678 		/*
679 		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
680 		 */
681 		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
682 		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
683 			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
684 			goto bpf_alu32_trunc;
685 		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
686 		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
687 			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
688 			goto bpf_alu32_trunc;
689 		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
690 		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
691 			if (!imm) {
692 				goto bpf_alu32_trunc;
693 			} else if (imm >= -32768 && imm < 32768) {
694 				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
695 			} else {
696 				PPC_LI32(tmp1_reg, imm);
697 				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
698 			}
699 			goto bpf_alu32_trunc;
700 		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
701 		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
702 			if (!imm) {
703 				goto bpf_alu32_trunc;
704 			} else if (imm > -32768 && imm <= 32768) {
705 				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
706 			} else {
707 				PPC_LI32(tmp1_reg, imm);
708 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
709 			}
710 			goto bpf_alu32_trunc;
711 		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
712 		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
713 			if (BPF_CLASS(code) == BPF_ALU)
714 				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
715 			else
716 				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
717 			goto bpf_alu32_trunc;
718 		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
719 		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
720 			if (imm >= -32768 && imm < 32768)
721 				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
722 			else {
723 				PPC_LI32(tmp1_reg, imm);
724 				if (BPF_CLASS(code) == BPF_ALU)
725 					EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
726 				else
727 					EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
728 			}
729 			goto bpf_alu32_trunc;
730 		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
731 		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
732 			if (BPF_OP(code) == BPF_MOD) {
733 				if (off)
734 					EMIT(PPC_RAW_DIVW(tmp1_reg, dst_reg, src_reg));
735 				else
736 					EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
737 
738 				EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
739 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
740 			} else
741 				if (off)
742 					EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, src_reg));
743 				else
744 					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
745 			goto bpf_alu32_trunc;
746 		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
747 		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
748 			if (BPF_OP(code) == BPF_MOD) {
749 				if (off)
750 					EMIT(PPC_RAW_DIVD(tmp1_reg, dst_reg, src_reg));
751 				else
752 					EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
753 				EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
754 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
755 			} else
756 				if (off)
757 					EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, src_reg));
758 				else
759 					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
760 			break;
761 		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
762 		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
763 		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
764 		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
765 			if (imm == 0)
766 				return -EINVAL;
767 			if (imm == 1) {
768 				if (BPF_OP(code) == BPF_DIV) {
769 					goto bpf_alu32_trunc;
770 				} else {
771 					EMIT(PPC_RAW_LI(dst_reg, 0));
772 					break;
773 				}
774 			}
775 
776 			PPC_LI32(tmp1_reg, imm);
777 			switch (BPF_CLASS(code)) {
778 			case BPF_ALU:
779 				if (BPF_OP(code) == BPF_MOD) {
780 					if (off)
781 						EMIT(PPC_RAW_DIVW(tmp2_reg, dst_reg, tmp1_reg));
782 					else
783 						EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
784 					EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
785 					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
786 				} else
787 					if (off)
788 						EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, tmp1_reg));
789 					else
790 						EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
791 				break;
792 			case BPF_ALU64:
793 				if (BPF_OP(code) == BPF_MOD) {
794 					if (off)
795 						EMIT(PPC_RAW_DIVD(tmp2_reg, dst_reg, tmp1_reg));
796 					else
797 						EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
798 					EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
799 					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
800 				} else
801 					if (off)
802 						EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, tmp1_reg));
803 					else
804 						EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
805 				break;
806 			}
807 			goto bpf_alu32_trunc;
808 		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
809 		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
810 			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
811 			goto bpf_alu32_trunc;
812 
813 		/*
814 		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
815 		 */
816 		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
817 		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
818 			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
819 			goto bpf_alu32_trunc;
820 		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
821 		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
822 			if (!IMM_H(imm))
823 				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
824 			else {
825 				/* Sign-extended */
826 				PPC_LI32(tmp1_reg, imm);
827 				EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
828 			}
829 			goto bpf_alu32_trunc;
830 		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
831 		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
832 			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
833 			goto bpf_alu32_trunc;
834 		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
835 		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
836 			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
837 				/* Sign-extended */
838 				PPC_LI32(tmp1_reg, imm);
839 				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
840 			} else {
841 				if (IMM_L(imm))
842 					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
843 				if (IMM_H(imm))
844 					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
845 			}
846 			goto bpf_alu32_trunc;
847 		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
848 		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
849 			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
850 			goto bpf_alu32_trunc;
851 		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
852 		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
853 			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
854 				/* Sign-extended */
855 				PPC_LI32(tmp1_reg, imm);
856 				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
857 			} else {
858 				if (IMM_L(imm))
859 					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
860 				if (IMM_H(imm))
861 					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
862 			}
863 			goto bpf_alu32_trunc;
864 		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
865 			/* slw clears top 32 bits */
866 			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
867 			/* skip zero extension move, but set address map. */
868 			if (insn_is_zext(&insn[i + 1]))
869 				addrs[++i] = ctx->idx * 4;
870 			break;
871 		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
872 			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
873 			break;
874 		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
875 			/* with imm 0, we still need to clear top 32 bits */
876 			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
877 			if (insn_is_zext(&insn[i + 1]))
878 				addrs[++i] = ctx->idx * 4;
879 			break;
880 		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
881 			if (imm != 0)
882 				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
883 			break;
884 		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
885 			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
886 			if (insn_is_zext(&insn[i + 1]))
887 				addrs[++i] = ctx->idx * 4;
888 			break;
889 		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
890 			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
891 			break;
892 		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
893 			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
894 			if (insn_is_zext(&insn[i + 1]))
895 				addrs[++i] = ctx->idx * 4;
896 			break;
897 		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
898 			if (imm != 0)
899 				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
900 			break;
901 		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
902 			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
903 			goto bpf_alu32_trunc;
904 		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
905 			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
906 			break;
907 		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
908 			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
909 			goto bpf_alu32_trunc;
910 		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
911 			if (imm != 0)
912 				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
913 			break;
914 
915 		/*
916 		 * MOV
917 		 */
918 		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
919 		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
920 
921 			if (insn_is_cast_user(&insn[i])) {
922 				EMIT(PPC_RAW_RLDICL_DOT(tmp1_reg, src_reg, 0, 32));
923 				PPC_LI64(dst_reg, (ctx->user_vm_start & 0xffffffff00000000UL));
924 				PPC_BCC_SHORT(COND_EQ, (ctx->idx + 2) * 4);
925 				EMIT(PPC_RAW_OR(tmp1_reg, dst_reg, tmp1_reg));
926 				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
927 				break;
928 			}
929 
930 			if (imm == 1) {
931 				/* special mov32 for zext */
932 				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
933 				break;
934 			} else if (off == 8) {
935 				EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
936 			} else if (off == 16) {
937 				EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
938 			} else if (off == 32) {
939 				EMIT(PPC_RAW_EXTSW(dst_reg, src_reg));
940 			} else if (dst_reg != src_reg)
941 				EMIT(PPC_RAW_MR(dst_reg, src_reg));
942 			goto bpf_alu32_trunc;
943 		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
944 		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
945 			PPC_LI32(dst_reg, imm);
946 			if (imm < 0)
947 				goto bpf_alu32_trunc;
948 			else if (insn_is_zext(&insn[i + 1]))
949 				addrs[++i] = ctx->idx * 4;
950 			break;
951 
952 bpf_alu32_trunc:
953 		/* Truncate to 32-bits */
954 		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
955 			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
956 		break;
957 
958 		/*
959 		 * BPF_FROM_BE/LE
960 		 */
961 		case BPF_ALU | BPF_END | BPF_FROM_LE:
962 		case BPF_ALU | BPF_END | BPF_FROM_BE:
963 		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
964 #ifdef __BIG_ENDIAN__
965 			if (BPF_SRC(code) == BPF_FROM_BE)
966 				goto emit_clear;
967 #else /* !__BIG_ENDIAN__ */
968 			if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE)
969 				goto emit_clear;
970 #endif
971 			switch (imm) {
972 			case 16:
973 				/* Rotate 8 bits left & mask with 0x0000ff00 */
974 				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
975 				/* Rotate 8 bits right & insert LSB to reg */
976 				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
977 				/* Move result back to dst_reg */
978 				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
979 				break;
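			/*
			 * Worked example for the 32-bit swap below, with
			 * dst = 0xAABBCCDD (illustrative): rotating left by
			 * 8 gives 0xBBCCDDAA, where bytes 2 (CC) and 4 (AA)
			 * are already in place; the two rlwimi's rotate by
			 * 24 (0xDDAABBCC) and insert byte 1 (DD) and byte 3
			 * (BB), yielding 0xDDCCBBAA.
			 */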
980 			case 32:
981 				/*
982 				 * Rotate word left by 8 bits:
983 				 * 2 bytes are already in their final position
984 				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
985 				 */
986 				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
987 				/* Rotate 24 bits and insert byte 1 */
988 				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
989 				/* Rotate 24 bits and insert byte 3 */
990 				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
991 				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
992 				break;
993 			case 64:
994 				/* Store the value to stack and then use byte-reverse loads */
995 				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
996 				EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
997 				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
998 					EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
999 				} else {
1000 					EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
1001 					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
1002 						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
1003 					EMIT(PPC_RAW_LI(tmp2_reg, 4));
1004 					EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
1005 					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1006 						EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
1007 					EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
1008 				}
1009 				break;
1010 			}
1011 			break;
1012 
1013 emit_clear:
1014 			switch (imm) {
1015 			case 16:
1016 				/* zero-extend 16 bits into 64 bits */
1017 				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
1018 				if (insn_is_zext(&insn[i + 1]))
1019 					addrs[++i] = ctx->idx * 4;
1020 				break;
1021 			case 32:
1022 				if (!fp->aux->verifier_zext)
1023 					/* zero-extend 32 bits into 64 bits */
1024 					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
1025 				break;
1026 			case 64:
1027 				/* nop */
1028 				break;
1029 			}
1030 			break;
1031 
1032 		/*
1033 		 * BPF_ST NOSPEC (speculation barrier)
1034 		 *
1035 		 * The following must act as a barrier against both Spectre v1
1036 		 * and v4 if we requested both mitigations. Therefore, also emit
1037 		 * 'isync; sync' on E500 or 'ori31' on BOOK3S_64 in addition to
1038 		 * the insns needed for a Spectre v4 barrier.
1039 		 *
1040 		 * If we requested only !bypass_spec_v1 OR only !bypass_spec_v4,
1041 		 * we can skip the respective other barrier type as an
1042 		 * optimization.
1043 		 */
1044 		case BPF_ST | BPF_NOSPEC:
1045 			sync_emitted = false;
1046 			ori31_emitted = false;
1047 			if (IS_ENABLED(CONFIG_PPC_E500) &&
1048 			    !bpf_jit_bypass_spec_v1()) {
1049 				EMIT(PPC_RAW_ISYNC());
1050 				EMIT(PPC_RAW_SYNC());
1051 				sync_emitted = true;
1052 			}
1053 			if (!bpf_jit_bypass_spec_v4()) {
1054 				switch (stf_barrier) {
1055 				case STF_BARRIER_EIEIO:
1056 					EMIT(PPC_RAW_EIEIO() | 0x02000000);
1057 					break;
1058 				case STF_BARRIER_SYNC_ORI:
1059 					if (!sync_emitted)
1060 						EMIT(PPC_RAW_SYNC());
1061 					EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
1062 					EMIT(PPC_RAW_ORI(_R31, _R31, 0));
1063 					ori31_emitted = true;
1064 					break;
1065 				case STF_BARRIER_FALLBACK:
1066 					ctx->seen |= SEEN_FUNC;
1067 					PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
1068 					EMIT(PPC_RAW_MTCTR(_R12));
1069 					EMIT(PPC_RAW_BCTRL());
1070 					break;
1071 				case STF_BARRIER_NONE:
1072 					break;
1073 				}
1074 			}
1075 			if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1076 			    !bpf_jit_bypass_spec_v1() &&
1077 			    !ori31_emitted)
1078 				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
1079 			break;
1080 
1081 		/*
1082 		 * BPF_ST(X)
1083 		 */
1084 		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
1085 		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
1086 			if (BPF_CLASS(code) == BPF_ST) {
1087 				EMIT(PPC_RAW_LI(tmp1_reg, imm));
1088 				src_reg = tmp1_reg;
1089 			}
1090 			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
1091 			break;
1092 		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
1093 		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
1094 			if (BPF_CLASS(code) == BPF_ST) {
1095 				EMIT(PPC_RAW_LI(tmp1_reg, imm));
1096 				src_reg = tmp1_reg;
1097 			}
1098 			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
1099 			break;
1100 		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
1101 		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
1102 			if (BPF_CLASS(code) == BPF_ST) {
1103 				PPC_LI32(tmp1_reg, imm);
1104 				src_reg = tmp1_reg;
1105 			}
1106 			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
1107 			break;
1108 		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
1109 		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
1110 			if (BPF_CLASS(code) == BPF_ST) {
1111 				PPC_LI32(tmp1_reg, imm);
1112 				src_reg = tmp1_reg;
1113 			}
1114 			if (off % 4) {
1115 				EMIT(PPC_RAW_LI(tmp2_reg, off));
1116 				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
1117 			} else {
1118 				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
1119 			}
1120 			break;
1121 
1122 		case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
1123 		case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
1124 		case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
1125 		case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
1126 
1127 			EMIT(PPC_RAW_ADD(tmp1_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
1128 
1129 			ret = bpf_jit_emit_probe_mem_store(ctx, src_reg, off, code, image);
1130 			if (ret)
1131 				return ret;
1132 
1133 			ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1134 						    ctx->idx - 1, 4, -1, code);
1135 			if (ret)
1136 				return ret;
1137 
1138 			break;
1139 
1140 		case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
1141 		case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
1142 		case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
1143 		case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
1144 
1145 			EMIT(PPC_RAW_ADD(tmp1_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
1146 
1147 			if (BPF_SIZE(code) == BPF_W || BPF_SIZE(code) == BPF_DW) {
1148 				PPC_LI32(tmp2_reg, imm);
1149 				src_reg = tmp2_reg;
1150 			} else {
1151 				EMIT(PPC_RAW_LI(tmp2_reg, imm));
1152 				src_reg = tmp2_reg;
1153 			}
1154 
1155 			ret = bpf_jit_emit_probe_mem_store(ctx, src_reg, off, code, image);
1156 			if (ret)
1157 				return ret;
1158 
1159 			ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1160 						    ctx->idx - 1, 4, -1, code);
1161 			if (ret)
1162 				return ret;
1163 
1164 			break;
1165 
1166 		/*
1167 		 * BPF_STX PROBE_ATOMIC (arena atomic ops)
1168 		 */
1169 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
1170 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
1171 			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
1172 			ret = bpf_jit_emit_atomic_ops(image, ctx, &insn[i],
1173 						      &jmp_off, &tmp_idx, &addrs[i + 1]);
1174 			if (ret) {
1175 				if (ret == -EOPNOTSUPP) {
1176 					pr_err_ratelimited(
1177 						"eBPF filter atomic op code %02x (@%d) unsupported\n",
1178 						code, i);
1179 				}
1180 				return ret;
1181 			}
1182 			/* LDARX/LWARX should land here on exception. */
1183 			ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1184 						    tmp_idx, jmp_off, dst_reg, code);
1185 			if (ret)
1186 				return ret;
1187 
1188 			/* Undo the arena base addition to restore dst_reg */
1189 			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
1190 			break;
1191 
1192 		/*
1193 		 * BPF_STX ATOMIC (atomic ops)
1194 		 */
1195 		case BPF_STX | BPF_ATOMIC | BPF_B:
1196 		case BPF_STX | BPF_ATOMIC | BPF_H:
1197 		case BPF_STX | BPF_ATOMIC | BPF_W:
1198 		case BPF_STX | BPF_ATOMIC | BPF_DW:
1199 			if (bpf_atomic_is_load_store(&insn[i])) {
1200 				ret = emit_atomic_ld_st(insn[i], ctx, image);
1201 				if (ret)
1202 					return ret;
1203 
1204 				if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
1205 					addrs[++i] = ctx->idx * 4;
1206 				break;
1207 			} else if (size == BPF_B || size == BPF_H) {
1208 				pr_err_ratelimited(
1209 					"eBPF filter atomic op code %02x (@%d) unsupported\n",
1210 					code, i);
1211 				return -EOPNOTSUPP;
1212 			}
1213 
1214 			ret = bpf_jit_emit_atomic_ops(image, ctx, &insn[i],
1215 						      &jmp_off, &tmp_idx, &addrs[i + 1]);
1216 			if (ret) {
1217 				if (ret == -EOPNOTSUPP) {
1218 					pr_err_ratelimited(
1219 						"eBPF filter atomic op code %02x (@%d) unsupported\n",
1220 						code, i);
1221 				}
1222 				return ret;
1223 			}
1224 			break;
1225 
1226 		/*
1227 		 * BPF_LDX
1228 		 */
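		/*
		 * For the PROBE_MEM guard below: the load is only executed
		 * when src + off lies above PAGE_OFFSET (or has the top bit
		 * set on BOOK3E_64); otherwise dst_reg is zeroed and the
		 * load is jumped over. Branch targets are byte offsets
		 * computed from ctx->idx, hence the (ctx->idx + 2/3) * 4.
		 */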
1229 		/* dst = *(u8 *)(ul) (src + off) */
1230 		case BPF_LDX | BPF_MEM | BPF_B:
1231 		case BPF_LDX | BPF_MEMSX | BPF_B:
1232 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1233 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1234 		/* dst = *(u16 *)(ul) (src + off) */
1235 		case BPF_LDX | BPF_MEM | BPF_H:
1236 		case BPF_LDX | BPF_MEMSX | BPF_H:
1237 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1238 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1239 		/* dst = *(u32 *)(ul) (src + off) */
1240 		case BPF_LDX | BPF_MEM | BPF_W:
1241 		case BPF_LDX | BPF_MEMSX | BPF_W:
1242 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1243 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1244 		/* dst = *(u64 *)(ul) (src + off) */
1245 		case BPF_LDX | BPF_MEM | BPF_DW:
1246 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1247 			/*
1248 			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
1249 			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
1250 			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
1251 			 * set dst_reg=0 and move on.
1252 			 */
1253 			if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
1254 				EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
1255 				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
1256 					PPC_LI64(tmp2_reg, 0x8000000000000000ul);
1257 				else /* BOOK3S_64 */
1258 					PPC_LI64(tmp2_reg, PAGE_OFFSET);
1259 				EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
1260 				PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
1261 				EMIT(PPC_RAW_LI(dst_reg, 0));
1262 				/*
1263 				 * Check if 'off' is word aligned for BPF_DW, because
1264 				 * we might generate two instructions.
1265 				 */
1266 				if ((BPF_SIZE(code) == BPF_DW && (off & 3)) ||
1267 				    (BPF_SIZE(code) == BPF_B &&
1268 				     BPF_MODE(code) == BPF_PROBE_MEMSX) ||
1269 				    (BPF_SIZE(code) == BPF_B && BPF_MODE(code) == BPF_MEMSX))
1270 					PPC_JMP((ctx->idx + 3) * 4);
1271 				else
1272 					PPC_JMP((ctx->idx + 2) * 4);
1273 			}
1274 
1275 			if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
1276 				switch (size) {
1277 				case BPF_B:
1278 					EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
1279 					EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
1280 					break;
1281 				case BPF_H:
1282 					EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
1283 					break;
1284 				case BPF_W:
1285 					EMIT(PPC_RAW_LWA(dst_reg, src_reg, off));
1286 					break;
1287 				}
1288 			} else {
1289 				switch (size) {
1290 				case BPF_B:
1291 					EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
1292 					break;
1293 				case BPF_H:
1294 					EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
1295 					break;
1296 				case BPF_W:
1297 					EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
1298 					break;
1299 				case BPF_DW:
1300 					if (off % 4) {
1301 						EMIT(PPC_RAW_LI(tmp1_reg, off));
1302 						EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
1303 					} else {
1304 						EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
1305 					}
1306 					break;
1307 				}
1308 			}
1309 
1310 			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
1311 				addrs[++i] = ctx->idx * 4;
1312 
1313 			if (BPF_MODE(code) == BPF_PROBE_MEM) {
1314 				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1315 							    ctx->idx - 1, 4, dst_reg, code);
1316 				if (ret)
1317 					return ret;
1318 			}
1319 			break;
1320 
1321 		/* dst = *(u64 *)(ul) (src + ARENA_VM_START + off) */
1322 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
1323 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
1324 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
1325 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
1326 
1327 			EMIT(PPC_RAW_ADD(tmp1_reg, src_reg, bpf_to_ppc(ARENA_VM_START)));
1328 
1329 			switch (size) {
1330 			case BPF_B:
1331 				EMIT(PPC_RAW_LBZ(dst_reg, tmp1_reg, off));
1332 				break;
1333 			case BPF_H:
1334 				EMIT(PPC_RAW_LHZ(dst_reg, tmp1_reg, off));
1335 				break;
1336 			case BPF_W:
1337 				EMIT(PPC_RAW_LWZ(dst_reg, tmp1_reg, off));
1338 				break;
1339 			case BPF_DW:
1340 				if (off % 4) {
1341 					EMIT(PPC_RAW_LI(tmp2_reg, off));
1342 					EMIT(PPC_RAW_LDX(dst_reg, tmp1_reg, tmp2_reg));
1343 				} else {
1344 					EMIT(PPC_RAW_LD(dst_reg, tmp1_reg, off));
1345 				}
1346 				break;
1347 			}
1348 
1349 			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
1350 				addrs[++i] = ctx->idx * 4;
1351 
1352 			ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1353 						    ctx->idx - 1, 4, dst_reg, code);
1354 			if (ret)
1355 				return ret;
1356 			break;
1357 
1358 		/*
1359 		 * Doubleword load
1360 		 * 16 byte instruction that uses two 'struct bpf_insn'
1361 		 */
1362 		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
1363 			imm64 = ((u64)(u32) insn[i].imm) |
1364 				    (((u64)(u32) insn[i+1].imm) << 32);
1365 			PPC_LI64(dst_reg, imm64);
1366 			/* Adjust for two bpf instructions */
1367 			addrs[++i] = ctx->idx * 4;
1368 			break;
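		/*
		 * Note: PPC_LI64() above can expand to as many as five
		 * instructions (roughly lis/ori/sldi/oris/ori), so a 64-bit
		 * immediate load may cost 20 bytes of image; addrs[] is
		 * bumped past the second bpf_insn slot to keep the insn to
		 * offset mapping in sync.
		 */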
1369 
1370 		/*
1371 		 * Return/Exit
1372 		 */
1373 		case BPF_JMP | BPF_EXIT:
1374 			/*
1375 			 * If this isn't the very last instruction, branch to
1376 			 * the epilogue. If we _are_ the last instruction,
1377 			 * we'll just fall through to the epilogue.
1378 			 */
1379 			if (i != flen - 1) {
1380 				ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
1381 				if (ret)
1382 					return ret;
1383 			}
1384 			/* else fall through to the epilogue */
1385 			break;
1386 
1387 		/*
1388 		 * Call kernel helper or bpf function
1389 		 */
1390 		case BPF_JMP | BPF_CALL:
1391 			ctx->seen |= SEEN_FUNC;
1392 
1393 			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
1394 						    &func_addr, &func_addr_fixed);
1395 			if (ret < 0)
1396 				return ret;
1397 
1398 			ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
1399 			if (ret)
1400 				return ret;
1401 
1402 			/* move return value from r3 to BPF_REG_0 */
1403 			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
1404 			break;
1405 
1406 		/*
1407 		 * Jumps and branches
1408 		 */
1409 		case BPF_JMP | BPF_JA:
1410 			PPC_JMP(addrs[i + 1 + off]);
1411 			break;
1412 		case BPF_JMP32 | BPF_JA:
1413 			PPC_JMP(addrs[i + 1 + imm]);
1414 			break;
1415 
1416 		case BPF_JMP | BPF_JGT | BPF_K:
1417 		case BPF_JMP | BPF_JGT | BPF_X:
1418 		case BPF_JMP | BPF_JSGT | BPF_K:
1419 		case BPF_JMP | BPF_JSGT | BPF_X:
1420 		case BPF_JMP32 | BPF_JGT | BPF_K:
1421 		case BPF_JMP32 | BPF_JGT | BPF_X:
1422 		case BPF_JMP32 | BPF_JSGT | BPF_K:
1423 		case BPF_JMP32 | BPF_JSGT | BPF_X:
1424 			true_cond = COND_GT;
1425 			goto cond_branch;
1426 		case BPF_JMP | BPF_JLT | BPF_K:
1427 		case BPF_JMP | BPF_JLT | BPF_X:
1428 		case BPF_JMP | BPF_JSLT | BPF_K:
1429 		case BPF_JMP | BPF_JSLT | BPF_X:
1430 		case BPF_JMP32 | BPF_JLT | BPF_K:
1431 		case BPF_JMP32 | BPF_JLT | BPF_X:
1432 		case BPF_JMP32 | BPF_JSLT | BPF_K:
1433 		case BPF_JMP32 | BPF_JSLT | BPF_X:
1434 			true_cond = COND_LT;
1435 			goto cond_branch;
1436 		case BPF_JMP | BPF_JGE | BPF_K:
1437 		case BPF_JMP | BPF_JGE | BPF_X:
1438 		case BPF_JMP | BPF_JSGE | BPF_K:
1439 		case BPF_JMP | BPF_JSGE | BPF_X:
1440 		case BPF_JMP32 | BPF_JGE | BPF_K:
1441 		case BPF_JMP32 | BPF_JGE | BPF_X:
1442 		case BPF_JMP32 | BPF_JSGE | BPF_K:
1443 		case BPF_JMP32 | BPF_JSGE | BPF_X:
1444 			true_cond = COND_GE;
1445 			goto cond_branch;
1446 		case BPF_JMP | BPF_JLE | BPF_K:
1447 		case BPF_JMP | BPF_JLE | BPF_X:
1448 		case BPF_JMP | BPF_JSLE | BPF_K:
1449 		case BPF_JMP | BPF_JSLE | BPF_X:
1450 		case BPF_JMP32 | BPF_JLE | BPF_K:
1451 		case BPF_JMP32 | BPF_JLE | BPF_X:
1452 		case BPF_JMP32 | BPF_JSLE | BPF_K:
1453 		case BPF_JMP32 | BPF_JSLE | BPF_X:
1454 			true_cond = COND_LE;
1455 			goto cond_branch;
1456 		case BPF_JMP | BPF_JEQ | BPF_K:
1457 		case BPF_JMP | BPF_JEQ | BPF_X:
1458 		case BPF_JMP32 | BPF_JEQ | BPF_K:
1459 		case BPF_JMP32 | BPF_JEQ | BPF_X:
1460 			true_cond = COND_EQ;
1461 			goto cond_branch;
1462 		case BPF_JMP | BPF_JNE | BPF_K:
1463 		case BPF_JMP | BPF_JNE | BPF_X:
1464 		case BPF_JMP32 | BPF_JNE | BPF_K:
1465 		case BPF_JMP32 | BPF_JNE | BPF_X:
1466 			true_cond = COND_NE;
1467 			goto cond_branch;
1468 		case BPF_JMP | BPF_JSET | BPF_K:
1469 		case BPF_JMP | BPF_JSET | BPF_X:
1470 		case BPF_JMP32 | BPF_JSET | BPF_K:
1471 		case BPF_JMP32 | BPF_JSET | BPF_X:
1472 			true_cond = COND_NE;
1473 			/* Fall through */
1474 
1475 cond_branch:
1476 			switch (code) {
1477 			case BPF_JMP | BPF_JGT | BPF_X:
1478 			case BPF_JMP | BPF_JLT | BPF_X:
1479 			case BPF_JMP | BPF_JGE | BPF_X:
1480 			case BPF_JMP | BPF_JLE | BPF_X:
1481 			case BPF_JMP | BPF_JEQ | BPF_X:
1482 			case BPF_JMP | BPF_JNE | BPF_X:
1483 			case BPF_JMP32 | BPF_JGT | BPF_X:
1484 			case BPF_JMP32 | BPF_JLT | BPF_X:
1485 			case BPF_JMP32 | BPF_JGE | BPF_X:
1486 			case BPF_JMP32 | BPF_JLE | BPF_X:
1487 			case BPF_JMP32 | BPF_JEQ | BPF_X:
1488 			case BPF_JMP32 | BPF_JNE | BPF_X:
1489 				/* unsigned comparison */
1490 				if (BPF_CLASS(code) == BPF_JMP32)
1491 					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
1492 				else
1493 					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
1494 				break;
1495 			case BPF_JMP | BPF_JSGT | BPF_X:
1496 			case BPF_JMP | BPF_JSLT | BPF_X:
1497 			case BPF_JMP | BPF_JSGE | BPF_X:
1498 			case BPF_JMP | BPF_JSLE | BPF_X:
1499 			case BPF_JMP32 | BPF_JSGT | BPF_X:
1500 			case BPF_JMP32 | BPF_JSLT | BPF_X:
1501 			case BPF_JMP32 | BPF_JSGE | BPF_X:
1502 			case BPF_JMP32 | BPF_JSLE | BPF_X:
1503 				/* signed comparison */
1504 				if (BPF_CLASS(code) == BPF_JMP32)
1505 					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
1506 				else
1507 					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
1508 				break;
1509 			case BPF_JMP | BPF_JSET | BPF_X:
1510 			case BPF_JMP32 | BPF_JSET | BPF_X:
1511 				if (BPF_CLASS(code) == BPF_JMP) {
1512 					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
1513 				} else {
1514 					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
1515 					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
1516 				}
1517 				break;
1518 			case BPF_JMP | BPF_JNE | BPF_K:
1519 			case BPF_JMP | BPF_JEQ | BPF_K:
1520 			case BPF_JMP | BPF_JGT | BPF_K:
1521 			case BPF_JMP | BPF_JLT | BPF_K:
1522 			case BPF_JMP | BPF_JGE | BPF_K:
1523 			case BPF_JMP | BPF_JLE | BPF_K:
1524 			case BPF_JMP32 | BPF_JNE | BPF_K:
1525 			case BPF_JMP32 | BPF_JEQ | BPF_K:
1526 			case BPF_JMP32 | BPF_JGT | BPF_K:
1527 			case BPF_JMP32 | BPF_JLT | BPF_K:
1528 			case BPF_JMP32 | BPF_JGE | BPF_K:
1529 			case BPF_JMP32 | BPF_JLE | BPF_K:
1530 			{
1531 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1532 
1533 				/*
1534 				 * Need sign-extended load, so only positive
1535 				 * values can be used as imm in cmpldi
1536 				 */
1537 				if (imm >= 0 && imm < 32768) {
1538 					if (is_jmp32)
1539 						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
1540 					else
1541 						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
1542 				} else {
1543 					/* sign-extending load */
1544 					PPC_LI32(tmp1_reg, imm);
1545 					/* ... but unsigned comparison */
1546 					if (is_jmp32)
1547 						EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
1548 					else
1549 						EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
1550 				}
1551 				break;
1552 			}
1553 			case BPF_JMP | BPF_JSGT | BPF_K:
1554 			case BPF_JMP | BPF_JSLT | BPF_K:
1555 			case BPF_JMP | BPF_JSGE | BPF_K:
1556 			case BPF_JMP | BPF_JSLE | BPF_K:
1557 			case BPF_JMP32 | BPF_JSGT | BPF_K:
1558 			case BPF_JMP32 | BPF_JSLT | BPF_K:
1559 			case BPF_JMP32 | BPF_JSGE | BPF_K:
1560 			case BPF_JMP32 | BPF_JSLE | BPF_K:
1561 			{
1562 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1563 
1564 				/*
1565 				 * signed comparison, so any 16-bit value
1566 				 * can be used in cmpdi
1567 				 */
1568 				if (imm >= -32768 && imm < 32768) {
1569 					if (is_jmp32)
1570 						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
1571 					else
1572 						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
1573 				} else {
1574 					PPC_LI32(tmp1_reg, imm);
1575 					if (is_jmp32)
1576 						EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
1577 					else
1578 						EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
1579 				}
1580 				break;
1581 			}
1582 			case BPF_JMP | BPF_JSET | BPF_K:
1583 			case BPF_JMP32 | BPF_JSET | BPF_K:
1584 				/* andi does not sign-extend the immediate */
1585 				if (imm >= 0 && imm < 32768)
1586 					/* PPC_ANDI is _only/always_ dot-form */
1587 					EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
1588 				else {
1589 					PPC_LI32(tmp1_reg, imm);
1590 					if (BPF_CLASS(code) == BPF_JMP) {
1591 						EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
1592 								     tmp1_reg));
1593 					} else {
1594 						EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
1595 						EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
1596 									0, 0, 31));
1597 					}
1598 				}
1599 				break;
1600 			}
1601 			PPC_BCC(true_cond, addrs[i + 1 + off]);
1602 			break;
1603 
1604 		/*
1605 		 * Tail call
1606 		 */
1607 		case BPF_JMP | BPF_TAIL_CALL:
1608 			ctx->seen |= SEEN_TAILCALL;
1609 			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
1610 			if (ret < 0)
1611 				return ret;
1612 			break;
1613 
1614 		default:
1615 			/*
1616 			 * The filter contains something cruel & unusual.
1617 			 * We don't handle it, but also there shouldn't be
1618 			 * anything missing from our list.
1619 			 */
1620 			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1621 					code, i);
1622 			return -ENOTSUPP;
1623 		}
1624 	}
1625 
1626 	/* Set end-of-body-code address for exit. */
1627 	addrs[i] = ctx->idx * 4;
1628 
1629 	return 0;
1630 }
1631