// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit.h"

/*
 * Stack layout:
 * Ensure the top half (up to local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 5*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 16		|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */

/* for gpr non volatile registers BPF_REG_6 to 10 */
#define BPF_PPC_STACK_SAVE	(5*8)
/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS	24
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
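
/*
 * Worked example (illustrative): with the ELFv2 ABI, where
 * STACK_FRAME_MIN_SIZE is 32, BPF_PPC_STACKFRAME works out to
 * 32 + 24 + 40 = 96 bytes; with the 112-byte ELFv1 frame header it is
 * 112 + 24 + 40 = 176 bytes. Both are multiples of 16, satisfying the
 * quadword-alignment requirement noted above.
 */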

/* BPF register usage */
#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)

/* BPF to ppc register mappings */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
	/* function return value */
	ctx->b2p[BPF_REG_0] = _R8;
	/* function arguments */
	ctx->b2p[BPF_REG_1] = _R3;
	ctx->b2p[BPF_REG_2] = _R4;
	ctx->b2p[BPF_REG_3] = _R5;
	ctx->b2p[BPF_REG_4] = _R6;
	ctx->b2p[BPF_REG_5] = _R7;
	/* non volatile registers */
	ctx->b2p[BPF_REG_6] = _R27;
	ctx->b2p[BPF_REG_7] = _R28;
	ctx->b2p[BPF_REG_8] = _R29;
	ctx->b2p[BPF_REG_9] = _R30;
	/* frame pointer aka BPF_REG_10 */
	ctx->b2p[BPF_REG_FP] = _R31;
	/* eBPF jit internal registers */
	ctx->b2p[BPF_REG_AX] = _R12;
	ctx->b2p[TMP_REG_1] = _R9;
	ctx->b2p[TMP_REG_2] = _R10;
}
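
/*
 * Note on the mapping above: BPF_REG_0 lives in r8 rather than r3,
 * since r3 already doubles as BPF_REG_1 (the program's context
 * argument). The epilogue moves r8 into r3 to satisfy the ABI
 * return-value convention, and the BPF_CALL handling moves r3 back
 * into r8 after each helper call.
 */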

/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN		_R27

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}

/*
 * When not setting up our own stackframe, the redzone (288 bytes) usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 5*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 16
 *		[   unused red zone	] 224
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 24);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 16;
}
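
/*
 * Worked example (illustrative): without a stack frame,
 * bpf_jit_stack_local() returns -(40 + 24) = -64, so local_tmp_var
 * occupies sp-64..sp-49, tail_call_cnt sits at -64 + 16 = -48 (matching
 * the -(BPF_PPC_STACK_SAVE + 8) store in the prologue), and the NVR
 * save area covers sp-40..sp-1, as in the redzone picture above.
 */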

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers\n");
	BUG();
}
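
/*
 * Worked example (illustrative): with no stack frame, r27 (BPF_REG_6)
 * is saved at 0 - 8 * (32 - 27) = -40, the bottom of the redzone NVR
 * save area, and r31 at -8. With a frame, the same registers land at
 * the top of our frame instead, e.g. r31 at
 * (BPF_PPC_STACKFRAME + ctx->stack_size) - 8.
 */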

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}

void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Instruction for trampoline attach */
	EMIT(PPC_RAW_NOP());

#ifndef CONFIG_PPC_KERNEL_PCREL
	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
#endif

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
		/* this goes in the redzone */
		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
	} else {
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
		}

		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
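
/*
 * Illustrative sketch (not emitted verbatim) of the prologue for an
 * ELFv2 program that calls helpers, makes no tail calls and uses a
 * 64-byte BPF stack:
 *
 *	nop				# trampoline attach point
 *	ld	r2,<kernel_toc>(r13)	# unless CONFIG_PPC_KERNEL_PCREL
 *	nop				# tail_call_cnt init slots
 *	nop
 *	mflr	r0
 *	std	r0,PPC_LR_STKOFF(r1)
 *	stdu	r1,-160(r1)		# BPF_PPC_STACKFRAME (96) + 64
 *	std	rN,...(r1)		# only the NVRs actually used
 *	addi	r31,r1,96		# STACK_FRAME_MIN_SIZE (32) + 64
 */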

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
			EMIT(PPC_RAW_MTLR(_R0));
		}
	}
}

void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));

	EMIT(PPC_RAW_BLR());

	bpf_jit_build_fentry_stubs(image, ctx);
}

int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
	long reladdr;

	/* bpf to bpf call, func is not known in the initial pass. Emit 5 nops as a placeholder */
	if (!func) {
		for (int i = 0; i < 5; i++)
			EMIT(PPC_RAW_NOP());
		/* elfv1 needs an additional instruction to load addr from descriptor */
		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
			EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_MTCTR(_R12));
		EMIT(PPC_RAW_BCTRL());
		return 0;
	}

#ifdef CONFIG_PPC_KERNEL_PCREL
	reladdr = func_addr - local_paca->kernelbase;

	if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
		EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
		/* Align for subsequent prefix instruction */
		if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
			EMIT(PPC_RAW_NOP());
		/* paddi r12,r12,addr */
		EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
		EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
	} else {
		unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx);
		bool alignment_needed = !IS_ALIGNED(pc, 8);

		reladdr = func_addr - (alignment_needed ? pc + 4 : pc);

		if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
			if (alignment_needed)
				EMIT(PPC_RAW_NOP());
			/* pla r12,addr */
			EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
			EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
		} else {
			/* We can clobber r12 */
			PPC_LI64(_R12, func);
		}
	}
	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());
#else
	if (core_kernel_text(func_addr)) {
		reladdr = func_addr - kernel_toc_addr();
		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
			pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
			return -ERANGE;
		}

		EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
		EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
		EMIT(PPC_RAW_MTCTR(_R12));
		EMIT(PPC_RAW_BCTRL());
	} else {
		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) {
			/* func points to the function descriptor */
			PPC_LI64(bpf_to_ppc(TMP_REG_2), func);
			/* Load actual entry point from function descriptor */
			EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
			/* ... and move it to CTR */
			EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
			/*
			 * Load TOC from function descriptor at offset 8.
			 * We can clobber r2 since we get called through a
			 * function pointer (so caller will save/restore r2).
			 */
			if (is_module_text_address(func_addr))
				EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
		} else {
			PPC_LI64(_R12, func);
			EMIT(PPC_RAW_MTCTR(_R12));
		}
		EMIT(PPC_RAW_BCTRL());
		/*
		 * Restore r2 to the kernel TOC after the call: JITed code
		 * assumes the kernel TOC, but a module callee will have
		 * switched r2 to its own TOC.
		 */
		if (is_module_text_address(func_addr))
			EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
	}
#endif

	return 0;
}
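
/*
 * Illustrative example for the TOC-relative path above: with
 * reladdr = func_addr - kernel_toc_addr() = 0x12345678, the emitted
 * pair is:
 *	addis	r12,r2,0x1234
 *	addi	r12,r12,0x5678
 * Had the low 16 bits been >= 0x8000, PPC_HA() would have bumped the
 * high half by 1 to compensate for addi sign-extending its operand.
 */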

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already set up parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
	int b2p_index = bpf_to_ppc(BPF_REG_3);
	int bpf_tailcall_prologue_size = 12;

	if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		bpf_tailcall_prologue_size += 4; /* skip past the toc load */
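
	/*
	 * The 12 bytes skipped cover the trampoline attach nop and the two
	 * tail_call_cnt init instructions in the target's prologue (see
	 * bpf_jit_build_prologue()); entering past the latter is what lets
	 * the counter survive across tail calls. The extra 4 bytes skip
	 * the kernel TOC load, which is redundant on a bpf-to-bpf jump.
	 */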

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * tail_call_cnt++;
	 */
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
	PPC_BCC_SHORT(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}

/*
 * We spill into the redzone always, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
"		.global bpf_stf_barrier		;"
"	bpf_stf_barrier:			;"
"		std	21,-64(1)		;"
"		std	22,-56(1)		;"
"		sync				;"
"		ld	21,-64(1)		;"
"		ld	22,-56(1)		;"
"		ori	31,31,0			;"
"		.rept 14			;"
"		b	1f			;"
"	1:					;"
"		.endr				;"
"		blr				;"
);
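
/*
 * The -64/-56 slots above are the two halves of local_tmp_var:
 * -(BPF_PPC_STACK_SAVE + 24) = -64, as computed by bpf_jit_stack_local()
 * in the no-stack-frame case.
 */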

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
		       u32 *addrs, int pass, bool extra_pass)
{
	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
		u32 size = BPF_SIZE(code);
		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
		u32 save_reg, ret_reg;
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u64 imm64;
		u32 true_cond;
		u32 tmp_idx;
		int j;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			} else {
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm > -32768 && imm <= 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
			} else {
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			else
				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				PPC_LI32(tmp1_reg, imm);
				if (BPF_CLASS(code) == BPF_ALU)
					EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
				else
					EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				if (off)
					EMIT(PPC_RAW_DIVW(tmp1_reg, dst_reg, src_reg));
				else
					EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));

				EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else
				if (off)
					EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, src_reg));
				else
					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				if (off)
					EMIT(PPC_RAW_DIVD(tmp1_reg, dst_reg, src_reg));
				else
					EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else
				if (off)
					EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, src_reg));
				else
					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			if (imm == 1) {
				if (BPF_OP(code) == BPF_DIV) {
					goto bpf_alu32_trunc;
				} else {
					EMIT(PPC_RAW_LI(dst_reg, 0));
					break;
				}
			}

			PPC_LI32(tmp1_reg, imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					if (off)
						EMIT(PPC_RAW_DIVW(tmp2_reg, dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
				} else
					if (off)
						EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					if (off)
						EMIT(PPC_RAW_DIVD(tmp2_reg, dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
				} else
					if (off)
						EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			/* skip zero extension move, but set address map. */
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (imm == 1) {
				/* special mov32 for zext */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
				break;
			} else if (off == 8) {
				EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
			} else if (off == 16) {
				EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
			} else if (off == 32) {
				EMIT(PPC_RAW_EXTSW(dst_reg, src_reg));
			} else if (dst_reg != src_reg)
				EMIT(PPC_RAW_MR(dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			else if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
				/* Rotate 8 bits right & insert LSB to reg */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
				/* Move result back to dst_reg */
				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
				break;
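
			/*
			 * Worked example (illustrative) for the 32-bit swap:
			 * with dst_reg = 0xAABBCCDD, the rotate by 8 gives
			 * tmp = 0xBBCCDDAA (CC and AA already in place);
			 * inserting rotl24(dst) = 0xDDAABBCC into bits 0-7
			 * gives 0xDDCCDDAA, and into bits 16-23 gives the
			 * final 0xDDCCBBAA.
			 */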
			case 64:
				/* Store the value to stack and then use byte-reverse loads */
				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
				EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
					EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
				} else {
					EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
					EMIT(PPC_RAW_LI(tmp2_reg, 4));
					EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
						EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
					EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
				}
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
				if (insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
				break;
			case 32:
				if (!fp->aux->verifier_zext)
					/* zero-extend 32 bits into 64 bits */
					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
				break;

			switch (stf_barrier) {
			case STF_BARRIER_EIEIO:
				EMIT(PPC_RAW_EIEIO() | 0x02000000);
				break;
			case STF_BARRIER_SYNC_ORI:
				EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
				break;
			case STF_BARRIER_FALLBACK:
				ctx->seen |= SEEN_FUNC;
				PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
				EMIT(PPC_RAW_MTCTR(_R12));
				EMIT(PPC_RAW_BCTRL());
				break;
			case STF_BARRIER_NONE:
				break;
			}
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(tmp1_reg, imm));
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(tmp1_reg, imm));
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(tmp1_reg, imm);
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(tmp1_reg, imm);
				src_reg = tmp1_reg;
			}
			if (off % 4) {
				EMIT(PPC_RAW_LI(tmp2_reg, off));
				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
			} else {
				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
			}
			break;

		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			save_reg = tmp2_reg;
			ret_reg = src_reg;

			/* Get offset into TMP_REG_1 */
			EMIT(PPC_RAW_LI(tmp1_reg, off));
			/*
			 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
			 * before and after the operation.
			 *
			 * This is a requirement in the Linux Kernel Memory Model.
			 * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
			 */
			if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
				EMIT(PPC_RAW_SYNC());
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			if (size == BPF_DW)
				EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
			else
				EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));

			/* Save old value in _R0 */
			if (imm & BPF_FETCH)
				EMIT(PPC_RAW_MR(_R0, tmp2_reg));

			switch (imm) {
			case BPF_ADD:
			case BPF_ADD | BPF_FETCH:
				EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_AND:
			case BPF_AND | BPF_FETCH:
				EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_OR:
			case BPF_OR | BPF_FETCH:
				EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_XOR:
			case BPF_XOR | BPF_FETCH:
				EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_CMPXCHG:
				/*
				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
				 * in src_reg for other cases.
				 */
				ret_reg = bpf_to_ppc(BPF_REG_0);

				/* Compare with old value in BPF_R0 */
				if (size == BPF_DW)
					EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
				else
					EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
				/* Don't set if different from old value */
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
				fallthrough;
			case BPF_XCHG:
				save_reg = src_reg;
				break;
			default:
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -EOPNOTSUPP;
			}

			/* store new value */
			if (size == BPF_DW)
				EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
			else
				EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);

			if (imm & BPF_FETCH) {
				/* Emit 'sync' to enforce full ordering */
				if (IS_ENABLED(CONFIG_SMP))
					EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_MR(ret_reg, _R0));
				/*
				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
				 * For context, see commit 39491867ace5.
				 */
				if (size != BPF_DW && imm == BPF_CMPXCHG &&
				    insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
			}
			break;
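
		/*
		 * Illustrative sketch of the sequence emitted above for a
		 * 64-bit BPF_ADD | BPF_FETCH (mnemonics approximate):
		 *
		 *	li	r9,off
		 *	sync			# SMP only
		 * 0:	ldarx	r10,r9,dst
		 *	mr	r0,r10
		 *	add	r10,r10,src
		 *	stdcx.	r10,r9,dst
		 *	bne-	0b
		 *	sync			# SMP only
		 *	mr	src,r0
		 */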

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_MEMSX | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_MEMSX | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_MEMSX | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
			if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
				EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
					PPC_LI64(tmp2_reg, 0x8000000000000000ul);
				else /* BOOK3S_64 */
					PPC_LI64(tmp2_reg, PAGE_OFFSET);
				EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
				PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * Check if 'off' is word aligned for BPF_DW, because
				 * we might generate two instructions.
				 */
				if ((BPF_SIZE(code) == BPF_DW ||
				    (BPF_SIZE(code) == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX)) &&
						(off & 3))
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}

			if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
				switch (size) {
				case BPF_B:
					EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
					EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
					break;
				case BPF_H:
					EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
					break;
				case BPF_W:
					EMIT(PPC_RAW_LWA(dst_reg, src_reg, off));
					break;
				}
			} else {
				switch (size) {
				case BPF_B:
					EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
					break;
				case BPF_H:
					EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
					break;
				case BPF_W:
					EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
					break;
				case BPF_DW:
					if (off % 4) {
						EMIT(PPC_RAW_LI(tmp1_reg, off));
						EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
					} else {
						EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
					}
					break;
				}
			}

			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;

			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
							    ctx->idx - 1, 4, dst_reg);
				if (ret)
					return ret;
			}
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				    (((u64)(u32) insn[i+1].imm) << 32);
			tmp_idx = ctx->idx;
			PPC_LI64(dst_reg, imm64);
			/* padding to allow full 5 instructions for later patching */
			if (!image)
				for (j = ctx->idx - tmp_idx; j < 5; j++)
					EMIT(PPC_RAW_NOP());
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1) {
				ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
				if (ret)
					return ret;
			}
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
			if (ret)
				return ret;

			/* move return value from r3 to BPF_REG_0 */
			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;
		case BPF_JMP32 | BPF_JA:
			PPC_JMP(addrs[i + 1 + imm]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
			case BPF_JMP32 | BPF_JSET | BPF_X:
				if (BPF_CLASS(code) == BPF_JMP) {
					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
				} else {
					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
				}
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(tmp1_reg, imm);
					/* ... but unsigned comparison */
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
				}
				break;
			}
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
				} else {
					PPC_LI32(tmp1_reg, imm);
					if (is_jmp32)
						EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
				}
				break;
			}
			case BPF_JMP | BPF_JSET | BPF_K:
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
				else {
					PPC_LI32(tmp1_reg, imm);
					if (BPF_CLASS(code) == BPF_JMP) {
						EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
								     tmp1_reg));
					} else {
						EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
						EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
									0, 0, 31));
					}
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}