// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit.h"

/*
 * Stack layout:
 * Ensure the top half (up to local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 5*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 16		|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */

/* for gpr non volatile registers BPF_REG_6 to 10 */
#define BPF_PPC_STACK_SAVE	(5*8)
/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS	24
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
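/*
 * For reference: with BPF_PPC_STACK_SAVE = 40 and BPF_PPC_STACK_LOCALS = 24,
 * this works out to 32 + 24 + 40 = 96 bytes with a 32-byte frame header
 * (ELFv2) or 112 + 24 + 40 = 176 bytes with a 112-byte one (ELFv1), per
 * the 32/112 split shown in the stack layout above.
 */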

/* BPF register usage */
#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)

/* BPF to ppc register mappings */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
	/* function return value */
	ctx->b2p[BPF_REG_0] = _R8;
	/* function arguments */
	ctx->b2p[BPF_REG_1] = _R3;
	ctx->b2p[BPF_REG_2] = _R4;
	ctx->b2p[BPF_REG_3] = _R5;
	ctx->b2p[BPF_REG_4] = _R6;
	ctx->b2p[BPF_REG_5] = _R7;
	/* non volatile registers */
	ctx->b2p[BPF_REG_6] = _R27;
	ctx->b2p[BPF_REG_7] = _R28;
	ctx->b2p[BPF_REG_8] = _R29;
	ctx->b2p[BPF_REG_9] = _R30;
	/* frame pointer aka BPF_REG_10 */
	ctx->b2p[BPF_REG_FP] = _R31;
	/* eBPF jit internal registers */
	ctx->b2p[BPF_REG_AX] = _R12;
	ctx->b2p[TMP_REG_1] = _R9;
	ctx->b2p[TMP_REG_2] = _R10;
}
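
/*
 * Note that BPF_REG_0 maps to r8 rather than r3: r3 doubles as the first
 * function argument (BPF_REG_1), so the return value is moved between r3
 * and r8 around helper calls and in the epilogue below.
 */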

/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN		_R27

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...       	] 		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 5*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 16
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 24);
}
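
/*
 * Without our own stack frame, the above resolves to -(40 + 24) = -64:
 * local_tmp_var occupies [sp - 64, sp - 49], tail_call_cnt sits at sp - 48
 * (see bpf_jit_stack_tailcallcnt() below), and the NV GPR save area takes
 * the top 40 bytes of the redzone, matching the picture above.
 */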

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 16;
}

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}
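
/*
 * Example: r27 (BPF_REG_6) is saved 8 * (32 - 27) = 40 bytes below the top
 * of our frame, i.e. at r1 + BPF_PPC_STACKFRAME + stack_size - 40 when we
 * have a frame of our own, or at r1 - 40 in the caller's redzone otherwise.
 */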

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}

void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

#ifndef CONFIG_PPC_KERNEL_PCREL
	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
#endif

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
		/* this goes in the redzone */
		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
	} else {
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
		}

		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
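
/*
 * Illustrative prologue for a program that does tail calls, calls helpers
 * and uses its stack (a sketch, not emitted verbatim; offsets assume an
 * ELFv2 frame, so BPF_PPC_STACKFRAME = 96 and STACK_FRAME_MIN_SIZE = 32):
 *
 *	ld	r2, kernel_toc(r13)		# ELFv2, non-pcrel only
 *	li	r9, 0
 *	std	r9, -48(r1)			# tail_call_cnt = 0 (redzone)
 *	mflr	r0
 *	std	r0, 16(r1)			# save LR
 *	stdu	r1, -(96 + stack_size)(r1)	# our frame
 *	std	r27..r31, ...			# only the NVRs actually seen
 *	addi	r31, r1, 32 + stack_size	# BPF frame pointer
 */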

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
			EMIT(PPC_RAW_MTLR(_R0));
		}
	}
}

void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));

	EMIT(PPC_RAW_BLR());
}

static int
bpf_jit_emit_func_call_hlp(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
	long reladdr;

	if (WARN_ON_ONCE(!kernel_text_address(func_addr)))
		return -EINVAL;

#ifdef CONFIG_PPC_KERNEL_PCREL
	reladdr = func_addr - local_paca->kernelbase;

	if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
		EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
		/* Align for subsequent prefix instruction */
		if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
			EMIT(PPC_RAW_NOP());
		/* paddi r12,r12,addr */
		EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
		EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
	} else {
		unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx);
		bool alignment_needed = !IS_ALIGNED(pc, 8);

		reladdr = func_addr - (alignment_needed ? pc + 4 : pc);

		if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
			if (alignment_needed)
				EMIT(PPC_RAW_NOP());
			/* pla r12,addr */
			EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
			EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
		} else {
			/* We can clobber r12 */
			PPC_LI64(_R12, func);
		}
	}
	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());
#else
	if (core_kernel_text(func_addr)) {
		reladdr = func_addr - kernel_toc_addr();
		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
			pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
			return -ERANGE;
		}

		EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
		EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
		EMIT(PPC_RAW_MTCTR(_R12));
		EMIT(PPC_RAW_BCTRL());
	} else {
		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) {
			/* func points to the function descriptor */
			PPC_LI64(bpf_to_ppc(TMP_REG_2), func);
			/* Load actual entry point from function descriptor */
			EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
			/* ... and move it to CTR */
			EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
			/*
			 * Load TOC from function descriptor at offset 8.
			 * We can clobber r2 since we get called through a
			 * function pointer (so caller will save/restore r2).
			 */
			EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
		} else {
			PPC_LI64(_R12, func);
			EMIT(PPC_RAW_MTCTR(_R12));
		}
		EMIT(PPC_RAW_BCTRL());
		/*
		 * Load r2 with kernel TOC as kernel TOC is used if function address falls
		 * within core kernel text.
		 */
		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
	}
#endif

	return 0;
}

int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	if (WARN_ON_ONCE(func && is_module_text_address(func)))
		return -EINVAL;

	/* skip past descriptor if elf v1 */
	func += FUNCTION_DESCR_SIZE;

	/* Load function address into r12 */
	PPC_LI64(_R12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to increase,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
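	/*
	 * (For an arbitrary 64-bit constant, that five-instruction worst case
	 * is roughly: lis; ori; sldi 32; oris; ori.)
	 */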
	if (!image)
		for (i = ctx->idx - ctx_idx; i < 5; i++)
			EMIT(PPC_RAW_NOP());

	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());

	return 0;
}

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already set up parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
	int b2p_index = bpf_to_ppc(BPF_REG_3);
	int bpf_tailcall_prologue_size = 8;

	if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		bpf_tailcall_prologue_size += 4; /* skip past the toc load */

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * tail_call_cnt++;
	 */
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
	PPC_BCC_SHORT(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}
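
/*
 * Note: the target program is entered bpf_tailcall_prologue_size bytes past
 * its first instruction, i.e. just after the two-instruction tail_call_cnt
 * setup (plus the TOC load where one is emitted), so the count incremented
 * above carries over into the next program.
 */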

/*
 * We spill into the redzone always, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
"		.global bpf_stf_barrier		;"
"	bpf_stf_barrier:			;"
"		std	21,-64(1)		;"
"		std	22,-56(1)		;"
"		sync				;"
"		ld	21,-64(1)		;"
"		ld	22,-56(1)		;"
"		ori	31,31,0			;"
"		.rept 14			;"
"		b	1f			;"
"	1:					;"
"		.endr				;"
"		blr				;"
);
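
/*
 * The barrier above spills r21/r22 into the redzone (hence the hardcoded
 * offsets noted above), executes a sync, reloads them, then runs the
 * 'ori 31,31,0' speculation-barrier nop and fourteen taken branches before
 * returning. It is invoked via CTR from the STF_BARRIER_FALLBACK case in
 * bpf_jit_build_body() below.
 */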

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
		       u32 *addrs, int pass, bool extra_pass)
{
	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
		u32 size = BPF_SIZE(code);
		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
		u32 save_reg, ret_reg;
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u64 imm64;
		u32 true_cond;
		u32 tmp_idx;
		int j;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			} else {
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm > -32768 && imm <= 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
			} else {
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			else
				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				PPC_LI32(tmp1_reg, imm);
				if (BPF_CLASS(code) == BPF_ALU)
					EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
				else
					EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else
				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else
				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			if (imm == 1) {
				if (BPF_OP(code) == BPF_DIV) {
					goto bpf_alu32_trunc;
				} else {
					EMIT(PPC_RAW_LI(dst_reg, 0));
					break;
				}
			}

			PPC_LI32(tmp1_reg, imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
				} else
					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
				} else
					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			/* skip zero extension move, but set address map. */
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (imm == 1) {
				/* special mov32 for zext */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
				break;
			}
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			else if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
				/* Rotate 8 bits right & insert LSB to reg */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
				/* Move result back to dst_reg */
				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
				break;
			case 64:
				/* Store the value to stack and then use byte-reverse loads */
				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
				EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
					EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
				} else {
					EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
					EMIT(PPC_RAW_LI(tmp2_reg, 4));
					EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
						EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
					EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
				}
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
				if (insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
				break;
			case 32:
				if (!fp->aux->verifier_zext)
					/* zero-extend 32 bits into 64 bits */
					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
				break;

			switch (stf_barrier) {
			case STF_BARRIER_EIEIO:
				EMIT(PPC_RAW_EIEIO() | 0x02000000);
				break;
			case STF_BARRIER_SYNC_ORI:
				EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
				break;
			case STF_BARRIER_FALLBACK:
				ctx->seen |= SEEN_FUNC;
				PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
				EMIT(PPC_RAW_MTCTR(_R12));
				EMIT(PPC_RAW_BCTRL());
				break;
			case STF_BARRIER_NONE:
				break;
			}
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(tmp1_reg, imm));
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(tmp1_reg, imm));
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(tmp1_reg, imm);
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(tmp1_reg, imm);
				src_reg = tmp1_reg;
			}
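			/*
			 * std is a DS-form instruction: its displacement must be a
			 * multiple of 4, so fall back to indexed stdx for other offsets.
			 */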
			if (off % 4) {
				EMIT(PPC_RAW_LI(tmp2_reg, off));
				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
			} else {
				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
			}
			break;

		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
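			/*
			 * All atomic ops below compile to a load-reserve/
			 * store-conditional (l[wd]arx ... st[wd]cx.) retry loop;
			 * tmp_idx records the loop start so that a failed
			 * store-conditional can branch back and retry.
			 */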
			save_reg = tmp2_reg;
			ret_reg = src_reg;

			/* Get offset into TMP_REG_1 */
			EMIT(PPC_RAW_LI(tmp1_reg, off));
			/*
			 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
			 * before and after the operation.
			 *
			 * This is a requirement in the Linux Kernel Memory Model.
			 * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
			 */
			if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
				EMIT(PPC_RAW_SYNC());
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			if (size == BPF_DW)
				EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
			else
				EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));

			/* Save old value in _R0 */
			if (imm & BPF_FETCH)
				EMIT(PPC_RAW_MR(_R0, tmp2_reg));

			switch (imm) {
			case BPF_ADD:
			case BPF_ADD | BPF_FETCH:
				EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_AND:
			case BPF_AND | BPF_FETCH:
				EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_OR:
			case BPF_OR | BPF_FETCH:
				EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_XOR:
			case BPF_XOR | BPF_FETCH:
				EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_CMPXCHG:
				/*
				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
				 * in src_reg for other cases.
				 */
				ret_reg = bpf_to_ppc(BPF_REG_0);

				/* Compare with old value in BPF_R0 */
				if (size == BPF_DW)
					EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
				else
					EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
				/* Don't set if different from old value */
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
				fallthrough;
			case BPF_XCHG:
				save_reg = src_reg;
				break;
			default:
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -EOPNOTSUPP;
			}

			/* store new value */
			if (size == BPF_DW)
				EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
			else
				EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);

			if (imm & BPF_FETCH) {
				/* Emit 'sync' to enforce full ordering */
				if (IS_ENABLED(CONFIG_SMP))
					EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_MR(ret_reg, _R0));
				/*
				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
				 * For context, see commit 39491867ace5.
				 */
				if (size != BPF_DW && imm == BPF_CMPXCHG &&
				    insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
			}
			break;

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
					PPC_LI64(tmp2_reg, 0x8000000000000000ul);
				else /* BOOK3S_64 */
					PPC_LI64(tmp2_reg, PAGE_OFFSET);
				EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
				PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * Check if 'off' is word aligned for BPF_DW, because
				 * we might generate two instructions.
				 */
				if (BPF_SIZE(code) == BPF_DW && (off & 3))
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}

			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
				break;
			case BPF_DW:
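				/*
				 * ld is DS-form (displacement must be a multiple
				 * of 4), hence the indexed ldx for other offsets.
				 */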
				if (off % 4) {
					EMIT(PPC_RAW_LI(tmp1_reg, off));
					EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
				} else {
					EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
				}
				break;
			}

			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;

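			/*
			 * For PROBE_MEM, record the load emitted above in the
			 * exception table so that a fault is fixed up (dst_reg is
			 * cleared, which is why it is passed here) instead of
			 * taking the kernel down.
			 */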
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
							    ctx->idx - 1, 4, dst_reg);
				if (ret)
					return ret;
			}
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				    (((u64)(u32) insn[i+1].imm) << 32);
			tmp_idx = ctx->idx;
			PPC_LI64(dst_reg, imm64);
			/* padding to allow full 5 instructions for later patching */
			if (!image)
				for (j = ctx->idx - tmp_idx; j < 5; j++)
					EMIT(PPC_RAW_NOP());
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1) {
				ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
				if (ret)
					return ret;
			}
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (func_addr_fixed)
				ret = bpf_jit_emit_func_call_hlp(image, fimage, ctx, func_addr);
			else
				ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);

			if (ret)
				return ret;

			/* move return value from r3 to BPF_REG_0 */
			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;
		case BPF_JMP32 | BPF_JA:
			PPC_JMP(addrs[i + 1 + imm]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
			case BPF_JMP32 | BPF_JSET | BPF_X:
				if (BPF_CLASS(code) == BPF_JMP) {
					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
				} else {
					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
				}
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(tmp1_reg, imm);
					/* ... but unsigned comparison */
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
				}
				break;
			}
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
				} else {
					PPC_LI32(tmp1_reg, imm);
					if (is_jmp32)
						EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
				}
				break;
			}
			case BPF_JMP | BPF_JSET | BPF_K:
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
				else {
					PPC_LI32(tmp1_reg, imm);
					if (BPF_CLASS(code) == BPF_JMP) {
						EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
								     tmp1_reg));
					} else {
						EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
						EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
									0, 0, 31));
					}
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}