1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * bpf_jit_comp64.c: eBPF JIT compiler
4  *
5  * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
6  *		  IBM Corporation
7  *
8  * Based on the powerpc classic BPF JIT compiler by Matt Evans
9  */
10 #include <linux/moduleloader.h>
11 #include <asm/cacheflush.h>
12 #include <asm/asm-compat.h>
13 #include <linux/netdevice.h>
14 #include <linux/filter.h>
15 #include <linux/if_vlan.h>
16 #include <asm/kprobes.h>
17 #include <linux/bpf.h>
18 #include <asm/security_features.h>
19 
20 #include "bpf_jit.h"
21 
22 /*
23  * Stack layout with frame:
24  * Layout when setting up our own stack frame.
25  * Note: r1 at bottom, component offsets positive wrt r1.
26  * Ensure the top half (up to local_tmp_var) stays consistent
27  * with our redzone usage.
28  *
29  * tail_call_info - holds the tail call count in the main program's
30  *                  frame; a sub-prog's frame instead holds a reference
31  *                  to the main frame's tail_call_info.
32  *
33  *		[	prev sp		] <-------------
34  *		[    tail_call_info	] 8		|
35  *		[   nv gpr save area	] 6*8 + (12*8)	|
36  *		[    local_tmp_var	] 24		|
37  * fp (r31) -->	[   ebpf stack space	] up to 512	|
38  *		[     frame header	] 32/112	|
39  * sp (r1) --->	[    stack pointer	] --------------
40  *
41  * Additional (12*8) in 'nv gpr save area' only in case of
42  * exception boundary.
43  */
44 
45 /* for bpf JIT code internal usage */
46 #define BPF_PPC_STACK_LOCALS	24
47 /*
48  * Additional non-volatile registers (r14-r25) to be saved
49  * at an exception boundary.
50  */
51 #define BPF_PPC_EXC_STACK_SAVE (12*8)
52 
53 /* stack frame excluding BPF stack, ensure this is quadword aligned */
54 #define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
55 				 BPF_PPC_STACK_LOCALS + \
56 				 BPF_PPC_STACK_SAVE   + \
57 				 BPF_PPC_TAILCALL)
58 
59 /*
60  * Same as BPF_PPC_STACKFRAME, plus the save area for the additional
61  * non-volatile registers saved at an exception boundary.
62  * This is quadword aligned.
63  */
64 #define BPF_PPC_EXC_STACKFRAME (BPF_PPC_STACKFRAME + BPF_PPC_EXC_STACK_SAVE)
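
/*
 * A worked sizing example (a sketch, assuming BPF_PPC_TAILCALL = 8 and
 * BPF_PPC_STACK_SAVE = 6*8 as shown in the layout diagram above, with
 * STACK_FRAME_MIN_SIZE = 32 on ELFv2 / 112 on ELFv1):
 *
 *	BPF_PPC_STACKFRAME     = 32 + 24 + 48 + 8  = 112 (ELFv2), 192 (ELFv1)
 *	BPF_PPC_EXC_STACKFRAME = 112 + 96          = 208 (ELFv2), 288 (ELFv1)
 *
 * All of these are multiples of 16, preserving quadword alignment.
 */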
65 
66 /* BPF register usage */
67 #define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
68 #define TMP_REG_2	(MAX_BPF_JIT_REG + 1)
69 #define ARENA_VM_START  (MAX_BPF_JIT_REG + 2)
70 
71 /* BPF to ppc register mappings */
72 void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
73 {
74 	/* function return value */
75 	ctx->b2p[BPF_REG_0] = _R8;
76 	/* function arguments */
77 	ctx->b2p[BPF_REG_1] = _R3;
78 	ctx->b2p[BPF_REG_2] = _R4;
79 	ctx->b2p[BPF_REG_3] = _R5;
80 	ctx->b2p[BPF_REG_4] = _R6;
81 	ctx->b2p[BPF_REG_5] = _R7;
82 	/* non volatile registers */
83 	ctx->b2p[BPF_REG_6] = _R27;
84 	ctx->b2p[BPF_REG_7] = _R28;
85 	ctx->b2p[BPF_REG_8] = _R29;
86 	ctx->b2p[BPF_REG_9] = _R30;
87 	/* frame pointer aka BPF_REG_10 */
88 	ctx->b2p[BPF_REG_FP] = _R31;
89 	/* eBPF jit internal registers */
90 	ctx->b2p[BPF_REG_AX] = _R12;
91 	ctx->b2p[TMP_REG_1] = _R9;
92 	ctx->b2p[TMP_REG_2] = _R10;
93 	/* non volatile register for kern_vm_start address */
94 	ctx->b2p[ARENA_VM_START] = _R26;
95 }
96 
97 /* PPC NVR range -- update this if we ever use NVRs below r26 */
98 #define BPF_PPC_NVR_MIN		_R26
99 
100 static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
101 {
102 	/*
103 	 * We only need a stack frame if:
104 	 * - we call other functions (kernel helpers), or
105 	 * - the bpf program uses its stack area
106 	 * The latter condition is deduced from the usage of BPF_REG_FP
107 	 *
108 	 * bpf_throw() leads to exception callback from a BPF (sub)program.
109 	 * The (sub)program is always marked as SEEN_FUNC, creating a stack
110 	 * frame. The exception callback uses the frame of the exception
111 	 * boundary, so the exception boundary program must have a frame.
112 	 */
113 	return ctx->seen & SEEN_FUNC ||
114 	       bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)) ||
115 	       ctx->exception_cb ||
116 	       ctx->exception_boundary;
117 }
118 
119 /*
120  * Stack layout with redzone:
121  * When not setting up our own stack frame, the redzone (288 bytes) is used as follows.
122  * Note: r1 belongs to the previous frame; component offsets are negative wrt r1.
123  *
124  *		[	prev sp		] <-------------
125  *		[	  ...       	] 		|
126  * sp (r1) --->	[    stack pointer	] --------------
127  *		[    tail_call_info	] 8
128  *		[   nv gpr save area	] 6*8 + (12*8)
129  *		[    local_tmp_var	] 24
130  *		[   unused red zone	] 224
131  *
132  * Additional (12*8) in 'nv gpr save area' only in case of
133  * exception boundary.
134  */
135 static int bpf_jit_stack_local(struct codegen_context *ctx)
136 {
137 	if (bpf_has_stack_frame(ctx)) {
138 		/* Stack layout with frame */
139 		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
140 	} else {
141 		/* Stack layout with redzone */
142 		return -(BPF_PPC_TAILCALL +
143 			 BPF_PPC_STACK_SAVE +
144 			 (ctx->exception_boundary || ctx->exception_cb ?
145 					BPF_PPC_EXC_STACK_SAVE : 0) +
146 			 BPF_PPC_STACK_LOCALS
147 			);
148 	}
149 }
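
/*
 * A worked example of the redzone case (a sketch, assuming BPF_PPC_TAILCALL = 8
 * and BPF_PPC_STACK_SAVE = 6*8 as in the layout diagrams): without an exception
 * boundary this returns -(8 + 48 + 24) = -80, so local_tmp_var occupies
 * r1-80 .. r1-57. The hardcoded -80/-72 spills in bpf_stf_barrier() further
 * below rely on this same arithmetic.
 */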
150 
151 int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx)
152 {
153 	return bpf_jit_stack_local(ctx) + BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE;
154 }
155 
156 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
157 {
158 	int min_valid_nvreg = BPF_PPC_NVR_MIN;
159 	/* Default frame size for all cases except exception boundary */
160 	int frame_nvr_size = BPF_PPC_STACKFRAME;
161 
162 	/* Consider all nv regs for handling exceptions */
163 	if (ctx->exception_boundary || ctx->exception_cb) {
164 		min_valid_nvreg = _R14;
165 		frame_nvr_size = BPF_PPC_EXC_STACKFRAME;
166 	}
167 
168 	if (reg >= min_valid_nvreg && reg < 32)
169 		return (bpf_has_stack_frame(ctx) ?
170 			(frame_nvr_size + ctx->stack_size) : 0)
171 				- (8 * (32 - reg)) - BPF_PPC_TAILCALL;
172 
173 	pr_err("BPF JIT is asking about unknown registers");
174 	BUG();
175 }
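
/*
 * A worked example (a sketch, assuming BPF_PPC_TAILCALL = 8): with a stack
 * frame, the saved NVRs sit just below tail_call_info at the top of the frame,
 * e.g. _R31 at (frame size + ctx->stack_size) - 8 - 8 and _R26 at
 * (frame size + ctx->stack_size) - 6*8 - 8. Without a frame, the same
 * expression goes negative and the saves land in the caller's redzone.
 */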
176 
177 void bpf_jit_realloc_regs(struct codegen_context *ctx)
178 {
179 }
180 
181 /*
182  * For exception boundary & exception_cb progs:
183  *     return increased size to accommodate additional NVRs.
184  */
185 static int bpf_jit_stack_size(struct codegen_context *ctx)
186 {
187 	return ctx->exception_boundary || ctx->exception_cb ?
188 					BPF_PPC_EXC_STACKFRAME :
189 					BPF_PPC_STACKFRAME;
190 }
191 
192 void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
193 {
194 	int i;
195 
196 	/* Instruction for trampoline attach */
197 	EMIT(PPC_RAW_NOP());
198 
199 #ifndef CONFIG_PPC_KERNEL_PCREL
200 	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
201 		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
202 #endif
203 
204 	/*
205 	 * The tail call count (tcc) is saved and updated only in the main
206 	 * program's frame; the address of tcc in the main program's
207 	 * frame (tcc_ptr) is saved in each subprog's frame.
208 	 *
209 	 * The tail_call_info slot of any frame is interpreted as either
210 	 * tcc_ptr or the tcc value, depending on whether it is greater
211 	 * than MAX_TAIL_CALL_CNT.
212 	 */
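	/*
	 * Note (a sketch of the reasoning): the count stored by the main
	 * program stays in the range 0..MAX_TAIL_CALL_CNT, while tcc_ptr is a
	 * kernel stack address and therefore far larger, so the two can be
	 * told apart by magnitude alone.
	 */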
213 	if (!ctx->is_subprog) {
214 		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
215 		/* this goes in the redzone */
216 		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_TAILCALL)));
217 	} else if (!ctx->exception_cb) {
218 		/*
219 		 * Tail call jitting for non-exception_cb progs only;
220 		 * exception_cb doesn't require tail_call_info to be set up.
221 		 *
222 		 * tail_call_info interpretation logic:
223 		 *
224 		 * if caller's tail_call_info <= MAX_TAIL_CALL_CNT
225 		 *      main prog calling a subprog -> store a reference (tcc_ptr)
226 		 * else
227 		 *      subprog calling another subprog -> copy the reference as-is
228 		 */
229 		EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), _R1, 0));
230 		EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), -(BPF_PPC_TAILCALL)));
231 		EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
232 		PPC_BCC_CONST_SHORT(COND_GT, 8);
233 		EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2),
234 								-(BPF_PPC_TAILCALL)));
235 		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_TAILCALL)));
236 	}
237 
238 	if (bpf_has_stack_frame(ctx) && !ctx->exception_cb) {
239 		/*
240 		 * exception_cb reuses the boundary program's frame after the
241 		 * stack walk, so it can simply use the redzone; this saves one
242 		 * level in the stack walk loop.
243 		 *
244 		 * We need a stack frame, but we don't necessarily need to
245 		 * save/restore LR unless we call other functions
246 		 */
247 		if (ctx->seen & SEEN_FUNC) {
248 			EMIT(PPC_RAW_MFLR(_R0));
249 			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
250 		}
251 
252 		EMIT(PPC_RAW_STDU(_R1, _R1,
253 				-(bpf_jit_stack_size(ctx) + ctx->stack_size)));
254 	}
255 
256 	/*
257 	 * A program acting as an exception boundary pushes R14..R25 in addition
258 	 * to the BPF callee-saved non-volatile registers. The exception callback
259 	 * uses the boundary program's stack frame, so the additionally saved
260 	 * registers are recovered in the exception callback's epilogue.
261 	 */
262 	if (ctx->exception_boundary) {
263 		for (i = _R14; i <= _R25; i++)
264 			EMIT(PPC_RAW_STD(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
265 	}
266 
267 	if (!ctx->exception_cb) {
268 		/*
269 		 * Back up non-volatile regs -- BPF registers 6-10
270 		 * If we haven't created our own stack frame, we save these
271 		 * in the protected zone below the previous stack frame
272 		 */
273 		for (i = BPF_REG_6; i <= BPF_REG_10; i++)
274 			if (ctx->exception_boundary || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
275 				EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1,
276 					bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
277 
278 		if (ctx->exception_boundary || ctx->arena_vm_start)
279 			EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
280 				 bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
281 	} else {
282 		/*
283 		 * The exception callback receives the frame pointer of the
284 		 * boundary program (main prog) as its third argument.
285 		 */
286 		EMIT(PPC_RAW_MR(_R1, _R5));
287 	}
288 
289 	/*
290 	 * exception_cb is not restricted from using the stack area or the arena.
291 	 * Set up the frame pointer to point to the bpf stack area.
292 	 */
293 	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
294 		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
295 			STACK_FRAME_MIN_SIZE + ctx->stack_size));
296 
297 	if (ctx->arena_vm_start)
298 		PPC_LI64(bpf_to_ppc(ARENA_VM_START), ctx->arena_vm_start);
299 }
300 
301 static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
302 {
303 	int i;
304 
305 	/* Restore NVRs */
306 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
307 		if (ctx->exception_cb || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
308 			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
309 
310 	if (ctx->exception_cb || ctx->arena_vm_start)
311 		EMIT(PPC_RAW_LD(bpf_to_ppc(ARENA_VM_START), _R1,
312 				bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
313 
314 	if (ctx->exception_cb) {
315 		/*
316 		 * Recover additionally saved non volatile registers from stack
317 		 * frame of exception boundary program.
318 		 */
319 		for (i = _R14; i <= _R25; i++)
320 			EMIT(PPC_RAW_LD(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
321 	}
322 
323 	/* Tear down our stack frame */
324 	if (bpf_has_stack_frame(ctx)) {
325 		EMIT(PPC_RAW_ADDI(_R1, _R1, bpf_jit_stack_size(ctx) + ctx->stack_size));
326 
327 		if (ctx->seen & SEEN_FUNC || ctx->exception_cb) {
328 			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
329 			EMIT(PPC_RAW_MTLR(_R0));
330 		}
331 	}
332 }
333 
334 void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
335 {
336 	bpf_jit_emit_common_epilogue(image, ctx);
337 
338 	/* Move result to r3 */
339 	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
340 
341 	EMIT(PPC_RAW_BLR());
342 
343 	bpf_jit_build_fentry_stubs(image, ctx);
344 }
345 
346 /*
347  * arch_bpf_stack_walk() - BPF stack walker for PowerPC
348  *
349  * Based on arch_stack_walk() from stacktrace.c.
350  * On PowerPC, the stack pointer always points at the current stack frame;
351  * see [1] for the equivalence between frame pointers and stack pointers.
352  * Additional reference at [2].
353  * TODO: refactor with arch_stack_walk()
354  *
355  * [1]: https://lore.kernel.org/all/20200220115141.2707-1-mpe@ellerman.id.au/
356  * [2]: https://lore.kernel.org/bpf/20260122211854.5508-5-adubey@linux.ibm.com/
357  */
358 
359 void arch_bpf_stack_walk(bool (*consume_fn)(void *, u64, u64, u64), void *cookie)
360 {
361 	// callback processing always in current context
362 	unsigned long sp = current_stack_frame();
363 
364 	for (;;) {
365 		unsigned long *stack = (unsigned long *) sp;
366 		unsigned long ip;
367 
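		/*
		 * Per the ppc64 ABI, stack[0] is the back chain to the caller's
		 * frame and stack[STACK_FRAME_LR_SAVE] is the saved LR: each
		 * iteration reports one frame and then follows the back chain.
		 */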
368 		if (!validate_sp(sp, current))
369 			return;
370 
371 		ip = stack[STACK_FRAME_LR_SAVE];
372 		if (!ip)
373 			break;
374 
375 		/*
376 		 * The common consume_fn code expects a stack pointer as the third
377 		 * argument. ppc64 has no separate frame pointer, so pass the stack
378 		 * frame address (named sp here) for both.
379 		 */
380 		if (ip && !consume_fn(cookie, ip, sp, sp))
381 			break;
382 
383 		sp = stack[0];
384 	}
385 }
386 
387 int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
388 {
389 	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
390 	long reladdr;
391 
392 	/* bpf to bpf call, func is not known in the initial pass. Emit 5 nops as a placeholder */
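	/*
	 * A sketch of the sizing: 5 nops leave room for the longest
	 * address-materialization sequence emitted once the target is known
	 * (PPC_LI64() can take up to 5 instructions).
	 */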
393 	if (!func) {
394 		for (int i = 0; i < 5; i++)
395 			EMIT(PPC_RAW_NOP());
396 		/* elfv1 needs an additional instruction to load addr from descriptor */
397 		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
398 			EMIT(PPC_RAW_NOP());
399 		EMIT(PPC_RAW_MTCTR(_R12));
400 		EMIT(PPC_RAW_BCTRL());
401 		return 0;
402 	}
403 
404 #ifdef CONFIG_PPC_KERNEL_PCREL
405 	reladdr = func_addr - local_paca->kernelbase;
406 
407 	/*
408 	 * If fimage is NULL (the initial pass to find image size),
409 	 * account for the maximum no. of instructions possible.
410 	 */
411 	if (!fimage) {
412 		ctx->idx += 7;
413 		return 0;
414 	} else if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
415 		EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
416 		/* Align so the subsequent 8-byte prefixed instruction cannot cross a 64-byte boundary */
417 		if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
418 			EMIT(PPC_RAW_NOP());
419 		/* paddi r12,r12,addr */
420 		EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
421 		EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
422 	} else {
423 		unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx);
424 		bool alignment_needed = !IS_ALIGNED(pc, 8);
425 
426 		reladdr = func_addr - (alignment_needed ? pc + 4 :  pc);
427 
428 		if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
429 			if (alignment_needed)
430 				EMIT(PPC_RAW_NOP());
431 			/* pla r12,addr */
432 			EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
433 			EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
434 		} else {
435 			/* We can clobber r12 */
436 			PPC_LI64(_R12, func);
437 		}
438 	}
439 	EMIT(PPC_RAW_MTCTR(_R12));
440 	EMIT(PPC_RAW_BCTRL());
441 #else
442 	if (core_kernel_text(func_addr)) {
443 		reladdr = func_addr - kernel_toc_addr();
444 		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
445 			pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
446 			return -ERANGE;
447 		}
448 
449 		EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
450 		EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
451 		EMIT(PPC_RAW_MTCTR(_R12));
452 		EMIT(PPC_RAW_BCTRL());
453 	} else {
454 		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) {
455 			/* func points to the function descriptor */
456 			PPC_LI64(bpf_to_ppc(TMP_REG_2), func);
457 			/* Load actual entry point from function descriptor */
458 			EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
459 			/* ... and move it to CTR */
460 			EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
461 			/*
462 			 * Load TOC from function descriptor at offset 8.
463 			 * We can clobber r2 since we get called through a
464 			 * function pointer (so caller will save/restore r2).
465 			 */
466 			if (is_module_text_address(func_addr))
467 				EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
468 		} else {
469 			PPC_LI64(_R12, func);
470 			EMIT(PPC_RAW_MTCTR(_R12));
471 		}
472 		EMIT(PPC_RAW_BCTRL());
473 		/*
474 		 * Load r2 with kernel TOC as kernel TOC is used if function address falls
475 		 * within core kernel text.
476 		 */
477 		if (is_module_text_address(func_addr))
478 			EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
479 	}
480 #endif
481 
482 	return 0;
483 }
484 
485 static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
486 {
487 	/*
488 	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
489 	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
490 	 * r4/BPF_REG_2 - pointer to bpf_array
491 	 * r5/BPF_REG_3 - index in bpf_array
492 	 */
493 	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
494 	int b2p_index = bpf_to_ppc(BPF_REG_3);
495 	int bpf_tailcall_prologue_size = 12;
496 
497 	if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
498 		bpf_tailcall_prologue_size += 4; /* skip past the toc load */
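	/*
	 * A sketch of what gets skipped (based on the prologue above): 12 bytes
	 * cover the trampoline nop plus the two instructions that initialize
	 * tail_call_info in the target's prologue, so the caller's tail call
	 * count is carried over instead of being reset.
	 */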
499 
500 	/*
501 	 * if (index >= array->map.max_entries)
502 	 *   goto out;
503 	 */
504 	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
505 	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
506 	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
507 	PPC_BCC_SHORT(COND_GE, out);
508 
509 	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
510 	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
511 	PPC_BCC_CONST_SHORT(COND_LE, 8);
512 
513 	/* dereference TMP_REG_1 */
514 	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 0));
515 
516 	/*
517 	 * if (tail_call_info == MAX_TAIL_CALL_CNT)
518 	 *   goto out;
519 	 */
520 	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
521 	PPC_BCC_SHORT(COND_EQ, out);
522 
523 	/*
524 	 * tail_call_info++; <- Actual value of tcc here
525 	 */
526 	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
527 
528 	/*
529 	 * Before writing the updated count back, determine whether the current
530 	 * frame's tail_call_info holds a reference to the main frame's
531 	 * tail_call_info or the tcc value itself.
532 	 */
533 	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
534 	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_2), MAX_TAIL_CALL_CNT));
535 	PPC_BCC_CONST_SHORT(COND_GT, 8);
536 
537 	/* First get address of tail_call_info */
538 	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
539 	/* Writeback updated value to tail_call_info */
540 	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
541 
542 	/* prog = array->ptrs[index]; */
543 	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
544 	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
545 	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
546 
547 	/*
548 	 * if (prog == NULL)
549 	 *   goto out;
550 	 */
551 	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
552 	PPC_BCC_SHORT(COND_EQ, out);
553 
554 	/* goto *(prog->bpf_func + prologue_size); */
555 	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
556 	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
557 			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
558 	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
559 
560 	/* tear down stack, restore NVRs, ... */
561 	bpf_jit_emit_common_epilogue(image, ctx);
562 
563 	EMIT(PPC_RAW_BCTR());
564 
565 	/* out: */
566 	return 0;
567 }
568 
569 bool bpf_jit_bypass_spec_v1(void)
570 {
571 #if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
572 	return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
573 		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR));
574 #else
575 	return true;
576 #endif
577 }
578 
579 bool bpf_jit_bypass_spec_v4(void)
580 {
581 	return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
582 		 security_ftr_enabled(SEC_FTR_STF_BARRIER) &&
583 		 stf_barrier_type_get() != STF_BARRIER_NONE);
584 }
585 
586 /*
587  * We spill into the redzone always, even if the bpf program has its own stackframe.
588  * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
589  */
590 void bpf_stf_barrier(void);
591 
592 asm (
593 "		.global bpf_stf_barrier		;"
594 "	bpf_stf_barrier:			;"
595 "		std	21,-80(1)		;"
596 "		std	22,-72(1)		;"
597 "		sync				;"
598 "		ld	21,-80(1)		;"
599 "		ld	22,-72(1)		;"
600 "		ori	31,31,0			;"
601 "		.rept 14			;"
602 "		b	1f			;"
603 "	1:					;"
604 "		.endr				;"
605 "		blr				;"
606 );
607 
608 static int bpf_jit_emit_atomic_ops(u32 *image, struct codegen_context *ctx,
609 				   const struct bpf_insn *insn, u32 *jmp_off,
610 				   u32 *tmp_idx, u32 *addrp)
611 {
612 	u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
613 	u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
614 	u32 size = BPF_SIZE(insn->code);
615 	u32 src_reg = bpf_to_ppc(insn->src_reg);
616 	u32 dst_reg = bpf_to_ppc(insn->dst_reg);
617 	s32 imm = insn->imm;
618 
619 	u32 save_reg = tmp2_reg;
620 	u32 ret_reg = src_reg;
621 	u32 fixup_idx;
622 
623 	/* Get offset into TMP_REG_1 */
624 	EMIT(PPC_RAW_LI(tmp1_reg, insn->off));
625        /*
626 	* Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
627 	* before and after the operation.
628 	*
629 	* This is a requirement in the Linux Kernel Memory Model.
630 	* See __cmpxchg_u64() in asm/cmpxchg.h as an example.
631 	*/
632 	if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
633 		EMIT(PPC_RAW_SYNC());
634 
635 	*tmp_idx = ctx->idx;
636 
637 	/* load value from memory into TMP_REG_2 */
638 	if (size == BPF_DW)
639 		EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
640 	else
641 		EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
642 	/* Save old value in _R0 */
643 	if (imm & BPF_FETCH)
644 		EMIT(PPC_RAW_MR(_R0, tmp2_reg));
645 
646 	switch (imm) {
647 	case BPF_ADD:
648 	case BPF_ADD | BPF_FETCH:
649 		EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
650 		break;
651 	case BPF_AND:
652 	case BPF_AND | BPF_FETCH:
653 		EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
654 		break;
655 	case BPF_OR:
656 	case BPF_OR | BPF_FETCH:
657 		EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
658 		break;
659 	case BPF_XOR:
660 	case BPF_XOR | BPF_FETCH:
661 		EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
662 		break;
663 	case BPF_CMPXCHG:
664 	       /*
665 		* Return old value in BPF_REG_0 for BPF_CMPXCHG &
666 		* in src_reg for other cases.
667 		*/
668 		ret_reg = bpf_to_ppc(BPF_REG_0);
669 
670 		/* Compare with old value in BPF_R0 */
671 		if (size == BPF_DW)
672 			EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
673 		else
674 			EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
675 		/* Don't set if different from old value */
676 		PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
677 		fallthrough;
678 	case BPF_XCHG:
679 		save_reg = src_reg;
680 		break;
681 	default:
682 		return -EOPNOTSUPP;
683 	}
684 
685 	/* store new value */
686 	if (size == BPF_DW)
687 		EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
688 	else
689 		EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
690 	/* we're done if this succeeded */
691 	PPC_BCC_SHORT(COND_NE, *tmp_idx * 4);
692 	fixup_idx = ctx->idx;
693 
694 	if (imm & BPF_FETCH) {
695 		/* Emit 'sync' to enforce full ordering */
696 		if (IS_ENABLED(CONFIG_SMP))
697 			EMIT(PPC_RAW_SYNC());
698 		EMIT(PPC_RAW_MR(ret_reg, _R0));
699 		/*
700 		 * Skip unnecessary zero-extension for 32-bit cmpxchg.
701 		 * For context, see commit 39491867ace5.
702 		 */
703 		if (size != BPF_DW && imm == BPF_CMPXCHG &&
704 		    insn_is_zext(insn + 1))
705 			*addrp = ctx->idx * 4;
706 	}
707 
708 	*jmp_off = (fixup_idx - *tmp_idx) * 4;
709 
710 	return 0;
711 }
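
/*
 * A sketch of the sequence generated above for, e.g., BPF_ADD | BPF_FETCH on a
 * BPF_DW operand (off is preloaded into tmp1):
 *
 *	sync				(BPF_FETCH, SMP only)
 * retry:
 *	ldarx	tmp2, tmp1, dst
 *	mr	r0, tmp2
 *	add	tmp2, tmp2, src
 *	stdcx.	tmp2, tmp1, dst
 *	bne-	retry
 *	sync				(BPF_FETCH, SMP only)
 *	mr	src, r0
 *
 * tmp_idx marks the ldarx and jmp_off is the distance from it to the first
 * instruction after the store-conditional loop; bpf_add_extable_entry() uses
 * both when building the fixup for arena accesses.
 */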
712 
713 static int bpf_jit_emit_probe_mem_store(struct codegen_context *ctx, u32 src_reg, s16 off,
714 					u32 code, u32 *image)
715 {
716 	u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
717 	u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
718 
719 	switch (BPF_SIZE(code)) {
720 	case BPF_B:
721 		EMIT(PPC_RAW_STB(src_reg, tmp1_reg, off));
722 		break;
723 	case BPF_H:
724 		EMIT(PPC_RAW_STH(src_reg, tmp1_reg, off));
725 		break;
726 	case BPF_W:
727 		EMIT(PPC_RAW_STW(src_reg, tmp1_reg, off));
728 		break;
729 	case BPF_DW:
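		/*
		 * std is a DS-form instruction whose displacement must be a
		 * multiple of 4; route unaligned offsets through an index register.
		 */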
730 		if (off % 4) {
731 			EMIT(PPC_RAW_LI(tmp2_reg, off));
732 			EMIT(PPC_RAW_STDX(src_reg, tmp1_reg, tmp2_reg));
733 		} else {
734 			EMIT(PPC_RAW_STD(src_reg, tmp1_reg, off));
735 		}
736 		break;
737 	default:
738 		return -EINVAL;
739 	}
740 	return 0;
741 }
742 
743 static int emit_atomic_ld_st(const struct bpf_insn insn, struct codegen_context *ctx, u32 *image)
744 {
745 	u32 code = insn.code;
746 	u32 dst_reg = bpf_to_ppc(insn.dst_reg);
747 	u32 src_reg = bpf_to_ppc(insn.src_reg);
748 	u32 size = BPF_SIZE(code);
749 	u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
750 	u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
751 	s16 off = insn.off;
752 	s32 imm = insn.imm;
753 
754 	switch (imm) {
755 	case BPF_LOAD_ACQ:
756 		switch (size) {
757 		case BPF_B:
758 			EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
759 			break;
760 		case BPF_H:
761 			EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
762 			break;
763 		case BPF_W:
764 			EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
765 			break;
766 		case BPF_DW:
767 			if (off % 4) {
768 				EMIT(PPC_RAW_LI(tmp1_reg, off));
769 				EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
770 			} else {
771 				EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
772 			}
773 			break;
774 		}
775 		EMIT(PPC_RAW_LWSYNC());
776 		break;
777 	case BPF_STORE_REL:
778 		EMIT(PPC_RAW_LWSYNC());
779 		switch (size) {
780 		case BPF_B:
781 			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
782 			break;
783 		case BPF_H:
784 			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
785 			break;
786 		case BPF_W:
787 			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
788 			break;
789 		case BPF_DW:
790 			if (off % 4) {
791 				EMIT(PPC_RAW_LI(tmp2_reg, off));
792 				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
793 			} else {
794 				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
795 			}
796 			break;
797 		}
798 		break;
799 	default:
800 		pr_err_ratelimited("unexpected atomic load/store op code %02x\n",
801 				   imm);
802 		return -EINVAL;
803 	}
804 
805 	return 0;
806 }
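
/*
 * Note: an lwsync after the load gives acquire semantics and an lwsync before
 * the store gives release semantics, mirroring how the kernel implements
 * smp_load_acquire()/smp_store_release() on powerpc.
 */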
807 
808 /* Assemble the body code between the prologue & epilogue */
809 int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
810 		       u32 *addrs, int pass, bool extra_pass)
811 {
812 	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
813 	bool sync_emitted, ori31_emitted;
814 	const struct bpf_insn *insn = fp->insnsi;
815 	int flen = fp->len;
816 	int i, ret;
817 
818 	/* Start of epilogue code - will only be valid 2nd pass onwards */
819 	u32 exit_addr = addrs[flen];
820 
821 	for (i = 0; i < flen; i++) {
822 		u32 code = insn[i].code;
823 		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
824 		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
825 		u32 size = BPF_SIZE(code);
826 		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
827 		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
828 		s16 off = insn[i].off;
829 		s32 imm = insn[i].imm;
830 		bool func_addr_fixed;
831 		u64 func_addr;
832 		u64 imm64;
833 		u32 true_cond;
834 		u32 tmp_idx;
835 		u32 jmp_off;
836 
837 		/*
838 		 * addrs[] maps a BPF bytecode address into a real offset from
839 		 * the start of the body code.
840 		 */
841 		addrs[i] = ctx->idx * 4;
842 
843 		/*
844 		 * As an optimization, we note down which non-volatile registers
845 		 * are used so that we can only save/restore those in our
846 		 * prologue and epilogue. We do this here regardless of whether
847 		 * the actual BPF instruction uses src/dst registers or not
848 		 * (for instance, BPF_CALL does not use them). The expectation
849 		 * is that those instructions will have src_reg/dst_reg set to
850 		 * 0. Even otherwise, we just lose some prologue/epilogue
851 		 * optimization but everything else should work without
852 		 * any issues.
853 		 */
854 		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
855 			bpf_set_seen_register(ctx, dst_reg);
856 		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
857 			bpf_set_seen_register(ctx, src_reg);
858 
859 		switch (code) {
860 		/*
861 		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
862 		 */
863 		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
864 		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
865 			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
866 			goto bpf_alu32_trunc;
867 		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
868 		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
869 			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
870 			goto bpf_alu32_trunc;
871 		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
872 		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
873 			if (!imm) {
874 				goto bpf_alu32_trunc;
875 			} else if (imm >= -32768 && imm < 32768) {
876 				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
877 			} else {
878 				PPC_LI32(tmp1_reg, imm);
879 				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
880 			}
881 			goto bpf_alu32_trunc;
882 		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
883 		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
884 			if (!imm) {
885 				goto bpf_alu32_trunc;
886 			} else if (imm > -32768 && imm <= 32768) {
887 				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
888 			} else {
889 				PPC_LI32(tmp1_reg, imm);
890 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
891 			}
892 			goto bpf_alu32_trunc;
893 		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
894 		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
895 			if (BPF_CLASS(code) == BPF_ALU)
896 				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
897 			else
898 				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
899 			goto bpf_alu32_trunc;
900 		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
901 		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
902 			if (imm >= -32768 && imm < 32768)
903 				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
904 			else {
905 				PPC_LI32(tmp1_reg, imm);
906 				if (BPF_CLASS(code) == BPF_ALU)
907 					EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
908 				else
909 					EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
910 			}
911 			goto bpf_alu32_trunc;
912 		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
913 		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
914 			if (BPF_OP(code) == BPF_MOD) {
915 				if (off)
916 					EMIT(PPC_RAW_DIVW(tmp1_reg, dst_reg, src_reg));
917 				else
918 					EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
919 
920 				EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
921 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
922 			} else
923 				if (off)
924 					EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, src_reg));
925 				else
926 					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
927 			goto bpf_alu32_trunc;
928 		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
929 		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
930 			if (BPF_OP(code) == BPF_MOD) {
931 				if (off)
932 					EMIT(PPC_RAW_DIVD(tmp1_reg, dst_reg, src_reg));
933 				else
934 					EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
935 				EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
936 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
937 			} else
938 				if (off)
939 					EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, src_reg));
940 				else
941 					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
942 			break;
943 		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
944 		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
945 		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
946 		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
947 			if (imm == 0)
948 				return -EINVAL;
949 			if (imm == 1) {
950 				if (BPF_OP(code) == BPF_DIV) {
951 					goto bpf_alu32_trunc;
952 				} else {
953 					EMIT(PPC_RAW_LI(dst_reg, 0));
954 					break;
955 				}
956 			}
957 
958 			PPC_LI32(tmp1_reg, imm);
959 			switch (BPF_CLASS(code)) {
960 			case BPF_ALU:
961 				if (BPF_OP(code) == BPF_MOD) {
962 					if (off)
963 						EMIT(PPC_RAW_DIVW(tmp2_reg, dst_reg, tmp1_reg));
964 					else
965 						EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
966 					EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
967 					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
968 				} else
969 					if (off)
970 						EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, tmp1_reg));
971 					else
972 						EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
973 				break;
974 			case BPF_ALU64:
975 				if (BPF_OP(code) == BPF_MOD) {
976 					if (off)
977 						EMIT(PPC_RAW_DIVD(tmp2_reg, dst_reg, tmp1_reg));
978 					else
979 						EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
980 					EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
981 					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
982 				} else
983 					if (off)
984 						EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, tmp1_reg));
985 					else
986 						EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
987 				break;
988 			}
989 			goto bpf_alu32_trunc;
990 		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
991 		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
992 			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
993 			goto bpf_alu32_trunc;
994 
995 		/*
996 		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
997 		 */
998 		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
999 		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
1000 			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
1001 			goto bpf_alu32_trunc;
1002 		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
1003 		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
1004 			if (!IMM_H(imm))
1005 				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
1006 			else {
1007 				/* Sign-extended */
1008 				PPC_LI32(tmp1_reg, imm);
1009 				EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
1010 			}
1011 			goto bpf_alu32_trunc;
1012 		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
1013 		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
1014 			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
1015 			goto bpf_alu32_trunc;
1016 		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
1017 		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
1018 			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
1019 				/* Sign-extended */
1020 				PPC_LI32(tmp1_reg, imm);
1021 				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
1022 			} else {
1023 				if (IMM_L(imm))
1024 					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
1025 				if (IMM_H(imm))
1026 					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
1027 			}
1028 			goto bpf_alu32_trunc;
1029 		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
1030 		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
1031 			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
1032 			goto bpf_alu32_trunc;
1033 		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
1034 		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
1035 			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
1036 				/* Sign-extended */
1037 				PPC_LI32(tmp1_reg, imm);
1038 				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
1039 			} else {
1040 				if (IMM_L(imm))
1041 					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
1042 				if (IMM_H(imm))
1043 					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
1044 			}
1045 			goto bpf_alu32_trunc;
1046 		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
1047 			/* slw clears top 32 bits */
1048 			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
1049 			/* skip zero extension move, but set address map. */
1050 			if (insn_is_zext(&insn[i + 1]))
1051 				addrs[++i] = ctx->idx * 4;
1052 			break;
1053 		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
1054 			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
1055 			break;
1056 		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
1057 			/* with imm 0, we still need to clear top 32 bits */
1058 			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
1059 			if (insn_is_zext(&insn[i + 1]))
1060 				addrs[++i] = ctx->idx * 4;
1061 			break;
1062 		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
1063 			if (imm != 0)
1064 				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
1065 			break;
1066 		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
1067 			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
1068 			if (insn_is_zext(&insn[i + 1]))
1069 				addrs[++i] = ctx->idx * 4;
1070 			break;
1071 		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
1072 			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
1073 			break;
1074 		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
1075 			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
1076 			if (insn_is_zext(&insn[i + 1]))
1077 				addrs[++i] = ctx->idx * 4;
1078 			break;
1079 		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
1080 			if (imm != 0)
1081 				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
1082 			break;
1083 		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
1084 			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
1085 			goto bpf_alu32_trunc;
1086 		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
1087 			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
1088 			break;
1089 		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
1090 			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
1091 			goto bpf_alu32_trunc;
1092 		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
1093 			if (imm != 0)
1094 				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
1095 			break;
1096 
1097 		/*
1098 		 * MOV
1099 		 */
1100 		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
1101 		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
1102 
1103 			if (insn_is_mov_percpu_addr(&insn[i])) {
1104 				if (IS_ENABLED(CONFIG_SMP)) {
1105 					EMIT(PPC_RAW_LD(tmp1_reg, _R13, offsetof(struct paca_struct, data_offset)));
1106 					EMIT(PPC_RAW_ADD(dst_reg, src_reg, tmp1_reg));
1107 				} else if (src_reg != dst_reg) {
1108 					EMIT(PPC_RAW_MR(dst_reg, src_reg));
1109 				}
1110 				break;
1111 			}
1112 
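			/*
			 * arena cast_user (a reading of the sequence below): OR the
			 * upper 32 bits of ctx->user_vm_start into the 32-bit arena
			 * offset to form a user-space pointer; a NULL source (zero
			 * low word) is left as NULL by skipping the OR.
			 */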
1113 			if (insn_is_cast_user(&insn[i])) {
1114 				EMIT(PPC_RAW_RLDICL_DOT(tmp1_reg, src_reg, 0, 32));
1115 				PPC_LI64(dst_reg, (ctx->user_vm_start & 0xffffffff00000000UL));
1116 				PPC_BCC_SHORT(COND_EQ, (ctx->idx + 2) * 4);
1117 				EMIT(PPC_RAW_OR(tmp1_reg, dst_reg, tmp1_reg));
1118 				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
1119 				break;
1120 			}
1121 
1122 			if (imm == 1) {
1123 				/* special mov32 for zext */
1124 				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
1125 				break;
1126 			} else if (off == 8) {
1127 				EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
1128 			} else if (off == 16) {
1129 				EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
1130 			} else if (off == 32) {
1131 				EMIT(PPC_RAW_EXTSW(dst_reg, src_reg));
1132 			} else if (dst_reg != src_reg)
1133 				EMIT(PPC_RAW_MR(dst_reg, src_reg));
1134 			goto bpf_alu32_trunc;
1135 		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
1136 		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
1137 			PPC_LI32(dst_reg, imm);
1138 			if (imm < 0)
1139 				goto bpf_alu32_trunc;
1140 			else if (insn_is_zext(&insn[i + 1]))
1141 				addrs[++i] = ctx->idx * 4;
1142 			break;
1143 
1144 bpf_alu32_trunc:
1145 		/* Truncate to 32-bits */
1146 		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
1147 			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
1148 		break;
1149 
1150 		/*
1151 		 * BPF_FROM_BE/LE
1152 		 */
1153 		case BPF_ALU | BPF_END | BPF_FROM_LE:
1154 		case BPF_ALU | BPF_END | BPF_FROM_BE:
1155 		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
1156 #ifdef __BIG_ENDIAN__
1157 			if (BPF_SRC(code) == BPF_FROM_BE)
1158 				goto emit_clear;
1159 #else /* !__BIG_ENDIAN__ */
1160 			if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE)
1161 				goto emit_clear;
1162 #endif
1163 			switch (imm) {
1164 			case 16:
1165 				/* Rotate 8 bits left & mask with 0x0000ff00 */
1166 				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
1167 				/* Rotate 8 bits right & insert LSB to reg */
1168 				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
1169 				/* Move result back to dst_reg */
1170 				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
1171 				break;
1172 			case 32:
1173 				/*
1174 				 * Rotate word left by 8 bits:
1175 				 * 2 bytes are already in their final position
1176 				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
1177 				 */
1178 				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
1179 				/* Rotate 24 bits and insert byte 1 */
1180 				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
1181 				/* Rotate 24 bits and insert byte 3 */
1182 				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
1183 				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
1184 				break;
1185 			case 64:
1186 				/* Store the value to stack and then use byte-reverse loads */
1187 				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
1188 				EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
1189 				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
1190 					EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
1191 				} else {
1192 					EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
1193 					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
1194 						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
1195 					EMIT(PPC_RAW_LI(tmp2_reg, 4));
1196 					EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
1197 					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1198 						EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
1199 					EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
1200 				}
1201 				break;
1202 			}
1203 			break;
1204 
1205 emit_clear:
1206 			switch (imm) {
1207 			case 16:
1208 				/* zero-extend 16 bits into 64 bits */
1209 				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
1210 				if (insn_is_zext(&insn[i + 1]))
1211 					addrs[++i] = ctx->idx * 4;
1212 				break;
1213 			case 32:
1214 				if (!fp->aux->verifier_zext)
1215 					/* zero-extend 32 bits into 64 bits */
1216 					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
1217 				break;
1218 			case 64:
1219 				/* nop */
1220 				break;
1221 			}
1222 			break;
1223 
1224 		/*
1225 		 * BPF_ST NOSPEC (speculation barrier)
1226 		 *
1227 		 * The following must act as a barrier against both Spectre v1
1228 		 * and v4 if we requested both mitigations. Therefore, also emit
1229 		 * 'isync; sync' on E500 or 'ori31' on BOOK3S_64 in addition to
1230 		 * the insns needed for a Spectre v4 barrier.
1231 		 *
1232 		 * If we requested only !bypass_spec_v1 OR only !bypass_spec_v4,
1233 		 * we can skip the respective other barrier type as an
1234 		 * optimization.
1235 		 */
1236 		case BPF_ST | BPF_NOSPEC:
1237 			sync_emitted = false;
1238 			ori31_emitted = false;
1239 			if (IS_ENABLED(CONFIG_PPC_E500) &&
1240 			    !bpf_jit_bypass_spec_v1()) {
1241 				EMIT(PPC_RAW_ISYNC());
1242 				EMIT(PPC_RAW_SYNC());
1243 				sync_emitted = true;
1244 			}
1245 			if (!bpf_jit_bypass_spec_v4()) {
1246 				switch (stf_barrier) {
1247 				case STF_BARRIER_EIEIO:
1248 					EMIT(PPC_RAW_EIEIO() | 0x02000000);
1249 					break;
1250 				case STF_BARRIER_SYNC_ORI:
1251 					if (!sync_emitted)
1252 						EMIT(PPC_RAW_SYNC());
1253 					EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
1254 					EMIT(PPC_RAW_ORI(_R31, _R31, 0));
1255 					ori31_emitted = true;
1256 					break;
1257 				case STF_BARRIER_FALLBACK:
1258 					ctx->seen |= SEEN_FUNC;
1259 					PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
1260 					EMIT(PPC_RAW_MTCTR(_R12));
1261 					EMIT(PPC_RAW_BCTRL());
1262 					break;
1263 				case STF_BARRIER_NONE:
1264 					break;
1265 				}
1266 			}
1267 			if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1268 			    !bpf_jit_bypass_spec_v1() &&
1269 			    !ori31_emitted)
1270 				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
1271 			break;
1272 
1273 		/*
1274 		 * BPF_ST(X)
1275 		 */
1276 		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
1277 		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
1278 			if (BPF_CLASS(code) == BPF_ST) {
1279 				EMIT(PPC_RAW_LI(tmp1_reg, imm));
1280 				src_reg = tmp1_reg;
1281 			}
1282 			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
1283 			break;
1284 		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
1285 		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
1286 			if (BPF_CLASS(code) == BPF_ST) {
1287 				EMIT(PPC_RAW_LI(tmp1_reg, imm));
1288 				src_reg = tmp1_reg;
1289 			}
1290 			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
1291 			break;
1292 		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
1293 		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
1294 			if (BPF_CLASS(code) == BPF_ST) {
1295 				PPC_LI32(tmp1_reg, imm);
1296 				src_reg = tmp1_reg;
1297 			}
1298 			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
1299 			break;
1300 		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
1301 		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
1302 			if (BPF_CLASS(code) == BPF_ST) {
1303 				PPC_LI32(tmp1_reg, imm);
1304 				src_reg = tmp1_reg;
1305 			}
1306 			if (off % 4) {
1307 				EMIT(PPC_RAW_LI(tmp2_reg, off));
1308 				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
1309 			} else {
1310 				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
1311 			}
1312 			break;
1313 
1314 		case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
1315 		case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
1316 		case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
1317 		case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
1318 
1319 			EMIT(PPC_RAW_ADD(tmp1_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
1320 
1321 			ret = bpf_jit_emit_probe_mem_store(ctx, src_reg, off, code, image);
1322 			if (ret)
1323 				return ret;
1324 
1325 			ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1326 						    ctx->idx - 1, 4, -1, code);
1327 			if (ret)
1328 				return ret;
1329 
1330 			break;
1331 
1332 		case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
1333 		case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
1334 		case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
1335 		case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
1336 
1337 			EMIT(PPC_RAW_ADD(tmp1_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
1338 
1339 			if (BPF_SIZE(code) == BPF_W || BPF_SIZE(code) == BPF_DW) {
1340 				PPC_LI32(tmp2_reg, imm);
1341 				src_reg = tmp2_reg;
1342 			} else {
1343 				EMIT(PPC_RAW_LI(tmp2_reg, imm));
1344 				src_reg = tmp2_reg;
1345 			}
1346 
1347 			ret = bpf_jit_emit_probe_mem_store(ctx, src_reg, off, code, image);
1348 			if (ret)
1349 				return ret;
1350 
1351 			ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1352 						    ctx->idx - 1, 4, -1, code);
1353 			if (ret)
1354 				return ret;
1355 
1356 			break;
1357 
1358 		/*
1359 		 * BPF_STX PROBE_ATOMIC (arena atomic ops)
1360 		 */
1361 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
1362 		case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
1363 			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
1364 			ret = bpf_jit_emit_atomic_ops(image, ctx, &insn[i],
1365 						      &jmp_off, &tmp_idx, &addrs[i + 1]);
1366 			if (ret) {
1367 				if (ret == -EOPNOTSUPP) {
1368 					pr_err_ratelimited(
1369 						"eBPF filter atomic op code %02x (@%d) unsupported\n",
1370 						code, i);
1371 				}
1372 				return ret;
1373 			}
1374 			/* LDARX/LWARX should land here on exception. */
1375 			ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1376 						    tmp_idx, jmp_off, dst_reg, code);
1377 			if (ret)
1378 				return ret;
1379 
1380 			/* Restore dst_reg by subtracting the arena base back out */
1381 			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
1382 			break;
1383 
1384 		/*
1385 		 * BPF_STX ATOMIC (atomic ops)
1386 		 */
1387 		case BPF_STX | BPF_ATOMIC | BPF_B:
1388 		case BPF_STX | BPF_ATOMIC | BPF_H:
1389 		case BPF_STX | BPF_ATOMIC | BPF_W:
1390 		case BPF_STX | BPF_ATOMIC | BPF_DW:
1391 			if (bpf_atomic_is_load_store(&insn[i])) {
1392 				ret = emit_atomic_ld_st(insn[i], ctx, image);
1393 				if (ret)
1394 					return ret;
1395 
1396 				if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
1397 					addrs[++i] = ctx->idx * 4;
1398 				break;
1399 			} else if (size == BPF_B || size == BPF_H) {
1400 				pr_err_ratelimited(
1401 					"eBPF filter atomic op code %02x (@%d) unsupported\n",
1402 					code, i);
1403 				return -EOPNOTSUPP;
1404 			}
1405 
1406 			ret = bpf_jit_emit_atomic_ops(image, ctx, &insn[i],
1407 						      &jmp_off, &tmp_idx, &addrs[i + 1]);
1408 			if (ret) {
1409 				if (ret == -EOPNOTSUPP) {
1410 					pr_err_ratelimited(
1411 						"eBPF filter atomic op code %02x (@%d) unsupported\n",
1412 						code, i);
1413 				}
1414 				return ret;
1415 			}
1416 			break;
1417 
1418 		/*
1419 		 * BPF_LDX
1420 		 */
1421 		/* dst = *(u8 *)(ul) (src + off) */
1422 		case BPF_LDX | BPF_MEM | BPF_B:
1423 		case BPF_LDX | BPF_MEMSX | BPF_B:
1424 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1425 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1426 		/* dst = *(u16 *)(ul) (src + off) */
1427 		case BPF_LDX | BPF_MEM | BPF_H:
1428 		case BPF_LDX | BPF_MEMSX | BPF_H:
1429 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1430 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1431 		/* dst = *(u32 *)(ul) (src + off) */
1432 		case BPF_LDX | BPF_MEM | BPF_W:
1433 		case BPF_LDX | BPF_MEMSX | BPF_W:
1434 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1435 		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1436 		/* dst = *(u64 *)(ul) (src + off) */
1437 		case BPF_LDX | BPF_MEM | BPF_DW:
1438 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1439 			/*
1440 			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
1441 			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
1442 			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
1443 			 * set dst_reg=0 and move on.
1444 			 */
1445 			if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
1446 				EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
1447 				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
1448 					PPC_LI64(tmp2_reg, 0x8000000000000000ul);
1449 				else /* BOOK3S_64 */
1450 					PPC_LI64(tmp2_reg, PAGE_OFFSET);
1451 				EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
1452 				PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
1453 				EMIT(PPC_RAW_LI(dst_reg, 0));
1454 				/*
1455 				 * Jump over two instructions when the load below takes two:
1456 				 * an unaligned-offset BPF_DW load, or a sign-extending byte load (lbz + extsb).
1457 				 */
1458 				if ((BPF_SIZE(code) == BPF_DW && (off & 3)) ||
1459 				    (BPF_SIZE(code) == BPF_B &&
1460 				     BPF_MODE(code) == BPF_PROBE_MEMSX) ||
1461 				    (BPF_SIZE(code) == BPF_B && BPF_MODE(code) == BPF_MEMSX))
1462 					PPC_JMP((ctx->idx + 3) * 4);
1463 				else
1464 					PPC_JMP((ctx->idx + 2) * 4);
1465 			}
1466 
1467 			if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
1468 				switch (size) {
1469 				case BPF_B:
1470 					EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
1471 					EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
1472 					break;
1473 				case BPF_H:
1474 					EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
1475 					break;
1476 				case BPF_W:
1477 					EMIT(PPC_RAW_LWA(dst_reg, src_reg, off));
1478 					break;
1479 				}
1480 			} else {
1481 				switch (size) {
1482 				case BPF_B:
1483 					EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
1484 					break;
1485 				case BPF_H:
1486 					EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
1487 					break;
1488 				case BPF_W:
1489 					EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
1490 					break;
1491 				case BPF_DW:
1492 					if (off % 4) {
1493 						EMIT(PPC_RAW_LI(tmp1_reg, off));
1494 						EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
1495 					} else {
1496 						EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
1497 					}
1498 					break;
1499 				}
1500 			}
1501 
1502 			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
1503 				addrs[++i] = ctx->idx * 4;
1504 
1505 			if (BPF_MODE(code) == BPF_PROBE_MEM) {
1506 				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1507 							    ctx->idx - 1, 4, dst_reg, code);
1508 				if (ret)
1509 					return ret;
1510 			}
1511 			break;
1512 
1513 		/* dst = *(u64 *)(ul) (src + ARENA_VM_START + off) */
1514 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
1515 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
1516 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
1517 		case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
1518 
1519 			EMIT(PPC_RAW_ADD(tmp1_reg, src_reg, bpf_to_ppc(ARENA_VM_START)));
1520 
1521 			switch (size) {
1522 			case BPF_B:
1523 				EMIT(PPC_RAW_LBZ(dst_reg, tmp1_reg, off));
1524 				break;
1525 			case BPF_H:
1526 				EMIT(PPC_RAW_LHZ(dst_reg, tmp1_reg, off));
1527 				break;
1528 			case BPF_W:
1529 				EMIT(PPC_RAW_LWZ(dst_reg, tmp1_reg, off));
1530 				break;
1531 			case BPF_DW:
1532 				if (off % 4) {
1533 					EMIT(PPC_RAW_LI(tmp2_reg, off));
1534 					EMIT(PPC_RAW_LDX(dst_reg, tmp1_reg, tmp2_reg));
1535 				} else {
1536 					EMIT(PPC_RAW_LD(dst_reg, tmp1_reg, off));
1537 				}
1538 				break;
1539 			}
1540 
1541 			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
1542 				addrs[++i] = ctx->idx * 4;
1543 
1544 			ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1545 						    ctx->idx - 1, 4, dst_reg, code);
1546 			if (ret)
1547 				return ret;
1548 			break;
1549 
1550 		/*
1551 		 * Doubleword load
1552 		 * 16 byte instruction that uses two 'struct bpf_insn'
1553 		 */
1554 		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
1555 			imm64 = ((u64)(u32) insn[i].imm) |
1556 				    (((u64)(u32) insn[i+1].imm) << 32);
1557 			PPC_LI64(dst_reg, imm64);
1558 			/* Adjust for two bpf instructions */
1559 			addrs[++i] = ctx->idx * 4;
1560 			break;
1561 
1562 		/*
1563 		 * Return/Exit
1564 		 */
1565 		case BPF_JMP | BPF_EXIT:
1566 			/*
1567 			 * If this isn't the very last instruction, branch to
1568 			 * the epilogue. If we _are_ the last instruction,
1569 			 * we'll just fall through to the epilogue.
1570 			 */
1571 			if (i != flen - 1) {
1572 				ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
1573 				if (ret)
1574 					return ret;
1575 			}
1576 			/* else fall through to the epilogue */
1577 			break;
1578 
1579 		/*
1580 		 * Call kernel helper or bpf function
1581 		 */
1582 		case BPF_JMP | BPF_CALL:
1583 			ctx->seen |= SEEN_FUNC;
1584 
1585 			if (src_reg == bpf_to_ppc(BPF_REG_0)) {
1586 				if (imm == BPF_FUNC_get_smp_processor_id) {
1587 					EMIT(PPC_RAW_LHZ(src_reg, _R13, offsetof(struct paca_struct, paca_index)));
1588 					break;
1589 				} else if (imm == BPF_FUNC_get_current_task ||
1590 					   imm == BPF_FUNC_get_current_task_btf) {
1591 					EMIT(PPC_RAW_LD(src_reg, _R13, offsetof(struct paca_struct, __current)));
1592 					break;
1593 				}
1594 			}
1595 
1596 			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
1597 						    &func_addr, &func_addr_fixed);
1598 			if (ret < 0)
1599 				return ret;
1600 
1601 			ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
1602 			if (ret)
1603 				return ret;
1604 
1605 			/* move return value from r3 to BPF_REG_0 */
1606 			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
1607 			break;
1608 
1609 		/*
1610 		 * Jumps and branches
1611 		 */
1612 		case BPF_JMP | BPF_JA:
1613 			PPC_JMP(addrs[i + 1 + off]);
1614 			break;
1615 		case BPF_JMP32 | BPF_JA:
1616 			PPC_JMP(addrs[i + 1 + imm]);
1617 			break;
1618 
1619 		case BPF_JMP | BPF_JGT | BPF_K:
1620 		case BPF_JMP | BPF_JGT | BPF_X:
1621 		case BPF_JMP | BPF_JSGT | BPF_K:
1622 		case BPF_JMP | BPF_JSGT | BPF_X:
1623 		case BPF_JMP32 | BPF_JGT | BPF_K:
1624 		case BPF_JMP32 | BPF_JGT | BPF_X:
1625 		case BPF_JMP32 | BPF_JSGT | BPF_K:
1626 		case BPF_JMP32 | BPF_JSGT | BPF_X:
1627 			true_cond = COND_GT;
1628 			goto cond_branch;
1629 		case BPF_JMP | BPF_JLT | BPF_K:
1630 		case BPF_JMP | BPF_JLT | BPF_X:
1631 		case BPF_JMP | BPF_JSLT | BPF_K:
1632 		case BPF_JMP | BPF_JSLT | BPF_X:
1633 		case BPF_JMP32 | BPF_JLT | BPF_K:
1634 		case BPF_JMP32 | BPF_JLT | BPF_X:
1635 		case BPF_JMP32 | BPF_JSLT | BPF_K:
1636 		case BPF_JMP32 | BPF_JSLT | BPF_X:
1637 			true_cond = COND_LT;
1638 			goto cond_branch;
1639 		case BPF_JMP | BPF_JGE | BPF_K:
1640 		case BPF_JMP | BPF_JGE | BPF_X:
1641 		case BPF_JMP | BPF_JSGE | BPF_K:
1642 		case BPF_JMP | BPF_JSGE | BPF_X:
1643 		case BPF_JMP32 | BPF_JGE | BPF_K:
1644 		case BPF_JMP32 | BPF_JGE | BPF_X:
1645 		case BPF_JMP32 | BPF_JSGE | BPF_K:
1646 		case BPF_JMP32 | BPF_JSGE | BPF_X:
1647 			true_cond = COND_GE;
1648 			goto cond_branch;
1649 		case BPF_JMP | BPF_JLE | BPF_K:
1650 		case BPF_JMP | BPF_JLE | BPF_X:
1651 		case BPF_JMP | BPF_JSLE | BPF_K:
1652 		case BPF_JMP | BPF_JSLE | BPF_X:
1653 		case BPF_JMP32 | BPF_JLE | BPF_K:
1654 		case BPF_JMP32 | BPF_JLE | BPF_X:
1655 		case BPF_JMP32 | BPF_JSLE | BPF_K:
1656 		case BPF_JMP32 | BPF_JSLE | BPF_X:
1657 			true_cond = COND_LE;
1658 			goto cond_branch;
1659 		case BPF_JMP | BPF_JEQ | BPF_K:
1660 		case BPF_JMP | BPF_JEQ | BPF_X:
1661 		case BPF_JMP32 | BPF_JEQ | BPF_K:
1662 		case BPF_JMP32 | BPF_JEQ | BPF_X:
1663 			true_cond = COND_EQ;
1664 			goto cond_branch;
1665 		case BPF_JMP | BPF_JNE | BPF_K:
1666 		case BPF_JMP | BPF_JNE | BPF_X:
1667 		case BPF_JMP32 | BPF_JNE | BPF_K:
1668 		case BPF_JMP32 | BPF_JNE | BPF_X:
1669 			true_cond = COND_NE;
1670 			goto cond_branch;
1671 		case BPF_JMP | BPF_JSET | BPF_K:
1672 		case BPF_JMP | BPF_JSET | BPF_X:
1673 		case BPF_JMP32 | BPF_JSET | BPF_K:
1674 		case BPF_JMP32 | BPF_JSET | BPF_X:
1675 			true_cond = COND_NE;
1676 			/* Fall through */
1677 
1678 cond_branch:
1679 			switch (code) {
1680 			case BPF_JMP | BPF_JGT | BPF_X:
1681 			case BPF_JMP | BPF_JLT | BPF_X:
1682 			case BPF_JMP | BPF_JGE | BPF_X:
1683 			case BPF_JMP | BPF_JLE | BPF_X:
1684 			case BPF_JMP | BPF_JEQ | BPF_X:
1685 			case BPF_JMP | BPF_JNE | BPF_X:
1686 			case BPF_JMP32 | BPF_JGT | BPF_X:
1687 			case BPF_JMP32 | BPF_JLT | BPF_X:
1688 			case BPF_JMP32 | BPF_JGE | BPF_X:
1689 			case BPF_JMP32 | BPF_JLE | BPF_X:
1690 			case BPF_JMP32 | BPF_JEQ | BPF_X:
1691 			case BPF_JMP32 | BPF_JNE | BPF_X:
1692 				/* unsigned comparison */
1693 				if (BPF_CLASS(code) == BPF_JMP32)
1694 					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
1695 				else
1696 					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
1697 				break;
1698 			case BPF_JMP | BPF_JSGT | BPF_X:
1699 			case BPF_JMP | BPF_JSLT | BPF_X:
1700 			case BPF_JMP | BPF_JSGE | BPF_X:
1701 			case BPF_JMP | BPF_JSLE | BPF_X:
1702 			case BPF_JMP32 | BPF_JSGT | BPF_X:
1703 			case BPF_JMP32 | BPF_JSLT | BPF_X:
1704 			case BPF_JMP32 | BPF_JSGE | BPF_X:
1705 			case BPF_JMP32 | BPF_JSLE | BPF_X:
1706 				/* signed comparison */
1707 				if (BPF_CLASS(code) == BPF_JMP32)
1708 					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
1709 				else
1710 					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
1711 				break;
1712 			case BPF_JMP | BPF_JSET | BPF_X:
1713 			case BPF_JMP32 | BPF_JSET | BPF_X:
1714 				if (BPF_CLASS(code) == BPF_JMP) {
1715 					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
1716 				} else {
1717 					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
1718 					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
1719 				}
1720 				break;
1721 			case BPF_JMP | BPF_JNE | BPF_K:
1722 			case BPF_JMP | BPF_JEQ | BPF_K:
1723 			case BPF_JMP | BPF_JGT | BPF_K:
1724 			case BPF_JMP | BPF_JLT | BPF_K:
1725 			case BPF_JMP | BPF_JGE | BPF_K:
1726 			case BPF_JMP | BPF_JLE | BPF_K:
1727 			case BPF_JMP32 | BPF_JNE | BPF_K:
1728 			case BPF_JMP32 | BPF_JEQ | BPF_K:
1729 			case BPF_JMP32 | BPF_JGT | BPF_K:
1730 			case BPF_JMP32 | BPF_JLT | BPF_K:
1731 			case BPF_JMP32 | BPF_JGE | BPF_K:
1732 			case BPF_JMP32 | BPF_JLE | BPF_K:
1733 			{
1734 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1735 
1736 				/*
1737 				 * Need sign-extended load, so only positive
1738 				 * values can be used as imm in cmpldi
1739 				 */
1740 				if (imm >= 0 && imm < 32768) {
1741 					if (is_jmp32)
1742 						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
1743 					else
1744 						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
1745 				} else {
1746 					/* sign-extending load */
1747 					PPC_LI32(tmp1_reg, imm);
1748 					/* ... but unsigned comparison */
1749 					if (is_jmp32)
1750 						EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
1751 					else
1752 						EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
1753 				}
1754 				break;
1755 			}
1756 			case BPF_JMP | BPF_JSGT | BPF_K:
1757 			case BPF_JMP | BPF_JSLT | BPF_K:
1758 			case BPF_JMP | BPF_JSGE | BPF_K:
1759 			case BPF_JMP | BPF_JSLE | BPF_K:
1760 			case BPF_JMP32 | BPF_JSGT | BPF_K:
1761 			case BPF_JMP32 | BPF_JSLT | BPF_K:
1762 			case BPF_JMP32 | BPF_JSGE | BPF_K:
1763 			case BPF_JMP32 | BPF_JSLE | BPF_K:
1764 			{
1765 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1766 
1767 				/*
1768 				 * signed comparison, so any 16-bit value
1769 				 * can be used in cmpdi
1770 				 */
1771 				if (imm >= -32768 && imm < 32768) {
1772 					if (is_jmp32)
1773 						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
1774 					else
1775 						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
1776 				} else {
1777 					PPC_LI32(tmp1_reg, imm);
1778 					if (is_jmp32)
1779 						EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
1780 					else
1781 						EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
1782 				}
1783 				break;
1784 			}
1785 			case BPF_JMP | BPF_JSET | BPF_K:
1786 			case BPF_JMP32 | BPF_JSET | BPF_K:
1787 				/* andi does not sign-extend the immediate */
1788 				if (imm >= 0 && imm < 32768)
1789 					/* PPC_ANDI is _only/always_ dot-form */
1790 					EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
1791 				else {
1792 					PPC_LI32(tmp1_reg, imm);
1793 					if (BPF_CLASS(code) == BPF_JMP) {
1794 						EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
1795 								     tmp1_reg));
1796 					} else {
1797 						EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
1798 						EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
1799 									0, 0, 31));
1800 					}
1801 				}
1802 				break;
1803 			}
1804 			PPC_BCC(true_cond, addrs[i + 1 + off]);
1805 			break;
1806 
1807 		/*
1808 		 * Tail call
1809 		 */
1810 		case BPF_JMP | BPF_TAIL_CALL:
1811 			ctx->seen |= SEEN_TAILCALL;
1812 			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
1813 			if (ret < 0)
1814 				return ret;
1815 			break;
1816 
1817 		default:
1818 			/*
1819 			 * The filter contains something cruel & unusual.
1820 			 * We don't handle it, but also there shouldn't be
1821 			 * anything missing from our list.
1822 			 */
1823 			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1824 					code, i);
1825 			return -ENOTSUPP;
1826 		}
1827 	}
1828 
1829 	/* Set end-of-body-code address for exit. */
1830 	addrs[i] = ctx->idx * 4;
1831 
1832 	return 0;
1833 }
1834