1b886d83cSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2156d0e29SNaveen N. Rao /*
3156d0e29SNaveen N. Rao * bpf_jit_comp64.c: eBPF JIT compiler
4156d0e29SNaveen N. Rao *
5156d0e29SNaveen N. Rao * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
6156d0e29SNaveen N. Rao * IBM Corporation
7156d0e29SNaveen N. Rao *
8156d0e29SNaveen N. Rao * Based on the powerpc classic BPF JIT compiler by Matt Evans
9156d0e29SNaveen N. Rao */
10156d0e29SNaveen N. Rao #include <linux/moduleloader.h>
11156d0e29SNaveen N. Rao #include <asm/cacheflush.h>
12ec0c464cSChristophe Leroy #include <asm/asm-compat.h>
13156d0e29SNaveen N. Rao #include <linux/netdevice.h>
14156d0e29SNaveen N. Rao #include <linux/filter.h>
15156d0e29SNaveen N. Rao #include <linux/if_vlan.h>
16156d0e29SNaveen N. Rao #include <asm/kprobes.h>
17ce076141SNaveen N. Rao #include <linux/bpf.h>
18b7540d62SNaveen N. Rao #include <asm/security_features.h>
19156d0e29SNaveen N. Rao
20576a6c3aSNaveen N. Rao #include "bpf_jit.h"
21576a6c3aSNaveen N. Rao
/*
 * Stack layout:
 * Ensure the top half (up to local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 5*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 16		|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */
35576a6c3aSNaveen N. Rao
/* for gpr non volatile registers BPF_REG_6 to 10 */
37576a6c3aSNaveen N. Rao #define BPF_PPC_STACK_SAVE (5*8)
38576a6c3aSNaveen N. Rao /* for bpf JIT code internal usage */
39576a6c3aSNaveen N. Rao #define BPF_PPC_STACK_LOCALS 24
40576a6c3aSNaveen N. Rao /* stack frame excluding BPF stack, ensure this is quadword aligned */
41576a6c3aSNaveen N. Rao #define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
42576a6c3aSNaveen N. Rao BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
43576a6c3aSNaveen N. Rao
44576a6c3aSNaveen N. Rao /* BPF register usage */
45576a6c3aSNaveen N. Rao #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
46576a6c3aSNaveen N. Rao #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
47576a6c3aSNaveen N. Rao
48576a6c3aSNaveen N. Rao /* BPF to ppc register mappings */
/*
 * Populate ctx->b2p[] with the BPF -> powerpc GPR mapping used by this
 * 64-bit JIT.  Argument/return registers follow the ELF ABI volatile
 * registers; BPF_REG_6..9 and the frame pointer live in non-volatiles
 * so they survive helper calls.
 */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
	/* function return value */
	ctx->b2p[BPF_REG_0] = _R8;
	/* function arguments: r3..r7 are the ABI argument registers */
	ctx->b2p[BPF_REG_1] = _R3;
	ctx->b2p[BPF_REG_2] = _R4;
	ctx->b2p[BPF_REG_3] = _R5;
	ctx->b2p[BPF_REG_4] = _R6;
	ctx->b2p[BPF_REG_5] = _R7;
	/* non volatile registers -- preserved across calls */
	ctx->b2p[BPF_REG_6] = _R27;
	ctx->b2p[BPF_REG_7] = _R28;
	ctx->b2p[BPF_REG_8] = _R29;
	ctx->b2p[BPF_REG_9] = _R30;
	/* frame pointer aka BPF_REG_10 */
	ctx->b2p[BPF_REG_FP] = _R31;
	/* eBPF jit internal registers (scratch, volatile) */
	ctx->b2p[BPF_REG_AX] = _R12;
	ctx->b2p[TMP_REG_1] = _R9;
	ctx->b2p[TMP_REG_2] = _R10;
}
71576a6c3aSNaveen N. Rao
72576a6c3aSNaveen N. Rao /* PPC NVR range -- update this if we ever use NVRs below r27 */
73036d559cSNaveen N. Rao #define BPF_PPC_NVR_MIN _R27
74156d0e29SNaveen N. Rao
bpf_has_stack_frame(struct codegen_context * ctx)75156d0e29SNaveen N. Rao static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
76156d0e29SNaveen N. Rao {
77156d0e29SNaveen N. Rao /*
78156d0e29SNaveen N. Rao * We only need a stack frame if:
79156d0e29SNaveen N. Rao * - we call other functions (kernel helpers), or
80156d0e29SNaveen N. Rao * - the bpf program uses its stack area
81156d0e29SNaveen N. Rao * The latter condition is deduced from the usage of BPF_REG_FP
82156d0e29SNaveen N. Rao */
8349c3af43SNaveen N. Rao return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
84156d0e29SNaveen N. Rao }
85156d0e29SNaveen N. Rao
867b847f52SNaveen N. Rao /*
877b847f52SNaveen N. Rao * When not setting up our own stackframe, the redzone usage is:
887b847f52SNaveen N. Rao *
897b847f52SNaveen N. Rao * [ prev sp ] <-------------
907b847f52SNaveen N. Rao * [ ... ] |
917b847f52SNaveen N. Rao * sp (r1) ---> [ stack pointer ] --------------
92b7540d62SNaveen N. Rao * [ nv gpr save area ] 5*8
937b847f52SNaveen N. Rao * [ tail_call_cnt ] 8
94b7540d62SNaveen N. Rao * [ local_tmp_var ] 16
957b847f52SNaveen N. Rao * [ unused red zone ] 208 bytes protected
967b847f52SNaveen N. Rao */
bpf_jit_stack_local(struct codegen_context * ctx)977b847f52SNaveen N. Rao static int bpf_jit_stack_local(struct codegen_context *ctx)
987b847f52SNaveen N. Rao {
997b847f52SNaveen N. Rao if (bpf_has_stack_frame(ctx))
100ac0761ebSSandipan Das return STACK_FRAME_MIN_SIZE + ctx->stack_size;
1017b847f52SNaveen N. Rao else
102b7540d62SNaveen N. Rao return -(BPF_PPC_STACK_SAVE + 24);
1037b847f52SNaveen N. Rao }
1047b847f52SNaveen N. Rao
/* tail_call_cnt lives 16 bytes past local_tmp_var (see stack layout above) */
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	int local_off = bpf_jit_stack_local(ctx);

	return local_off + 16;
}
109ce076141SNaveen N. Rao
bpf_jit_stack_offsetof(struct codegen_context * ctx,int reg)1107b847f52SNaveen N. Rao static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
1117b847f52SNaveen N. Rao {
1127b847f52SNaveen N. Rao if (reg >= BPF_PPC_NVR_MIN && reg < 32)
113ac0761ebSSandipan Das return (bpf_has_stack_frame(ctx) ?
114ac0761ebSSandipan Das (BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
1157b847f52SNaveen N. Rao - (8 * (32 - reg));
1167b847f52SNaveen N. Rao
1177b847f52SNaveen N. Rao pr_err("BPF JIT is asking about unknown registers");
1187b847f52SNaveen N. Rao BUG();
1197b847f52SNaveen N. Rao }
1207b847f52SNaveen N. Rao
/*
 * No register reallocation is done for the 64-bit JIT -- the static
 * mapping set up in bpf_jit_init_reg_mapping() is always used.  This
 * stub satisfies the common bpf_jit interface.
 */
void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}
12440272035SChristophe Leroy
/*
 * Emit the program prologue: kernel TOC load (ELFv2, non-pcrel),
 * tail_call_cnt initialization, optional stack frame + LR save, NV GPR
 * saves, and frame-pointer setup.  The first instructions are skipped
 * when entered via a tail call, so their count must stay in sync with
 * bpf_tailcall_prologue_size in bpf_jit_emit_tail_call().
 */
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

#ifndef CONFIG_PPC_KERNEL_PCREL
	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
#endif

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
		/* this goes in the redzone */
		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
	} else {
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
		}

		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
175156d0e29SNaveen N. Rao
/*
 * Undo the prologue: reload any saved NV GPRs and, if we set up a stack
 * frame, pop it and restore LR.  Shared by the normal epilogue and the
 * tail-call path (which must tear down before jumping to the next prog).
 */
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
			EMIT(PPC_RAW_MTLR(_R0));
		}
	}
}
194ce076141SNaveen N. Rao
/*
 * Emit the program epilogue: common teardown, then move the BPF return
 * value into r3 (the ABI return register) and return to the caller.
 */
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));

	EMIT(PPC_RAW_BLR());
}
204156d0e29SNaveen N. Rao
2052ecfe59cSHari Bathini static int
bpf_jit_emit_func_call_hlp(u32 * image,u32 * fimage,struct codegen_context * ctx,u64 func)2062ecfe59cSHari Bathini bpf_jit_emit_func_call_hlp(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
207e2c95a61SDaniel Borkmann {
20843d636f8SNaveen N. Rao unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
209feb63072SNaveen N. Rao long reladdr;
21043d636f8SNaveen N. Rao
21161688a82SHari Bathini if (WARN_ON_ONCE(!kernel_text_address(func_addr)))
21243d636f8SNaveen N. Rao return -EINVAL;
21343d636f8SNaveen N. Rao
21461688a82SHari Bathini #ifdef CONFIG_PPC_KERNEL_PCREL
2152ecfe59cSHari Bathini reladdr = func_addr - local_paca->kernelbase;
2167e3a68beSNicholas Piggin
21761688a82SHari Bathini if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
2182ecfe59cSHari Bathini EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
2192ecfe59cSHari Bathini /* Align for subsequent prefix instruction */
2202ecfe59cSHari Bathini if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
2212ecfe59cSHari Bathini EMIT(PPC_RAW_NOP());
2222ecfe59cSHari Bathini /* paddi r12,r12,addr */
2232ecfe59cSHari Bathini EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
2242ecfe59cSHari Bathini EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
2257e3a68beSNicholas Piggin } else {
22661688a82SHari Bathini unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx);
22761688a82SHari Bathini bool alignment_needed = !IS_ALIGNED(pc, 8);
22861688a82SHari Bathini
22961688a82SHari Bathini reladdr = func_addr - (alignment_needed ? pc + 4 : pc);
23061688a82SHari Bathini
23161688a82SHari Bathini if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
23261688a82SHari Bathini if (alignment_needed)
23361688a82SHari Bathini EMIT(PPC_RAW_NOP());
23461688a82SHari Bathini /* pla r12,addr */
23561688a82SHari Bathini EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
23661688a82SHari Bathini EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
23761688a82SHari Bathini } else {
23861688a82SHari Bathini /* We can clobber r12 */
23961688a82SHari Bathini PPC_LI64(_R12, func);
24061688a82SHari Bathini }
24161688a82SHari Bathini }
24261688a82SHari Bathini EMIT(PPC_RAW_MTCTR(_R12));
24361688a82SHari Bathini EMIT(PPC_RAW_BCTRL());
24461688a82SHari Bathini #else
24561688a82SHari Bathini if (core_kernel_text(func_addr)) {
246feb63072SNaveen N. Rao reladdr = func_addr - kernel_toc_addr();
247feb63072SNaveen N. Rao if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
248feb63072SNaveen N. Rao pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
249feb63072SNaveen N. Rao return -ERANGE;
250feb63072SNaveen N. Rao }
251feb63072SNaveen N. Rao
252feb63072SNaveen N. Rao EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
253feb63072SNaveen N. Rao EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
254feb63072SNaveen N. Rao EMIT(PPC_RAW_MTCTR(_R12));
25520ccb004SNaveen N. Rao EMIT(PPC_RAW_BCTRL());
25661688a82SHari Bathini } else {
25761688a82SHari Bathini if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) {
25861688a82SHari Bathini /* func points to the function descriptor */
25961688a82SHari Bathini PPC_LI64(bpf_to_ppc(TMP_REG_2), func);
26061688a82SHari Bathini /* Load actual entry point from function descriptor */
26161688a82SHari Bathini EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
26261688a82SHari Bathini /* ... and move it to CTR */
26361688a82SHari Bathini EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
26461688a82SHari Bathini /*
26561688a82SHari Bathini * Load TOC from function descriptor at offset 8.
26661688a82SHari Bathini * We can clobber r2 since we get called through a
26761688a82SHari Bathini * function pointer (so caller will save/restore r2).
26861688a82SHari Bathini */
26961688a82SHari Bathini EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
27061688a82SHari Bathini } else {
27161688a82SHari Bathini PPC_LI64(_R12, func);
27261688a82SHari Bathini EMIT(PPC_RAW_MTCTR(_R12));
27361688a82SHari Bathini }
27461688a82SHari Bathini EMIT(PPC_RAW_BCTRL());
27561688a82SHari Bathini /*
27661688a82SHari Bathini * Load r2 with kernel TOC as kernel TOC is used if function address falls
27761688a82SHari Bathini * within core kernel text.
27861688a82SHari Bathini */
27961688a82SHari Bathini EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
28061688a82SHari Bathini }
28161688a82SHari Bathini #endif
28243d636f8SNaveen N. Rao
28343d636f8SNaveen N. Rao return 0;
284e2c95a61SDaniel Borkmann }
285e2c95a61SDaniel Borkmann
/*
 * Emit a bpf-to-bpf call to @func.  The target address is only final in
 * the last extra pass, so the PPC_LI64 load is NOP-padded to a fixed
 * five-instruction sequence to keep all code offsets stable between
 * passes.  Returns 0 on success, -EINVAL for module addresses (which
 * must go through bpf_jit_emit_func_call_hlp instead).
 */
int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	if (WARN_ON_ONCE(func && is_module_text_address(func)))
		return -EINVAL;

	/* skip past descriptor if elf v1 */
	func += FUNCTION_DESCR_SIZE;

	/* Load function address into r12 */
	PPC_LI64(_R12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to increase,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	if (!image)
		for (i = ctx->idx - ctx_idx; i < 5; i++)
			EMIT(PPC_RAW_NOP());

	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());

	return 0;
}
318ce076141SNaveen N. Rao
/*
 * Emit the bpf_tail_call() sequence: bounds/limit checks, tail_call_cnt
 * bump, target prog lookup, then a jump into the target's body past its
 * prologue.  All failure checks branch to @out (the instruction after
 * this sequence), where execution falls through to the next insn.
 */
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
	int b2p_index = bpf_to_ppc(BPF_REG_3);
	int bpf_tailcall_prologue_size = 8;

	/* Must match the instruction count emitted by bpf_jit_build_prologue() */
	if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		bpf_tailcall_prologue_size += 4; /* skip past the toc load */

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	/* zero-extend the 32-bit index before the unsigned compare */
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * tail_call_cnt++;
	 */
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
	PPC_BCC_SHORT(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}
383ce076141SNaveen N. Rao
384b7540d62SNaveen N. Rao /*
385b7540d62SNaveen N. Rao * We spill into the redzone always, even if the bpf program has its own stackframe.
386b7540d62SNaveen N. Rao * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
387b7540d62SNaveen N. Rao */
388b7540d62SNaveen N. Rao void bpf_stf_barrier(void);
389b7540d62SNaveen N. Rao
390b7540d62SNaveen N. Rao asm (
391b7540d62SNaveen N. Rao " .global bpf_stf_barrier ;"
392b7540d62SNaveen N. Rao " bpf_stf_barrier: ;"
393b7540d62SNaveen N. Rao " std 21,-64(1) ;"
394b7540d62SNaveen N. Rao " std 22,-56(1) ;"
395b7540d62SNaveen N. Rao " sync ;"
396b7540d62SNaveen N. Rao " ld 21,-64(1) ;"
397b7540d62SNaveen N. Rao " ld 22,-56(1) ;"
398b7540d62SNaveen N. Rao " ori 31,31,0 ;"
399b7540d62SNaveen N. Rao " .rept 14 ;"
400b7540d62SNaveen N. Rao " b 1f ;"
401b7540d62SNaveen N. Rao " 1: ;"
402b7540d62SNaveen N. Rao " .endr ;"
403b7540d62SNaveen N. Rao " blr ;"
404b7540d62SNaveen N. Rao );
405b7540d62SNaveen N. Rao
406156d0e29SNaveen N. Rao /* Assemble the body code between the prologue & epilogue */
bpf_jit_build_body(struct bpf_prog * fp,u32 * image,u32 * fimage,struct codegen_context * ctx,u32 * addrs,int pass,bool extra_pass)40790d862f3SHari Bathini int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
40885e03115SChristophe Leroy u32 *addrs, int pass, bool extra_pass)
409156d0e29SNaveen N. Rao {
410b7540d62SNaveen N. Rao enum stf_barrier_type stf_barrier = stf_barrier_type_get();
411156d0e29SNaveen N. Rao const struct bpf_insn *insn = fp->insnsi;
412156d0e29SNaveen N. Rao int flen = fp->len;
413e2c95a61SDaniel Borkmann int i, ret;
414156d0e29SNaveen N. Rao
415156d0e29SNaveen N. Rao /* Start of epilogue code - will only be valid 2nd pass onwards */
416156d0e29SNaveen N. Rao u32 exit_addr = addrs[flen];
417156d0e29SNaveen N. Rao
418156d0e29SNaveen N. Rao for (i = 0; i < flen; i++) {
419156d0e29SNaveen N. Rao u32 code = insn[i].code;
42049c3af43SNaveen N. Rao u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
42149c3af43SNaveen N. Rao u32 src_reg = bpf_to_ppc(insn[i].src_reg);
422efa95f03SHari Bathini u32 size = BPF_SIZE(code);
42349c3af43SNaveen N. Rao u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
42449c3af43SNaveen N. Rao u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
4251e82dfaaSHari Bathini u32 save_reg, ret_reg;
426156d0e29SNaveen N. Rao s16 off = insn[i].off;
427156d0e29SNaveen N. Rao s32 imm = insn[i].imm;
428e2c95a61SDaniel Borkmann bool func_addr_fixed;
429e2c95a61SDaniel Borkmann u64 func_addr;
430156d0e29SNaveen N. Rao u64 imm64;
431156d0e29SNaveen N. Rao u32 true_cond;
432b9c1e60eSDaniel Borkmann u32 tmp_idx;
433f9320c49SNaveen N. Rao int j;
434156d0e29SNaveen N. Rao
435156d0e29SNaveen N. Rao /*
436156d0e29SNaveen N. Rao * addrs[] maps a BPF bytecode address into a real offset from
437156d0e29SNaveen N. Rao * the start of the body code.
438156d0e29SNaveen N. Rao */
439156d0e29SNaveen N. Rao addrs[i] = ctx->idx * 4;
440156d0e29SNaveen N. Rao
441156d0e29SNaveen N. Rao /*
442156d0e29SNaveen N. Rao * As an optimization, we note down which non-volatile registers
443156d0e29SNaveen N. Rao * are used so that we can only save/restore those in our
444156d0e29SNaveen N. Rao * prologue and epilogue. We do this here regardless of whether
445156d0e29SNaveen N. Rao * the actual BPF instruction uses src/dst registers or not
446156d0e29SNaveen N. Rao * (for instance, BPF_CALL does not use them). The expectation
447156d0e29SNaveen N. Rao * is that those instructions will have src_reg/dst_reg set to
448156d0e29SNaveen N. Rao * 0. Even otherwise, we just lose some prologue/epilogue
449156d0e29SNaveen N. Rao * optimization but everything else should work without
450156d0e29SNaveen N. Rao * any issues.
451156d0e29SNaveen N. Rao */
4527b847f52SNaveen N. Rao if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
453ed573b57SChristophe Leroy bpf_set_seen_register(ctx, dst_reg);
4547b847f52SNaveen N. Rao if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
455ed573b57SChristophe Leroy bpf_set_seen_register(ctx, src_reg);
456156d0e29SNaveen N. Rao
457156d0e29SNaveen N. Rao switch (code) {
458156d0e29SNaveen N. Rao /*
459156d0e29SNaveen N. Rao * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
460156d0e29SNaveen N. Rao */
461156d0e29SNaveen N. Rao case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
462156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
46306541865SBalamuruhan S EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
464156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
465156d0e29SNaveen N. Rao case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
466156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
4673a181237SBalamuruhan S EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
468156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
469156d0e29SNaveen N. Rao case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
470156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
4715855c4c1SNaveen N. Rao if (!imm) {
4725855c4c1SNaveen N. Rao goto bpf_alu32_trunc;
4735855c4c1SNaveen N. Rao } else if (imm >= -32768 && imm < 32768) {
4743a181237SBalamuruhan S EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
4755855c4c1SNaveen N. Rao } else {
4763a3fc9bfSJordan Niethe PPC_LI32(tmp1_reg, imm);
4773a3fc9bfSJordan Niethe EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
478156d0e29SNaveen N. Rao }
4795855c4c1SNaveen N. Rao goto bpf_alu32_trunc;
4805855c4c1SNaveen N. Rao case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
4815855c4c1SNaveen N. Rao case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
4825855c4c1SNaveen N. Rao if (!imm) {
4835855c4c1SNaveen N. Rao goto bpf_alu32_trunc;
4845855c4c1SNaveen N. Rao } else if (imm > -32768 && imm <= 32768) {
4855855c4c1SNaveen N. Rao EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
4865855c4c1SNaveen N. Rao } else {
4873a3fc9bfSJordan Niethe PPC_LI32(tmp1_reg, imm);
4883a3fc9bfSJordan Niethe EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
489156d0e29SNaveen N. Rao }
490156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
491156d0e29SNaveen N. Rao case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
492156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
493156d0e29SNaveen N. Rao if (BPF_CLASS(code) == BPF_ALU)
4943a181237SBalamuruhan S EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
495156d0e29SNaveen N. Rao else
4963a181237SBalamuruhan S EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
497156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
498156d0e29SNaveen N. Rao case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
499156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
500156d0e29SNaveen N. Rao if (imm >= -32768 && imm < 32768)
5013a181237SBalamuruhan S EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
502156d0e29SNaveen N. Rao else {
5033a3fc9bfSJordan Niethe PPC_LI32(tmp1_reg, imm);
504156d0e29SNaveen N. Rao if (BPF_CLASS(code) == BPF_ALU)
5053a3fc9bfSJordan Niethe EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
506156d0e29SNaveen N. Rao else
5073a3fc9bfSJordan Niethe EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
508156d0e29SNaveen N. Rao }
509156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
510156d0e29SNaveen N. Rao case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
511156d0e29SNaveen N. Rao case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
512156d0e29SNaveen N. Rao if (BPF_OP(code) == BPF_MOD) {
513*fde31832SArtem Savkov if (off)
514*fde31832SArtem Savkov EMIT(PPC_RAW_DIVW(tmp1_reg, dst_reg, src_reg));
515*fde31832SArtem Savkov else
5163a3fc9bfSJordan Niethe EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
517*fde31832SArtem Savkov
5183a3fc9bfSJordan Niethe EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
5193a3fc9bfSJordan Niethe EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
520156d0e29SNaveen N. Rao } else
521*fde31832SArtem Savkov if (off)
522*fde31832SArtem Savkov EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, src_reg));
523*fde31832SArtem Savkov else
5243a181237SBalamuruhan S EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
525156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
526156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
527156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
528156d0e29SNaveen N. Rao if (BPF_OP(code) == BPF_MOD) {
529*fde31832SArtem Savkov if (off)
530*fde31832SArtem Savkov EMIT(PPC_RAW_DIVD(tmp1_reg, dst_reg, src_reg));
531*fde31832SArtem Savkov else
5323a3fc9bfSJordan Niethe EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
5333a3fc9bfSJordan Niethe EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
5343a3fc9bfSJordan Niethe EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
535156d0e29SNaveen N. Rao } else
536*fde31832SArtem Savkov if (off)
537*fde31832SArtem Savkov EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, src_reg));
538*fde31832SArtem Savkov else
5393a181237SBalamuruhan S EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
540156d0e29SNaveen N. Rao break;
541156d0e29SNaveen N. Rao case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
542156d0e29SNaveen N. Rao case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
543156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
544156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
545156d0e29SNaveen N. Rao if (imm == 0)
546156d0e29SNaveen N. Rao return -EINVAL;
5478bbc9d82SNaveen N. Rao if (imm == 1) {
5488bbc9d82SNaveen N. Rao if (BPF_OP(code) == BPF_DIV) {
549156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
5508bbc9d82SNaveen N. Rao } else {
5518bbc9d82SNaveen N. Rao EMIT(PPC_RAW_LI(dst_reg, 0));
5528bbc9d82SNaveen N. Rao break;
5538bbc9d82SNaveen N. Rao }
5548bbc9d82SNaveen N. Rao }
555156d0e29SNaveen N. Rao
5563a3fc9bfSJordan Niethe PPC_LI32(tmp1_reg, imm);
557156d0e29SNaveen N. Rao switch (BPF_CLASS(code)) {
558156d0e29SNaveen N. Rao case BPF_ALU:
559156d0e29SNaveen N. Rao if (BPF_OP(code) == BPF_MOD) {
560*fde31832SArtem Savkov if (off)
561*fde31832SArtem Savkov EMIT(PPC_RAW_DIVW(tmp2_reg, dst_reg, tmp1_reg));
562*fde31832SArtem Savkov else
5633a3fc9bfSJordan Niethe EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
5643a3fc9bfSJordan Niethe EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
5653a3fc9bfSJordan Niethe EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
566156d0e29SNaveen N. Rao } else
567*fde31832SArtem Savkov if (off)
568*fde31832SArtem Savkov EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, tmp1_reg));
569*fde31832SArtem Savkov else
5703a3fc9bfSJordan Niethe EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
571156d0e29SNaveen N. Rao break;
572156d0e29SNaveen N. Rao case BPF_ALU64:
573156d0e29SNaveen N. Rao if (BPF_OP(code) == BPF_MOD) {
574*fde31832SArtem Savkov if (off)
575*fde31832SArtem Savkov EMIT(PPC_RAW_DIVD(tmp2_reg, dst_reg, tmp1_reg));
576*fde31832SArtem Savkov else
5773a3fc9bfSJordan Niethe EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
5783a3fc9bfSJordan Niethe EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
5793a3fc9bfSJordan Niethe EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
580156d0e29SNaveen N. Rao } else
581*fde31832SArtem Savkov if (off)
582*fde31832SArtem Savkov EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, tmp1_reg));
583*fde31832SArtem Savkov else
5843a3fc9bfSJordan Niethe EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
585156d0e29SNaveen N. Rao break;
586156d0e29SNaveen N. Rao }
587156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
588156d0e29SNaveen N. Rao case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
589156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_NEG: /* dst = -dst */
5903a181237SBalamuruhan S EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
591156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
592156d0e29SNaveen N. Rao
593156d0e29SNaveen N. Rao /*
594156d0e29SNaveen N. Rao * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
595156d0e29SNaveen N. Rao */
596156d0e29SNaveen N. Rao case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
597156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
5983a181237SBalamuruhan S EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
599156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
600156d0e29SNaveen N. Rao case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
601156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
602156d0e29SNaveen N. Rao if (!IMM_H(imm))
6033a181237SBalamuruhan S EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
604156d0e29SNaveen N. Rao else {
605156d0e29SNaveen N. Rao /* Sign-extended */
6063a3fc9bfSJordan Niethe PPC_LI32(tmp1_reg, imm);
6073a3fc9bfSJordan Niethe EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
608156d0e29SNaveen N. Rao }
609156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
610156d0e29SNaveen N. Rao case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
611156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
6123a181237SBalamuruhan S EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
613156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
614156d0e29SNaveen N. Rao case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
615156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
616156d0e29SNaveen N. Rao if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
617156d0e29SNaveen N. Rao /* Sign-extended */
6183a3fc9bfSJordan Niethe PPC_LI32(tmp1_reg, imm);
6193a3fc9bfSJordan Niethe EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
620156d0e29SNaveen N. Rao } else {
621156d0e29SNaveen N. Rao if (IMM_L(imm))
6223a181237SBalamuruhan S EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
623156d0e29SNaveen N. Rao if (IMM_H(imm))
6243a181237SBalamuruhan S EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
625156d0e29SNaveen N. Rao }
626156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
627156d0e29SNaveen N. Rao case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
628156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
6293a181237SBalamuruhan S EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
630156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
631156d0e29SNaveen N. Rao case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
632156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
633156d0e29SNaveen N. Rao if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
634156d0e29SNaveen N. Rao /* Sign-extended */
6353a3fc9bfSJordan Niethe PPC_LI32(tmp1_reg, imm);
6363a3fc9bfSJordan Niethe EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
637156d0e29SNaveen N. Rao } else {
638156d0e29SNaveen N. Rao if (IMM_L(imm))
6393a181237SBalamuruhan S EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
640156d0e29SNaveen N. Rao if (IMM_H(imm))
6413a181237SBalamuruhan S EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
642156d0e29SNaveen N. Rao }
643156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
644156d0e29SNaveen N. Rao case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
645156d0e29SNaveen N. Rao /* slw clears top 32 bits */
6463a181237SBalamuruhan S EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
647a4c92773SJiong Wang /* skip zero extension move, but set address map. */
648a4c92773SJiong Wang if (insn_is_zext(&insn[i + 1]))
649a4c92773SJiong Wang addrs[++i] = ctx->idx * 4;
650156d0e29SNaveen N. Rao break;
651156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
6523a181237SBalamuruhan S EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
653156d0e29SNaveen N. Rao break;
654156d0e29SNaveen N. Rao case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
655156d0e29SNaveen N. Rao /* with imm 0, we still need to clear top 32 bits */
6563a181237SBalamuruhan S EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
657a4c92773SJiong Wang if (insn_is_zext(&insn[i + 1]))
658a4c92773SJiong Wang addrs[++i] = ctx->idx * 4;
659156d0e29SNaveen N. Rao break;
660156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
661156d0e29SNaveen N. Rao if (imm != 0)
6623a181237SBalamuruhan S EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
663156d0e29SNaveen N. Rao break;
664156d0e29SNaveen N. Rao case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
6653a181237SBalamuruhan S EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
666a4c92773SJiong Wang if (insn_is_zext(&insn[i + 1]))
667a4c92773SJiong Wang addrs[++i] = ctx->idx * 4;
668156d0e29SNaveen N. Rao break;
669156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
6703a181237SBalamuruhan S EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
671156d0e29SNaveen N. Rao break;
672156d0e29SNaveen N. Rao case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
6733a181237SBalamuruhan S EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
674a4c92773SJiong Wang if (insn_is_zext(&insn[i + 1]))
675a4c92773SJiong Wang addrs[++i] = ctx->idx * 4;
676156d0e29SNaveen N. Rao break;
677156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
678156d0e29SNaveen N. Rao if (imm != 0)
6793a181237SBalamuruhan S EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
680156d0e29SNaveen N. Rao break;
68144cf43c0SJiong Wang case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
6823a181237SBalamuruhan S EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
68344cf43c0SJiong Wang goto bpf_alu32_trunc;
684156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
6853a181237SBalamuruhan S EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
686156d0e29SNaveen N. Rao break;
68744cf43c0SJiong Wang case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
6883a181237SBalamuruhan S EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
68944cf43c0SJiong Wang goto bpf_alu32_trunc;
690156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
691156d0e29SNaveen N. Rao if (imm != 0)
6923a181237SBalamuruhan S EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
693156d0e29SNaveen N. Rao break;
694156d0e29SNaveen N. Rao
695156d0e29SNaveen N. Rao /*
696156d0e29SNaveen N. Rao * MOV
697156d0e29SNaveen N. Rao */
698156d0e29SNaveen N. Rao case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
699156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
700a4c92773SJiong Wang if (imm == 1) {
701a4c92773SJiong Wang /* special mov32 for zext */
7023a181237SBalamuruhan S EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
703a4c92773SJiong Wang break;
704597b1710SArtem Savkov } else if (off == 8) {
705597b1710SArtem Savkov EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
706597b1710SArtem Savkov } else if (off == 16) {
707597b1710SArtem Savkov EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
708597b1710SArtem Savkov } else if (off == 32) {
709597b1710SArtem Savkov EMIT(PPC_RAW_EXTSW(dst_reg, src_reg));
710597b1710SArtem Savkov } else if (dst_reg != src_reg)
7113a181237SBalamuruhan S EMIT(PPC_RAW_MR(dst_reg, src_reg));
712156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
713156d0e29SNaveen N. Rao case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
714156d0e29SNaveen N. Rao case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
715156d0e29SNaveen N. Rao PPC_LI32(dst_reg, imm);
716156d0e29SNaveen N. Rao if (imm < 0)
717156d0e29SNaveen N. Rao goto bpf_alu32_trunc;
718a4c92773SJiong Wang else if (insn_is_zext(&insn[i + 1]))
719a4c92773SJiong Wang addrs[++i] = ctx->idx * 4;
720156d0e29SNaveen N. Rao break;
721156d0e29SNaveen N. Rao
722156d0e29SNaveen N. Rao bpf_alu32_trunc:
723156d0e29SNaveen N. Rao /* Truncate to 32-bits */
724a4c92773SJiong Wang if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
7253a181237SBalamuruhan S EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
726156d0e29SNaveen N. Rao break;
727156d0e29SNaveen N. Rao
728156d0e29SNaveen N. Rao /*
729156d0e29SNaveen N. Rao * BPF_FROM_BE/LE
730156d0e29SNaveen N. Rao */
731156d0e29SNaveen N. Rao case BPF_ALU | BPF_END | BPF_FROM_LE:
732156d0e29SNaveen N. Rao case BPF_ALU | BPF_END | BPF_FROM_BE:
733a71c0b09SArtem Savkov case BPF_ALU64 | BPF_END | BPF_FROM_LE:
734156d0e29SNaveen N. Rao #ifdef __BIG_ENDIAN__
735156d0e29SNaveen N. Rao if (BPF_SRC(code) == BPF_FROM_BE)
736156d0e29SNaveen N. Rao goto emit_clear;
737156d0e29SNaveen N. Rao #else /* !__BIG_ENDIAN__ */
738a71c0b09SArtem Savkov if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE)
739156d0e29SNaveen N. Rao goto emit_clear;
740156d0e29SNaveen N. Rao #endif
741156d0e29SNaveen N. Rao switch (imm) {
742156d0e29SNaveen N. Rao case 16:
743156d0e29SNaveen N. Rao /* Rotate 8 bits left & mask with 0x0000ff00 */
7443a3fc9bfSJordan Niethe EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
745156d0e29SNaveen N. Rao /* Rotate 8 bits right & insert LSB to reg */
7463a3fc9bfSJordan Niethe EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
747156d0e29SNaveen N. Rao /* Move result back to dst_reg */
7483a3fc9bfSJordan Niethe EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
749156d0e29SNaveen N. Rao break;
750156d0e29SNaveen N. Rao case 32:
751156d0e29SNaveen N. Rao /*
752156d0e29SNaveen N. Rao * Rotate word left by 8 bits:
753156d0e29SNaveen N. Rao * 2 bytes are already in their final position
754156d0e29SNaveen N. Rao * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
755156d0e29SNaveen N. Rao */
7563a3fc9bfSJordan Niethe EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
757156d0e29SNaveen N. Rao /* Rotate 24 bits and insert byte 1 */
7583a3fc9bfSJordan Niethe EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
759156d0e29SNaveen N. Rao /* Rotate 24 bits and insert byte 3 */
7603a3fc9bfSJordan Niethe EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
7613a3fc9bfSJordan Niethe EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
762156d0e29SNaveen N. Rao break;
763156d0e29SNaveen N. Rao case 64:
7643f5f766dSNaveen N. Rao /* Store the value to stack and then use byte-reverse loads */
765036d559cSNaveen N. Rao EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
7663a3fc9bfSJordan Niethe EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
7673f5f766dSNaveen N. Rao if (cpu_has_feature(CPU_FTR_ARCH_206)) {
7683a3fc9bfSJordan Niethe EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
7693f5f766dSNaveen N. Rao } else {
7703a3fc9bfSJordan Niethe EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
7713f5f766dSNaveen N. Rao if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
7723f5f766dSNaveen N. Rao EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
7733a3fc9bfSJordan Niethe EMIT(PPC_RAW_LI(tmp2_reg, 4));
7743a3fc9bfSJordan Niethe EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
7753f5f766dSNaveen N. Rao if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
7763a3fc9bfSJordan Niethe EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
7773a3fc9bfSJordan Niethe EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
7783f5f766dSNaveen N. Rao }
779156d0e29SNaveen N. Rao break;
780156d0e29SNaveen N. Rao }
781156d0e29SNaveen N. Rao break;
782156d0e29SNaveen N. Rao
783156d0e29SNaveen N. Rao emit_clear:
784156d0e29SNaveen N. Rao switch (imm) {
785156d0e29SNaveen N. Rao case 16:
786156d0e29SNaveen N. Rao /* zero-extend 16 bits into 64 bits */
7873a181237SBalamuruhan S EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
788a4c92773SJiong Wang if (insn_is_zext(&insn[i + 1]))
789a4c92773SJiong Wang addrs[++i] = ctx->idx * 4;
790156d0e29SNaveen N. Rao break;
791156d0e29SNaveen N. Rao case 32:
792a4c92773SJiong Wang if (!fp->aux->verifier_zext)
793156d0e29SNaveen N. Rao /* zero-extend 32 bits into 64 bits */
7943a181237SBalamuruhan S EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
795156d0e29SNaveen N. Rao break;
796156d0e29SNaveen N. Rao case 64:
797156d0e29SNaveen N. Rao /* nop */
798156d0e29SNaveen N. Rao break;
799156d0e29SNaveen N. Rao }
800156d0e29SNaveen N. Rao break;
801156d0e29SNaveen N. Rao
802156d0e29SNaveen N. Rao /*
803f5e81d11SDaniel Borkmann * BPF_ST NOSPEC (speculation barrier)
804f5e81d11SDaniel Borkmann */
805f5e81d11SDaniel Borkmann case BPF_ST | BPF_NOSPEC:
806b7540d62SNaveen N. Rao if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
807b7540d62SNaveen N. Rao !security_ftr_enabled(SEC_FTR_STF_BARRIER))
808b7540d62SNaveen N. Rao break;
809b7540d62SNaveen N. Rao
810b7540d62SNaveen N. Rao switch (stf_barrier) {
811b7540d62SNaveen N. Rao case STF_BARRIER_EIEIO:
812b7540d62SNaveen N. Rao EMIT(PPC_RAW_EIEIO() | 0x02000000);
813b7540d62SNaveen N. Rao break;
814b7540d62SNaveen N. Rao case STF_BARRIER_SYNC_ORI:
815b7540d62SNaveen N. Rao EMIT(PPC_RAW_SYNC());
8163a3fc9bfSJordan Niethe EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
817b7540d62SNaveen N. Rao EMIT(PPC_RAW_ORI(_R31, _R31, 0));
818b7540d62SNaveen N. Rao break;
819b7540d62SNaveen N. Rao case STF_BARRIER_FALLBACK:
820c2067f7fSNaveen N. Rao ctx->seen |= SEEN_FUNC;
821036d559cSNaveen N. Rao PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
822036d559cSNaveen N. Rao EMIT(PPC_RAW_MTCTR(_R12));
823b7540d62SNaveen N. Rao EMIT(PPC_RAW_BCTRL());
824b7540d62SNaveen N. Rao break;
825b7540d62SNaveen N. Rao case STF_BARRIER_NONE:
826b7540d62SNaveen N. Rao break;
827b7540d62SNaveen N. Rao }
828f5e81d11SDaniel Borkmann break;
829f5e81d11SDaniel Borkmann
830f5e81d11SDaniel Borkmann /*
831156d0e29SNaveen N. Rao * BPF_ST(X)
832156d0e29SNaveen N. Rao */
833156d0e29SNaveen N. Rao case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
834156d0e29SNaveen N. Rao case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
835156d0e29SNaveen N. Rao if (BPF_CLASS(code) == BPF_ST) {
8363a3fc9bfSJordan Niethe EMIT(PPC_RAW_LI(tmp1_reg, imm));
8373a3fc9bfSJordan Niethe src_reg = tmp1_reg;
838156d0e29SNaveen N. Rao }
8393a181237SBalamuruhan S EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
840156d0e29SNaveen N. Rao break;
841156d0e29SNaveen N. Rao case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
842156d0e29SNaveen N. Rao case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
843156d0e29SNaveen N. Rao if (BPF_CLASS(code) == BPF_ST) {
8443a3fc9bfSJordan Niethe EMIT(PPC_RAW_LI(tmp1_reg, imm));
8453a3fc9bfSJordan Niethe src_reg = tmp1_reg;
846156d0e29SNaveen N. Rao }
8473a181237SBalamuruhan S EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
848156d0e29SNaveen N. Rao break;
849156d0e29SNaveen N. Rao case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
850156d0e29SNaveen N. Rao case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
851156d0e29SNaveen N. Rao if (BPF_CLASS(code) == BPF_ST) {
8523a3fc9bfSJordan Niethe PPC_LI32(tmp1_reg, imm);
8533a3fc9bfSJordan Niethe src_reg = tmp1_reg;
854156d0e29SNaveen N. Rao }
8553a181237SBalamuruhan S EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
856156d0e29SNaveen N. Rao break;
857156d0e29SNaveen N. Rao case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
858156d0e29SNaveen N. Rao case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
859156d0e29SNaveen N. Rao if (BPF_CLASS(code) == BPF_ST) {
8603a3fc9bfSJordan Niethe PPC_LI32(tmp1_reg, imm);
8613a3fc9bfSJordan Niethe src_reg = tmp1_reg;
862156d0e29SNaveen N. Rao }
863794abc08SNaveen N. Rao if (off % 4) {
8643a3fc9bfSJordan Niethe EMIT(PPC_RAW_LI(tmp2_reg, off));
8653a3fc9bfSJordan Niethe EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
866794abc08SNaveen N. Rao } else {
867794abc08SNaveen N. Rao EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
868794abc08SNaveen N. Rao }
869156d0e29SNaveen N. Rao break;
870156d0e29SNaveen N. Rao
871156d0e29SNaveen N. Rao /*
87291c960b0SBrendan Jackman * BPF_STX ATOMIC (atomic ops)
873156d0e29SNaveen N. Rao */
87491c960b0SBrendan Jackman case BPF_STX | BPF_ATOMIC | BPF_W:
87565112709SHari Bathini case BPF_STX | BPF_ATOMIC | BPF_DW:
8761e82dfaaSHari Bathini save_reg = tmp2_reg;
8771e82dfaaSHari Bathini ret_reg = src_reg;
8781e82dfaaSHari Bathini
87965112709SHari Bathini /* Get offset into TMP_REG_1 */
88065112709SHari Bathini EMIT(PPC_RAW_LI(tmp1_reg, off));
881b1e7cee9SPuranjay Mohan /*
882b1e7cee9SPuranjay Mohan * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
883b1e7cee9SPuranjay Mohan * before and after the operation.
884b1e7cee9SPuranjay Mohan *
885b1e7cee9SPuranjay Mohan * This is a requirement in the Linux Kernel Memory Model.
886b1e7cee9SPuranjay Mohan * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
887b1e7cee9SPuranjay Mohan */
888b1e7cee9SPuranjay Mohan if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
889b1e7cee9SPuranjay Mohan EMIT(PPC_RAW_SYNC());
890b9c1e60eSDaniel Borkmann tmp_idx = ctx->idx * 4;
891156d0e29SNaveen N. Rao /* load value from memory into TMP_REG_2 */
89265112709SHari Bathini if (size == BPF_DW)
89365112709SHari Bathini EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
89465112709SHari Bathini else
89565112709SHari Bathini EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
89665112709SHari Bathini
897dbe6e245SHari Bathini /* Save old value in _R0 */
898dbe6e245SHari Bathini if (imm & BPF_FETCH)
899dbe6e245SHari Bathini EMIT(PPC_RAW_MR(_R0, tmp2_reg));
900dbe6e245SHari Bathini
90165112709SHari Bathini switch (imm) {
90265112709SHari Bathini case BPF_ADD:
903dbe6e245SHari Bathini case BPF_ADD | BPF_FETCH:
9043a3fc9bfSJordan Niethe EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
905156d0e29SNaveen N. Rao break;
90665112709SHari Bathini case BPF_AND:
907dbe6e245SHari Bathini case BPF_AND | BPF_FETCH:
90865112709SHari Bathini EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
90965112709SHari Bathini break;
91065112709SHari Bathini case BPF_OR:
911dbe6e245SHari Bathini case BPF_OR | BPF_FETCH:
91265112709SHari Bathini EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
91365112709SHari Bathini break;
91465112709SHari Bathini case BPF_XOR:
915dbe6e245SHari Bathini case BPF_XOR | BPF_FETCH:
91665112709SHari Bathini EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
91765112709SHari Bathini break;
9181e82dfaaSHari Bathini case BPF_CMPXCHG:
9191e82dfaaSHari Bathini /*
9201e82dfaaSHari Bathini * Return old value in BPF_REG_0 for BPF_CMPXCHG &
9211e82dfaaSHari Bathini * in src_reg for other cases.
9221e82dfaaSHari Bathini */
9231e82dfaaSHari Bathini ret_reg = bpf_to_ppc(BPF_REG_0);
9241e82dfaaSHari Bathini
9251e82dfaaSHari Bathini /* Compare with old value in BPF_R0 */
9261e82dfaaSHari Bathini if (size == BPF_DW)
9271e82dfaaSHari Bathini EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
9281e82dfaaSHari Bathini else
9291e82dfaaSHari Bathini EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
9301e82dfaaSHari Bathini /* Don't set if different from old value */
9311e82dfaaSHari Bathini PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
9321e82dfaaSHari Bathini fallthrough;
9331e82dfaaSHari Bathini case BPF_XCHG:
9341e82dfaaSHari Bathini save_reg = src_reg;
9351e82dfaaSHari Bathini break;
93665112709SHari Bathini default:
93791c960b0SBrendan Jackman pr_err_ratelimited(
93891c960b0SBrendan Jackman "eBPF filter atomic op code %02x (@%d) unsupported\n",
93991c960b0SBrendan Jackman code, i);
94065112709SHari Bathini return -EOPNOTSUPP;
94191c960b0SBrendan Jackman }
94291c960b0SBrendan Jackman
943dbe6e245SHari Bathini /* store new value */
94465112709SHari Bathini if (size == BPF_DW)
9451e82dfaaSHari Bathini EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
94665112709SHari Bathini else
9471e82dfaaSHari Bathini EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
94865112709SHari Bathini /* we're done if this succeeded */
949b9c1e60eSDaniel Borkmann PPC_BCC_SHORT(COND_NE, tmp_idx);
950dbe6e245SHari Bathini
9511e82dfaaSHari Bathini if (imm & BPF_FETCH) {
952b1e7cee9SPuranjay Mohan /* Emit 'sync' to enforce full ordering */
953b1e7cee9SPuranjay Mohan if (IS_ENABLED(CONFIG_SMP))
954b1e7cee9SPuranjay Mohan EMIT(PPC_RAW_SYNC());
9551e82dfaaSHari Bathini EMIT(PPC_RAW_MR(ret_reg, _R0));
9561e82dfaaSHari Bathini /*
9571e82dfaaSHari Bathini * Skip unnecessary zero-extension for 32-bit cmpxchg.
9581e82dfaaSHari Bathini * For context, see commit 39491867ace5.
9591e82dfaaSHari Bathini */
9601e82dfaaSHari Bathini if (size != BPF_DW && imm == BPF_CMPXCHG &&
9611e82dfaaSHari Bathini insn_is_zext(&insn[i + 1]))
9621e82dfaaSHari Bathini addrs[++i] = ctx->idx * 4;
9631e82dfaaSHari Bathini }
964156d0e29SNaveen N. Rao break;
965156d0e29SNaveen N. Rao
966156d0e29SNaveen N. Rao /*
967156d0e29SNaveen N. Rao * BPF_LDX
968156d0e29SNaveen N. Rao */
969156d0e29SNaveen N. Rao /* dst = *(u8 *)(ul) (src + off) */
970156d0e29SNaveen N. Rao case BPF_LDX | BPF_MEM | BPF_B:
971717756c9SArtem Savkov case BPF_LDX | BPF_MEMSX | BPF_B:
972983bdc02SRavi Bangoria case BPF_LDX | BPF_PROBE_MEM | BPF_B:
973717756c9SArtem Savkov case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
974156d0e29SNaveen N. Rao /* dst = *(u16 *)(ul) (src + off) */
975156d0e29SNaveen N. Rao case BPF_LDX | BPF_MEM | BPF_H:
976717756c9SArtem Savkov case BPF_LDX | BPF_MEMSX | BPF_H:
977983bdc02SRavi Bangoria case BPF_LDX | BPF_PROBE_MEM | BPF_H:
978717756c9SArtem Savkov case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
979156d0e29SNaveen N. Rao /* dst = *(u32 *)(ul) (src + off) */
980156d0e29SNaveen N. Rao case BPF_LDX | BPF_MEM | BPF_W:
981717756c9SArtem Savkov case BPF_LDX | BPF_MEMSX | BPF_W:
982983bdc02SRavi Bangoria case BPF_LDX | BPF_PROBE_MEM | BPF_W:
983717756c9SArtem Savkov case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
984156d0e29SNaveen N. Rao /* dst = *(u64 *)(ul) (src + off) */
985156d0e29SNaveen N. Rao case BPF_LDX | BPF_MEM | BPF_DW:
986983bdc02SRavi Bangoria case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
9879c70c714SRavi Bangoria /*
9889c70c714SRavi Bangoria * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
9899c70c714SRavi Bangoria * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
9909c70c714SRavi Bangoria * load only if addr is kernel address (see is_kernel_addr()), otherwise
9919c70c714SRavi Bangoria * set dst_reg=0 and move on.
9929c70c714SRavi Bangoria */
993717756c9SArtem Savkov if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
9943a3fc9bfSJordan Niethe EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
9959c70c714SRavi Bangoria if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
9963a3fc9bfSJordan Niethe PPC_LI64(tmp2_reg, 0x8000000000000000ul);
9979c70c714SRavi Bangoria else /* BOOK3S_64 */
9983a3fc9bfSJordan Niethe PPC_LI64(tmp2_reg, PAGE_OFFSET);
9993a3fc9bfSJordan Niethe EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
1000bafb5898SNaveen N. Rao PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
10019c70c714SRavi Bangoria EMIT(PPC_RAW_LI(dst_reg, 0));
10029c70c714SRavi Bangoria /*
1003794abc08SNaveen N. Rao * Check if 'off' is word aligned for BPF_DW, because
1004794abc08SNaveen N. Rao * we might generate two instructions.
10059c70c714SRavi Bangoria */
1006717756c9SArtem Savkov if ((BPF_SIZE(code) == BPF_DW ||
1007717756c9SArtem Savkov (BPF_SIZE(code) == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX)) &&
1008717756c9SArtem Savkov (off & 3))
10099c70c714SRavi Bangoria PPC_JMP((ctx->idx + 3) * 4);
10109c70c714SRavi Bangoria else
10119c70c714SRavi Bangoria PPC_JMP((ctx->idx + 2) * 4);
10129c70c714SRavi Bangoria }
10139c70c714SRavi Bangoria
1014717756c9SArtem Savkov if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
1015717756c9SArtem Savkov switch (size) {
1016717756c9SArtem Savkov case BPF_B:
1017717756c9SArtem Savkov EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
1018717756c9SArtem Savkov EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
1019717756c9SArtem Savkov break;
1020717756c9SArtem Savkov case BPF_H:
1021717756c9SArtem Savkov EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
1022717756c9SArtem Savkov break;
1023717756c9SArtem Savkov case BPF_W:
1024717756c9SArtem Savkov EMIT(PPC_RAW_LWA(dst_reg, src_reg, off));
1025717756c9SArtem Savkov break;
1026717756c9SArtem Savkov }
1027717756c9SArtem Savkov } else {
1028efa95f03SHari Bathini switch (size) {
1029efa95f03SHari Bathini case BPF_B:
1030efa95f03SHari Bathini EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
1031efa95f03SHari Bathini break;
1032efa95f03SHari Bathini case BPF_H:
1033efa95f03SHari Bathini EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
1034efa95f03SHari Bathini break;
1035efa95f03SHari Bathini case BPF_W:
1036efa95f03SHari Bathini EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
1037efa95f03SHari Bathini break;
1038efa95f03SHari Bathini case BPF_DW:
1039794abc08SNaveen N. Rao if (off % 4) {
10403a3fc9bfSJordan Niethe EMIT(PPC_RAW_LI(tmp1_reg, off));
10413a3fc9bfSJordan Niethe EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
1042794abc08SNaveen N. Rao } else {
1043794abc08SNaveen N. Rao EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
1044794abc08SNaveen N. Rao }
1045156d0e29SNaveen N. Rao break;
1046efa95f03SHari Bathini }
1047717756c9SArtem Savkov }
1048efa95f03SHari Bathini
1049efa95f03SHari Bathini if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
1050efa95f03SHari Bathini addrs[++i] = ctx->idx * 4;
1051983bdc02SRavi Bangoria
1052983bdc02SRavi Bangoria if (BPF_MODE(code) == BPF_PROBE_MEM) {
105390d862f3SHari Bathini ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
105490d862f3SHari Bathini ctx->idx - 1, 4, dst_reg);
1055983bdc02SRavi Bangoria if (ret)
1056983bdc02SRavi Bangoria return ret;
1057983bdc02SRavi Bangoria }
1058efa95f03SHari Bathini break;
1059156d0e29SNaveen N. Rao
1060156d0e29SNaveen N. Rao /*
1061156d0e29SNaveen N. Rao * Doubleword load
1062156d0e29SNaveen N. Rao * 16 byte instruction that uses two 'struct bpf_insn'
1063156d0e29SNaveen N. Rao */
1064156d0e29SNaveen N. Rao case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
1065156d0e29SNaveen N. Rao imm64 = ((u64)(u32) insn[i].imm) |
1066156d0e29SNaveen N. Rao (((u64)(u32) insn[i+1].imm) << 32);
1067f9320c49SNaveen N. Rao tmp_idx = ctx->idx;
1068f9320c49SNaveen N. Rao PPC_LI64(dst_reg, imm64);
1069f9320c49SNaveen N. Rao /* padding to allow full 5 instructions for later patching */
1070d3921cbbSChristophe Leroy if (!image)
1071f9320c49SNaveen N. Rao for (j = ctx->idx - tmp_idx; j < 5; j++)
1072f9320c49SNaveen N. Rao EMIT(PPC_RAW_NOP());
1073156d0e29SNaveen N. Rao /* Adjust for two bpf instructions */
1074156d0e29SNaveen N. Rao addrs[++i] = ctx->idx * 4;
1075156d0e29SNaveen N. Rao break;
1076156d0e29SNaveen N. Rao
1077156d0e29SNaveen N. Rao /*
1078156d0e29SNaveen N. Rao * Return/Exit
1079156d0e29SNaveen N. Rao */
1080156d0e29SNaveen N. Rao case BPF_JMP | BPF_EXIT:
1081156d0e29SNaveen N. Rao /*
1082156d0e29SNaveen N. Rao * If this isn't the very last instruction, branch to
1083156d0e29SNaveen N. Rao * the epilogue. If we _are_ the last instruction,
1084156d0e29SNaveen N. Rao * we'll just fall through to the epilogue.
1085156d0e29SNaveen N. Rao */
10860ffdbce6SNaveen N. Rao if (i != flen - 1) {
10873a3fc9bfSJordan Niethe ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
10880ffdbce6SNaveen N. Rao if (ret)
10890ffdbce6SNaveen N. Rao return ret;
10900ffdbce6SNaveen N. Rao }
1091156d0e29SNaveen N. Rao /* else fall through to the epilogue */
1092156d0e29SNaveen N. Rao break;
1093156d0e29SNaveen N. Rao
1094156d0e29SNaveen N. Rao /*
10958484ce83SSandipan Das * Call kernel helper or bpf function
1096156d0e29SNaveen N. Rao */
1097156d0e29SNaveen N. Rao case BPF_JMP | BPF_CALL:
1098156d0e29SNaveen N. Rao ctx->seen |= SEEN_FUNC;
10998484ce83SSandipan Das
110085e03115SChristophe Leroy ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
1101e2c95a61SDaniel Borkmann &func_addr, &func_addr_fixed);
1102e2c95a61SDaniel Borkmann if (ret < 0)
1103e2c95a61SDaniel Borkmann return ret;
1104156d0e29SNaveen N. Rao
1105e2c95a61SDaniel Borkmann if (func_addr_fixed)
11062ecfe59cSHari Bathini ret = bpf_jit_emit_func_call_hlp(image, fimage, ctx, func_addr);
1107e2c95a61SDaniel Borkmann else
110890d862f3SHari Bathini ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
110943d636f8SNaveen N. Rao
111043d636f8SNaveen N. Rao if (ret)
111143d636f8SNaveen N. Rao return ret;
111243d636f8SNaveen N. Rao
1113156d0e29SNaveen N. Rao /* move return value from r3 to BPF_REG_0 */
111449c3af43SNaveen N. Rao EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
1115156d0e29SNaveen N. Rao break;
1116156d0e29SNaveen N. Rao
1117156d0e29SNaveen N. Rao /*
1118156d0e29SNaveen N. Rao * Jumps and branches
1119156d0e29SNaveen N. Rao */
1120156d0e29SNaveen N. Rao case BPF_JMP | BPF_JA:
1121156d0e29SNaveen N. Rao PPC_JMP(addrs[i + 1 + off]);
1122156d0e29SNaveen N. Rao break;
11233c086ce2SArtem Savkov case BPF_JMP32 | BPF_JA:
11243c086ce2SArtem Savkov PPC_JMP(addrs[i + 1 + imm]);
11253c086ce2SArtem Savkov break;
1126156d0e29SNaveen N. Rao
1127156d0e29SNaveen N. Rao case BPF_JMP | BPF_JGT | BPF_K:
1128156d0e29SNaveen N. Rao case BPF_JMP | BPF_JGT | BPF_X:
1129156d0e29SNaveen N. Rao case BPF_JMP | BPF_JSGT | BPF_K:
1130156d0e29SNaveen N. Rao case BPF_JMP | BPF_JSGT | BPF_X:
11315f645996SJiong Wang case BPF_JMP32 | BPF_JGT | BPF_K:
11325f645996SJiong Wang case BPF_JMP32 | BPF_JGT | BPF_X:
11335f645996SJiong Wang case BPF_JMP32 | BPF_JSGT | BPF_K:
11345f645996SJiong Wang case BPF_JMP32 | BPF_JSGT | BPF_X:
1135156d0e29SNaveen N. Rao true_cond = COND_GT;
1136156d0e29SNaveen N. Rao goto cond_branch;
113720dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JLT | BPF_K:
113820dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JLT | BPF_X:
113920dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JSLT | BPF_K:
114020dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JSLT | BPF_X:
11415f645996SJiong Wang case BPF_JMP32 | BPF_JLT | BPF_K:
11425f645996SJiong Wang case BPF_JMP32 | BPF_JLT | BPF_X:
11435f645996SJiong Wang case BPF_JMP32 | BPF_JSLT | BPF_K:
11445f645996SJiong Wang case BPF_JMP32 | BPF_JSLT | BPF_X:
114520dbf5ccSDaniel Borkmann true_cond = COND_LT;
114620dbf5ccSDaniel Borkmann goto cond_branch;
1147156d0e29SNaveen N. Rao case BPF_JMP | BPF_JGE | BPF_K:
1148156d0e29SNaveen N. Rao case BPF_JMP | BPF_JGE | BPF_X:
1149156d0e29SNaveen N. Rao case BPF_JMP | BPF_JSGE | BPF_K:
1150156d0e29SNaveen N. Rao case BPF_JMP | BPF_JSGE | BPF_X:
11515f645996SJiong Wang case BPF_JMP32 | BPF_JGE | BPF_K:
11525f645996SJiong Wang case BPF_JMP32 | BPF_JGE | BPF_X:
11535f645996SJiong Wang case BPF_JMP32 | BPF_JSGE | BPF_K:
11545f645996SJiong Wang case BPF_JMP32 | BPF_JSGE | BPF_X:
1155156d0e29SNaveen N. Rao true_cond = COND_GE;
1156156d0e29SNaveen N. Rao goto cond_branch;
115720dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JLE | BPF_K:
115820dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JLE | BPF_X:
115920dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JSLE | BPF_K:
116020dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JSLE | BPF_X:
11615f645996SJiong Wang case BPF_JMP32 | BPF_JLE | BPF_K:
11625f645996SJiong Wang case BPF_JMP32 | BPF_JLE | BPF_X:
11635f645996SJiong Wang case BPF_JMP32 | BPF_JSLE | BPF_K:
11645f645996SJiong Wang case BPF_JMP32 | BPF_JSLE | BPF_X:
116520dbf5ccSDaniel Borkmann true_cond = COND_LE;
116620dbf5ccSDaniel Borkmann goto cond_branch;
1167156d0e29SNaveen N. Rao case BPF_JMP | BPF_JEQ | BPF_K:
1168156d0e29SNaveen N. Rao case BPF_JMP | BPF_JEQ | BPF_X:
11695f645996SJiong Wang case BPF_JMP32 | BPF_JEQ | BPF_K:
11705f645996SJiong Wang case BPF_JMP32 | BPF_JEQ | BPF_X:
1171156d0e29SNaveen N. Rao true_cond = COND_EQ;
1172156d0e29SNaveen N. Rao goto cond_branch;
1173156d0e29SNaveen N. Rao case BPF_JMP | BPF_JNE | BPF_K:
1174156d0e29SNaveen N. Rao case BPF_JMP | BPF_JNE | BPF_X:
11755f645996SJiong Wang case BPF_JMP32 | BPF_JNE | BPF_K:
11765f645996SJiong Wang case BPF_JMP32 | BPF_JNE | BPF_X:
1177156d0e29SNaveen N. Rao true_cond = COND_NE;
1178156d0e29SNaveen N. Rao goto cond_branch;
1179156d0e29SNaveen N. Rao case BPF_JMP | BPF_JSET | BPF_K:
1180156d0e29SNaveen N. Rao case BPF_JMP | BPF_JSET | BPF_X:
11815f645996SJiong Wang case BPF_JMP32 | BPF_JSET | BPF_K:
11825f645996SJiong Wang case BPF_JMP32 | BPF_JSET | BPF_X:
1183156d0e29SNaveen N. Rao true_cond = COND_NE;
1184156d0e29SNaveen N. Rao /* Fall through */
1185156d0e29SNaveen N. Rao
1186156d0e29SNaveen N. Rao cond_branch:
1187156d0e29SNaveen N. Rao switch (code) {
1188156d0e29SNaveen N. Rao case BPF_JMP | BPF_JGT | BPF_X:
118920dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JLT | BPF_X:
1190156d0e29SNaveen N. Rao case BPF_JMP | BPF_JGE | BPF_X:
119120dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JLE | BPF_X:
1192156d0e29SNaveen N. Rao case BPF_JMP | BPF_JEQ | BPF_X:
1193156d0e29SNaveen N. Rao case BPF_JMP | BPF_JNE | BPF_X:
11945f645996SJiong Wang case BPF_JMP32 | BPF_JGT | BPF_X:
11955f645996SJiong Wang case BPF_JMP32 | BPF_JLT | BPF_X:
11965f645996SJiong Wang case BPF_JMP32 | BPF_JGE | BPF_X:
11975f645996SJiong Wang case BPF_JMP32 | BPF_JLE | BPF_X:
11985f645996SJiong Wang case BPF_JMP32 | BPF_JEQ | BPF_X:
11995f645996SJiong Wang case BPF_JMP32 | BPF_JNE | BPF_X:
1200156d0e29SNaveen N. Rao /* unsigned comparison */
12015f645996SJiong Wang if (BPF_CLASS(code) == BPF_JMP32)
12023a181237SBalamuruhan S EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
12035f645996SJiong Wang else
12043a181237SBalamuruhan S EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
1205156d0e29SNaveen N. Rao break;
1206156d0e29SNaveen N. Rao case BPF_JMP | BPF_JSGT | BPF_X:
120720dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JSLT | BPF_X:
1208156d0e29SNaveen N. Rao case BPF_JMP | BPF_JSGE | BPF_X:
120920dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JSLE | BPF_X:
12105f645996SJiong Wang case BPF_JMP32 | BPF_JSGT | BPF_X:
12115f645996SJiong Wang case BPF_JMP32 | BPF_JSLT | BPF_X:
12125f645996SJiong Wang case BPF_JMP32 | BPF_JSGE | BPF_X:
12135f645996SJiong Wang case BPF_JMP32 | BPF_JSLE | BPF_X:
1214156d0e29SNaveen N. Rao /* signed comparison */
12155f645996SJiong Wang if (BPF_CLASS(code) == BPF_JMP32)
12163a181237SBalamuruhan S EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
12175f645996SJiong Wang else
12183a181237SBalamuruhan S EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
1219156d0e29SNaveen N. Rao break;
1220156d0e29SNaveen N. Rao case BPF_JMP | BPF_JSET | BPF_X:
12215f645996SJiong Wang case BPF_JMP32 | BPF_JSET | BPF_X:
12225f645996SJiong Wang if (BPF_CLASS(code) == BPF_JMP) {
12233a3fc9bfSJordan Niethe EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
12245f645996SJiong Wang } else {
12253a3fc9bfSJordan Niethe EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
12263a3fc9bfSJordan Niethe EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
12275f645996SJiong Wang }
1228156d0e29SNaveen N. Rao break;
1229156d0e29SNaveen N. Rao case BPF_JMP | BPF_JNE | BPF_K:
1230156d0e29SNaveen N. Rao case BPF_JMP | BPF_JEQ | BPF_K:
1231156d0e29SNaveen N. Rao case BPF_JMP | BPF_JGT | BPF_K:
123220dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JLT | BPF_K:
1233156d0e29SNaveen N. Rao case BPF_JMP | BPF_JGE | BPF_K:
123420dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JLE | BPF_K:
12355f645996SJiong Wang case BPF_JMP32 | BPF_JNE | BPF_K:
12365f645996SJiong Wang case BPF_JMP32 | BPF_JEQ | BPF_K:
12375f645996SJiong Wang case BPF_JMP32 | BPF_JGT | BPF_K:
12385f645996SJiong Wang case BPF_JMP32 | BPF_JLT | BPF_K:
12395f645996SJiong Wang case BPF_JMP32 | BPF_JGE | BPF_K:
12405f645996SJiong Wang case BPF_JMP32 | BPF_JLE | BPF_K:
12415f645996SJiong Wang {
12425f645996SJiong Wang bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
12435f645996SJiong Wang
1244156d0e29SNaveen N. Rao /*
1245156d0e29SNaveen N. Rao * Need sign-extended load, so only positive
1246156d0e29SNaveen N. Rao * values can be used as imm in cmpldi
1247156d0e29SNaveen N. Rao */
12485f645996SJiong Wang if (imm >= 0 && imm < 32768) {
12495f645996SJiong Wang if (is_jmp32)
12503a181237SBalamuruhan S EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
12515f645996SJiong Wang else
12523a181237SBalamuruhan S EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
12535f645996SJiong Wang } else {
1254156d0e29SNaveen N. Rao /* sign-extending load */
12553a3fc9bfSJordan Niethe PPC_LI32(tmp1_reg, imm);
1256156d0e29SNaveen N. Rao /* ... but unsigned comparison */
12575f645996SJiong Wang if (is_jmp32)
12583a3fc9bfSJordan Niethe EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
12595f645996SJiong Wang else
12603a3fc9bfSJordan Niethe EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
1261156d0e29SNaveen N. Rao }
1262156d0e29SNaveen N. Rao break;
12635f645996SJiong Wang }
1264156d0e29SNaveen N. Rao case BPF_JMP | BPF_JSGT | BPF_K:
126520dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JSLT | BPF_K:
1266156d0e29SNaveen N. Rao case BPF_JMP | BPF_JSGE | BPF_K:
126720dbf5ccSDaniel Borkmann case BPF_JMP | BPF_JSLE | BPF_K:
12685f645996SJiong Wang case BPF_JMP32 | BPF_JSGT | BPF_K:
12695f645996SJiong Wang case BPF_JMP32 | BPF_JSLT | BPF_K:
12705f645996SJiong Wang case BPF_JMP32 | BPF_JSGE | BPF_K:
12715f645996SJiong Wang case BPF_JMP32 | BPF_JSLE | BPF_K:
12725f645996SJiong Wang {
12735f645996SJiong Wang bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
12745f645996SJiong Wang
1275156d0e29SNaveen N. Rao /*
1276156d0e29SNaveen N. Rao * signed comparison, so any 16-bit value
1277156d0e29SNaveen N. Rao * can be used in cmpdi
1278156d0e29SNaveen N. Rao */
12795f645996SJiong Wang if (imm >= -32768 && imm < 32768) {
12805f645996SJiong Wang if (is_jmp32)
12813a181237SBalamuruhan S EMIT(PPC_RAW_CMPWI(dst_reg, imm));
12825f645996SJiong Wang else
12833a181237SBalamuruhan S EMIT(PPC_RAW_CMPDI(dst_reg, imm));
12845f645996SJiong Wang } else {
12853a3fc9bfSJordan Niethe PPC_LI32(tmp1_reg, imm);
12865f645996SJiong Wang if (is_jmp32)
12873a3fc9bfSJordan Niethe EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
12885f645996SJiong Wang else
12893a3fc9bfSJordan Niethe EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
1290156d0e29SNaveen N. Rao }
1291156d0e29SNaveen N. Rao break;
12925f645996SJiong Wang }
1293156d0e29SNaveen N. Rao case BPF_JMP | BPF_JSET | BPF_K:
12945f645996SJiong Wang case BPF_JMP32 | BPF_JSET | BPF_K:
1295156d0e29SNaveen N. Rao /* andi does not sign-extend the immediate */
1296156d0e29SNaveen N. Rao if (imm >= 0 && imm < 32768)
1297156d0e29SNaveen N. Rao /* PPC_ANDI is _only/always_ dot-form */
12983a3fc9bfSJordan Niethe EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
1299156d0e29SNaveen N. Rao else {
13003a3fc9bfSJordan Niethe PPC_LI32(tmp1_reg, imm);
13015f645996SJiong Wang if (BPF_CLASS(code) == BPF_JMP) {
13023a3fc9bfSJordan Niethe EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
13033a3fc9bfSJordan Niethe tmp1_reg));
13045f645996SJiong Wang } else {
13053a3fc9bfSJordan Niethe EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
13063a3fc9bfSJordan Niethe EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
13073a181237SBalamuruhan S 0, 0, 31));
13085f645996SJiong Wang }
1309156d0e29SNaveen N. Rao }
1310156d0e29SNaveen N. Rao break;
1311156d0e29SNaveen N. Rao }
1312156d0e29SNaveen N. Rao PPC_BCC(true_cond, addrs[i + 1 + off]);
1313156d0e29SNaveen N. Rao break;
1314156d0e29SNaveen N. Rao
1315156d0e29SNaveen N. Rao /*
1316ce076141SNaveen N. Rao * Tail call
1317156d0e29SNaveen N. Rao */
131871189fa9SAlexei Starovoitov case BPF_JMP | BPF_TAIL_CALL:
1319ce076141SNaveen N. Rao ctx->seen |= SEEN_TAILCALL;
13203832ba4eSNaveen N. Rao ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
13213832ba4eSNaveen N. Rao if (ret < 0)
13223832ba4eSNaveen N. Rao return ret;
1323ce076141SNaveen N. Rao break;
1324156d0e29SNaveen N. Rao
1325156d0e29SNaveen N. Rao default:
1326156d0e29SNaveen N. Rao /*
1327156d0e29SNaveen N. Rao * The filter contains something cruel & unusual.
1328156d0e29SNaveen N. Rao * We don't handle it, but also there shouldn't be
1329156d0e29SNaveen N. Rao * anything missing from our list.
1330156d0e29SNaveen N. Rao */
1331156d0e29SNaveen N. Rao pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1332156d0e29SNaveen N. Rao code, i);
1333156d0e29SNaveen N. Rao return -ENOTSUPP;
1334156d0e29SNaveen N. Rao }
1335156d0e29SNaveen N. Rao }
1336156d0e29SNaveen N. Rao
1337156d0e29SNaveen N. Rao /* Set end-of-body-code address for exit. */
1338156d0e29SNaveen N. Rao addrs[i] = ctx->idx * 4;
1339156d0e29SNaveen N. Rao
1340156d0e29SNaveen N. Rao return 0;
1341156d0e29SNaveen N. Rao }
1342