/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The interface that a back-end should provide to bpf_jit_core.c.
 *
 * Copyright (c) 2024 Synopsys Inc.
 * Author: Shahab Vahedi <shahab@synopsys.com>
 */

#ifndef _ARC_BPF_JIT_H
#define _ARC_BPF_JIT_H

#include <linux/bpf.h>
#include <linux/filter.h>

/* Print debug info and assert. */
//#define ARC_BPF_JIT_DEBUG

/* Determine the address type of the target. */
#ifdef CONFIG_ISA_ARCV2
#define ARC_ADDR u32
#endif

/*
 * For the translation of some BPF instructions, a temporary register
 * might be needed to hold interim data.
 */
#define JIT_REG_TMP MAX_BPF_JIT_REG

/*
 * Buffer access: If buffer "b" is not NULL, advance by "n" bytes.
 *
 * This macro must be used in any place that potentially requires a
 * "buf + len". This way, we make sure that the "buf" argument for
 * the underlying "arc_*(buf, ...)" ends up as NULL instead of something
 * like "0+4" or "0+8", etc. Those "arc_*()" functions check their "buf"
 * value to decide if instructions should be emitted or not.
 */
#define BUF(b, n) (((b) != NULL) ? ((b) + (n)) : (b))
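
/*
 * Illustrative usage (a sketch, not code from this file): the JIT core
 * keeps a running length and hands BUF(buf, len) to the emitters, so a
 * sizing pass with "buf == NULL" counts bytes without emitting:
 *
 *	len += mov_r32(BUF(buf, len), rd, rs, 0);
 *	len += add_r32(BUF(buf, len), rd, rs);
 *
 * When "buf" is NULL, BUF() yields NULL and the arc_*() emitters only
 * return the length of the instructions they would have encoded.
 */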

/************** Functions that the back-end must provide **************/
/* Zero-extension for 32-bit operations. */
u8 zext(u8 *buf, u8 rd);
/***** Moves *****/
u8 mov_r32(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
u8 mov_r32_i32(u8 *buf, u8 reg, s32 imm);
u8 mov_r64(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
u8 mov_r64_i32(u8 *buf, u8 reg, s32 imm);
u8 mov_r64_i64(u8 *buf, u8 reg, u32 lo, u32 hi);
/***** Loads and stores *****/
u8 load_r(u8 *buf, u8 rd, u8 rs, s16 off, u8 size, bool sign_ext);
u8 store_r(u8 *buf, u8 rd, u8 rs, s16 off, u8 size);
u8 store_i(u8 *buf, s32 imm, u8 rd, s16 off, u8 size);
/***** Addition *****/
u8 add_r32(u8 *buf, u8 rd, u8 rs);
u8 add_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 add_r64(u8 *buf, u8 rd, u8 rs);
u8 add_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Subtraction *****/
u8 sub_r32(u8 *buf, u8 rd, u8 rs);
u8 sub_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 sub_r64(u8 *buf, u8 rd, u8 rs);
u8 sub_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Multiplication *****/
u8 mul_r32(u8 *buf, u8 rd, u8 rs);
u8 mul_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 mul_r64(u8 *buf, u8 rd, u8 rs);
u8 mul_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Division *****/
u8 div_r32(u8 *buf, u8 rd, u8 rs, bool sign_ext);
u8 div_r32_i32(u8 *buf, u8 rd, s32 imm, bool sign_ext);
/***** Remainder *****/
u8 mod_r32(u8 *buf, u8 rd, u8 rs, bool sign_ext);
u8 mod_r32_i32(u8 *buf, u8 rd, s32 imm, bool sign_ext);
/***** Bitwise AND *****/
u8 and_r32(u8 *buf, u8 rd, u8 rs);
u8 and_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 and_r64(u8 *buf, u8 rd, u8 rs);
u8 and_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Bitwise OR *****/
u8 or_r32(u8 *buf, u8 rd, u8 rs);
u8 or_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 or_r64(u8 *buf, u8 rd, u8 rs);
u8 or_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Bitwise XOR *****/
u8 xor_r32(u8 *buf, u8 rd, u8 rs);
u8 xor_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 xor_r64(u8 *buf, u8 rd, u8 rs);
u8 xor_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Bitwise Negate *****/
u8 neg_r32(u8 *buf, u8 r);
u8 neg_r64(u8 *buf, u8 r);
/***** Bitwise left shift *****/
u8 lsh_r32(u8 *buf, u8 rd, u8 rs);
u8 lsh_r32_i32(u8 *buf, u8 rd, u8 imm);
u8 lsh_r64(u8 *buf, u8 rd, u8 rs);
u8 lsh_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Bitwise right shift (logical) *****/
u8 rsh_r32(u8 *buf, u8 rd, u8 rs);
u8 rsh_r32_i32(u8 *buf, u8 rd, u8 imm);
u8 rsh_r64(u8 *buf, u8 rd, u8 rs);
u8 rsh_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Bitwise right shift (arithmetic) *****/
u8 arsh_r32(u8 *buf, u8 rd, u8 rs);
u8 arsh_r32_i32(u8 *buf, u8 rd, u8 imm);
u8 arsh_r64(u8 *buf, u8 rd, u8 rs);
u8 arsh_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Frame related *****/
u32 mask_for_used_regs(u8 bpf_reg, bool is_call);
u8 arc_prologue(u8 *buf, u32 usage, u16 frame_size);
u8 arc_epilogue(u8 *buf, u32 usage, u16 frame_size);
/***** Jumps *****/
/*
 * Different sorts of conditions (ARC enum as opposed to BPF_*).
 *
 * Do not change the order of enums here. ARC_CC_SLE+1 is used
 * to determine the number of JCCs.
 */
enum ARC_CC {
	ARC_CC_UGT = 0,		/* unsigned >  */
	ARC_CC_UGE,		/* unsigned >= */
	ARC_CC_ULT,		/* unsigned <  */
	ARC_CC_ULE,		/* unsigned <= */
	ARC_CC_SGT,		/*   signed >  */
	ARC_CC_SGE,		/*   signed >= */
	ARC_CC_SLT,		/*   signed <  */
	ARC_CC_SLE,		/*   signed <= */
	ARC_CC_AL,		/* always      */
	ARC_CC_EQ,		/*          == */
	ARC_CC_NE,		/*          != */
	ARC_CC_SET,		/* test        */
	ARC_CC_LAST
};
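
/*
 * Illustrative sketch (hypothetical, not code from this file): because
 * of the ordering guarantee above, a back-end can size a per-condition
 * lookup table for the conditional jumps as:
 *
 *	static const u8 jcc_handlers[ARC_CC_SLE + 1] = { ... };
 *
 * where indices 0 through ARC_CC_SLE cover exactly the JCCs.
 */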

/*
 * A few notes:
 *
 * - The check_jmp_*() functions must be called before the corresponding
 *   gen_jmp_*(). They return "true" if the jump is encodable and
 *   "false" otherwise (see the sketch after the prototypes below).
 *
 * - The "*_off" naming emphasizes that these parameters are merely
 *   offsets in the JIT stream and not absolute addresses. They can be
 *   read as addresses if the JIT code were to start at address
 *   0x0000_0000. Since the JIT buffer starts at a word-aligned address,
 *   this works and actually makes things simpler (offsets fit in a u32,
 *   which is more than enough).
 */
bool check_jmp_32(u32 curr_off, u32 targ_off, u8 cond);
bool check_jmp_64(u32 curr_off, u32 targ_off, u8 cond);
u8 gen_jmp_32(u8 *buf, u8 rd, u8 rs, u8 cond, u32 c_off, u32 t_off);
u8 gen_jmp_64(u8 *buf, u8 rd, u8 rs, u8 cond, u32 c_off, u32 t_off);
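/*
 * Typical call sequence (an illustrative sketch; emit_cond_jmp_32() is
 * a hypothetical helper, not part of this interface):
 *
 *	static u8 emit_cond_jmp_32(u8 *buf, u8 rd, u8 rs, u8 cond,
 *				   u32 curr_off, u32 targ_off)
 *	{
 *		if (!check_jmp_32(curr_off, targ_off, cond))
 *			return 0;	// out of range; caller must handle
 *		return gen_jmp_32(buf, rd, rs, cond, curr_off, targ_off);
 *	}
 */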
/***** Miscellaneous *****/
u8 gen_func_call(u8 *buf, ARC_ADDR func_addr, bool external_func);
u8 arc_to_bpf_return(u8 *buf);
/*
 * - Perform byte swaps on "rd" based on the "size".
 * - If "force" is set, do it unconditionally. Otherwise, consider the
 *   desired "endian"ness and the host endianness.
 * - For data sizes of up to 32 bits, perform a zero-extension if the
 *   "do_zext" boolean is set.
 */
u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext);
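
/*
 * Illustrative call (hypothetical register and length values): swap the
 * bytes of the lower 16 bits of "rd" unconditionally and zero-extend
 * the result:
 *
 *	len += gen_swap(BUF(buf, len), rd, 16, 0, true, true);
 *
 * Here "endian" is passed as 0 because, per the note above, it is only
 * consulted when "force" is false.
 */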

#endif /* _ARC_BPF_JIT_H */