/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The interface that a back-end should provide to bpf_jit_core.c.
 *
 * Copyright (c) 2024 Synopsys Inc.
 * Author: Shahab Vahedi <shahab@synopsys.com>
 */

#ifndef _ARC_BPF_JIT_H
#define _ARC_BPF_JIT_H

#include <linux/bpf.h>
#include <linux/filter.h>

/* Print debug info and assert. */
//#define ARC_BPF_JIT_DEBUG

/* Determine the address type of the target. */
#ifdef CONFIG_ISA_ARCV2
#define ARC_ADDR u32
#endif

/*
 * For the translation of some BPF instructions, a temporary register
 * might be needed for some interim data.
 */
#define JIT_REG_TMP MAX_BPF_JIT_REG

/*
 * Buffer access: If buffer "b" is not NULL, advance by "n" bytes.
 *
 * This macro must be used in any place that potentially requires a
 * "buf + len". This way, we make sure that the "buf" argument for
 * the underlying "arc_*(buf, ...)" ends up as NULL instead of something
 * like "0+4" or "0+8", etc. Those "arc_*()" functions check their "buf"
 * value to decide if instructions should be emitted or not.
 */
#define BUF(b, n) (((b) != NULL) ? ((b) + (n)) : (b))
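
/*
 * A hedged usage sketch, not part of this interface: the "len"
 * accumulator and the two-pass (measure, then emit) scheme below are
 * assumptions about how bpf_jit_core.c drives these helpers.
 *
 *	len += mov_r64(BUF(buf, len), rd, rs, false);
 *
 * On the measuring pass "buf" is NULL, so BUF() keeps it NULL and the
 * arc_*() emitters only return the length they would have written.
 */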

/************** Functions that the back-end must provide **************/
/* Zero extension for 32-bit operations. */
u8 zext(u8 *buf, u8 rd);
/***** Moves *****/
u8 mov_r32(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
u8 mov_r32_i32(u8 *buf, u8 reg, s32 imm);
u8 mov_r64(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
u8 mov_r64_i32(u8 *buf, u8 reg, s32 imm);
u8 mov_r64_i64(u8 *buf, u8 reg, u32 lo, u32 hi);
/***** Loads and stores *****/
u8 load_r(u8 *buf, u8 rd, u8 rs, s16 off, u8 size, bool sign_ext);
u8 store_r(u8 *buf, u8 rd, u8 rs, s16 off, u8 size);
u8 store_i(u8 *buf, s32 imm, u8 rd, s16 off, u8 size);
/***** Addition *****/
u8 add_r32(u8 *buf, u8 rd, u8 rs);
u8 add_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 add_r64(u8 *buf, u8 rd, u8 rs);
u8 add_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Subtraction *****/
u8 sub_r32(u8 *buf, u8 rd, u8 rs);
u8 sub_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 sub_r64(u8 *buf, u8 rd, u8 rs);
u8 sub_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Multiplication *****/
u8 mul_r32(u8 *buf, u8 rd, u8 rs);
u8 mul_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 mul_r64(u8 *buf, u8 rd, u8 rs);
u8 mul_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Division *****/
u8 div_r32(u8 *buf, u8 rd, u8 rs, bool sign_ext);
u8 div_r32_i32(u8 *buf, u8 rd, s32 imm, bool sign_ext);
/***** Remainder *****/
u8 mod_r32(u8 *buf, u8 rd, u8 rs, bool sign_ext);
u8 mod_r32_i32(u8 *buf, u8 rd, s32 imm, bool sign_ext);
/***** Bitwise AND *****/
u8 and_r32(u8 *buf, u8 rd, u8 rs);
u8 and_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 and_r64(u8 *buf, u8 rd, u8 rs);
u8 and_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Bitwise OR *****/
u8 or_r32(u8 *buf, u8 rd, u8 rs);
u8 or_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 or_r64(u8 *buf, u8 rd, u8 rs);
u8 or_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Bitwise XOR *****/
u8 xor_r32(u8 *buf, u8 rd, u8 rs);
u8 xor_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 xor_r64(u8 *buf, u8 rd, u8 rs);
u8 xor_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Negate *****/
u8 neg_r32(u8 *buf, u8 r);
u8 neg_r64(u8 *buf, u8 r);
/***** Bitwise left shift *****/
u8 lsh_r32(u8 *buf, u8 rd, u8 rs);
u8 lsh_r32_i32(u8 *buf, u8 rd, u8 imm);
u8 lsh_r64(u8 *buf, u8 rd, u8 rs);
u8 lsh_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Bitwise right shift (logical) *****/
u8 rsh_r32(u8 *buf, u8 rd, u8 rs);
u8 rsh_r32_i32(u8 *buf, u8 rd, u8 imm);
u8 rsh_r64(u8 *buf, u8 rd, u8 rs);
u8 rsh_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Bitwise right shift (arithmetic) *****/
u8 arsh_r32(u8 *buf, u8 rd, u8 rs);
u8 arsh_r32_i32(u8 *buf, u8 rd, u8 imm);
u8 arsh_r64(u8 *buf, u8 rd, u8 rs);
u8 arsh_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Frame related *****/
u32 mask_for_used_regs(u8 bpf_reg, bool is_call);
u8 arc_prologue(u8 *buf, u32 usage, u16 frame_size);
u8 arc_epilogue(u8 *buf, u32 usage, u16 frame_size);
/***** Jumps *****/
/*
 * Different sorts of conditions (ARC enum as opposed to BPF_*).
 *
 * Do not change the order of enums here. ARC_CC_SLE+1 is used
 * to determine the number of JCCs.
 */
enum ARC_CC {
	ARC_CC_UGT = 0,		/* unsigned >  */
	ARC_CC_UGE,		/* unsigned >= */
	ARC_CC_ULT,		/* unsigned <  */
	ARC_CC_ULE,		/* unsigned <= */
	ARC_CC_SGT,		/*   signed >  */
	ARC_CC_SGE,		/*   signed >= */
	ARC_CC_SLT,		/*   signed <  */
	ARC_CC_SLE,		/*   signed <= */
	ARC_CC_AL,		/* always      */
	ARC_CC_EQ,		/*          == */
	ARC_CC_NE,		/*          != */
	ARC_CC_SET,		/* test        */
	ARC_CC_LAST
};
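
/*
 * A hedged illustration, not something this header mandates: the core
 * translator presumably maps BPF jump conditions onto this enum along
 * these lines (the switch fragment is an assumption for clarity):
 *
 *	case BPF_JGT:  cond = ARC_CC_UGT; break;
 *	case BPF_JSGT: cond = ARC_CC_SGT; break;
 *	case BPF_JEQ:  cond = ARC_CC_EQ;  break;
 *
 * BPF's unsigned comparisons land on the ARC_CC_U* entries and the
 * signed ones on ARC_CC_S*.
 */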

/*
 * A few notes:
 *
 * - check_jmp_*() must be called before the corresponding gen_jmp_*().
 *   They return "true" if the jump is feasible and "false" otherwise.
 *
 * - The "*_off" naming emphasizes that these parameters are merely
 *   offsets into the JIT stream and not absolute addresses. One can
 *   view them as addresses if the JIT code were to start at address
 *   0x0000_0000. Since the JIT buffer itself starts on a word-aligned
 *   address, this view holds and actually keeps things simple (offsets
 *   fit comfortably in a u32).
 */
bool check_jmp_32(u32 curr_off, u32 targ_off, u8 cond);
bool check_jmp_64(u32 curr_off, u32 targ_off, u8 cond);
u8 gen_jmp_32(u8 *buf, u8 rd, u8 rs, u8 cond, u32 c_off, u32 t_off);
u8 gen_jmp_64(u8 *buf, u8 rd, u8 rs, u8 cond, u32 c_off, u32 t_off);
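
/*
 * A hedged usage sketch: "buf", "len", and the error handling below are
 * illustrative assumptions about the caller in bpf_jit_core.c, not part
 * of this interface.
 *
 *	if (!check_jmp_32(curr_off, targ_off, ARC_CC_EQ))
 *		return -EFAULT;
 *	len = gen_jmp_32(BUF(buf, len), rd, rs, ARC_CC_EQ,
 *			 curr_off, targ_off);
 *
 * i.e. only a successful check_jmp_*() licenses the matching
 * gen_jmp_*() call with the same condition and offsets.
 */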
/***** Miscellaneous *****/
u8 gen_func_call(u8 *buf, ARC_ADDR func_addr, bool external_func);
u8 arc_to_bpf_return(u8 *buf);
/*
 * - Perform byte swaps on "rd" based on the "size".
 * - If "force" is set, do it unconditionally. Otherwise, take the
 *   desired "endian"ness and the host endianness into account.
 * - For data sizes of up to 32 bits, perform a zero-extension if
 *   requested through the "do_zext" boolean.
 */
u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext);
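
/*
 * For instance (a hedged sketch, not a prescribed call): lowering a
 * 16-bit BPF_END | BPF_TO_BE operation might look like the line below.
 * Passing the BPF_FROM_BE flavor as "endian" and requesting the
 * zero-extension are assumptions made purely for illustration.
 *
 *	len += gen_swap(BUF(buf, len), rd, 16, BPF_FROM_BE, false, true);
 */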

#endif /* _ARC_BPF_JIT_H */