/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#include <asm/insn.h>

/* 5-bit Register Operand */
#define A64_R(x)	AARCH64_INSN_REG_##x
#define A64_FP		AARCH64_INSN_REG_FP
#define A64_LR		AARCH64_INSN_REG_LR
#define A64_ZR		AARCH64_INSN_REG_ZR
#define A64_SP		AARCH64_INSN_REG_SP
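
/*
 * Note: ZR and SP share register number 31; whether encoding 31 means the
 * zero register or the stack pointer depends on the individual instruction.
 */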

#define A64_VARIANT(sf) \
	((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT)

/* Compare & branch (immediate) */
#define A64_COMP_BRANCH(sf, Rt, offset, type) \
	aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
		AARCH64_INSN_BRANCH_COMP_##type)
#define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)

/* Conditional branch (immediate) */
#define A64_COND_BRANCH(cond, offset) \
	aarch64_insn_gen_cond_branch_imm(0, offset, cond)
#define A64_COND_EQ	AARCH64_INSN_COND_EQ /* == */
#define A64_COND_NE	AARCH64_INSN_COND_NE /* != */
#define A64_COND_CS	AARCH64_INSN_COND_CS /* unsigned >= */
#define A64_COND_HI	AARCH64_INSN_COND_HI /* unsigned > */
#define A64_COND_GE	AARCH64_INSN_COND_GE /* signed >= */
#define A64_COND_GT	AARCH64_INSN_COND_GT /* signed > */
#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)

/* Unconditional branch (immediate) */
#define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \
	AARCH64_INSN_BRANCH_##type)
#define A64_B(imm26)  A64_BRANCH((imm26) << 2, NOLINK)
#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK)

/* Unconditional branch (register) */
#define A64_BR(Rn)  aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_NOLINK)
#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)
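
/*
 * Illustrative sketch (not part of the original macro set): an indirect call
 * followed by a return, assuming the callee address was loaded into x10
 * beforehand.  The buffer-filling helper is hypothetical; the real JIT emits
 * into its own context.
 */
static inline int a64_example_call_ret(u32 *buf)
{
	int i = 0;

	buf[i++] = A64_BLR(A64_R(10));	/* blr x10: call, return address in LR */
	buf[i++] = A64_RET(A64_LR);	/* ret x30 */
	return i;
}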

/* Load/store register (register offset) */
#define A64_LS_REG(Rt, Rn, Rm, size, type) \
	aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \
		AARCH64_INSN_SIZE_##size, \
		AARCH64_INSN_LDST_##type##_REG_OFFSET)
#define A64_STRB(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 8, STORE)
#define A64_LDRB(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
#define A64_STRH(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 16, STORE)
#define A64_LDRH(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)

/* Load/store register pair */
#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
	aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
		AARCH64_INSN_VARIANT_64BIT, \
		AARCH64_INSN_LDST_##ls##_PAIR_##type)
/* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */
#define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX)
/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
#define A64_POP(Rt, Rt2, Rn)  A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)
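
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * interface): the pair helpers compose into the usual frame save/restore,
 * as a prologue and epilogue would use them.
 */
static inline int a64_example_frame(u32 *buf)
{
	int i = 0;

	buf[i++] = A64_PUSH(A64_FP, A64_LR, A64_SP);	/* stp x29, x30, [sp, #-16]! */
	/* ... body instructions ... */
	buf[i++] = A64_POP(A64_FP, A64_LR, A64_SP);	/* ldp x29, x30, [sp], #16 */
	return i;
}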

/* Load/store exclusive */
#define A64_SIZE(sf) \
	((sf) ? AARCH64_INSN_SIZE_64 : AARCH64_INSN_SIZE_32)
#define A64_LSX(sf, Rt, Rn, Rs, type) \
	aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
				       AARCH64_INSN_LDST_##type)
/* Rt = [Rn]; (atomic) */
#define A64_LDXR(sf, Rt, Rn) \
	A64_LSX(sf, Rt, Rn, A64_ZR, LOAD_EX)
/* [Rn] = Rt; (atomic) Rs = store-exclusive status (0 on success) */
#define A64_STXR(sf, Rt, Rn, Rs) \
	A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
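
/*
 * Illustrative sketch: a load/store-exclusive retry loop implementing an
 * atomic exchange of x1 into [x0], leaving the old value in x2 and the
 * store-exclusive status in w3.  Register choices and the helper itself are
 * assumptions for the example only.
 */
static inline int a64_example_atomic_xchg(u32 *buf)
{
	int i = 0;

	buf[i++] = A64_LDXR(1, A64_R(2), A64_R(0));		/* ldxr x2, [x0] */
	buf[i++] = A64_STXR(1, A64_R(1), A64_R(0), A64_R(3));	/* stxr w3, x1, [x0] */
	buf[i++] = A64_CBNZ(0, A64_R(3), -2);			/* retry if the store failed */
	return i;
}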

/* Prefetch */
#define A64_PRFM(Rn, type, target, policy) \
	aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \
				  AARCH64_INSN_PRFM_TARGET_##target, \
				  AARCH64_INSN_PRFM_POLICY_##policy)
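
/* e.g. A64_PRFM(A64_R(0), PST, L1, STRM): prefetch [x0] for store, to L1, streaming */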

/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
	aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP imm12 */
#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
/* Rd = Rn */
#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)
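
/*
 * Using ADD (immediate) with #0 as MOV, rather than an ORR-based MOV, means
 * the alias also works when Rd or Rn is SP.
 */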

/* Bitfield move */
#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \
	aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \
		A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type)
/* Signed, with sign replication to left and zeros to right */
#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED)
/* Unsigned, with zeros to left and right */
#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED)

/* Rd = Rn << shift */
#define A64_LSL(sf, Rd, Rn, shift) ({	\
	int sz = (sf) ? 64 : 32;	\
	A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \
})
/* Rd = Rn >> shift */
#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
/* Rd = Rn >> shift; signed */
#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
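
/*
 * LSL/LSR/ASR are the standard UBFM/SBFM aliases: LSL #shift is encoded as
 * UBFM Rd, Rn, #(-shift % sz), #(sz - 1 - shift).  For example, with sf=1
 * and shift=8 this emits UBFM Xd, Xn, #56, #55.
 */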

/* Zero extend */
#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)

/* Move wide (immediate) */
#define A64_MOVEW(sf, Rd, imm16, shift, type) \
	aarch64_insn_gen_movewide(Rd, imm16, shift, \
		A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type)
/* Rd = Zeros (for MOVZ);
 * Rd |= imm16 << shift (where shift is {0, 16, 32, 48});
 * Rd = ~Rd; (for MOVN); */
#define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE)
#define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO)
#define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP)
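
/*
 * Illustrative sketch: materialising the 64-bit constant 0x12345678abcd0000
 * with one MOVZ plus MOVKs for the remaining non-zero 16-bit chunks.  The
 * destination register and constant are arbitrary choices for the example.
 */
static inline int a64_example_mov_imm64(u32 *buf)
{
	int i = 0;

	buf[i++] = A64_MOVZ(1, A64_R(7), 0xabcd, 16);	/* x7  = 0xabcd << 16 */
	buf[i++] = A64_MOVK(1, A64_R(7), 0x5678, 32);	/* x7 |= 0x5678 << 32 */
	buf[i++] = A64_MOVK(1, A64_R(7), 0x1234, 48);	/* x7 |= 0x1234 << 48 */
	return i;
}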

/* Add/subtract (shifted register) */
#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP Rm */
#define A64_ADD(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD)
#define A64_SUB(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB)
#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS)
/* Rd = -Rm */
#define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm)
/* Rn - Rm; set condition flags */
#define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm)
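
/*
 * Illustrative sketch: an unsigned "x7 >= x8" check that branches forward,
 * combining the flag-setting compare with the conditional branch helper
 * defined earlier.  The offset is in instructions, counted from the branch.
 */
static inline int a64_example_bound_check(u32 *buf)
{
	int i = 0;

	buf[i++] = A64_CMP(1, A64_R(7), A64_R(8));	/* subs xzr, x7, x8 */
	buf[i++] = A64_B_(A64_COND_CS, 3);		/* b.hs: taken if x7 >= x8 (unsigned) */
	return i;
}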

/* Data-processing (1 source) */
#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \
	A64_VARIANT(sf), AARCH64_INSN_DATA1_##type)
/* Rd = BSWAPx(Rn) */
#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16)
#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32)
#define A64_REV64(Rd, Rn)     A64_DATA1(1, Rd, Rn, REVERSE_64)

/* Data-processing (2 source) */
/* Rd = Rn OP Rm */
#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA2_##type)
#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV)
#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV)
#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV)
#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV)

/* Data-processing (3 source) */
/* Rd = Ra + Rn * Rm */
#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD)
/* Rd = Rn * Rm */
#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm)
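
/*
 * Illustrative sketch: an unsigned remainder (x7 = x7 % x8) built from the
 * usual divide-multiply-subtract sequence, clobbering x9.  Registers are
 * arbitrary choices for the example.
 */
static inline int a64_example_umod(u32 *buf)
{
	int i = 0;

	buf[i++] = A64_UDIV(1, A64_R(9), A64_R(7), A64_R(8));	/* x9 = x7 / x8 */
	buf[i++] = A64_MUL(1, A64_R(9), A64_R(9), A64_R(8));	/* x9 = x9 * x8 */
	buf[i++] = A64_SUB(1, A64_R(7), A64_R(7), A64_R(9));	/* x7 = x7 - x9 */
	return i;
}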

/* Logical (shifted register) */
#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type)
/* Rd = Rn OP Rm */
#define A64_AND(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND)
#define A64_ORR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR)
#define A64_EOR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR)
#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
/* Rn & Rm; set condition flags */
#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)

#endif /* _BPF_JIT_H */