/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#include <asm/insn.h>

/* 5-bit Register Operand */
#define A64_R(x)	AARCH64_INSN_REG_##x
#define A64_FP		AARCH64_INSN_REG_FP
#define A64_LR		AARCH64_INSN_REG_LR
#define A64_ZR		AARCH64_INSN_REG_ZR
#define A64_SP		AARCH64_INSN_REG_SP

#define A64_VARIANT(sf) \
	((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT)

/* Compare & branch (immediate) */
#define A64_COMP_BRANCH(sf, Rt, offset, type) \
	aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
		AARCH64_INSN_BRANCH_COMP_##type)
#define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)

/* Conditional branch (immediate) */
#define A64_COND_BRANCH(cond, offset) \
	aarch64_insn_gen_cond_branch_imm(0, offset, cond)
#define A64_COND_EQ	AARCH64_INSN_COND_EQ /* == */
#define A64_COND_NE	AARCH64_INSN_COND_NE /* != */
#define A64_COND_CS	AARCH64_INSN_COND_CS /* unsigned >= */
#define A64_COND_HI	AARCH64_INSN_COND_HI /* unsigned > */
#define A64_COND_LS	AARCH64_INSN_COND_LS /* unsigned <= */
#define A64_COND_CC	AARCH64_INSN_COND_CC /* unsigned < */
#define A64_COND_GE	AARCH64_INSN_COND_GE /* signed >= */
#define A64_COND_GT	AARCH64_INSN_COND_GT /* signed > */
#define A64_COND_LE	AARCH64_INSN_COND_LE /* signed <= */
#define A64_COND_LT	AARCH64_INSN_COND_LT /* signed < */
#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)
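
/*
 * Illustrative use (not part of the interface): branch offsets are given
 * in instructions and shifted left by 2 to form the byte offset. For
 * example, the pair
 *
 *	A64_CMP(1, Rn, Rm);		// subs xzr, x_Rn, x_Rm
 *	A64_B_(A64_COND_CS, off);	// b.hs <pc + off * 4>
 *
 * is the kind of compare-and-branch sequence a JIT would emit for an
 * unsigned ">=" jump. A64_CMP is defined further down in this file.
 */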

/* Unconditional branch (immediate) */
#define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \
	AARCH64_INSN_BRANCH_##type)
#define A64_B(imm26)  A64_BRANCH((imm26) << 2, NOLINK)
#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK)

/* Unconditional branch (register) */
#define A64_BR(Rn)  aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_NOLINK)
#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)
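
/*
 * Illustrative use (not part of the interface): A64_BLR(Rn) encodes
 * "blr x_Rn" for an indirect call through a register, and A64_RET(A64_LR)
 * encodes the plain "ret" (return via x30) such as a JIT epilogue would
 * emit.
 */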

/* Load/store register (register offset) */
#define A64_LS_REG(Rt, Rn, Rm, size, type) \
	aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \
		AARCH64_INSN_SIZE_##size, \
		AARCH64_INSN_LDST_##type##_REG_OFFSET)
#define A64_STRB(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 8, STORE)
#define A64_LDRB(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
#define A64_LDRSB(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 8, SIGNED_LOAD)
#define A64_STRH(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 16, STORE)
#define A64_LDRH(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
#define A64_LDRSH(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 16, SIGNED_LOAD)
#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
#define A64_LDRSW(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 32, SIGNED_LOAD)
#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)

/* Load/store register (immediate offset) */
#define A64_LS_IMM(Rt, Rn, imm, size, type) \
	aarch64_insn_gen_load_store_imm(Rt, Rn, imm, \
		AARCH64_INSN_SIZE_##size, \
		AARCH64_INSN_LDST_##type##_IMM_OFFSET)
#define A64_STRBI(Wt, Xn, imm)  A64_LS_IMM(Wt, Xn, imm, 8, STORE)
#define A64_LDRBI(Wt, Xn, imm)  A64_LS_IMM(Wt, Xn, imm, 8, LOAD)
#define A64_LDRSBI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 8, SIGNED_LOAD)
#define A64_STRHI(Wt, Xn, imm)  A64_LS_IMM(Wt, Xn, imm, 16, STORE)
#define A64_LDRHI(Wt, Xn, imm)  A64_LS_IMM(Wt, Xn, imm, 16, LOAD)
#define A64_LDRSHI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 16, SIGNED_LOAD)
#define A64_STR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, STORE)
#define A64_LDR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, LOAD)
#define A64_LDRSWI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 32, SIGNED_LOAD)
#define A64_STR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, STORE)
#define A64_LDR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, LOAD)

/* LDR (literal) */
#define A64_LDR32LIT(Wt, offset) \
	aarch64_insn_gen_load_literal(0, offset, Wt, false)
#define A64_LDR64LIT(Xt, offset) \
	aarch64_insn_gen_load_literal(0, offset, Xt, true)

/* Load/store register pair */
#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
	aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
		AARCH64_INSN_VARIANT_64BIT, \
		AARCH64_INSN_LDST_##ls##_PAIR_##type)
/* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */
#define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX)
/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
#define A64_POP(Rt, Rt2, Rn)  A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)
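
/*
 * Illustrative use (not part of the interface): a stack frame is
 * typically saved and restored with
 *
 *	A64_PUSH(A64_FP, A64_LR, A64_SP);	// stp x29, x30, [sp, #-16]!
 *	...
 *	A64_POP(A64_FP, A64_LR, A64_SP);	// ldp x29, x30, [sp], #16
 *
 * as a JIT prologue/epilogue would do.
 */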

/* Load/store exclusive */
#define A64_SIZE(sf) \
	((sf) ? AARCH64_INSN_SIZE_64 : AARCH64_INSN_SIZE_32)
#define A64_LSX(sf, Rt, Rn, Rs, type) \
	aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
				       AARCH64_INSN_LDST_##type)
/* Rt = [Rn]; (atomic) */
#define A64_LDXR(sf, Rt, Rn) \
	A64_LSX(sf, Rt, Rn, A64_ZR, LOAD_EX)
/* [Rn] = Rt; (atomic) Rs = [state] */
#define A64_STXR(sf, Rt, Rn, Rs) \
	A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
/* [Rn] = Rt (store release); (atomic) Rs = [state] */
#define A64_STLXR(sf, Rt, Rn, Rs) \
	aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
				       AARCH64_INSN_LDST_STORE_REL_EX)
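
/*
 * Minimal sketch of an LL/SC retry loop (illustrative only; tmp, addr,
 * val and state stand for caller-chosen registers). An atomic
 * "*addr += val" without LSE atomics can be built as:
 *
 *	A64_LDXR(1, tmp, addr);		// ldxr  x_tmp, [x_addr]
 *	A64_ADD(1, tmp, tmp, val);	// add   x_tmp, x_tmp, x_val
 *	A64_STXR(1, tmp, addr, state);	// stxr  w_state, x_tmp, [x_addr]
 *	A64_CBNZ(0, state, -3);		// retry if the store failed
 *
 * A64_ADD is defined further down in this file.
 */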

/* Load-acquire & store-release */
#define A64_LDAR(Rt, Rn, size)  \
	aarch64_insn_gen_load_acq_store_rel(Rt, Rn, AARCH64_INSN_SIZE_##size, \
					    AARCH64_INSN_LDST_LOAD_ACQ)
#define A64_STLR(Rt, Rn, size)  \
	aarch64_insn_gen_load_acq_store_rel(Rt, Rn, AARCH64_INSN_SIZE_##size, \
					    AARCH64_INSN_LDST_STORE_REL)

/* Rt = [Rn] (load acquire) */
#define A64_LDARB(Wt, Xn)	A64_LDAR(Wt, Xn, 8)
#define A64_LDARH(Wt, Xn)	A64_LDAR(Wt, Xn, 16)
#define A64_LDAR32(Wt, Xn)	A64_LDAR(Wt, Xn, 32)
#define A64_LDAR64(Xt, Xn)	A64_LDAR(Xt, Xn, 64)

/* [Rn] = Rt (store release) */
#define A64_STLRB(Wt, Xn)	A64_STLR(Wt, Xn, 8)
#define A64_STLRH(Wt, Xn)	A64_STLR(Wt, Xn, 16)
#define A64_STLR32(Wt, Xn)	A64_STLR(Wt, Xn, 32)
#define A64_STLR64(Xt, Xn)	A64_STLR(Xt, Xn, 64)
/*
 * LSE atomics
 *
 * ST{ADD,CLR,SET,EOR} is simply encoded as an alias for
 * LD{ADD,CLR,SET,EOR} with XZR as the destination register.
 */
#define A64_ST_OP(sf, Rn, Rs, op) \
	aarch64_insn_gen_atomic_ld_op(A64_ZR, Rn, Rs, \
		A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
		AARCH64_INSN_MEM_ORDER_NONE)
/* [Rn] <op>= Rs */
#define A64_STADD(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, ADD)
#define A64_STCLR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, CLR)
#define A64_STEOR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, EOR)
#define A64_STSET(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, SET)

#define A64_LD_OP_AL(sf, Rt, Rn, Rs, op) \
	aarch64_insn_gen_atomic_ld_op(Rt, Rn, Rs, \
		A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
		AARCH64_INSN_MEM_ORDER_ACQREL)
/* Rt = [Rn] (load acquire); [Rn] <op>= Rs (store release) */
#define A64_LDADDAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, ADD)
#define A64_LDCLRAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, CLR)
#define A64_LDEORAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, EOR)
#define A64_LDSETAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SET)
/* Rt = [Rn] (load acquire); [Rn] = Rs (store release) */
#define A64_SWPAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SWP)
/* Rs = CAS(Rn, Rs, Rt) (load acquire & store release) */
#define A64_CASAL(sf, Rt, Rn, Rs) \
	aarch64_insn_gen_cas(Rt, Rn, Rs, A64_SIZE(sf), \
		AARCH64_INSN_MEM_ORDER_ACQREL)
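
/*
 * Illustrative use (not part of the interface): with LSE atomics
 * available, the LL/SC loop sketched above collapses to a single
 * instruction, e.g.
 *
 *	A64_STADD(1, addr, val);	// stadd x_val, [x_addr] : *addr += val
 *
 * and A64_LDADDAL additionally returns the old value with
 * acquire/release ordering.
 */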

/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
	aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP imm12 */
#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
#define A64_ADDS_I(sf, Rd, Rn, imm12) \
	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD_SETFLAGS)
#define A64_SUBS_I(sf, Rd, Rn, imm12) \
	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB_SETFLAGS)
/* Rn + imm12; set condition flags */
#define A64_CMN_I(sf, Rn, imm12) A64_ADDS_I(sf, A64_ZR, Rn, imm12)
/* Rn - imm12; set condition flags */
#define A64_CMP_I(sf, Rn, imm12) A64_SUBS_I(sf, A64_ZR, Rn, imm12)
/* Rd = Rn */
#define A64_MOV(sf, Rd, Rn) \
	(((Rd) == A64_SP || (Rn) == A64_SP) ? A64_ADD_I(sf, Rd, Rn, 0) : \
	 aarch64_insn_gen_move_reg(Rd, Rn, A64_VARIANT(sf)))
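
/*
 * Note on A64_MOV (illustrative): register number 31 encodes XZR in the
 * register-move (ORR) form, so moves to or from the stack pointer must
 * use "add Rd, Rn, #0" instead. A64_MOV(1, A64_FP, A64_SP) therefore
 * encodes "mov x29, sp" as "add x29, sp, #0".
 */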

/* Bitfield move */
#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \
	aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \
		A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type)
/* Signed, with sign replication to left and zeros to right */
#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED)
/* Unsigned, with zeros to left and right */
#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED)

/* Rd = Rn << shift */
#define A64_LSL(sf, Rd, Rn, shift) ({	\
	int sz = (sf) ? 64 : 32;	\
	A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \
})
/* Rd = Rn >> shift */
#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
/* Rd = Rn >> shift; signed */
#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
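
/*
 * Worked example (illustrative): the shifts above are the standard
 * UBFM/SBFM aliases. For A64_LSL(1, Rd, Rn, 8), sz = 64, so
 * immr = (unsigned)-8 % 64 = 56 and imms = 64 - 1 - 8 = 55, giving
 * "ubfm Rd, Rn, #56, #55", i.e. "lsl Rd, Rn, #8".
 */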

/* Zero extend */
#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)

/* Sign extend */
#define A64_SXTB(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 7)
#define A64_SXTH(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 15)
#define A64_SXTW(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 31)

/* Move wide (immediate) */
#define A64_MOVEW(sf, Rd, imm16, shift, type) \
	aarch64_insn_gen_movewide(Rd, imm16, shift, \
		A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type)
/* Rd = Zeros (for MOVZ);
 * Rd |= imm16 << shift (where shift is {0, 16, 32, 48});
 * Rd = ~Rd; (for MOVN); */
#define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE)
#define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO)
#define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP)
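
/*
 * Illustrative use (not part of the interface): an arbitrary 64-bit
 * constant is materialized 16 bits at a time, e.g. for
 * 0x1234567812345678:
 *
 *	A64_MOVZ(1, Rd, 0x5678, 0);
 *	A64_MOVK(1, Rd, 0x1234, 16);
 *	A64_MOVK(1, Rd, 0x5678, 32);
 *	A64_MOVK(1, Rd, 0x1234, 48);
 *
 * (in practice fewer instructions suffice when 16-bit chunks are zero
 * or the value is better expressed via MOVN).
 */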

/* Add/subtract (shifted register) */
#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP Rm */
#define A64_ADD(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD)
#define A64_SUB(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB)
#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS)
/* Rd = -Rm */
#define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm)
/* Rn - Rm; set condition flags */
#define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm)

/* Data-processing (1 source) */
#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \
	A64_VARIANT(sf), AARCH64_INSN_DATA1_##type)
/* Rd = BSWAPx(Rn) */
#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16)
#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32)
#define A64_REV64(Rd, Rn)     A64_DATA1(1, Rd, Rn, REVERSE_64)

/* Data-processing (2 source) */
/* Rd = Rn OP Rm */
#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA2_##type)
#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV)
#define A64_SDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, SDIV)
#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV)
#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV)
#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV)

/* Data-processing (3 source) */
/* Rd = Ra + Rn * Rm */
#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD)
/* Rd = Ra - Rn * Rm */
#define A64_MSUB(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA3_MSUB)
/* Rd = Rn * Rm */
#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm)
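
/*
 * Illustrative use (not part of the interface): AArch64 has no modulo
 * instruction, so an unsigned remainder is usually synthesized from a
 * divide and a multiply-subtract (tmp is a caller-chosen scratch
 * register):
 *
 *	A64_UDIV(1, tmp, Rn, Rm);	// tmp = Rn / Rm
 *	A64_MSUB(1, Rd, Rn, tmp, Rm);	// Rd = Rn - tmp * Rm
 */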

/* Logical (shifted register) */
#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type)
/* Rd = Rn OP Rm */
#define A64_AND(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND)
#define A64_ORR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR)
#define A64_EOR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR)
#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
/* Rn & Rm; set condition flags */
#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
/* Rd = ~Rm (alias of ORN with A64_ZR as Rn) */
#define A64_MVN(sf, Rd, Rm)  \
	A64_LOGIC_SREG(sf, Rd, A64_ZR, Rm, ORN)

/* Logical (immediate) */
#define A64_LOGIC_IMM(sf, Rd, Rn, imm, type) ({ \
	u64 imm64 = (sf) ? (u64)imm : (u64)(u32)imm; \
	aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_##type, \
		A64_VARIANT(sf), Rn, Rd, imm64); \
})
/* Rd = Rn OP imm */
#define A64_AND_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND)
#define A64_ORR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, ORR)
#define A64_EOR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, EOR)
#define A64_ANDS_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND_SETFLAGS)
/* Rn & imm; set condition flags */
#define A64_TST_I(sf, Rn, imm) A64_ANDS_I(sf, A64_ZR, Rn, imm)
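
/*
 * Note (illustrative): only immediates that can be encoded as an
 * AArch64 bitmask pattern are representable here. When the value cannot
 * be encoded the generator returns AARCH64_BREAK_FAULT, so callers
 * typically check the result and fall back to loading the immediate
 * into a scratch register and using the register form instead.
 */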

/* HINTs */
#define A64_HINT(x) aarch64_insn_gen_hint(x)

#define A64_PACIASP A64_HINT(AARCH64_INSN_HINT_PACIASP)
#define A64_AUTIASP A64_HINT(AARCH64_INSN_HINT_AUTIASP)

/* BTI */
#define A64_BTI_C  A64_HINT(AARCH64_INSN_HINT_BTIC)
#define A64_BTI_J  A64_HINT(AARCH64_INSN_HINT_BTIJ)
#define A64_BTI_JC A64_HINT(AARCH64_INSN_HINT_BTIJC)
#define A64_NOP    A64_HINT(AARCH64_INSN_HINT_NOP)

/* DMB */
#define A64_DMB_ISH aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH)

/* ADR */
#define A64_ADR(Rd, offset) \
	aarch64_insn_gen_adr(0, offset, Rd, AARCH64_INSN_ADR_TYPE_ADR)

/* MRS */
#define A64_MRS_TPIDR_EL1(Rt) \
	aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_TPIDR_EL1)
#define A64_MRS_TPIDR_EL2(Rt) \
	aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_TPIDR_EL2)
#define A64_MRS_SP_EL0(Rt) \
	aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_SP_EL0)

/* Barriers */
#define A64_SB aarch64_insn_get_sb_value()
#define A64_DSB_NSH (aarch64_insn_get_dsb_base_value() | 0x7 << 8)
#define A64_ISB aarch64_insn_get_isb_value()

#endif /* _BPF_JIT_H */