/* AArch64 instruction-generation macro helpers (excerpt) */

/* 32-bit or 64-bit variant, selected by the sf flag */
#define A64_VARIANT(sf) \
	((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT)

/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
	aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP imm12 */
#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
#define A64_ADDS_I(sf, Rd, Rn, imm12) \
	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD_SETFLAGS)
#define A64_SUBS_I(sf, Rd, Rn, imm12) \
	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB_SETFLAGS)

/* Rd = Rn */
#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)

/* Bitfield move */
#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \
	aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \
		A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type)
/* Signed, with sign replication to left and zeros to right */
#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED)
/* Unsigned, with zeros to left and right */
#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED)

/* Rd = Rn << shift */
#define A64_LSL(sf, Rd, Rn, shift) ({	\
	int sz = (sf) ? 64 : 32;	\
	A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \
})
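/*
 * Illustrative note: LSL is an architectural alias of UBFM,
 * LSL Rd, Rn, #shift == UBFM Rd, Rn, #(-shift MOD sz), #(sz - 1 - shift).
 * For a 64-bit shift left by 8, A64_LSL(1, Rd, Rn, 8) therefore emits
 * UBFM Rd, Rn, #56, #55, since (unsigned)-8 % 64 = 56 and 63 - 8 = 55.
 */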
/* Rd = Rn >> shift */
#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
/* Rd = Rn >> shift; signed */
#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)

/* Zero extend */
#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)

/* Sign extend */
#define A64_SXTB(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 7)
#define A64_SXTH(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 15)
#define A64_SXTW(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 31)

/* Move wide (immediate) */
#define A64_MOVEW(sf, Rd, imm16, shift, type) \
	aarch64_insn_gen_movewide(Rd, imm16, shift, \
		A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type)
/* Rd = Zeros (for MOVZ);
 * Rd |= imm16 << shift (where shift is {0, 16, 32, 48});
 * Rd = ~Rd; (for MOVN); */
#define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE)
#define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO)
#define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP)
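/*
 * Illustrative sketch, not part of the original header: an arbitrary 64-bit
 * immediate can be materialized with one MOVZ for the low 16 bits plus one
 * MOVK per remaining 16-bit chunk. The helper name and the insns[] buffer
 * are hypothetical; the macros are the ones defined above.
 */
static inline void a64_sketch_mov_i64(u32 *insns, int rd, u64 imm)
{
	int shift;

	/* rd = low 16 bits, rest zeroed */
	insns[0] = A64_MOVZ(1, rd, imm & 0xffff, 0);
	/* patch in the remaining chunks, keeping the bits already set */
	for (shift = 16; shift < 64; shift += 16)
		insns[shift / 16] = A64_MOVK(1, rd, (imm >> shift) & 0xffff, shift);
}
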
/* Add/subtract (shifted register) */
#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP Rm */
#define A64_ADD(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD)
#define A64_SUB(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB)
#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS)
/* Rd = -Rm */
#define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm)
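/*
 * Illustrative note: with the zero register as destination, SUBS acts as a
 * compare, e.g. A64_SUBS(sf, A64_ZR, Rn, Rm) encodes CMP Rn, Rm.
 */
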
/* Data-processing (1 source) */
#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \
	A64_VARIANT(sf), AARCH64_INSN_DATA1_##type)
/* Rd = BSWAPx(Rn) */
#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16)
#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32)
#define A64_REV64(Rd, Rn)     A64_DATA1(1, Rd, Rn, REVERSE_64)

/* Data-processing (2 source) */
/* Rd = Rn OP Rm */
#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA2_##type)
#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV)
#define A64_SDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, SDIV)
#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV)
#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV)
#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV)

/* Data-processing (3 source) */
/* Rd = Ra + Rn * Rm */
#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD)
/* Rd = Ra - Rn * Rm */
#define A64_MSUB(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA3_MSUB)
/* Rd = Rn * Rm */
#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm)
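/*
 * Illustrative sketch, not part of the original header: AArch64 has no
 * remainder instruction, so UDIV and MSUB compose into one via the
 * Rd = Ra - Rn * Rm form above. The helper and register names are
 * hypothetical.
 */
static inline void a64_sketch_urem(u32 *insns, int rd, int rn, int rm, int tmp)
{
	insns[0] = A64_UDIV(1, tmp, rn, rm);		/* tmp = rn / rm */
	insns[1] = A64_MSUB(1, rd, rn, tmp, rm);	/* rd = rn - tmp * rm */
}
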
/* Logical (shifted register) */
#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type)
/* Rd = Rn OP Rm */
#define A64_AND(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND)
#define A64_ORR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR)
#define A64_EOR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR)
#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
/* Rd = ~Rm (alias of ORN with A64_ZR as Rn) */
#define A64_MVN(sf, Rd, Rm)  \
	A64_LOGIC_SREG(sf, Rd, A64_ZR, Rm, ORN)

/* Logical (immediate) */
#define A64_LOGIC_IMM(sf, Rd, Rn, imm, type) ({ \
	u64 imm64 = (sf) ? (u64)imm : (u64)(u32)imm; \
	aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_##type, \
		A64_VARIANT(sf), Rn, Rd, imm64); \
})
/* Rd = Rn OP imm */
#define A64_AND_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND)
#define A64_ORR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, ORR)
#define A64_EOR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, EOR)
#define A64_ANDS_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND_SETFLAGS)
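/*
 * Illustrative note: logical-immediate instructions only accept "bitmask
 * immediates" (rotated, replicated runs of set bits), so a value such as
 * 0x0f0f0f0f0f0f0f0f is encodable while 0xdeadbeef is not; unencodable
 * values have to be built in a register first (e.g. via MOVZ/MOVK above).
 */
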
/* ADR; Rd = PC-relative address */
#define A64_ADR(Rd, offset) \
	aarch64_insn_gen_adr(0, offset, Rd, AARCH64_INSN_ADR_TYPE_ADR)