xref: /freebsd/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
1//===--- arm_neon.td - ARM NEON compiler interface ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9//  This file defines the TableGen definitions from which the ARM NEON header
10//  file will be generated.  See ARM document DUI0348B.
11//
12//===----------------------------------------------------------------------===//
13
14include "arm_neon_incl.td"
15
// Op records: each describes how a pseudo-intrinsic expands in terms of C
// operators, calls to other intrinsics, splats/dups and shuffles.  $p0..$p3
// name the intrinsic's parameters; mask0/mask1 are shuffle index vectors.
def OP_ADD      : Op<(op "+", $p0, $p1)>;
def OP_ADDL     : Op<(op "+", (call "vmovl", $p0), (call "vmovl", $p1))>;
def OP_ADDLHi   : Op<(op "+", (call "vmovl_high", $p0),
                              (call "vmovl_high", $p1))>;
def OP_ADDW     : Op<(op "+", $p0, (call "vmovl", $p1))>;
def OP_ADDWHi   : Op<(op "+", $p0, (call "vmovl_high", $p1))>;
def OP_SUB      : Op<(op "-", $p0, $p1)>;
def OP_SUBL     : Op<(op "-", (call "vmovl", $p0), (call "vmovl", $p1))>;
def OP_SUBLHi   : Op<(op "-", (call "vmovl_high", $p0),
                              (call "vmovl_high", $p1))>;
def OP_SUBW     : Op<(op "-", $p0, (call "vmovl", $p1))>;
def OP_SUBWHi   : Op<(op "-", $p0, (call "vmovl_high", $p1))>;
def OP_MUL      : Op<(op "*", $p0, $p1)>;
def OP_MLA      : Op<(op "+", $p0, (op "*", $p1, $p2))>;
def OP_MLAL     : Op<(op "+", $p0, (call "vmull", $p1, $p2))>;
def OP_MULLHi   : Op<(call "vmull", (call "vget_high", $p0),
                                    (call "vget_high", $p1))>;
def OP_MULLHi_P64 : Op<(call "vmull",
                         (cast "poly64_t", (call "vget_high", $p0)),
                         (cast "poly64_t", (call "vget_high", $p1)))>;
def OP_MULLHi_N : Op<(call "vmull_n", (call "vget_high", $p0), $p1)>;
def OP_MLALHi   : Op<(call "vmlal", $p0, (call "vget_high", $p1),
                                         (call "vget_high", $p2))>;
def OP_MLALHi_N : Op<(call "vmlal_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_MLS      : Op<(op "-", $p0, (op "*", $p1, $p2))>;
def OP_FMLS     : Op<(call "vfma", $p0, (op "-", $p1), $p2)>;
def OP_MLSL     : Op<(op "-", $p0, (call "vmull", $p1, $p2))>;
def OP_MLSLHi   : Op<(call "vmlsl", $p0, (call "vget_high", $p1),
                                         (call "vget_high", $p2))>;
def OP_MLSLHi_N : Op<(call "vmlsl_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_MUL_N    : Op<(op "*", $p0, (dup $p1))>;
def OP_MULX_N   : Op<(call "vmulx", $p0, (dup $p1))>;
def OP_MLA_N    : Op<(op "+", $p0, (op "*", $p1, (dup $p2)))>;
def OP_MLS_N    : Op<(op "-", $p0, (op "*", $p1, (dup $p2)))>;
def OP_FMLA_N   : Op<(call "vfma", $p0, $p1, (dup $p2))>;
def OP_FMLS_N   : Op<(call "vfma", $p0, (op "-", $p1), (dup $p2))>;
def OP_MLAL_N   : Op<(op "+", $p0, (call "vmull", $p1, (dup $p2)))>;
def OP_MLSL_N   : Op<(op "-", $p0, (call "vmull", $p1, (dup $p2)))>;
def OP_MUL_LN   : Op<(op "*", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_MULX_LN  : Op<(call "vmulx", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_MULL_N  : Op<(call "vmull", $p0, (dup $p1))>;
def OP_MULL_LN  : Op<(call "vmull", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_MULLHi_LN: Op<(call "vmull", (call "vget_high", $p0), (call_mangled "splat_lane", $p1, $p2))>;
def OP_MLA_LN   : Op<(op "+", $p0, (op "*", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLS_LN   : Op<(op "-", $p0, (op "*", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLAL_LN  : Op<(op "+", $p0, (call "vmull", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLALHi_LN: Op<(op "+", $p0, (call "vmull", (call "vget_high", $p1),
                                                  (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLSL_LN  : Op<(op "-", $p0, (call "vmull", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLSLHi_LN : Op<(op "-", $p0, (call "vmull", (call "vget_high", $p1),
                                                   (call_mangled "splat_lane", $p2, $p3)))>;
def OP_QDMULL_N : Op<(call "vqdmull", $p0, (dup $p1))>;
def OP_QDMULL_LN : Op<(call "vqdmull", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QDMULLHi_LN : Op<(call "vqdmull", (call "vget_high", $p0),
                                         (call_mangled "splat_lane", $p1, $p2))>;
def OP_QDMLAL_N : Op<(call "vqdmlal", $p0, $p1, (dup $p2))>;
def OP_QDMLAL_LN : Op<(call "vqdmlal", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLALHi_LN : Op<(call "vqdmlal", $p0, (call "vget_high", $p1),
                                              (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLSL_N : Op<(call "vqdmlsl", $p0, $p1, (dup $p2))>;
def OP_QDMLSL_LN : Op<(call "vqdmlsl", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLSLHi_LN : Op<(call "vqdmlsl", $p0, (call "vget_high", $p1),
                                              (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMULH_N : Op<(call "vqdmulh", $p0, (dup $p1))>;
def OP_QDMULH_LN : Op<(call "vqdmulh", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QRDMULH_LN : Op<(call "vqrdmulh", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QRDMULH_N : Op<(call "vqrdmulh", $p0, (dup $p1))>;
def OP_QRDMLAH_LN : Op<(call "vqrdmlah", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_QRDMLSH_LN : Op<(call "vqrdmlsh", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_FMS_LN   : Op<(call "vfma_lane", $p0, (op "-", $p1), $p2, $p3)>;
def OP_FMS_LNQ  : Op<(call "vfma_laneq", $p0, (op "-", $p1), $p2, $p3)>;
// Permutation ops built from shuffles over the concatenated mask vectors.
def OP_TRN1     : Op<(shuffle $p0, $p1, (interleave (decimate mask0, 2),
                                                    (decimate mask1, 2)))>;
def OP_ZIP1     : Op<(shuffle $p0, $p1, (lowhalf (interleave mask0, mask1)))>;
def OP_UZP1     : Op<(shuffle $p0, $p1, (add (decimate mask0, 2),
                                             (decimate mask1, 2)))>;
def OP_TRN2     : Op<(shuffle $p0, $p1, (interleave
                                          (decimate (rotl mask0, 1), 2),
                                          (decimate (rotl mask1, 1), 2)))>;
def OP_ZIP2     : Op<(shuffle $p0, $p1, (highhalf (interleave mask0, mask1)))>;
def OP_UZP2     : Op<(shuffle $p0, $p1, (add (decimate (rotl mask0, 1), 2),
                                             (decimate (rotl mask1, 1), 2)))>;
// Comparison ops: the result is computed with the C operator and then cast to
// the intrinsic's return type "R" (an unsigned vector of the same width).
def OP_EQ       : Op<(cast "R", (op "==", $p0, $p1))>;
def OP_GE       : Op<(cast "R", (op ">=", $p0, $p1))>;
def OP_LE       : Op<(cast "R", (op "<=", $p0, $p1))>;
def OP_GT       : Op<(cast "R", (op ">", $p0, $p1))>;
def OP_LT       : Op<(cast "R", (op "<", $p0, $p1))>;
def OP_NEG      : Op<(op "-", $p0)>;
def OP_NOT      : Op<(op "~", $p0)>;
def OP_AND      : Op<(op "&", $p0, $p1)>;
def OP_OR       : Op<(op "|", $p0, $p1)>;
def OP_XOR      : Op<(op "^", $p0, $p1)>;
def OP_ANDN     : Op<(op "&", $p0, (op "~", $p1))>;
def OP_ORN      : Op<(op "|", $p0, (op "~", $p1))>;
def OP_CAST     : LOp<[(save_temp $promote, $p0),
                       (cast "R", $promote)]>;
def OP_HI       : Op<(shuffle $p0, $p0, (highhalf mask0))>;
def OP_LO       : Op<(shuffle $p0, $p0, (lowhalf mask0))>;
def OP_CONC     : Op<(shuffle $p0, $p1, (add mask0, mask1))>;
def OP_DUP      : Op<(dup $p0)>;
def OP_DUP_LN   : Op<(call_mangled "splat_lane", $p0, $p1)>;
// Bitwise select: (p0 & p1) | (~p0 & p2), with operands cast to p0's type.
def OP_SEL      : Op<(cast "R", (op "|",
                                    (op "&", $p0, (cast $p0, $p1)),
                                    (op "&", (op "~", $p0), (cast $p0, $p2))))>;
def OP_REV16    : Op<(shuffle $p0, $p0, (rev 16, mask0))>;
def OP_REV32    : Op<(shuffle $p0, $p0, (rev 32, mask0))>;
def OP_REV64    : Op<(shuffle $p0, $p0, (rev 64, mask0))>;
def OP_XTN      : Op<(call "vcombine", $p0, (call "vmovn", $p1))>;
def OP_SQXTUN   : Op<(call "vcombine", (cast $p0, "U", $p0),
                                       (call "vqmovun", $p1))>;
def OP_QXTN     : Op<(call "vcombine", $p0, (call "vqmovn", $p1))>;
def OP_VCVT_NA_HI_F16 : Op<(call "vcombine", $p0, (call "vcvt_f16_f32", $p1))>;
def OP_VCVT_NA_HI_F32 : Op<(call "vcombine", $p0, (call "vcvt_f32_f64", $p1))>;
def OP_VCVT_EX_HI_F32 : Op<(call "vcvt_f32_f16", (call "vget_high", $p0))>;
def OP_VCVT_EX_HI_F64 : Op<(call "vcvt_f64_f32", (call "vget_high", $p0))>;
def OP_VCVTX_HI : Op<(call "vcombine", $p0, (call "vcvtx_f32", $p1))>;
def OP_REINT    : Op<(cast "R", $p0)>;
def OP_ADDHNHi  : Op<(call "vcombine", $p0, (call "vaddhn", $p1, $p2))>;
def OP_RADDHNHi : Op<(call "vcombine", $p0, (call "vraddhn", $p1, $p2))>;
def OP_SUBHNHi  : Op<(call "vcombine", $p0, (call "vsubhn", $p1, $p2))>;
def OP_RSUBHNHi : Op<(call "vcombine", $p0, (call "vrsubhn", $p1, $p2))>;
def OP_ABDL     : Op<(cast "R", (call "vmovl", (cast $p0, "U",
                                                     (call "vabd", $p0, $p1))))>;
def OP_ABDLHi   : Op<(call "vabdl", (call "vget_high", $p0),
                                    (call "vget_high", $p1))>;
def OP_ABA      : Op<(op "+", $p0, (call "vabd", $p1, $p2))>;
def OP_ABAL     : Op<(op "+", $p0, (call "vabdl", $p1, $p2))>;
def OP_ABALHi   : Op<(call "vabal", $p0, (call "vget_high", $p1),
                                       (call "vget_high", $p2))>;
def OP_QDMULLHi : Op<(call "vqdmull", (call "vget_high", $p0),
                                      (call "vget_high", $p1))>;
def OP_QDMULLHi_N : Op<(call "vqdmull_n", (call "vget_high", $p0), $p1)>;
def OP_QDMLALHi : Op<(call "vqdmlal", $p0, (call "vget_high", $p1),
                                           (call "vget_high", $p2))>;
def OP_QDMLALHi_N : Op<(call "vqdmlal_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_QDMLSLHi : Op<(call "vqdmlsl", $p0, (call "vget_high", $p1),
                                           (call "vget_high", $p2))>;
def OP_QDMLSLHi_N : Op<(call "vqdmlsl_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_DIV  : Op<(op "/", $p0, $p1)>;
// "_high_" variants: drop the infix via name_replace and apply the base
// intrinsic to the high half of the wide operand.
def OP_LONG_HI : Op<(cast "R", (call (name_replace "_high_", "_"),
                                                (call "vget_high", $p0), $p1))>;
def OP_NARROW_HI : Op<(cast "R", (call "vcombine",
                                       (cast "R", "H", $p0),
                                       (cast "R", "H",
                                           (call (name_replace "_high_", "_"),
                                                 $p1, $p2))))>;
def OP_MOVL_HI  : LOp<[(save_temp $a1, (call "vget_high", $p0)),
                       (cast "R",
                            (call "vshll_n", $a1, (literal "int32_t", "0")))]>;
def OP_COPY_LN : Op<(call "vset_lane", (call "vget_lane", $p2, $p3), $p0, $p1)>;
def OP_SCALAR_MUL_LN : Op<(op "*", $p0, (call "vget_lane", $p1, $p2))>;
def OP_SCALAR_MULX_LN : Op<(call "vmulx", $p0, (call "vget_lane", $p1, $p2))>;
def OP_SCALAR_VMULX_LN : LOp<[(save_temp $x, (call "vget_lane", $p0,
                                                    (literal "int32_t", "0"))),
                              (save_temp $y, (call "vget_lane", $p1, $p2)),
                              (save_temp $z, (call "vmulx", $x, $y)),
                              (call "vset_lane", $z, $p0, $p2)]>;
def OP_SCALAR_VMULX_LNQ : LOp<[(save_temp $x, (call "vget_lane", $p0,
                                                     (literal "int32_t", "0"))),
                               (save_temp $y, (call "vget_lane", $p1, $p2)),
                               (save_temp $z, (call "vmulx", $x, $y)),
                               (call "vset_lane", $z, $p0, (literal "int32_t",
                                                                     "0"))]>;
// Helper class: multiply-style op taking a scalar from lane $p2 of $p1.
class ScalarMulOp<string opname> :
  Op<(call opname, $p0, (call "vget_lane", $p1, $p2))>;

def OP_SCALAR_QDMULL_LN : ScalarMulOp<"vqdmull">;
def OP_SCALAR_QDMULH_LN : ScalarMulOp<"vqdmulh">;
def OP_SCALAR_QRDMULH_LN : ScalarMulOp<"vqrdmulh">;

def OP_SCALAR_QRDMLAH_LN : Op<(call "vqrdmlah", $p0, $p1,
                                (call "vget_lane", $p2, $p3))>;
def OP_SCALAR_QRDMLSH_LN : Op<(call "vqrdmlsh", $p0, $p1,
                                (call "vget_lane", $p2, $p3))>;

// fp16 lane get/set: performed on int16 vectors and bitcast back, so no
// floating-point conversion is involved.
def OP_SCALAR_HALF_GET_LN : Op<(bitcast "float16_t",
                                   (call "vget_lane",
                                         (bitcast "int16x4_t", $p0), $p1))>;
def OP_SCALAR_HALF_GET_LNQ : Op<(bitcast "float16_t",
                                    (call "vget_lane",
                                          (bitcast "int16x8_t", $p0), $p1))>;
def OP_SCALAR_HALF_SET_LN : Op<(bitcast "float16x4_t",
                                   (call "vset_lane",
                                         (bitcast "int16_t", $p0),
                                         (bitcast "int16x4_t", $p1), $p2))>;
def OP_SCALAR_HALF_SET_LNQ : Op<(bitcast "float16x8_t",
                                    (call "vset_lane",
                                          (bitcast "int16_t", $p0),
                                          (bitcast "int16x8_t", $p1), $p2))>;
205
// Dot-product and fused-multiply long ops with a lane-selected operand: the
// lane vector is splatted (as 32-bit elements) and bitcast to match $p1.
def OP_DOT_LN
    : Op<(call "vdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "32", $p2), $p3)))>;
def OP_DOT_LNQ
    : Op<(call "vdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "32", $p2), $p3)))>;

def OP_FMLAL_LN     : Op<(call "vfmlal_low", $p0, $p1,
                           (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
def OP_FMLSL_LN     : Op<(call "vfmlsl_low", $p0, $p1,
                           (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
def OP_FMLAL_LN_Hi  : Op<(call "vfmlal_high", $p0, $p1,
                           (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
def OP_FMLSL_LN_Hi  : Op<(call "vfmlsl_high", $p0, $p1,
                           (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;

def OP_USDOT_LN
    : Op<(call "vusdot", $p0, $p1,
          (cast "8", "S", (call_mangled "splat_lane", (bitcast "int32x2_t", $p2), $p3)))>;
def OP_USDOT_LNQ
    : Op<(call "vusdot", $p0, $p1,
          (cast "8", "S", (call_mangled "splat_lane", (bitcast "int32x4_t", $p2), $p3)))>;

// sudot splats the second vector and then calls vusdot
def OP_SUDOT_LN
    : Op<(call "vusdot", $p0,
          (cast "8", "U", (call_mangled "splat_lane", (bitcast "int32x2_t", $p2), $p3)), $p1)>;
def OP_SUDOT_LNQ
    : Op<(call "vusdot", $p0,
          (cast "8", "U", (call_mangled "splat_lane", (bitcast "int32x4_t", $p2), $p3)), $p1)>;

def OP_BFDOT_LN
    : Op<(call "vbfdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "float32x2_t", $p2), $p3)))>;

def OP_BFDOT_LNQ
    : Op<(call "vbfdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "float32x4_t", $p2), $p3)))>;

def OP_BFMLALB_LN
    : Op<(call "vbfmlalb", $p0, $p1,
          (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;

def OP_BFMLALT_LN
    : Op<(call "vbfmlalt", $p0, $p1,
          (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
252
// bfloat16 <-> float32 conversion ops.  Widening bf16 -> f32 is a left shift
// of the 16-bit pattern into the top half of a 32-bit lane (bf16 is the high
// 16 bits of an IEEE binary32 value).
def OP_VCVT_F32_BF16
    : Op<(bitcast "R",
          (call "vshll_n", (bitcast "int16x4_t", $p0),
                           (literal "int32_t", "16")))>;
def OP_VCVT_F32_BF16_LO
    : Op<(call "vcvt_f32_bf16", (call "vget_low", $p0))>;
def OP_VCVT_F32_BF16_HI
    : Op<(call "vcvt_f32_bf16", (call "vget_high", $p0))>;

def OP_VCVT_BF16_F32_LO_A64
    : Op<(call "__a64_vcvtq_low_bf16", $p0)>;
def OP_VCVT_BF16_F32_A64
    : Op<(call "vget_low", (call "__a64_vcvtq_low_bf16", $p0))>;

def OP_VCVT_BF16_F32_A32
    : Op<(call "__a32_vcvt_bf16", $p0)>;

def OP_VCVT_BF16_F32_LO_A32
    : Op<(call "vcombine", (cast "bfloat16x4_t", (literal "uint64_t", "0ULL")),
                           (call "__a32_vcvt_bf16", $p0))>;
def OP_VCVT_BF16_F32_HI_A32
    : Op<(call "vcombine", (call "__a32_vcvt_bf16", $p1),
                           (call "vget_low", $p0))>;

def OP_CVT_F32_BF16
    : Op<(bitcast "R", (op "<<", (cast "int32_t", (bitcast "int16_t", $p0)),
                                 (literal "int32_t", "16")))>;
280
//===----------------------------------------------------------------------===//
// Auxiliary Instructions
//===----------------------------------------------------------------------===//

// Splat operation - performs a range-checked splat over a vector
def SPLAT  : WInst<"splat_lane", ".(!q)I",
                   "UcUsUicsilPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUlhdQhQdPlQPl">;
def SPLATQ : WInst<"splat_laneq", ".(!Q)I",
                   "UcUsUicsilPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUlhdQhQdPlQPl"> {
  let isLaneQ = 1;
}
// bfloat16 splats require the bf16 target feature.
let TargetGuard = "bf16,neon" in {
  def SPLAT_BF  : WInst<"splat_lane", ".(!q)I", "bQb">;
  def SPLATQ_BF : WInst<"splat_laneq", ".(!Q)I", "bQb"> {
    let isLaneQ = 1;
  }
}
298
//===----------------------------------------------------------------------===//
// Intrinsics
//===----------------------------------------------------------------------===//

////////////////////////////////////////////////////////////////////////////////
// E.3.1 Addition
def VADD    : IOpInst<"vadd", "...",
                      "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_ADD>;
def VADDL   : SOpInst<"vaddl", "(>Q)..", "csiUcUsUi", OP_ADDL>;
def VADDW   : SOpInst<"vaddw", "(>Q)(>Q).", "csiUcUsUi", OP_ADDW>;
def VHADD   : SInst<"vhadd", "...", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VRHADD  : SInst<"vrhadd", "...", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VQADD   : SInst<"vqadd", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VADDHN  : IInst<"vaddhn", "<QQ", "silUsUiUl">;
def VRADDHN : IInst<"vraddhn", "<QQ", "silUsUiUl">;

////////////////////////////////////////////////////////////////////////////////
// E.3.2 Multiplication
def VMUL     : IOpInst<"vmul", "...", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MUL>;
def VMULP    : SInst<"vmul", "...", "PcQPc">;
def VMLA     : IOpInst<"vmla", "....", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLA>;
def VMLAL    : SOpInst<"vmlal", "(>Q)(>Q)..", "csiUcUsUi", OP_MLAL>;
def VMLS     : IOpInst<"vmls", "....", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLS>;
def VMLSL    : SOpInst<"vmlsl", "(>Q)(>Q)..", "csiUcUsUi", OP_MLSL>;
def VQDMULH  : SInst<"vqdmulh", "...", "siQsQi">;
def VQRDMULH : SInst<"vqrdmulh", "...", "siQsQi">;

// Rounding doubling multiply accumulate/subtract: ARMv8.1-A only.
let TargetGuard = "v8.1a,neon" in {
def VQRDMLAH : SInst<"vqrdmlah", "....", "siQsQi">;
def VQRDMLSH : SInst<"vqrdmlsh", "....", "siQsQi">;
}

def VQDMLAL  : SInst<"vqdmlal", "(>Q)(>Q)..", "si">;
def VQDMLSL  : SInst<"vqdmlsl", "(>Q)(>Q)..", "si">;
def VMULL    : SInst<"vmull", "(>Q)..", "csiUcUsUiPc">;
def VQDMULL  : SInst<"vqdmull", "(>Q)..", "si">;

////////////////////////////////////////////////////////////////////////////////
// E.3.3 Subtraction
def VSUB    : IOpInst<"vsub", "...",
                      "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_SUB>;
def VSUBL   : SOpInst<"vsubl", "(>Q)..", "csiUcUsUi", OP_SUBL>;
def VSUBW   : SOpInst<"vsubw", "(>Q)(>Q).", "csiUcUsUi", OP_SUBW>;
def VQSUB   : SInst<"vqsub", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VHSUB   : SInst<"vhsub", "...", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VSUBHN  : IInst<"vsubhn", "<QQ", "silUsUiUl">;
def VRSUBHN : IInst<"vrsubhn", "<QQ", "silUsUiUl">;
346
////////////////////////////////////////////////////////////////////////////////
// E.3.4 Comparison
def VCEQ  : IOpInst<"vceq", "U..", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_EQ>;
def VCGE  : SOpInst<"vcge", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GE>;
let InstName = "vcge" in
def VCLE  : SOpInst<"vcle", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LE>;
def VCGT  : SOpInst<"vcgt", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GT>;
let InstName = "vcgt" in
def VCLT  : SOpInst<"vclt", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LT>;
let InstName = "vacge" in {
def VCAGE : IInst<"vcage", "U..", "fQf">;
def VCALE : IInst<"vcale", "U..", "fQf">;
}
let InstName = "vacgt" in {
def VCAGT : IInst<"vcagt", "U..", "fQf">;
def VCALT : IInst<"vcalt", "U..", "fQf">;
}
def VTST  : WInst<"vtst", "U..", "csiUcUsUiPcPsQcQsQiQUcQUsQUiQPcQPs">;

////////////////////////////////////////////////////////////////////////////////
// E.3.5 Absolute Difference
def VABD  : SInst<"vabd", "...",  "csiUcUsUifQcQsQiQUcQUsQUiQf">;
def VABDL : SOpInst<"vabdl", "(>Q)..",  "csiUcUsUi", OP_ABDL>;
def VABA  : SOpInst<"vaba", "....", "csiUcUsUiQcQsQiQUcQUsQUi", OP_ABA>;
def VABAL : SOpInst<"vabal", "(>Q)(>Q)..", "csiUcUsUi", OP_ABAL>;

////////////////////////////////////////////////////////////////////////////////
// E.3.6 Max/Min
def VMAX : SInst<"vmax", "...", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
def VMIN : SInst<"vmin", "...", "csiUcUsUifQcQsQiQUcQUsQUiQf">;

////////////////////////////////////////////////////////////////////////////////
// E.3.7 Pairwise Addition
def VPADD  : IInst<"vpadd", "...", "csiUcUsUif">;
def VPADDL : SInst<"vpaddl", ">.",  "csiUcUsUiQcQsQiQUcQUsQUi">;
def VPADAL : SInst<"vpadal", ">>.", "csiUcUsUiQcQsQiQUcQUsQUi">;

////////////////////////////////////////////////////////////////////////////////
// E.3.8-9 Folding Max/Min
def VPMAX : SInst<"vpmax", "...", "csiUcUsUif">;
def VPMIN : SInst<"vpmin", "...", "csiUcUsUif">;

////////////////////////////////////////////////////////////////////////////////
// E.3.10 Reciprocal/Sqrt
def VRECPS  : IInst<"vrecps", "...", "fQf">;
def VRSQRTS : IInst<"vrsqrts", "...", "fQf">;

////////////////////////////////////////////////////////////////////////////////
// E.3.11 Shifts by signed variable
def VSHL   : SInst<"vshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VQSHL  : SInst<"vqshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VRSHL  : SInst<"vrshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VQRSHL : SInst<"vqrshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
400
////////////////////////////////////////////////////////////////////////////////
// E.3.12 Shifts by constant
let isShift = 1 in {
def VSHR_N     : SInst<"vshr_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VSHL_N     : IInst<"vshl_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VRSHR_N    : SInst<"vrshr_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VSRA_N     : SInst<"vsra_n", "...I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VRSRA_N    : SInst<"vrsra_n", "...I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VQSHL_N    : SInst<"vqshl_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VQSHLU_N   : SInst<"vqshlu_n", "U.I", "csilQcQsQiQl">;
def VSHRN_N    : IInst<"vshrn_n", "<QI", "silUsUiUl">;
def VQSHRUN_N  : SInst<"vqshrun_n", "(<U)QI", "sil">;
def VQRSHRUN_N : SInst<"vqrshrun_n", "(<U)QI", "sil">;
def VQSHRN_N   : SInst<"vqshrn_n", "<QI", "silUsUiUl">;
def VRSHRN_N   : IInst<"vrshrn_n", "<QI", "silUsUiUl">;
def VQRSHRN_N  : SInst<"vqrshrn_n", "<QI", "silUsUiUl">;
def VSHLL_N    : SInst<"vshll_n", "(>Q).I", "csiUcUsUi">;

////////////////////////////////////////////////////////////////////////////////
// E.3.13 Shifts with insert
def VSRI_N : WInst<"vsri_n", "...I",
                   "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
def VSLI_N : WInst<"vsli_n", "...I",
                   "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
}
426
////////////////////////////////////////////////////////////////////////////////
// E.3.14 Loads and stores of a single vector
def VLD1      : WInst<"vld1", ".(c*!)",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD1_X2   : WInst<"vld1_x2", "2(c*!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VLD1_X3   : WInst<"vld1_x3", "3(c*!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VLD1_X4   : WInst<"vld1_x4", "4(c*!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VLD1_LANE : WInst<"vld1_lane", ".(c*!).I",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD1_DUP  : WInst<"vld1_dup", ".(c*!)",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST1      : WInst<"vst1", "v*(.!)",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST1_X2   : WInst<"vst1_x2", "v*(2!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VST1_X3   : WInst<"vst1_x3", "v*(3!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VST1_X4   : WInst<"vst1_x4", "v*(4!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VST1_LANE : WInst<"vst1_lane", "v*(.!)I",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
// Half-precision variants require the FP16 half of the FP extension.
let ArchGuard = "(__ARM_FP & 2)" in {
def VLD1_F16      : WInst<"vld1", ".(c*!)", "hQh">;
def VLD1_X2_F16   : WInst<"vld1_x2", "2(c*!)", "hQh">;
def VLD1_X3_F16   : WInst<"vld1_x3", "3(c*!)", "hQh">;
def VLD1_X4_F16   : WInst<"vld1_x4", "4(c*!)", "hQh">;
def VLD1_LANE_F16 : WInst<"vld1_lane", ".(c*!).I", "hQh">;
def VLD1_DUP_F16  : WInst<"vld1_dup", ".(c*!)", "hQh">;
def VST1_F16      : WInst<"vst1", "v*(.!)", "hQh">;
def VST1_X2_F16   : WInst<"vst1_x2", "v*(2!)", "hQh">;
def VST1_X3_F16   : WInst<"vst1_x3", "v*(3!)", "hQh">;
def VST1_X4_F16   : WInst<"vst1_x4", "v*(4!)", "hQh">;
def VST1_LANE_F16 : WInst<"vst1_lane", "v*(.!)I", "hQh">;
}
464
////////////////////////////////////////////////////////////////////////////////
// E.3.15 Loads and stores of an N-element structure
def VLD2 : WInst<"vld2", "2(c*!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD3 : WInst<"vld3", "3(c*!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD4 : WInst<"vld4", "4(c*!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD2_DUP  : WInst<"vld2_dup", "2(c*!)",
                      "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">;
def VLD3_DUP  : WInst<"vld3_dup", "3(c*!)",
                      "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">;
def VLD4_DUP  : WInst<"vld4_dup", "4(c*!)",
                      "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">;
def VLD2_LANE : WInst<"vld2_lane", "2(c*!)2I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VLD3_LANE : WInst<"vld3_lane", "3(c*!)3I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VLD4_LANE : WInst<"vld4_lane", "4(c*!)4I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VST2 : WInst<"vst2", "v*(2!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST3 : WInst<"vst3", "v*(3!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST4 : WInst<"vst4", "v*(4!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST2_LANE : WInst<"vst2_lane", "v*(2!)I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VST3_LANE : WInst<"vst3_lane", "v*(3!)I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VST4_LANE : WInst<"vst4_lane", "v*(4!)I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
// Half-precision variants require the FP16 half of the FP extension.
let ArchGuard = "(__ARM_FP & 2)" in {
def VLD2_F16      : WInst<"vld2", "2(c*!)", "hQh">;
def VLD3_F16      : WInst<"vld3", "3(c*!)", "hQh">;
def VLD4_F16      : WInst<"vld4", "4(c*!)", "hQh">;
def VLD2_DUP_F16  : WInst<"vld2_dup", "2(c*!)", "hQh">;
def VLD3_DUP_F16  : WInst<"vld3_dup", "3(c*!)", "hQh">;
def VLD4_DUP_F16  : WInst<"vld4_dup", "4(c*!)", "hQh">;
def VLD2_LANE_F16 : WInst<"vld2_lane", "2(c*!)2I", "hQh">;
def VLD3_LANE_F16 : WInst<"vld3_lane", "3(c*!)3I", "hQh">;
def VLD4_LANE_F16 : WInst<"vld4_lane", "4(c*!)4I", "hQh">;
def VST2_F16      : WInst<"vst2", "v*(2!)", "hQh">;
def VST3_F16      : WInst<"vst3", "v*(3!)", "hQh">;
def VST4_F16      : WInst<"vst4", "v*(4!)", "hQh">;
def VST2_LANE_F16 : WInst<"vst2_lane", "v*(2!)I", "hQh">;
def VST3_LANE_F16 : WInst<"vst3_lane", "v*(3!)I", "hQh">;
def VST4_LANE_F16 : WInst<"vst4_lane", "v*(4!)I", "hQh">;
}
502
////////////////////////////////////////////////////////////////////////////////
// E.3.16 Extract lanes from a vector
let InstName = "vmov" in
def VGET_LANE : IInst<"vget_lane", "1.I",
                      "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;

////////////////////////////////////////////////////////////////////////////////
// E.3.17 Set lanes within a vector
let InstName = "vmov" in
def VSET_LANE : IInst<"vset_lane", ".1.I",
                      "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;

////////////////////////////////////////////////////////////////////////////////
// E.3.18 Initialize a vector from bit pattern
def VCREATE : NoTestOpInst<"vcreate", ".(IU>)", "csihfUcUsUiUlPcPsl", OP_CAST> {
  let BigEndianSafe = 1;
}

////////////////////////////////////////////////////////////////////////////////
// E.3.19 Set all lanes to same value
let InstName = "vmov" in {
def VDUP_N   : WOpInst<"vdup_n", ".1",
                       "UcUsUicsiPcPshfQUcQUsQUiQcQsQiQPcQPsQhQflUlQlQUl",
                       OP_DUP>;
def VMOV_N   : WOpInst<"vmov_n", ".1",
                       "UcUsUicsiPcPshfQUcQUsQUiQcQsQiQPcQPsQhQflUlQlQUl",
                       OP_DUP>;
}
let InstName = "" in
def VDUP_LANE: WOpInst<"vdup_lane", ".qI",
                       "UcUsUicsiPcPshfQUcQUsQUiQcQsQiQPcQPsQhQflUlQlQUl",
                       OP_DUP_LN>;
535
////////////////////////////////////////////////////////////////////////////////
// E.3.20 Combining vectors
def VCOMBINE : NoTestOpInst<"vcombine", "Q..", "csilhfUcUsUiUlPcPs", OP_CONC>;

////////////////////////////////////////////////////////////////////////////////
// E.3.21 Splitting vectors
// Note that the ARM NEON Reference 2.0 mistakenly document the vget_high_f16()
// and vget_low_f16() intrinsics as AArch64-only. We (and GCC) support all
// versions of these intrinsics in both AArch32 and AArch64 architectures. See
// D45668 for more details.
let InstName = "vmov" in {
def VGET_HIGH : NoTestOpInst<"vget_high", ".Q", "csilhfUcUsUiUlPcPs", OP_HI>;
def VGET_LOW  : NoTestOpInst<"vget_low", ".Q", "csilhfUcUsUiUlPcPs", OP_LO>;
}

////////////////////////////////////////////////////////////////////////////////
// E.3.22 Converting vectors

let ArchGuard = "(__ARM_FP & 2)" in {
  def VCVT_F16_F32 : SInst<"vcvt_f16_f32", "(<q)(.!)", "Hf">;
  def VCVT_F32_F16 : SInst<"vcvt_f32_f16", "(>Q)(.!)", "h">;
}

def VCVT_S32     : SInst<"vcvt_s32", "S.",  "fQf">;
def VCVT_U32     : SInst<"vcvt_u32", "U.",  "fQf">;
def VCVT_F32     : SInst<"vcvt_f32", "F(.!)",  "iUiQiQUi">;
// Fixed-point conversions take an extra immediate fraction-bits operand.
let isVCVT_N = 1 in {
def VCVT_N_S32   : SInst<"vcvt_n_s32", "S.I", "fQf">;
def VCVT_N_U32   : SInst<"vcvt_n_u32", "U.I", "fQf">;
def VCVT_N_F32   : SInst<"vcvt_n_f32", "F(.!)I", "iUiQiQUi">;
}

def VMOVN        : IInst<"vmovn", "<Q",  "silUsUiUl">;
def VMOVL        : SInst<"vmovl", "(>Q).",  "csiUcUsUi">;
def VQMOVN       : SInst<"vqmovn", "<Q",  "silUsUiUl">;
def VQMOVUN      : SInst<"vqmovun", "(<U)Q",  "sil">;
572
573////////////////////////////////////////////////////////////////////////////////
574// E.3.23-24 Table lookup, Extended table lookup
575let InstName = "vtbl" in {
576def VTBL1 : WInst<"vtbl1", "..p",  "UccPc">;
577def VTBL2 : WInst<"vtbl2", ".2p",  "UccPc">;
578def VTBL3 : WInst<"vtbl3", ".3p",  "UccPc">;
579def VTBL4 : WInst<"vtbl4", ".4p",  "UccPc">;
580}
581let InstName = "vtbx" in {
582def VTBX1 : WInst<"vtbx1", "...p", "UccPc">;
583def VTBX2 : WInst<"vtbx2", "..2p", "UccPc">;
584def VTBX3 : WInst<"vtbx3", "..3p", "UccPc">;
585def VTBX4 : WInst<"vtbx4", "..4p", "UccPc">;
586}
587
588////////////////////////////////////////////////////////////////////////////////
589// E.3.25 Operations with a scalar value
590def VMLA_LANE     : IOpInst<"vmla_lane", "...qI",
591                            "siUsUifQsQiQUsQUiQf", OP_MLA_LN>;
592def VMLAL_LANE    : SOpInst<"vmlal_lane", "(>Q)(>Q)..I", "siUsUi", OP_MLAL_LN>;
593def VQDMLAL_LANE  : SOpInst<"vqdmlal_lane", "(>Q)(>Q)..I", "si", OP_QDMLAL_LN>;
594def VMLS_LANE     : IOpInst<"vmls_lane", "...qI",
595                            "siUsUifQsQiQUsQUiQf", OP_MLS_LN>;
596def VMLSL_LANE    : SOpInst<"vmlsl_lane", "(>Q)(>Q)..I", "siUsUi", OP_MLSL_LN>;
597def VQDMLSL_LANE  : SOpInst<"vqdmlsl_lane", "(>Q)(>Q)..I", "si", OP_QDMLSL_LN>;
598def VMUL_N        : IOpInst<"vmul_n", "..1", "sifUsUiQsQiQfQUsQUi", OP_MUL_N>;
599def VMUL_LANE     : IOpInst<"vmul_lane", "..qI",
600                            "sifUsUiQsQiQfQUsQUi", OP_MUL_LN>;
601def VMULL_N       : SOpInst<"vmull_n", "(>Q).1", "siUsUi", OP_MULL_N>;
602def VMULL_LANE    : SOpInst<"vmull_lane", "(>Q)..I", "siUsUi", OP_MULL_LN>;
603def VQDMULL_N     : SOpInst<"vqdmull_n", "(>Q).1", "si", OP_QDMULL_N>;
604def VQDMULL_LANE  : SOpInst<"vqdmull_lane", "(>Q)..I", "si", OP_QDMULL_LN>;
605def VQDMULH_N     : SOpInst<"vqdmulh_n", "..1", "siQsQi", OP_QDMULH_N>;
606def VQRDMULH_N    : SOpInst<"vqrdmulh_n", "..1", "siQsQi", OP_QRDMULH_N>;
607
608let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)" in {
609def VQDMULH_LANE  : SOpInst<"vqdmulh_lane", "..qI", "siQsQi", OP_QDMULH_LN>;
610def VQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "..qI", "siQsQi", OP_QRDMULH_LN>;
611}
612let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {
613def A64_VQDMULH_LANE  : SInst<"vqdmulh_lane", "..(!q)I", "siQsQi">;
614def A64_VQRDMULH_LANE : SInst<"vqrdmulh_lane", "..(!q)I", "siQsQi">;
615}
616
617let TargetGuard = "v8.1a,neon" in {
618def VQRDMLAH_LANE : SOpInst<"vqrdmlah_lane", "...qI", "siQsQi", OP_QRDMLAH_LN>;
619def VQRDMLSH_LANE : SOpInst<"vqrdmlsh_lane", "...qI", "siQsQi", OP_QRDMLSH_LN>;
620}
621
622def VMLA_N        : IOpInst<"vmla_n", "...1", "siUsUifQsQiQUsQUiQf", OP_MLA_N>;
623def VMLAL_N       : SOpInst<"vmlal_n", "(>Q)(>Q).1", "siUsUi", OP_MLAL_N>;
624def VQDMLAL_N     : SOpInst<"vqdmlal_n", "(>Q)(>Q).1", "si", OP_QDMLAL_N>;
625def VMLS_N        : IOpInst<"vmls_n", "...1", "siUsUifQsQiQUsQUiQf", OP_MLS_N>;
626def VMLSL_N       : SOpInst<"vmlsl_n", "(>Q)(>Q).1", "siUsUi", OP_MLSL_N>;
627def VQDMLSL_N     : SOpInst<"vqdmlsl_n", "(>Q)(>Q).1", "si", OP_QDMLSL_N>;
628
629////////////////////////////////////////////////////////////////////////////////
630// E.3.26 Vector Extract
631def VEXT : WInst<"vext", "...I",
632                 "cUcPcsUsPsiUilUlfQcQUcQPcQsQUsQPsQiQUiQlQUlQf">;
633
634////////////////////////////////////////////////////////////////////////////////
635// E.3.27 Reverse vector elements
636def VREV64 : WOpInst<"vrev64", "..", "csiUcUsUiPcPsfQcQsQiQUcQUsQUiQPcQPsQf",
637                  OP_REV64>;
638def VREV32 : WOpInst<"vrev32", "..", "csUcUsPcPsQcQsQUcQUsQPcQPs", OP_REV32>;
639def VREV16 : WOpInst<"vrev16", "..", "cUcPcQcQUcQPc", OP_REV16>;
640
641////////////////////////////////////////////////////////////////////////////////
642// E.3.28 Other single operand arithmetic
643def VABS    : SInst<"vabs", "..", "csifQcQsQiQf">;
644def VQABS   : SInst<"vqabs", "..", "csiQcQsQi">;
645def VNEG    : SOpInst<"vneg", "..", "csifQcQsQiQf", OP_NEG>;
646def VQNEG   : SInst<"vqneg", "..", "csiQcQsQi">;
647def VCLS    : SInst<"vcls", "S.", "csiUcUsUiQcQsQiQUcQUsQUi">;
648def VCLZ    : IInst<"vclz", "..", "csiUcUsUiQcQsQiQUcQUsQUi">;
649def VCNT    : WInst<"vcnt", "..", "UccPcQUcQcQPc">;
650def VRECPE  : SInst<"vrecpe", "..", "fUiQfQUi">;
651def VRSQRTE : SInst<"vrsqrte", "..", "fUiQfQUi">;
652
653////////////////////////////////////////////////////////////////////////////////
654// E.3.29 Logical operations
655def VMVN : LOpInst<"vmvn", "..", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc", OP_NOT>;
656def VAND : LOpInst<"vand", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_AND>;
657def VORR : LOpInst<"vorr", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_OR>;
658def VEOR : LOpInst<"veor", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_XOR>;
659def VBIC : LOpInst<"vbic", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ANDN>;
660def VORN : LOpInst<"vorn", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ORN>;
661let isHiddenLInst = 1 in
662def VBSL : SInst<"vbsl", ".U..",
663                "csilUcUsUiUlfPcPsQcQsQiQlQUcQUsQUiQUlQfQPcQPs">;
664
665////////////////////////////////////////////////////////////////////////////////
666// E.3.30 Transposition operations
667def VTRN : WInst<"vtrn", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
668def VZIP : WInst<"vzip", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
669def VUZP : WInst<"vuzp", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
670
671////////////////////////////////////////////////////////////////////////////////
672
673class REINTERPRET_CROSS_SELF<string Types> :
674  NoTestOpInst<"vreinterpret", "..", Types, OP_REINT> {
675    let CartesianProductWith = Types;
676}
677
678multiclass REINTERPRET_CROSS_TYPES<string TypesA, string TypesB> {
679  def AXB: NoTestOpInst<"vreinterpret", "..", TypesA, OP_REINT> {
680    let CartesianProductWith = TypesB;
681  }
682  def BXA: NoTestOpInst<"vreinterpret", "..", TypesB, OP_REINT> {
683    let CartesianProductWith = TypesA;
684  }
685}
686
687// E.3.31 Vector reinterpret cast operations
688def VREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfPcPsQcQsQiQlQUcQUsQUiQUlQhQfQPcQPs"> {
689  let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)";
690  let BigEndianSafe = 1;
691}
692
693////////////////////////////////////////////////////////////////////////////////
694// Vector fused multiply-add operations
695
696let ArchGuard = "defined(__ARM_FEATURE_FMA)" in {
697  def VFMA : SInst<"vfma", "....", "fQf">;
698  def VFMS : SOpInst<"vfms", "....", "fQf", OP_FMLS>;
699  def FMLA_N_F32 : SOpInst<"vfma_n", "...1", "fQf", OP_FMLA_N>;
700}
701
702////////////////////////////////////////////////////////////////////////////////
703// fp16 vector operations
704def SCALAR_HALF_GET_LANE : IOpInst<"vget_lane", "1.I", "h", OP_SCALAR_HALF_GET_LN>;
705def SCALAR_HALF_SET_LANE : IOpInst<"vset_lane", ".1.I", "h", OP_SCALAR_HALF_SET_LN>;
706def SCALAR_HALF_GET_LANEQ : IOpInst<"vget_lane", "1.I", "Qh", OP_SCALAR_HALF_GET_LNQ>;
707def SCALAR_HALF_SET_LANEQ : IOpInst<"vset_lane", ".1.I", "Qh", OP_SCALAR_HALF_SET_LNQ>;

////////////////////////////////////////////////////////////////////////////////
// Non-poly128_t vadd for Arm and AArch64
// TODO: poly128_t not implemented on arm32
def VADDP   : WInst<"vadd", "...", "PcPsPlQPcQPsQPl">;
713
714////////////////////////////////////////////////////////////////////////////////
715// AArch64 Intrinsics
716
717let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {
718
719////////////////////////////////////////////////////////////////////////////////
720// Load/Store
721def LD1 : WInst<"vld1", ".(c*!)", "dQdPlQPl">;
722def LD2 : WInst<"vld2", "2(c*!)", "QUlQldQdPlQPl">;
723def LD3 : WInst<"vld3", "3(c*!)", "QUlQldQdPlQPl">;
724def LD4 : WInst<"vld4", "4(c*!)", "QUlQldQdPlQPl">;
725def ST1 : WInst<"vst1", "v*(.!)", "dQdPlQPl">;
726def ST2 : WInst<"vst2", "v*(2!)", "QUlQldQdPlQPl">;
727def ST3 : WInst<"vst3", "v*(3!)", "QUlQldQdPlQPl">;
728def ST4 : WInst<"vst4", "v*(4!)", "QUlQldQdPlQPl">;
729
730def LD1_X2 : WInst<"vld1_x2", "2(c*!)",
731                   "dQdPlQPl">;
732def LD1_X3 : WInst<"vld1_x3", "3(c*!)",
733                   "dQdPlQPl">;
734def LD1_X4 : WInst<"vld1_x4", "4(c*!)",
735                   "dQdPlQPl">;
736
737def ST1_X2 : WInst<"vst1_x2", "v*(2!)", "dQdPlQPl">;
738def ST1_X3 : WInst<"vst1_x3", "v*(3!)", "dQdPlQPl">;
739def ST1_X4 : WInst<"vst1_x4", "v*(4!)", "dQdPlQPl">;
740
741def LD1_LANE : WInst<"vld1_lane", ".(c*!).I", "dQdPlQPl">;
742def LD2_LANE : WInst<"vld2_lane", "2(c*!)2I", "lUlQcQUcQPcQlQUldQdPlQPl">;
743def LD3_LANE : WInst<"vld3_lane", "3(c*!)3I", "lUlQcQUcQPcQlQUldQdPlQPl">;
744def LD4_LANE : WInst<"vld4_lane", "4(c*!)4I", "lUlQcQUcQPcQlQUldQdPlQPl">;
745def ST1_LANE : WInst<"vst1_lane", "v*(.!)I", "dQdPlQPl">;
746def ST2_LANE : WInst<"vst2_lane", "v*(2!)I", "lUlQcQUcQPcQlQUldQdPlQPl">;
747def ST3_LANE : WInst<"vst3_lane", "v*(3!)I", "lUlQcQUcQPcQlQUldQdPlQPl">;
748def ST4_LANE : WInst<"vst4_lane", "v*(4!)I", "lUlQcQUcQPcQlQUldQdPlQPl">;
749
750def LD1_DUP  : WInst<"vld1_dup", ".(c*!)", "dQdPlQPl">;
751def LD2_DUP  : WInst<"vld2_dup", "2(c*!)", "dQdPlQPl">;
752def LD3_DUP  : WInst<"vld3_dup", "3(c*!)", "dQdPlQPl">;
753def LD4_DUP  : WInst<"vld4_dup", "4(c*!)", "dQdPlQPl">;
754
755def VLDRQ : WInst<"vldrq", "1(c*!)", "Pk">;
756def VSTRQ : WInst<"vstrq", "v*(1!)", "Pk">;
757
758////////////////////////////////////////////////////////////////////////////////
759// Addition
760def ADD : IOpInst<"vadd", "...", "dQd", OP_ADD>;
761
762////////////////////////////////////////////////////////////////////////////////
763// Subtraction
764def SUB : IOpInst<"vsub", "...", "dQd", OP_SUB>;
765
766////////////////////////////////////////////////////////////////////////////////
767// Multiplication
768def MUL     : IOpInst<"vmul", "...", "dQd", OP_MUL>;
769def MLA     : IOpInst<"vmla", "....", "dQd", OP_MLA>;
770def MLS     : IOpInst<"vmls", "....", "dQd", OP_MLS>;
771
772////////////////////////////////////////////////////////////////////////////////
773// Multiplication Extended
774def MULX : SInst<"vmulx", "...", "fdQfQd">;
775
776////////////////////////////////////////////////////////////////////////////////
777// Division
778def FDIV : IOpInst<"vdiv", "...",  "fdQfQd", OP_DIV>;
779
780////////////////////////////////////////////////////////////////////////////////
781// Vector fused multiply-add operations
782def FMLA : SInst<"vfma", "....", "dQd">;
783def FMLS : SOpInst<"vfms", "....", "dQd", OP_FMLS>;
784
785////////////////////////////////////////////////////////////////////////////////
786// MUL, MLA, MLS, FMA, FMS definitions with scalar argument
787def VMUL_N_A64 : IOpInst<"vmul_n", "..1", "Qd", OP_MUL_N>;
788
789def FMLA_N : SOpInst<"vfma_n", "...1", "dQd", OP_FMLA_N>;
790def FMLS_N : SOpInst<"vfms_n", "...1", "fdQfQd", OP_FMLS_N>;
791
792////////////////////////////////////////////////////////////////////////////////
793// Logical operations
794def BSL : SInst<"vbsl", ".U..", "dPlQdQPl">;
795
796////////////////////////////////////////////////////////////////////////////////
797// Absolute Difference
798def ABD  : SInst<"vabd", "...",  "dQd">;
799
800////////////////////////////////////////////////////////////////////////////////
801// saturating absolute/negate
802def ABS    : SInst<"vabs", "..", "dQdlQl">;
803def QABS   : SInst<"vqabs", "..", "lQl">;
804def NEG    : SOpInst<"vneg", "..", "dlQdQl", OP_NEG>;
805def QNEG   : SInst<"vqneg", "..", "lQl">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Accumulated of Unsigned Value
def SUQADD : SInst<"vuqadd", "..U", "csilQcQsQiQl">;

////////////////////////////////////////////////////////////////////////////////
// Unsigned Saturating Accumulated of Signed Value
def USQADD : SInst<"vsqadd", "..S", "UcUsUiUlQUcQUsQUiQUl">;

////////////////////////////////////////////////////////////////////////////////
// Reciprocal/Sqrt
// f64 reciprocal(-sqrt) steps/estimates; vsqrt also covers f32.
def FRECPS  : IInst<"vrecps", "...", "dQd">;
def FRSQRTS : IInst<"vrsqrts", "...", "dQd">;
def FRECPE  : SInst<"vrecpe", "..", "dQd">;
def FRSQRTE : SInst<"vrsqrte", "..", "dQd">;
def FSQRT   : SInst<"vsqrt", "..", "fdQfQd">;
822
823////////////////////////////////////////////////////////////////////////////////
824// bitwise reverse
825def RBIT : IInst<"vrbit", "..", "cUcPcQcQUcQPc">;
826
827////////////////////////////////////////////////////////////////////////////////
828// Integer extract and narrow to high
829def XTN2 : SOpInst<"vmovn_high", "(<Q)<Q", "silUsUiUl", OP_XTN>;
830
831////////////////////////////////////////////////////////////////////////////////
832// Signed integer saturating extract and unsigned narrow to high
833def SQXTUN2 : SOpInst<"vqmovun_high", "(<U)(<Uq).", "HsHiHl", OP_SQXTUN>;
834
835////////////////////////////////////////////////////////////////////////////////
836// Integer saturating extract and narrow to high
837def QXTN2 : SOpInst<"vqmovn_high", "(<Q)<Q", "silUsUiUl", OP_QXTN>;
838
839////////////////////////////////////////////////////////////////////////////////
840// Converting vectors
841
842def VCVT_F32_F64 : SInst<"vcvt_f32_f64", "(<q).", "Qd">;
843def VCVT_F64_F32 : SInst<"vcvt_f64_f32", "(>Q).", "f">;
844
845def VCVT_S64 : SInst<"vcvt_s64", "S.",  "dQd">;
846def VCVT_U64 : SInst<"vcvt_u64", "U.",  "dQd">;
847def VCVT_F64 : SInst<"vcvt_f64", "F(.!)",  "lUlQlQUl">;
848
849def VCVT_HIGH_F16_F32 : SOpInst<"vcvt_high_f16", "<(<q!)Q", "Hf", OP_VCVT_NA_HI_F16>;
850def VCVT_HIGH_F32_F16 : SOpInst<"vcvt_high_f32", "(>Q)(Q!)", "h", OP_VCVT_EX_HI_F32>;
851def VCVT_HIGH_F32_F64 : SOpInst<"vcvt_high_f32", "(<Q)(F<!)Q", "d", OP_VCVT_NA_HI_F32>;
852def VCVT_HIGH_F64_F32 : SOpInst<"vcvt_high_f64", "(>Q)(Q!)", "f", OP_VCVT_EX_HI_F64>;
853
854def VCVTX_F32_F64      : SInst<"vcvtx_f32", "(F<)(Q!)",  "d">;
855def VCVTX_HIGH_F32_F64 : SOpInst<"vcvtx_high_f32", "(<Q)(F<!)Q", "d", OP_VCVTX_HI>;
856
857////////////////////////////////////////////////////////////////////////////////
858// Comparison
859def FCAGE : IInst<"vcage", "U..", "dQd">;
860def FCAGT : IInst<"vcagt", "U..", "dQd">;
861def FCALE : IInst<"vcale", "U..", "dQd">;
862def FCALT : IInst<"vcalt", "U..", "dQd">;
863def CMTST  : WInst<"vtst", "U..", "lUlPlQlQUlQPl">;
864def CFMEQ  : SOpInst<"vceq", "U..", "lUldQdQlQUlPlQPl", OP_EQ>;
865def CFMGE  : SOpInst<"vcge", "U..", "lUldQdQlQUl", OP_GE>;
866def CFMLE  : SOpInst<"vcle", "U..", "lUldQdQlQUl", OP_LE>;
867def CFMGT  : SOpInst<"vcgt", "U..", "lUldQdQlQUl", OP_GT>;
868def CFMLT  : SOpInst<"vclt", "U..", "lUldQdQlQUl", OP_LT>;
869
870def CMEQ  : SInst<"vceqz", "U.",
871                  "csilfUcUsUiUlPcPlQcQsQiQlQfQUcQUsQUiQUlQPcdQdQPl">;
872def CMGE  : SInst<"vcgez", "U.", "csilfdQcQsQiQlQfQd">;
873def CMLE  : SInst<"vclez", "U.", "csilfdQcQsQiQlQfQd">;
874def CMGT  : SInst<"vcgtz", "U.", "csilfdQcQsQiQlQfQd">;
875def CMLT  : SInst<"vcltz", "U.", "csilfdQcQsQiQlQfQd">;
876

////////////////////////////////////////////////////////////////////////////////
// Max/Min Integer
def MAX : SInst<"vmax", "...", "dQd">;
def MIN : SInst<"vmin", "...", "dQd">;

////////////////////////////////////////////////////////////////////////////////
// Pairwise Max/Min
def MAXP : SInst<"vpmax", "...", "QcQsQiQUcQUsQUiQfQd">;
def MINP : SInst<"vpmin", "...", "QcQsQiQUcQUsQUiQfQd">;

////////////////////////////////////////////////////////////////////////////////
// Pairwise MaxNum/MinNum Floating Point
def FMAXNMP : SInst<"vpmaxnm", "...", "fQfQd">;
def FMINNMP : SInst<"vpminnm", "...", "fQfQd">;

////////////////////////////////////////////////////////////////////////////////
// Pairwise Addition
def ADDP  : IInst<"vpadd", "...", "QcQsQiQlQUcQUsQUiQUlQfQd">;
895
896////////////////////////////////////////////////////////////////////////////////
897// Shifts by constant
898let isShift = 1 in {
899// Left shift long high
900def SHLL_HIGH_N    : SOpInst<"vshll_high_n", ">.I", "HcHsHiHUcHUsHUi",
901                             OP_LONG_HI>;
902
903////////////////////////////////////////////////////////////////////////////////
904def SRI_N : WInst<"vsri_n", "...I", "PlQPl">;
905def SLI_N : WInst<"vsli_n", "...I", "PlQPl">;
906
907// Right shift narrow high
908def SHRN_HIGH_N    : IOpInst<"vshrn_high_n", "<(<q).I",
909                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
910def QSHRUN_HIGH_N  : SOpInst<"vqshrun_high_n", "<(<q).I",
911                             "HsHiHl", OP_NARROW_HI>;
912def RSHRN_HIGH_N   : IOpInst<"vrshrn_high_n", "<(<q).I",
913                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
914def QRSHRUN_HIGH_N : SOpInst<"vqrshrun_high_n", "<(<q).I",
915                             "HsHiHl", OP_NARROW_HI>;
916def QSHRN_HIGH_N   : SOpInst<"vqshrn_high_n", "<(<q).I",
917                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
918def QRSHRN_HIGH_N  : SOpInst<"vqrshrn_high_n", "<(<q).I",
919                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
920}
921
922////////////////////////////////////////////////////////////////////////////////
923// Converting vectors
924def VMOVL_HIGH   : SOpInst<"vmovl_high", ">.", "HcHsHiHUcHUsHUi", OP_MOVL_HI>;
925
926let isVCVT_N = 1 in {
927def CVTF_N_F64   : SInst<"vcvt_n_f64", "F(.!)I", "lUlQlQUl">;
928def FCVTZS_N_S64 : SInst<"vcvt_n_s64", "S.I", "dQd">;
929def FCVTZS_N_U64 : SInst<"vcvt_n_u64", "U.I", "dQd">;
930}
931
932////////////////////////////////////////////////////////////////////////////////
933// 3VDiff class using high 64-bit in operands
934def VADDL_HIGH   : SOpInst<"vaddl_high", "(>Q)QQ", "csiUcUsUi", OP_ADDLHi>;
935def VADDW_HIGH   : SOpInst<"vaddw_high", "(>Q)(>Q)Q", "csiUcUsUi", OP_ADDWHi>;
936def VSUBL_HIGH   : SOpInst<"vsubl_high", "(>Q)QQ", "csiUcUsUi", OP_SUBLHi>;
937def VSUBW_HIGH   : SOpInst<"vsubw_high", "(>Q)(>Q)Q", "csiUcUsUi", OP_SUBWHi>;
938
939def VABDL_HIGH   : SOpInst<"vabdl_high", "(>Q)QQ",  "csiUcUsUi", OP_ABDLHi>;
940def VABAL_HIGH   : SOpInst<"vabal_high", "(>Q)(>Q)QQ", "csiUcUsUi", OP_ABALHi>;
941
942def VMULL_HIGH   : SOpInst<"vmull_high", "(>Q)QQ", "csiUcUsUiPc", OP_MULLHi>;
943def VMULL_HIGH_N : SOpInst<"vmull_high_n", "(>Q)Q1", "siUsUi", OP_MULLHi_N>;
944def VMLAL_HIGH   : SOpInst<"vmlal_high", "(>Q)(>Q)QQ", "csiUcUsUi", OP_MLALHi>;
945def VMLAL_HIGH_N : SOpInst<"vmlal_high_n", "(>Q)(>Q)Q1", "siUsUi", OP_MLALHi_N>;
946def VMLSL_HIGH   : SOpInst<"vmlsl_high", "(>Q)(>Q)QQ", "csiUcUsUi", OP_MLSLHi>;
947def VMLSL_HIGH_N : SOpInst<"vmlsl_high_n", "(>Q)(>Q)Q1", "siUsUi", OP_MLSLHi_N>;
948
949def VADDHN_HIGH  : SOpInst<"vaddhn_high", "(<Q)<QQ", "silUsUiUl", OP_ADDHNHi>;
950def VRADDHN_HIGH : SOpInst<"vraddhn_high", "(<Q)<QQ", "silUsUiUl", OP_RADDHNHi>;
951def VSUBHN_HIGH  : SOpInst<"vsubhn_high", "(<Q)<QQ", "silUsUiUl", OP_SUBHNHi>;
952def VRSUBHN_HIGH : SOpInst<"vrsubhn_high", "(<Q)<QQ", "silUsUiUl", OP_RSUBHNHi>;
953
954def VQDMULL_HIGH : SOpInst<"vqdmull_high", "(>Q)QQ", "si", OP_QDMULLHi>;
955def VQDMULL_HIGH_N : SOpInst<"vqdmull_high_n", "(>Q)Q1", "si", OP_QDMULLHi_N>;
956def VQDMLAL_HIGH : SOpInst<"vqdmlal_high", "(>Q)(>Q)QQ", "si", OP_QDMLALHi>;
957def VQDMLAL_HIGH_N : SOpInst<"vqdmlal_high_n", "(>Q)(>Q)Q1", "si", OP_QDMLALHi_N>;
958def VQDMLSL_HIGH : SOpInst<"vqdmlsl_high", "(>Q)(>Q)QQ", "si", OP_QDMLSLHi>;
959def VQDMLSL_HIGH_N : SOpInst<"vqdmlsl_high_n", "(>Q)(>Q)Q1", "si", OP_QDMLSLHi_N>;
960let TargetGuard = "aes,neon" in {
961  def VMULL_P64    : SInst<"vmull", "(1>)11", "Pl">;
962  def VMULL_HIGH_P64 : SOpInst<"vmull_high", "(1>)..", "HPl", OP_MULLHi_P64>;
963}
964
965
966////////////////////////////////////////////////////////////////////////////////
967// Extract or insert element from vector
968def GET_LANE : IInst<"vget_lane", "1.I", "dQdPlQPl">;
969def SET_LANE : IInst<"vset_lane", ".1.I", "dQdPlQPl">;
970def COPY_LANE : IOpInst<"vcopy_lane", "..I.I",
971                        "csilUcUsUiUlPcPsPlfd", OP_COPY_LN>;
972def COPYQ_LANE : IOpInst<"vcopy_lane", "..IqI",
973                        "QcQsQiQlQUcQUsQUiQUlQPcQPsQfQdQPl", OP_COPY_LN>;
974def COPY_LANEQ : IOpInst<"vcopy_laneq", "..IQI",
975                     "csilPcPsPlUcUsUiUlfd", OP_COPY_LN> {
976  let isLaneQ = 1;
977}
978def COPYQ_LANEQ : IOpInst<"vcopy_laneq", "..I.I",
979                     "QcQsQiQlQUcQUsQUiQUlQPcQPsQfQdQPl", OP_COPY_LN> {
980  let isLaneQ = 1;
981}
982
983////////////////////////////////////////////////////////////////////////////////
984// Set all lanes to same value
985def VDUP_LANE1: WOpInst<"vdup_lane", ".qI", "dQdPlQPl", OP_DUP_LN>;
986def VDUP_LANE2: WOpInst<"vdup_laneq", ".QI",
987                  "csilUcUsUiUlPcPshfdQcQsQiQlQPcQPsQUcQUsQUiQUlQhQfQdPlQPl",
988                        OP_DUP_LN> {
989  let isLaneQ = 1;
990}
991def DUP_N   : WOpInst<"vdup_n", ".1", "dQdPlQPl", OP_DUP>;
992def MOV_N   : WOpInst<"vmov_n", ".1", "dQdPlQPl", OP_DUP>;
993
994////////////////////////////////////////////////////////////////////////////////
995def COMBINE : NoTestOpInst<"vcombine", "Q..", "dPl", OP_CONC>;
996
997////////////////////////////////////////////////////////////////////////////////
998//Initialize a vector from bit pattern
999def CREATE : NoTestOpInst<"vcreate", ".(IU>)", "dPl", OP_CAST> {
1000  let BigEndianSafe = 1;
1001}
1002
1003////////////////////////////////////////////////////////////////////////////////
1004
1005def VMLA_LANEQ   : IOpInst<"vmla_laneq", "...QI",
1006                           "siUsUifQsQiQUsQUiQf", OP_MLA_LN> {
1007  let isLaneQ = 1;
1008}
1009def VMLS_LANEQ   : IOpInst<"vmls_laneq", "...QI",
1010                           "siUsUifQsQiQUsQUiQf", OP_MLS_LN> {
1011  let isLaneQ = 1;
1012}
1013
1014def VFMA_LANE    : IInst<"vfma_lane", "...qI", "fdQfQd">;
1015def VFMA_LANEQ   : IInst<"vfma_laneq", "...QI", "fdQfQd"> {
1016  let isLaneQ = 1;
1017}
1018def VFMS_LANE    : IOpInst<"vfms_lane", "...qI", "fdQfQd", OP_FMS_LN>;
1019def VFMS_LANEQ   : IOpInst<"vfms_laneq", "...QI", "fdQfQd", OP_FMS_LNQ> {
1020  let isLaneQ = 1;
1021}
1022
1023def VMLAL_LANEQ  : SOpInst<"vmlal_laneq", "(>Q)(>Q).QI", "siUsUi", OP_MLAL_LN> {
1024  let isLaneQ = 1;
1025}
1026def VMLAL_HIGH_LANE   : SOpInst<"vmlal_high_lane", "(>Q)(>Q)Q.I", "siUsUi",
1027                                OP_MLALHi_LN>;
1028def VMLAL_HIGH_LANEQ  : SOpInst<"vmlal_high_laneq", "(>Q)(>Q)QQI", "siUsUi",
1029                                OP_MLALHi_LN> {
1030  let isLaneQ = 1;
1031}
1032def VMLSL_LANEQ  : SOpInst<"vmlsl_laneq", "(>Q)(>Q).QI", "siUsUi", OP_MLSL_LN> {
1033  let isLaneQ = 1;
1034}
1035def VMLSL_HIGH_LANE   : SOpInst<"vmlsl_high_lane", "(>Q)(>Q)Q.I", "siUsUi",
1036                                OP_MLSLHi_LN>;
1037def VMLSL_HIGH_LANEQ  : SOpInst<"vmlsl_high_laneq", "(>Q)(>Q)QQI", "siUsUi",
1038                                OP_MLSLHi_LN> {
1039  let isLaneQ = 1;
1040}
1041
1042def VQDMLAL_LANEQ  : SOpInst<"vqdmlal_laneq", "(>Q)(>Q).QI", "si", OP_QDMLAL_LN> {
1043  let isLaneQ = 1;
1044}
1045def VQDMLAL_HIGH_LANE   : SOpInst<"vqdmlal_high_lane", "(>Q)(>Q)Q.I", "si",
1046                                OP_QDMLALHi_LN>;
1047def VQDMLAL_HIGH_LANEQ  : SOpInst<"vqdmlal_high_laneq", "(>Q)(>Q)QQI", "si",
1048                                OP_QDMLALHi_LN> {
1049  let isLaneQ = 1;
1050}
1051def VQDMLSL_LANEQ  : SOpInst<"vqdmlsl_laneq", "(>Q)(>Q).QI", "si", OP_QDMLSL_LN> {
1052  let isLaneQ = 1;
1053}
1054def VQDMLSL_HIGH_LANE   : SOpInst<"vqdmlsl_high_lane", "(>Q)(>Q)Q.I", "si",
1055                                OP_QDMLSLHi_LN>;
1056def VQDMLSL_HIGH_LANEQ  : SOpInst<"vqdmlsl_high_laneq", "(>Q)(>Q)QQI", "si",
1057                                OP_QDMLSLHi_LN> {
1058  let isLaneQ = 1;
1059}
1060
// Newly add double parameter for vmul_lane in aarch64
// Note: d type is handled by SCALAR_VMUL_LANE
def VMUL_LANE_A64 : IOpInst<"vmul_lane", "..qI", "Qd", OP_MUL_LN>;

// Note: d type is handled by SCALAR_VMUL_LANEQ
def VMUL_LANEQ   : IOpInst<"vmul_laneq", "..QI",
                           "sifUsUiQsQiQUsQUiQfQd", OP_MUL_LN> {
  let isLaneQ = 1;
}
def VMULL_LANEQ  : SOpInst<"vmull_laneq", "(>Q).QI", "siUsUi", OP_MULL_LN> {
  let isLaneQ = 1;
}
def VMULL_HIGH_LANE   : SOpInst<"vmull_high_lane", "(>Q)Q.I", "siUsUi",
                                OP_MULLHi_LN>;
def VMULL_HIGH_LANEQ  : SOpInst<"vmull_high_laneq", "(>Q)QQI", "siUsUi",
                                OP_MULLHi_LN> {
  let isLaneQ = 1;
}

def VQDMULL_LANEQ  : SOpInst<"vqdmull_laneq", "(>Q).QI", "si", OP_QDMULL_LN> {
  let isLaneQ = 1;
}
def VQDMULL_HIGH_LANE   : SOpInst<"vqdmull_high_lane", "(>Q)Q.I", "si",
                                  OP_QDMULLHi_LN>;
def VQDMULL_HIGH_LANEQ  : SOpInst<"vqdmull_high_laneq", "(>Q)QQI", "si",
                                  OP_QDMULLHi_LN> {
  let isLaneQ = 1;
}

// Saturating doubling multiply-high by 128-bit lane source.
let isLaneQ = 1 in {
def VQDMULH_LANEQ  : SInst<"vqdmulh_laneq", "..QI", "siQsQi">;
def VQRDMULH_LANEQ : SInst<"vqrdmulh_laneq", "..QI", "siQsQi">;
}
// v8.1a rounding doubling multiply-accumulate by 128-bit lane source.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a,neon" in {
def VQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "...QI", "siQsQi", OP_QRDMLAH_LN> {
  let isLaneQ = 1;
}
def VQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "...QI", "siQsQi", OP_QRDMLSH_LN> {
  let isLaneQ = 1;
}
} // ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a,neon"

// Note: d type implemented by SCALAR_VMULX_LANE
def VMULX_LANE : IOpInst<"vmulx_lane", "..qI", "fQfQd", OP_MULX_LN>;
// Note: d type is implemented by SCALAR_VMULX_LANEQ
def VMULX_LANEQ : IOpInst<"vmulx_laneq", "..QI", "fQfQd", OP_MULX_LN> {
  let isLaneQ = 1;
}
1109
1110////////////////////////////////////////////////////////////////////////////////
1111// Across vectors class
1112def VADDLV  : SInst<"vaddlv", "(1>).", "csiUcUsUiQcQsQiQUcQUsQUi">;
1113def VMAXV   : SInst<"vmaxv", "1.", "csifUcUsUiQcQsQiQUcQUsQUiQfQd">;
1114def VMINV   : SInst<"vminv", "1.", "csifUcUsUiQcQsQiQUcQUsQUiQfQd">;
1115def VADDV   : SInst<"vaddv", "1.", "csifUcUsUiQcQsQiQUcQUsQUiQfQdQlQUl">;
1116def FMAXNMV : SInst<"vmaxnmv", "1.", "fQfQd">;
1117def FMINNMV : SInst<"vminnmv", "1.", "fQfQd">;
1118
1119////////////////////////////////////////////////////////////////////////////////
1120// Newly added Vector Extract for f64
1121def VEXT_A64 : WInst<"vext", "...I", "dQdPlQPl">;
1122
1123////////////////////////////////////////////////////////////////////////////////
1124// Crypto
1125let ArchGuard = "__ARM_ARCH >= 8", TargetGuard = "aes,neon" in {
1126def AESE : SInst<"vaese", "...", "QUc">;
1127def AESD : SInst<"vaesd", "...", "QUc">;
1128def AESMC : SInst<"vaesmc", "..", "QUc">;
1129def AESIMC : SInst<"vaesimc", "..", "QUc">;
1130}
1131
1132let ArchGuard = "__ARM_ARCH >= 8", TargetGuard = "sha2,neon" in {
1133def SHA1H : SInst<"vsha1h", "11", "Ui">;
1134def SHA1SU1 : SInst<"vsha1su1", "...", "QUi">;
1135def SHA256SU0 : SInst<"vsha256su0", "...", "QUi">;
1136
1137def SHA1C : SInst<"vsha1c", "..1.", "QUi">;
1138def SHA1P : SInst<"vsha1p", "..1.", "QUi">;
1139def SHA1M : SInst<"vsha1m", "..1.", "QUi">;
1140def SHA1SU0 : SInst<"vsha1su0", "....", "QUi">;
1141def SHA256H : SInst<"vsha256h", "....", "QUi">;
1142def SHA256H2 : SInst<"vsha256h2", "....", "QUi">;
1143def SHA256SU1 : SInst<"vsha256su1", "....", "QUi">;
1144}
1145
// SHA-3 helper operations (bit-clear-and-xor, three-way xor, rotate-and-xor).
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sha3,neon" in {
def BCAX : SInst<"vbcax", "....", "QUcQUsQUiQUlQcQsQiQl">;
def EOR3 : SInst<"veor3", "....", "QUcQUsQUiQUlQcQsQiQl">;
def RAX1 : SInst<"vrax1", "...", "QUl">;

// vxar's immediate is a rotate amount; isVXAR selects its range checking.
let isVXAR = 1 in {
def XAR :  SInst<"vxar", "...I", "QUl">;
}
}
1155
// SHA-512 hash-update operations; gated on the SHA3 target feature like the
// other SHA-3-family defs above.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sha3,neon" in {
def SHA512SU0 : SInst<"vsha512su0", "...", "QUl">;
// Record renamed from SHA512su1 for consistency with the all-caps names used
// by every other crypto def here (SHA512SU0, SHA512H, ...). The generated
// intrinsic comes from the "vsha512su1" string, and the record name is not
// referenced elsewhere in the visible file, so this is a pure naming cleanup.
def SHA512SU1 : SInst<"vsha512su1", "....", "QUl">;
def SHA512H : SInst<"vsha512h", "....", "QUl">;
def SHA512H2 : SInst<"vsha512h2", "....", "QUl">;
}

// SM3 hash operations; require the SM4 target feature.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sm4,neon" in {
def SM3SS1 : SInst<"vsm3ss1", "....", "QUi">;
def SM3TT1A : SInst<"vsm3tt1a", "....I", "QUi">;
def SM3TT1B : SInst<"vsm3tt1b", "....I", "QUi">;
def SM3TT2A : SInst<"vsm3tt2a", "....I", "QUi">;
def SM3TT2B : SInst<"vsm3tt2b", "....I", "QUi">;
def SM3PARTW1 : SInst<"vsm3partw1", "....", "QUi">;
def SM3PARTW2 : SInst<"vsm3partw2", "....", "QUi">;
}

// SM4 block-cipher operations.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sm4,neon" in {
def SM4E : SInst<"vsm4e", "...", "QUi">;
def SM4EKEY : SInst<"vsm4ekey", "...", "QUi">;
}

////////////////////////////////////////////////////////////////////////////////
// poly128_t vadd for AArch64 only see VADDP for the rest
def VADDP_Q   : WInst<"vadd", "...", "QPk">;
1181
1182////////////////////////////////////////////////////////////////////////////////
1183// Float -> Int conversions with explicit rounding mode
1184
1185let ArchGuard = "__ARM_ARCH >= 8" in {
1186def FCVTNS_S32 : SInst<"vcvtn_s32", "S.", "fQf">;
1187def FCVTNU_S32 : SInst<"vcvtn_u32", "U.", "fQf">;
1188def FCVTPS_S32 : SInst<"vcvtp_s32", "S.", "fQf">;
1189def FCVTPU_S32 : SInst<"vcvtp_u32", "U.", "fQf">;
1190def FCVTMS_S32 : SInst<"vcvtm_s32", "S.", "fQf">;
1191def FCVTMU_S32 : SInst<"vcvtm_u32", "U.", "fQf">;
1192def FCVTAS_S32 : SInst<"vcvta_s32", "S.", "fQf">;
1193def FCVTAU_S32 : SInst<"vcvta_u32", "U.", "fQf">;
1194}
1195
1196let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {
1197def FCVTNS_S64 : SInst<"vcvtn_s64", "S.", "dQd">;
1198def FCVTNU_S64 : SInst<"vcvtn_u64", "U.", "dQd">;
1199def FCVTPS_S64 : SInst<"vcvtp_s64", "S.", "dQd">;
1200def FCVTPU_S64 : SInst<"vcvtp_u64", "U.", "dQd">;
1201def FCVTMS_S64 : SInst<"vcvtm_s64", "S.", "dQd">;
1202def FCVTMU_S64 : SInst<"vcvtm_u64", "U.", "dQd">;
1203def FCVTAS_S64 : SInst<"vcvta_s64", "S.", "dQd">;
1204def FCVTAU_S64 : SInst<"vcvta_u64", "U.", "dQd">;
1205}
1206
1207////////////////////////////////////////////////////////////////////////////////
1208// Round to Integral
1209
1210let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
1211def FRINTN_S32 : SInst<"vrndn", "..", "fQf">;
1212def FRINTA_S32 : SInst<"vrnda", "..", "fQf">;
1213def FRINTP_S32 : SInst<"vrndp", "..", "fQf">;
1214def FRINTM_S32 : SInst<"vrndm", "..", "fQf">;
1215def FRINTX_S32 : SInst<"vrndx", "..", "fQf">;
1216def FRINTZ_S32 : SInst<"vrnd", "..", "fQf">;
1217def FRINTI_S32 : SInst<"vrndi", "..", "fQf">;
1218}
1219
1220let ArchGuard = "(defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
1221def FRINTN_S64 : SInst<"vrndn", "..", "dQd">;
1222def FRINTA_S64 : SInst<"vrnda", "..", "dQd">;
1223def FRINTP_S64 : SInst<"vrndp", "..", "dQd">;
1224def FRINTM_S64 : SInst<"vrndm", "..", "dQd">;
1225def FRINTX_S64 : SInst<"vrndx", "..", "dQd">;
1226def FRINTZ_S64 : SInst<"vrnd", "..", "dQd">;
1227def FRINTI_S64 : SInst<"vrndi", "..", "dQd">;
1228}
1229
1230let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.5a,neon" in {
1231def FRINT32X_S32 : SInst<"vrnd32x", "..", "fQf">;
1232def FRINT32Z_S32 : SInst<"vrnd32z", "..", "fQf">;
1233def FRINT64X_S32 : SInst<"vrnd64x", "..", "fQf">;
1234def FRINT64Z_S32 : SInst<"vrnd64z", "..", "fQf">;
1235
1236def FRINT32X_S64 : SInst<"vrnd32x", "..", "dQd">;
1237def FRINT32Z_S64 : SInst<"vrnd32z", "..", "dQd">;
1238def FRINT64X_S64 : SInst<"vrnd64x", "..", "dQd">;
1239def FRINT64Z_S64 : SInst<"vrnd64z", "..", "dQd">;
1240}
1241
1242////////////////////////////////////////////////////////////////////////////////
1243// MaxNum/MinNum Floating Point
1244
1245let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in {
1246def FMAXNM_S32 : SInst<"vmaxnm", "...", "fQf">;
1247def FMINNM_S32 : SInst<"vminnm", "...", "fQf">;
1248}
1249
1250let ArchGuard = "(defined(__aarch64__)  || defined(__arm64ec__)) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in {
1251def FMAXNM_S64 : SInst<"vmaxnm", "...", "dQd">;
1252def FMINNM_S64 : SInst<"vminnm", "...", "dQd">;
1253}
1254
////////////////////////////////////////////////////////////////////////////////
// Permutation
def VTRN1 : SOpInst<"vtrn1", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_TRN1>;
def VZIP1 : SOpInst<"vzip1", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_ZIP1>;
def VUZP1 : SOpInst<"vuzp1", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_UZP1>;
def VTRN2 : SOpInst<"vtrn2", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_TRN2>;
def VZIP2 : SOpInst<"vzip2", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_ZIP2>;
def VUZP2 : SOpInst<"vuzp2", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_UZP2>;

////////////////////////////////////////////////////////////////////////////////
// Table lookup
let InstName = "vtbl" in {
def VQTBL1_A64 : WInst<"vqtbl1", ".QU",  "UccPcQUcQcQPc">;
def VQTBL2_A64 : WInst<"vqtbl2", ".(2Q)U",  "UccPcQUcQcQPc">;
def VQTBL3_A64 : WInst<"vqtbl3", ".(3Q)U",  "UccPcQUcQcQPc">;
def VQTBL4_A64 : WInst<"vqtbl4", ".(4Q)U",  "UccPcQUcQcQPc">;
}
let InstName = "vtbx" in {
def VQTBX1_A64 : WInst<"vqtbx1", "..QU", "UccPcQUcQcQPc">;
def VQTBX2_A64 : WInst<"vqtbx2", "..(2Q)U", "UccPcQUcQcQPc">;
def VQTBX3_A64 : WInst<"vqtbx3", "..(3Q)U", "UccPcQUcQcQPc">;
def VQTBX4_A64 : WInst<"vqtbx4", "..(4Q)U", "UccPcQUcQcQPc">;
}

////////////////////////////////////////////////////////////////////////////////
// Vector reinterpret cast operations

// NeonEmitter implicitly takes the cartesian product of the type string with
// itself during generation so, unlike all other intrinsics, this one should
// include *all* types, not just additional ones.
def VVREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk"> {
  let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)";
  let BigEndianSafe = 1;
}

////////////////////////////////////////////////////////////////////////////////
// Scalar Intrinsics
// Scalar Arithmetic

// Scalar Addition
def SCALAR_ADD : SInst<"vadd", "111",  "SlSUl">;
// Scalar Saturating Add
def SCALAR_QADD   : SInst<"vqadd", "111", "ScSsSiSlSUcSUsSUiSUl">;

// Scalar Subtraction
def SCALAR_SUB : SInst<"vsub", "111",  "SlSUl">;
// Scalar Saturating Sub
def SCALAR_QSUB   : SInst<"vqsub", "111", "ScSsSiSlSUcSUsSUiSUl">;

let InstName = "vmov" in {
def VGET_HIGH_A64 : NoTestOpInst<"vget_high", ".Q", "dPl", OP_HI>;
def VGET_LOW_A64  : NoTestOpInst<"vget_low", ".Q", "dPl", OP_LO>;
}

////////////////////////////////////////////////////////////////////////////////
// Scalar Shift
// Scalar Shift Left
def SCALAR_SHL: SInst<"vshl", "11(S1)", "SlSUl">;
// Scalar Saturating Shift Left
def SCALAR_QSHL: SInst<"vqshl", "11(S1)", "ScSsSiSlSUcSUsSUiSUl">;
// Scalar Saturating Rounding Shift Left
def SCALAR_QRSHL: SInst<"vqrshl", "11(S1)", "ScSsSiSlSUcSUsSUiSUl">;
// Scalar Shift Rounding Left
def SCALAR_RSHL: SInst<"vrshl", "11(S1)", "SlSUl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Shift (Immediate)
let isScalarShift = 1 in {
// Signed/Unsigned Shift Right (Immediate)
def SCALAR_SSHR_N: SInst<"vshr_n", "11I", "SlSUl">;
// Signed/Unsigned Rounding Shift Right (Immediate)
def SCALAR_SRSHR_N: SInst<"vrshr_n", "11I", "SlSUl">;

// Signed/Unsigned Shift Right and Accumulate (Immediate)
def SCALAR_SSRA_N: SInst<"vsra_n", "111I", "SlSUl">;
// Signed/Unsigned Rounding Shift Right and Accumulate (Immediate)
def SCALAR_SRSRA_N: SInst<"vrsra_n", "111I", "SlSUl">;

// Shift Left (Immediate)
def SCALAR_SHL_N: SInst<"vshl_n", "11I", "SlSUl">;
// Signed/Unsigned Saturating Shift Left (Immediate)
def SCALAR_SQSHL_N: SInst<"vqshl_n", "11I", "ScSsSiSlSUcSUsSUiSUl">;
// Signed Saturating Shift Left Unsigned (Immediate)
def SCALAR_SQSHLU_N: SInst<"vqshlu_n", "11I", "ScSsSiSl">;

// Shift Right And Insert (Immediate)
def SCALAR_SRI_N: SInst<"vsri_n", "111I", "SlSUl">;
// Shift Left And Insert (Immediate)
def SCALAR_SLI_N: SInst<"vsli_n", "111I", "SlSUl">;

let isScalarNarrowShift = 1 in {
  // Signed/Unsigned Saturating Shift Right Narrow (Immediate)
  def SCALAR_SQSHRN_N: SInst<"vqshrn_n", "(1<)1I", "SsSiSlSUsSUiSUl">;
  // Signed/Unsigned Saturating Rounded Shift Right Narrow (Immediate)
  def SCALAR_SQRSHRN_N: SInst<"vqrshrn_n", "(1<)1I", "SsSiSlSUsSUiSUl">;
  // Signed Saturating Shift Right Unsigned Narrow (Immediate)
  def SCALAR_SQSHRUN_N: SInst<"vqshrun_n", "(1<U)1I", "SsSiSl">;
  // Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
  def SCALAR_SQRSHRUN_N: SInst<"vqrshrun_n", "(1<U)1I", "SsSiSl">;
}

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed/Unsigned Fixed-point Convert To Floating-Point (Immediate)
def SCALAR_SCVTF_N_F32: SInst<"vcvt_n_f32", "(1F)(1!)I", "SiSUi">;
def SCALAR_SCVTF_N_F64: SInst<"vcvt_n_f64", "(1F)(1!)I", "SlSUl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Convert To Signed/Unsigned Fixed-point (Immediate)
def SCALAR_FCVTZS_N_S32 : SInst<"vcvt_n_s32", "(1S)1I", "Sf">;
def SCALAR_FCVTZU_N_U32 : SInst<"vcvt_n_u32", "(1U)1I", "Sf">;
def SCALAR_FCVTZS_N_S64 : SInst<"vcvt_n_s64", "(1S)1I", "Sd">;
def SCALAR_FCVTZU_N_U64 : SInst<"vcvt_n_u64", "(1U)1I", "Sd">;
}

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Round to Integral
let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
def SCALAR_FRINTN_S32 : SInst<"vrndn", "11", "Sf">;
}

////////////////////////////////////////////////////////////////////////////////
// Scalar Reduce Pairwise Addition (Scalar and Floating Point)
def SCALAR_ADDP  : SInst<"vpadd", "1.", "SfSHlSHdSHUl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Reduce Floating Point Pairwise Max/Min
def SCALAR_FMAXP : SInst<"vpmax", "1.", "SfSQd">;

def SCALAR_FMINP : SInst<"vpmin", "1.", "SfSQd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Reduce Floating Point Pairwise maxNum/minNum
def SCALAR_FMAXNMP : SInst<"vpmaxnm", "1.", "SfSQd">;
def SCALAR_FMINNMP : SInst<"vpminnm", "1.", "SfSQd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Integer Saturating Doubling Multiply Half High
def SCALAR_SQDMULH : SInst<"vqdmulh", "111", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Integer Saturating Rounding Doubling Multiply Half High
def SCALAR_SQRDMULH : SInst<"vqrdmulh", "111", "SsSi">;

let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a,neon" in {
////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half
def SCALAR_SQRDMLAH : SInst<"vqrdmlah", "1111", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Rounding Doubling Multiply Subtract Returning High Half
def SCALAR_SQRDMLSH : SInst<"vqrdmlsh", "1111", "SsSi">;
} // ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a"

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Multiply Extended
def SCALAR_FMULX : IInst<"vmulx", "111", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Reciprocal Step
def SCALAR_FRECPS : IInst<"vrecps", "111", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Reciprocal Square Root Step
def SCALAR_FRSQRTS : IInst<"vrsqrts", "111", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Integer Convert To Floating-point
def SCALAR_SCVTFS : SInst<"vcvt_f32", "(1F)(1!)", "Si">;
def SCALAR_SCVTFD : SInst<"vcvt_f64", "(1F)(1!)", "Sl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Unsigned Integer Convert To Floating-point
def SCALAR_UCVTFS : SInst<"vcvt_f32", "(1F)(1!)", "SUi">;
def SCALAR_UCVTFD : SInst<"vcvt_f64", "(1F)(1!)", "SUl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Converts
def SCALAR_FCVTXN  : IInst<"vcvtx_f32", "(1F<)(1!)", "Sd">;
def SCALAR_FCVTNSS : SInst<"vcvtn_s32", "(1S)1", "Sf">;
def SCALAR_FCVTNUS : SInst<"vcvtn_u32", "(1U)1", "Sf">;
def SCALAR_FCVTNSD : SInst<"vcvtn_s64", "(1S)1", "Sd">;
def SCALAR_FCVTNUD : SInst<"vcvtn_u64", "(1U)1", "Sd">;
def SCALAR_FCVTMSS : SInst<"vcvtm_s32", "(1S)1", "Sf">;
def SCALAR_FCVTMUS : SInst<"vcvtm_u32", "(1U)1", "Sf">;
def SCALAR_FCVTMSD : SInst<"vcvtm_s64", "(1S)1", "Sd">;
def SCALAR_FCVTMUD : SInst<"vcvtm_u64", "(1U)1", "Sd">;
def SCALAR_FCVTASS : SInst<"vcvta_s32", "(1S)1", "Sf">;
def SCALAR_FCVTAUS : SInst<"vcvta_u32", "(1U)1", "Sf">;
def SCALAR_FCVTASD : SInst<"vcvta_s64", "(1S)1", "Sd">;
def SCALAR_FCVTAUD : SInst<"vcvta_u64", "(1U)1", "Sd">;
def SCALAR_FCVTPSS : SInst<"vcvtp_s32", "(1S)1", "Sf">;
def SCALAR_FCVTPUS : SInst<"vcvtp_u32", "(1U)1", "Sf">;
def SCALAR_FCVTPSD : SInst<"vcvtp_s64", "(1S)1", "Sd">;
def SCALAR_FCVTPUD : SInst<"vcvtp_u64", "(1U)1", "Sd">;
def SCALAR_FCVTZSS : SInst<"vcvt_s32", "(1S)1", "Sf">;
def SCALAR_FCVTZUS : SInst<"vcvt_u32", "(1U)1", "Sf">;
def SCALAR_FCVTZSD : SInst<"vcvt_s64", "(1S)1", "Sd">;
def SCALAR_FCVTZUD : SInst<"vcvt_u64", "(1U)1", "Sd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Reciprocal Estimate
def SCALAR_FRECPE : IInst<"vrecpe", "11", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Reciprocal Exponent
def SCALAR_FRECPX : IInst<"vrecpx", "11", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Reciprocal Square Root Estimate
def SCALAR_FRSQRTE : IInst<"vrsqrte", "11", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Integer Comparison
def SCALAR_CMEQ : SInst<"vceq", "(U1)11", "SlSUl">;
def SCALAR_CMEQZ : SInst<"vceqz", "(U1)1", "SlSUl">;
def SCALAR_CMGE : SInst<"vcge", "(U1)11", "Sl">;
def SCALAR_CMGEZ : SInst<"vcgez", "(U1)1", "Sl">;
def SCALAR_CMHS : SInst<"vcge", "(U1)11", "SUl">;
def SCALAR_CMLE : SInst<"vcle", "(U1)11", "SlSUl">;
def SCALAR_CMLEZ : SInst<"vclez", "(U1)1", "Sl">;
def SCALAR_CMLT : SInst<"vclt", "(U1)11", "SlSUl">;
def SCALAR_CMLTZ : SInst<"vcltz", "(U1)1", "Sl">;
def SCALAR_CMGT : SInst<"vcgt", "(U1)11", "Sl">;
def SCALAR_CMGTZ : SInst<"vcgtz", "(U1)1", "Sl">;
def SCALAR_CMHI : SInst<"vcgt", "(U1)11", "SUl">;
def SCALAR_CMTST : SInst<"vtst", "(U1)11", "SlSUl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Comparison
def SCALAR_FCMEQ : IInst<"vceq", "(1U)11", "SfSd">;
def SCALAR_FCMEQZ : IInst<"vceqz", "(1U)1", "SfSd">;
def SCALAR_FCMGE : IInst<"vcge", "(1U)11", "SfSd">;
def SCALAR_FCMGEZ : IInst<"vcgez", "(1U)1", "SfSd">;
def SCALAR_FCMGT : IInst<"vcgt", "(1U)11", "SfSd">;
def SCALAR_FCMGTZ : IInst<"vcgtz", "(1U)1", "SfSd">;
def SCALAR_FCMLE : IInst<"vcle", "(1U)11", "SfSd">;
def SCALAR_FCMLEZ : IInst<"vclez", "(1U)1", "SfSd">;
def SCALAR_FCMLT : IInst<"vclt", "(1U)11", "SfSd">;
def SCALAR_FCMLTZ : IInst<"vcltz", "(1U)1", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
def SCALAR_FACGE : IInst<"vcage", "(1U)11", "SfSd">;
def SCALAR_FACLE : IInst<"vcale", "(1U)11", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Absolute Compare Mask Greater Than
def SCALAR_FACGT : IInst<"vcagt", "(1U)11", "SfSd">;
def SCALAR_FACLT : IInst<"vcalt", "(1U)11", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Absolute Value
def SCALAR_ABS : SInst<"vabs", "11", "Sl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Absolute Difference
def SCALAR_ABD : IInst<"vabd", "111", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Absolute Value
def SCALAR_SQABS : SInst<"vqabs", "11", "ScSsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Negate
def SCALAR_NEG : SInst<"vneg", "11", "Sl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Negate
def SCALAR_SQNEG : SInst<"vqneg", "11", "ScSsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Accumulated of Unsigned Value
def SCALAR_SUQADD : SInst<"vuqadd", "11(1U)", "ScSsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Unsigned Saturating Accumulated of Signed Value
def SCALAR_USQADD : SInst<"vsqadd", "11(1S)", "SUcSUsSUiSUl">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Doubling Multiply-Add Long
def SCALAR_SQDMLAL : SInst<"vqdmlal", "(1>)(1>)11", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Doubling Multiply-Subtract Long
def SCALAR_SQDMLSL : SInst<"vqdmlsl", "(1>)(1>)11", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Doubling Multiply Long
def SCALAR_SQDMULL : SInst<"vqdmull", "(1>)11", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Extract Unsigned Narrow
def SCALAR_SQXTUN : SInst<"vqmovun", "(U1<)1", "SsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Extract Narrow
def SCALAR_SQXTN : SInst<"vqmovn", "(1<)1", "SsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Unsigned Saturating Extract Narrow
def SCALAR_UQXTN : SInst<"vqmovn", "(1<)1", "SUsSUiSUl">;

// Scalar Floating Point multiply (scalar, by element)
def SCALAR_FMUL_LANE : IOpInst<"vmul_lane", "11.I", "SfSd", OP_SCALAR_MUL_LN>;
def SCALAR_FMUL_LANEQ : IOpInst<"vmul_laneq", "11QI", "SfSd", OP_SCALAR_MUL_LN> {
  let isLaneQ = 1;
}

// Scalar Floating Point multiply extended (scalar, by element)
def SCALAR_FMULX_LANE : IOpInst<"vmulx_lane", "11.I", "SfSd", OP_SCALAR_MULX_LN>;
def SCALAR_FMULX_LANEQ : IOpInst<"vmulx_laneq", "11QI", "SfSd", OP_SCALAR_MULX_LN> {
  let isLaneQ = 1;
}

def SCALAR_VMUL_N : IInst<"vmul_n", "..1", "d">;

// VMUL_LANE_A64 d type implemented using scalar mul lane
def SCALAR_VMUL_LANE : IInst<"vmul_lane", "..qI", "d">;

// VMUL_LANEQ d type implemented using scalar mul lane
def SCALAR_VMUL_LANEQ   : IInst<"vmul_laneq", "..QI", "d"> {
  let isLaneQ = 1;
}

// VMULX_LANE d type implemented using scalar vmulx_lane
def SCALAR_VMULX_LANE : IOpInst<"vmulx_lane", "..qI", "d", OP_SCALAR_VMULX_LN>;

// VMULX_LANEQ d type implemented using scalar vmulx_laneq
def SCALAR_VMULX_LANEQ : IOpInst<"vmulx_laneq", "..QI", "d", OP_SCALAR_VMULX_LNQ> {
  let isLaneQ = 1;
}

// Scalar Floating Point fused multiply-add (scalar, by element)
def SCALAR_FMLA_LANE : IInst<"vfma_lane", "111.I", "SfSd">;
def SCALAR_FMLA_LANEQ : IInst<"vfma_laneq", "111QI", "SfSd"> {
  let isLaneQ = 1;
}

// Scalar Floating Point fused multiply-subtract (scalar, by element)
def SCALAR_FMLS_LANE : IOpInst<"vfms_lane", "111.I", "SfSd", OP_FMS_LN>;
def SCALAR_FMLS_LANEQ : IOpInst<"vfms_laneq", "111QI", "SfSd", OP_FMS_LNQ> {
  let isLaneQ = 1;
}

// Signed Saturating Doubling Multiply Long (scalar by element)
def SCALAR_SQDMULL_LANE : SOpInst<"vqdmull_lane", "(1>)1.I", "SsSi", OP_SCALAR_QDMULL_LN>;
def SCALAR_SQDMULL_LANEQ : SOpInst<"vqdmull_laneq", "(1>)1QI", "SsSi", OP_SCALAR_QDMULL_LN> {
  let isLaneQ = 1;
}

// Signed Saturating Doubling Multiply-Add Long (scalar by element)
def SCALAR_SQDMLAL_LANE : SInst<"vqdmlal_lane", "(1>)(1>)1.I", "SsSi">;
def SCALAR_SQDMLAL_LANEQ : SInst<"vqdmlal_laneq", "(1>)(1>)1QI", "SsSi"> {
  let isLaneQ = 1;
}

// Signed Saturating Doubling Multiply-Subtract Long (scalar by element)
def SCALAR_SQDMLS_LANE : SInst<"vqdmlsl_lane", "(1>)(1>)1.I", "SsSi">;
def SCALAR_SQDMLS_LANEQ : SInst<"vqdmlsl_laneq", "(1>)(1>)1QI", "SsSi"> {
  let isLaneQ = 1;
}

// Scalar Integer Saturating Doubling Multiply Half High (scalar by element)
def SCALAR_SQDMULH_LANE : SOpInst<"vqdmulh_lane", "11.I", "SsSi", OP_SCALAR_QDMULH_LN>;
def SCALAR_SQDMULH_LANEQ : SOpInst<"vqdmulh_laneq", "11QI", "SsSi", OP_SCALAR_QDMULH_LN> {
  let isLaneQ = 1;
}

// Scalar Integer Saturating Rounding Doubling Multiply Half High
def SCALAR_SQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "11.I", "SsSi", OP_SCALAR_QRDMULH_LN>;
def SCALAR_SQRDMULH_LANEQ : SOpInst<"vqrdmulh_laneq", "11QI", "SsSi", OP_SCALAR_QRDMULH_LN> {
  let isLaneQ = 1;
}

let TargetGuard = "v8.1a,neon" in {
// Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half
def SCALAR_SQRDMLAH_LANE : SOpInst<"vqrdmlah_lane", "111.I", "SsSi", OP_SCALAR_QRDMLAH_LN>;
def SCALAR_SQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "111QI", "SsSi", OP_SCALAR_QRDMLAH_LN> {
  let isLaneQ = 1;
}

// Signed Saturating Rounding Doubling Multiply Subtract Returning High Half
def SCALAR_SQRDMLSH_LANE : SOpInst<"vqrdmlsh_lane", "111.I", "SsSi", OP_SCALAR_QRDMLSH_LN>;
def SCALAR_SQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "111QI", "SsSi", OP_SCALAR_QRDMLSH_LN> {
  let isLaneQ = 1;
}
} // TargetGuard = "v8.1a"

def SCALAR_VDUP_LANE : IInst<"vdup_lane", "1.I", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs">;
def SCALAR_VDUP_LANEQ : IInst<"vdup_laneq", "1QI", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs"> {
  let isLaneQ = 1;
}

} // ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)"

// ARMv8.2-A FP16 vector intrinsics for A32/A64.
let TargetGuard = "fullfp16,neon" in {

  // ARMv8.2-A FP16 one-operand vector intrinsics.

  // Comparison
  def CMEQH    : SInst<"vceqz", "U.", "hQh">;
  def CMGEH    : SInst<"vcgez", "U.", "hQh">;
  def CMGTH    : SInst<"vcgtz", "U.", "hQh">;
  def CMLEH    : SInst<"vclez", "U.", "hQh">;
  def CMLTH    : SInst<"vcltz", "U.", "hQh">;

  // Vector conversion
  def VCVT_F16     : SInst<"vcvt_f16", "F(.!)",  "sUsQsQUs">;
  def VCVT_S16     : SInst<"vcvt_s16", "S.",  "hQh">;
  def VCVT_U16     : SInst<"vcvt_u16", "U.",  "hQh">;
  def VCVTA_S16    : SInst<"vcvta_s16", "S.", "hQh">;
  def VCVTA_U16    : SInst<"vcvta_u16", "U.", "hQh">;
  def VCVTM_S16    : SInst<"vcvtm_s16", "S.", "hQh">;
  def VCVTM_U16    : SInst<"vcvtm_u16", "U.", "hQh">;
  def VCVTN_S16    : SInst<"vcvtn_s16", "S.", "hQh">;
  def VCVTN_U16    : SInst<"vcvtn_u16", "U.", "hQh">;
  def VCVTP_S16    : SInst<"vcvtp_s16", "S.", "hQh">;
  def VCVTP_U16    : SInst<"vcvtp_u16", "U.", "hQh">;

  // Vector rounding
  let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)", TargetGuard = "fullfp16,neon" in {
    def FRINTZH      : SInst<"vrnd",  "..", "hQh">;
    def FRINTNH      : SInst<"vrndn", "..", "hQh">;
    def FRINTAH      : SInst<"vrnda", "..", "hQh">;
    def FRINTPH      : SInst<"vrndp", "..", "hQh">;
    def FRINTMH      : SInst<"vrndm", "..", "hQh">;
    def FRINTXH      : SInst<"vrndx", "..", "hQh">;
  }

  // Misc.
  def VABSH        : SInst<"vabs", "..", "hQh">;
  def VNEGH        : SOpInst<"vneg", "..", "hQh", OP_NEG>;
  def VRECPEH      : SInst<"vrecpe", "..", "hQh">;
  def FRSQRTEH     : SInst<"vrsqrte", "..", "hQh">;

  // ARMv8.2-A FP16 two-operand vector intrinsics.

  // Misc.
  def VADDH        : SOpInst<"vadd", "...", "hQh", OP_ADD>;
  def VABDH        : SInst<"vabd", "...",  "hQh">;
  def VSUBH         : SOpInst<"vsub", "...", "hQh", OP_SUB>;

  // Comparison
  let InstName = "vacge" in {
    def VCAGEH     : SInst<"vcage", "U..", "hQh">;
    def VCALEH     : SInst<"vcale", "U..", "hQh">;
  }
  let InstName = "vacgt" in {
    def VCAGTH     : SInst<"vcagt", "U..", "hQh">;
    def VCALTH     : SInst<"vcalt", "U..", "hQh">;
  }
  def VCEQH        : SOpInst<"vceq", "U..", "hQh", OP_EQ>;
  def VCGEH        : SOpInst<"vcge", "U..", "hQh", OP_GE>;
  def VCGTH        : SOpInst<"vcgt", "U..", "hQh", OP_GT>;
  let InstName = "vcge" in
    def VCLEH      : SOpInst<"vcle", "U..", "hQh", OP_LE>;
  let InstName = "vcgt" in
    def VCLTH      : SOpInst<"vclt", "U..", "hQh", OP_LT>;

  // Vector conversion
  let isVCVT_N = 1 in {
    def VCVT_N_F16 : SInst<"vcvt_n_f16", "F(.!)I", "sUsQsQUs">;
    def VCVT_N_S16 : SInst<"vcvt_n_s16", "S.I", "hQh">;
    def VCVT_N_U16 : SInst<"vcvt_n_u16", "U.I", "hQh">;
  }

  // Max/Min
  def VMAXH         : SInst<"vmax", "...", "hQh">;
  def VMINH         : SInst<"vmin", "...", "hQh">;
  let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)", TargetGuard = "fullfp16,neon" in {
    def FMAXNMH       : SInst<"vmaxnm", "...", "hQh">;
    def FMINNMH       : SInst<"vminnm", "...", "hQh">;
  }

  // Multiplication/Division
  def VMULH         : SOpInst<"vmul", "...", "hQh", OP_MUL>;

  // Pairwise addition
  def VPADDH        : SInst<"vpadd", "...", "h">;

  // Pairwise Max/Min
  def VPMAXH        : SInst<"vpmax", "...", "h">;
  def VPMINH        : SInst<"vpmin", "...", "h">;

  // Reciprocal/Sqrt
  def VRECPSH       : SInst<"vrecps", "...", "hQh">;
  def VRSQRTSH      : SInst<"vrsqrts", "...", "hQh">;

  // ARMv8.2-A FP16 three-operand vector intrinsics.

  // Vector fused multiply-add operations
  def VFMAH        : SInst<"vfma", "....", "hQh">;
  def VFMSH        : SOpInst<"vfms", "....", "hQh", OP_FMLS>;

  // ARMv8.2-A FP16 lane vector intrinsics.

  // Mul lane
  def VMUL_LANEH    : IOpInst<"vmul_lane", "..qI", "hQh", OP_MUL_LN>;
  def VMUL_NH       : IOpInst<"vmul_n", "..1", "hQh", OP_MUL_N>;
}

// Data processing intrinsics - section 5. Do not require fullfp16.

// Logical operations
let isHiddenLInst = 1 in
def VBSLH    : SInst<"vbsl", ".U..", "hQh">;
// Transposition operations
def VZIPH    : WInst<"vzip", "2..", "hQh">;
def VUZPH    : WInst<"vuzp", "2..", "hQh">;
def VTRNH    : WInst<"vtrn", "2..", "hQh">;
// Vector Extract
def VEXTH      : WInst<"vext", "...I", "hQh">;
// Reverse vector elements
def VREV64H    : WOpInst<"vrev64", "..", "hQh", OP_REV64>;

// ARMv8.2-A FP16 vector intrinsics for A64 only.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "fullfp16,neon" in {

  // Vector rounding
  def FRINTIH      : SInst<"vrndi", "..", "hQh">;

  // Misc.
  def FSQRTH       : SInst<"vsqrt", "..", "hQh">;

  // Multiplication/Division
  def MULXH         : SInst<"vmulx", "...", "hQh">;
  def FDIVH         : IOpInst<"vdiv", "...",  "hQh", OP_DIV>;

  // Pairwise addition
  def VPADDH1       : SInst<"vpadd", "...", "Qh">;

  // Pairwise Max/Min
  def VPMAXH1       : SInst<"vpmax", "...", "Qh">;
  def VPMINH1       : SInst<"vpmin", "...", "Qh">;

  // Pairwise MaxNum/MinNum
  def FMAXNMPH      : SInst<"vpmaxnm", "...", "hQh">;
  def FMINNMPH      : SInst<"vpminnm", "...", "hQh">;

  // ARMv8.2-A FP16 lane vector intrinsics.

  // FMA lane
  def VFMA_LANEH   : IInst<"vfma_lane", "...qI", "hQh">;
  def VFMA_LANEQH  : IInst<"vfma_laneq", "...QI", "hQh"> {
    let isLaneQ = 1;
  }

  // FMA lane with scalar argument
  def FMLA_NH      : SOpInst<"vfma_n", "...1", "hQh", OP_FMLA_N>;
  // Scalar floating point fused multiply-add (scalar, by element)
  def SCALAR_FMLA_LANEH  : IInst<"vfma_lane", "111.I", "Sh">;
  def SCALAR_FMLA_LANEQH : IInst<"vfma_laneq", "111QI", "Sh"> {
    let isLaneQ = 1;
  }

  // FMS lane
  def VFMS_LANEH   : IOpInst<"vfms_lane", "...qI", "hQh", OP_FMS_LN>;
  def VFMS_LANEQH  : IOpInst<"vfms_laneq", "...QI", "hQh", OP_FMS_LNQ> {
    let isLaneQ = 1;
  }
  // FMS lane with scalar argument
  def FMLS_NH      : SOpInst<"vfms_n", "...1", "hQh", OP_FMLS_N>;
  // Scalar floating point fused multiply-subtract (scalar, by element)
  def SCALAR_FMLS_LANEH  : IOpInst<"vfms_lane", "111.I", "Sh", OP_FMS_LN>;
  def SCALAR_FMLS_LANEQH : IOpInst<"vfms_laneq", "111QI", "Sh", OP_FMS_LNQ> {
    let isLaneQ = 1;
  }

  // Mul lane
  def VMUL_LANEQH   : IOpInst<"vmul_laneq", "..QI", "hQh", OP_MUL_LN> {
    let isLaneQ = 1;
  }
  // Scalar floating point multiply (scalar, by element)
  def SCALAR_FMUL_LANEH  : IOpInst<"vmul_lane", "11.I", "Sh", OP_SCALAR_MUL_LN>;
  def SCALAR_FMUL_LANEQH : IOpInst<"vmul_laneq", "11QI", "Sh", OP_SCALAR_MUL_LN> {
    let isLaneQ = 1;
  }

  // Mulx lane
  def VMULX_LANEH   : IOpInst<"vmulx_lane", "..qI", "hQh", OP_MULX_LN>;
  def VMULX_LANEQH  : IOpInst<"vmulx_laneq", "..QI", "hQh", OP_MULX_LN> {
    let isLaneQ = 1;
  }
  def VMULX_NH      : IOpInst<"vmulx_n", "..1", "hQh", OP_MULX_N>;
  // Scalar floating point mulx (scalar, by element)
  def SCALAR_FMULX_LANEH : IInst<"vmulx_lane", "11.I", "Sh">;
  def SCALAR_FMULX_LANEQH : IInst<"vmulx_laneq", "11QI", "Sh"> {
    let isLaneQ = 1;
  }

  // ARMv8.2-A FP16 reduction vector intrinsics.
  def VMAXVH   : SInst<"vmaxv", "1.", "hQh">;
  def VMINVH   : SInst<"vminv", "1.", "hQh">;
  def FMAXNMVH : SInst<"vmaxnmv", "1.", "hQh">;
  def FMINNMVH : SInst<"vminnmv", "1.", "hQh">;
}

let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {
  // Permutation
  def VTRN1H     : SOpInst<"vtrn1", "...", "hQh", OP_TRN1>;
  def VZIP1H     : SOpInst<"vzip1", "...", "hQh", OP_ZIP1>;
  def VUZP1H     : SOpInst<"vuzp1", "...", "hQh", OP_UZP1>;
  def VTRN2H     : SOpInst<"vtrn2", "...", "hQh", OP_TRN2>;
  def VZIP2H     : SOpInst<"vzip2", "...", "hQh", OP_ZIP2>;
  def VUZP2H     : SOpInst<"vuzp2", "...", "hQh", OP_UZP2>;

  def SCALAR_VDUP_LANEH  : IInst<"vdup_lane", "1.I", "Sh">;
  def SCALAR_VDUP_LANEQH : IInst<"vdup_laneq", "1QI", "Sh"> {
    let isLaneQ = 1;
  }
}

// v8.2-A dot product instructions.
let TargetGuard = "dotprod,neon" in {
  def DOT : SInst<"vdot", "..(<<)(<<)", "iQiUiQUi">;
  def DOT_LANE : SOpInst<"vdot_lane", "..(<<)(<<q)I", "iUiQiQUi", OP_DOT_LN>;
}
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "dotprod,neon" in {
  // Variants indexing into a 128-bit vector are A64 only.
  def UDOT_LANEQ : SOpInst<"vdot_laneq", "..(<<)(<<Q)I", "iUiQiQUi", OP_DOT_LNQ> {
    let isLaneQ = 1;
  }
}

// v8.2-A FP16 fused multiply-add long instructions.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "fp16fml,neon" in {
  def VFMLAL_LOW  : SInst<"vfmlal_low",  ">>..", "hQh">;
  def VFMLSL_LOW  : SInst<"vfmlsl_low",  ">>..", "hQh">;
  def VFMLAL_HIGH : SInst<"vfmlal_high", ">>..", "hQh">;
  def VFMLSL_HIGH : SInst<"vfmlsl_high", ">>..", "hQh">;

  def VFMLAL_LANE_LOW  : SOpInst<"vfmlal_lane_low",  "(F>)(F>)F(Fq)I", "hQh", OP_FMLAL_LN>;
  def VFMLSL_LANE_LOW  : SOpInst<"vfmlsl_lane_low",  "(F>)(F>)F(Fq)I", "hQh", OP_FMLSL_LN>;
  def VFMLAL_LANE_HIGH : SOpInst<"vfmlal_lane_high", "(F>)(F>)F(Fq)I", "hQh", OP_FMLAL_LN_Hi>;
  def VFMLSL_LANE_HIGH : SOpInst<"vfmlsl_lane_high", "(F>)(F>)F(Fq)I", "hQh", OP_FMLSL_LN_Hi>;

  def VFMLAL_LANEQ_LOW  : SOpInst<"vfmlal_laneq_low",  "(F>)(F>)F(FQ)I", "hQh", OP_FMLAL_LN> {
    let isLaneQ = 1;
  }
  def VFMLSL_LANEQ_LOW  : SOpInst<"vfmlsl_laneq_low",  "(F>)(F>)F(FQ)I", "hQh", OP_FMLSL_LN> {
    let isLaneQ = 1;
  }
  def VFMLAL_LANEQ_HIGH : SOpInst<"vfmlal_laneq_high", "(F>)(F>)F(FQ)I", "hQh", OP_FMLAL_LN_Hi> {
    let isLaneQ = 1;
  }
  def VFMLSL_LANEQ_HIGH : SOpInst<"vfmlsl_laneq_high", "(F>)(F>)F(FQ)I", "hQh", OP_FMLSL_LN_Hi> {
    let isLaneQ = 1;
  }
}

let TargetGuard = "i8mm,neon" in {
  def VMMLA   : SInst<"vmmla", "..(<<)(<<)", "QUiQi">;
  def VUSMMLA : SInst<"vusmmla", "..(<<U)(<<)", "Qi">;

  def VUSDOT  : SInst<"vusdot", "..(<<U)(<<)", "iQi">;

  def VUSDOT_LANE  : SOpInst<"vusdot_lane", "..(<<U)(<<q)I", "iQi", OP_USDOT_LN>;
  def VSUDOT_LANE  : SOpInst<"vsudot_lane", "..(<<)(<<qU)I", "iQi", OP_SUDOT_LN>;

  let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {
    let isLaneQ = 1 in {
      def VUSDOT_LANEQ  : SOpInst<"vusdot_laneq", "..(<<U)(<<Q)I", "iQi", OP_USDOT_LNQ>;
      def VSUDOT_LANEQ  : SOpInst<"vsudot_laneq", "..(<<)(<<QU)I", "iQi", OP_SUDOT_LNQ>;
    }
  }
}

let TargetGuard = "bf16,neon" in {
  def VDOT_BF : SInst<"vbfdot", "..BB", "fQf">;
  def VDOT_LANE_BF : SOpInst<"vbfdot_lane", "..B(Bq)I", "fQf", OP_BFDOT_LN>;
  def VDOT_LANEQ_BF : SOpInst<"vbfdot_laneq", "..B(BQ)I", "fQf", OP_BFDOT_LNQ> {
    let isLaneQ = 1;
  }

  def VFMMLA_BF : SInst<"vbfmmla", "..BB", "Qf">;

  def VFMLALB_BF : SInst<"vbfmlalb", "..BB", "Qf">;
  def VFMLALT_BF : SInst<"vbfmlalt", "..BB", "Qf">;

  def VFMLALB_LANE_BF : SOpInst<"vbfmlalb_lane", "..B(Bq)I", "Qf", OP_BFMLALB_LN>;
  def VFMLALB_LANEQ_BF : SOpInst<"vbfmlalb_laneq", "..B(BQ)I", "Qf", OP_BFMLALB_LN>;

  def VFMLALT_LANE_BF : SOpInst<"vbfmlalt_lane", "..B(Bq)I", "Qf", OP_BFMLALT_LN>;
  def VFMLALT_LANEQ_BF : SOpInst<"vbfmlalt_laneq", "..B(BQ)I", "Qf", OP_BFMLALT_LN>;
}

  // Generates the full vcmla family for one element type: for each rotation
  // (0/90/180/270 degrees) it emits the plain D- and Q-register forms plus the
  // _lane and _laneq variants. `lanety`/`laneqty` are the 64-/128-bit unsigned
  // container types used to move one complex (real,imag) pair as a single
  // element: the lane operand is bitcast to that container, one lane is
  // extracted, splatted with dup_typed, and bitcast back to the operand type.
1948  multiclass VCMLA_ROTS<string type, string lanety, string laneqty> {
1949    foreach ROT = ["", "_rot90", "_rot180", "_rot270" ] in {
1950      def   : SInst<"vcmla" # ROT, "....", type # "Q" # type>;
1951
1952      // vcmla{ROT}_lane
1953      def : SOpInst<"vcmla" # ROT # "_lane", "...qI", type, Op<(call "vcmla" # ROT, $p0, $p1,
1954             (bitcast $p0, (dup_typed lanety , (call "vget_lane", (bitcast lanety, $p2), $p3))))>>;
1955
1956      // vcmlaq{ROT}_lane
1957      def : SOpInst<"vcmla" # ROT # "_lane", "...qI", "Q" # type, Op<(call "vcmla" # ROT, $p0, $p1,
1958             (bitcast $p0, (dup_typed laneqty , (call "vget_lane", (bitcast lanety, $p2), $p3))))>>;
1959
  // The _laneq variants take the lane from a 128-bit vector, so they bitcast
  // to laneqty for the extraction and are flagged isLaneQ.
1960      let isLaneQ = 1 in  {
1961        // vcmla{ROT}_laneq
1962        def : SOpInst<"vcmla" # ROT # "_laneq", "...QI", type,  Op<(call "vcmla" # ROT, $p0, $p1,
1963                (bitcast $p0, (dup_typed lanety, (call "vget_lane", (bitcast laneqty, $p2), $p3))))>>;
1964
1965        // vcmlaq{ROT}_laneq
1966        def : SOpInst<"vcmla" # ROT # "_laneq", "...QI", "Q" # type, Op<(call "vcmla" # ROT, $p0, $p1,
1967               (bitcast $p0, (dup_typed laneqty , (call "vget_lane", (bitcast laneqty, $p2), $p3))))>>;
1968      }
1969    }
1970  }
1971
1972  // v8.3-A Vector complex addition intrinsics
  // FP16 complex add/multiply-accumulate; needs both v8.3a and fullfp16.
1973  let TargetGuard = "v8.3a,fullfp16,neon" in {
1974    def VCADD_ROT90_FP16   : SInst<"vcadd_rot90", "...", "h">;
1975    def VCADD_ROT270_FP16  : SInst<"vcadd_rot270", "...", "h">;
1976    def VCADDQ_ROT90_FP16  : SInst<"vcaddq_rot90", "QQQ", "h">;
1977    def VCADDQ_ROT270_FP16 : SInst<"vcaddq_rot270", "QQQ", "h">;
1978
  // An fp16 complex pair is 32 bits, so uint32 vectors carry whole pairs
  // through the VCMLA_ROTS lane machinery.
1979    defm VCMLA_FP16  : VCMLA_ROTS<"h", "uint32x2_t", "uint32x4_t">;
1980  }
  // Float32 complex add/multiply-accumulate (v8.3a).
1981  let TargetGuard = "v8.3a,neon" in {
1982    def VCADD_ROT90   : SInst<"vcadd_rot90", "...", "f">;
1983    def VCADD_ROT270  : SInst<"vcadd_rot270", "...", "f">;
1984    def VCADDQ_ROT90  : SInst<"vcaddq_rot90", "QQQ", "f">;
1985    def VCADDQ_ROT270 : SInst<"vcaddq_rot270", "QQQ", "f">;
1986
  // A float32 complex pair is 64 bits, hence uint64 lane container types.
1987    defm VCMLA_F32        : VCMLA_ROTS<"f", "uint64x1_t", "uint64x2_t">;
1988  }
  // Float64 complex add/multiply-accumulate: AArch64/arm64ec only, and only
  // the Q-register vcadd forms exist (a float64 pair fills a Q register).
1989  let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.3a,neon" in {
1990    def VCADDQ_ROT90_FP64  : SInst<"vcaddq_rot90", "QQQ", "d">;
1991    def VCADDQ_ROT270_FP64 : SInst<"vcaddq_rot270", "QQQ", "d">;
1992
  // Both lane containers are uint64x2_t: one f64 complex pair is 128 bits.
1993    defm VCMLA_FP64 : VCMLA_ROTS<"d", "uint64x2_t", "uint64x2_t">;
1994  }
1995
1996  // V8.2-A BFloat intrinsics
  // Core bfloat16 vector support: create/dup/lane access, loads/stores in all
  // interleave and lane forms, and bf16<->f32 conversions.
1997  let TargetGuard = "bf16,neon" in {
1998    def VCREATE_BF : NoTestOpInst<"vcreate", ".(IU>)", "b", OP_CAST> {
1999      let BigEndianSafe = 1;
2000    }
2001
2002    def VDUP_N_BF    : WOpInst<"vdup_n", ".1", "bQb", OP_DUP>;
2003
2004    def VDUP_LANE_BF : WOpInst<"vdup_lane", ".qI", "bQb", OP_DUP_LN>;
2005    def VDUP_LANEQ_BF: WOpInst<"vdup_laneq", ".QI", "bQb", OP_DUP_LN> {
2006      let isLaneQ = 1;
2007    }
2008
2009    def VCOMBINE_BF  : NoTestOpInst<"vcombine", "Q..", "b", OP_CONC>;
2010
2011    def VGET_HIGH_BF : NoTestOpInst<"vget_high", ".Q", "b", OP_HI>;
2012    def VGET_LOW_BF  : NoTestOpInst<"vget_low", ".Q", "b", OP_LO>;
2013
  // Scalar lane get/set and scalar dup-from-lane ('S' modifier).
2014    def VGET_LANE_BF : IInst<"vget_lane", "1.I", "bQb">;
2015    def VSET_LANE_BF : IInst<"vset_lane", ".1.I", "bQb">;
2016    def SCALAR_VDUP_LANE_BF : IInst<"vdup_lane", "1.I", "Sb">;
2017    def SCALAR_VDUP_LANEQ_BF : IInst<"vdup_laneq", "1QI", "Sb"> {
2018      let isLaneQ = 1;
2019    }
2020
  // Interleaved loads/stores (vld1..vld4 / vst1..vst4) for bf16 vectors.
2021    def VLD1_BF : WInst<"vld1", ".(c*!)", "bQb">;
2022    def VLD2_BF : WInst<"vld2", "2(c*!)", "bQb">;
2023    def VLD3_BF : WInst<"vld3", "3(c*!)", "bQb">;
2024    def VLD4_BF : WInst<"vld4", "4(c*!)", "bQb">;
2025
2026    def VST1_BF : WInst<"vst1", "v*(.!)", "bQb">;
2027    def VST2_BF : WInst<"vst2", "v*(2!)", "bQb">;
2028    def VST3_BF : WInst<"vst3", "v*(3!)", "bQb">;
2029    def VST4_BF : WInst<"vst4", "v*(4!)", "bQb">;
2030
  // Multi-register contiguous loads/stores (vld1_x2.._x4 / vst1_x2.._x4).
2031    def VLD1_X2_BF : WInst<"vld1_x2", "2(c*!)", "bQb">;
2032    def VLD1_X3_BF : WInst<"vld1_x3", "3(c*!)", "bQb">;
2033    def VLD1_X4_BF : WInst<"vld1_x4", "4(c*!)", "bQb">;
2034
2035    def VST1_X2_BF : WInst<"vst1_x2", "v*(2!)", "bQb">;
2036    def VST1_X3_BF : WInst<"vst1_x3", "v*(3!)", "bQb">;
2037    def VST1_X4_BF : WInst<"vst1_x4", "v*(4!)", "bQb">;
2038
  // Single-lane loads/stores.
2039    def VLD1_LANE_BF : WInst<"vld1_lane", ".(c*!).I", "bQb">;
2040    def VLD2_LANE_BF : WInst<"vld2_lane", "2(c*!)2I", "bQb">;
2041    def VLD3_LANE_BF : WInst<"vld3_lane", "3(c*!)3I", "bQb">;
2042    def VLD4_LANE_BF : WInst<"vld4_lane", "4(c*!)4I", "bQb">;
2043    def VST1_LANE_BF : WInst<"vst1_lane", "v*(.!)I", "bQb">;
2044    def VST2_LANE_BF : WInst<"vst2_lane", "v*(2!)I", "bQb">;
2045    def VST3_LANE_BF : WInst<"vst3_lane", "v*(3!)I", "bQb">;
2046    def VST4_LANE_BF : WInst<"vst4_lane", "v*(4!)I", "bQb">;
2047
  // Load-and-replicate forms.
2048    def VLD1_DUP_BF : WInst<"vld1_dup", ".(c*!)", "bQb">;
2049    def VLD2_DUP_BF : WInst<"vld2_dup", "2(c*!)", "bQb">;
2050    def VLD3_DUP_BF : WInst<"vld3_dup", "3(c*!)", "bQb">;
2051    def VLD4_DUP_BF : WInst<"vld4_dup", "4(c*!)", "bQb">;
2052
  // Widening bf16 -> f32 conversions (whole vector, low half, high half).
2053    def VCVT_F32_BF16 : SOpInst<"vcvt_f32_bf16", "(F>)(Bq!)",  "Qb", OP_VCVT_F32_BF16>;
2054    def VCVT_LOW_F32_BF16 : SOpInst<"vcvt_low_f32", "(F>)(BQ!)",  "Qb", OP_VCVT_F32_BF16_LO>;
2055    def VCVT_HIGH_F32_BF16 : SOpInst<"vcvt_high_f32", "(F>)(BQ!)", "Qb", OP_VCVT_F32_BF16_HI>;
2056
  // Scalar conversions in both directions.
2057    def SCALAR_CVT_BF16_F32 : SInst<"vcvth_bf16", "(1B)1", "f">;
2058    def SCALAR_CVT_F32_BF16 : SOpInst<"vcvtah_f32", "(1F>)(1!)", "b", OP_CVT_F32_BF16>;
2059  }
2060
  // AArch32-only f32 -> bf16 narrowing conversions, built on the internal
  // __a32_vcvt_bf16 helper.
2061  let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)", TargetGuard = "bf16,neon" in {
2062    def VCVT_BF16_F32_A32_INTERNAL : WInst<"__a32_vcvt_bf16", "BQ", "f">;
2063    def VCVT_BF16_F32_A32 : SOpInst<"vcvt_bf16", "BQ", "f", OP_VCVT_BF16_F32_A32>;
2064    def VCVT_LOW_BF16_F32_A32 : SOpInst<"vcvt_low_bf16",  "BQ", "Qf", OP_VCVT_BF16_F32_LO_A32>;
2065    def VCVT_HIGH_BF16_F32_A32 : SOpInst<"vcvt_high_bf16", "BBQ", "Qf", OP_VCVT_BF16_F32_HI_A32>;
2066  }
2067
  // AArch64/arm64ec f32 -> bf16 narrowing conversions (via the internal
  // __a64_vcvtq_low_bf16 helper) and bf16 vcopy_lane/vcopy_laneq forms.
2068  let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "bf16,neon" in {
2069    def VCVT_LOW_BF16_F32_A64_INTERNAL : WInst<"__a64_vcvtq_low_bf16", "BQ", "Hf">;
2070    def VCVT_LOW_BF16_F32_A64 : SOpInst<"vcvt_low_bf16", "BQ", "Qf", OP_VCVT_BF16_F32_LO_A64>;
2071    def VCVT_HIGH_BF16_F32_A64 : SInst<"vcvt_high_bf16", "BBQ", "Qf">;
2072    def VCVT_BF16_F32 : SOpInst<"vcvt_bf16",    "BQ", "f", OP_VCVT_BF16_F32_A64>;
2073
  // vcopy: insert one lane of the second operand into a lane of the first.
  // NOTE(review): the _laneq defs here carry no isLaneQ flag — confirm the
  // laneq range check is handled by OP_COPY_LN itself.
2074    def COPY_LANE_BF16 : IOpInst<"vcopy_lane", "..I.I", "b", OP_COPY_LN>;
2075    def COPYQ_LANE_BF16 : IOpInst<"vcopy_lane", "..IqI", "Qb", OP_COPY_LN>;
2076    def COPY_LANEQ_BF16 : IOpInst<"vcopy_laneq", "..IQI", "b", OP_COPY_LN>;
2077    def COPYQ_LANEQ_BF16 : IOpInst<"vcopy_laneq", "..I.I", "Qb", OP_COPY_LN>;
2078  }
2079
  // AArch32 bf16 vreinterpret pairs: cross every listed type with bf16x4/x8.
  // (No float64/poly128 entries — those types do not exist on AArch32.)
2080  let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)", TargetGuard = "bf16,neon" in {
2081    let BigEndianSafe = 1 in {
2082      defm VREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
2083          "csilUcUsUiUlhfPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQPcQPsQPl", "bQb">;
2084    }
2085  }
2086
  // AArch64/arm64ec bf16 vreinterpret pairs: same as the A32 set plus the
  // AArch64-only types (float64 'd'/'Qd' and poly128 'QPk').
2087  let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "bf16,neon" in {
2088    let BigEndianSafe = 1 in {
2089      defm VVREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
2090          "csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk", "bQb">;
2091    }
2092  }
2093
2094  // v8.9a/v9.4a LRCPC3 intrinsics
  // Load-acquire / store-release of a single 64-bit lane (vldap1_lane /
  // vstl1_lane), AArch64/arm64ec only, guarded on the 'rcpc3' feature.
2095  let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "rcpc3,neon" in {
2096    def VLDAP1_LANE : WInst<"vldap1_lane", ".(c*!).I", "QUlQlUlldQdPlQPl">;
2097    def VSTL1_LANE  : WInst<"vstl1_lane", "v*(.!)I", "QUlQlUlldQdPlQPl">;
2098  }
2099