//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// AArch64 Instruction definitions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
//
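// Each predicate pairs a C++ subtarget test (used during instruction
// selection) with an AssemblerPredicate (used for assembly-time diagnostics).
// As an illustrative sketch only (not a definition from this file), such a
// predicate gates patterns and instructions like so:
//
//   let Predicates = [HasNEON] in
//   def : Pat<(v8i8 (AArch64rev16 (v8i8 V64:$Rn))), (REV16v8i8 V64:$Rn)>;
//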
def HasV8_1a         : Predicate<"Subtarget->hasV8_1aOps()">,
                                 AssemblerPredicate<(all_of HasV8_1aOps), "armv8.1a">;
def HasV8_2a         : Predicate<"Subtarget->hasV8_2aOps()">,
                                 AssemblerPredicate<(all_of HasV8_2aOps), "armv8.2a">;
def HasV8_3a         : Predicate<"Subtarget->hasV8_3aOps()">,
                                 AssemblerPredicate<(all_of HasV8_3aOps), "armv8.3a">;
def HasV8_4a         : Predicate<"Subtarget->hasV8_4aOps()">,
                                 AssemblerPredicate<(all_of HasV8_4aOps), "armv8.4a">;
def HasV8_5a         : Predicate<"Subtarget->hasV8_5aOps()">,
                                 AssemblerPredicate<(all_of HasV8_5aOps), "armv8.5a">;
def HasV8_6a         : Predicate<"Subtarget->hasV8_6aOps()">,
                                 AssemblerPredicate<(all_of HasV8_6aOps), "armv8.6a">;
def HasVH            : Predicate<"Subtarget->hasVH()">,
                       AssemblerPredicate<(all_of FeatureVH), "vh">;

def HasLOR           : Predicate<"Subtarget->hasLOR()">,
                       AssemblerPredicate<(all_of FeatureLOR), "lor">;

def HasPA            : Predicate<"Subtarget->hasPA()">,
                       AssemblerPredicate<(all_of FeaturePA), "pa">;

def HasJS            : Predicate<"Subtarget->hasJS()">,
                       AssemblerPredicate<(all_of FeatureJS), "jsconv">;

def HasCCIDX         : Predicate<"Subtarget->hasCCIDX()">,
                       AssemblerPredicate<(all_of FeatureCCIDX), "ccidx">;

def HasComplxNum     : Predicate<"Subtarget->hasComplxNum()">,
                       AssemblerPredicate<(all_of FeatureComplxNum), "complxnum">;

def HasNV            : Predicate<"Subtarget->hasNV()">,
                       AssemblerPredicate<(all_of FeatureNV), "nv">;

def HasRASv8_4       : Predicate<"Subtarget->hasRASv8_4()">,
                       AssemblerPredicate<(all_of FeatureRASv8_4), "rasv8_4">;

def HasMPAM          : Predicate<"Subtarget->hasMPAM()">,
                       AssemblerPredicate<(all_of FeatureMPAM), "mpam">;

def HasDIT           : Predicate<"Subtarget->hasDIT()">,
                       AssemblerPredicate<(all_of FeatureDIT), "dit">;

def HasTRACEV8_4     : Predicate<"Subtarget->hasTRACEV8_4()">,
                       AssemblerPredicate<(all_of FeatureTRACEV8_4), "tracev8.4">;

def HasAM            : Predicate<"Subtarget->hasAM()">,
                       AssemblerPredicate<(all_of FeatureAM), "am">;

def HasSEL2          : Predicate<"Subtarget->hasSEL2()">,
                       AssemblerPredicate<(all_of FeatureSEL2), "sel2">;

def HasPMU           : Predicate<"Subtarget->hasPMU()">,
                       AssemblerPredicate<(all_of FeaturePMU), "pmu">;

def HasTLB_RMI       : Predicate<"Subtarget->hasTLB_RMI()">,
                       AssemblerPredicate<(all_of FeatureTLB_RMI), "tlb-rmi">;

def HasFMI           : Predicate<"Subtarget->hasFMI()">,
                       AssemblerPredicate<(all_of FeatureFMI), "fmi">;

def HasRCPC_IMMO     : Predicate<"Subtarget->hasRCPCImm()">,
                       AssemblerPredicate<(all_of FeatureRCPC_IMMO), "rcpc-immo">;

def HasFPARMv8       : Predicate<"Subtarget->hasFPARMv8()">,
                                 AssemblerPredicate<(all_of FeatureFPARMv8), "fp-armv8">;
def HasNEON          : Predicate<"Subtarget->hasNEON()">,
                                 AssemblerPredicate<(all_of FeatureNEON), "neon">;
def HasCrypto        : Predicate<"Subtarget->hasCrypto()">,
                                 AssemblerPredicate<(all_of FeatureCrypto), "crypto">;
def HasSM4           : Predicate<"Subtarget->hasSM4()">,
                                 AssemblerPredicate<(all_of FeatureSM4), "sm4">;
def HasSHA3          : Predicate<"Subtarget->hasSHA3()">,
                                 AssemblerPredicate<(all_of FeatureSHA3), "sha3">;
def HasSHA2          : Predicate<"Subtarget->hasSHA2()">,
                                 AssemblerPredicate<(all_of FeatureSHA2), "sha2">;
def HasAES           : Predicate<"Subtarget->hasAES()">,
                                 AssemblerPredicate<(all_of FeatureAES), "aes">;
def HasDotProd       : Predicate<"Subtarget->hasDotProd()">,
                                 AssemblerPredicate<(all_of FeatureDotProd), "dotprod">;
def HasCRC           : Predicate<"Subtarget->hasCRC()">,
                                 AssemblerPredicate<(all_of FeatureCRC), "crc">;
def HasLSE           : Predicate<"Subtarget->hasLSE()">,
                                 AssemblerPredicate<(all_of FeatureLSE), "lse">;
def HasRAS           : Predicate<"Subtarget->hasRAS()">,
                                 AssemblerPredicate<(all_of FeatureRAS), "ras">;
def HasRDM           : Predicate<"Subtarget->hasRDM()">,
                                 AssemblerPredicate<(all_of FeatureRDM), "rdm">;
def HasPerfMon       : Predicate<"Subtarget->hasPerfMon()">;
def HasFullFP16      : Predicate<"Subtarget->hasFullFP16()">,
                                 AssemblerPredicate<(all_of FeatureFullFP16), "fullfp16">;
def HasFP16FML       : Predicate<"Subtarget->hasFP16FML()">,
                                 AssemblerPredicate<(all_of FeatureFP16FML), "fp16fml">;
def HasSPE           : Predicate<"Subtarget->hasSPE()">,
                                 AssemblerPredicate<(all_of FeatureSPE), "spe">;
def HasFuseAES       : Predicate<"Subtarget->hasFuseAES()">,
                                 AssemblerPredicate<(all_of FeatureFuseAES),
                                 "fuse-aes">;
def HasSVE           : Predicate<"Subtarget->hasSVE()">,
                                 AssemblerPredicate<(all_of FeatureSVE), "sve">;
def HasSVE2          : Predicate<"Subtarget->hasSVE2()">,
                                 AssemblerPredicate<(all_of FeatureSVE2), "sve2">;
def HasSVE2AES       : Predicate<"Subtarget->hasSVE2AES()">,
                                 AssemblerPredicate<(all_of FeatureSVE2AES), "sve2-aes">;
def HasSVE2SM4       : Predicate<"Subtarget->hasSVE2SM4()">,
                                 AssemblerPredicate<(all_of FeatureSVE2SM4), "sve2-sm4">;
def HasSVE2SHA3      : Predicate<"Subtarget->hasSVE2SHA3()">,
                                 AssemblerPredicate<(all_of FeatureSVE2SHA3), "sve2-sha3">;
def HasSVE2BitPerm   : Predicate<"Subtarget->hasSVE2BitPerm()">,
                                 AssemblerPredicate<(all_of FeatureSVE2BitPerm), "sve2-bitperm">;
def HasRCPC          : Predicate<"Subtarget->hasRCPC()">,
                                 AssemblerPredicate<(all_of FeatureRCPC), "rcpc">;
def HasAltNZCV       : Predicate<"Subtarget->hasAlternativeNZCV()">,
                       AssemblerPredicate<(all_of FeatureAltFPCmp), "altnzcv">;
def HasFRInt3264     : Predicate<"Subtarget->hasFRInt3264()">,
                       AssemblerPredicate<(all_of FeatureFRInt3264), "frint3264">;
def HasSB            : Predicate<"Subtarget->hasSB()">,
                       AssemblerPredicate<(all_of FeatureSB), "sb">;
def HasPredRes       : Predicate<"Subtarget->hasPredRes()">,
                       AssemblerPredicate<(all_of FeaturePredRes), "predres">;
def HasCCDP          : Predicate<"Subtarget->hasCCDP()">,
                       AssemblerPredicate<(all_of FeatureCacheDeepPersist), "ccdp">;
def HasBTI           : Predicate<"Subtarget->hasBTI()">,
                       AssemblerPredicate<(all_of FeatureBranchTargetId), "bti">;
def HasMTE           : Predicate<"Subtarget->hasMTE()">,
                       AssemblerPredicate<(all_of FeatureMTE), "mte">;
def HasTME           : Predicate<"Subtarget->hasTME()">,
                       AssemblerPredicate<(all_of FeatureTME), "tme">;
def HasETE           : Predicate<"Subtarget->hasETE()">,
                       AssemblerPredicate<(all_of FeatureETE), "ete">;
def HasTRBE          : Predicate<"Subtarget->hasTRBE()">,
                       AssemblerPredicate<(all_of FeatureTRBE), "trbe">;
def HasBF16          : Predicate<"Subtarget->hasBF16()">,
                       AssemblerPredicate<(all_of FeatureBF16), "bf16">;
def HasMatMulInt8    : Predicate<"Subtarget->hasMatMulInt8()">,
                       AssemblerPredicate<(all_of FeatureMatMulInt8), "i8mm">;
def HasMatMulFP32    : Predicate<"Subtarget->hasMatMulFP32()">,
                       AssemblerPredicate<(all_of FeatureMatMulFP32), "f32mm">;
def HasMatMulFP64    : Predicate<"Subtarget->hasMatMulFP64()">,
                       AssemblerPredicate<(all_of FeatureMatMulFP64), "f64mm">;
def IsLE             : Predicate<"Subtarget->isLittleEndian()">;
def IsBE             : Predicate<"!Subtarget->isLittleEndian()">;
def IsWindows        : Predicate<"Subtarget->isTargetWindows()">;
def UseExperimentalZeroingPseudos
    : Predicate<"Subtarget->useExperimentalZeroingPseudos()">;
def UseAlternateSExtLoadCVTF32
    : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;

def UseNegativeImmediates
    : Predicate<"false">, AssemblerPredicate<(all_of (not FeatureNoNegativeImmediates)),
                                             "NegativeImmediates">;

def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER",
                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                       SDTCisInt<1>]>>;


//===----------------------------------------------------------------------===//
// AArch64-specific DAG Nodes.
//

// SDTBinaryArithWithFlagsOut - RES, FLAGS = op LHS, RHS
def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
                                              [SDTCisSameAs<0, 2>,
                                               SDTCisSameAs<0, 3>,
                                               SDTCisInt<0>, SDTCisVT<1, i32>]>;

// SDTBinaryArithWithFlagsIn - RES = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
                                            [SDTCisSameAs<0, 1>,
                                             SDTCisSameAs<0, 2>,
                                             SDTCisInt<0>,
                                             SDTCisVT<3, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                            [SDTCisSameAs<0, 2>,
                                             SDTCisSameAs<0, 3>,
                                             SDTCisInt<0>,
                                             SDTCisVT<1, i32>,
                                             SDTCisVT<4, i32>]>;
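// For example, AArch64add_flag (ADDS) below uses SDTBinaryArithWithFlagsOut to
// produce both a value and NZCV, while AArch64adc_flag (ADCS) uses
// SDTBinaryArithWithFlagsInOut and additionally consumes the incoming flags as
// its final i32 operand.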

def SDT_AArch64Brcond  : SDTypeProfile<0, 3,
                                     [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
                                      SDTCisVT<2, i32>]>;
def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
                                        SDTCisVT<2, OtherVT>]>;


def SDT_AArch64CSel  : SDTypeProfile<1, 4,
                                   [SDTCisSameAs<0, 1>,
                                    SDTCisSameAs<0, 2>,
                                    SDTCisInt<3>,
                                    SDTCisVT<4, i32>]>;
def SDT_AArch64CCMP : SDTypeProfile<1, 5,
                                    [SDTCisVT<0, i32>,
                                     SDTCisInt<1>,
                                     SDTCisSameAs<1, 2>,
                                     SDTCisInt<3>,
                                     SDTCisInt<4>,
                                     SDTCisVT<5, i32>]>;
def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
                                     [SDTCisVT<0, i32>,
                                      SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>,
                                      SDTCisInt<3>,
                                      SDTCisInt<4>,
                                      SDTCisVT<5, i32>]>;
def SDT_AArch64FCmp   : SDTypeProfile<0, 2,
                                   [SDTCisFP<0>,
                                    SDTCisSameAs<0, 1>]>;
def SDT_AArch64Dup   : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDT_AArch64DupLane   : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
def SDT_AArch64Insr  : SDTypeProfile<1, 2, [SDTCisVec<0>]>;
def SDT_AArch64Zip   : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                          SDTCisSameAs<0, 1>,
                                          SDTCisSameAs<0, 2>]>;
def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisInt<2>, SDTCisInt<3>]>;
def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                          SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;

def SDT_AArch64vshiftinsert : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<3>,
                                                 SDTCisSameAs<0,1>,
                                                 SDTCisSameAs<0,2>]>;

def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
def SDT_AArch64fcmp  : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>]>;
def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>,
                                           SDTCisSameAs<0,3>]>;
def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;

def SDT_AArch64ITOF  : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;

def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
                                                 SDTCisPtrTy<1>]>;

def SDT_AArch64ldp : SDTypeProfile<2, 1, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stp : SDTypeProfile<0, 3, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stnp : SDTypeProfile<0, 3, [SDTCisVT<0, v4i32>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;

// Generates the general dynamic sequences, i.e.
//  adrp  x0, :tlsdesc:var
//  ldr   x1, [x0, #:tlsdesc_lo12:var]
//  add   x0, x0, #:tlsdesc_lo12:var
//  .tlsdesccall var
//  blr   x1

// (The TPIDR_EL0 offset is put directly in X0, hence no "result" here; the
// single operand is the variable being accessed.)
def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0, 1,
                                              [SDTCisPtrTy<0>]>;

def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
                                        [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
                                         SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
                                         SDTCisSameAs<1, 4>]>;

def SDT_AArch64TBL : SDTypeProfile<1, 2, [
  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>
]>;

// non-extending masked load fragment.
def nonext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
         !cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
// sign extending masked load fragments.
def asext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return (cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD ||
          cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD) &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def asext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def asext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def asext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;
// zero extending masked load fragments.
def zext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def zext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def zext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def zext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_load :
   PatFrag<(ops node:$ptr, node:$pred, node:$def),
           (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
   return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
          cast<MaskedLoadSDNode>(N)->isUnindexed() &&
          cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;

// non-truncating masked store fragment.
def nontrunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         !cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;
// truncating masked store fragments.
def trunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed();
}]>;
def trunc_masked_store_i8 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def trunc_masked_store_i16 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def trunc_masked_store_i32 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;
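
// These fragments let one pattern per extension kind and memory element type
// select the matching SVE instruction. A minimal sketch of how they are used
// (illustrative only; the real patterns live in SVEInstrFormats.td and the
// SVE instruction files):
//
//   def : Pat<(nxv8i16 (zext_masked_load_i8 GPR64:$base, (nxv8i1 PPR:$pred),
//                                           (nxv8i16 undef))),
//             (LD1B_H_IMM PPR:$pred, GPR64:$base, 0)>;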

// Node definitions.
def AArch64adrp          : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
def AArch64adr           : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
def AArch64addlow        : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
def AArch64LOADgot       : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
                                SDCallSeqStart<[ SDTCisVT<0, i32>,
                                                 SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOutGlue]>;
def AArch64callseq_end   : SDNode<"ISD::CALLSEQ_END",
                                SDCallSeqEnd<[ SDTCisVT<0, i32>,
                                               SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def AArch64call          : SDNode<"AArch64ISD::CALL",
                                SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                 SDNPVariadic]>;
def AArch64brcond        : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
                                [SDNPHasChain]>;
def AArch64cbz           : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64cbnz          : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64tbz           : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;
def AArch64tbnz          : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;


def AArch64csel          : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
def AArch64csinv         : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
def AArch64csneg         : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
def AArch64csinc         : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
def AArch64retflag       : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
                                [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def AArch64adc       : SDNode<"AArch64ISD::ADC",  SDTBinaryArithWithFlagsIn>;
def AArch64sbc       : SDNode<"AArch64ISD::SBC",  SDTBinaryArithWithFlagsIn>;
def AArch64add_flag  : SDNode<"AArch64ISD::ADDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64sub_flag  : SDNode<"AArch64ISD::SUBS",  SDTBinaryArithWithFlagsOut>;
def AArch64and_flag  : SDNode<"AArch64ISD::ANDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64adc_flag  : SDNode<"AArch64ISD::ADCS",  SDTBinaryArithWithFlagsInOut>;
def AArch64sbc_flag  : SDNode<"AArch64ISD::SBCS",  SDTBinaryArithWithFlagsInOut>;

def AArch64ccmp      : SDNode<"AArch64ISD::CCMP",  SDT_AArch64CCMP>;
def AArch64ccmn      : SDNode<"AArch64ISD::CCMN",  SDT_AArch64CCMP>;
def AArch64fccmp     : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;

def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;

def AArch64fcmp         : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
def AArch64strict_fcmp  : SDNode<"AArch64ISD::STRICT_FCMP", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64strict_fcmpe : SDNode<"AArch64ISD::STRICT_FCMPE", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64any_fcmp     : PatFrags<(ops node:$lhs, node:$rhs),
                                   [(AArch64strict_fcmp node:$lhs, node:$rhs),
                                    (AArch64fcmp node:$lhs, node:$rhs)]>;

def AArch64dup       : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
def AArch64duplane8  : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;

def AArch64insr      : SDNode<"AArch64ISD::INSR", SDT_AArch64Insr>;

def AArch64zip1      : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
def AArch64zip2      : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
def AArch64uzp1      : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
def AArch64uzp2      : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
def AArch64trn1      : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
def AArch64trn2      : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;

def AArch64movi_edit  : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
def AArch64movi_msl   : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
def AArch64mvni_msl   : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
def AArch64movi       : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
def AArch64fmov       : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;

def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
def AArch64ext   : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;

def AArch64vashr   : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
def AArch64vlshr   : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
def AArch64vshl    : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
def AArch64sqshli  : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
def AArch64uqshli  : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
def AArch64srshri  : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
def AArch64urshri  : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;
def AArch64vsli    : SDNode<"AArch64ISD::VSLI", SDT_AArch64vshiftinsert>;
def AArch64vsri    : SDNode<"AArch64ISD::VSRI", SDT_AArch64vshiftinsert>;

def AArch64not : SDNode<"AArch64ISD::NOT", SDT_AArch64unvec>;
def AArch64bit : SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
def AArch64bsp : SDNode<"AArch64ISD::BSP", SDT_AArch64trivec>;

def AArch64cmeq : SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
def AArch64cmge : SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
def AArch64cmgt : SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
def AArch64cmhi : SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
def AArch64cmhs : SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;

def AArch64fcmeq : SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
def AArch64fcmge : SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
def AArch64fcmgt : SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;

def AArch64cmeqz : SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
def AArch64cmgez : SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
def AArch64cmgtz : SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
def AArch64cmlez : SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
def AArch64cmltz : SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
                        (AArch64not (AArch64cmeqz (and node:$LHS, node:$RHS)))>;
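// That is, CMTST sets each lane to all ones when (LHS & RHS) is non-zero in
// that lane, and to all zeros otherwise.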

def AArch64fcmeqz : SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
def AArch64fcmgez : SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
def AArch64fcmgtz : SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
def AArch64fcmlez : SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
def AArch64fcmltz : SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;

def AArch64bici : SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
def AArch64orri : SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;

def AArch64neg : SDNode<"AArch64ISD::NEG", SDT_AArch64unvec>;

def AArch64tcret : SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
                   [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

def AArch64Prefetch : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
                             [SDNPHasChain, SDNPSideEffect]>;

def AArch64sitof : SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
def AArch64uitof : SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;

def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
                                    SDT_AArch64TLSDescCallSeq,
                                    [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
                                     SDNPVariadic]>;


def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
                                 SDT_AArch64WrapperLarge>;

def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;

def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                    SDTCisSameAs<1, 2>]>;
def AArch64smull    : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
def AArch64umull    : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;

def AArch64frecpe   : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
def AArch64frecps   : SDNode<"AArch64ISD::FRECPS", SDTFPBinOp>;
def AArch64frsqrte  : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
def AArch64frsqrts  : SDNode<"AArch64ISD::FRSQRTS", SDTFPBinOp>;

def AArch64saddv    : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
def AArch64uaddv    : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
def AArch64sminv    : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
def AArch64uminv    : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
def AArch64smaxv    : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
def AArch64umaxv    : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;

def AArch64srhadd   : SDNode<"AArch64ISD::SRHADD", SDT_AArch64binvec>;
def AArch64urhadd   : SDNode<"AArch64ISD::URHADD", SDT_AArch64binvec>;

def SDT_AArch64SETTAG : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
def AArch64stg   : SDNode<"AArch64ISD::STG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stzg  : SDNode<"AArch64ISD::STZG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64st2g  : SDNode<"AArch64ISD::ST2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stz2g : SDNode<"AArch64ISD::STZ2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def SDT_AArch64unpk : SDTypeProfile<1, 1, [
    SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>
]>;
def AArch64sunpkhi : SDNode<"AArch64ISD::SUNPKHI", SDT_AArch64unpk>;
def AArch64sunpklo : SDNode<"AArch64ISD::SUNPKLO", SDT_AArch64unpk>;
def AArch64uunpkhi : SDNode<"AArch64ISD::UUNPKHI", SDT_AArch64unpk>;
def AArch64uunpklo : SDNode<"AArch64ISD::UUNPKLO", SDT_AArch64unpk>;

def AArch64ldp  : SDNode<"AArch64ISD::LDP", SDT_AArch64ldp, [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def AArch64stp  : SDNode<"AArch64ISD::STP", SDT_AArch64stp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stnp : SDNode<"AArch64ISD::STNP", SDT_AArch64stnp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def AArch64tbl : SDNode<"AArch64ISD::TBL", SDT_AArch64TBL>;

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//

// AArch64 Instruction Predicate Definitions.
// We could compute these on a per-module basis but doing so requires accessing
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
  def ForCodeSize      : Predicate<"shouldOptForSize(MF)">;
  def NotForCodeSize   : Predicate<"!shouldOptForSize(MF)">;
  // Avoid generating STRQro if it is slow, unless we're optimizing for code size.
  def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || shouldOptForSize(MF)">;

  def UseBTI : Predicate<[{ MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>;
  def NotUseBTI : Predicate<[{ !MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>;

  def SLSBLRMitigation : Predicate<[{ MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  def NoSLSBLRMitigation : Predicate<[{ !MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  // Toggles patterns which aren't beneficial in GlobalISel when we aren't
  // optimizing. This allows us to selectively use patterns without impacting
  // SelectionDAG's behaviour.
  // FIXME: One day there will probably be a nicer way to check for this, but
  // today is not that day.
  def OptimizedGISelOrOtherSelector : Predicate<"!MF->getFunction().hasOptNone() || MF->getProperties().hasProperty(MachineFunctionProperties::Property::FailedISel) || !MF->getProperties().hasProperty(MachineFunctionProperties::Property::Legalized)">;
}

include "AArch64InstrFormats.td"
include "SVEInstrFormats.td"

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Miscellaneous instructions.
//===----------------------------------------------------------------------===//

let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
// We set Sched to an empty list because we expect these instructions simply to
// be removed in most cases.
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(AArch64callseq_start timm:$amt1, timm:$amt2)]>,
                              Sched<[]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
                            Sched<[]>;
} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1

let isReMaterializable = 1, isCodeGenOnly = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions. When that changes, they can be
// removed, along with the AArch64Wrapper node.

let AddedComplexity = 10 in
def LOADgot : Pseudo<(outs GPR64:$dst), (ins i64imm:$addr),
                     [(set GPR64:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
              Sched<[WriteLDAdr]>;

// The MOVaddr instruction should match only when the add is not folded
// into a load or store address.
def MOVaddr
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
                                              tglobaladdr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrJT
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
                                              tjumptable:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrCP
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
                                              tconstpool:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrBA
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
                                              tblockaddress:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrTLS
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
                                              tglobaltlsaddr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrEXT
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
                                              texternalsym:$low))]>,
      Sched<[WriteAdrAdr]>;
// Normally AArch64addlow either gets folded into a following ldr/str,
// or together with an adrp into MOVaddr above. For cases with TLS, it
// might appear without either of them, so allow lowering it into a plain
// add.
def ADDlowTLS
    : Pseudo<(outs GPR64:$dst), (ins GPR64:$src, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow GPR64:$src,
                                              tglobaltlsaddr:$low))]>,
      Sched<[WriteAdr]>;

} // isReMaterializable, isCodeGenOnly

def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
          (LOADgot tglobaltlsaddr:$addr)>;

def : Pat<(AArch64LOADgot texternalsym:$addr),
          (LOADgot texternalsym:$addr)>;

def : Pat<(AArch64LOADgot tconstpool:$addr),
          (LOADgot tconstpool:$addr)>;

// Computing a 32-bit jump table destination actually takes only 2 instructions,
// since we can use the table itself as a PC-relative base. But optimization
// occurs after branch relaxation, so be pessimistic.
let Size = 12, Constraints = "@earlyclobber $dst,@earlyclobber $scratch" in {
def JumpTableDest32 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest16 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest8 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                            (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                     Sched<[]>;
}
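
// A sketch of the expected two-instruction expansion for JumpTableDest32,
// assuming the usual compressed jump-table lowering done after ISel:
//
//   ldrsw $scratch, [$table, $entry, lsl #2]
//   add   $dst, $table, $scratch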

// Space-consuming pseudo to aid testing of placement and reachability
// algorithms. Immediate operand is the number of bytes this "instruction"
// occupies; register operands can be used to enforce dependency and constrain
// the scheduler.
let hasSideEffects = 1, mayLoad = 1, mayStore = 1 in
def SPACE : Pseudo<(outs GPR64:$Rd), (ins i32imm:$size, GPR64:$Rn),
                   [(set GPR64:$Rd, (int_aarch64_space imm:$size, GPR64:$Rn))]>,
            Sched<[]>;

let hasSideEffects = 1, isCodeGenOnly = 1 in {
  def SpeculationSafeValueX
      : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>;
  def SpeculationSafeValueW
      : Pseudo<(outs GPR32:$dst), (ins GPR32:$src), []>, Sched<[]>;
}

// SpeculationBarrierEndBB must only be used after an unconditional control-flow
// instruction, i.e. after a terminator for which isBarrier is True.
let hasSideEffects = 1, isCodeGenOnly = 1, isTerminator = 1, isBarrier = 1 in {
  def SpeculationBarrierISBDSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SpeculationBarrierSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// System instructions.
//===----------------------------------------------------------------------===//

def HINT : HintI<"hint">;
def : InstAlias<"nop",  (HINT 0b000)>;
def : InstAlias<"yield", (HINT 0b001)>;
def : InstAlias<"wfe",  (HINT 0b010)>;
def : InstAlias<"wfi",  (HINT 0b011)>;
def : InstAlias<"sev",  (HINT 0b100)>;
def : InstAlias<"sevl", (HINT 0b101)>;
def : InstAlias<"dgh",  (HINT 0b110)>;
def : InstAlias<"esb",  (HINT 0b10000)>, Requires<[HasRAS]>;
def : InstAlias<"csdb", (HINT 20)>;
// In order to be able to write readable assembly, LLVM should accept assembly
// inputs that use Branch Target Identification mnemonics, even with BTI disabled.
// However, in order to be compatible with other assemblers (e.g. GAS), LLVM
// should not emit these mnemonics unless BTI is enabled.
def : InstAlias<"bti",  (HINT 32), 0>;
def : InstAlias<"bti $op", (HINT btihint_op:$op), 0>;
def : InstAlias<"bti",  (HINT 32)>, Requires<[HasBTI]>;
def : InstAlias<"bti $op", (HINT btihint_op:$op)>, Requires<[HasBTI]>;

// v8.2a Statistical Profiling extension
def : InstAlias<"psb $op",  (HINT psbhint_op:$op)>, Requires<[HasSPE]>;

// As far as LLVM is concerned, this writes to the system's exclusive monitors.
let mayLoad = 1, mayStore = 1 in
def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;

// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
// model patterns with sufficiently fine granularity.
let mayLoad = ?, mayStore = ? in {
def DMB   : CRmSystemI<barrier_op, 0b101, "dmb",
                       [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;

def DSB   : CRmSystemI<barrier_op, 0b100, "dsb",
                       [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;

def ISB   : CRmSystemI<barrier_op, 0b110, "isb",
                       [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;

def TSB   : CRmSystemI<barrier_op, 0b010, "tsb", []> {
  let CRm        = 0b0010;
  let Inst{12}   = 0;
  let Predicates = [HasTRACEV8_4];
}
}

// ARMv8.2-A Dot Product
let Predicates = [HasDotProd] in {
defm SDOT : SIMDThreeSameVectorDot<0, 0, "sdot", int_aarch64_neon_sdot>;
defm UDOT : SIMDThreeSameVectorDot<1, 0, "udot", int_aarch64_neon_udot>;
defm SDOTlane : SIMDThreeSameVectorDotIndex<0, 0, 0b10, "sdot", int_aarch64_neon_sdot>;
defm UDOTlane : SIMDThreeSameVectorDotIndex<1, 0, 0b10, "udot", int_aarch64_neon_udot>;
}

// ARMv8.6-A BFloat
let Predicates = [HasBF16] in {
defm BFDOT       : SIMDThreeSameVectorBFDot<1, "bfdot">;
defm BF16DOTlane : SIMDThreeSameVectorBF16DotI<0, "bfdot">;
def BFMMLA       : SIMDThreeSameVectorBF16MatrixMul<"bfmmla">;
def BFMLALB      : SIMDBF16MLAL<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALT      : SIMDBF16MLAL<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFMLALBIdx   : SIMDBF16MLALIndex<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALTIdx   : SIMDBF16MLALIndex<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFCVTN       : SIMD_BFCVTN;
def BFCVTN2      : SIMD_BFCVTN2;
def BFCVT        : BF16ToSinglePrecision<"bfcvt">;
}

// ARMv8.6-A AArch64 matrix multiplication
let Predicates = [HasMatMulInt8] in {
def  SMMLA : SIMDThreeSameVectorMatMul<0, 0, "smmla", int_aarch64_neon_smmla>;
def  UMMLA : SIMDThreeSameVectorMatMul<0, 1, "ummla", int_aarch64_neon_ummla>;
def USMMLA : SIMDThreeSameVectorMatMul<1, 0, "usmmla", int_aarch64_neon_usmmla>;
defm USDOT : SIMDThreeSameVectorDot<0, 1, "usdot", int_aarch64_neon_usdot>;
defm USDOTlane : SIMDThreeSameVectorDotIndex<0, 1, 0b10, "usdot", int_aarch64_neon_usdot>;

// The sudot (by element) instruction is matched from a pattern that uses the
// usdot intrinsic, since there is no sudot. The second operand is used in the
// dup operation to repeat the indexed element.
class BaseSIMDSUDOTIndex<bit Q, string dst_kind, string lhs_kind,
                         string rhs_kind, RegisterOperand RegType,
                         ValueType AccumType, ValueType InputType>
      : BaseSIMDThreeSameVectorDotIndex<Q, 0, 1, 0b00, "sudot", dst_kind,
                                        lhs_kind, rhs_kind, RegType, AccumType,
                                        InputType, null_frag> {
  let Pattern = [(set (AccumType RegType:$dst),
                      (AccumType (int_aarch64_neon_usdot (AccumType RegType:$Rd),
                                 (InputType (bitconvert (AccumType
                                    (AArch64duplane32 (v4i32 V128:$Rm),
                                        VectorIndexS:$idx)))),
                                 (InputType RegType:$Rn))))];
}

multiclass SIMDSUDOTIndex {
  def v8i8  : BaseSIMDSUDOTIndex<0, ".2s", ".8b", ".4b", V64, v2i32, v8i8>;
  def v16i8 : BaseSIMDSUDOTIndex<1, ".4s", ".16b", ".4b", V128, v4i32, v16i8>;
}

defm SUDOTlane : SIMDSUDOTIndex;
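
// For example, "sudot v0.4s, v1.16b, v2.4b[1]" is selected from the usdot
// intrinsic with the indexed element of $Rm duplicated across a vector and
// the multiplicand operands swapped; this works because the two multiplicands
// of the dot product differ only in signedness.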

}

// ARMv8.2-A FP16 Fused Multiply-Add Long
let Predicates = [HasNEON, HasFP16FML] in {
defm FMLAL      : SIMDThreeSameVectorFML<0, 1, 0b001, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSL      : SIMDThreeSameVectorFML<0, 1, 0b101, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2     : SIMDThreeSameVectorFML<1, 0, 0b001, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2     : SIMDThreeSameVectorFML<1, 0, 0b101, "fmlsl2", int_aarch64_neon_fmlsl2>;
defm FMLALlane  : SIMDThreeSameVectorFMLIndex<0, 0b0000, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSLlane  : SIMDThreeSameVectorFMLIndex<0, 0b0100, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1000, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1100, "fmlsl2", int_aarch64_neon_fmlsl2>;
}

// Armv8.2-A Crypto extensions
let Predicates = [HasSHA3] in {
def SHA512H   : CryptoRRRTied<0b0, 0b00, "sha512h">;
def SHA512H2  : CryptoRRRTied<0b0, 0b01, "sha512h2">;
def SHA512SU0 : CryptoRRTied_2D<0b0, 0b00, "sha512su0">;
def SHA512SU1 : CryptoRRRTied_2D<0b0, 0b10, "sha512su1">;
def RAX1      : CryptoRRR_2D<0b0, 0b11, "rax1">;
def EOR3      : CryptoRRRR_16B<0b00, "eor3">;
def BCAX      : CryptoRRRR_16B<0b01, "bcax">;
def XAR       : CryptoRRRi6<"xar">;
} // HasSHA3

let Predicates = [HasSM4] in {
def SM3TT1A   : CryptoRRRi2Tied<0b0, 0b00, "sm3tt1a">;
def SM3TT1B   : CryptoRRRi2Tied<0b0, 0b01, "sm3tt1b">;
def SM3TT2A   : CryptoRRRi2Tied<0b0, 0b10, "sm3tt2a">;
def SM3TT2B   : CryptoRRRi2Tied<0b0, 0b11, "sm3tt2b">;
def SM3SS1    : CryptoRRRR_4S<0b10, "sm3ss1">;
def SM3PARTW1 : CryptoRRRTied_4S<0b1, 0b00, "sm3partw1">;
def SM3PARTW2 : CryptoRRRTied_4S<0b1, 0b01, "sm3partw2">;
def SM4ENCKEY : CryptoRRR_4S<0b1, 0b10, "sm4ekey">;
def SM4E      : CryptoRRTied_4S<0b0, 0b01, "sm4e">;
} // HasSM4

let Predicates = [HasRCPC] in {
  // v8.3 Release Consistent processor consistent (RCpc) support, optional in v8.2.
  def LDAPRB  : RCPCLoad<0b00, "ldaprb", GPR32>;
  def LDAPRH  : RCPCLoad<0b01, "ldaprh", GPR32>;
  def LDAPRW  : RCPCLoad<0b10, "ldapr", GPR32>;
  def LDAPRX  : RCPCLoad<0b11, "ldapr", GPR64>;
}

// v8.3a complex add and multiply-accumulate. No predicate here; that is done
// inside the multiclass, as the FP16 versions need different predicates.
defm FCMLA : SIMDThreeSameVectorTiedComplexHSD<1, 0b110, complexrotateop,
                                               "fcmla", null_frag>;
defm FCADD : SIMDThreeSameVectorComplexHSD<1, 0b111, complexrotateopodd,
                                           "fcadd", null_frag>;
defm FCMLA : SIMDIndexedTiedComplexHSD<1, 0, 1, complexrotateop, "fcmla",
                                       null_frag>;

let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot90 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 0))>;
  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot270 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 1))>;
  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot90 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 0))>;
  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot270 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 1))>;
}
let Predicates = [HasComplxNum, HasNEON] in {
  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot90 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 0))>;
  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot270 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 1))>;
  foreach Ty = [v4f32, v2f64] in {
    def : Pat<(Ty (int_aarch64_neon_vcadd_rot90 (Ty V128:$Rn), (Ty V128:$Rm))),
              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 0))>;
    def : Pat<(Ty (int_aarch64_neon_vcadd_rot270 (Ty V128:$Rn), (Ty V128:$Rm))),
              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 1))>;
  }
}

// v8.3a Pointer Authentication
// These instructions inhabit part of the hint space and so can be used for
// armv8 targets. Keeping the old HINT mnemonic when compiling without PA is
// important for compatibility with other assemblers (e.g. GAS) when building
// software that must run both on CPUs that implement PA and on those that
// don't.
let Uses = [LR], Defs = [LR] in {
  def PACIAZ   : SystemNoOperands<0b000, "hint\t#24">;
  def PACIBZ   : SystemNoOperands<0b010, "hint\t#26">;
  let isAuthenticated = 1 in {
    def AUTIAZ   : SystemNoOperands<0b100, "hint\t#28">;
    def AUTIBZ   : SystemNoOperands<0b110, "hint\t#30">;
  }
}
let Uses = [LR, SP], Defs = [LR] in {
  def PACIASP  : SystemNoOperands<0b001, "hint\t#25">;
  def PACIBSP  : SystemNoOperands<0b011, "hint\t#27">;
  let isAuthenticated = 1 in {
    def AUTIASP  : SystemNoOperands<0b101, "hint\t#29">;
    def AUTIBSP  : SystemNoOperands<0b111, "hint\t#31">;
  }
}
let Uses = [X16, X17], Defs = [X17], CRm = 0b0001 in {
  def PACIA1716  : SystemNoOperands<0b000, "hint\t#8">;
  def PACIB1716  : SystemNoOperands<0b010, "hint\t#10">;
  let isAuthenticated = 1 in {
    def AUTIA1716  : SystemNoOperands<0b100, "hint\t#12">;
    def AUTIB1716  : SystemNoOperands<0b110, "hint\t#14">;
  }
}

let Uses = [LR], Defs = [LR], CRm = 0b0000 in {
  def XPACLRI   : SystemNoOperands<0b111, "hint\t#7">;
}

// In order to be able to write readable assembly, LLVM should accept assembly
// inputs that use pointer authentication mnemonics, even with PA disabled.
// However, in order to be compatible with other assemblers (e.g. GAS), LLVM
// should not emit these mnemonics unless PA is enabled.
def : InstAlias<"paciaz", (PACIAZ), 0>;
def : InstAlias<"pacibz", (PACIBZ), 0>;
def : InstAlias<"autiaz", (AUTIAZ), 0>;
def : InstAlias<"autibz", (AUTIBZ), 0>;
def : InstAlias<"paciasp", (PACIASP), 0>;
def : InstAlias<"pacibsp", (PACIBSP), 0>;
def : InstAlias<"autiasp", (AUTIASP), 0>;
def : InstAlias<"autibsp", (AUTIBSP), 0>;
def : InstAlias<"pacia1716", (PACIA1716), 0>;
def : InstAlias<"pacib1716", (PACIB1716), 0>;
def : InstAlias<"autia1716", (AUTIA1716), 0>;
def : InstAlias<"autib1716", (AUTIB1716), 0>;
def : InstAlias<"xpaclri", (XPACLRI), 0>;
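
// The trailing "0" clears each alias's emit priority: the assembler still
// accepts the mnemonic, but the printer keeps using the raw "hint" form. The
// copies below, guarded by HasPA, restore the default priority of 1 so the
// readable mnemonics are preferred when printing.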

// These pointer authentication instructions require armv8.3a
let Predicates = [HasPA] in {

  // When PA is enabled, a better mnemonic should be emitted.
  def : InstAlias<"paciaz", (PACIAZ), 1>;
  def : InstAlias<"pacibz", (PACIBZ), 1>;
  def : InstAlias<"autiaz", (AUTIAZ), 1>;
  def : InstAlias<"autibz", (AUTIBZ), 1>;
  def : InstAlias<"paciasp", (PACIASP), 1>;
  def : InstAlias<"pacibsp", (PACIBSP), 1>;
  def : InstAlias<"autiasp", (AUTIASP), 1>;
  def : InstAlias<"autibsp", (AUTIBSP), 1>;
  def : InstAlias<"pacia1716", (PACIA1716), 1>;
  def : InstAlias<"pacib1716", (PACIB1716), 1>;
  def : InstAlias<"autia1716", (AUTIA1716), 1>;
  def : InstAlias<"autib1716", (AUTIB1716), 1>;
  def : InstAlias<"xpaclri", (XPACLRI), 1>;

  multiclass SignAuth<bits<3> prefix, bits<3> prefix_z, string asm> {
    def IA   : SignAuthOneData<prefix, 0b00, !strconcat(asm, "ia")>;
    def IB   : SignAuthOneData<prefix, 0b01, !strconcat(asm, "ib")>;
    def DA   : SignAuthOneData<prefix, 0b10, !strconcat(asm, "da")>;
    def DB   : SignAuthOneData<prefix, 0b11, !strconcat(asm, "db")>;
    def IZA  : SignAuthZero<prefix_z, 0b00, !strconcat(asm, "iza")>;
    def DZA  : SignAuthZero<prefix_z, 0b10, !strconcat(asm, "dza")>;
    def IZB  : SignAuthZero<prefix_z, 0b01, !strconcat(asm, "izb")>;
    def DZB  : SignAuthZero<prefix_z, 0b11, !strconcat(asm, "dzb")>;
  }

  defm PAC : SignAuth<0b000, 0b010, "pac">;
  defm AUT : SignAuth<0b001, 0b011, "aut">;

  def XPACI : SignAuthZero<0b100, 0b00, "xpaci">;
  def XPACD : SignAuthZero<0b100, 0b01, "xpacd">;
  def PACGA : SignAuthTwoOperand<0b1100, "pacga", null_frag>;

  // Combined Instructions
  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
    def BRAA    : AuthBranchTwoOperands<0, 0, "braa">;
    def BRAB    : AuthBranchTwoOperands<0, 1, "brab">;
  }
  let isCall = 1, Defs = [LR], Uses = [SP] in {
    def BLRAA   : AuthBranchTwoOperands<1, 0, "blraa">;
    def BLRAB   : AuthBranchTwoOperands<1, 1, "blrab">;
  }

  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
    def BRAAZ   : AuthOneOperand<0b000, 0, "braaz">;
    def BRABZ   : AuthOneOperand<0b000, 1, "brabz">;
  }
  let isCall = 1, Defs = [LR], Uses = [SP] in {
    def BLRAAZ  : AuthOneOperand<0b001, 0, "blraaz">;
    def BLRABZ  : AuthOneOperand<0b001, 1, "blrabz">;
  }

  let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
    def RETAA   : AuthReturn<0b010, 0, "retaa">;
    def RETAB   : AuthReturn<0b010, 1, "retab">;
    def ERETAA  : AuthReturn<0b100, 0, "eretaa">;
    def ERETAB  : AuthReturn<0b100, 1, "eretab">;
  }

  defm LDRAA  : AuthLoad<0, "ldraa", simm10Scaled>;
  defm LDRAB  : AuthLoad<1, "ldrab", simm10Scaled>;

}

// v8.3a floating-point conversion for JavaScript
1031let Predicates = [HasJS, HasFPARMv8] in
1032def FJCVTZS  : BaseFPToIntegerUnscaled<0b01, 0b11, 0b110, FPR64, GPR32,
1033                                      "fjcvtzs",
1034                                      [(set GPR32:$Rd,
1035                                         (int_aarch64_fjcvtzs FPR64:$Rn))]> {
1036  let Inst{31} = 0;
1037} // HasJS, HasFPARMv8
1038
1039// v8.4 Flag manipulation instructions
1040let Predicates = [HasFMI] in {
1041def CFINV : SimpleSystemI<0, (ins), "cfinv", "">, Sched<[WriteSys]> {
1042  let Inst{20-5} = 0b0000001000000000;
1043}
1044def SETF8  : BaseFlagManipulation<0, 0, (ins GPR32:$Rn), "setf8", "{\t$Rn}">;
1045def SETF16 : BaseFlagManipulation<0, 1, (ins GPR32:$Rn), "setf16", "{\t$Rn}">;
1046def RMIF   : FlagRotate<(ins GPR64:$Rn, uimm6:$imm, imm0_15:$mask), "rmif",
1047                        "{\t$Rn, $imm, $mask}">;
1048} // HasFMI
1049
1050// v8.5 flag manipulation instructions
1051let Predicates = [HasAltNZCV], Uses = [NZCV], Defs = [NZCV] in {
1052
1053def XAFLAG : PstateWriteSimple<(ins), "xaflag", "">, Sched<[WriteSys]> {
1054  let Inst{18-16} = 0b000;
1055  let Inst{11-8} = 0b0000;
1056  let Unpredictable{11-8} = 0b1111;
1057  let Inst{7-5} = 0b001;
1058}
1059
1060def AXFLAG : PstateWriteSimple<(ins), "axflag", "">, Sched<[WriteSys]> {
1061  let Inst{18-16} = 0b000;
1062  let Inst{11-8} = 0b0000;
1063  let Unpredictable{11-8} = 0b1111;
1064  let Inst{7-5} = 0b010;
1065}
1066} // HasAltNZCV
1067
1068
1069// Armv8.5-A speculation barrier
1070def SB : SimpleSystemI<0, (ins), "sb", "">, Sched<[]> {
1071  let Inst{20-5} = 0b0001100110000111;
1072  let Unpredictable{11-8} = 0b1111;
1073  let Predicates = [HasSB];
1074  let hasSideEffects = 1;
1075}
1076
1077def : InstAlias<"clrex", (CLREX 0xf)>;
1078def : InstAlias<"isb", (ISB 0xf)>;
1079def : InstAlias<"ssbb", (DSB 0)>;
1080def : InstAlias<"pssbb", (DSB 4)>;
1081
1082def MRS    : MRSI;
1083def MSR    : MSRI;
1084def MSRpstateImm1 : MSRpstateImm0_1;
1085def MSRpstateImm4 : MSRpstateImm0_15;
1086
1087// The thread pointer (on Linux, at least, where this has been implemented) is
1088// TPIDR_EL0.
1089def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
1090                       [(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;
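// On such targets this pseudo is eventually emitted as a plain system-register
// read, e.g. "mrs x0, TPIDR_EL0".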
1091
1092let Uses = [ X9 ], Defs = [ X16, X17, LR, NZCV ] in {
1093def HWASAN_CHECK_MEMACCESS : Pseudo<
1094  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
1095  [(int_hwasan_check_memaccess X9, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
1096  Sched<[]>;
1097def HWASAN_CHECK_MEMACCESS_SHORTGRANULES : Pseudo<
1098  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
1099  [(int_hwasan_check_memaccess_shortgranules X9, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
1100  Sched<[]>;
1101}
1102
// The PMU cycle counter register is PMCCNTR_EL0.
1104let Predicates = [HasPerfMon] in
1105def : Pat<(readcyclecounter), (MRS 0xdce8)>;
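// 0xdce8 is the packed system-register immediate op0:op1:CRn:CRm:op2 =
// 3:3:9:13:0, i.e. PMCCNTR_EL0, so this is equivalent to
// "mrs x0, PMCCNTR_EL0".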
1106
1107// FPCR register
1108def : Pat<(i64 (int_aarch64_get_fpcr)), (MRS 0xda20)>;
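// Likewise, 0xda20 packs op0:op1:CRn:CRm:op2 = 3:3:4:4:0, the encoding of
// FPCR, making this an "mrs x0, FPCR".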
1109
1110// Generic system instructions
1111def SYSxt  : SystemXtI<0, "sys">;
1112def SYSLxt : SystemLXtI<1, "sysl">;
1113
1114def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
1115                (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
1116                 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;
1117
1118
1119let Predicates = [HasTME] in {
1120
1121def TSTART : TMSystemI<0b0000, "tstart",
1122                      [(set GPR64:$Rt, (int_aarch64_tstart))]>;
1123
1124def TCOMMIT : TMSystemINoOperand<0b0000, "tcommit", [(int_aarch64_tcommit)]>;
1125
1126def TCANCEL : TMSystemException<0b011, "tcancel",
1127                                [(int_aarch64_tcancel i64_imm0_65535:$imm)]>;
1128
1129def TTEST : TMSystemI<0b0001, "ttest", [(set GPR64:$Rt, (int_aarch64_ttest))]> {
1130  let mayLoad = 0;
1131  let mayStore = 0;
1132}
1133} // HasTME
1134
1135//===----------------------------------------------------------------------===//
1136// Move immediate instructions.
1137//===----------------------------------------------------------------------===//
1138
1139defm MOVK : InsertImmediate<0b11, "movk">;
1140defm MOVN : MoveImmediate<0b00, "movn">;
1141
1142let PostEncoderMethod = "fixMOVZ" in
1143defm MOVZ : MoveImmediate<0b10, "movz">;
1144
1145// First group of aliases covers an implicit "lsl #0".
1146def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, i32_imm0_65535:$imm, 0), 0>;
1147def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, i32_imm0_65535:$imm, 0), 0>;
1148def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, i32_imm0_65535:$imm, 0)>;
1149def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, i32_imm0_65535:$imm, 0)>;
1150def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, i32_imm0_65535:$imm, 0)>;
1151def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, i32_imm0_65535:$imm, 0)>;
1152
1153// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
1154def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
1155def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
1156def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
1157def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;
1158
1159def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
1160def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
1161def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
1162def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;
1163
1164def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g3:$sym, 48), 0>;
1165def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g2:$sym, 32), 0>;
1166def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g1:$sym, 16), 0>;
1167def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g0:$sym, 0), 0>;
1168
1169def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
1170def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;
1171
1172def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
1173def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;
1174
1175def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g1:$sym, 16), 0>;
1176def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g0:$sym, 0), 0>;
1177
1178// Final group of aliases covers true "mov $Rd, $imm" cases.
multiclass movw_mov_alias<string basename, Instruction INST, RegisterClass GPR,
1180                          int width, int shift> {
1181  def _asmoperand : AsmOperandClass {
1182    let Name = basename # width # "_lsl" # shift # "MovAlias";
1183    let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
1184                               # shift # ">";
1185    let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
1186  }
1187
1188  def _movimm : Operand<i32> {
1189    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
1190  }
1191
1192  def : InstAlias<"mov $Rd, $imm",
1193                  (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
1194}
1195
1196defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
1197defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;
1198
1199defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
1200defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
1201defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
1202defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;
1203
1204defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
1205defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;
1206
1207defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
1208defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
1209defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
1210defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;
1211
1212let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
1213    isAsCheapAsAMove = 1 in {
1214// FIXME: The following pseudo instructions are only needed because remat
1215// cannot handle multiple instructions.  When that changes, we can select
1216// directly to the real instructions and get rid of these pseudos.
1217
1218def MOVi32imm
1219    : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
1220             [(set GPR32:$dst, imm:$src)]>,
1221      Sched<[WriteImm]>;
1222def MOVi64imm
1223    : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
1224             [(set GPR64:$dst, imm:$src)]>,
1225      Sched<[WriteImm]>;
1226} // isReMaterializable, isCodeGenOnly
1227
1228// If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
1229// eventual expansion code fewer bits to worry about getting right. Marshalling
1230// the types is a little tricky though:
1231def i64imm_32bit : ImmLeaf<i64, [{
1232  return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
1233}]>;
1234
1235def s64imm_32bit : ImmLeaf<i64, [{
1236  int64_t Imm64 = static_cast<int64_t>(Imm);
1237  return Imm64 >= std::numeric_limits<int32_t>::min() &&
1238         Imm64 <= std::numeric_limits<int32_t>::max();
1239}]>;
1240
1241def trunc_imm : SDNodeXForm<imm, [{
1242  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
1243}]>;
1244
1245def gi_trunc_imm : GICustomOperandRenderer<"renderTruncImm">,
1246  GISDNodeXFormEquiv<trunc_imm>;
1247
1248let Predicates = [OptimizedGISelOrOtherSelector] in {
1249// The SUBREG_TO_REG isn't eliminated at -O0, which can result in pointless
1250// copies.
1251def : Pat<(i64 i64imm_32bit:$src),
1252          (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
1253}
1254
1255// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
1256def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
  return CurDAG->getTargetConstant(
      N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
1259}]>;
1260
1261def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
  return CurDAG->getTargetConstant(
      N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
1264}]>;
1265
1266
1267def : Pat<(f32 fpimm:$in),
1268  (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
1269def : Pat<(f64 fpimm:$in),
1270  (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;
1271
1272
1273// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
1274// sequences.
1275def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
1276                             tglobaladdr:$g1, tglobaladdr:$g0),
1277          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g0, 0),
1278                                  tglobaladdr:$g1, 16),
1279                          tglobaladdr:$g2, 32),
1280                  tglobaladdr:$g3, 48)>;
1281
1282def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
1283                             tblockaddress:$g1, tblockaddress:$g0),
1284          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g0, 0),
1285                                  tblockaddress:$g1, 16),
1286                          tblockaddress:$g2, 32),
1287                  tblockaddress:$g3, 48)>;
1288
1289def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
1290                             tconstpool:$g1, tconstpool:$g0),
1291          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g0, 0),
1292                                  tconstpool:$g1, 16),
1293                          tconstpool:$g2, 32),
1294                  tconstpool:$g3, 48)>;
1295
1296def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
1297                             tjumptable:$g1, tjumptable:$g0),
1298          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g0, 0),
1299                                  tjumptable:$g1, 16),
1300                          tjumptable:$g2, 32),
1301                  tjumptable:$g3, 48)>;
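// The emitted code fills the constant 16 bits at a time; the assembly looks
// roughly like (illustrative relocation spelling):
//   movz x0, #:abs_g0_nc:sym
//   movk x0, #:abs_g1_nc:sym
//   movk x0, #:abs_g2_nc:sym
//   movk x0, #:abs_g3:sym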
1302
1303
1304//===----------------------------------------------------------------------===//
1305// Arithmetic instructions.
1306//===----------------------------------------------------------------------===//
1307
1308// Add/subtract with carry.
1309defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
1310defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;
1311
1312def : InstAlias<"ngc $dst, $src",  (SBCWr  GPR32:$dst, WZR, GPR32:$src)>;
1313def : InstAlias<"ngc $dst, $src",  (SBCXr  GPR64:$dst, XZR, GPR64:$src)>;
1314def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
1315def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;
1316
1317// Add/subtract
1318defm ADD : AddSub<0, "add", "sub", add>;
1319defm SUB : AddSub<1, "sub", "add">;
1320
1321def : InstAlias<"mov $dst, $src",
1322                (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
1323def : InstAlias<"mov $dst, $src",
1324                (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
1325def : InstAlias<"mov $dst, $src",
1326                (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
1327def : InstAlias<"mov $dst, $src",
1328                (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;
1329
1330defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
1331defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;
1332
1333// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
1334def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
1335          (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
1336def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
1337          (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
1338def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
1339          (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
1340def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
1341          (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
1342def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
1343          (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
1344def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
1345          (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
1346let AddedComplexity = 1 in {
1347def : Pat<(sub GPR32sp:$R2, arith_extended_reg32_i32:$R3),
1348          (SUBSWrx GPR32sp:$R2, arith_extended_reg32_i32:$R3)>;
1349def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64_i64:$R3),
1350          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64_i64:$R3)>;
1351}
1352
// Because of the immediate format for add/sub-imm instructions, the
// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1), and
// likewise (sub x, -1) to (ADD{W,X}ri x, 1). These patterns capture that
// transformation.
1356let AddedComplexity = 1 in {
1357def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1358          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1359def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1360          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1361def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1362          (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1363def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1364          (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1365}
1366
// The same applies to the flag-setting variants: (add_flag x, -1) must become
// (SUBS{W,X}ri x, 1) and (sub_flag x, -1) must become (ADDS{W,X}ri x, 1).
// These patterns capture that transformation.
1370let AddedComplexity = 1 in {
1371def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1372          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1373def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1374          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1375def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1376          (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1377def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1378          (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1379}
1380
1381def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1382def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1383def : InstAlias<"neg $dst, $src$shift",
1384                (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1385def : InstAlias<"neg $dst, $src$shift",
1386                (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1387
1388def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1389def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1390def : InstAlias<"negs $dst, $src$shift",
1391                (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1392def : InstAlias<"negs $dst, $src$shift",
1393                (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1394
1395
1396// Unsigned/Signed divide
1397defm UDIV : Div<0, "udiv", udiv>;
1398defm SDIV : Div<1, "sdiv", sdiv>;
1399
1400def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr GPR32:$Rn, GPR32:$Rm)>;
1401def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr GPR64:$Rn, GPR64:$Rm)>;
1402def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr GPR32:$Rn, GPR32:$Rm)>;
1403def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr GPR64:$Rn, GPR64:$Rm)>;
1404
1405// Variable shift
1406defm ASRV : Shift<0b10, "asr", sra>;
1407defm LSLV : Shift<0b00, "lsl", shl>;
1408defm LSRV : Shift<0b01, "lsr", srl>;
1409defm RORV : Shift<0b11, "ror", rotr>;
1410
1411def : ShiftAlias<"asrv", ASRVWr, GPR32>;
1412def : ShiftAlias<"asrv", ASRVXr, GPR64>;
1413def : ShiftAlias<"lslv", LSLVWr, GPR32>;
1414def : ShiftAlias<"lslv", LSLVXr, GPR64>;
1415def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
1416def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
1417def : ShiftAlias<"rorv", RORVWr, GPR32>;
1418def : ShiftAlias<"rorv", RORVXr, GPR64>;
1419
1420// Multiply-add
1421let AddedComplexity = 5 in {
1422defm MADD : MulAccum<0, "madd", add>;
1423defm MSUB : MulAccum<1, "msub", sub>;
1424
1425def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
1426          (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1427def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
1428          (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1429
1430def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
1431          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1432def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
1433          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1434def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
1435          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1436def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
1437          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1438} // AddedComplexity = 5
1439
1440let AddedComplexity = 5 in {
1441def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
1442def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
1443def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
1444def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;
1445
1446def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
1447          (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1448def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
1449          (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1450
1451def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
1452          (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1453def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
1454          (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1455
1456def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
1457          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1458def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
1459          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1460def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
1461          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1462                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1463
1464def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1465          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1466def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1467          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1468def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
1469          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1470                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1471
1472def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
1473          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1474def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
1475          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1476def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
1477                    GPR64:$Ra)),
1478          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1479                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1480
1481def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1482          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1483def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1484          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1485def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
1486                                    (s64imm_32bit:$C)))),
1487          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1488                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1489} // AddedComplexity = 5
1490
1491def : MulAccumWAlias<"mul", MADDWrrr>;
1492def : MulAccumXAlias<"mul", MADDXrrr>;
1493def : MulAccumWAlias<"mneg", MSUBWrrr>;
1494def : MulAccumXAlias<"mneg", MSUBXrrr>;
1495def : WideMulAccumAlias<"smull", SMADDLrrr>;
1496def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
1497def : WideMulAccumAlias<"umull", UMADDLrrr>;
1498def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;
1499
1500// Multiply-high
1501def SMULHrr : MulHi<0b010, "smulh", mulhs>;
1502def UMULHrr : MulHi<0b110, "umulh", mulhu>;
1503
1504// CRC32
1505def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
1506def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
1507def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
1508def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;
1509
1510def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
1511def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
1512def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
1513def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;
1514
1515// v8.1 atomic CAS
1516defm CAS   : CompareAndSwap<0, 0, "">;
1517defm CASA  : CompareAndSwap<1, 0, "a">;
1518defm CASL  : CompareAndSwap<0, 1, "l">;
1519defm CASAL : CompareAndSwap<1, 1, "al">;
1520
1521// v8.1 atomic CASP
1522defm CASP   : CompareAndSwapPair<0, 0, "">;
1523defm CASPA  : CompareAndSwapPair<1, 0, "a">;
1524defm CASPL  : CompareAndSwapPair<0, 1, "l">;
1525defm CASPAL : CompareAndSwapPair<1, 1, "al">;
1526
1527// v8.1 atomic SWP
1528defm SWP   : Swap<0, 0, "">;
1529defm SWPA  : Swap<1, 0, "a">;
1530defm SWPL  : Swap<0, 1, "l">;
1531defm SWPAL : Swap<1, 1, "al">;
1532
// v8.1 atomic LD<OP> (register): atomically loads the old value into Rt and
// stores (old value <OP> Rs) back to the same location.
1534defm LDADD   : LDOPregister<0b000, "add", 0, 0, "">;
1535defm LDADDA  : LDOPregister<0b000, "add", 1, 0, "a">;
1536defm LDADDL  : LDOPregister<0b000, "add", 0, 1, "l">;
1537defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;
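// For example, "ldadd w1, w2, [x0]" atomically loads the old value at [x0]
// into w2 and stores (old value + w1) back to [x0]; the a/l/al suffixes add
// acquire and/or release ordering.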
1538
1539defm LDCLR   : LDOPregister<0b001, "clr", 0, 0, "">;
1540defm LDCLRA  : LDOPregister<0b001, "clr", 1, 0, "a">;
1541defm LDCLRL  : LDOPregister<0b001, "clr", 0, 1, "l">;
1542defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;
1543
1544defm LDEOR   : LDOPregister<0b010, "eor", 0, 0, "">;
1545defm LDEORA  : LDOPregister<0b010, "eor", 1, 0, "a">;
1546defm LDEORL  : LDOPregister<0b010, "eor", 0, 1, "l">;
1547defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;
1548
1549defm LDSET   : LDOPregister<0b011, "set", 0, 0, "">;
1550defm LDSETA  : LDOPregister<0b011, "set", 1, 0, "a">;
1551defm LDSETL  : LDOPregister<0b011, "set", 0, 1, "l">;
1552defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;
1553
1554defm LDSMAX   : LDOPregister<0b100, "smax", 0, 0, "">;
1555defm LDSMAXA  : LDOPregister<0b100, "smax", 1, 0, "a">;
1556defm LDSMAXL  : LDOPregister<0b100, "smax", 0, 1, "l">;
1557defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;
1558
1559defm LDSMIN   : LDOPregister<0b101, "smin", 0, 0, "">;
1560defm LDSMINA  : LDOPregister<0b101, "smin", 1, 0, "a">;
1561defm LDSMINL  : LDOPregister<0b101, "smin", 0, 1, "l">;
1562defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;
1563
1564defm LDUMAX   : LDOPregister<0b110, "umax", 0, 0, "">;
1565defm LDUMAXA  : LDOPregister<0b110, "umax", 1, 0, "a">;
1566defm LDUMAXL  : LDOPregister<0b110, "umax", 0, 1, "l">;
1567defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;
1568
1569defm LDUMIN   : LDOPregister<0b111, "umin", 0, 0, "">;
1570defm LDUMINA  : LDOPregister<0b111, "umin", 1, 0, "a">;
1571defm LDUMINL  : LDOPregister<0b111, "umin", 0, 1, "l">;
1572defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;
1573
// v8.1 atomic ST<OP> (register), as aliases for LD<OP> (register) with Rt = XZR
1575defm : STOPregister<"stadd","LDADD">; // STADDx
1576defm : STOPregister<"stclr","LDCLR">; // STCLRx
1577defm : STOPregister<"steor","LDEOR">; // STEORx
1578defm : STOPregister<"stset","LDSET">; // STSETx
1579defm : STOPregister<"stsmax","LDSMAX">;// STSMAXx
1580defm : STOPregister<"stsmin","LDSMIN">;// STSMINx
1581defm : STOPregister<"stumax","LDUMAX">;// STUMAXx
1582defm : STOPregister<"stumin","LDUMIN">;// STUMINx
1583
1584// v8.5 Memory Tagging Extension
1585let Predicates = [HasMTE] in {
1586
1587def IRG   : BaseTwoOperand<0b0100, GPR64sp, "irg", int_aarch64_irg, GPR64sp, GPR64>,
            Sched<[]> {
1589  let Inst{31} = 1;
1590}
def GMI   : BaseTwoOperand<0b0101, GPR64, "gmi", int_aarch64_gmi, GPR64sp>, Sched<[]> {
1592  let Inst{31} = 1;
1593  let isNotDuplicable = 1;
1594}
1595def ADDG  : AddSubG<0, "addg", null_frag>;
1596def SUBG  : AddSubG<1, "subg", null_frag>;
1597
1598def : InstAlias<"irg $dst, $src", (IRG GPR64sp:$dst, GPR64sp:$src, XZR), 1>;
1599
1600def SUBP : SUBP<0, "subp", int_aarch64_subp>, Sched<[]>;
def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]> {
1602  let Defs = [NZCV];
1603}
1604
1605def : InstAlias<"cmpp $lhs, $rhs", (SUBPS XZR, GPR64sp:$lhs, GPR64sp:$rhs), 0>;
1606
1607def LDG : MemTagLoad<"ldg", "\t$Rt, [$Rn, $offset]">;
1608
1609def : Pat<(int_aarch64_addg (am_indexedu6s128 GPR64sp:$Rn, uimm6s16:$imm6), imm0_15:$imm4),
1610          (ADDG GPR64sp:$Rn, imm0_63:$imm6, imm0_15:$imm4)>;
1611def : Pat<(int_aarch64_ldg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn,  simm9s16:$offset)),
1612          (LDG GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;
1613
1614def : InstAlias<"ldg $Rt, [$Rn]", (LDG GPR64:$Rt, GPR64sp:$Rn, 0), 1>;
1615
1616def LDGM : MemTagVector<1, "ldgm", "\t$Rt, [$Rn]",
1617                   (outs GPR64:$Rt), (ins GPR64sp:$Rn)>;
1618def STGM : MemTagVector<0, "stgm", "\t$Rt, [$Rn]",
1619                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)>;
1620def STZGM : MemTagVector<0, "stzgm", "\t$Rt, [$Rn]",
1621                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)> {
1622  let Inst{23} = 0;
1623}
1624
1625defm STG   : MemTagStore<0b00, "stg">;
1626defm STZG  : MemTagStore<0b01, "stzg">;
1627defm ST2G  : MemTagStore<0b10, "st2g">;
1628defm STZ2G : MemTagStore<0b11, "stz2g">;
1629
1630def : Pat<(AArch64stg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1631          (STGOffset $Rn, $Rm, $imm)>;
1632def : Pat<(AArch64stzg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1633          (STZGOffset $Rn, $Rm, $imm)>;
1634def : Pat<(AArch64st2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1635          (ST2GOffset $Rn, $Rm, $imm)>;
1636def : Pat<(AArch64stz2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1637          (STZ2GOffset $Rn, $Rm, $imm)>;
1638
1639defm STGP     : StorePairOffset <0b01, 0, GPR64z, simm7s16, "stgp">;
1640def  STGPpre  : StorePairPreIdx <0b01, 0, GPR64z, simm7s16, "stgp">;
1641def  STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">;
1642
1643def : Pat<(int_aarch64_stg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)),
1644          (STGOffset GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;
1645
1646def : Pat<(int_aarch64_stgp (am_indexed7s128 GPR64sp:$Rn, simm7s16:$imm), GPR64:$Rt, GPR64:$Rt2),
1647          (STGPi $Rt, $Rt2, $Rn, $imm)>;
1648
1649def IRGstack
1650    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rsp, GPR64:$Rm), []>,
1651      Sched<[]>;
1652def TAGPstack
1653    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rn, uimm6s16:$imm6, GPR64sp:$Rm, imm0_15:$imm4), []>,
1654      Sched<[]>;
1655
1656// Explicit SP in the first operand prevents ShrinkWrap optimization
1657// from leaving this instruction out of the stack frame. When IRGstack
1658// is transformed into IRG, this operand is replaced with the actual
1659// register / expression for the tagged base pointer of the current function.
1660def : Pat<(int_aarch64_irg_sp i64:$Rm), (IRGstack SP, i64:$Rm)>;
1661
// Large STG to be expanded into a loop. $sz is the size, $Rn is the start
// address. $Rn_wback is one past the end of the range. $Rm is the loop counter.
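// A rough sketch of the expanded loop (illustrative only; the actual code is
// produced by a later pseudo-expansion pass):
//   loop:
//     stg  $Rn, [$Rn], #16
//     subs $Rm, $Rm, #16
//     b.ne loop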
1664let isCodeGenOnly=1, mayStore=1 in {
1665def STGloop_wback
1666    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
1667             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
1668      Sched<[WriteAdr, WriteST]>;
1669
1670def STZGloop_wback
1671    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
1672             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
1673      Sched<[WriteAdr, WriteST]>;
1674
// Variants of the above where $Rn2 is an independent register not tied to the
// input register $Rn. Their purpose is to allow a FrameIndex operand as $Rn
// (which of course cannot be written back).
1677def STGloop
1678    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
1679             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
1680      Sched<[WriteAdr, WriteST]>;
1681
1682def STZGloop
1683    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
1684             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
1685      Sched<[WriteAdr, WriteST]>;
1686}
1687
1688} // Predicates = [HasMTE]
1689
1690//===----------------------------------------------------------------------===//
1691// Logical instructions.
1692//===----------------------------------------------------------------------===//
1693
1694// (immediate)
1695defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
1696defm AND  : LogicalImm<0b00, "and", and, "bic">;
1697defm EOR  : LogicalImm<0b10, "eor", xor, "eon">;
1698defm ORR  : LogicalImm<0b01, "orr", or, "orn">;
1699
1700// FIXME: these aliases *are* canonical sometimes (when movz can't be
1701// used). Actually, it seems to be working right now, but putting logical_immXX
1702// here is a bit dodgy on the AsmParser side too.
1703def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
1704                                          logical_imm32:$imm), 0>;
1705def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
1706                                          logical_imm64:$imm), 0>;
1707
1708
1709// (register)
1710defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
1711defm BICS : LogicalRegS<0b11, 1, "bics",
1712                        BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
1713defm AND  : LogicalReg<0b00, 0, "and", and>;
1714defm BIC  : LogicalReg<0b00, 1, "bic",
1715                       BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
1716defm EON  : LogicalReg<0b10, 1, "eon",
1717                       BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
1718defm EOR  : LogicalReg<0b10, 0, "eor", xor>;
1719defm ORN  : LogicalReg<0b01, 1, "orn",
1720                       BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
1721defm ORR  : LogicalReg<0b01, 0, "orr", or>;
1722
1723def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
1724def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;
1725
1726def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
1727def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;
1728
1729def : InstAlias<"mvn $Wd, $Wm$sh",
1730                (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
1731def : InstAlias<"mvn $Xd, $Xm$sh",
1732                (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;
1733
1734def : InstAlias<"tst $src1, $src2",
1735                (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
1736def : InstAlias<"tst $src1, $src2",
1737                (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;
1738
1739def : InstAlias<"tst $src1, $src2",
1740                        (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
1741def : InstAlias<"tst $src1, $src2",
1742                        (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;
1743
1744def : InstAlias<"tst $src1, $src2$sh",
1745               (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
1746def : InstAlias<"tst $src1, $src2$sh",
1747               (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;
1748
1749
1750def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
1751def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
1752
1753
1754//===----------------------------------------------------------------------===//
1755// One operand data processing instructions.
1756//===----------------------------------------------------------------------===//
1757
1758defm CLS    : OneOperandData<0b101, "cls">;
1759defm CLZ    : OneOperandData<0b100, "clz", ctlz>;
1760defm RBIT   : OneOperandData<0b000, "rbit", bitreverse>;
1761
1762def  REV16Wr : OneWRegData<0b001, "rev16",
1763                                  UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
1764def  REV16Xr : OneXRegData<0b001, "rev16", null_frag>;
1765
1766def : Pat<(cttz GPR32:$Rn),
1767          (CLZWr (RBITWr GPR32:$Rn))>;
1768def : Pat<(cttz GPR64:$Rn),
1769          (CLZXr (RBITXr GPR64:$Rn))>;
1770def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
1771                (i32 1))),
1772          (CLSWr GPR32:$Rn)>;
1773def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
1774                (i64 1))),
1775          (CLSXr GPR64:$Rn)>;
1776def : Pat<(int_aarch64_cls GPR32:$Rn), (CLSWr GPR32:$Rn)>;
1777def : Pat<(int_aarch64_cls64 GPR64:$Rm), (EXTRACT_SUBREG (CLSXr GPR64:$Rm), sub_32)>;
1778
// Unlike the other one-operand instructions, the instructions with the "rev"
// mnemonic do *not* just differ in the size bit; they actually use different
// opcode bits for the different sizes.
1782def REVWr   : OneWRegData<0b010, "rev", bswap>;
1783def REVXr   : OneXRegData<0b011, "rev", bswap>;
1784def REV32Xr : OneXRegData<0b010, "rev32",
1785                                 UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;
1786
1787def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;
1788
1789// The bswap commutes with the rotr so we want a pattern for both possible
1790// orders.
1791def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
1792def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
1793
1794//===----------------------------------------------------------------------===//
1795// Bitfield immediate extraction instruction.
1796//===----------------------------------------------------------------------===//
1797let hasSideEffects = 0 in
1798defm EXTR : ExtractImm<"extr">;
1799def : InstAlias<"ror $dst, $src, $shift",
1800            (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
1801def : InstAlias<"ror $dst, $src, $shift",
1802            (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;
1803
1804def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
1805          (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
1806def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
1807          (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;
1808
1809//===----------------------------------------------------------------------===//
1810// Other bitfield immediate instructions.
1811//===----------------------------------------------------------------------===//
1812let hasSideEffects = 0 in {
1813defm BFM  : BitfieldImmWith2RegArgs<0b01, "bfm">;
1814defm SBFM : BitfieldImm<0b00, "sbfm">;
1815defm UBFM : BitfieldImm<0b10, "ubfm">;
1816}
1817
1818def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
1819  uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
1820  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1821}]>;
1822
1823def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
1824  uint64_t enc = 31 - N->getZExtValue();
1825  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1826}]>;
1827
1828// min(7, 31 - shift_amt)
1829def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
1830  uint64_t enc = 31 - N->getZExtValue();
1831  enc = enc > 7 ? 7 : enc;
1832  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1833}]>;
1834
1835// min(15, 31 - shift_amt)
1836def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
1837  uint64_t enc = 31 - N->getZExtValue();
1838  enc = enc > 15 ? 15 : enc;
1839  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1840}]>;
1841
1842def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
1843  uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
1844  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1845}]>;
1846
1847def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
1848  uint64_t enc = 63 - N->getZExtValue();
1849  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1850}]>;
1851
1852// min(7, 63 - shift_amt)
1853def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
1854  uint64_t enc = 63 - N->getZExtValue();
1855  enc = enc > 7 ? 7 : enc;
1856  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1857}]>;
1858
1859// min(15, 63 - shift_amt)
1860def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
1861  uint64_t enc = 63 - N->getZExtValue();
1862  enc = enc > 15 ? 15 : enc;
1863  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1864}]>;
1865
1866// min(31, 63 - shift_amt)
1867def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
1868  uint64_t enc = 63 - N->getZExtValue();
1869  enc = enc > 31 ? 31 : enc;
1870  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1871}]>;
1872
1873def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
1874          (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
1875                              (i64 (i32shift_b imm0_31:$imm)))>;
1876def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
1877          (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
1878                              (i64 (i64shift_b imm0_63:$imm)))>;
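// Worked example: "lsl w0, w0, #3" is selected as (UBFMWri w0, w0, 29, 28),
// since i32shift_a yields (32 - 3) & 0x1f = 29 and i32shift_b yields
// 31 - 3 = 28.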
1879
1880let AddedComplexity = 10 in {
1881def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
1882          (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
1883def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
1884          (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
1885}
1886
1887def : InstAlias<"asr $dst, $src, $shift",
1888                (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
1889def : InstAlias<"asr $dst, $src, $shift",
1890                (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
1891def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
1892def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
1893def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
1894def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
1895def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
1896
1897def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
1898          (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
1899def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
1900          (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
1901
1902def : InstAlias<"lsr $dst, $src, $shift",
1903                (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
1904def : InstAlias<"lsr $dst, $src, $shift",
1905                (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
1906def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
1907def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
1908def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
1909def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
1910def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
1911
1912//===----------------------------------------------------------------------===//
1913// Conditional comparison instructions.
1914//===----------------------------------------------------------------------===//
1915defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
1916defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;
1917
1918//===----------------------------------------------------------------------===//
1919// Conditional select instructions.
1920//===----------------------------------------------------------------------===//
1921defm CSEL  : CondSelect<0, 0b00, "csel">;
1922
1923def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
1924defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
1925defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
1926defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;
1927
1928def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1929          (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1930def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1931          (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1932def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1933          (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1934def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1935          (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1936def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1937          (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1938def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1939          (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1940
1941def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
1942          (CSINCWr WZR, WZR, (i32 imm:$cc))>;
1943def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
1944          (CSINCXr XZR, XZR, (i32 imm:$cc))>;
1945def : Pat<(AArch64csel GPR32:$tval, (i32 1), (i32 imm:$cc), NZCV),
1946          (CSINCWr GPR32:$tval, WZR, (i32 imm:$cc))>;
1947def : Pat<(AArch64csel GPR64:$tval, (i64 1), (i32 imm:$cc), NZCV),
1948          (CSINCXr GPR64:$tval, XZR, (i32 imm:$cc))>;
1949def : Pat<(AArch64csel (i32 1), GPR32:$fval, (i32 imm:$cc), NZCV),
1950          (CSINCWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1951def : Pat<(AArch64csel (i64 1), GPR64:$fval, (i32 imm:$cc), NZCV),
1952          (CSINCXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1953def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
1954          (CSINVWr WZR, WZR, (i32 imm:$cc))>;
1955def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
1956          (CSINVXr XZR, XZR, (i32 imm:$cc))>;
1957def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
1958          (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
1959def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
1960          (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
1961def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
1962          (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1963def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
1964          (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1965
// The aliased instruction uses the inverse of the condition code written in
// the alias. The parser already inverts the condition code for these aliases.
1969def : InstAlias<"cset $dst, $cc",
1970                (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
1971def : InstAlias<"cset $dst, $cc",
1972                (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
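// For example, "cset w0, eq" is parsed into (CSINCWr w0, wzr, wzr, ne):
// w0 becomes 1 if the EQ condition holds (csinc picks wzr + 1) and 0 otherwise.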
1973
1974def : InstAlias<"csetm $dst, $cc",
1975                (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
1976def : InstAlias<"csetm $dst, $cc",
1977                (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
1978
1979def : InstAlias<"cinc $dst, $src, $cc",
1980                (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1981def : InstAlias<"cinc $dst, $src, $cc",
1982                (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
1983
1984def : InstAlias<"cinv $dst, $src, $cc",
1985                (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1986def : InstAlias<"cinv $dst, $src, $cc",
1987                (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
1988
1989def : InstAlias<"cneg $dst, $src, $cc",
1990                (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1991def : InstAlias<"cneg $dst, $src, $cc",
1992                (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
1993
1994//===----------------------------------------------------------------------===//
1995// PC-relative instructions.
1996//===----------------------------------------------------------------------===//
1997let isReMaterializable = 1 in {
1998let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
1999def ADR  : ADRI<0, "adr", adrlabel,
2000                [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>;
2001} // hasSideEffects = 0
2002
2003def ADRP : ADRI<1, "adrp", adrplabel,
2004                [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
2005} // isReMaterializable = 1
2006
// Direct and page addresses of constant pool entries, block addresses,
// external symbols and jump tables.
2008def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>;
2009def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>;
2010def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>;
2011def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>;
2012def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
2013def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
2014def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>;
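// A typical use pairs ADRP with a low-12-bits add to form a full address,
// e.g. "adrp x0, sym" followed by "add x0, x0, :lo12:sym".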
2015
2016//===----------------------------------------------------------------------===//
2017// Unconditional branch (register) instructions.
2018//===----------------------------------------------------------------------===//
2019
2020let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
2021def RET  : BranchReg<0b0010, "ret", []>;
2022def DRPS : SpecialReturn<0b0101, "drps">;
2023def ERET : SpecialReturn<0b0100, "eret">;
2024} // isReturn = 1, isTerminator = 1, isBarrier = 1
2025
2026// Default to the LR register.
2027def : InstAlias<"ret", (RET LR)>;
2028
2029let isCall = 1, Defs = [LR], Uses = [SP] in {
2030  def BLR : BranchReg<0b0001, "blr", []>;
2031  def BLRNoIP : Pseudo<(outs), (ins GPR64noip:$Rn), []>,
2032                Sched<[WriteBrReg]>,
2033                PseudoInstExpansion<(BLR GPR64:$Rn)>;
2034} // isCall
2035
2036def : Pat<(AArch64call GPR64:$Rn),
2037          (BLR GPR64:$Rn)>,
2038      Requires<[NoSLSBLRMitigation]>;
2039def : Pat<(AArch64call GPR64noip:$Rn),
2040          (BLRNoIP GPR64noip:$Rn)>,
2041      Requires<[SLSBLRMitigation]>;
2042
2043let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
2044def BR  : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
2045} // isBranch, isTerminator, isBarrier, isIndirectBranch
2046
2047// Create a separate pseudo-instruction for codegen to use so that we don't
2048// flag lr as used in every function. It'll be restored before the RET by the
2049// epilogue if it's legitimately used.
2050def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
2051                   Sched<[WriteBrReg]> {
2052  let isTerminator = 1;
2053  let isBarrier = 1;
2054  let isReturn = 1;
2055}
2056
2057// This is a directive-like pseudo-instruction. The purpose is to insert an
2058// R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
2059// (which in the usual case is a BLR).
2060let hasSideEffects = 1 in
2061def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
2062  let AsmString = ".tlsdesccall $sym";
2063}
2064
2065// Pseudo instruction to tell the streamer to emit a 'B' character into the
2066// augmentation string.
2067def EMITBKEY : Pseudo<(outs), (ins), []>, Sched<[]> {}
2068
// FIXME: maybe the scratch register used shouldn't be fixed to X1?
// FIXME: can hasSideEffects be dropped?
2071let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1,
2072    isCodeGenOnly = 1 in
2073def TLSDESC_CALLSEQ
2074    : Pseudo<(outs), (ins i64imm:$sym),
2075             [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
2076      Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
2077def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
2078          (TLSDESC_CALLSEQ texternalsym:$sym)>;
2079
2080//===----------------------------------------------------------------------===//
2081// Conditional branch (immediate) instruction.
2082//===----------------------------------------------------------------------===//
2083def Bcc : BranchCond;
2084
2085//===----------------------------------------------------------------------===//
2086// Compare-and-branch instructions.
2087//===----------------------------------------------------------------------===//
2088defm CBZ  : CmpBranch<0, "cbz", AArch64cbz>;
2089defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;
2090
2091//===----------------------------------------------------------------------===//
2092// Test-bit-and-branch instructions.
2093//===----------------------------------------------------------------------===//
2094defm TBZ  : TestBranch<0, "tbz", AArch64tbz>;
2095defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;
2096
2097//===----------------------------------------------------------------------===//
2098// Unconditional branch (immediate) instructions.
2099//===----------------------------------------------------------------------===//
2100let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
2101def B  : BranchImm<0, "b", [(br bb:$addr)]>;
2102} // isBranch, isTerminator, isBarrier
2103
2104let isCall = 1, Defs = [LR], Uses = [SP] in {
2105def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
2106} // isCall
2107def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;
2108
2109//===----------------------------------------------------------------------===//
2110// Exception generation instructions.
2111//===----------------------------------------------------------------------===//
2112let isTrap = 1 in {
2113def BRK   : ExceptionGeneration<0b001, 0b00, "brk">;
2114}
2115def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
2116def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
2117def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">;
2118def HLT   : ExceptionGeneration<0b010, 0b00, "hlt">;
2119def HVC   : ExceptionGeneration<0b000, 0b10, "hvc">;
2120def SMC   : ExceptionGeneration<0b000, 0b11, "smc">;
2121def SVC   : ExceptionGeneration<0b000, 0b01, "svc">;
2122
2123// DCPSn defaults to an immediate operand of zero if unspecified.
2124def : InstAlias<"dcps1", (DCPS1 0)>;
2125def : InstAlias<"dcps2", (DCPS2 0)>;
2126def : InstAlias<"dcps3", (DCPS3 0)>;
2127
2128def UDF : UDFType<0, "udf">;
2129
2130//===----------------------------------------------------------------------===//
2131// Load instructions.
2132//===----------------------------------------------------------------------===//
2133
2134// Pair (indexed, offset)
2135defm LDPW : LoadPairOffset<0b00, 0, GPR32z, simm7s4, "ldp">;
2136defm LDPX : LoadPairOffset<0b10, 0, GPR64z, simm7s8, "ldp">;
2137defm LDPS : LoadPairOffset<0b00, 1, FPR32Op, simm7s4, "ldp">;
2138defm LDPD : LoadPairOffset<0b01, 1, FPR64Op, simm7s8, "ldp">;
2139defm LDPQ : LoadPairOffset<0b10, 1, FPR128Op, simm7s16, "ldp">;
2140
2141defm LDPSW : LoadPairOffset<0b01, 0, GPR64z, simm7s4, "ldpsw">;
2142
2143// Pair (pre-indexed)
2144def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
2145def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
2146def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
2147def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
2148def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;
2149
2150def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;
2151
2152// Pair (post-indexed)
2153def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
2154def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
2155def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
2156def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
2157def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;
2158
2159def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;
2160
2161
2162// Pair (no allocate)
2163defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32z, simm7s4, "ldnp">;
2164defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64z, simm7s8, "ldnp">;
2165defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32Op, simm7s4, "ldnp">;
2166defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64Op, simm7s8, "ldnp">;
2167defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128Op, simm7s16, "ldnp">;
2168
2169def : Pat<(AArch64ldp (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
2170          (LDPXi GPR64sp:$Rn, simm7s8:$offset)>;
2171
2172//---
2173// (register offset)
2174//---
2175
2176// Integer
2177defm LDRBB : Load8RO<0b00,  0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
2178defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
2179defm LDRW  : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
2180defm LDRX  : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;
2181
2182// Floating-point
2183defm LDRB : Load8RO<0b00,   1, 0b01, FPR8Op,   "ldr", untyped, load>;
2184defm LDRH : Load16RO<0b01,  1, 0b01, FPR16Op,  "ldr", f16, load>;
2185defm LDRS : Load32RO<0b10,  1, 0b01, FPR32Op,  "ldr", f32, load>;
2186defm LDRD : Load64RO<0b11,  1, 0b01, FPR64Op,  "ldr", f64, load>;
2187defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128Op, "ldr", f128, load>;
2188
2189// Load sign-extended half-word
2190defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
2191defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;
2192
2193// Load sign-extended byte
2194defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
2195defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;
2196
2197// Load sign-extended word
2198defm LDRSW  : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;
2199
2200// Pre-fetch.
2201defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;
2202
// For regular loads we do not have any alignment requirement, so it is safe
// to map the vector loads with interesting addressing modes directly.
2206// FIXME: We could do the same for bitconvert to floating point vectors.
2207multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
2208                              ValueType ScalTy, ValueType VecTy,
2209                              Instruction LOADW, Instruction LOADX,
2210                              SubRegIndex sub> {
2211  def : Pat<(VecTy (scalar_to_vector (ScalTy
2212              (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
2213            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
2214                           (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
2215                           sub)>;
2216
2217  def : Pat<(VecTy (scalar_to_vector (ScalTy
2218              (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
2219            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
2220                           (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
2221                           sub)>;
2222}
2223
2224let AddedComplexity = 10 in {
2225defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v8i8,  LDRBroW, LDRBroX, bsub>;
2226defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v16i8, LDRBroW, LDRBroX, bsub>;
2227
2228defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
2229defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;
2230
2231defm : ScalToVecROLoadPat<ro16, load,       i32, v4f16, LDRHroW, LDRHroX, hsub>;
2232defm : ScalToVecROLoadPat<ro16, load,       i32, v8f16, LDRHroW, LDRHroX, hsub>;
2233
2234defm : ScalToVecROLoadPat<ro32, load,       i32, v2i32, LDRSroW, LDRSroX, ssub>;
2235defm : ScalToVecROLoadPat<ro32, load,       i32, v4i32, LDRSroW, LDRSroX, ssub>;
2236
2237defm : ScalToVecROLoadPat<ro32, load,       f32, v2f32, LDRSroW, LDRSroX, ssub>;
2238defm : ScalToVecROLoadPat<ro32, load,       f32, v4f32, LDRSroW, LDRSroX, ssub>;
2239
2240defm : ScalToVecROLoadPat<ro64, load,       i64, v2i64, LDRDroW, LDRDroX, dsub>;
2241
2242defm : ScalToVecROLoadPat<ro64, load,       f64, v2f64, LDRDroW, LDRDroX, dsub>;
2243
2244
2245def : Pat <(v1i64 (scalar_to_vector (i64
2246                      (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
2247                                           ro_Wextend64:$extend))))),
2248           (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
2249
2250def : Pat <(v1i64 (scalar_to_vector (i64
2251                      (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
2252                                           ro_Xextend64:$extend))))),
2253           (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
2254}
2255
// Match all 64-bit-wide loads whose type is compatible with FPR64
multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
                        Instruction LOADW, Instruction LOADX> {

  def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}
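
// For example, the v2i32 instantiation below turns
//   (v2i32 (load (ro64 [x0, x1, lsl #3])))
// into "ldr d0, [x0, x1, lsl #3]"; a plain scalar LDR already moves all
// 64 bits, so no vector-specific instruction is needed.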

let AddedComplexity = 10 in {
let Predicates = [IsLE] in {
  // We must use LD1 for vector loads in big-endian mode, so these LDR
  // patterns are little-endian only.
  defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v8i8,  LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4bf16, LDRDroW, LDRDroX>;
}

defm : VecROLoadPat<ro64, v1i64,  LDRDroW, LDRDroX>;
defm : VecROLoadPat<ro64, v1f64,  LDRDroW, LDRDroX>;

// Match all 128-bit-wide loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  // We must use LD1 for vector loads in big-endian mode, so these LDR
  // patterns are little-endian only.
  defm : VecROLoadPat<ro128, v2i64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v2f64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4i32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4f32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8i16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8f16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8bf16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v16i8,  LDRQroW, LDRQroX>;
}
} // AddedComplexity = 10

// zextload -> i64
multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                           sub_32)>;

  def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                           sub_32)>;
}
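
// The (SUBREG_TO_REG (i64 0), ..., sub_32) wrapper relies on the
// architectural guarantee that a 32-bit load zeroes bits [63:32] of the X
// register, so e.g. an i64 zextloadi8 needs nothing beyond the plain
// "ldrb w0, [x0, x1]".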

let AddedComplexity = 10 in {
  defm : ExtLoadTo64ROPat<ro8,  zextloadi8,  LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  zextloadi1,  LDRBBroW, LDRBBroX>;

  // extload -> zextload
  defm : ExtLoadTo64ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // extloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  extloadi1,   LDRBBroW, LDRBBroX>;
}

// extload/zextload -> i32
multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}
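
// extload leaves the high bits unspecified, so reusing the zero-extending
// instructions below is always correct; e.g. (i32 (extloadi8 addr)) also
// selects "ldrb".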

let AddedComplexity = 10 in {
  // extload -> zextload
  defm : ExtLoadTo32ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo32ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo32ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
}

//---
// (unsigned immediate)
//---
defm LDRX : LoadUI<0b11, 0, 0b01, GPR64z, uimm12s8, "ldr",
                   [(set GPR64z:$Rt,
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRW : LoadUI<0b10, 0, 0b01, GPR32z, uimm12s4, "ldr",
                   [(set GPR32z:$Rt,
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRB : LoadUI<0b00, 1, 0b01, FPR8Op, uimm12s1, "ldr",
                   [(set FPR8Op:$Rt,
                         (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
defm LDRH : LoadUI<0b01, 1, 0b01, FPR16Op, uimm12s2, "ldr",
                   [(set (f16 FPR16Op:$Rt),
                         (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
defm LDRS : LoadUI<0b10, 1, 0b01, FPR32Op, uimm12s4, "ldr",
                   [(set (f32 FPR32Op:$Rt),
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRD : LoadUI<0b11, 1, 0b01, FPR64Op, uimm12s8, "ldr",
                   [(set (f64 FPR64Op:$Rt),
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128Op, uimm12s16, "ldr",
                 [(set (f128 FPR128Op:$Rt),
                       (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;

// bf16 load pattern
def : Pat <(bf16 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;

// Regular loads impose no alignment requirement, so it is safe to map the
// vector loads directly onto these addressing modes.
// FIXME: We could do the same for bitconvert to floating point vectors.
def : Pat <(v8i8 (scalar_to_vector (i32
               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v16i8 (scalar_to_vector (i32
               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v4i16 (scalar_to_vector (i32
               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v8i16 (scalar_to_vector (i32
               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v2i32 (scalar_to_vector (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v4i32 (scalar_to_vector (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v1i64 (scalar_to_vector (i64
               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat <(v2i64 (scalar_to_vector (i64
               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                          (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;

// Match all 64-bit-wide loads whose type is compatible with FPR64
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian mode.
  def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4bf16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
}
def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;

// Match all 128-bit-wide loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian mode.
  def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8bf16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
}
def : Pat<(f128  (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
          (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;

defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
                    [(set GPR32:$Rt,
                          (zextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                     uimm12s2:$offset)))]>;
defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
                    [(set GPR32:$Rt,
                          (zextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                   uimm12s1:$offset)))]>;
// zextload -> i64
def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;

// zextloadi1 -> zextloadi8
def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;

// extload -> zextload
def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;

// load sign-extended half-word
defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
                     [(set GPR32:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;
defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
                     [(set GPR64:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;

// load sign-extended byte
defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
                     [(set GPR32:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;
defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
                     [(set GPR64:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;

// load sign-extended word
defm LDRSW  : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
                     [(set GPR64:$Rt,
                           (sextloadi32 (am_indexed32 GPR64sp:$Rn,
                                                      uimm12s4:$offset)))]>;

// load zero-extended word
def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
      (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;

// Pre-fetch.
def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
                        [(AArch64Prefetch imm:$Rt,
                                        (am_indexed64 GPR64sp:$Rn,
                                                      uimm12s8:$offset))]>;

def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;
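
// e.g. "prfm pldl1keep, [x1]" is accepted through this alias as PRFMui
// with a zero offset.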

//---
// (literal)

def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
  if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
    const DataLayout &DL = MF->getDataLayout();
    Align GVAlign = G->getGlobal()->getPointerAlignment(DL);
    return GVAlign >= 4 && G->getOffset() % 4 == 0;
  }
  if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
    return C->getAlign() >= 4 && C->getOffset() % 4 == 0;
  return false;
}]>;
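
// LDR (literal) scales its signed 19-bit offset by 4, so only 4-byte-aligned
// targets at 4-byte-multiple offsets are reachable; alignedglobal guards the
// literal-load patterns below accordingly.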

def LDRWl : LoadLiteral<0b00, 0, GPR32z, "ldr",
  [(set GPR32z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRXl : LoadLiteral<0b01, 0, GPR64z, "ldr",
  [(set GPR64z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRSl : LoadLiteral<0b00, 1, FPR32Op, "ldr",
  [(set (f32 FPR32Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRDl : LoadLiteral<0b01, 1, FPR64Op, "ldr",
  [(set (f64 FPR64Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRQl : LoadLiteral<0b10, 1, FPR128Op, "ldr",
  [(set (f128 FPR128Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;

// load sign-extended word
def LDRSWl : LoadLiteral<0b10, 0, GPR64z, "ldrsw",
  [(set GPR64z:$Rt, (sextloadi32 (AArch64adr alignedglobal:$label)))]>;

let AddedComplexity = 20 in {
def : Pat<(i64 (zextloadi32 (AArch64adr alignedglobal:$label))),
        (SUBREG_TO_REG (i64 0), (LDRWl $label), sub_32)>;
}

// prefetch
def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
//                   [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;

//---
// (unscaled immediate)
defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64z, "ldur",
                    [(set GPR64z:$Rt,
                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32z, "ldur",
                    [(set GPR32z:$Rt,
                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8Op, "ldur",
                    [(set FPR8Op:$Rt,
                          (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16Op, "ldur",
                    [(set (f16 FPR16Op:$Rt),
                          (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32Op, "ldur",
                    [(set (f32 FPR32Op:$Rt),
                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64Op, "ldur",
                    [(set (f64 FPR64Op:$Rt),
                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128Op, "ldur",
                    [(set (f128 FPR128Op:$Rt),
                          (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;

defm LDURHH
    : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
             [(set GPR32:$Rt,
                    (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURBB
    : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
             [(set GPR32:$Rt,
                    (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// Match all 64-bit-wide loads whose type is compatible with FPR64
let Predicates = [IsLE] in {
  def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
}
def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;

// Match all 128-bit-wide loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
}

// anyext -> zext
def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
// unscaled zext
def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;

//---
// LDR mnemonics fall back to LDUR for negative or unaligned offsets.

// Define new assembler match classes, as we want to match these only when
// they don't otherwise match the scaled addressing mode for LDR/STR. Don't
// associate a DiagnosticType either, as we want the diagnostic for the
// canonical form (the scaled operand) to take precedence.
class SImm9OffsetOperand<int Width> : AsmOperandClass {
  let Name = "SImm9OffsetFB" # Width;
  let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
  let RenderMethod = "addImmOperands";
}
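
// Illustrative example: "ldr x0, [x8, #-8]" has no scaled (LDRXui)
// encoding, so it matches one of these fallback operands instead and is
// emitted as the unscaled LDURXi form.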

def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;

def simm9_offset_fb8 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB8Operand;
}
def simm9_offset_fb16 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB16Operand;
}
def simm9_offset_fb32 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB32Operand;
}
def simm9_offset_fb64 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB64Operand;
}
def simm9_offset_fb128 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB128Operand;
}

def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
               (LDURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;

// zextload -> i64
def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
  (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
  (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;

// load sign-extended half-word
defm LDURSHW
    : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
               [(set GPR32:$Rt,
                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSHX
    : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
              [(set GPR64:$Rt,
                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended byte
defm LDURSBW
    : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
                [(set GPR32:$Rt,
                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSBX
    : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
                [(set GPR64:$Rt,
                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended word
defm LDURSW
    : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
              [(set GPR64:$Rt,
                    (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;

// Zero- and sign-extending aliases from generic LDR* mnemonics to LDUR*.
def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
                (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
                (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
                (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
                (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
                (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
                (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
                (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;

// Pre-fetch.
defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
                  [(AArch64Prefetch imm:$Rt,
                                  (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;

//---
// (unscaled immediate, unprivileged)
defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;

defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;

// load sign-extended half-word
defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;

// load sign-extended byte
defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;

// load sign-extended word
defm LDTRSW  : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;

//---
// (immediate pre-indexed)
def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte and half-word
def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;

//---
// (immediate post-indexed)
def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte and half-word
def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;

//===----------------------------------------------------------------------===//
// Store instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
// FIXME: Use dedicated range-checked addressing mode operand here.
defm STPW : StorePairOffset<0b00, 0, GPR32z, simm7s4, "stp">;
defm STPX : StorePairOffset<0b10, 0, GPR64z, simm7s8, "stp">;
defm STPS : StorePairOffset<0b00, 1, FPR32Op, simm7s4, "stp">;
defm STPD : StorePairOffset<0b01, 1, FPR64Op, simm7s8, "stp">;
defm STPQ : StorePairOffset<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (pre-indexed)
def STPWpre : StorePairPreIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpre : StorePairPreIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpre : StorePairPreIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpre : StorePairPreIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpre : StorePairPreIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (post-indexed)
def STPWpost : StorePairPostIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpost : StorePairPostIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpost : StorePairPostIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpost : StorePairPostIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpost : StorePairPostIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (no allocate)
defm STNPW : StorePairNoAlloc<0b00, 0, GPR32z, simm7s4, "stnp">;
defm STNPX : StorePairNoAlloc<0b10, 0, GPR64z, simm7s8, "stnp">;
defm STNPS : StorePairNoAlloc<0b00, 1, FPR32Op, simm7s4, "stnp">;
defm STNPD : StorePairNoAlloc<0b01, 1, FPR64Op, simm7s8, "stnp">;
defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128Op, simm7s16, "stnp">;

def : Pat<(AArch64stp GPR64z:$Rt, GPR64z:$Rt2, (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (STPXi GPR64z:$Rt, GPR64z:$Rt2, GPR64sp:$Rn, simm7s8:$offset)>;

def : Pat<(AArch64stnp FPR128:$Rt, FPR128:$Rt2, (am_indexed7s128 GPR64sp:$Rn, simm7s16:$offset)),
          (STNPQi FPR128:$Rt, FPR128:$Rt2, GPR64sp:$Rn, simm7s16:$offset)>;


//---
// (Register offset)

// Integer
defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
defm STRW  : Store32RO<0b10, 0, 0b00, GPR32, "str",  i32, store>;
defm STRX  : Store64RO<0b11, 0, 0b00, GPR64, "str",  i64, store>;

// Floating-point
defm STRB : Store8RO< 0b00,  1, 0b00, FPR8Op,   "str", untyped, store>;
defm STRH : Store16RO<0b01,  1, 0b00, FPR16Op,  "str", f16,     store>;
defm STRS : Store32RO<0b10,  1, 0b00, FPR32Op,  "str", f32,     store>;
defm STRD : Store64RO<0b11,  1, 0b00, FPR64Op,  "str", f64,     store>;
defm STRQ : Store128RO<0b00, 1, 0b10, FPR128Op, "str", f128,    store>;

let Predicates = [UseSTRQro], AddedComplexity = 10 in {
  def : Pat<(store (f128 FPR128:$Rt),
                        (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
                                        ro_Wextend128:$extend)),
            (STRQroW FPR128:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend)>;
  def : Pat<(store (f128 FPR128:$Rt),
                        (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
                                        ro_Xextend128:$extend)),
            (STRQroX FPR128:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend128:$extend)>;
}

multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
                                 Instruction STRW, Instruction STRX> {

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}
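
// For example, (truncstorei32 GPR64:$Rt, addr) simply stores the sub_32
// half of the register, e.g. "str w0, [x1, x2]"; no explicit truncation
// instruction is required.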

let AddedComplexity = 10 in {
  // truncstore i64
  defm : TruncStoreFrom64ROPat<ro8,  truncstorei8,  STRBBroW, STRBBroX>;
  defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
  defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW,  STRWroX>;
}

multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
                         Instruction STRW, Instruction STRX> {
  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}
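
// e.g. the v8i8 instantiation below stores the D register directly:
//   (store (v8i8 FPR64:$Rt), [x0, x1]) -> "str d0, [x0, x1]".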

let AddedComplexity = 10 in {
// Match all 64-bit-wide stores whose type is compatible with FPR64
let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian mode.
  defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4bf16, FPR64, STRDroW, STRDroX>;
}

defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;

// Match all 128-bit-wide stores whose type is compatible with FPR128
let Predicates = [IsLE, UseSTRQro] in {
  // We must use ST1 to store vectors in big-endian mode.
  defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8bf16, FPR128, STRQroW, STRQroX>;
}
} // AddedComplexity = 10

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
                              ValueType VecTy, ValueType STy,
                              SubRegIndex SubRegIdx,
                              Instruction STRW, Instruction STRX> {

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}
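
// e.g. storing lane 0 of a v4f32 becomes a plain "str s0, [x0, x1]": lane 0
// is just the ssub subregister, so no lane-extract instruction is needed.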

let AddedComplexity = 19 in {
  defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro16,         store, v8f16, f16, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro32,         store, v4i32, i32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro32,         store, v4f32, f32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro64,         store, v2i64, i64, dsub, STRDroW, STRDroX>;
  defm : VecROStoreLane0Pat<ro64,         store, v2f64, f64, dsub, STRDroW, STRDroX>;
}

//---
// (unsigned immediate)
defm STRX : StoreUIz<0b11, 0, 0b00, GPR64z, uimm12s8, "str",
                   [(store GPR64z:$Rt,
                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRW : StoreUIz<0b10, 0, 0b00, GPR32z, uimm12s4, "str",
                    [(store GPR32z:$Rt,
                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRB : StoreUI<0b00, 1, 0b00, FPR8Op, uimm12s1, "str",
                    [(store FPR8Op:$Rt,
                            (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
defm STRH : StoreUI<0b01, 1, 0b00, FPR16Op, uimm12s2, "str",
                    [(store (f16 FPR16Op:$Rt),
                            (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
defm STRS : StoreUI<0b10, 1, 0b00, FPR32Op, uimm12s4, "str",
                    [(store (f32 FPR32Op:$Rt),
                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRD : StoreUI<0b11, 1, 0b00, FPR64Op, uimm12s8, "str",
                    [(store (f64 FPR64Op:$Rt),
                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRQ : StoreUI<0b00, 1, 0b10, FPR128Op, uimm12s16, "str", []>;

defm STRHH : StoreUIz<0b01, 0, 0b00, GPR32z, uimm12s2, "strh",
                     [(truncstorei16 GPR32z:$Rt,
                                     (am_indexed16 GPR64sp:$Rn,
                                                   uimm12s2:$offset))]>;
defm STRBB : StoreUIz<0b00, 0, 0b00, GPR32z, uimm12s1,  "strb",
                     [(truncstorei8 GPR32z:$Rt,
                                    (am_indexed8 GPR64sp:$Rn,
                                                 uimm12s1:$offset))]>;

// bf16 store pattern
def : Pat<(store (bf16 FPR16Op:$Rt),
                 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
          (STRHui FPR16:$Rt, GPR64sp:$Rn, uimm12s2:$offset)>;

let AddedComplexity = 10 in {

// Match all 64-bit-wide stores whose type is compatible with FPR64
def : Pat<(store (v1i64 FPR64:$Rt),
                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(store (v1f64 FPR64:$Rt),
                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian mode.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4bf16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
}

// Match all 128-bit-wide stores whose type is compatible with FPR128
def : Pat<(store (f128  FPR128:$Rt),
                 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
          (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian mode.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8bf16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
}

// truncstore i64
def : Pat<(truncstorei32 GPR64:$Rt,
                         (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
  (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt,
                         (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
  (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
  (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;

} // AddedComplexity = 10

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecStoreLane0Pat<Operand UIAddrMode, SDPatternOperator storeop,
                            ValueType VTy, ValueType STy,
                            SubRegIndex SubRegIdx, Operand IndexType,
                            Instruction STR> {
  def : Pat<(storeop (STy (vector_extract (VTy VecListOne128:$Vt), 0)),
                     (UIAddrMode GPR64sp:$Rn, IndexType:$offset)),
            (STR (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                 GPR64sp:$Rn, IndexType:$offset)>;
}
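
// e.g. (store (f32 (vector_extract (v4f32 V128:$Vt), 0)), addr) becomes
// "str s0, [x0, #imm]" via the ssub instantiation below.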

let AddedComplexity = 19 in {
  defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>;
  defm : VecStoreLane0Pat<am_indexed16,         store, v8f16, f16, hsub, uimm12s2, STRHui>;
  defm : VecStoreLane0Pat<am_indexed32,         store, v4i32, i32, ssub, uimm12s4, STRSui>;
  defm : VecStoreLane0Pat<am_indexed32,         store, v4f32, f32, ssub, uimm12s4, STRSui>;
  defm : VecStoreLane0Pat<am_indexed64,         store, v2i64, i64, dsub, uimm12s8, STRDui>;
  defm : VecStoreLane0Pat<am_indexed64,         store, v2f64, f64, dsub, uimm12s8, STRDui>;
}

//---
// (unscaled immediate)
defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64z, "stur",
                         [(store GPR64z:$Rt,
                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32z, "stur",
                         [(store GPR32z:$Rt,
                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8Op, "stur",
                         [(store FPR8Op:$Rt,
                                 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16Op, "stur",
                         [(store (f16 FPR16Op:$Rt),
                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32Op, "stur",
                         [(store (f32 FPR32Op:$Rt),
                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64Op, "stur",
                         [(store (f64 FPR64Op:$Rt),
                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128Op, "stur",
                         [(store (f128 FPR128Op:$Rt),
                                 (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32z, "sturh",
                         [(truncstorei16 GPR32z:$Rt,
                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32z, "sturb",
                         [(truncstorei8 GPR32z:$Rt,
                                  (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;

// Armv8.4 Weaker Release Consistency enhancements
//         LDAPR & STLR with Immediate Offset instructions
let Predicates = [HasRCPC_IMMO] in {
defm STLURB     : BaseStoreUnscaleV84<"stlurb",  0b00, 0b00, GPR32>;
defm STLURH     : BaseStoreUnscaleV84<"stlurh",  0b01, 0b00, GPR32>;
defm STLURW     : BaseStoreUnscaleV84<"stlur",   0b10, 0b00, GPR32>;
defm STLURX     : BaseStoreUnscaleV84<"stlur",   0b11, 0b00, GPR64>;
defm LDAPURB    : BaseLoadUnscaleV84<"ldapurb",  0b00, 0b01, GPR32>;
defm LDAPURSBW  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b11, GPR32>;
defm LDAPURSBX  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b10, GPR64>;
defm LDAPURH    : BaseLoadUnscaleV84<"ldapurh",  0b01, 0b01, GPR32>;
defm LDAPURSHW  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b11, GPR32>;
defm LDAPURSHX  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b10, GPR64>;
defm LDAPUR     : BaseLoadUnscaleV84<"ldapur",   0b10, 0b01, GPR32>;
defm LDAPURSW   : BaseLoadUnscaleV84<"ldapursw", 0b10, 0b10, GPR64>;
defm LDAPURX    : BaseLoadUnscaleV84<"ldapur",   0b11, 0b01, GPR64>;
}

// Match all 64-bit-wide stores whose type is compatible with FPR64
def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let AddedComplexity = 10 in {

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian mode.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4bf16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

// Match all 128-bit-wide stores whose type is compatible with FPR128
def : Pat<(store (f128 FPR128:$Rt), (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
          (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian mode.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8bf16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

} // AddedComplexity = 10

// unscaled i64 truncating stores
def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
  (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
  (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
  (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecStoreULane0Pat<SDPatternOperator StoreOp,
                             ValueType VTy, ValueType STy,
                             SubRegIndex SubRegIdx, Instruction STR> {
  defm : VecStoreLane0Pat<am_unscaled128, StoreOp, VTy, STy, SubRegIdx, simm9, STR>;
}
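
// Same lane-0 trick as above, but through the unscaled (STUR*) addressing
// mode; e.g. lane 0 of a v4f32 at a negative offset becomes
// "stur s0, [x0, #-4]".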

let AddedComplexity = 19 in {
  defm : VecStoreULane0Pat<truncstorei16, v8i16, i32, hsub, STURHi>;
  defm : VecStoreULane0Pat<store,         v8f16, f16, hsub, STURHi>;
  defm : VecStoreULane0Pat<store,         v4i32, i32, ssub, STURSi>;
  defm : VecStoreULane0Pat<store,         v4f32, f32, ssub, STURSi>;
  defm : VecStoreULane0Pat<store,         v2i64, i64, dsub, STURDi>;
  defm : VecStoreULane0Pat<store,         v2f64, f64, dsub, STURDi>;
}

//---
// STR mnemonics fall back to STUR for negative or unaligned offsets.
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;

def : InstAlias<"strb $Rt, [$Rn, $offset]",
                (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"strh $Rt, [$Rn, $offset]",
                (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;

//---
// (unscaled immediate, unprivileged)
defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;

defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;

//---
// (immediate pre-indexed)
def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32z, "str",  pre_store, i32>;
def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64z, "str",  pre_store, i64>;
def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8Op,  "str",  pre_store, untyped>;
def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16Op, "str",  pre_store, f16>;
def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32Op, "str",  pre_store, f32>;
def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64Op, "str",  pre_store, f64>;
def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128Op, "str", pre_store, f128>;

def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32z, "strb", pre_truncsti8,  i32>;
def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32z, "strh", pre_truncsti16, i32>;

// truncstore i64
def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
           simm9:$off)>;
def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
            simm9:$off)>;
def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
            simm9:$off)>;

def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3352
3353//---
3354// (immediate post-indexed)
3355def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32z,  "str", post_store, i32>;
3356def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64z,  "str", post_store, i64>;
3357def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8Op,   "str", post_store, untyped>;
3358def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16Op,  "str", post_store, f16>;
3359def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32Op,  "str", post_store, f32>;
3360def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64Op,  "str", post_store, f64>;
3361def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128Op, "str", post_store, f128>;
3362
3363def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32z, "strb", post_truncsti8, i32>;
3364def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32z, "strh", post_truncsti16, i32>;
3365
// Truncating post-indexed stores from an i64 source: store the low 32 bits
// via the W sub-register.
3367def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3368  (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3369            simm9:$off)>;
3370def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3371  (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3372             simm9:$off)>;
3373def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3374  (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3375             simm9:$off)>;
3376
3377def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3378          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3379def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3380          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3381def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3382          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3383def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3384          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3385def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3386          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3387def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3388          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3389def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3390          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3391
3392def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3393          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3394def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3395          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3396def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3397          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3398def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3399          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3400def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3401          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3402def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3403          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3404def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3405          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3406
3407//===----------------------------------------------------------------------===//
3408// Load/store exclusive instructions.
3409//===----------------------------------------------------------------------===//
3410
3411def LDARW  : LoadAcquire   <0b10, 1, 1, 0, 1, GPR32, "ldar">;
3412def LDARX  : LoadAcquire   <0b11, 1, 1, 0, 1, GPR64, "ldar">;
3413def LDARB  : LoadAcquire   <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
3414def LDARH  : LoadAcquire   <0b01, 1, 1, 0, 1, GPR32, "ldarh">;
3415
3416def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
3417def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
3418def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
3419def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;
3420
3421def LDXRW  : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
3422def LDXRX  : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
3423def LDXRB  : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
3424def LDXRH  : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;
3425
3426def STLRW  : StoreRelease  <0b10, 1, 0, 0, 1, GPR32, "stlr">;
3427def STLRX  : StoreRelease  <0b11, 1, 0, 0, 1, GPR64, "stlr">;
3428def STLRB  : StoreRelease  <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
3429def STLRH  : StoreRelease  <0b01, 1, 0, 0, 1, GPR32, "stlrh">;
3430
3431def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
3432def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
3433def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
3434def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;
3435
3436def STXRW  : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
3437def STXRX  : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
3438def STXRB  : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
3439def STXRH  : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;
3440
3441def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
3442def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;
3443
3444def LDXPW  : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
3445def LDXPX  : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;
3446
3447def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
3448def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;
3449
3450def STXPW  : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
3451def STXPX  : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;
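// These exclusives are the building blocks of atomic RMW expansions. A
// typical compare-and-swap loop looks like this (illustrative only):
//   retry:
//     ldaxr  w8, [x0]        // load-acquire exclusive of the current value
//     cmp    w8, w1          // does it match the expected value?
//     b.ne   done
//     stlxr  w9, w2, [x0]    // store-release exclusive; w9 = 0 on success
//     cbnz   w9, retry       // nonzero status: monitor lost, try again
//   done: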
3452
3453let Predicates = [HasLOR] in {
3454  // v8.1a "Limited Order Region" extension load-acquire instructions
3455  def LDLARW  : LoadAcquire   <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
3456  def LDLARX  : LoadAcquire   <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
3457  def LDLARB  : LoadAcquire   <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
3458  def LDLARH  : LoadAcquire   <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;
3459
3460  // v8.1a "Limited Order Region" extension store-release instructions
3461  def STLLRW  : StoreRelease   <0b10, 1, 0, 0, 0, GPR32, "stllr">;
3462  def STLLRX  : StoreRelease   <0b11, 1, 0, 0, 0, GPR64, "stllr">;
3463  def STLLRB  : StoreRelease   <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
3464  def STLLRH  : StoreRelease   <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
3465}
3466
3467//===----------------------------------------------------------------------===//
3468// Scaled floating point to integer conversion instructions.
3469//===----------------------------------------------------------------------===//
3470
3471defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
3472defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
3473defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
3474defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
3475defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
3476defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
3477defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
3478defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
3479defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
3480defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
3481defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
3482defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
3483
3484multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
3485  def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
3486  def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
3487  def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
3488  def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
3489  def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
3490  def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;
3491
3492  def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
3493            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
3494  def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
3495            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
3496  def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
3497            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
3498  def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
3499            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
3500  def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
3501            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
3502  def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
3503            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
3504}
3505
3506defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
3507defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;
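// Illustrative fold from the fixed-point patterns above: a multiply by a
// power of two feeding the conversion, e.g.
//   (i32 (int_aarch64_neon_fcvtzs (fmul f32:$x, 65536.0)))
// selects the scaled form "fcvtzs w0, s0, #16", i.e. a conversion to Q16
// fixed point.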
3508
3509multiclass FPToIntegerPats<SDNode to_int, SDNode round, string INST> {
3510  def : Pat<(i32 (to_int (round f32:$Rn))),
3511            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
3512  def : Pat<(i64 (to_int (round f32:$Rn))),
3513            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
3514  def : Pat<(i32 (to_int (round f64:$Rn))),
3515            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
3516  def : Pat<(i64 (to_int (round f64:$Rn))),
3517            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
3518}
3519
3520defm : FPToIntegerPats<fp_to_sint, fceil,  "FCVTPS">;
3521defm : FPToIntegerPats<fp_to_uint, fceil,  "FCVTPU">;
3522defm : FPToIntegerPats<fp_to_sint, ffloor, "FCVTMS">;
3523defm : FPToIntegerPats<fp_to_uint, ffloor, "FCVTMU">;
3524defm : FPToIntegerPats<fp_to_sint, ftrunc, "FCVTZS">;
3525defm : FPToIntegerPats<fp_to_uint, ftrunc, "FCVTZU">;
3526defm : FPToIntegerPats<fp_to_sint, fround, "FCVTAS">;
3527defm : FPToIntegerPats<fp_to_uint, fround, "FCVTAU">;
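// Illustrative effect: C code such as "(int)ceilf(x)" folds to a single
// "fcvtps w0, s0" instead of an frintp followed by a separate fcvtzs.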
3528
3529let Predicates = [HasFullFP16] in {
3530  def : Pat<(i32 (lround f16:$Rn)),
3531            (!cast<Instruction>(FCVTASUWHr) f16:$Rn)>;
3532  def : Pat<(i64 (lround f16:$Rn)),
3533            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
3534  def : Pat<(i64 (llround f16:$Rn)),
3535            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
3536}
3537def : Pat<(i32 (lround f32:$Rn)),
3538          (!cast<Instruction>(FCVTASUWSr) f32:$Rn)>;
3539def : Pat<(i32 (lround f64:$Rn)),
3540          (!cast<Instruction>(FCVTASUWDr) f64:$Rn)>;
3541def : Pat<(i64 (lround f32:$Rn)),
3542          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
3543def : Pat<(i64 (lround f64:$Rn)),
3544          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
3545def : Pat<(i64 (llround f32:$Rn)),
3546          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
3547def : Pat<(i64 (llround f64:$Rn)),
3548          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
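// lround/llround round to nearest with ties away from zero, which is
// exactly FCVTAS's rounding; e.g. lround(2.5) == 3 via "fcvtas x0, d0"
// (illustrative).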
3549
3550//===----------------------------------------------------------------------===//
3551// Scaled integer to floating point conversion instructions.
3552//===----------------------------------------------------------------------===//
3553
3554defm SCVTF : IntegerToFP<0, "scvtf", any_sint_to_fp>;
3555defm UCVTF : IntegerToFP<1, "ucvtf", any_uint_to_fp>;
3556
3557//===----------------------------------------------------------------------===//
3558// Unscaled integer to floating point conversion instruction.
3559//===----------------------------------------------------------------------===//
3560
3561defm FMOV : UnscaledConversion<"fmov">;
3562
// Add pseudo ops for FMOV of 0.0 so we can mark them as isReMaterializable.
3564let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
3565def FMOVH0 : Pseudo<(outs FPR16:$Rd), (ins), [(set f16:$Rd, (fpimm0))]>,
3566    Sched<[WriteF]>, Requires<[HasFullFP16]>;
3567def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
3568    Sched<[WriteF]>;
3569def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
3570    Sched<[WriteF]>;
3571}
// Similarly, add assembly aliases for FMOV of #0.0.
3573def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
3574    Requires<[HasFullFP16]>;
3575def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
3576def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;
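// A move from the zero register yields the all-zeroes bit pattern, which is
// +0.0 in every FP format, so e.g. "fmov s0, wzr" (illustrative)
// materializes 0.0f without a literal-pool load.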
3577
3578//===----------------------------------------------------------------------===//
3579// Floating point conversion instruction.
3580//===----------------------------------------------------------------------===//
3581
3582defm FCVT : FPConversion<"fcvt">;
3583
3584//===----------------------------------------------------------------------===//
3585// Floating point single operand instructions.
3586//===----------------------------------------------------------------------===//
3587
3588defm FABS   : SingleOperandFPData<0b0001, "fabs", fabs>;
3589defm FMOV   : SingleOperandFPData<0b0000, "fmov">;
3590defm FNEG   : SingleOperandFPData<0b0010, "fneg", fneg>;
3591defm FRINTA : SingleOperandFPData<0b1100, "frinta", fround>;
3592defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
3593defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
3594defm FRINTN : SingleOperandFPData<0b1000, "frintn", int_aarch64_neon_frintn>;
3595defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;
3596
3597def : Pat<(v1f64 (int_aarch64_neon_frintn (v1f64 FPR64:$Rn))),
3598          (FRINTNDr FPR64:$Rn)>;
3599
3600defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
3601defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;
3602
3603let SchedRW = [WriteFDiv] in {
3604defm FSQRT  : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
3605}
3606
3607let Predicates = [HasFRInt3264] in {
3608  defm FRINT32Z : FRIntNNT<0b00, "frint32z">;
3609  defm FRINT64Z : FRIntNNT<0b10, "frint64z">;
3610  defm FRINT32X : FRIntNNT<0b01, "frint32x">;
3611  defm FRINT64X : FRIntNNT<0b11, "frint64x">;
3612} // HasFRInt3264
3613
3614let Predicates = [HasFullFP16] in {
3615  def : Pat<(i32 (lrint f16:$Rn)),
3616            (FCVTZSUWHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
3617  def : Pat<(i64 (lrint f16:$Rn)),
3618            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
3619  def : Pat<(i64 (llrint f16:$Rn)),
3620            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
3621}
3622def : Pat<(i32 (lrint f32:$Rn)),
3623          (FCVTZSUWSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
3624def : Pat<(i32 (lrint f64:$Rn)),
3625          (FCVTZSUWDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
3626def : Pat<(i64 (lrint f32:$Rn)),
3627          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
3628def : Pat<(i64 (lrint f64:$Rn)),
3629          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
3630def : Pat<(i64 (llrint f32:$Rn)),
3631          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
3632def : Pat<(i64 (llrint f64:$Rn)),
3633          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
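// lrint/llrint honour the current rounding mode, so they lower to FRINTX
// (round to integral, raising Inexact) followed by FCVTZS; the final
// conversion is exact because its input is already integral.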
3634
3635//===----------------------------------------------------------------------===//
3636// Floating point two operand instructions.
3637//===----------------------------------------------------------------------===//
3638
3639defm FADD   : TwoOperandFPData<0b0010, "fadd", fadd>;
3640let SchedRW = [WriteFDiv] in {
3641defm FDIV   : TwoOperandFPData<0b0001, "fdiv", fdiv>;
3642}
3643defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", fmaxnum>;
3644defm FMAX   : TwoOperandFPData<0b0100, "fmax", fmaximum>;
3645defm FMINNM : TwoOperandFPData<0b0111, "fminnm", fminnum>;
3646defm FMIN   : TwoOperandFPData<0b0101, "fmin", fminimum>;
3647let SchedRW = [WriteFMul] in {
3648defm FMUL   : TwoOperandFPData<0b0000, "fmul", fmul>;
3649defm FNMUL  : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
3650}
3651defm FSUB   : TwoOperandFPData<0b0011, "fsub", fsub>;
3652
3653def : Pat<(v1f64 (fmaximum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3654          (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
3655def : Pat<(v1f64 (fminimum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3656          (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
3657def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3658          (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
3659def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3660          (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;
3661
3662//===----------------------------------------------------------------------===//
3663// Floating point three operand instructions.
3664//===----------------------------------------------------------------------===//
3665
3666defm FMADD  : ThreeOperandFPData<0, 0, "fmadd", fma>;
3667defm FMSUB  : ThreeOperandFPData<0, 1, "fmsub",
3668     TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
3669defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
3670     TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
3671defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
3672     TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;
3673
3674// The following def pats catch the case where the LHS of an FMA is negated.
3675// The TriOpFrag above catches the case where the middle operand is negated.
3676
// N.b. FMSUB etc. have the accumulator at the *end* of the operand list,
// unlike the NEON variant.
3679
// First we handle "a + (-b)*c", i.e. a negated multiplicand, which maps to
// FMSUB:
3681
3682let Predicates = [HasNEON, HasFullFP16] in
3683def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, FPR16:$Ra)),
3684          (FMSUBHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;
3685
3686def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
3687          (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
3688
3689def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
3690          (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
3691
3692// Now it's time for "(-a) + (-b)*c"
3693
3694let Predicates = [HasNEON, HasFullFP16] in
3695def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, (fneg FPR16:$Ra))),
3696          (FNMADDHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;
3697
3698def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
3699          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
3700
3701def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
3702          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
3703
3704// And here "(-a) + b*(-c)"
3705
3706let Predicates = [HasNEON, HasFullFP16] in
3707def : Pat<(f16 (fma FPR16:$Rn, (fneg FPR16:$Rm), (fneg FPR16:$Ra))),
3708          (FNMADDHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;
3709
3710def : Pat<(f32 (fma FPR32:$Rn, (fneg FPR32:$Rm), (fneg FPR32:$Ra))),
3711          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
3712
3713def : Pat<(f64 (fma FPR64:$Rn, (fneg FPR64:$Rm), (fneg FPR64:$Ra))),
3714          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
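// In both FNMADD groups above the algebra is the same:
//   (-a) + (-b)*c == (-a) + b*(-c) == -(a + b*c)
// which is exactly what FNMADD computes.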
3715
3716//===----------------------------------------------------------------------===//
3717// Floating point comparison instructions.
3718//===----------------------------------------------------------------------===//
3719
3720defm FCMPE : FPComparison<1, "fcmpe", AArch64strict_fcmpe>;
3721defm FCMP  : FPComparison<0, "fcmp", AArch64any_fcmp>;
3722
3723//===----------------------------------------------------------------------===//
3724// Floating point conditional comparison instructions.
3725//===----------------------------------------------------------------------===//
3726
3727defm FCCMPE : FPCondComparison<1, "fccmpe">;
3728defm FCCMP  : FPCondComparison<0, "fccmp", AArch64fccmp>;
3729
3730//===----------------------------------------------------------------------===//
3731// Floating point conditional select instruction.
3732//===----------------------------------------------------------------------===//
3733
3734defm FCSEL : FPCondSelect<"fcsel">;
3735
// CSEL instructions on f128 types need to be handled by a
// pseudo-instruction since the eventual code will need to introduce basic
// blocks and control flow.
3739def F128CSEL : Pseudo<(outs FPR128:$Rd),
3740                      (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
3741                      [(set (f128 FPR128:$Rd),
3742                            (AArch64csel FPR128:$Rn, FPR128:$Rm,
3743                                       (i32 imm:$cond), NZCV))]> {
3744  let Uses = [NZCV];
3745  let usesCustomInserter = 1;
3746  let hasNoSchedulingInfo = 1;
3747}
3748
3749//===----------------------------------------------------------------------===//
3750// Instructions used for emitting unwind opcodes on ARM64 Windows.
3751//===----------------------------------------------------------------------===//
3752let isPseudo = 1 in {
3753  def SEH_StackAlloc : Pseudo<(outs), (ins i32imm:$size), []>, Sched<[]>;
3754  def SEH_SaveFPLR : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
3755  def SEH_SaveFPLR_X : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
3756  def SEH_SaveReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
3757  def SEH_SaveReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
3758  def SEH_SaveRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
3759  def SEH_SaveRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
3760  def SEH_SaveFReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
3761  def SEH_SaveFReg_X :  Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
3762  def SEH_SaveFRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
3763  def SEH_SaveFRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
3764  def SEH_SetFP : Pseudo<(outs), (ins), []>, Sched<[]>;
3765  def SEH_AddFP : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
3766  def SEH_Nop : Pseudo<(outs), (ins), []>, Sched<[]>;
3767  def SEH_PrologEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
3768  def SEH_EpilogStart : Pseudo<(outs), (ins), []>, Sched<[]>;
3769  def SEH_EpilogEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
3770}
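// These pseudos emit no code themselves; the asm printer lowers each one to
// the corresponding Windows unwind directive (.seh_stackalloc,
// .seh_save_fplr, .seh_endprologue, and so on).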
3771
//===----------------------------------------------------------------------===//
// Pseudo instructions for Windows EH
//===----------------------------------------------------------------------===//
3774let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
3775    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1, isPseudo = 1 in {
3776   def CLEANUPRET : Pseudo<(outs), (ins), [(cleanupret)]>, Sched<[]>;
3777   let usesCustomInserter = 1 in
3778     def CATCHRET : Pseudo<(outs), (ins am_brcond:$dst, am_brcond:$src), [(catchret bb:$dst, bb:$src)]>,
3779                    Sched<[]>;
3780}
3781
3782//===----------------------------------------------------------------------===//
3783// Floating point immediate move.
3784//===----------------------------------------------------------------------===//
3785
3786let isReMaterializable = 1 in {
3787defm FMOV : FPMoveImmediate<"fmov">;
3788}
3789
3790//===----------------------------------------------------------------------===//
3791// Advanced SIMD two vector instructions.
3792//===----------------------------------------------------------------------===//
3793
3794defm UABDL   : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
3795                                          int_aarch64_neon_uabd>;
3796// Match UABDL in log2-shuffle patterns.
3797def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
3798                           (zext (v8i8 V64:$opB))))),
3799          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
3800def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
3801               (v8i16 (add (sub (zext (v8i8 V64:$opA)),
3802                                (zext (v8i8 V64:$opB))),
3803                           (AArch64vashr v8i16:$src, (i32 15))))),
3804          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
3805def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 V128:$opA)),
3806                           (zext (extract_high_v16i8 V128:$opB))))),
3807          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
3808def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
3809               (v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
3810                                (zext (extract_high_v16i8 V128:$opB))),
3811                           (AArch64vashr v8i16:$src, (i32 15))))),
3812          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
3813def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
3814                           (zext (v4i16 V64:$opB))))),
3815          (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
3816def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 V128:$opA)),
3817                           (zext (extract_high_v8i16 V128:$opB))))),
3818          (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
3819def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
3820                           (zext (v2i32 V64:$opB))))),
3821          (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
3822def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 V128:$opA)),
3823                           (zext (extract_high_v4i32 V128:$opB))))),
3824          (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;
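// The xor/add/ashr forms above match the open-coded absolute-value idiom
// abs(d) == (d + s) ^ s with s == (d >>s 15), which earlier optimizations
// may produce in place of a plain abs node.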
3825
3826defm ABS    : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
3827defm CLS    : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
3828defm CLZ    : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
3829defm CMEQ   : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
3830defm CMGE   : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
3831defm CMGT   : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
3832defm CMLE   : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
3833defm CMLT   : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
3834defm CNT    : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
3835defm FABS   : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;
3836
3837defm FCMEQ  : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
3838defm FCMGE  : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
3839defm FCMGT  : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
3840defm FCMLE  : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
3841defm FCMLT  : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
3842defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
3843defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
3844defm FCVTL  : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
3845def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
3846          (FCVTLv4i16 V64:$Rn)>;
3847def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
3848                                                              (i64 4)))),
3849          (FCVTLv8i16 V128:$Rn)>;
3850def : Pat<(v2f64 (fpextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;
3851
3852def : Pat<(v4f32 (fpextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;
3853
3854defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
3855defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
3856defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
3857defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
3858defm FCVTN  : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
3859def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
3860          (FCVTNv4i16 V128:$Rn)>;
3861def : Pat<(concat_vectors V64:$Rd,
3862                          (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
3863          (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
3864def : Pat<(v2f32 (fpround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
3865def : Pat<(v4f16 (fpround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
3866def : Pat<(concat_vectors V64:$Rd, (v2f32 (fpround (v2f64 V128:$Rn)))),
3867          (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
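// Note: the concat_vectors patterns above select the "2" variants (fcvtn2),
// which narrow into the high half of the destination while the
// INSERT_SUBREG keeps the existing low half (V64:$Rd) live in dsub; the
// extract_subvector FCVTL pattern earlier is likewise fcvtl2 on the high
// half.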
3868defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
3869defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
3870defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
3871                                        int_aarch64_neon_fcvtxn>;
3872defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
3873defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;
3874
3875def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
3876def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
3877def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
3878def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
3879def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;
3880
3881def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
3882def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
3883def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
3884def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
3885def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;
3886
3887defm FNEG   : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
3888defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
3889defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", fround>;
3890defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
3891defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
3892defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", int_aarch64_neon_frintn>;
3893defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
3894defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
3895defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;
3896
3897let Predicates = [HasFRInt3264] in {
3898  defm FRINT32Z : FRIntNNTVector<0, 0, "frint32z">;
3899  defm FRINT64Z : FRIntNNTVector<0, 1, "frint64z">;
3900  defm FRINT32X : FRIntNNTVector<1, 0, "frint32x">;
3901  defm FRINT64X : FRIntNNTVector<1, 1, "frint64x">;
3902} // HasFRInt3264
3903
3904defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
3905defm FSQRT  : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
3906defm NEG    : SIMDTwoVectorBHSD<1, 0b01011, "neg",
3907                               UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
3908defm NOT    : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
3909// Aliases for MVN -> NOT.
3910def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
3911                (NOTv8i8 V64:$Vd, V64:$Vn)>;
3912def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
3913                (NOTv16i8 V128:$Vd, V128:$Vn)>;
3914
3915def : Pat<(AArch64neg (v8i8  V64:$Rn)),  (NEGv8i8  V64:$Rn)>;
3916def : Pat<(AArch64neg (v16i8 V128:$Rn)), (NEGv16i8 V128:$Rn)>;
3917def : Pat<(AArch64neg (v4i16 V64:$Rn)),  (NEGv4i16 V64:$Rn)>;
3918def : Pat<(AArch64neg (v8i16 V128:$Rn)), (NEGv8i16 V128:$Rn)>;
3919def : Pat<(AArch64neg (v2i32 V64:$Rn)),  (NEGv2i32 V64:$Rn)>;
3920def : Pat<(AArch64neg (v4i32 V128:$Rn)), (NEGv4i32 V128:$Rn)>;
3921def : Pat<(AArch64neg (v2i64 V128:$Rn)), (NEGv2i64 V128:$Rn)>;
3922
3923def : Pat<(AArch64not (v8i8 V64:$Rn)),   (NOTv8i8  V64:$Rn)>;
3924def : Pat<(AArch64not (v16i8 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
3925def : Pat<(AArch64not (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
3926def : Pat<(AArch64not (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
3927def : Pat<(AArch64not (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
3928def : Pat<(AArch64not (v1i64 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
3929def : Pat<(AArch64not (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
3930def : Pat<(AArch64not (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
3931
3932def : Pat<(vnot (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
3933def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
3934def : Pat<(vnot (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
3935def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
3936def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
3937
3938defm RBIT   : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", int_aarch64_neon_rbit>;
3939defm REV16  : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
3940defm REV32  : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
3941defm REV64  : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
3942defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
3943       BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >;
3944defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>;
3945defm SCVTF  : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
3946defm SHLL   : SIMDVectorLShiftLongBySizeBHS;
3947defm SQABS  : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
3948defm SQNEG  : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
3949defm SQXTN  : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
3950defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
3951defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
3952defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
3953       BinOpFrag<(add node:$LHS, (int_aarch64_neon_uaddlp node:$RHS))> >;
3954defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp",
3955                    int_aarch64_neon_uaddlp>;
3956defm UCVTF  : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
3957defm UQXTN  : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
3958defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
3959defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
3960defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
3961defm XTN    : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;
3962
3963def : Pat<(v4f16  (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
3964def : Pat<(v4f16  (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
3965def : Pat<(v4bf16 (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
3966def : Pat<(v4bf16 (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
3967def : Pat<(v8f16  (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
3968def : Pat<(v8f16  (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
3969def : Pat<(v8bf16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
3970def : Pat<(v8bf16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
3971def : Pat<(v2f32  (AArch64rev64 V64:$Rn)),  (REV64v2i32 V64:$Rn)>;
3972def : Pat<(v4f32  (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;
3973
3974// Patterns for vector long shift (by element width). These need to match all
3975// three of zext, sext and anyext so it's easier to pull the patterns out of the
3976// definition.
3977multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
3978  def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
3979            (SHLLv8i8 V64:$Rn)>;
3980  def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
3981            (SHLLv16i8 V128:$Rn)>;
3982  def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
3983            (SHLLv4i16 V64:$Rn)>;
3984  def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
3985            (SHLLv8i16 V128:$Rn)>;
3986  def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
3987            (SHLLv2i32 V64:$Rn)>;
3988  def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
3989            (SHLLv4i32 V128:$Rn)>;
3990}
3991
3992defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
3993defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
3994defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
3995
3996//===----------------------------------------------------------------------===//
3997// Advanced SIMD three vector instructions.
3998//===----------------------------------------------------------------------===//
3999
4000defm ADD     : SIMDThreeSameVector<0, 0b10000, "add", add>;
4001defm ADDP    : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>;
4002defm CMEQ    : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
4003defm CMGE    : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
4004defm CMGT    : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
4005defm CMHI    : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
4006defm CMHS    : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
4007defm CMTST   : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
4008defm FABD    : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>;
4009let Predicates = [HasNEON] in {
4010foreach VT = [ v2f32, v4f32, v2f64 ] in
4011def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
4012}
4013let Predicates = [HasNEON, HasFullFP16] in {
4014foreach VT = [ v4f16, v8f16 ] in
4015def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
4016}
4017defm FACGE   : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>;
4018defm FACGT   : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>;
4019defm FADDP   : SIMDThreeSameVectorFP<1,0,0b010,"faddp",int_aarch64_neon_faddp>;
4020defm FADD    : SIMDThreeSameVectorFP<0,0,0b010,"fadd", fadd>;
4021defm FCMEQ   : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
4022defm FCMGE   : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
4023defm FCMGT   : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
4024defm FDIV    : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", fdiv>;
4025defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
4026defm FMAXNM  : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", fmaxnum>;
4027defm FMAXP   : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>;
4028defm FMAX    : SIMDThreeSameVectorFP<0,0,0b110,"fmax", fmaximum>;
4029defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>;
4030defm FMINNM  : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", fminnum>;
4031defm FMINP   : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
4032defm FMIN    : SIMDThreeSameVectorFP<0,1,0b110,"fmin", fminimum>;
4033
4034// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
4035// instruction expects the addend first, while the fma intrinsic puts it last.
4036defm FMLA     : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
4037            TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
4038defm FMLS     : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls",
4039            TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
4040
4041// The following def pats catch the case where the LHS of an FMA is negated.
4042// The TriOpFrag above catches the case where the middle operand is negated.
4043def : Pat<(v2f32 (fma (fneg V64:$Rn), V64:$Rm, V64:$Rd)),
4044          (FMLSv2f32 V64:$Rd, V64:$Rn, V64:$Rm)>;
4045
4046def : Pat<(v4f32 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
4047          (FMLSv4f32 V128:$Rd, V128:$Rn, V128:$Rm)>;
4048
4049def : Pat<(v2f64 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
4050          (FMLSv2f64 V128:$Rd, V128:$Rn, V128:$Rm)>;
4051
4052defm FMULX    : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
4053defm FMUL     : SIMDThreeSameVectorFP<1,0,0b011,"fmul", fmul>;
4054defm FRECPS   : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
4055defm FRSQRTS  : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
4056defm FSUB     : SIMDThreeSameVectorFP<0,1,0b010,"fsub", fsub>;
4057
4058// MLA and MLS are generated in MachineCombine
4059defm MLA      : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla", null_frag>;
4060defm MLS      : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls", null_frag>;
4061
4062defm MUL      : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
4063defm PMUL     : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
4064defm SABA     : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
4065      TriOpFrag<(add node:$LHS, (int_aarch64_neon_sabd node:$MHS, node:$RHS))> >;
4066defm SABD     : SIMDThreeSameVectorBHS<0,0b01110,"sabd", int_aarch64_neon_sabd>;
4067defm SHADD    : SIMDThreeSameVectorBHS<0,0b00000,"shadd", int_aarch64_neon_shadd>;
4068defm SHSUB    : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
4069defm SMAXP    : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
4070defm SMAX     : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
4071defm SMINP    : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
4072defm SMIN     : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
4073defm SQADD    : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
4074defm SQDMULH  : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
4075defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
4076defm SQRSHL   : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
4077defm SQSHL    : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
4078defm SQSUB    : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
4079defm SRHADD   : SIMDThreeSameVectorBHS<0,0b00010,"srhadd", AArch64srhadd>;
4080defm SRSHL    : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
4081defm SSHL     : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
4082defm SUB      : SIMDThreeSameVector<1,0b10000,"sub", sub>;
4083defm UABA     : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
4084      TriOpFrag<(add node:$LHS, (int_aarch64_neon_uabd node:$MHS, node:$RHS))> >;
4085defm UABD     : SIMDThreeSameVectorBHS<1,0b01110,"uabd", int_aarch64_neon_uabd>;
4086defm UHADD    : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", int_aarch64_neon_uhadd>;
4087defm UHSUB    : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
4088defm UMAXP    : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
4089defm UMAX     : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
4090defm UMINP    : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
4091defm UMIN     : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
4092defm UQADD    : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
4093defm UQRSHL   : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
4094defm UQSHL    : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
4095defm UQSUB    : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
4096defm URHADD   : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", AArch64urhadd>;
4097defm URSHL    : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
4098defm USHL     : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
4099defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
4100                                                  int_aarch64_neon_sqadd>;
4101defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
4102                                                    int_aarch64_neon_sqsub>;
4103
4104// Extra saturate patterns, other than the intrinsics matches above
4105defm : SIMDThreeSameVectorExtraPatterns<"SQADD", saddsat>;
4106defm : SIMDThreeSameVectorExtraPatterns<"UQADD", uaddsat>;
4107defm : SIMDThreeSameVectorExtraPatterns<"SQSUB", ssubsat>;
4108defm : SIMDThreeSameVectorExtraPatterns<"UQSUB", usubsat>;
4109
4110defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
4111defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
4112                                  BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
4113defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
4114defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
4115                                  BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
4116defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;
4117
4118// Pseudo bitwise select pattern BSP.
4119// It is expanded into BSL/BIT/BIF after register allocation.
4120defm BSP : SIMDLogicalThreeVectorPseudo<TriOpFrag<(or (and node:$LHS, node:$MHS),
4121                                                      (and (vnot node:$LHS), node:$RHS))>>;
4122defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl">;
4123defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
4124defm BIF : SIMDLogicalThreeVectorTied<1, 0b11, "bif">;
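// All three tied forms compute the same bitwise select, e.g. for BSL
// (illustrative): Vd = (Vd & Vn) | (~Vd & Vm), with the mask in Vd; BIT and
// BIF differ only in which operand carries the mask. Expanding BSP after
// register allocation lets RA pick whichever variant avoids a copy.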
4125
4126def : Pat<(AArch64bsp (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
4127          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
4128def : Pat<(AArch64bsp (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
4129          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
4130def : Pat<(AArch64bsp (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
4131          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
4132def : Pat<(AArch64bsp (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
4133          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
4134
4135def : Pat<(AArch64bsp (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
4136          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
4137def : Pat<(AArch64bsp (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
4138          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
4139def : Pat<(AArch64bsp (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
4140          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
4141def : Pat<(AArch64bsp (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
4142          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
4143
4144def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
4145                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
4146def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
4147                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
4148def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
4149                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
4150def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
4151                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
4152
4153def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
4154                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
4155def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
4156                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
4157def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
4158                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
4159def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
4160                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
4161
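// cmls/cmlo below (and cmle/cmlt, facle/faclt further down) have no
// encodings of their own; each alias is the converse comparison with the
// source operands swapped.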
4162def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
4163                "|cmls.8b\t$dst, $src1, $src2}",
4164                (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
4165def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
4166                "|cmls.16b\t$dst, $src1, $src2}",
4167                (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
4168def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
4169                "|cmls.4h\t$dst, $src1, $src2}",
4170                (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
4171def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
4172                "|cmls.8h\t$dst, $src1, $src2}",
4173                (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
4174def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
4175                "|cmls.2s\t$dst, $src1, $src2}",
4176                (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
4177def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
4178                "|cmls.4s\t$dst, $src1, $src2}",
4179                (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
4180def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
4181                "|cmls.2d\t$dst, $src1, $src2}",
4182                (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
4183
4184def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
4185                "|cmlo.8b\t$dst, $src1, $src2}",
4186                (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
4187def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
4188                "|cmlo.16b\t$dst, $src1, $src2}",
4189                (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
4190def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
4191                "|cmlo.4h\t$dst, $src1, $src2}",
4192                (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
4193def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
4194                "|cmlo.8h\t$dst, $src1, $src2}",
4195                (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
4196def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
4197                "|cmlo.2s\t$dst, $src1, $src2}",
4198                (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
4199def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
4200                "|cmlo.4s\t$dst, $src1, $src2}",
4201                (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
4202def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
4203                "|cmlo.2d\t$dst, $src1, $src2}",
4204                (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
4205
4206def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
4207                "|cmle.8b\t$dst, $src1, $src2}",
4208                (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
4209def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
4210                "|cmle.16b\t$dst, $src1, $src2}",
4211                (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
4212def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
4213                "|cmle.4h\t$dst, $src1, $src2}",
4214                (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
4215def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
4216                "|cmle.8h\t$dst, $src1, $src2}",
4217                (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
4218def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
4219                "|cmle.2s\t$dst, $src1, $src2}",
4220                (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
4221def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
4222                "|cmle.4s\t$dst, $src1, $src2}",
4223                (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
4224def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
4225                "|cmle.2d\t$dst, $src1, $src2}",
4226                (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
4227
4228def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
4229                "|cmlt.8b\t$dst, $src1, $src2}",
4230                (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
4231def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
4232                "|cmlt.16b\t$dst, $src1, $src2}",
4233                (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
4234def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
4235                "|cmlt.4h\t$dst, $src1, $src2}",
4236                (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
4237def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
4238                "|cmlt.8h\t$dst, $src1, $src2}",
4239                (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
4240def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
4241                "|cmlt.2s\t$dst, $src1, $src2}",
4242                (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
4243def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
4244                "|cmlt.4s\t$dst, $src1, $src2}",
4245                (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
4246def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
4247                "|cmlt.2d\t$dst, $src1, $src2}",
4248                (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
4249
4250let Predicates = [HasNEON, HasFullFP16] in {
4251def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" #
4252                "|fcmle.4h\t$dst, $src1, $src2}",
4253                (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
4254def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" #
4255                "|fcmle.8h\t$dst, $src1, $src2}",
4256                (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
4257}
4258def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
4259                "|fcmle.2s\t$dst, $src1, $src2}",
4260                (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
4261def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
4262                "|fcmle.4s\t$dst, $src1, $src2}",
4263                (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
4264def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
4265                "|fcmle.2d\t$dst, $src1, $src2}",
4266                (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
4267
4268let Predicates = [HasNEON, HasFullFP16] in {
4269def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" #
4270                "|fcmlt.4h\t$dst, $src1, $src2}",
4271                (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
4272def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" #
4273                "|fcmlt.8h\t$dst, $src1, $src2}",
4274                (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
4275}
4276def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
4277                "|fcmlt.2s\t$dst, $src1, $src2}",
4278                (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
4279def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
4280                "|fcmlt.4s\t$dst, $src1, $src2}",
4281                (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
4282def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
4283                "|fcmlt.2d\t$dst, $src1, $src2}",
4284                (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
4285
4286let Predicates = [HasNEON, HasFullFP16] in {
4287def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
4288                "|facle.4h\t$dst, $src1, $src2}",
4289                (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
4290def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
4291                "|facle.8h\t$dst, $src1, $src2}",
4292                (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
4293}
4294def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
4295                "|facle.2s\t$dst, $src1, $src2}",
4296                (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
4297def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
4298                "|facle.4s\t$dst, $src1, $src2}",
4299                (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
4300def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
4301                "|facle.2d\t$dst, $src1, $src2}",
4302                (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
4303
4304let Predicates = [HasNEON, HasFullFP16] in {
4305def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
4306                "|faclt.4h\t$dst, $src1, $src2}",
4307                (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
4308def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
4309                "|faclt.8h\t$dst, $src1, $src2}",
4310                (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
4311}
4312def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
4313                "|faclt.2s\t$dst, $src1, $src2}",
4314                (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
4315def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
4316                "|faclt.4s\t$dst, $src1, $src2}",
4317                (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
4318def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
4319                "|faclt.2d\t$dst, $src1, $src2}",
4320                (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions.
//===----------------------------------------------------------------------===//

defm ADD      : SIMDThreeScalarD<0, 0b10000, "add", add>;
defm CMEQ     : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE     : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT     : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI     : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS     : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST    : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD     : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FABD64 FPR64:$Rn, FPR64:$Rm)>;
let Predicates = [HasFullFP16] in {
def : Pat<(fabs (fsub f16:$Rn, f16:$Rm)), (FABD16 f16:$Rn, f16:$Rm)>;
}
def : Pat<(fabs (fsub f32:$Rn, f32:$Rm)), (FABD32 f32:$Rn, f32:$Rm)>;
def : Pat<(fabs (fsub f64:$Rn, f64:$Rm)), (FABD64 f64:$Rn, f64:$Rm)>;
defm FACGE    : SIMDThreeScalarFPCmp<1, 0, 0b101, "facge",
                                     int_aarch64_neon_facge>;
defm FACGT    : SIMDThreeScalarFPCmp<1, 1, 0b101, "facgt",
                                     int_aarch64_neon_facgt>;
defm FCMEQ    : SIMDThreeScalarFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE    : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT    : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FMULX    : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx>;
defm FRECPS   : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps>;
defm FRSQRTS  : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts>;
defm SQADD    : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeScalarHS<  0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeScalarHS<  1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl", int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
defm SRSHL    : SIMDThreeScalarD<   0, 0b01010, "srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeScalarD<   0, 0b01000, "sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeScalarD<   1, 0b10000, "sub", sub>;
defm UQADD    : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl", int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
defm URSHL    : SIMDThreeScalarD<   1, 0b01010, "urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeScalarD<   1, 0b01000, "ushl", int_aarch64_neon_ushl>;
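// With RDM, a scalar sqrdmulh feeding a saturating add/subtract can be
// contracted to a single sqrdmlah/sqrdmlsh (see the patterns below).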
let Predicates = [HasRDM] in {
  defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
  defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
  def : Pat<(i32 (int_aarch64_neon_sqadd
                   (i32 FPR32:$Rd),
                   (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
                                                   (i32 FPR32:$Rm))))),
            (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
  def : Pat<(i32 (int_aarch64_neon_sqsub
                   (i32 FPR32:$Rd),
                   (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
                                                   (i32 FPR32:$Rm))))),
            (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
}

def : InstAlias<"cmls $dst, $src1, $src2",
                (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmle $dst, $src1, $src2",
                (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlo $dst, $src1, $src2",
                (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlt $dst, $src1, $src2",
                (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions (mixed operands).
//===----------------------------------------------------------------------===//
defm SQDMULL  : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
                                       int_aarch64_neon_sqdmulls_scalar>;
defm SQDMLAL  : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
defm SQDMLSL  : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;

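// A scalar sqdmull whose result feeds a saturating 64-bit add/subtract can
// likewise be matched as a single sqdmlal/sqdmlsl. For example (illustrative
// IR):
//   %p = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %n, i32 %m)
//   %r = call i64 @llvm.aarch64.neon.sqadd.i64(i64 %d, i64 %p)
// selects to a single "sqdmlal d0, s1, s2".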
def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                          (i32 FPR32:$Rm))))),
          (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                          (i32 FPR32:$Rm))))),
          (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;

//===----------------------------------------------------------------------===//
// Advanced SIMD two scalar instructions.
//===----------------------------------------------------------------------===//

defm ABS    : SIMDTwoScalarD<    0, 0b01011, "abs", abs>;
defm CMEQ   : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
defm FCMEQ  : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDFPTwoScalar<   0, 0, 0b11100, "fcvtas">;
defm FCVTAU : SIMDFPTwoScalar<   1, 0, 0b11100, "fcvtau">;
defm FCVTMS : SIMDFPTwoScalar<   0, 0, 0b11011, "fcvtms">;
defm FCVTMU : SIMDFPTwoScalar<   1, 0, 0b11011, "fcvtmu">;
defm FCVTNS : SIMDFPTwoScalar<   0, 0, 0b11010, "fcvtns">;
defm FCVTNU : SIMDFPTwoScalar<   1, 0, 0b11010, "fcvtnu">;
defm FCVTPS : SIMDFPTwoScalar<   0, 1, 0b11010, "fcvtps">;
defm FCVTPU : SIMDFPTwoScalar<   1, 1, 0b11010, "fcvtpu">;
def  FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
defm FCVTZS : SIMDFPTwoScalar<   0, 1, 0b11011, "fcvtzs">;
defm FCVTZU : SIMDFPTwoScalar<   1, 1, 0b11011, "fcvtzu">;
defm FRECPE : SIMDFPTwoScalar<   0, 1, 0b11101, "frecpe">;
defm FRECPX : SIMDFPTwoScalar<   0, 1, 0b11111, "frecpx">;
defm FRSQRTE : SIMDFPTwoScalar<  1, 1, 0b11101, "frsqrte">;
defm NEG    : SIMDTwoScalarD<    1, 0b01011, "neg",
                                 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm SCVTF  : SIMDFPTwoScalarCVT<   0, 0, 0b11101, "scvtf", AArch64sitof>;
defm SQABS  : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
                                     int_aarch64_neon_suqadd>;
defm UCVTF  : SIMDFPTwoScalarCVT<   1, 0, 0b11101, "ucvtf", AArch64uitof>;
defm UQXTN  : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
                                     int_aarch64_neon_usqadd>;

def : Pat<(AArch64neg (v1i64 V64:$Rn)), (NEGv1i64 V64:$Rn)>;

def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
          (FCVTASv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
          (FCVTAUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
          (FCVTMSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
          (FCVTMUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
          (FCVTNSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
          (FCVTNUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
          (FCVTPSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
          (FCVTPUv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frecpe (f16 FPR16:$Rn))),
          (FRECPEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
          (FRECPEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
          (FRECPEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
          (FRECPEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frecps (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRECPS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frecps (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRECPSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frecps (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRECPSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frecps (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRECPS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frecps (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRECPSv2f64 FPR128:$Rn, FPR128:$Rm)>;

def : Pat<(f16 (int_aarch64_neon_frecpx (f16 FPR16:$Rn))),
          (FRECPXv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
          (FRECPXv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
          (FRECPXv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frsqrte (f16 FPR16:$Rn))),
          (FRSQRTEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
          (FRSQRTEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
          (FRSQRTEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
          (FRSQRTEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frsqrts (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRSQRTS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frsqrts (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRSQRTSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frsqrts (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRSQRTSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frsqrts (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRSQRTS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frsqrts (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRSQRTSv2f64 FPR128:$Rn, FPR128:$Rm)>;

// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// Here are the patterns for 8- and 16-bit to float.
// 8-bits -> float.
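// For example (illustrative), (float)*(uint8_t *)(base + offset) can be
// selected as a byte load straight into an FP register followed by an
// in-place convert, with no GPR->FPR transfer:
//   ldr   b0, [x0, x1]
//   ucvtf s0, s0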
multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
                             SDPatternOperator loadop, Instruction UCVTF,
                             ROAddrMode ro, Instruction LDRW, Instruction LDRX,
                             SubRegIndex sub> {
  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                                 sub))>;

  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                                 sub))>;
}

defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
                         UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f32 (uint_to_fp (i32
                     (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bits -> float.
defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
                         UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bit conversions are handled by the target-specific DAG combine
// performIntToFpCombine.
// 64-bit integer to 32-bit floating point is not possible with a plain
// UCVTF on floating-point registers (source and destination must have the
// same size).

// Here are the patterns for 8-, 16-, 32-, and 64-bit to double.
// 8-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
                         UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f64 (uint_to_fp (i32
                    (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
                         UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, load,
                         UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
// 64-bit -> double is handled by the target-specific DAG combine
// performIntToFpCombine.

//===----------------------------------------------------------------------===//
// Advanced SIMD three different-sized vector instructions.
//===----------------------------------------------------------------------===//

defm ADDHN  : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
defm SUBHN  : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
defm PMULL  : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
defm SABAL  : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
                                             int_aarch64_neon_sabd>;
defm SABDL   : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
                                          int_aarch64_neon_sabd>;
defm SADDL   : SIMDLongThreeVectorBHS<   0, 0b0000, "saddl",
            BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
defm SADDW   : SIMDWideThreeVectorBHS<   0, 0b0001, "saddw",
                 BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
defm SMLAL   : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL   : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL   : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
                                               int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
                                               int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
                                     int_aarch64_neon_sqdmull>;
defm SSUBL   : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
                 BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
defm SSUBW   : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
                 BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
defm UABAL   : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
                                              int_aarch64_neon_uabd>;
defm UADDL   : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
                 BinOpFrag<(add (zext node:$LHS), (zext node:$RHS))>>;
defm UADDW   : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
                 BinOpFrag<(add node:$LHS, (zext node:$RHS))>>;
defm UMLAL   : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL   : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL   : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
defm USUBL   : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
                 BinOpFrag<(sub (zext node:$LHS), (zext node:$RHS))>>;
defm USUBW   : SIMDWideThreeVectorBHS<   1, 0b0011, "usubw",
                 BinOpFrag<(sub node:$LHS, (zext node:$RHS))>>;

// Additional patterns for SMULL and UMULL
multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V64:$Rn, V64:$Rm)>;
}

defm : Neon_mul_widen_patterns<AArch64smull, SMULLv8i8_v8i16,
  SMULLv4i16_v4i32, SMULLv2i32_v2i64>;
defm : Neon_mul_widen_patterns<AArch64umull, UMULLv8i8_v8i16,
  UMULLv4i16_v4i32, UMULLv2i32_v2i64>;

// Patterns for smull2/umull2.
multiclass Neon_mul_high_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (extract_high_v16i8 V128:$Rn),
                           (extract_high_v16i8 V128:$Rm))),
             (INST8B V128:$Rn, V128:$Rm)>;
  def : Pat<(v4i32 (opnode (extract_high_v8i16 V128:$Rn),
                           (extract_high_v8i16 V128:$Rm))),
             (INST4H V128:$Rn, V128:$Rm)>;
  def : Pat<(v2i64 (opnode (extract_high_v4i32 V128:$Rn),
                           (extract_high_v4i32 V128:$Rm))),
             (INST2S V128:$Rn, V128:$Rm)>;
}

defm : Neon_mul_high_patterns<AArch64smull, SMULLv16i8_v8i16,
  SMULLv8i16_v4i32, SMULLv4i32_v2i64>;
defm : Neon_mul_high_patterns<AArch64umull, UMULLv16i8_v8i16,
  UMULLv8i16_v4i32, UMULLv4i32_v2i64>;

// Additional patterns for SMLAL/SMLSL and UMLAL/UMLSL
multiclass Neon_mulacc_widen_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V128:$Rd, V64:$Rn, V64:$Rm)>;
}

defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;

// Patterns for 64-bit pmull
def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
          (PMULLv1i64 V64:$Rn, V64:$Rm)>;
def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
                                    (extractelt (v2i64 V128:$Rm), (i64 1))),
          (PMULLv2i64 V128:$Rn, V128:$Rm)>;
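// The second pattern above lets a pmull of the two high lanes be selected
// as pmull2 on the full 128-bit registers, without first extracting lane 1
// of each operand.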

// CodeGen patterns for addhn and subhn instructions, which can actually be
// written in LLVM IR without too much difficulty.
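// For example (illustrative IR for the v8i16 -> v8i8 addhn below):
//   %sum = add <8 x i16> %a, %b
//   %hi  = lshr <8 x i16> %sum, <i16 8, i16 8, i16 8, i16 8,
//                                i16 8, i16 8, i16 8, i16 8>
//   %res = trunc <8 x i16> %hi to <8 x i8>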

// ADDHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
          (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                             (i32 16))))),
          (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                             (i32 32))))),
          (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 8))))),
          (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 16))))),
          (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 32))))),
          (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

// SUBHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
          (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                             (i32 16))))),
          (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                             (i32 32))))),
          (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 8))))),
          (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 16))))),
          (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 32))))),
          (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

//----------------------------------------------------------------------------
// AdvSIMD bitwise extract from vector instruction.
//----------------------------------------------------------------------------

defm EXT : SIMDBitwiseExtract<"ext">;

def AdjustExtImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 + N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
multiclass ExtPat<ValueType VT64, ValueType VT128, int N> {
  def : Pat<(VT64 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
            (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
  def : Pat<(VT128 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
            (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
  // We use EXT to handle extract_subvector to copy the upper 64 bits of a
  // 128-bit vector.
  def : Pat<(VT64 (extract_subvector V128:$Rn, (i64 N))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
  // A 64-bit EXT of two halves of the same 128-bit register can be done as a
  // single 128-bit EXT.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 0)),
                              (extract_subvector V128:$Rn, (i64 N)),
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, imm:$imm), dsub)>;
  // A 64-bit EXT of the high half of a 128-bit register can be done using a
  // 128-bit EXT of the whole register with an adjustment to the immediate. The
  // top half of the other operand will be unset, but that doesn't matter as it
  // will not be used.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 N)),
                              V64:$Rm,
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn,
                                      (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                                      (AdjustExtImm imm:$imm)), dsub)>;
}

defm : ExtPat<v8i8, v16i8, 8>;
defm : ExtPat<v4i16, v8i16, 4>;
defm : ExtPat<v4f16, v8f16, 4>;
defm : ExtPat<v4bf16, v8bf16, 4>;
defm : ExtPat<v2i32, v4i32, 2>;
defm : ExtPat<v2f32, v4f32, 2>;
defm : ExtPat<v1i64, v2i64, 1>;
defm : ExtPat<v1f64, v2f64, 1>;

//----------------------------------------------------------------------------
// AdvSIMD zip vector
//----------------------------------------------------------------------------

defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;

//----------------------------------------------------------------------------
// AdvSIMD TBL/TBX instructions
//----------------------------------------------------------------------------

defm TBL : SIMDTableLookup<    0, "tbl">;
defm TBX : SIMDTableLookupTied<1, "tbx">;

def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBLv16i8One V128:$Ri, V128:$Rn)>;

def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
                  (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
                   (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;


//----------------------------------------------------------------------------
// AdvSIMD scalar CPY instruction
//----------------------------------------------------------------------------

defm CPY : SIMDScalarCPY<"cpy">;

//----------------------------------------------------------------------------
// AdvSIMD scalar pairwise instructions
//----------------------------------------------------------------------------

defm ADDP    : SIMDPairwiseScalarD<0, 0b11011, "addp">;
defm FADDP   : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
defm FMAXP   : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
defm FMINP   : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;
def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
          (FADDPv2i32p V64:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
          (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
          (FADDPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
          (FMAXNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
          (FMAXNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
          (FMAXPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
          (FMAXPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
          (FMINNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
          (FMINNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
          (FMINPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
          (FMINPv2i64p V128:$Rn)>;

//----------------------------------------------------------------------------
// AdvSIMD INS/DUP instructions
//----------------------------------------------------------------------------

def DUPv8i8gpr  : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;

def DUPv2i64lane : SIMDDup64FromElement;
def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
def DUPv8i8lane  : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;

// DUP from a 64-bit register to a 64-bit register is just a copy
def : Pat<(v1i64 (AArch64dup (i64 GPR64:$Rn))),
          (COPY_TO_REGCLASS GPR64:$Rn, FPR64)>;
def : Pat<(v1f64 (AArch64dup (f64 FPR64:$Rn))),
          (COPY_TO_REGCLASS FPR64:$Rn, FPR64)>;
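// (Illustrative: the v1i64 case above typically materialises as
// "fmov d0, x0", and the v1f64 case vanishes if the value is already in an
// FPR.)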

def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
          (v2f32 (DUPv2i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
          (v4f32 (DUPv4i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
          (v2f64 (DUPv2i64lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
            (i64 0)))>;
def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
          (v4f16 (DUPv4i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v4bf16 (AArch64dup (bf16 FPR16:$Rn))),
          (v4bf16 (DUPv4i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
          (v8f16 (DUPv8i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v8bf16 (AArch64dup (bf16 FPR16:$Rn))),
          (v8bf16 (DUPv8i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;

def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;

def : Pat<(v4bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
def : Pat<(v8bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;

def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
          (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;

// If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
// instruction even if the types don't match: we just have to remap the lane
// carefully. N.b. this trick only applies to truncations.
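// For example (illustrative): duplicating the low byte of h-lane 3 of a
// v8i16 can use "dup v0.8b, v1.b[6]", since (little-endian) i16 lane 3
// starts at i8 lane 6.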
def VecIndex_x2 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x4 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x8 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;

multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
                            ValueType Src128VT, ValueType ScalVT,
                            Instruction DUP, SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
                                                       imm:$idx)))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
                                                       imm:$idx)))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTruncPats<v8i8,   v4i16, v8i16, i32, DUPv8i8lane,  VecIndex_x2>;
defm : DUPWithTruncPats<v8i8,   v2i32, v4i32, i32, DUPv8i8lane,  VecIndex_x4>;
defm : DUPWithTruncPats<v4i16,  v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;

defm : DUPWithTruncPats<v16i8,  v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
defm : DUPWithTruncPats<v16i8,  v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
defm : DUPWithTruncPats<v8i16,  v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;

multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
                               SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
                                                       imm:$idx))))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
                                                       imm:$idx))))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTrunci64Pats<v8i8,  DUPv8i8lane,  VecIndex_x8>;
defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane, VecIndex_x2>;

defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;

// SMOV and UMOV definitions, with some extra patterns for convenience
defm SMOV : SMov;
defm UMOV : UMov;

def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx), i16),
          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx), i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
          (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;

def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
            VectorIndexB:$idx)))), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
            VectorIndexH:$idx)))), i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;

// Extracting i8 or i16 elements will have the zero-extend transformed to
// an 'and' mask by type legalization since neither i8 nor i16 are legal types
// for AArch64. Match these patterns here since UMOV already zeroes out the high
// bits of the destination register.
def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
               (i32 0xff)),
          (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
               (i32 0xffff)),
          (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;
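// For example (illustrative):
//   %e = extractelement <16 x i8> %v, i64 3
//   %z = zext i8 %e to i32
// selects to a single "umov w0, v0.b[3]".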

defm INS : SIMDIns;

def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v4bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
          (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
          (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
            (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                                  (i32 FPR32:$Rn), ssub))>;
def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
            (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                                  (i32 FPR32:$Rn), ssub))>;

def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
            (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                                  (i64 FPR64:$Rn), dsub))>;

def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;

def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;

def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v4bf16 (vector_insert (v4bf16 V64:$Rn),
            (bf16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(v8bf16 (vector_insert (v8bf16 V128:$Rn),
            (bf16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi32lane
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
              (i64 0)),
            dsub)>;
def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (INSvi32lane
            V128:$Rn, VectorIndexS:$imm,
            (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
            (i64 0))>;
def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
            (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
          (INSvi64lane
            V128:$Rn, VectorIndexD:$imm,
            (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
            (i64 0))>;

// Copy an element at a constant index in one vector into a constant indexed
// element of another.
// FIXME refactor to a shared class/def parameterized on vector type, vector
// index type and INS extension
def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
                   (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
                   VectorIndexB:$idx2)),
          (v16i8 (INSvi8lane
                   V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
          )>;
def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
                   (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
                   VectorIndexH:$idx2)),
          (v8i16 (INSvi16lane
                   V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
          )>;
def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
                   (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
                   VectorIndexS:$idx2)),
          (v4i32 (INSvi32lane
                   V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
          )>;
def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
                   (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
                   VectorIndexD:$idx2)),
          (v2i64 (INSvi64lane
                   V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
          )>;

multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
                                ValueType VTScal, Instruction INS> {
  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;

  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (INS V128:$src, imm:$Immd,
                 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
                                 imm:$Immd, V128:$Rn, imm:$Immn),
                            dsub)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (EXTRACT_SUBREG
                (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
                     (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
                dsub)>;
}

defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v8bf16, v4bf16, bf16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;


// Floating point vector extractions are codegen'd as either a sequence of
// subregister extractions, or a MOV (aka CPY here, alias for DUP) if
// the lane number is anything other than zero.
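// For example (illustrative): lane 0 of a v4f32 is just the s-subregister
// of the source, while lane 1 needs "mov s0, v0.s[1]".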
def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
          (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
          (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
          (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
def : Pat<(vector_extract (v8bf16 V128:$Rn), 0),
          (bf16 (EXTRACT_SUBREG V128:$Rn, hsub))>;


def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
          (f64 (CPYi64 V128:$Rn, VectorIndexD:$idx))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
          (f32 (CPYi32 V128:$Rn, VectorIndexS:$idx))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
          (f16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(vector_extract (v8bf16 V128:$Rn), VectorIndexH:$idx),
          (bf16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;

// All concat_vectors operations are canonicalised to act on i64 vectors for
// AArch64. In the general case we need an instruction, which might as well
// be INS.
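// For example (illustrative), a v2i64 concat whose low half is already in
// v0 becomes a single "mov v0.d[1], v1.d[0]".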
class ConcatPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
        (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
                     (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;

def : ConcatPat<v2i64, v1i64>;
def : ConcatPat<v2f64, v1f64>;
def : ConcatPat<v4i32, v2i32>;
def : ConcatPat<v4f32, v2f32>;
def : ConcatPat<v8i16, v4i16>;
def : ConcatPat<v8f16, v4f16>;
def : ConcatPat<v8bf16, v4bf16>;
def : ConcatPat<v16i8, v8i8>;

// If the high lanes are undef, though, we can just ignore them:
class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
        (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;

def : ConcatUndefPat<v2i64, v1i64>;
def : ConcatUndefPat<v2f64, v1f64>;
def : ConcatUndefPat<v4i32, v2i32>;
def : ConcatUndefPat<v4f32, v2f32>;
def : ConcatUndefPat<v8i16, v4i16>;
def : ConcatUndefPat<v16i8, v8i8>;

//----------------------------------------------------------------------------
// AdvSIMD across lanes instructions
//----------------------------------------------------------------------------

defm ADDV    : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
defm SMAXV   : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
defm SMINV   : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
defm UMAXV   : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
defm UMINV   : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
defm SADDLV  : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
defm UADDLV  : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
defm FMAXNMV : SIMDFPAcrossLanes<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
defm FMAXV   : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
defm FMINV   : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;

// Patterns for across-vector intrinsics that have a node equivalent, which
// returns a vector (with only the low lane defined) instead of a scalar.
// In effect, opNode is the same as (scalar_to_vector (IntNode)).
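// For example (illustrative): extracting lane 0 of (v16i8 (opNode %v))
// becomes the across-lanes instruction itself (e.g. "addv b0, v0.16b") plus
// a subregister read, with no separate lane move.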
multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
                                    SDPatternOperator opNode> {
// If a lane instruction caught the vector_extract around opNode, we can
// directly match the latter to the instruction.
def : Pat<(v8i8 (opNode V64:$Rn)),
          (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
def : Pat<(v16i8 (opNode V128:$Rn)),
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
def : Pat<(v4i16 (opNode V64:$Rn)),
          (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
def : Pat<(v8i16 (opNode V128:$Rn)),
          (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
def : Pat<(v4i32 (opNode V128:$Rn)),
          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;

// If none did, fall back to the explicit patterns, consuming the vector_extract.
5357def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
5358            (i32 0)), (i64 0))),
5359          (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
5360            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
5361            bsub), ssub)>;
5362def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
5363          (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5364            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
5365            bsub), ssub)>;
5366def : Pat<(i32 (vector_extract (insert_subvector undef,
5367            (v4i16 (opNode V64:$Rn)), (i32 0)), (i64 0))),
5368          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
5369            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
5370            hsub), ssub)>;
5371def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
5372          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
5373            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
5374            hsub), ssub)>;
5375def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
5376          (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
5377            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
5378            ssub), ssub)>;
5379
5380}
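
// Illustrative selection for the patterns above (register choices arbitrary):
// (i32 (vector_extract (v16i8 (AArch64uaddv V0)), 0)) becomes
//   addv b0, v0.16b
//   fmov w0, s0
// The reduction result lands in the bottom lane; the INSERT_SUBREG and
// EXTRACT_SUBREG nodes only describe sub-register placement and emit nothing.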
5381
5382multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
5383                                          SDPatternOperator opNode>
5384    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
5385// If there is a sign extension after this intrinsic, consume it, as smov
5386// has already performed it.
5387def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
5388            (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), i8)),
5389          (i32 (SMOVvi8to32
5390            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5391              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
5392            (i64 0)))>;
5393def : Pat<(i32 (sext_inreg (i32 (vector_extract
5394            (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
5395          (i32 (SMOVvi8to32
5396            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5397             (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
5398            (i64 0)))>;
5399def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
5400            (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), i16)),
5401          (i32 (SMOVvi16to32
5402           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5403            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
5404           (i64 0)))>;
5405def : Pat<(i32 (sext_inreg (i32 (vector_extract
5406            (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
5407          (i32 (SMOVvi16to32
5408            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5409             (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
5410            (i64 0)))>;
5411}
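
// Illustrative example (registers arbitrary): the sext_inreg of an 8-bit
// signed reduction folds into the SMOV, e.g.
//   addv b0, v0.8b
//   smov w0, v0.b[0]
// so no separate sxtb is emitted.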
5412
5413multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
5414                                            SDPatternOperator opNode>
5415    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
5416// If there is a masking operation that keeps only the bits actually
5417// generated by the instruction, consume it.
5418def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
5419            (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), maski8_or_more)),
5420      (i32 (EXTRACT_SUBREG
5421        (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5422          (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
5423        ssub))>;
5424def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
5425            maski8_or_more)),
5426        (i32 (EXTRACT_SUBREG
5427          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5428            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
5429          ssub))>;
5430def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
5431            (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), maski16_or_more)),
5432          (i32 (EXTRACT_SUBREG
5433            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5434              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
5435            ssub))>;
5436def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
5437            maski16_or_more)),
5438        (i32 (EXTRACT_SUBREG
5439          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5440            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
5441          ssub))>;
5442}
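
// Illustrative example (registers arbitrary): the AND with 0xff after an
// 8-bit unsigned reduction is consumed because the write to the B register
// already zeroed the rest of the vector register, e.g.
//   umaxv b0, v0.8b
//   fmov w0, s0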
5443
5444defm : SIMDAcrossLanesSignedIntrinsic<"ADDV",  AArch64saddv>;
5445// vaddv_[su]32 is special: it lowers to ADDP Vd.2S, Vn.2S, Vm.2S with Vn == Vm and returns Vd.s[0].
5446def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
5447          (ADDPv2i32 V64:$Rn, V64:$Rn)>;
5448
5449defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
5450// vaddv_[su]32 is special: it lowers to ADDP Vd.2S, Vn.2S, Vm.2S with Vn == Vm and returns Vd.s[0].
5451def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
5452          (ADDPv2i32 V64:$Rn, V64:$Rn)>;
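
// Illustrative expected code for the v2i32 special case (registers
// arbitrary):
//   addp v0.2s, v0.2s, v0.2s
//   fmov w0, s0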
5453
5454defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
5455def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
5456          (SMAXPv2i32 V64:$Rn, V64:$Rn)>;
5457
5458defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
5459def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
5460          (SMINPv2i32 V64:$Rn, V64:$Rn)>;
5461
5462defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
5463def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
5464          (UMAXPv2i32 V64:$Rn, V64:$Rn)>;
5465
5466defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
5467def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
5468          (UMINPv2i32 V64:$Rn, V64:$Rn)>;
5469
5470multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
5471  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
5472        (i32 (SMOVvi16to32
5473          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5474            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
5475          (i64 0)))>;
5476def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
5477        (i32 (SMOVvi16to32
5478          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5479           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
5480          (i64 0)))>;
5481
5482def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
5483          (i32 (EXTRACT_SUBREG
5484           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5485            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
5486           ssub))>;
5487def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
5488        (i32 (EXTRACT_SUBREG
5489          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5490           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
5491          ssub))>;
5492
5493def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
5494        (i64 (EXTRACT_SUBREG
5495          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5496           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
5497          dsub))>;
5498}
5499
5500multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
5501                                                Intrinsic intOp> {
5502  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
5503        (i32 (EXTRACT_SUBREG
5504          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5505            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
5506          ssub))>;
5507def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
5508        (i32 (EXTRACT_SUBREG
5509          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5510            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
5511          ssub))>;
5512
5513def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
5514          (i32 (EXTRACT_SUBREG
5515            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5516              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
5517            ssub))>;
5518def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
5519        (i32 (EXTRACT_SUBREG
5520          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5521            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
5522          ssub))>;
5523
5524def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
5525        (i64 (EXTRACT_SUBREG
5526          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5527            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
5528          dsub))>;
5529}
5530
5531defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
5532defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;
5533
5534// The vaddlv_s32 intrinsic gets mapped to SADDLP.
5535def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
5536          (i64 (EXTRACT_SUBREG
5537            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5538              (SADDLPv2i32_v1i64 V64:$Rn), dsub),
5539            dsub))>;
5540// The vaddlv_u32 intrinsic gets mapped to UADDLP.
5541def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
5542          (i64 (EXTRACT_SUBREG
5543            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5544              (UADDLPv2i32_v1i64 V64:$Rn), dsub),
5545            dsub))>;
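
// Illustrative expected code for the two patterns above (registers
// arbitrary):
//   vaddlv_s32: saddlp v0.1d, v0.2s ; fmov x0, d0
//   vaddlv_u32: uaddlp v0.1d, v0.2s ; fmov x0, d0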
5546
5547//------------------------------------------------------------------------------
5548// AdvSIMD modified immediate instructions
5549//------------------------------------------------------------------------------
5550
5551// AdvSIMD BIC
5552defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
5553// AdvSIMD ORR
5554defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;
5555
5556def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
5557def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
5558def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
5559def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
5560
5561def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
5562def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
5563def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
5564def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
5565
5566def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
5567def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
5568def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
5569def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
5570
5571def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
5572def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
5573def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
5574def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
5575
5576// AdvSIMD FMOV
5577def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
5578                                              "fmov", ".2d",
5579                       [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
5580def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64,  fpimm8,
5581                                              "fmov", ".2s",
5582                       [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
5583def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
5584                                              "fmov", ".4s",
5585                       [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
5586let Predicates = [HasNEON, HasFullFP16] in {
5587def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64,  fpimm8,
5588                                              "fmov", ".4h",
5589                       [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
5590def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
5591                                              "fmov", ".8h",
5592                       [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
5593} // Predicates = [HasNEON, HasFullFP16]
5594
5595// AdvSIMD MOVI
5596
5597// EDIT byte mask: scalar
5598let isReMaterializable = 1, isAsCheapAsAMove = 1 in
5599def MOVID      : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
5600                    [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
5601// The movi_edit node has the immediate value already encoded, so we use
5602// a plain imm0_255 here.
5603def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
5604          (MOVID imm0_255:$shift)>;
5605
5606// EDIT byte mask: 2d
5607
5608// The movi_edit node has the immediate value already encoded, so we use
5609// a plain imm0_255 in the pattern.
5610let isReMaterializable = 1, isAsCheapAsAMove = 1 in
5611def MOVIv2d_ns   : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
5612                                                simdimmtype10,
5613                                                "movi", ".2d",
5614                   [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;
5615
5616def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
5617def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
5618def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
5619def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;
5620
5621def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
5622def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
5623def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
5624def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;
5625
5626// Set 64-bit vectors to all 0/1 by extracting from a 128-bit register, as
5627// the extract is free and this gives better MachineCSE results.
5628def : Pat<(v1i64 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
5629def : Pat<(v2i32 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
5630def : Pat<(v4i16 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
5631def : Pat<(v8i8  immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
5632
5633def : Pat<(v1i64 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
5634def : Pat<(v2i32 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
5635def : Pat<(v4i16 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
5636def : Pat<(v8i8  immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
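
// Illustrative examples (registers arbitrary): the all-zeros and all-ones
// idioms above materialize as
//   movi v0.2d, #0
//   movi v0.2d, #0xffffffffffffffff
// with the 64-bit variants simply reading the d sub-register.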
5637
5638// EDIT per word & halfword: 2s, 4h, 4s, & 8h
5639let isReMaterializable = 1, isAsCheapAsAMove = 1 in
5640defm MOVI      : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;
5641
5642def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
5643def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
5644def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
5645def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
5646
5647def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
5648def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
5649def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
5650def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
5651
5652def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
5653          (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
5654def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
5655          (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
5656def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
5657          (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
5658def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
5659          (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;
5660
5661let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
5662// EDIT per word: 2s & 4s with MSL shifter
5663def MOVIv2s_msl  : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
5664                      [(set (v2i32 V64:$Rd),
5665                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
5666def MOVIv4s_msl  : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
5667                      [(set (v4i32 V128:$Rd),
5668                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
5669
5670// Per byte: 8b & 16b
5671def MOVIv8b_ns   : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64,  imm0_255,
5672                                                 "movi", ".8b",
5673                       [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;
5674
5675def MOVIv16b_ns  : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
5676                                                 "movi", ".16b",
5677                       [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
5678}
5679
5680// AdvSIMD MVNI
5681
5682// EDIT per word & halfword: 2s, 4h, 4s, & 8h
5683let isReMaterializable = 1, isAsCheapAsAMove = 1 in
5684defm MVNI      : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;
5685
5686def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
5687def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
5688def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
5689def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
5690
5691def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
5692def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
5693def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
5694def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
5695
5696def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
5697          (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
5698def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
5699          (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
5700def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
5701          (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
5702def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
5703          (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;
5704
5705// EDIT per word: 2s & 4s with MSL shifter
5706let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
5707def MVNIv2s_msl   : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
5708                      [(set (v2i32 V64:$Rd),
5709                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
5710def MVNIv4s_msl   : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
5711                      [(set (v4i32 V128:$Rd),
5712                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
5713}
5714
5715//----------------------------------------------------------------------------
5716// AdvSIMD indexed element
5717//----------------------------------------------------------------------------
5718
5719let hasSideEffects = 0 in {
5720  defm FMLA  : SIMDFPIndexedTied<0, 0b0001, "fmla">;
5721  defm FMLS  : SIMDFPIndexedTied<0, 0b0101, "fmls">;
5722}
5723
5724// NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
5725// instruction expects the addend first, while the intrinsic expects it last.
5726
5727// On the other hand, there are quite a few valid combinatorial options due to
5728// the commutativity of multiplication and the fact that (-x) * y = x * (-y).
5729defm : SIMDFPIndexedTiedPatterns<"FMLA",
5730           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
5731defm : SIMDFPIndexedTiedPatterns<"FMLA",
5732           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;
5733
5734defm : SIMDFPIndexedTiedPatterns<"FMLS",
5735           TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
5736defm : SIMDFPIndexedTiedPatterns<"FMLS",
5737           TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
5738defm : SIMDFPIndexedTiedPatterns<"FMLS",
5739           TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
5740defm : SIMDFPIndexedTiedPatterns<"FMLS",
5741           TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;
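
// The four FMLS fragments above are all equivalent by the identity
//   a - x*y = fma(-x, y, a) = fma(x, -y, a)
// together with the commutativity of the multiplicands, so whichever form the
// combiner produced still selects to a single fmls.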
5742
5743multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
5744  // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
5745  // and DUP scalar.
5746  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
5747                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
5748                                           VectorIndexS:$idx))),
5749            (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
5750  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
5751                           (v2f32 (AArch64duplane32
5752                                      (v4f32 (insert_subvector undef,
5753                                                 (v2f32 (fneg V64:$Rm)),
5754                                                 (i32 0))),
5755                                      VectorIndexS:$idx)))),
5756            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
5757                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
5758                               VectorIndexS:$idx)>;
5759  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
5760                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
5761            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
5762                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
5763
5764  // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
5765  // and DUP scalar.
5766  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
5767                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
5768                                           VectorIndexS:$idx))),
5769            (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
5770                               VectorIndexS:$idx)>;
5771  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
5772                           (v4f32 (AArch64duplane32
5773                                      (v4f32 (insert_subvector undef,
5774                                                 (v2f32 (fneg V64:$Rm)),
5775                                                 (i32 0))),
5776                                      VectorIndexS:$idx)))),
5777            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
5778                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
5779                               VectorIndexS:$idx)>;
5780  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
5781                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
5782            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
5783                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
5784
5785  // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
5786  // (DUPLANE from 64-bit would be trivial).
5787  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
5788                           (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
5789                                           VectorIndexD:$idx))),
5790            (FMLSv2i64_indexed
5791                V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexD:$idx)>;
5792  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
5793                           (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
5794            (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
5795                (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;
5796
5797  // 2 variants for 32-bit scalar version: extract from .2s or from .4s
5798  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
5799                         (vector_extract (v4f32 (fneg V128:$Rm)),
5800                                         VectorIndexS:$idx))),
5801            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
5802                V128:$Rm, VectorIndexS:$idx)>;
5803  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
5804                         (vector_extract (v4f32 (insert_subvector undef,
5805                                                    (v2f32 (fneg V64:$Rm)),
5806                                                    (i32 0))),
5807                                         VectorIndexS:$idx))),
5808            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
5809                (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;
5810
5811  // 1 variant for 64-bit scalar version: extract from .1d or from .2d
5812  def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
5813                         (vector_extract (v2f64 (fneg V128:$Rm)),
5814                                         VectorIndexS:$idx))),
5815            (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
5816                V128:$Rm, VectorIndexS:$idx)>;
5817}
5818
5819defm : FMLSIndexedAfterNegPatterns<
5820           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
5821defm : FMLSIndexedAfterNegPatterns<
5822           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;
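
// Illustrative example (registers arbitrary): even when the fneg was hoisted
// onto the duplicated operand, e.g. (fma V1, (dup (fneg V2[1])), V0), the
// patterns above still select the single instruction
//   fmls v0.2s, v1.2s, v2.s[1]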
5823
5824defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
5825defm FMUL  : SIMDFPIndexed<0, 0b1001, "fmul", fmul>;
5826
5827def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
5828          (FMULv2i32_indexed V64:$Rn,
5829            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
5830            (i64 0))>;
5831def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
5832          (FMULv4i32_indexed V128:$Rn,
5833            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
5834            (i64 0))>;
5835def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
5836          (FMULv2i64_indexed V128:$Rn,
5837            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
5838            (i64 0))>;
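
// Illustrative example (registers arbitrary): a multiply by a broadcast
// scalar such as (v4f32 (fmul V1, (AArch64dup (f32 S2)))) selects to the
// by-element form
//   fmul v0.4s, v1.4s, v2.s[0]
// instead of materializing the DUP.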
5839
5840defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
5841defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
5842
5843defm SQDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqdmulh_lane,
5844                                     int_aarch64_neon_sqdmulh_laneq>;
5845defm SQRDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqrdmulh_lane,
5846                                      int_aarch64_neon_sqrdmulh_laneq>;
5847
5848// Generated by MachineCombine
5849defm MLA   : SIMDVectorIndexedHSTied<1, 0b0000, "mla", null_frag>;
5850defm MLS   : SIMDVectorIndexedHSTied<1, 0b0100, "mls", null_frag>;
5851
5852defm MUL   : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
5853defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
5854    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
5855defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
5856    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
5857defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
5858                int_aarch64_neon_smull>;
5859defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
5860                                           int_aarch64_neon_sqadd>;
5861defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
5862                                           int_aarch64_neon_sqsub>;
5863defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
5864                                          int_aarch64_neon_sqadd>;
5865defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
5866                                          int_aarch64_neon_sqsub>;
5867defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
5868defm UMLAL   : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
5869    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
5870defm UMLSL   : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
5871    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
5872defm UMULL   : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
5873                int_aarch64_neon_umull>;
5874
5875// A scalar sqdmull with the second operand being a vector lane can be
5876// handled directly with the indexed instruction encoding.
5877def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
5878                                          (vector_extract (v4i32 V128:$Vm),
5879                                                           VectorIndexS:$idx)),
5880          (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
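
// Illustrative expected code (registers arbitrary):
//   sqdmull d0, s1, v2.s[1]
// i.e. the lane extract folds into the indexed sqdmull rather than going
// through a scalar register first.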
5881
5882//----------------------------------------------------------------------------
5883// AdvSIMD scalar shift instructions
5884//----------------------------------------------------------------------------
5885defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">;
5886defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">;
5887defm SCVTF  : SIMDFPScalarRShift<0, 0b11100, "scvtf">;
5888defm UCVTF  : SIMDFPScalarRShift<1, 0b11100, "ucvtf">;
5889// Codegen patterns for the above. We don't put these directly on the
5890// instructions because TableGen's type inference can't handle the truth.
5891// Having the same base pattern for fp <--> int totally freaks it out.
5892def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
5893          (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
5894def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
5895          (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
5896def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
5897          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
5898def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
5899          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
5900def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
5901                                            vecshiftR64:$imm)),
5902          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
5903def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
5904                                            vecshiftR64:$imm)),
5905          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
5906def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
5907          (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
5908def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
5909          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
5910def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
5911                                            vecshiftR64:$imm)),
5912          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
5913def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
5914          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
5915def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
5916                                            vecshiftR64:$imm)),
5917          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
5918def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
5919          (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
5920
5921// Patterns for FP16 intrinsics - a reg copy to/from the h subreg is required, as i16 is not supported.
5922
5923def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
5924          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
5925def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
5926          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
5927def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
5928          (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
5929def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
5930            (and FPR32:$Rn, (i32 65535)),
5931            vecshiftR16:$imm)),
5932          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
5933def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
5934          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
5935def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
5936          (UCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
5937def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR32:$imm)),
5938          (i32 (INSERT_SUBREG
5939            (i32 (IMPLICIT_DEF)),
5940            (FCVTZSh FPR16:$Rn, vecshiftR32:$imm),
5941            hsub))>;
5942def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
5943          (i64 (INSERT_SUBREG
5944            (i64 (IMPLICIT_DEF)),
5945            (FCVTZSh FPR16:$Rn, vecshiftR64:$imm),
5946            hsub))>;
5947def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR32:$imm)),
5948          (i32 (INSERT_SUBREG
5949            (i32 (IMPLICIT_DEF)),
5950            (FCVTZUh FPR16:$Rn, vecshiftR32:$imm),
5951            hsub))>;
5952def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR64:$imm)),
5953          (i64 (INSERT_SUBREG
5954            (i64 (IMPLICIT_DEF)),
5955            (FCVTZUh FPR16:$Rn, vecshiftR64:$imm),
5956            hsub))>;
5957def : Pat<(i32 (int_aarch64_neon_facge (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
5958          (i32 (INSERT_SUBREG
5959            (i32 (IMPLICIT_DEF)),
5960            (FACGE16 FPR16:$Rn, FPR16:$Rm),
5961            hsub))>;
5962def : Pat<(i32 (int_aarch64_neon_facgt (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
5963          (i32 (INSERT_SUBREG
5964            (i32 (IMPLICIT_DEF)),
5965            (FACGT16 FPR16:$Rn, FPR16:$Rm),
5966            hsub))>;
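
// Illustrative example (registers arbitrary, assuming fullfp16): for the f16
// fixed-point converts above only sub-register copies are involved, e.g.
//   scvtf h0, h0, #4
// with the integer input read from the h sub-register of the wider FPR.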
5967
5968defm SHL      : SIMDScalarLShiftD<   0, 0b01010, "shl", AArch64vshl>;
5969defm SLI      : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
5970defm SQRSHRN  : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
5971                                     int_aarch64_neon_sqrshrn>;
5972defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
5973                                     int_aarch64_neon_sqrshrun>;
5974defm SQSHLU   : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
5975defm SQSHL    : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
5976defm SQSHRN   : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
5977                                     int_aarch64_neon_sqshrn>;
5978defm SQSHRUN  : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
5979                                     int_aarch64_neon_sqshrun>;
5980defm SRI      : SIMDScalarRShiftDTied<   1, 0b01000, "sri">;
5981defm SRSHR    : SIMDScalarRShiftD<   0, 0b00100, "srshr", AArch64srshri>;
5982defm SRSRA    : SIMDScalarRShiftDTied<   0, 0b00110, "srsra",
5983    TriOpFrag<(add node:$LHS,
5984                   (AArch64srshri node:$MHS, node:$RHS))>>;
5985defm SSHR     : SIMDScalarRShiftD<   0, 0b00000, "sshr", AArch64vashr>;
5986defm SSRA     : SIMDScalarRShiftDTied<   0, 0b00010, "ssra",
5987    TriOpFrag<(add node:$LHS,
5988                   (AArch64vashr node:$MHS, node:$RHS))>>;
5989defm UQRSHRN  : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
5990                                     int_aarch64_neon_uqrshrn>;
5991defm UQSHL    : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
5992defm UQSHRN   : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
5993                                     int_aarch64_neon_uqshrn>;
5994defm URSHR    : SIMDScalarRShiftD<   1, 0b00100, "urshr", AArch64urshri>;
5995defm URSRA    : SIMDScalarRShiftDTied<   1, 0b00110, "ursra",
5996    TriOpFrag<(add node:$LHS,
5997                   (AArch64urshri node:$MHS, node:$RHS))>>;
5998defm USHR     : SIMDScalarRShiftD<   1, 0b00000, "ushr", AArch64vlshr>;
5999defm USRA     : SIMDScalarRShiftDTied<   1, 0b00010, "usra",
6000    TriOpFrag<(add node:$LHS,
6001                   (AArch64vlshr node:$MHS, node:$RHS))>>;
6002
6003//----------------------------------------------------------------------------
6004// AdvSIMD vector shift instructions
6005//----------------------------------------------------------------------------
6006defm FCVTZS:SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
6007defm FCVTZU:SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
6008defm SCVTF: SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
6009                                   int_aarch64_neon_vcvtfxs2fp>;
6010defm RSHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
6011                                         int_aarch64_neon_rshrn>;
6012defm SHL     : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
6013defm SHRN    : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
6014                          BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
6015defm SLI     : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", AArch64vsli>;
6016def : Pat<(v1i64 (AArch64vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
6017                                      (i32 vecshiftL64:$imm))),
6018          (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
6019defm SQRSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
6020                                         int_aarch64_neon_sqrshrn>;
6021defm SQRSHRUN: SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
6022                                         int_aarch64_neon_sqrshrun>;
6023defm SQSHLU : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
6024defm SQSHL  : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
6025defm SQSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
6026                                         int_aarch64_neon_sqshrn>;
6027defm SQSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
6028                                         int_aarch64_neon_sqshrun>;
6029defm SRI     : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", AArch64vsri>;
6030def : Pat<(v1i64 (AArch64vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
6031                                      (i32 vecshiftR64:$imm))),
6032          (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
6033defm SRSHR   : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
6034defm SRSRA   : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
6035                 TriOpFrag<(add node:$LHS,
6036                                (AArch64srshri node:$MHS, node:$RHS))> >;
6037defm SSHLL   : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
6038                BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;
6039
6040defm SSHR    : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
6041defm SSRA    : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
6042                TriOpFrag<(add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
6043defm UCVTF   : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
6044                        int_aarch64_neon_vcvtfxu2fp>;
6045defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
6046                                         int_aarch64_neon_uqrshrn>;
6047defm UQSHL   : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
6048defm UQSHRN  : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
6049                                         int_aarch64_neon_uqshrn>;
6050defm URSHR   : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
6051defm URSRA   : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
6052                TriOpFrag<(add node:$LHS,
6053                               (AArch64urshri node:$MHS, node:$RHS))> >;
6054defm USHLL   : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
6055                BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
6056defm USHR    : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
6057defm USRA    : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
6058                TriOpFrag<(add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;
6059
6060// SHRN patterns for when a logical right shift was used instead of arithmetic
6061// (the immediate is at most half the source element width, so no shifted-in
6062// sign bits can survive the truncation and the shift type doesn't matter).
6063def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
6064          (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
6065def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
6066          (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
6067def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
6068          (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;
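
// Illustrative example (registers arbitrary):
//   (v8i8 (trunc (AArch64vlshr (v8i16 V0), 5)))  ->  shrn v0.8b, v0.8h, #5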
6069
6070def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
6071                                 (trunc (AArch64vlshr (v8i16 V128:$Rn),
6072                                                    vecshiftR16Narrow:$imm)))),
6073          (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6074                           V128:$Rn, vecshiftR16Narrow:$imm)>;
6075def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
6076                                 (trunc (AArch64vlshr (v4i32 V128:$Rn),
6077                                                    vecshiftR32Narrow:$imm)))),
6078          (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6079                           V128:$Rn, vecshiftR32Narrow:$imm)>;
6080def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
6081                                 (trunc (AArch64vlshr (v2i64 V128:$Rn),
6082                                                    vecshiftR64Narrow:$imm)))),
6083          (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6084                           V128:$Rn, vecshiftR64Narrow:$imm)>;
6085
6086// Vector sign and zero extensions are implemented with SSHLL and USHLL.
6087// Anyexts are implemented as zexts.
6088def : Pat<(v8i16 (sext   (v8i8 V64:$Rn))),  (SSHLLv8i8_shift  V64:$Rn, (i32 0))>;
6089def : Pat<(v8i16 (zext   (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
6090def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
6091def : Pat<(v4i32 (sext   (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
6092def : Pat<(v4i32 (zext   (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
6093def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
6094def : Pat<(v2i64 (sext   (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
6095def : Pat<(v2i64 (zext   (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
6096def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
6097// Also match an extend from the upper half of a 128-bit source register.
6098def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6099          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
6100def : Pat<(v8i16 (zext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6101          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
6102def : Pat<(v8i16 (sext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6103          (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
6104def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6105          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
6106def : Pat<(v4i32 (zext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6107          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
6108def : Pat<(v4i32 (sext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6109          (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
6110def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6111          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
6112def : Pat<(v2i64 (zext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6113          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
6114def : Pat<(v2i64 (sext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6115          (SSHLLv4i32_shift V128:$Rn, (i32 0))>;
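
// Illustrative examples (registers arbitrary):
//   (v8i16 (sext (v8i8 V0)))                          -> sshll v0.8h, v0.8b, #0
//   (v8i16 (zext (extract_subvector (v16i8 Q0), 8)))  -> ushll2 v0.8h, v0.16b, #0
// The zero-shift forms also have the sxtl/uxtl aliases defined below.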
6116
6117// Vector shift sxtl aliases
6118def : InstAlias<"sxtl.8h $dst, $src1",
6119                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6120def : InstAlias<"sxtl $dst.8h, $src1.8b",
6121                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6122def : InstAlias<"sxtl.4s $dst, $src1",
6123                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6124def : InstAlias<"sxtl $dst.4s, $src1.4h",
6125                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6126def : InstAlias<"sxtl.2d $dst, $src1",
6127                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6128def : InstAlias<"sxtl $dst.2d, $src1.2s",
6129                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6130
6131// Vector shift sxtl2 aliases
6132def : InstAlias<"sxtl2.8h $dst, $src1",
6133                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6134def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
6135                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6136def : InstAlias<"sxtl2.4s $dst, $src1",
6137                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6138def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
6139                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6140def : InstAlias<"sxtl2.2d $dst, $src1",
6141                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6142def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
6143                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6144
6145// Vector shift uxtl aliases
6146def : InstAlias<"uxtl.8h $dst, $src1",
6147                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6148def : InstAlias<"uxtl $dst.8h, $src1.8b",
6149                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6150def : InstAlias<"uxtl.4s $dst, $src1",
6151                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6152def : InstAlias<"uxtl $dst.4s, $src1.4h",
6153                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6154def : InstAlias<"uxtl.2d $dst, $src1",
6155                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6156def : InstAlias<"uxtl $dst.2d, $src1.2s",
6157                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6158
6159// Vector shift uxtl2 aliases
6160def : InstAlias<"uxtl2.8h $dst, $src1",
6161                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6162def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
6163                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6164def : InstAlias<"uxtl2.4s $dst, $src1",
6165                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6166def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
6167                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6168def : InstAlias<"uxtl2.2d $dst, $src1",
6169                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6170def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
6171                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6172
6173// If an integer is about to be converted to a floating point value,
6174// just load it on the floating point unit.
6175// These patterns are more complex because floating point loads do not
6176// support sign extension.
6177// The sign extension has to be explicitly added and is only supported for
6178// one step: byte-to-half, half-to-word, word-to-doubleword.
6179// SCVTF GPR -> FPR is 9 cycles.
6180// SCVTF FPR -> FPR is 4 cycles.
6181// (sign extension with lengthen) SXTL FPR -> FPR is 2 cycles.
6182// Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
6183// and still be faster.
6184// However, this is not good for code size.
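
// Illustrative comparison (registers arbitrary) for a sign-extending i8 load
// feeding an f32 convert:
//   GPR route: ldrsb w8, [x0] ; scvtf s0, w8
//   FPR route: ldr b0, [x0] ; sshll v0.8h, v0.8b, #0 ;
//              sshll v0.4s, v0.4h, #0 ; scvtf s0, s0
// The FPR route is what the patterns below produce.
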
6185// 8-bits -> float. 2 size step-ups.
6186class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
6187  : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
6188        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
6189                            (SSHLLv4i16_shift
6190                              (f64
6191                                (EXTRACT_SUBREG
6192                                  (SSHLLv8i8_shift
6193                                    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6194                                        INST,
6195                                        bsub),
6196                                    0),
6197                                  dsub)),
6198                               0),
6199                             ssub)))>,
6200    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
6201
6202def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
6203                          (LDRBroW  GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
6204def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
6205                          (LDRBroX  GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
6206def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
6207                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
6208def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
6209                          (LDURBi GPR64sp:$Rn, simm9:$offset)>;
6210
6211// 16-bits -> float. 1 size step-up.
6212class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
6213  : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
6214        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
6215                            (SSHLLv4i16_shift
6216                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6217                                  INST,
6218                                  hsub),
6219                                0),
6220                            ssub)))>, Requires<[NotForCodeSize]>;
6221
6222def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
6223                           (LDRHroW   GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
6224def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
6225                           (LDRHroX   GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
6226def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
6227                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
6228def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
6229                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
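
// Illustrative expected code for the one-step case (registers arbitrary):
//   ldr h0, [x0]
//   sshll v0.4s, v0.4h, #0
//   scvtf s0, s0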
6230
6231// 32-bit to 32-bit is handled in the target-specific dag combine
6232// performIntToFpCombine.
6233// 64-bit integer to 32-bit floating point is not possible with
6234// SCVTF on floating point registers (both source and destination
6235// must have the same size).
6236
6237// Here are the patterns for 8, 16, 32, and 64-bits to double.
6238// 8-bits -> double. 3 size step-ups: give up.
6239// 16-bits -> double. 2 size step-ups.
6240class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
6241  : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
6242           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
6243                              (SSHLLv2i32_shift
6244                                 (f64
6245                                  (EXTRACT_SUBREG
6246                                    (SSHLLv4i16_shift
6247                                      (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6248                                        INST,
6249                                        hsub),
6250                                     0),
6251                                   dsub)),
6252                               0),
6253                             dsub)))>,
6254    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
6255
6256def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
6257                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
6258def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
6259                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
6260def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
6261                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
6262def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
6263                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
6264// 32-bits -> double. 1 size step-up.
6265class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
6266  : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
6267           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
6268                              (SSHLLv2i32_shift
6269                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6270                                  INST,
6271                                  ssub),
6272                               0),
6273                             dsub)))>, Requires<[NotForCodeSize]>;
6274
6275def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
6276                           (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
6277def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
6278                           (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
6279def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
6280                           (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
6281def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
6282                           (LDURSi GPR64sp:$Rn, simm9:$offset)>;
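
// Illustrative expected code (registers arbitrary):
//   ldr s0, [x0]
//   sshll v0.2d, v0.2s, #0
//   scvtf d0, d0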
6283
6284// 64-bit -> double is handled in the target-specific dag combine
6285// performIntToFpCombine.
6286
6287
6288//----------------------------------------------------------------------------
6289// AdvSIMD Load-Store Structure
6290//----------------------------------------------------------------------------
6291defm LD1 : SIMDLd1Multiple<"ld1">;
6292defm LD2 : SIMDLd2Multiple<"ld2">;
6293defm LD3 : SIMDLd3Multiple<"ld3">;
6294defm LD4 : SIMDLd4Multiple<"ld4">;
6295
6296defm ST1 : SIMDSt1Multiple<"st1">;
6297defm ST2 : SIMDSt2Multiple<"st2">;
6298defm ST3 : SIMDSt3Multiple<"st3">;
6299defm ST4 : SIMDSt4Multiple<"st4">;
6300
6301class Ld1Pat<ValueType ty, Instruction INST>
6302  : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;
6303
6304def : Ld1Pat<v16i8, LD1Onev16b>;
6305def : Ld1Pat<v8i16, LD1Onev8h>;
6306def : Ld1Pat<v4i32, LD1Onev4s>;
6307def : Ld1Pat<v2i64, LD1Onev2d>;
6308def : Ld1Pat<v8i8,  LD1Onev8b>;
6309def : Ld1Pat<v4i16, LD1Onev4h>;
6310def : Ld1Pat<v2i32, LD1Onev2s>;
6311def : Ld1Pat<v1i64, LD1Onev1d>;
6312
6313class St1Pat<ValueType ty, Instruction INST>
6314  : Pat<(store ty:$Vt, GPR64sp:$Rn),
6315        (INST ty:$Vt, GPR64sp:$Rn)>;
6316
6317def : St1Pat<v16i8, ST1Onev16b>;
6318def : St1Pat<v8i16, ST1Onev8h>;
6319def : St1Pat<v4i32, ST1Onev4s>;
6320def : St1Pat<v2i64, ST1Onev2d>;
6321def : St1Pat<v8i8,  ST1Onev8b>;
6322def : St1Pat<v4i16, ST1Onev4h>;
6323def : St1Pat<v2i32, ST1Onev2s>;
6324def : St1Pat<v1i64, ST1Onev1d>;
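
// Illustrative examples (registers arbitrary): whole-register loads and
// stores select to the single-structure forms, e.g.
//   (v16i8 (load x0))      -> ld1 { v0.16b }, [x0]
//   (store (v4i16 V0), x0) -> st1 { v0.4h }, [x0]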
6325
6326//---
6327// Single-element
6328//---
6329
6330defm LD1R          : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
6331defm LD2R          : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
6332defm LD3R          : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
6333defm LD4R          : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
6334let mayLoad = 1, hasSideEffects = 0 in {
6335defm LD1 : SIMDLdSingleBTied<0, 0b000,       "ld1", VecListOneb,   GPR64pi1>;
6336defm LD1 : SIMDLdSingleHTied<0, 0b010, 0,    "ld1", VecListOneh,   GPR64pi2>;
6337defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes,   GPR64pi4>;
6338defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned,   GPR64pi8>;
6339defm LD2 : SIMDLdSingleBTied<1, 0b000,       "ld2", VecListTwob,   GPR64pi2>;
6340defm LD2 : SIMDLdSingleHTied<1, 0b010, 0,    "ld2", VecListTwoh,   GPR64pi4>;
6341defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos,   GPR64pi8>;
6342defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod,   GPR64pi16>;
6343defm LD3 : SIMDLdSingleBTied<0, 0b001,       "ld3", VecListThreeb, GPR64pi3>;
6344defm LD3 : SIMDLdSingleHTied<0, 0b011, 0,    "ld3", VecListThreeh, GPR64pi6>;
6345defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
6346defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
6347defm LD4 : SIMDLdSingleBTied<1, 0b001,       "ld4", VecListFourb,  GPR64pi4>;
6348defm LD4 : SIMDLdSingleHTied<1, 0b011, 0,    "ld4", VecListFourh,  GPR64pi8>;
6349defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours,  GPR64pi16>;
6350defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd,  GPR64pi32>;
6351}
6352
6353def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
6354          (LD1Rv8b GPR64sp:$Rn)>;
6355def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
6356          (LD1Rv16b GPR64sp:$Rn)>;
6357def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
6358          (LD1Rv4h GPR64sp:$Rn)>;
6359def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
6360          (LD1Rv8h GPR64sp:$Rn)>;
6361def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
6362          (LD1Rv2s GPR64sp:$Rn)>;
6363def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
6364          (LD1Rv4s GPR64sp:$Rn)>;
6365def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
6366          (LD1Rv2d GPR64sp:$Rn)>;
6367def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
6368          (LD1Rv1d GPR64sp:$Rn)>;
6369// Grab the floating point version too
6370def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
6371          (LD1Rv2s GPR64sp:$Rn)>;
6372def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
6373          (LD1Rv4s GPR64sp:$Rn)>;
6374def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
6375          (LD1Rv2d GPR64sp:$Rn)>;
6376def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
6377          (LD1Rv1d GPR64sp:$Rn)>;
6378def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
6379          (LD1Rv4h GPR64sp:$Rn)>;
6380def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
6381          (LD1Rv8h GPR64sp:$Rn)>;
6382def : Pat<(v4bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
6383          (LD1Rv4h GPR64sp:$Rn)>;
6384def : Pat<(v8bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
6385          (LD1Rv8h GPR64sp:$Rn)>;
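// For example, duplicating a loaded f32 across a vector, as in
//   (v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
// selects to LD1Rv4s, i.e. "ld1r { v0.4s }, [x0]", which loads the scalar
// once and replicates it into every lane (register names illustrative only).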

class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne128:$Rd),
           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;
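// Ld1Lane128Pat (above) folds a scalar load straight into lane $idx of a
// 128-bit vector via the LD1 (single structure, lane) form, avoiding a
// separate load followed by a lane insert.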

def : Ld1Lane128Pat<extloadi8,  VectorIndexB, v16i8, i32, LD1i8>;
def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4i32, i32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4f32, f32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2i64, i64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2f64, f64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexH, v8f16, f16, LD1i16>;
def : Ld1Lane128Pat<load,       VectorIndexH, v8bf16, bf16, LD1i16>;

class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne64:$Rd),
           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (EXTRACT_SUBREG
            (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
                          VecIndex:$idx, GPR64sp:$Rn),
            dsub)>;
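// The 64-bit variant above must still use the Q-register form of LD1: the
// input is widened to 128 bits with SUBREG_TO_REG, the lane load is done
// there, and the D subregister of the result is extracted back out.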

def : Ld1Lane64Pat<extloadi8,  VectorIndexB, v8i8,  i32, LD1i8>;
def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2i32, i32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2f32, f32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexH, v4f16, f16, LD1i16>;
def : Ld1Lane64Pat<load,       VectorIndexH, v4bf16, bf16, LD1i16>;


defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
defm LD4 : SIMDLdSt4SingleAliases<"ld4">;

// Stores
defm ST1 : SIMDStSingleB<0, 0b000,       "st1", VecListOneb, GPR64pi1>;
defm ST1 : SIMDStSingleH<0, 0b010, 0,    "st1", VecListOneh, GPR64pi2>;
defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;

let AddedComplexity = 19 in
class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;
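// St1Lane128Pat (above) is the store-side mirror: one vector lane goes
// straight to memory with the ST1 (single structure, lane) form instead of
// a lane extract followed by a scalar store.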

def : St1Lane128Pat<truncstorei8,  VectorIndexB, v16i8, i32, ST1i8>;
def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
def : St1Lane128Pat<store,         VectorIndexS, v4i32, i32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexS, v4f32, f32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexD, v2i64, i64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexD, v2f64, f64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexH, v8f16, f16, ST1i16>;
def : St1Lane128Pat<store,         VectorIndexH, v8bf16, bf16, ST1i16>;

let AddedComplexity = 19 in
class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane64Pat<truncstorei8,  VectorIndexB, v8i8, i32, ST1i8>;
def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
def : St1Lane64Pat<store,         VectorIndexS, v2i32, i32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexS, v2f32, f32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexH, v4f16, f16, ST1i16>;
def : St1Lane64Pat<store,         VectorIndexH, v4bf16, bf16, ST1i16>;

multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                             ValueType VTy, ValueType STy, Instruction ST1,
                             int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}
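// In the post-indexed patterns above, a post-increment equal to the transfer
// size maps to the immediate form, which the _POST instructions encode with
// XZR as the offset register; a register increment maps directly to the
// register-offset form.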

defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
                        2>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
defm : St1LanePost64Pat<post_store, VectorIndexH, v4bf16, bf16, ST1i16_POST, 2>;

multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                             ValueType VTy, ValueType STy, Instruction ST1,
                             int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
                         1>;
defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
                         2>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
defm : St1LanePost128Pat<post_store, VectorIndexH, v8bf16, bf16, ST1i16_POST, 2>;

let mayStore = 1, hasSideEffects = 0 in {
defm ST2 : SIMDStSingleB<1, 0b000,       "st2", VecListTwob,   GPR64pi2>;
defm ST2 : SIMDStSingleH<1, 0b010, 0,    "st2", VecListTwoh,   GPR64pi4>;
defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos,   GPR64pi8>;
defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod,   GPR64pi16>;
defm ST3 : SIMDStSingleB<0, 0b001,       "st3", VecListThreeb, GPR64pi3>;
defm ST3 : SIMDStSingleH<0, 0b011, 0,    "st3", VecListThreeh, GPR64pi6>;
defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
defm ST4 : SIMDStSingleB<1, 0b001,       "st4", VecListFourb,  GPR64pi4>;
defm ST4 : SIMDStSingleH<1, 0b011, 0,    "st4", VecListFourh,  GPR64pi8>;
defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours,  GPR64pi16>;
defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd,  GPR64pi32>;
}

defm ST1 : SIMDLdSt1SingleAliases<"st1">;
defm ST2 : SIMDLdSt2SingleAliases<"st2">;
defm ST3 : SIMDLdSt3SingleAliases<"st3">;
defm ST4 : SIMDLdSt4SingleAliases<"st4">;

//----------------------------------------------------------------------------
// Crypto extensions
//----------------------------------------------------------------------------

let Predicates = [HasAES] in {
def AESErr   : AESTiedInst<0b0100, "aese",   int_aarch64_crypto_aese>;
def AESDrr   : AESTiedInst<0b0101, "aesd",   int_aarch64_crypto_aesd>;
def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
}

// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
// for AES fusion on some CPUs.
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def AESMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
                        Sched<[WriteV]>;
def AESIMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
                         Sched<[WriteV]>;
}

// Only use constrained versions of AES(I)MC instructions if they are paired with
// AESE/AESD.
def : Pat<(v16i8 (int_aarch64_crypto_aesmc
            (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
                                            (v16i8 V128:$src2))))),
          (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1),
                                             (v16i8 V128:$src2)))))>,
          Requires<[HasFuseAES]>;

def : Pat<(v16i8 (int_aarch64_crypto_aesimc
            (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
                                            (v16i8 V128:$src2))))),
          (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1),
                                              (v16i8 V128:$src2)))))>,
          Requires<[HasFuseAES]>;
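// As an illustration, the tied operands keep a fusable sequence such as
//   aese  v0.16b, v1.16b
//   aesmc v0.16b, v0.16b
// in the back-to-back, same-register form that AES-fusing cores look for
// (register assignments here are illustrative only).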

let Predicates = [HasSHA2] in {
def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c",   int_aarch64_crypto_sha1c>;
def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p",   int_aarch64_crypto_sha1p>;
def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m",   int_aarch64_crypto_sha1m>;
def SHA1SU0rrr   : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
def SHA256Hrrr   : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
def SHA256H2rrr  : SHATiedInstQQV<0b101, "sha256h2", int_aarch64_crypto_sha256h2>;
def SHA256SU1rrr : SHATiedInstVVV<0b110, "sha256su1", int_aarch64_crypto_sha256su1>;

def SHA1Hrr     : SHAInstSS<    0b0000, "sha1h",    int_aarch64_crypto_sha1h>;
def SHA1SU1rr   : SHATiedInstVV<0b0001, "sha1su1",  int_aarch64_crypto_sha1su1>;
def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0", int_aarch64_crypto_sha256su0>;
}

//----------------------------------------------------------------------------
// Compiler-pseudos
//----------------------------------------------------------------------------
// FIXME: Like for X86, these should go in their own separate .td file.

def def32 : PatLeaf<(i32 GPR32:$src), [{
  return isDef32(*N);
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;
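// Illustrative example (ADDWrr chosen arbitrarily as a def32 candidate):
//   (i64 (zext (i32 (ADDWrr GPR32:$a, GPR32:$b))))
// can become (SUBREG_TO_REG (i64 0), (ADDWrr $a, $b), sub_32) with no extra
// instruction, since a W-register write already zeroes bits [63:32].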

// For an anyext, we don't care what the high bits are, so we can perform an
// INSERT_SUBREG into an IMPLICIT_DEF.
def : Pat<(i64 (anyext GPR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;

// When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
// then assert the extension has happened.
def : Pat<(i64 (zext GPR32:$src)),
          (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
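// The (ORRWrs WZR, $src, 0) here is simply "mov wN, wM"; the W-register
// write clears the upper 32 bits, and SUBREG_TO_REG asserts that the upper
// bits of the result are zero.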

// To sign extend, we use a signed bitfield move instruction (SBFM) on the
// containing super-reg.
def : Pat<(i64 (sext GPR32:$src)),
   (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i8)),  (SBFMXri GPR64:$src, 0, 7)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i1)),  (SBFMXri GPR64:$src, 0, 0)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i8)),  (SBFMWri GPR32:$src, 0, 7)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i1)),  (SBFMWri GPR32:$src, 0, 0)>;

def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, (i64 (i32shift_a       imm0_31:$imm)),
                              (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                              (i64 (i64shift_sext_i8 imm0_63:$imm)))>;

def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, (i64 (i32shift_a        imm0_31:$imm)),
                              (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, (i64 (i64shift_a        imm0_63:$imm)),
                              (i64 (i64shift_sext_i16 imm0_63:$imm)))>;

def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 (i64shift_a        imm0_63:$imm)),
                   (i64 (i64shift_sext_i32 imm0_63:$imm)))>;

// sra patterns have an AddedComplexity of 10, so make sure we have a higher
// AddedComplexity for the following patterns since we want to match sext + sra
// patterns before we attempt to match a single sra node.
let AddedComplexity = 20 in {
// We support all sext + sra combinations which preserve at least one bit of the
// original value which is to be sign extended. E.g. we support shifts up to
// bitwidth-1 bits.
def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;

def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;

def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 imm0_31:$imm), 31)>;
} // AddedComplexity = 20
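// Worked example: (sra (sext_inreg GPR32:$x, i8), (i64 3)) matches the first
// pattern above and becomes (SBFMWri $x, 3, 7), i.e. "sbfx w0, w0, #3, #5":
// bits [7:3] of the byte, sign-extended, so at least one source bit survives.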

// To truncate, we can simply extract from a subregister.
def : Pat<(i32 (trunc GPR64sp:$src)),
          (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;

// __builtin_trap() uses the BRK instruction on AArch64.
def : Pat<(trap), (BRK 1)>;
def : Pat<(debugtrap), (BRK 0xF000)>, Requires<[IsWindows]>;

// Multiply high patterns which multiply the lower subvector using smull/umull
// and the upper subvector with smull2/umull2. Then shuffle the high parts of
// both results together.
def : Pat<(v16i8 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (SMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (SMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (SMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;

def : Pat<(v16i8 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (UMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (UMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (UMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
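// For instance, (v8i16 (mulhs V128:$Rn, V128:$Rm)) lowers to roughly:
//   smull  v2.4s, v0.4h, v1.4h   // products of the low four lanes
//   smull2 v3.4s, v0.8h, v1.8h   // products of the high four lanes
//   uzp2   v0.8h, v2.8h, v3.8h   // keep the high half of each 32-bit product
// (register assignments here are illustrative only).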

// Conversions within AdvSIMD types in the same register size are free.
// But because we need a consistent lane ordering, in big endian many
// conversions require one or more REV instructions.
//
// Consider a simple memory load followed by a bitconvert then a store.
//   v0 = load v2i32
//   v1 = BITCAST v2i32 v0 to v4i16
//        store v4i16 v1
//
// In big endian mode every memory access has an implicit byte swap. LDR and
// STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
// is, they treat the vector as a sequence of elements to be byte-swapped.
// The two pairs of instructions are fundamentally incompatible. We've decided
// to use LD1/ST1 only to simplify compiler implementation.
//
// LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
// the original code sequence:
//   v0 = load v2i32
//   v1 = REV v2i32                  (implicit)
//   v2 = BITCAST v2i32 v1 to v4i16
//   v3 = REV v4i16 v2               (implicit)
//        store v4i16 v3
//
// But this is now broken - the value stored is different to the value loaded
// due to lane reordering. To fix this, on every BITCAST we must perform two
// other REVs:
//   v0 = load v2i32
//   v1 = REV v2i32                  (implicit)
//   v2 = REV v2i32
//   v3 = BITCAST v2i32 v2 to v4i16
//   v4 = REV v4i16
//   v5 = REV v4i16 v4               (implicit)
//        store v4i16 v5
//
// This means an extra two instructions, but actually in most cases the two REV
// instructions can be combined into one. For example:
//   (REV64_2s (REV64_4h X)) === (REV32_4h X)
//
// There is also no 128-bit REV instruction. This must be synthesized with an
// EXT instruction.
//
// Most bitconverts require some sort of conversion. The only exceptions are:
//   a) Identity conversions -  vNfX <-> vNiX
//   b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
//

// Natural vector casts (64 bit)
def : Pat<(v8i8 (AArch64NvCast (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4bf16 (AArch64NvCast (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8 (AArch64NvCast (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4bf16 (AArch64NvCast (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8 (AArch64NvCast (v8i8 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4bf16 (AArch64NvCast (v8i8 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8 (AArch64NvCast (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4bf16 (AArch64NvCast (f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1f64 (AArch64NvCast (f64 FPR64:$src))), (v1f64 FPR64:$src)>;

def : Pat<(v8i8 (AArch64NvCast (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v2f32 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1f64 (AArch64NvCast (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;

// Natural vector casts (128 bit)
def : Pat<(v16i8 (AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (AArch64NvCast (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (AArch64NvCast (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (AArch64NvCast (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (AArch64NvCast (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v4f32 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (AArch64NvCast (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v2f64 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (AArch64NvCast (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i8  (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4bf16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;

def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8  (bitconvert GPR64:$Xn)),
                 (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4bf16 (bitconvert GPR64:$Xn)),
                  (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;

def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
          (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
}
def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;

def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
          (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
          (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
          (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;

let Predicates = [IsLE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
                             (v1i64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
                             (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))),
                             (v1i64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
                             (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))),
                             (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
                             (v1i64 (REV64v2i32 FPR64:$src))>;
}
def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))), (v2i32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
                             (v2i32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))),
                             (v2i32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
                             (v2i32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))),
                             (v2i32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
                             (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))),
                             (v4i16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
                             (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v4bf16 FPR64:$src))), (v4i16 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;

def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
                             (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))),
                             (v4f16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
                             (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;

def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))),
                             (v4bf16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))),
                             (v4bf16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))),
                             (v4bf16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))),
                             (v4bf16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))),
                             (v4bf16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))),
                             (v4bf16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))), (v8i8  FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))),
                             (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))),
                             (v8i8 (REV16v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))),
                             (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))),
                             (v8i8 (REV16v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))),
                             (v8i8 (REV16v8i8 FPR64:$src))>;
}

let Predicates = [IsLE] in {
def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))), (f64   FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))),
                             (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))),
                             (f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))),
                             (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))),
                             (f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))),
                             (f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))),
                             (f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(f64   (bitconvert (v1i64 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v1f64 FPR64:$src))), (f64   FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))), (v1f64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
                             (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
                             (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))),
                             (v1f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
                             (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
                             (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))),
                             (v1f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (f64   FPR64:$src))), (v1f64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))), (v2f32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
                             (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))),
                             (v2f32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
                             (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))),
                             (v2f32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
                                            (REV64v16i8 FPR128:$src), (i32 8)))>;
}

let Predicates = [IsLE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))),
                             (v2f64 (EXTv16i8 FPR128:$src,
                                              FPR128:$src, (i32 8)))>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
                             (v2f64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
                             (v2f64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
                             (v2f64 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))),
                             (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                    (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
                             (v4f32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
                             (v4f32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
                             (v4f32 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))), (v2i64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))),
                             (v2i64 (EXTv16i8 FPR128:$src,
                                              FPR128:$src, (i32 8)))>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
                             (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
                             (v2i64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
                             (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
}
def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))), (v4i32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))),
                             (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                              (REV64v4i32 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
                             (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
                             (v4i32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
                             (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))),
                             (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
                             (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
                             (v8i16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
                             (v8i16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
                             (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
                             (v8i16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v8bf16 FPR128:$src))), (v8i16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;

def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))),
                             (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
                             (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
                             (v8f16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
                             (v8f16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
                             (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
                             (v8f16 (REV32v8i16 FPR128:$src))>;

def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))),
                             (v8bf16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))),
                             (v8bf16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))),
                             (v8bf16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))),
                             (v8bf16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))),
                             (v8bf16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))),
                             (v8bf16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))), (v16i8 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))),
                             (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
                                              (REV64v16i8 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
                             (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
                             (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
                             (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
                             (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
}
7397
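// Extracting the low 64-bit half of a 128-bit vector is just a dsub
// subregister extract and needs no instruction. For example (illustrative
// IR):
//   %lo = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
// selects to a plain subregister copy of the D view of the source register.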
def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4bf16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;

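// Extracting the high 64-bit half is not a subregister extract, so these
// patterns first broadcast lane 1 of the v2i64 view to both lanes with
// DUPv2i64lane and then take dsub of the result.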
def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;

// A 64-bit subvector insert to the first 128-bit vector position
// is a subregister copy that needs no instruction.
multiclass InsertSubvectorUndef<ValueType Ty> {
  def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4bf16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
}

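// The subvector index can appear as either an i32 or an i64 constant, so the
// patterns are instantiated for both index types.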
defm : InsertSubvectorUndef<i32>;
defm : InsertSubvectorUndef<i64>;

// Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
// or v2f32.
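// For example (illustrative IR):
//   %e0 = extractelement <2 x i64> %v, i64 0
//   %e1 = extractelement <2 x i64> %v, i64 1
//   %r  = add i64 %e0, %e1
// selects to a single "addp d0, v0.2d" rather than two lane moves and a
// scalar add.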
def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
                    (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
           (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
                     (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
           (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
// vector_extract on 64-bit vectors gets promoted to a 128-bit vector,
// so we match on v4f32 here, not v2f32. This will also catch adding
// the low two lanes of a true v4f32 vector.
def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
                (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
          (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;

// Scalar 64-bit shifts in FPR64 registers.
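// e.g. a 64-bit int_aarch64_neon_ushl whose operands are already in FPR64
// becomes "ushl d0, d0, d1" directly, avoiding a round trip through the
// general-purpose register file.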
def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;

// Patterns for nontemporal/no-allocate stores.
// We have to resort to tricks to turn a single-input store into a store pair,
// because there is no single-input nontemporal store, only STNP.
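// For instance, a nontemporal store of a v2i64 value becomes
// "stnp d0, d1, [x0]": the low half via a dsub extract, the high half via a
// lane-1 copy (CPYi64).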
let Predicates = [IsLE] in {
let AddedComplexity = 15 in {
class NTStore128Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR128:$Rt),
        (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
      (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
              (CPYi64 FPR128:$Rt, (i64 1)),
              GPR64sp:$Rn, simm7s8:$offset)>;

def : NTStore128Pat<v2i64>;
def : NTStore128Pat<v4i32>;
def : NTStore128Pat<v8i16>;
def : NTStore128Pat<v16i8>;

class NTStore64Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR64:$Rt),
        (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
      (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
              (CPYi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
              GPR64sp:$Rn, simm7s4:$offset)>;

// FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
def : NTStore64Pat<v1f64>;
def : NTStore64Pat<v1i64>;
def : NTStore64Pat<v2i32>;
def : NTStore64Pat<v4i16>;
def : NTStore64Pat<v8i8>;

def : Pat<(nontemporalstore GPR64:$Rt,
            (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
          (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
                  GPR64sp:$Rn, simm7s4:$offset)>;
} // AddedComplexity=15
} // Predicates = [IsLE]

// Tail call return handling. These are all compiler pseudo-instructions,
// so no encoding information or anything like that.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
  def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  // Indirect tail-call with any register allowed, used by MachineOutliner when
  // this is proven safe.
  // FIXME: If we have to add any more hacks like this, we should instead relax
  // some verifier checks for outlined functions.
  def TCRETURNriALL : Pseudo<(outs), (ins GPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
  // Indirect tail-call limited to registers x16 and x17, the only registers
  // an indirect branch (BR) may use when its target begins with a "BTI c"
  // instruction.
  def TCRETURNriBTI : Pseudo<(outs), (ins rtcGPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
}

def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>,
      Requires<[NotUseBTI]>;
def : Pat<(AArch64tcret rtcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNriBTI rtcGPR64:$dst, imm:$FPDiff)>,
      Requires<[UseBTI]>;
def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;

def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;

// Extracting lane zero is a special case where we can just use a plain
// EXTRACT_SUBREG instruction, which will become FMOV. This is easier for the
// rest of the compiler (especially the register allocator and copy
// propagation) to reason about, so it is preferred whenever possible.
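// e.g. (i64 (extractelt (v2i64 V128:$V), 0)) becomes a single "fmov x0, d0",
// or no code at all if the value is consumed in the FP/SIMD register file.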
let AddedComplexity = 10 in {
  def : Pat<(i64 (extractelt (v2i64 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, dsub)>;
  def : Pat<(i32 (extractelt (v4i32 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, ssub)>;
  def : Pat<(i32 (extractelt (v2i32 V64:$V), (i64 0))), (EXTRACT_SUBREG V64:$V, ssub)>;
}

// dot_v4i8
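// Match a 4-byte dot product written out longhand: four byte loads from
// consecutive offsets, widened, multiplied pairwise, and summed. The whole
// tree is replaced by one 32-bit load per operand and a single [SU]DOT
// against a zeroed accumulator.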
class mul_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm, node:$offset),
          (mul (ldop (add node:$Rn, node:$offset)),
               (ldop (add node:$Rm, node:$offset)))>;
class mulz_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm),
          (mul (ldop node:$Rn), (ldop node:$Rm))>;

def load_v4i8 :
  OutPatFrag<(ops node:$R),
             (INSERT_SUBREG
              (v2i32 (IMPLICIT_DEF)),
              (i32 (COPY_TO_REGCLASS (LDRWui node:$R, (i64 0)), FPR32)),
              ssub)>;

class dot_v4i8<Instruction DOT, SDPatternOperator ldop> :
  Pat<(i32 (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 3)),
           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 2)),
           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 1)),
                (mulz_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm))))),
      (EXTRACT_SUBREG (i64 (DOT (DUPv2i32gpr WZR),
                                (load_v4i8 GPR64sp:$Rn),
                                (load_v4i8 GPR64sp:$Rm))),
                      sub_32)>, Requires<[HasDotProd]>;

// dot_v8i8
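// Match an 8-byte dot product: the two v4i16 halves of a widening multiply
// ([su]mull) are added and reduced with AArch64uaddv. This selects to one
// [SU]DOT into a zeroed v2i32 accumulator, followed by an ADDP to combine
// the two 32-bit partial sums (see VADDV_32 below).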
class ee_v8i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K),
          (v4i16 (extract_subvector (v8i16 (extend node:$V)), node:$K))>;

class mul_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K),
          (mulop (v4i16 (ee_v8i8<extend> node:$M, node:$K)),
                 (v4i16 (ee_v8i8<extend> node:$N, node:$K)))>;

class idot_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 0)),
                 (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 4))))),
           (i64 0)))>;
// vaddv_[su]32 is special: it selects to "ADDP Vd.2S, Vn.2S, Vm.2S" with
// Vn == Vm, and the result is then read back from Vd.s[0].
def VADDV_32 : OutPatFrag<(ops node:$R), (ADDPv2i32 node:$R, node:$R)>;

class odot_v8i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (EXTRACT_SUBREG
              (VADDV_32
               (i64 (DOT (DUPv2i32gpr WZR),
                         (v8i8 node:$Vm),
                         (v8i8 node:$Vn)))),
              sub_32)>;

class dot_v8i8<Instruction DOT, SDPatternOperator mulop,
               SDPatternOperator extend> :
  Pat<(idot_v8i8<mulop, extend> V64:$Vm, V64:$Vn),
      (odot_v8i8<DOT> V64:$Vm, V64:$Vn)>,
  Requires<[HasDotProd]>;

// dot_v16i8
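// Match the 16-byte variant: four v4i32 partial products (low/high half of
// each 64-bit subvector) summed and reduced. This becomes one [SU]DOT into
// a zeroed v4i32 accumulator plus an ADDV across the four lanes.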
class ee_v16i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K1, node:$K2),
          (v4i16 (extract_subvector
           (v8i16 (extend
            (v8i8 (extract_subvector node:$V, node:$K1)))), node:$K2))>;

class mul_v16i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K1, node:$K2),
          (v4i32
           (mulop (v4i16 (ee_v16i8<extend> node:$M, node:$K1, node:$K2)),
                  (v4i16 (ee_v16i8<extend> node:$N, node:$K1, node:$K2))))>;

class idot_v16i8<SDPatternOperator m, SDPatternOperator x> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 0)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 0))),
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 4)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 4)))))),
           (i64 0)))>;

class odot_v16i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (i32 (ADDVv4i32v
              (DOT (DUPv4i32gpr WZR), node:$Vm, node:$Vn)))>;

class dot_v16i8<Instruction DOT, SDPatternOperator mulop,
                SDPatternOperator extend> :
  Pat<(idot_v16i8<mulop, extend> V128:$Vm, V128:$Vn),
      (odot_v16i8<DOT> V128:$Vm, V128:$Vn)>,
  Requires<[HasDotProd]>;

let AddedComplexity = 10 in {
  def : dot_v4i8<SDOTv8i8, sextloadi8>;
  def : dot_v4i8<UDOTv8i8, zextloadi8>;
  def : dot_v8i8<SDOTv8i8, AArch64smull, sext>;
  def : dot_v8i8<UDOTv8i8, AArch64umull, zext>;
  def : dot_v16i8<SDOTv16i8, AArch64smull, sext>;
  def : dot_v16i8<UDOTv16i8, AArch64umull, zext>;

  // FIXME: add patterns to generate vector by element dot product.
  // FIXME: add SVE dot-product patterns.
}

include "AArch64InstrAtomics.td"
include "AArch64SVEInstrInfo.td"

include "AArch64InstrGISel.td"