//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// AArch64 Instruction definitions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
//
def HasV8_1a         : Predicate<"Subtarget->hasV8_1aOps()">,
                                 AssemblerPredicate<"HasV8_1aOps", "armv8.1a">;
def HasV8_2a         : Predicate<"Subtarget->hasV8_2aOps()">,
                                 AssemblerPredicate<"HasV8_2aOps", "armv8.2a">;
def HasV8_3a         : Predicate<"Subtarget->hasV8_3aOps()">,
                                 AssemblerPredicate<"HasV8_3aOps", "armv8.3a">;
def HasV8_4a         : Predicate<"Subtarget->hasV8_4aOps()">,
                                 AssemblerPredicate<"HasV8_4aOps", "armv8.4a">;
def HasV8_5a         : Predicate<"Subtarget->hasV8_5aOps()">,
                                 AssemblerPredicate<"HasV8_5aOps", "armv8.5a">;
def HasVH            : Predicate<"Subtarget->hasVH()">,
                       AssemblerPredicate<"FeatureVH", "vh">;

def HasLOR           : Predicate<"Subtarget->hasLOR()">,
                       AssemblerPredicate<"FeatureLOR", "lor">;

def HasPA            : Predicate<"Subtarget->hasPA()">,
                       AssemblerPredicate<"FeaturePA", "pa">;

def HasJS            : Predicate<"Subtarget->hasJS()">,
                       AssemblerPredicate<"FeatureJS", "jsconv">;

def HasCCIDX         : Predicate<"Subtarget->hasCCIDX()">,
                       AssemblerPredicate<"FeatureCCIDX", "ccidx">;

def HasComplxNum     : Predicate<"Subtarget->hasComplxNum()">,
                       AssemblerPredicate<"FeatureComplxNum", "complxnum">;

def HasNV            : Predicate<"Subtarget->hasNV()">,
                       AssemblerPredicate<"FeatureNV", "nv">;

def HasRASv8_4       : Predicate<"Subtarget->hasRASv8_4()">,
                       AssemblerPredicate<"FeatureRASv8_4", "rasv8_4">;

def HasMPAM          : Predicate<"Subtarget->hasMPAM()">,
                       AssemblerPredicate<"FeatureMPAM", "mpam">;

def HasDIT           : Predicate<"Subtarget->hasDIT()">,
                       AssemblerPredicate<"FeatureDIT", "dit">;

def HasTRACEV8_4     : Predicate<"Subtarget->hasTRACEV8_4()">,
                       AssemblerPredicate<"FeatureTRACEV8_4", "tracev8.4">;

def HasAM            : Predicate<"Subtarget->hasAM()">,
                       AssemblerPredicate<"FeatureAM", "am">;

def HasSEL2          : Predicate<"Subtarget->hasSEL2()">,
                       AssemblerPredicate<"FeatureSEL2", "sel2">;

def HasPMU           : Predicate<"Subtarget->hasPMU()">,
                       AssemblerPredicate<"FeaturePMU", "pmu">;

def HasTLB_RMI       : Predicate<"Subtarget->hasTLB_RMI()">,
                       AssemblerPredicate<"FeatureTLB_RMI", "tlb-rmi">;

def HasFMI           : Predicate<"Subtarget->hasFMI()">,
                       AssemblerPredicate<"FeatureFMI", "fmi">;

def HasRCPC_IMMO     : Predicate<"Subtarget->hasRCPCImm()">,
                       AssemblerPredicate<"FeatureRCPC_IMMO", "rcpc-immo">;

def HasFPARMv8       : Predicate<"Subtarget->hasFPARMv8()">,
                               AssemblerPredicate<"FeatureFPARMv8", "fp-armv8">;
def HasNEON          : Predicate<"Subtarget->hasNEON()">,
                                 AssemblerPredicate<"FeatureNEON", "neon">;
def HasCrypto        : Predicate<"Subtarget->hasCrypto()">,
                                 AssemblerPredicate<"FeatureCrypto", "crypto">;
def HasSM4           : Predicate<"Subtarget->hasSM4()">,
                                 AssemblerPredicate<"FeatureSM4", "sm4">;
def HasSHA3          : Predicate<"Subtarget->hasSHA3()">,
                                 AssemblerPredicate<"FeatureSHA3", "sha3">;
def HasSHA2          : Predicate<"Subtarget->hasSHA2()">,
                                 AssemblerPredicate<"FeatureSHA2", "sha2">;
def HasAES           : Predicate<"Subtarget->hasAES()">,
                                 AssemblerPredicate<"FeatureAES", "aes">;
def HasDotProd       : Predicate<"Subtarget->hasDotProd()">,
                                 AssemblerPredicate<"FeatureDotProd", "dotprod">;
def HasCRC           : Predicate<"Subtarget->hasCRC()">,
                                 AssemblerPredicate<"FeatureCRC", "crc">;
def HasLSE           : Predicate<"Subtarget->hasLSE()">,
                                 AssemblerPredicate<"FeatureLSE", "lse">;
def HasRAS           : Predicate<"Subtarget->hasRAS()">,
                                 AssemblerPredicate<"FeatureRAS", "ras">;
def HasRDM           : Predicate<"Subtarget->hasRDM()">,
                                 AssemblerPredicate<"FeatureRDM", "rdm">;
def HasPerfMon       : Predicate<"Subtarget->hasPerfMon()">;
def HasFullFP16      : Predicate<"Subtarget->hasFullFP16()">,
                                 AssemblerPredicate<"FeatureFullFP16", "fullfp16">;
def HasFP16FML       : Predicate<"Subtarget->hasFP16FML()">,
                                 AssemblerPredicate<"FeatureFP16FML", "fp16fml">;
def HasSPE           : Predicate<"Subtarget->hasSPE()">,
                                 AssemblerPredicate<"FeatureSPE", "spe">;
def HasFuseAES       : Predicate<"Subtarget->hasFuseAES()">,
                                 AssemblerPredicate<"FeatureFuseAES",
                                 "fuse-aes">;
def HasSVE           : Predicate<"Subtarget->hasSVE()">,
                                 AssemblerPredicate<"FeatureSVE", "sve">;
def HasSVE2          : Predicate<"Subtarget->hasSVE2()">,
                                 AssemblerPredicate<"FeatureSVE2", "sve2">;
def HasSVE2AES       : Predicate<"Subtarget->hasSVE2AES()">,
                                 AssemblerPredicate<"FeatureSVE2AES", "sve2-aes">;
def HasSVE2SM4       : Predicate<"Subtarget->hasSVE2SM4()">,
                                 AssemblerPredicate<"FeatureSVE2SM4", "sve2-sm4">;
def HasSVE2SHA3      : Predicate<"Subtarget->hasSVE2SHA3()">,
                                 AssemblerPredicate<"FeatureSVE2SHA3", "sve2-sha3">;
def HasSVE2BitPerm   : Predicate<"Subtarget->hasSVE2BitPerm()">,
                                 AssemblerPredicate<"FeatureSVE2BitPerm", "sve2-bitperm">;
def HasRCPC          : Predicate<"Subtarget->hasRCPC()">,
                                 AssemblerPredicate<"FeatureRCPC", "rcpc">;
def HasAltNZCV       : Predicate<"Subtarget->hasAlternativeNZCV()">,
                       AssemblerPredicate<"FeatureAltFPCmp", "altnzcv">;
def HasFRInt3264     : Predicate<"Subtarget->hasFRInt3264()">,
                       AssemblerPredicate<"FeatureFRInt3264", "frint3264">;
def HasSB            : Predicate<"Subtarget->hasSB()">,
                       AssemblerPredicate<"FeatureSB", "sb">;
def HasPredRes       : Predicate<"Subtarget->hasPredRes()">,
                       AssemblerPredicate<"FeaturePredRes", "predres">;
def HasCCDP          : Predicate<"Subtarget->hasCCDP()">,
                       AssemblerPredicate<"FeatureCacheDeepPersist", "ccdp">;
def HasBTI           : Predicate<"Subtarget->hasBTI()">,
                       AssemblerPredicate<"FeatureBranchTargetId", "bti">;
def HasMTE           : Predicate<"Subtarget->hasMTE()">,
                       AssemblerPredicate<"FeatureMTE", "mte">;
def HasTME           : Predicate<"Subtarget->hasTME()">,
                       AssemblerPredicate<"FeatureTME", "tme">;
def HasETE           : Predicate<"Subtarget->hasETE()">,
                       AssemblerPredicate<"FeatureETE", "ete">;
def HasTRBE          : Predicate<"Subtarget->hasTRBE()">,
                       AssemblerPredicate<"FeatureTRBE", "trbe">;
def IsLE             : Predicate<"Subtarget->isLittleEndian()">;
def IsBE             : Predicate<"!Subtarget->isLittleEndian()">;
def IsWindows        : Predicate<"Subtarget->isTargetWindows()">;
def UseAlternateSExtLoadCVTF32
    : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;

def UseNegativeImmediates
    : Predicate<"false">, AssemblerPredicate<"!FeatureNoNegativeImmediates",
                                             "NegativeImmediates">;

def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER",
                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                       SDTCisInt<1>]>>;


//===----------------------------------------------------------------------===//
// AArch64-specific DAG Nodes.
//
// SDTBinaryArithWithFlagsOut - RES, FLAGS = op LHS, RHS
def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
                                              [SDTCisSameAs<0, 2>,
                                               SDTCisSameAs<0, 3>,
                                               SDTCisInt<0>, SDTCisVT<1, i32>]>;

// SDTBinaryArithWithFlagsIn - RES = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
                                            [SDTCisSameAs<0, 1>,
                                             SDTCisSameAs<0, 2>,
                                             SDTCisInt<0>,
                                             SDTCisVT<3, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                            [SDTCisSameAs<0, 2>,
                                             SDTCisSameAs<0, 3>,
                                             SDTCisInt<0>,
                                             SDTCisVT<1, i32>,
                                             SDTCisVT<4, i32>]>;

def SDT_AArch64Brcond  : SDTypeProfile<0, 3,
                                     [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
                                      SDTCisVT<2, i32>]>;
def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
                                        SDTCisVT<2, OtherVT>]>;


def SDT_AArch64CSel  : SDTypeProfile<1, 4,
                                   [SDTCisSameAs<0, 1>,
                                    SDTCisSameAs<0, 2>,
                                    SDTCisInt<3>,
                                    SDTCisVT<4, i32>]>;
def SDT_AArch64CCMP : SDTypeProfile<1, 5,
                                    [SDTCisVT<0, i32>,
                                     SDTCisInt<1>,
                                     SDTCisSameAs<1, 2>,
                                     SDTCisInt<3>,
                                     SDTCisInt<4>,
                                     SDTCisVT<5, i32>]>;
def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
                                     [SDTCisVT<0, i32>,
                                      SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>,
                                      SDTCisInt<3>,
                                      SDTCisInt<4>,
                                      SDTCisVT<5, i32>]>;
def SDT_AArch64FCmp   : SDTypeProfile<0, 2,
                                   [SDTCisFP<0>,
                                    SDTCisSameAs<0, 1>]>;
def SDT_AArch64Dup   : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDT_AArch64DupLane   : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
def SDT_AArch64Insr  : SDTypeProfile<1, 2, [SDTCisVec<0>]>;
def SDT_AArch64Zip   : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                          SDTCisSameAs<0, 1>,
                                          SDTCisSameAs<0, 2>]>;
def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisInt<2>, SDTCisInt<3>]>;
def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                          SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;

def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
def SDT_AArch64fcmp  : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>]>;
def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>,
                                           SDTCisSameAs<0,3>]>;
def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;

def SDT_AArch64ITOF  : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;

def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
                                                 SDTCisPtrTy<1>]>;

def SDT_AArch64ldp : SDTypeProfile<2, 1, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stp : SDTypeProfile<0, 3, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;

// Generates the general dynamic sequences, i.e.
//  adrp  x0, :tlsdesc:var
//  ldr   x1, [x0, #:tlsdesc_lo12:var]
//  add   x0, x0, #:tlsdesc_lo12:var
//  .tlsdesccall var
//  blr   x1

// (the TPIDR_EL0 offset is put directly in X0, hence no "result" here)
// The single operand is the address of the variable being accessed.
def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0, 1,
                                          [SDTCisPtrTy<0>]>;

def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
                                        [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
                                         SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
                                         SDTCisSameAs<1, 4>]>;

def SDT_AArch64TBL : SDTypeProfile<1, 2, [
  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>
]>;

// non-extending masked load fragment.
def nonext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
         !cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
// sign extending masked load fragments.
def asext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def),[{
  return (cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD ||
          cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD) &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def asext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def asext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def asext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;
// zero extending masked load fragments.
def zext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def zext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def zext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def zext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_load :
   PatFrag<(ops node:$ptr, node:$pred, node:$def),
           (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
   return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
          cast<MaskedLoadSDNode>(N)->isUnindexed() &&
          cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
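
// For illustration only: a hypothetical pattern (not defined here) would
// consume one of these fragments much like the SVE patterns built from
// SVEInstrFormats.td do, e.g. selecting a zero-extending i8 masked load
// into an unsigned byte load:
//   def : Pat<(nxv8i16 (zext_masked_load_i8 GPR64:$ptr, PPR:$pg, undef)),
//             (LD1B_H PPR:$pg, GPR64:$ptr, ...)>;
// The fragment already guarantees ISD::ZEXTLOAD, an unindexed access and an
// i8 memory scalar type, so the pattern body only has to pick an instruction.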

// non-truncating masked store fragment.
def nontrunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         !cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;
// truncating masked store fragments.
def trunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed();
}]>;
def trunc_masked_store_i8 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def trunc_masked_store_i16 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def trunc_masked_store_i32 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;

// Node definitions.
def AArch64adrp          : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
def AArch64adr           : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
def AArch64addlow        : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
def AArch64LOADgot       : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
                                SDCallSeqStart<[ SDTCisVT<0, i32>,
                                                 SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOutGlue]>;
def AArch64callseq_end   : SDNode<"ISD::CALLSEQ_END",
                                SDCallSeqEnd<[ SDTCisVT<0, i32>,
                                               SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def AArch64call          : SDNode<"AArch64ISD::CALL",
                                SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                 SDNPVariadic]>;
def AArch64brcond        : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
                                [SDNPHasChain]>;
def AArch64cbz           : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64cbnz          : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64tbz           : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;
def AArch64tbnz          : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;


def AArch64csel          : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
def AArch64csinv         : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
def AArch64csneg         : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
def AArch64csinc         : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
def AArch64retflag       : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
                                [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def AArch64adc       : SDNode<"AArch64ISD::ADC",  SDTBinaryArithWithFlagsIn>;
def AArch64sbc       : SDNode<"AArch64ISD::SBC",  SDTBinaryArithWithFlagsIn>;
def AArch64add_flag  : SDNode<"AArch64ISD::ADDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64sub_flag  : SDNode<"AArch64ISD::SUBS",  SDTBinaryArithWithFlagsOut>;
def AArch64and_flag  : SDNode<"AArch64ISD::ANDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64adc_flag  : SDNode<"AArch64ISD::ADCS",  SDTBinaryArithWithFlagsInOut>;
def AArch64sbc_flag  : SDNode<"AArch64ISD::SBCS",  SDTBinaryArithWithFlagsInOut>;

def AArch64ccmp      : SDNode<"AArch64ISD::CCMP",  SDT_AArch64CCMP>;
def AArch64ccmn      : SDNode<"AArch64ISD::CCMN",  SDT_AArch64CCMP>;
def AArch64fccmp     : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;

def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;

def AArch64fcmp         : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
def AArch64strict_fcmp  : SDNode<"AArch64ISD::STRICT_FCMP", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64strict_fcmpe : SDNode<"AArch64ISD::STRICT_FCMPE", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64any_fcmp     : PatFrags<(ops node:$lhs, node:$rhs),
                                   [(AArch64strict_fcmp node:$lhs, node:$rhs),
                                    (AArch64fcmp node:$lhs, node:$rhs)]>;

def AArch64dup       : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
def AArch64duplane8  : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;

def AArch64insr      : SDNode<"AArch64ISD::INSR", SDT_AArch64Insr>;

def AArch64zip1      : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
def AArch64zip2      : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
def AArch64uzp1      : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
def AArch64uzp2      : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
def AArch64trn1      : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
def AArch64trn2      : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;

def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;

def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
def AArch64ext : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;

def AArch64vashr : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
def AArch64vlshr : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
def AArch64sqshli : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
def AArch64uqshli : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
def AArch64srshri : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;

def AArch64not: SDNode<"AArch64ISD::NOT", SDT_AArch64unvec>;
def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
def AArch64bsl: SDNode<"AArch64ISD::BSL", SDT_AArch64trivec>;

def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;

def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;

def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
                        (AArch64not (AArch64cmeqz (and node:$LHS, node:$RHS)))>;

def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;

def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;

def AArch64neg : SDNode<"AArch64ISD::NEG", SDT_AArch64unvec>;

def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
                  [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

def AArch64Prefetch        : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
                               [SDNPHasChain, SDNPSideEffect]>;

def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;

def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
                                    SDT_AArch64TLSDescCallSeq,
                                    [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
                                     SDNPVariadic]>;


def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
                                 SDT_AArch64WrapperLarge>;

def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;

def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                    SDTCisSameAs<1, 2>]>;
def AArch64smull    : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
def AArch64umull    : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;

def AArch64frecpe   : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
def AArch64frecps   : SDNode<"AArch64ISD::FRECPS", SDTFPBinOp>;
def AArch64frsqrte  : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
def AArch64frsqrts  : SDNode<"AArch64ISD::FRSQRTS", SDTFPBinOp>;

def AArch64saddv    : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
def AArch64uaddv    : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
def AArch64sminv    : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
def AArch64uminv    : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
def AArch64smaxv    : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
def AArch64umaxv    : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;

def SDT_AArch64SETTAG : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
def AArch64stg : SDNode<"AArch64ISD::STG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stzg : SDNode<"AArch64ISD::STZG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64st2g : SDNode<"AArch64ISD::ST2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stz2g : SDNode<"AArch64ISD::STZ2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def SDT_AArch64unpk : SDTypeProfile<1, 1, [
    SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>
]>;
def AArch64sunpkhi : SDNode<"AArch64ISD::SUNPKHI", SDT_AArch64unpk>;
def AArch64sunpklo : SDNode<"AArch64ISD::SUNPKLO", SDT_AArch64unpk>;
def AArch64uunpkhi : SDNode<"AArch64ISD::UUNPKHI", SDT_AArch64unpk>;
def AArch64uunpklo : SDNode<"AArch64ISD::UUNPKLO", SDT_AArch64unpk>;

def AArch64ldp : SDNode<"AArch64ISD::LDP", SDT_AArch64ldp, [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def AArch64stp : SDNode<"AArch64ISD::STP", SDT_AArch64stp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def AArch64tbl : SDNode<"AArch64ISD::TBL", SDT_AArch64TBL>;

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//

// AArch64 Instruction Predicate Definitions.
// We could compute these on a per-module basis but doing so requires accessing
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
  def ForCodeSize    : Predicate<"shouldOptForSize(MF)">;
  def NotForCodeSize : Predicate<"!shouldOptForSize(MF)">;
  // Avoid generating STRQro if it is slow, unless we're optimizing for code size.
  def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || shouldOptForSize(MF)">;

  def UseBTI : Predicate<[{ MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>;
  def NotUseBTI : Predicate<[{ !MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>;

  // Toggles patterns which aren't beneficial in GlobalISel when we aren't
  // optimizing. This allows us to selectively use patterns without impacting
  // SelectionDAG's behaviour.
  // FIXME: One day there will probably be a nicer way to check for this, but
  // today is not that day.
  def OptimizedGISelOrOtherSelector : Predicate<"!MF->getFunction().hasOptNone() || MF->getProperties().hasProperty(MachineFunctionProperties::Property::FailedISel) || !MF->getProperties().hasProperty(MachineFunctionProperties::Property::Legalized)">;
}

include "AArch64InstrFormats.td"
include "SVEInstrFormats.td"

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Miscellaneous instructions.
//===----------------------------------------------------------------------===//

let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
// We set Sched to the empty list because we expect these instructions to
// simply be removed in most cases.
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(AArch64callseq_start timm:$amt1, timm:$amt2)]>,
                              Sched<[]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
                            Sched<[]>;
} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1

let isReMaterializable = 1, isCodeGenOnly = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions.  When that changes, they can be
// removed, along with the AArch64Wrapper node.

let AddedComplexity = 10 in
def LOADgot : Pseudo<(outs GPR64:$dst), (ins i64imm:$addr),
                     [(set GPR64:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
              Sched<[WriteLDAdr]>;

// The MOVaddr instruction should match only when the add is not folded
// into a load or store address.
def MOVaddr
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
                                            tglobaladdr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrJT
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
                                             tjumptable:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrCP
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
                                             tconstpool:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrBA
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
                                             tblockaddress:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrTLS
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
                                            tglobaltlsaddr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrEXT
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
                                            texternalsym:$low))]>,
      Sched<[WriteAdrAdr]>;
// Normally AArch64addlow either gets folded into a following ldr/str, or
// combined with an adrp into one of the MOVaddr pseudos above. With TLS it
// might appear without either of them, so allow lowering it into a plain
// add.
def ADDlowTLS
    : Pseudo<(outs GPR64:$dst), (ins GPR64:$src, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow GPR64:$src,
                                            tglobaltlsaddr:$low))]>,
      Sched<[WriteAdr]>;

} // isReMaterializable, isCodeGenOnly

def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
          (LOADgot tglobaltlsaddr:$addr)>;

def : Pat<(AArch64LOADgot texternalsym:$addr),
          (LOADgot texternalsym:$addr)>;

def : Pat<(AArch64LOADgot tconstpool:$addr),
          (LOADgot tconstpool:$addr)>;

// A 32-bit jump table destination really needs only 2 instructions, since we
// can use the table itself as a PC-relative base. But that optimization
// happens after branch relaxation, so be pessimistic here.
let Size = 12, Constraints = "@earlyclobber $dst,@earlyclobber $scratch" in {
def JumpTableDest32 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest16 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest8 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                            (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                     Sched<[]>;
}
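
// For reference, a sketch of the expected 32-bit expansion (the actual code
// lives in AArch64ExpandPseudoInsts.cpp; exact operand forms may differ):
//   ldrsw $scratch, [$table, $entry, lsl #2]  // load the 32-bit offset
//   add   $dst, $table, $scratch              // table doubles as PC-rel base
// Size = 12 above still reserves room for three instructions, matching the
// pessimism described in the comment before these pseudos.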

// Space-consuming pseudo to aid testing of placement and reachability
// algorithms. Immediate operand is the number of bytes this "instruction"
// occupies; register operands can be used to enforce dependency and constrain
// the scheduler.
let hasSideEffects = 1, mayLoad = 1, mayStore = 1 in
def SPACE : Pseudo<(outs GPR64:$Rd), (ins i32imm:$size, GPR64:$Rn),
                   [(set GPR64:$Rd, (int_aarch64_space imm:$size, GPR64:$Rn))]>,
            Sched<[]>;

let hasSideEffects = 1, isCodeGenOnly = 1 in {
  def SpeculationSafeValueX
      : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>;
  def SpeculationSafeValueW
      : Pseudo<(outs GPR32:$dst), (ins GPR32:$src), []>, Sched<[]>;
}


//===----------------------------------------------------------------------===//
// System instructions.
//===----------------------------------------------------------------------===//

def HINT : HintI<"hint">;
def : InstAlias<"nop",  (HINT 0b000)>;
def : InstAlias<"yield",(HINT 0b001)>;
def : InstAlias<"wfe",  (HINT 0b010)>;
def : InstAlias<"wfi",  (HINT 0b011)>;
def : InstAlias<"sev",  (HINT 0b100)>;
def : InstAlias<"sevl", (HINT 0b101)>;
def : InstAlias<"esb",  (HINT 0b10000)>, Requires<[HasRAS]>;
def : InstAlias<"csdb", (HINT 20)>;
def : InstAlias<"bti",  (HINT 32)>, Requires<[HasBTI]>;
def : InstAlias<"bti $op", (HINT btihint_op:$op)>, Requires<[HasBTI]>;

// v8.2a Statistical Profiling extension
def : InstAlias<"psb $op",  (HINT psbhint_op:$op)>, Requires<[HasSPE]>;

// As far as LLVM is concerned this writes to the system's exclusive monitors.
let mayLoad = 1, mayStore = 1 in
def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;

// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
// model patterns with sufficiently fine granularity.
let mayLoad = ?, mayStore = ? in {
def DMB   : CRmSystemI<barrier_op, 0b101, "dmb",
                       [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;

def DSB   : CRmSystemI<barrier_op, 0b100, "dsb",
                       [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;

def ISB   : CRmSystemI<barrier_op, 0b110, "isb",
                       [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;

def TSB   : CRmSystemI<barrier_op, 0b010, "tsb", []> {
  let CRm        = 0b0010;
  let Inst{12}   = 0;
  let Predicates = [HasTRACEV8_4];
}
}
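
// For example (assuming the architectural barrier-option encodings), the
// intrinsic call (int_aarch64_dmb (i32 11)) selects DMB with CRm = 0xb, which
// barrier_op prints symbolically as "dmb ish".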

// ARMv8.2-A Dot Product
let Predicates = [HasDotProd] in {
defm SDOT : SIMDThreeSameVectorDot<0, "sdot", int_aarch64_neon_sdot>;
defm UDOT : SIMDThreeSameVectorDot<1, "udot", int_aarch64_neon_udot>;
defm SDOTlane : SIMDThreeSameVectorDotIndex<0, "sdot", int_aarch64_neon_sdot>;
defm UDOTlane : SIMDThreeSameVectorDotIndex<1, "udot", int_aarch64_neon_udot>;
}

// ARMv8.2-A FP16 Fused Multiply-Add Long
let Predicates = [HasNEON, HasFP16FML] in {
defm FMLAL      : SIMDThreeSameVectorFML<0, 1, 0b001, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSL      : SIMDThreeSameVectorFML<0, 1, 0b101, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2     : SIMDThreeSameVectorFML<1, 0, 0b001, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2     : SIMDThreeSameVectorFML<1, 0, 0b101, "fmlsl2", int_aarch64_neon_fmlsl2>;
defm FMLALlane  : SIMDThreeSameVectorFMLIndex<0, 0b0000, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSLlane  : SIMDThreeSameVectorFMLIndex<0, 0b0100, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1000, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1100, "fmlsl2", int_aarch64_neon_fmlsl2>;
}

// Armv8.2-A Crypto extensions
let Predicates = [HasSHA3] in {
def SHA512H   : CryptoRRRTied<0b0, 0b00, "sha512h">;
def SHA512H2  : CryptoRRRTied<0b0, 0b01, "sha512h2">;
def SHA512SU0 : CryptoRRTied_2D<0b0, 0b00, "sha512su0">;
def SHA512SU1 : CryptoRRRTied_2D<0b0, 0b10, "sha512su1">;
def RAX1      : CryptoRRR_2D<0b0,0b11, "rax1">;
def EOR3      : CryptoRRRR_16B<0b00, "eor3">;
def BCAX      : CryptoRRRR_16B<0b01, "bcax">;
def XAR       : CryptoRRRi6<"xar">;
} // HasSHA3

let Predicates = [HasSM4] in {
def SM3TT1A   : CryptoRRRi2Tied<0b0, 0b00, "sm3tt1a">;
def SM3TT1B   : CryptoRRRi2Tied<0b0, 0b01, "sm3tt1b">;
def SM3TT2A   : CryptoRRRi2Tied<0b0, 0b10, "sm3tt2a">;
def SM3TT2B   : CryptoRRRi2Tied<0b0, 0b11, "sm3tt2b">;
def SM3SS1    : CryptoRRRR_4S<0b10, "sm3ss1">;
def SM3PARTW1 : CryptoRRRTied_4S<0b1, 0b00, "sm3partw1">;
def SM3PARTW2 : CryptoRRRTied_4S<0b1, 0b01, "sm3partw2">;
def SM4ENCKEY : CryptoRRR_4S<0b1, 0b10, "sm4ekey">;
def SM4E      : CryptoRRTied_4S<0b0, 0b01, "sm4e">;
} // HasSM4

let Predicates = [HasRCPC] in {
  // v8.3 Release Consistent Processor Consistent support, optional in v8.2.
  def LDAPRB  : RCPCLoad<0b00, "ldaprb", GPR32>;
  def LDAPRH  : RCPCLoad<0b01, "ldaprh", GPR32>;
  def LDAPRW  : RCPCLoad<0b10, "ldapr", GPR32>;
  def LDAPRX  : RCPCLoad<0b11, "ldapr", GPR64>;
}

// v8.3a complex add and multiply-accumulate. No predicate here; that is done
// inside the multiclass, as the FP16 versions need different predicates.
defm FCMLA : SIMDThreeSameVectorTiedComplexHSD<1, 0b110, complexrotateop,
                                               "fcmla", null_frag>;
defm FCADD : SIMDThreeSameVectorComplexHSD<1, 0b111, complexrotateopodd,
                                           "fcadd", null_frag>;
defm FCMLA : SIMDIndexedTiedComplexHSD<1, 0, 1, complexrotateop, "fcmla",
                                       null_frag>;

let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot90 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 0))>;
  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot270 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 1))>;
  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot90 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 0))>;
  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot270 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 1))>;
}
let Predicates = [HasComplxNum, HasNEON] in {
  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot90 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 0))>;
  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot270 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 1))>;
  foreach Ty = [v4f32, v2f64] in {
    def : Pat<(Ty (int_aarch64_neon_vcadd_rot90 (Ty V128:$Rn), (Ty V128:$Rm))),
              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 0))>;
    def : Pat<(Ty (int_aarch64_neon_vcadd_rot270 (Ty V128:$Rn), (Ty V128:$Rm))),
              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 1))>;
  }
}

// v8.3a Pointer Authentication
// These instructions inhabit part of the hint space and so can be used for
// armv8 targets. Keeping the old HINT mnemonic when compiling without PA is
// important for compatibility with other assemblers (e.g. GAS) when building
// software that must run on CPUs both with and without PA.
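// For example, "paciasp" assembles to the same encoding as "hint #25": a
// v8.3a core signs LR against SP, while an older core executes it as a NOP.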
let Uses = [LR], Defs = [LR] in {
  def PACIAZ   : SystemNoOperands<0b000, "hint #24">;
  def PACIBZ   : SystemNoOperands<0b010, "hint #26">;
  let isAuthenticated = 1 in {
    def AUTIAZ   : SystemNoOperands<0b100, "hint #28">;
    def AUTIBZ   : SystemNoOperands<0b110, "hint #30">;
  }
}
let Uses = [LR, SP], Defs = [LR] in {
  def PACIASP  : SystemNoOperands<0b001, "hint #25">;
  def PACIBSP  : SystemNoOperands<0b011, "hint #27">;
  let isAuthenticated = 1 in {
    def AUTIASP  : SystemNoOperands<0b101, "hint #29">;
    def AUTIBSP  : SystemNoOperands<0b111, "hint #31">;
  }
}
let Uses = [X16, X17], Defs = [X17], CRm = 0b0001 in {
  def PACIA1716  : SystemNoOperands<0b000, "hint #8">;
  def PACIB1716  : SystemNoOperands<0b010, "hint #10">;
  let isAuthenticated = 1 in {
    def AUTIA1716  : SystemNoOperands<0b100, "hint #12">;
    def AUTIB1716  : SystemNoOperands<0b110, "hint #14">;
  }
}

let Uses = [LR], Defs = [LR], CRm = 0b0000 in {
  def XPACLRI   : SystemNoOperands<0b111, "hint #7">;
}

// These pointer authentication instructions require armv8.3a
let Predicates = [HasPA] in {

  // When compiling with PA, there is a better mnemonic for these instructions.
  def : InstAlias<"paciaz", (PACIAZ), 1>;
  def : InstAlias<"pacibz", (PACIBZ), 1>;
  def : InstAlias<"autiaz", (AUTIAZ), 1>;
  def : InstAlias<"autibz", (AUTIBZ), 1>;
  def : InstAlias<"paciasp", (PACIASP), 1>;
  def : InstAlias<"pacibsp", (PACIBSP), 1>;
  def : InstAlias<"autiasp", (AUTIASP), 1>;
  def : InstAlias<"autibsp", (AUTIBSP), 1>;
  def : InstAlias<"pacia1716", (PACIA1716), 1>;
  def : InstAlias<"pacib1716", (PACIB1716), 1>;
  def : InstAlias<"autia1716", (AUTIA1716), 1>;
  def : InstAlias<"autib1716", (AUTIB1716), 1>;
  def : InstAlias<"xpaclri", (XPACLRI), 1>;

  multiclass SignAuth<bits<3> prefix, bits<3> prefix_z, string asm> {
    def IA   : SignAuthOneData<prefix, 0b00, !strconcat(asm, "ia")>;
    def IB   : SignAuthOneData<prefix, 0b01, !strconcat(asm, "ib")>;
    def DA   : SignAuthOneData<prefix, 0b10, !strconcat(asm, "da")>;
    def DB   : SignAuthOneData<prefix, 0b11, !strconcat(asm, "db")>;
    def IZA  : SignAuthZero<prefix_z, 0b00, !strconcat(asm, "iza")>;
    def DZA  : SignAuthZero<prefix_z, 0b10, !strconcat(asm, "dza")>;
    def IZB  : SignAuthZero<prefix_z, 0b01, !strconcat(asm, "izb")>;
    def DZB  : SignAuthZero<prefix_z, 0b11, !strconcat(asm, "dzb")>;
  }

  defm PAC : SignAuth<0b000, 0b010, "pac">;
  defm AUT : SignAuth<0b001, 0b011, "aut">;

  def XPACI : SignAuthZero<0b100, 0b00, "xpaci">;
  def XPACD : SignAuthZero<0b100, 0b01, "xpacd">;
  def PACGA : SignAuthTwoOperand<0b1100, "pacga", null_frag>;

  // Combined Instructions
  def BRAA    : AuthBranchTwoOperands<0, 0, "braa">;
  def BRAB    : AuthBranchTwoOperands<0, 1, "brab">;
  def BLRAA   : AuthBranchTwoOperands<1, 0, "blraa">;
  def BLRAB   : AuthBranchTwoOperands<1, 1, "blrab">;

  def BRAAZ   : AuthOneOperand<0b000, 0, "braaz">;
  def BRABZ   : AuthOneOperand<0b000, 1, "brabz">;
  def BLRAAZ  : AuthOneOperand<0b001, 0, "blraaz">;
  def BLRABZ  : AuthOneOperand<0b001, 1, "blrabz">;

  let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
    def RETAA   : AuthReturn<0b010, 0, "retaa">;
    def RETAB   : AuthReturn<0b010, 1, "retab">;
    def ERETAA  : AuthReturn<0b100, 0, "eretaa">;
    def ERETAB  : AuthReturn<0b100, 1, "eretab">;
  }

  defm LDRAA  : AuthLoad<0, "ldraa", simm10Scaled>;
  defm LDRAB  : AuthLoad<1, "ldrab", simm10Scaled>;

}

// v8.3a floating point conversion for JavaScript
let Predicates = [HasJS, HasFPARMv8] in
def FJCVTZS  : BaseFPToIntegerUnscaled<0b01, 0b11, 0b110, FPR64, GPR32,
                                      "fjcvtzs",
                                      [(set GPR32:$Rd,
                                         (int_aarch64_fjcvtzs FPR64:$Rn))]> {
  let Inst{31} = 0;
} // HasJS, HasFPARMv8

// v8.4 Flag manipulation instructions
let Predicates = [HasFMI] in {
def CFINV : SimpleSystemI<0, (ins), "cfinv", "">, Sched<[WriteSys]> {
  let Inst{20-5} = 0b0000001000000000;
}
def SETF8  : BaseFlagManipulation<0, 0, (ins GPR32:$Rn), "setf8", "{\t$Rn}">;
def SETF16 : BaseFlagManipulation<0, 1, (ins GPR32:$Rn), "setf16", "{\t$Rn}">;
def RMIF   : FlagRotate<(ins GPR64:$Rn, uimm6:$imm, imm0_15:$mask), "rmif",
                        "{\t$Rn, $imm, $mask}">;
} // HasFMI

// v8.5 flag manipulation instructions
let Predicates = [HasAltNZCV], Uses = [NZCV], Defs = [NZCV] in {

def XAFLAG : PstateWriteSimple<(ins), "xaflag", "">, Sched<[WriteSys]> {
  let Inst{18-16} = 0b000;
  let Inst{11-8} = 0b0000;
  let Unpredictable{11-8} = 0b1111;
  let Inst{7-5} = 0b001;
}

def AXFLAG : PstateWriteSimple<(ins), "axflag", "">, Sched<[WriteSys]> {
  let Inst{18-16} = 0b000;
  let Inst{11-8} = 0b0000;
  let Unpredictable{11-8} = 0b1111;
  let Inst{7-5} = 0b010;
}
} // HasAltNZCV


// Armv8.5-A speculation barrier
def SB : SimpleSystemI<0, (ins), "sb", "">, Sched<[]> {
  let Inst{20-5} = 0b0001100110000111;
  let Unpredictable{11-8} = 0b1111;
  let Predicates = [HasSB];
  let hasSideEffects = 1;
}

def : InstAlias<"clrex", (CLREX 0xf)>;
def : InstAlias<"isb", (ISB 0xf)>;
def : InstAlias<"ssbb", (DSB 0)>;
def : InstAlias<"pssbb", (DSB 4)>;

def MRS    : MRSI;
def MSR    : MSRI;
def MSRpstateImm1 : MSRpstateImm0_1;
def MSRpstateImm4 : MSRpstateImm0_15;

// The thread pointer (on Linux, at least, where this has been implemented) is
// TPIDR_EL0.
def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
                       [(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;
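
// MOVbaseTLS is expanded after selection to a plain system-register read:
//   mrs $dst, TPIDR_EL0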

let Uses = [ X9 ], Defs = [ X16, X17, LR, NZCV ] in {
def HWASAN_CHECK_MEMACCESS : Pseudo<
  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
  [(int_hwasan_check_memaccess X9, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
  Sched<[]>;
def HWASAN_CHECK_MEMACCESS_SHORTGRANULES : Pseudo<
  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
  [(int_hwasan_check_memaccess_shortgranules X9, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
  Sched<[]>;
}

// The cycle counter PMC register is PMCCNTR_EL0.
let Predicates = [HasPerfMon] in
def : Pat<(readcyclecounter), (MRS 0xdce8)>;

// FPCR register
def : Pat<(i64 (int_aarch64_get_fpcr)), (MRS 0xda20)>;

// Generic system instructions
def SYSxt  : SystemXtI<0, "sys">;
def SYSLxt : SystemLXtI<1, "sysl">;

def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
                (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
                 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;


let Predicates = [HasTME] in {

def TSTART : TMSystemI<0b0000, "tstart",
                      [(set GPR64:$Rt, (int_aarch64_tstart))]>;

def TCOMMIT : TMSystemINoOperand<0b0000, "tcommit", [(int_aarch64_tcommit)]>;

def TCANCEL : TMSystemException<0b011, "tcancel",
                                [(int_aarch64_tcancel i64_imm0_65535:$imm)]>;

def TTEST : TMSystemI<0b0001, "ttest", [(set GPR64:$Rt, (int_aarch64_ttest))]> {
  let mayLoad = 0;
  let mayStore = 0;
}
} // HasTME

//===----------------------------------------------------------------------===//
// Move immediate instructions.
//===----------------------------------------------------------------------===//

defm MOVK : InsertImmediate<0b11, "movk">;
defm MOVN : MoveImmediate<0b00, "movn">;

let PostEncoderMethod = "fixMOVZ" in
defm MOVZ : MoveImmediate<0b10, "movz">;

// First group of aliases covers an implicit "lsl #0".
def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, i32_imm0_65535:$imm, 0), 0>;
def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, i32_imm0_65535:$imm, 0), 0>;
def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, i32_imm0_65535:$imm, 0)>;
def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, i32_imm0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, i32_imm0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, i32_imm0_65535:$imm, 0)>;
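// e.g. "movk w0, #1" is accepted and encoded as "movk w0, #1, lsl #0".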

// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g3:$sym, 48), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g2:$sym, 32), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g1:$sym, 16), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g0:$sym, 0), 0>;

def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g1:$sym, 16), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g0:$sym, 0), 0>;
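// e.g. assembling "movz x0, #:abs_g2:sym" selects the g2 alias above (shift
// 32) and leaves an :abs_g2: relocation against the symbol for the linker.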

// Final group of aliases covers true "mov $Rd, $imm" cases.
multiclass movw_mov_alias<string basename, Instruction INST, RegisterClass GPR,
                          int width, int shift> {
  def _asmoperand : AsmOperandClass {
    let Name = basename # width # "_lsl" # shift # "MovAlias";
    let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
                               # shift # ">";
    let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
  }

  def _movimm : Operand<i32> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
  }

  def : InstAlias<"mov $Rd, $imm",
                  (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
}

defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;

defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;

defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;

defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;
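// e.g. "mov x0, #0x12340000" satisfies the MOVZ 64-bit, lsl #16 alias and is
// encoded exactly like "movz x0, #0x1234, lsl #16".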

let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
    isAsCheapAsAMove = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions.  When that changes, we can select
// directly to the real instructions and get rid of these pseudos.

def MOVi32imm
    : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
             [(set GPR32:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
def MOVi64imm
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
             [(set GPR64:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
} // isReMaterializable, isCodeGenOnly

// If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
// eventual expansion code fewer bits to worry about getting right. Marshalling
// the types is a little tricky though:
def i64imm_32bit : ImmLeaf<i64, [{
  return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
}]>;

def s64imm_32bit : ImmLeaf<i64, [{
  int64_t Imm64 = static_cast<int64_t>(Imm);
  return Imm64 >= std::numeric_limits<int32_t>::min() &&
         Imm64 <= std::numeric_limits<int32_t>::max();
}]>;
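
// For example, 0xffffffff satisfies i64imm_32bit (high half zero) but not
// s64imm_32bit, while -1 satisfies s64imm_32bit but not i64imm_32bit.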

def trunc_imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def gi_trunc_imm : GICustomOperandRenderer<"renderTruncImm">,
  GISDNodeXFormEquiv<trunc_imm>;

let Predicates = [OptimizedGISelOrOtherSelector] in {
// The SUBREG_TO_REG isn't eliminated at -O0, which can result in pointless
// copies.
def : Pat<(i64 i64imm_32bit:$src),
          (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
}

// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
return CurDAG->getTargetConstant(
  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
return CurDAG->getTargetConstant(
  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
}]>;


def : Pat<(f32 fpimm:$in),
  (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
def : Pat<(f64 fpimm:$in),
  (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;


// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
// sequences.
def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
                             tglobaladdr:$g1, tglobaladdr:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g0, 0),
                                  tglobaladdr:$g1, 16),
                          tglobaladdr:$g2, 32),
                  tglobaladdr:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
                             tblockaddress:$g1, tblockaddress:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g0, 0),
                                  tblockaddress:$g1, 16),
                          tblockaddress:$g2, 32),
                  tblockaddress:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
                             tconstpool:$g1, tconstpool:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g0, 0),
                                  tconstpool:$g1, 16),
                          tconstpool:$g2, 32),
                  tconstpool:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
                             tjumptable:$g1, tjumptable:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g0, 0),
                                  tjumptable:$g1, 16),
                          tjumptable:$g2, 32),
                  tjumptable:$g3, 48)>;


//===----------------------------------------------------------------------===//
// Arithmetic instructions.
//===----------------------------------------------------------------------===//

// Add/subtract with carry.
defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;

def : InstAlias<"ngc $dst, $src",  (SBCWr  GPR32:$dst, WZR, GPR32:$src)>;
def : InstAlias<"ngc $dst, $src",  (SBCXr  GPR64:$dst, XZR, GPR64:$src)>;
def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;

// Add/subtract
defm ADD : AddSub<0, "add", "sub", add>;
defm SUB : AddSub<1, "sub", "add">;

def : InstAlias<"mov $dst, $src",
                (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;

defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;

// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
          (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
          (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
          (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
          (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
          (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
          (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
let AddedComplexity = 1 in {
def : Pat<(sub GPR32sp:$R2, arith_extended_reg32_i32:$R3),
          (SUBSWrx GPR32sp:$R2, arith_extended_reg32_i32:$R3)>;
def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64_i64:$R3),
          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64_i64:$R3)>;
}
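
// When the flags are unused, the NZCV def of these SUBS instructions is simply
// dead; selecting the flag-setting form costs nothing and lets CSE merge it
// with a later flag-using SUBS of the same operands.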

// Because of the immediate format for add/sub-imm instructions, the
// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
// These patterns capture that transformation.
let AddedComplexity = 1 in {
def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}

// The same applies to the flag-setting variants: (add_flag x, -1) becomes
// (SUBS{W,X}ri x, 1) and (sub_flag x, -1) becomes (ADDS{W,X}ri x, 1).
let AddedComplexity = 1 in {
def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}

def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
def : InstAlias<"neg $dst, $src$shift",
                (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
def : InstAlias<"neg $dst, $src$shift",
                (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;

def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
def : InstAlias<"negs $dst, $src$shift",
                (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
def : InstAlias<"negs $dst, $src$shift",
                (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
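
// e.g. "neg w0, w1" assembles as "sub w0, wzr, w1", and "negs x0, x1" as
// "subs x0, xzr, x1".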


// Unsigned/Signed divide
defm UDIV : Div<0, "udiv", udiv>;
defm SDIV : Div<1, "sdiv", sdiv>;

def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr GPR64:$Rn, GPR64:$Rm)>;
def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr GPR64:$Rn, GPR64:$Rm)>;

// Variable shift
defm ASRV : Shift<0b10, "asr", sra>;
defm LSLV : Shift<0b00, "lsl", shl>;
defm LSRV : Shift<0b01, "lsr", srl>;
defm RORV : Shift<0b11, "ror", rotr>;

def : ShiftAlias<"asrv", ASRVWr, GPR32>;
def : ShiftAlias<"asrv", ASRVXr, GPR64>;
def : ShiftAlias<"lslv", LSLVWr, GPR32>;
def : ShiftAlias<"lslv", LSLVXr, GPR64>;
def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
def : ShiftAlias<"rorv", RORVWr, GPR32>;
def : ShiftAlias<"rorv", RORVXr, GPR64>;

// Multiply-add
let AddedComplexity = 5 in {
defm MADD : MulAccum<0, "madd", add>;
defm MSUB : MulAccum<1, "msub", sub>;

def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
          (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
          (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;

def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
} // AddedComplexity = 5

let AddedComplexity = 5 in {
def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;

def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
          (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
          (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;

def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
          (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
          (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;

def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;

def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;

def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
                    GPR64:$Ra)),
          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;

def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
                                    (s64imm_32bit:$C)))),
          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
} // AddedComplexity = 5

def : MulAccumWAlias<"mul", MADDWrrr>;
def : MulAccumXAlias<"mul", MADDXrrr>;
def : MulAccumWAlias<"mneg", MSUBWrrr>;
def : MulAccumXAlias<"mneg", MSUBXrrr>;
def : WideMulAccumAlias<"smull", SMADDLrrr>;
def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
def : WideMulAccumAlias<"umull", UMADDLrrr>;
def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;
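
// e.g. "smull x0, w1, w2" is "smaddl x0, w1, w2, xzr", and "mneg w0, w1, w2"
// is "msub w0, w1, w2, wzr".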

// Multiply-high
def SMULHrr : MulHi<0b010, "smulh", mulhs>;
def UMULHrr : MulHi<0b110, "umulh", mulhu>;

// CRC32
def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;

def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;

// v8.1 atomic CAS
defm CAS   : CompareAndSwap<0, 0, "">;
defm CASA  : CompareAndSwap<1, 0, "a">;
defm CASL  : CompareAndSwap<0, 1, "l">;
defm CASAL : CompareAndSwap<1, 1, "al">;

// v8.1 atomic CASP
defm CASP   : CompareAndSwapPair<0, 0, "">;
defm CASPA  : CompareAndSwapPair<1, 0, "a">;
defm CASPL  : CompareAndSwapPair<0, 1, "l">;
defm CASPAL : CompareAndSwapPair<1, 1, "al">;

// v8.1 atomic SWP
defm SWP   : Swap<0, 0, "">;
defm SWPA  : Swap<1, 0, "a">;
defm SWPL  : Swap<0, 1, "l">;
defm SWPAL : Swap<1, 1, "al">;
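
// CAS semantics, for reference: "casal ws, wt, [xn]" atomically compares [xn]
// with ws and, if equal, stores wt; ws always receives the old memory value.
// The "a"/"l" suffixes add acquire/release ordering; SWP unconditionally
// exchanges the register with memory.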

// v8.1 atomic LD<OP>(register): atomically loads the old value and then
// performs the ST<OP>(register) store of the updated value.
defm LDADD   : LDOPregister<0b000, "add", 0, 0, "">;
defm LDADDA  : LDOPregister<0b000, "add", 1, 0, "a">;
defm LDADDL  : LDOPregister<0b000, "add", 0, 1, "l">;
defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;

defm LDCLR   : LDOPregister<0b001, "clr", 0, 0, "">;
defm LDCLRA  : LDOPregister<0b001, "clr", 1, 0, "a">;
defm LDCLRL  : LDOPregister<0b001, "clr", 0, 1, "l">;
defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;

defm LDEOR   : LDOPregister<0b010, "eor", 0, 0, "">;
defm LDEORA  : LDOPregister<0b010, "eor", 1, 0, "a">;
defm LDEORL  : LDOPregister<0b010, "eor", 0, 1, "l">;
defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;

defm LDSET   : LDOPregister<0b011, "set", 0, 0, "">;
defm LDSETA  : LDOPregister<0b011, "set", 1, 0, "a">;
defm LDSETL  : LDOPregister<0b011, "set", 0, 1, "l">;
defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;

defm LDSMAX   : LDOPregister<0b100, "smax", 0, 0, "">;
defm LDSMAXA  : LDOPregister<0b100, "smax", 1, 0, "a">;
defm LDSMAXL  : LDOPregister<0b100, "smax", 0, 1, "l">;
defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;

defm LDSMIN   : LDOPregister<0b101, "smin", 0, 0, "">;
defm LDSMINA  : LDOPregister<0b101, "smin", 1, 0, "a">;
defm LDSMINL  : LDOPregister<0b101, "smin", 0, 1, "l">;
defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;

defm LDUMAX   : LDOPregister<0b110, "umax", 0, 0, "">;
defm LDUMAXA  : LDOPregister<0b110, "umax", 1, 0, "a">;
defm LDUMAXL  : LDOPregister<0b110, "umax", 0, 1, "l">;
defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;

defm LDUMIN   : LDOPregister<0b111, "umin", 0, 0, "">;
defm LDUMINA  : LDOPregister<0b111, "umin", 1, 0, "a">;
defm LDUMINL  : LDOPregister<0b111, "umin", 0, 1, "l">;
defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;
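
// For reference: "ldadd w0, w1, [x2]" atomically adds w0 to [x2] and returns
// the old value in w1; the same "a" (acquire), "l" (release) and "al" suffix
// scheme as CAS applies to every LD<OP> above.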

// v8.1 atomic ST<OP>(register), as aliases to "LD<OP>(register)" with Rt=XZR.
defm : STOPregister<"stadd","LDADD">; // STADDx
defm : STOPregister<"stclr","LDCLR">; // STCLRx
defm : STOPregister<"steor","LDEOR">; // STEORx
defm : STOPregister<"stset","LDSET">; // STSETx
defm : STOPregister<"stsmax","LDSMAX">;// STSMAXx
defm : STOPregister<"stsmin","LDSMIN">;// STSMINx
defm : STOPregister<"stumax","LDUMAX">;// STUMAXx
defm : STOPregister<"stumin","LDUMIN">;// STUMINx
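
// e.g. "stadd w0, [x1]" is accepted as "ldadd w0, wzr, [x1]", discarding the
// loaded value.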

// v8.5 Memory Tagging Extension
let Predicates = [HasMTE] in {

def IRG   : BaseTwoOperand<0b0100, GPR64sp, "irg", int_aarch64_irg, GPR64sp, GPR64>,
            Sched<[]> {
  let Inst{31} = 1;
}
def GMI   : BaseTwoOperand<0b0101, GPR64, "gmi", int_aarch64_gmi, GPR64sp>, Sched<[]> {
  let Inst{31} = 1;
  let isNotDuplicable = 1;
}
def ADDG  : AddSubG<0, "addg", null_frag>;
def SUBG  : AddSubG<1, "subg", null_frag>;
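
// Illustrative ADDG use (a sketch of the architectural semantics): roughly,
// "addg x0, sp, #16, #1" yields sp+16 with the allocation tag stepped by 1,
// skipping any excluded tags.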

def : InstAlias<"irg $dst, $src", (IRG GPR64sp:$dst, GPR64sp:$src, XZR), 1>;

def SUBP : SUBP<0, "subp", int_aarch64_subp>, Sched<[]>;
def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]> {
  let Defs = [NZCV];
}

def : InstAlias<"cmpp $lhs, $rhs", (SUBPS XZR, GPR64sp:$lhs, GPR64sp:$rhs), 0>;

def LDG : MemTagLoad<"ldg", "\t$Rt, [$Rn, $offset]">;

def : Pat<(int_aarch64_addg (am_indexedu6s128 GPR64sp:$Rn, uimm6s16:$imm6), imm0_15:$imm4),
          (ADDG GPR64sp:$Rn, imm0_63:$imm6, imm0_15:$imm4)>;
def : Pat<(int_aarch64_ldg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn,  simm9s16:$offset)),
          (LDG GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;

def : InstAlias<"ldg $Rt, [$Rn]", (LDG GPR64:$Rt, GPR64sp:$Rn, 0), 1>;

def LDGM : MemTagVector<1, "ldgm", "\t$Rt, [$Rn]",
                   (outs GPR64:$Rt), (ins GPR64sp:$Rn)>;
def STGM : MemTagVector<0, "stgm", "\t$Rt, [$Rn]",
                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)>;
def STZGM : MemTagVector<0, "stzgm", "\t$Rt, [$Rn]",
                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)> {
  let Inst{23} = 0;
}

defm STG   : MemTagStore<0b00, "stg">;
defm STZG  : MemTagStore<0b01, "stzg">;
defm ST2G  : MemTagStore<0b10, "st2g">;
defm STZ2G : MemTagStore<0b11, "stz2g">;

def : Pat<(AArch64stg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (STGOffset $Rn, $Rm, $imm)>;
def : Pat<(AArch64stzg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (STZGOffset $Rn, $Rm, $imm)>;
def : Pat<(AArch64st2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (ST2GOffset $Rn, $Rm, $imm)>;
def : Pat<(AArch64stz2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (STZ2GOffset $Rn, $Rm, $imm)>;

defm STGP     : StorePairOffset <0b01, 0, GPR64z, simm7s16, "stgp">;
def  STGPpre  : StorePairPreIdx <0b01, 0, GPR64z, simm7s16, "stgp">;
def  STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">;

def : Pat<(int_aarch64_stg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)),
          (STGOffset GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;

def : Pat<(int_aarch64_stgp (am_indexed7s128 GPR64sp:$Rn, simm7s16:$imm), GPR64:$Rt, GPR64:$Rt2),
          (STGPi $Rt, $Rt2, $Rn, $imm)>;

def IRGstack
    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rsp, GPR64:$Rm), []>,
      Sched<[]>;
def TAGPstack
    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rn, uimm6s16:$imm6, GPR64sp:$Rm, imm0_15:$imm4), []>,
      Sched<[]>;

// Explicit SP in the first operand prevents ShrinkWrap optimization
// from leaving this instruction out of the stack frame. When IRGstack
// is transformed into IRG, this operand is replaced with the actual
// register / expression for the tagged base pointer of the current function.
def : Pat<(int_aarch64_irg_sp i64:$Rm), (IRGstack SP, i64:$Rm)>;

// Large STG to be expanded into a loop. $Rm is the size, $Rn is the start
// address. $Rn_wback is one past the end of the range.
let isCodeGenOnly=1, mayStore=1 in {
def STGloop
    : Pseudo<(outs GPR64common:$Rm_wback, GPR64sp:$Rn_wback), (ins GPR64common:$Rm, GPR64sp:$Rn),
             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,$Rm = $Rm_wback,@earlyclobber $Rm_wback" >,
      Sched<[WriteAdr, WriteST]>;

def STZGloop
    : Pseudo<(outs GPR64common:$Rm_wback, GPR64sp:$Rn_wback), (ins GPR64common:$Rm, GPR64sp:$Rn),
             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,$Rm = $Rm_wback,@earlyclobber $Rm_wback" >,
      Sched<[WriteAdr, WriteST]>;
}

} // Predicates = [HasMTE]

//===----------------------------------------------------------------------===//
// Logical instructions.
//===----------------------------------------------------------------------===//

// (immediate)
defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
defm AND  : LogicalImm<0b00, "and", and, "bic">;
defm EOR  : LogicalImm<0b10, "eor", xor, "eon">;
defm ORR  : LogicalImm<0b01, "orr", or, "orn">;

// FIXME: these aliases *are* canonical sometimes (when movz can't be
// used). Actually, it seems to be working right now, but putting logical_immXX
// here is a bit dodgy on the AsmParser side too.
def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
                                          logical_imm32:$imm), 0>;
def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
                                          logical_imm64:$imm), 0>;


// (register)
defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
defm BICS : LogicalRegS<0b11, 1, "bics",
                        BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
defm AND  : LogicalReg<0b00, 0, "and", and>;
defm BIC  : LogicalReg<0b00, 1, "bic",
                       BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
defm EON  : LogicalReg<0b10, 1, "eon",
                       BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
defm EOR  : LogicalReg<0b10, 0, "eor", xor>;
defm ORN  : LogicalReg<0b01, 1, "orn",
                       BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
defm ORR  : LogicalReg<0b01, 0, "orr", or>;

def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;

def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;

def : InstAlias<"mvn $Wd, $Wm$sh",
                (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
def : InstAlias<"mvn $Xd, $Xm$sh",
                (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;

def : InstAlias<"tst $src1, $src2",
                (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
def : InstAlias<"tst $src1, $src2",
                (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;

def : InstAlias<"tst $src1, $src2",
                        (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
def : InstAlias<"tst $src1, $src2",
                        (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;

def : InstAlias<"tst $src1, $src2$sh",
               (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
def : InstAlias<"tst $src1, $src2$sh",
               (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;


def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
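
// e.g. "tst w0, #0xff" assembles as "ands wzr, w0, #0xff", and "mvn w0, w1"
// as "orn w0, wzr, w1".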


//===----------------------------------------------------------------------===//
// One operand data processing instructions.
//===----------------------------------------------------------------------===//

defm CLS    : OneOperandData<0b101, "cls">;
defm CLZ    : OneOperandData<0b100, "clz", ctlz>;
defm RBIT   : OneOperandData<0b000, "rbit", bitreverse>;

def  REV16Wr : OneWRegData<0b001, "rev16",
                                  UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
def  REV16Xr : OneXRegData<0b001, "rev16", null_frag>;

def : Pat<(cttz GPR32:$Rn),
          (CLZWr (RBITWr GPR32:$Rn))>;
def : Pat<(cttz GPR64:$Rn),
          (CLZXr (RBITXr GPR64:$Rn))>;
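
// CLS can be synthesized from CLZ: with y = x ^ (x >>s (bits-1)), we have
// clz(y) == cls(x) + 1, so cls(x) == clz((y << 1) | 1). The two patterns
// below recognize exactly that expansion.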
def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
                (i32 1))),
          (CLSWr GPR32:$Rn)>;
def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
                (i64 1))),
          (CLSXr GPR64:$Rn)>;
def : Pat<(int_aarch64_cls GPR32:$Rn), (CLSWr GPR32:$Rn)>;
def : Pat<(int_aarch64_cls64 GPR64:$Rm), (EXTRACT_SUBREG (CLSXr GPR64:$Rm), sub_32)>;

// Unlike the other one operand instructions, the instructions with the "rev"
// mnemonic do *not* just differ in the size bit; they actually use different
// opcode bits for the different sizes.
def REVWr   : OneWRegData<0b010, "rev", bswap>;
def REVXr   : OneXRegData<0b011, "rev", bswap>;
def REV32Xr : OneXRegData<0b010, "rev32",
                                 UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;

def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;

// The bswap commutes with the rotr so we want a pattern for both possible
// orders.
def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;

//===----------------------------------------------------------------------===//
// Bitfield immediate extraction instruction.
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in
defm EXTR : ExtractImm<"extr">;
def : InstAlias<"ror $dst, $src, $shift",
            (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
def : InstAlias<"ror $dst, $src, $shift",
            (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;

def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
          (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
          (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;

//===----------------------------------------------------------------------===//
// Other bitfield immediate instructions.
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in {
defm BFM  : BitfieldImmWith2RegArgs<0b01, "bfm">;
defm SBFM : BitfieldImm<0b00, "sbfm">;
defm UBFM : BitfieldImm<0b10, "ubfm">;
}

def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(7, 31 - shift_amt)
def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  enc = enc > 7 ? 7 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(15, 31 - shift_amt)
def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  enc = enc > 15 ? 15 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(7, 63 - shift_amt)
def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 7 ? 7 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(15, 63 - shift_amt)
def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 15 ? 15 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(31, 63 - shift_amt)
def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 31 ? 31 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
          (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                              (i64 (i32shift_b imm0_31:$imm)))>;
def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
          (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                              (i64 (i64shift_b imm0_63:$imm)))>;
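
// Worked example: (shl w1, 5) becomes UBFMWri w1, 27, 26, since
// i32shift_a(5) = (32 - 5) & 31 = 27 and i32shift_b(5) = 31 - 5 = 26; this is
// the encoding of "lsl w0, w1, #5".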

let AddedComplexity = 10 in {
def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
}

def : InstAlias<"asr $dst, $src, $shift",
                (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
def : InstAlias<"asr $dst, $src, $shift",
                (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
          (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
          (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;

def : InstAlias<"lsr $dst, $src, $shift",
                (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
def : InstAlias<"lsr $dst, $src, $shift",
                (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

//===----------------------------------------------------------------------===//
// Conditional comparison instructions.
//===----------------------------------------------------------------------===//
defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;

//===----------------------------------------------------------------------===//
// Conditional select instructions.
//===----------------------------------------------------------------------===//
defm CSEL  : CondSelect<0, 0b00, "csel">;

def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;

def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;

def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;

// The inverse of the condition code from the alias instruction is what is used
// in the aliased instruction. The parser already inverts the condition code
// for these aliases.
def : InstAlias<"cset $dst, $cc",
                (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
def : InstAlias<"cset $dst, $cc",
                (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;

def : InstAlias<"csetm $dst, $cc",
                (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
def : InstAlias<"csetm $dst, $cc",
                (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;

def : InstAlias<"cinc $dst, $src, $cc",
                (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cinc $dst, $src, $cc",
                (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

def : InstAlias<"cinv $dst, $src, $cc",
                (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cinv $dst, $src, $cc",
                (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

def : InstAlias<"cneg $dst, $src, $cc",
                (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cneg $dst, $src, $cc",
                (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
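
// e.g. "cneg w0, w1, ne" is parsed with the condition inverted and encoded as
// "csneg w0, w1, w1, eq".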

//===----------------------------------------------------------------------===//
// PC-relative instructions.
//===----------------------------------------------------------------------===//
let isReMaterializable = 1 in {
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def ADR  : ADRI<0, "adr", adrlabel,
                [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>;
} // hasSideEffects = 0

def ADRP : ADRI<1, "adrp", adrplabel,
                [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
} // isReMaterializable = 1

// Address (ADR) or page address (ADRP) of a constant pool entry, block
// address, external symbol or jump table.
def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>;
def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>;
def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>;
def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>;
def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>;
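
// Typical materialization, for reference: "adrp x0, sym" yields the 4KiB page
// address of sym, and a following "add x0, x0, :lo12:sym" (or a load with a
// :lo12: offset) supplies the low 12 bits.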

//===----------------------------------------------------------------------===//
// Unconditional branch (register) instructions.
//===----------------------------------------------------------------------===//

let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
def RET  : BranchReg<0b0010, "ret", []>;
def DRPS : SpecialReturn<0b0101, "drps">;
def ERET : SpecialReturn<0b0100, "eret">;
} // isReturn = 1, isTerminator = 1, isBarrier = 1

// Default to the LR register.
def : InstAlias<"ret", (RET LR)>;

let isCall = 1, Defs = [LR], Uses = [SP] in {
def BLR : BranchReg<0b0001, "blr", [(AArch64call GPR64:$Rn)]>;
} // isCall

let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def BR  : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
} // isBranch, isTerminator, isBarrier, isIndirectBranch

// Create a separate pseudo-instruction for codegen to use so that we don't
// flag lr as used in every function. It'll be restored before the RET by the
// epilogue if it's legitimately used.
def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
                   Sched<[WriteBrReg]> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
}

// This is a directive-like pseudo-instruction. The purpose is to insert an
// R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
// (which in the usual case is a BLR).
let hasSideEffects = 1 in
def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
  let AsmString = ".tlsdesccall $sym";
}

// Pseudo instruction to tell the streamer to emit a 'B' character into the
// augmentation string.
def EMITBKEY : Pseudo<(outs), (ins), []>, Sched<[]> {}

// FIXME: maybe the scratch register used shouldn't be fixed to X1?
// FIXME: can "hasSideEffects" be dropped?
let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1,
    isCodeGenOnly = 1 in
def TLSDESC_CALLSEQ
    : Pseudo<(outs), (ins i64imm:$sym),
             [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
      Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
          (TLSDESC_CALLSEQ texternalsym:$sym)>;
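
// For reference, the usual code sequence this expands to is roughly:
//   adrp x0, :tlsdesc:var
//   ldr  x1, [x0, #:tlsdesc_lo12:var]
//   add  x0, x0, :tlsdesc_lo12:var
//   .tlsdesccall var
//   blr  x1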

//===----------------------------------------------------------------------===//
// Conditional branch (immediate) instruction.
//===----------------------------------------------------------------------===//
def Bcc : BranchCond;

//===----------------------------------------------------------------------===//
// Compare-and-branch instructions.
//===----------------------------------------------------------------------===//
defm CBZ  : CmpBranch<0, "cbz", AArch64cbz>;
defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;

//===----------------------------------------------------------------------===//
// Test-bit-and-branch instructions.
//===----------------------------------------------------------------------===//
defm TBZ  : TestBranch<0, "tbz", AArch64tbz>;
defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;

//===----------------------------------------------------------------------===//
// Unconditional branch (immediate) instructions.
//===----------------------------------------------------------------------===//
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
def B  : BranchImm<0, "b", [(br bb:$addr)]>;
} // isBranch, isTerminator, isBarrier

let isCall = 1, Defs = [LR], Uses = [SP] in {
def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
} // isCall
def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;

//===----------------------------------------------------------------------===//
// Exception generation instructions.
//===----------------------------------------------------------------------===//
let isTrap = 1 in {
def BRK   : ExceptionGeneration<0b001, 0b00, "brk">;
}
def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">;
def HLT   : ExceptionGeneration<0b010, 0b00, "hlt">;
def HVC   : ExceptionGeneration<0b000, 0b10, "hvc">;
def SMC   : ExceptionGeneration<0b000, 0b11, "smc">;
def SVC   : ExceptionGeneration<0b000, 0b01, "svc">;

// DCPSn defaults to an immediate operand of zero if unspecified.
def : InstAlias<"dcps1", (DCPS1 0)>;
def : InstAlias<"dcps2", (DCPS2 0)>;
def : InstAlias<"dcps3", (DCPS3 0)>;

def UDF : UDFType<0, "udf">;

//===----------------------------------------------------------------------===//
// Load instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
defm LDPW : LoadPairOffset<0b00, 0, GPR32z, simm7s4, "ldp">;
defm LDPX : LoadPairOffset<0b10, 0, GPR64z, simm7s8, "ldp">;
defm LDPS : LoadPairOffset<0b00, 1, FPR32Op, simm7s4, "ldp">;
defm LDPD : LoadPairOffset<0b01, 1, FPR64Op, simm7s8, "ldp">;
defm LDPQ : LoadPairOffset<0b10, 1, FPR128Op, simm7s16, "ldp">;

defm LDPSW : LoadPairOffset<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (pre-indexed)
def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (post-indexed)
def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;


// Pair (no allocate)
defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32z, simm7s4, "ldnp">;
defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64z, simm7s8, "ldnp">;
defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32Op, simm7s4, "ldnp">;
defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64Op, simm7s8, "ldnp">;
defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128Op, simm7s16, "ldnp">;

def : Pat<(AArch64ldp (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (LDPXi GPR64sp:$Rn, simm7s8:$offset)>;

//---
// (register offset)
//---

// Integer
defm LDRBB : Load8RO<0b00,  0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
defm LDRW  : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
defm LDRX  : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;

// Floating-point
defm LDRB : Load8RO<0b00,   1, 0b01, FPR8Op,   "ldr", untyped, load>;
defm LDRH : Load16RO<0b01,  1, 0b01, FPR16Op,  "ldr", f16, load>;
defm LDRS : Load32RO<0b10,  1, 0b01, FPR32Op,  "ldr", f32, load>;
defm LDRD : Load64RO<0b11,  1, 0b01, FPR64Op,  "ldr", f64, load>;
defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128Op, "ldr", f128, load>;

// Load sign-extended half-word
defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;

// Load sign-extended byte
defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;

// Load sign-extended word
defm LDRSW  : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;

// Pre-fetch.
defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;

// Regular loads have no alignment requirement, so it is safe to map vector
// loads directly onto the interesting addressing modes.
// FIXME: We could do the same for bitconvert to floating point vectors.
multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
                              ValueType ScalTy, ValueType VecTy,
                              Instruction LOADW, Instruction LOADX,
                              SubRegIndex sub> {
  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
                           sub)>;

  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
                           sub)>;
}
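
// The IMPLICIT_DEF + INSERT_SUBREG idiom above places the loaded scalar in
// lane 0 and leaves the remaining lanes undefined, which is exactly what
// scalar_to_vector requires.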

let AddedComplexity = 10 in {
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v8i8,  LDRBroW, LDRBroX, bsub>;
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v16i8, LDRBroW, LDRBroX, bsub>;

defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro16, load,       i32, v4f16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, load,       i32, v8f16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro32, load,       i32, v2i32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       i32, v4i32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro32, load,       f32, v2f32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       f32, v4f32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro64, load,       i64, v2i64, LDRDroW, LDRDroX, dsub>;

defm : ScalToVecROLoadPat<ro64, load,       f64, v2f64, LDRDroW, LDRDroX, dsub>;


def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend64:$extend))))),
           (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;

def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend64:$extend))))),
           (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
}

// Match all 64-bit-wide loads whose type is compatible with FPR64.
multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
                        Instruction LOADW, Instruction LOADX> {

  def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
let Predicates = [IsLE] in {
  // On big-endian targets vector loads must be done with LD1, so these
  // patterns are little-endian only.
  defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v8i8,  LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
}

defm : VecROLoadPat<ro64, v1i64,  LDRDroW, LDRDroX>;
defm : VecROLoadPat<ro64, v1f64,  LDRDroW, LDRDroX>;

// Match all 128-bit-wide loads whose type is compatible with FPR128.
let Predicates = [IsLE] in {
  // On big-endian targets vector loads must be done with LD1, so these
  // patterns are little-endian only.
  defm : VecROLoadPat<ro128, v2i64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v2f64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4i32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4f32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8i16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8f16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v16i8,  LDRQroW, LDRQroX>;
}
} // AddedComplexity = 10

// zextload -> i64
multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                           sub_32)>;

  def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                           sub_32)>;
}

let AddedComplexity = 10 in {
  defm : ExtLoadTo64ROPat<ro8,  zextloadi8,  LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  zextloadi1,  LDRBBroW, LDRBBroX>;

  // extload -> zextload
  defm : ExtLoadTo64ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // extloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  extloadi1,   LDRBBroW, LDRBBroX>;
}


// extload/zextload -> i32
multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
  // extload -> zextload
  defm : ExtLoadTo32ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo32ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo32ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
}

//---
// (unsigned immediate)
//---
defm LDRX : LoadUI<0b11, 0, 0b01, GPR64z, uimm12s8, "ldr",
                   [(set GPR64z:$Rt,
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRW : LoadUI<0b10, 0, 0b01, GPR32z, uimm12s4, "ldr",
                   [(set GPR32z:$Rt,
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRB : LoadUI<0b00, 1, 0b01, FPR8Op, uimm12s1, "ldr",
                   [(set FPR8Op:$Rt,
                         (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
defm LDRH : LoadUI<0b01, 1, 0b01, FPR16Op, uimm12s2, "ldr",
                   [(set (f16 FPR16Op:$Rt),
                         (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
defm LDRS : LoadUI<0b10, 1, 0b01, FPR32Op, uimm12s4, "ldr",
                   [(set (f32 FPR32Op:$Rt),
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRD : LoadUI<0b11, 1, 0b01, FPR64Op, uimm12s8, "ldr",
                   [(set (f64 FPR64Op:$Rt),
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128Op, uimm12s16, "ldr",
                 [(set (f128 FPR128Op:$Rt),
                       (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;

2235// For regular load, we do not have any alignment requirement.
2236// Thus, it is safe to directly map the vector loads with interesting
2237// addressing modes.
2238// FIXME: We could do the same for bitconvert to floating point vectors.
2239def : Pat <(v8i8 (scalar_to_vector (i32
2240               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
2241           (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
2242                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
2243def : Pat <(v16i8 (scalar_to_vector (i32
2244               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
2245           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
2246                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
2247def : Pat <(v4i16 (scalar_to_vector (i32
2248               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
2249           (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
2250                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
2251def : Pat <(v8i16 (scalar_to_vector (i32
2252               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
2253           (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
2254                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
2255def : Pat <(v2i32 (scalar_to_vector (i32
2256               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
2257           (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
2258                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
2259def : Pat <(v4i32 (scalar_to_vector (i32
2260               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
2261           (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
2262                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
2263def : Pat <(v1i64 (scalar_to_vector (i64
2264               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
2265           (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2266def : Pat <(v2i64 (scalar_to_vector (i64
2267               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
2268           (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
2269                          (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;
2270
2271// Match all load 64 bits width whose type is compatible with FPR64
2272let Predicates = [IsLE] in {
2273  // We must use LD1 to perform vector loads in big-endian.
2274  def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2275            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2276  def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2277            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2278  def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2279            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2280  def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2281            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2282  def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2283            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2284}
2285def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2286          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2287def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2288          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2289
2290// Match all 128-bit-wide loads whose type is compatible with FPR128
2291let Predicates = [IsLE] in {
2292  // We must use LD1 to perform vector loads in big-endian.
2293  def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2294            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2295  def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2296            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2297  def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2298            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2299  def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2300            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2301  def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2302            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2303  def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2304            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2305  def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2306            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2307}
2308def : Pat<(f128  (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2309          (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2310
2311defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
2312                    [(set GPR32:$Rt,
2313                          (zextloadi16 (am_indexed16 GPR64sp:$Rn,
2314                                                     uimm12s2:$offset)))]>;
2315defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
2316                    [(set GPR32:$Rt,
2317                          (zextloadi8 (am_indexed8 GPR64sp:$Rn,
2318                                                   uimm12s1:$offset)))]>;
2319// zextload -> i64
2320def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2321    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2322def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2323    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
2324
2325// zextloadi1 -> zextloadi8
2326def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2327          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2328def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2329    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2330
2331// extload -> zextload
2332def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2333          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
2334def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2335          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2336def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2337          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2338def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
2339    (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
2340def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2341    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
2342def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2343    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2344def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2345    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
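// The extload patterns above may freely use the zero-extending
// instructions: an extload leaves the high bits unspecified, and LDRB/LDRH
// (and any 32-bit W-register write) already zero the rest of the register.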
2346
2347// load sign-extended half-word
2348defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
2349                     [(set GPR32:$Rt,
2350                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
2351                                                      uimm12s2:$offset)))]>;
2352defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
2353                     [(set GPR64:$Rt,
2354                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
2355                                                      uimm12s2:$offset)))]>;
2356
2357// load sign-extended byte
2358defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
2359                     [(set GPR32:$Rt,
2360                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
2361                                                    uimm12s1:$offset)))]>;
2362defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
2363                     [(set GPR64:$Rt,
2364                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
2365                                                    uimm12s1:$offset)))]>;
2366
2367// load sign-extended word
2368defm LDRSW  : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
2369                     [(set GPR64:$Rt,
2370                           (sextloadi32 (am_indexed32 GPR64sp:$Rn,
2371                                                      uimm12s4:$offset)))]>;
2372
2373// load zero-extended word
2374def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
2375      (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
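// This works because a 32-bit load into a W register implicitly zeroes
// bits [63:32], so the zero-extending i64 load is just LDRWui wrapped in
// SUBREG_TO_REG with a known-zero high half; no separate extend is needed.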
2376
2377// Pre-fetch.
2378def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
2379                        [(AArch64Prefetch imm:$Rt,
2380                                        (am_indexed64 GPR64sp:$Rn,
2381                                                      uimm12s8:$offset))]>;
2382
2383def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;
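// e.g. "prfm pldl1keep, [x0, #64]"; the $Rt field holds the prefetch
// operation (type, target cache level, retention policy) rather than a
// destination register.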
2384
2385//---
2386// (literal)
2387
2388def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
2389  if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
2390    const DataLayout &DL = MF->getDataLayout();
2391    MaybeAlign Align = G->getGlobal()->getPointerAlignment(DL);
2392    return Align && *Align >= 4 && G->getOffset() % 4 == 0;
2393  }
2394  if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
2395    return C->getAlignment() >= 4 && C->getOffset() % 4 == 0;
2396  return false;
2397}]>;
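// The literal forms below load PC-relative data: the 19-bit label offset
// is scaled by 4 (a +/-1MiB range), which is why alignedglobal above
// requires 4-byte alignment of the referenced global or constant-pool
// entry.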
2398
2399def LDRWl : LoadLiteral<0b00, 0, GPR32z, "ldr",
2400  [(set GPR32z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
2401def LDRXl : LoadLiteral<0b01, 0, GPR64z, "ldr",
2402  [(set GPR64z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
2403def LDRSl : LoadLiteral<0b00, 1, FPR32Op, "ldr",
2404  [(set (f32 FPR32Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2405def LDRDl : LoadLiteral<0b01, 1, FPR64Op, "ldr",
2406  [(set (f64 FPR64Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2407def LDRQl : LoadLiteral<0b10, 1, FPR128Op, "ldr",
2408  [(set (f128 FPR128Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2409
2410// load sign-extended word
2411def LDRSWl : LoadLiteral<0b10, 0, GPR64z, "ldrsw",
2412  [(set GPR64z:$Rt, (sextloadi32 (AArch64adr alignedglobal:$label)))]>;
2413
2414let AddedComplexity = 20 in {
2415def : Pat<(i64 (zextloadi32 (AArch64adr alignedglobal:$label))),
2416        (SUBREG_TO_REG (i64 0), (LDRWl $label), sub_32)>;
2417}
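// The AddedComplexity bump keeps this fused zero-extending literal load
// ahead of other patterns that could match the inner load on its own.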
2418
2419// prefetch
2420def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
2421//                   [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;
2422
2423//---
2424// (unscaled immediate)
2425defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64z, "ldur",
2426                    [(set GPR64z:$Rt,
2427                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
2428defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32z, "ldur",
2429                    [(set GPR32z:$Rt,
2430                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
2431defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8Op, "ldur",
2432                    [(set FPR8Op:$Rt,
2433                          (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
2434defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16Op, "ldur",
2435                    [(set FPR16Op:$Rt,
2436                          (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
2437defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32Op, "ldur",
2438                    [(set (f32 FPR32Op:$Rt),
2439                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
2440defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64Op, "ldur",
2441                    [(set (f64 FPR64Op:$Rt),
2442                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
2443defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128Op, "ldur",
2444                    [(set (f128 FPR128Op:$Rt),
2445                          (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;
2446
2447defm LDURHH
2448    : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
2449             [(set GPR32:$Rt,
2450                    (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
2451defm LDURBB
2452    : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
2453             [(set GPR32:$Rt,
2454                    (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
2455
2456// Match all 64-bit-wide loads whose type is compatible with FPR64
2457let Predicates = [IsLE] in {
2458  def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2459            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2460  def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2461            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2462  def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2463            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2464  def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2465            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2466  def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2467            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2468}
2469def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2470          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2471def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2472          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2473
2474// Match all 128-bit-wide loads whose type is compatible with FPR128
2475let Predicates = [IsLE] in {
2476  def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2477            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2478  def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2479            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2480  def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2481            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2482  def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2483            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2484  def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2485            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2486  def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2487            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2488  def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2489            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2490}
2491
2492// extload -> zextload
2493def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2494          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
2495def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2496          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2497def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2498          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2499def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
2500    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2501def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2502    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2503def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2504    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2505def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2506    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2507// unscaled zext
2508def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2509          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
2510def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2511          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2512def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2513          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2514def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
2515    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2516def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2517    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2518def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2519    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2520def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2521    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2522
2523
2524//---
2525// LDR mnemonics fall back to LDUR for negative or unaligned offsets.
2526
2527// Define new assembler match classes, because we want these to match only
2528// when they don't otherwise match the scaled addressing mode for LDR/STR.
2529// Don't associate a DiagnosticType either, as we want the diagnostic for
2530// the canonical form (the scaled operand) to take precedence.
2531class SImm9OffsetOperand<int Width> : AsmOperandClass {
2532  let Name = "SImm9OffsetFB" # Width;
2533  let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
2534  let RenderMethod = "addImmOperands";
2535}
2536
2537def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
2538def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
2539def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
2540def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
2541def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;
2542
2543def simm9_offset_fb8 : Operand<i64> {
2544  let ParserMatchClass = SImm9OffsetFB8Operand;
2545}
2546def simm9_offset_fb16 : Operand<i64> {
2547  let ParserMatchClass = SImm9OffsetFB16Operand;
2548}
2549def simm9_offset_fb32 : Operand<i64> {
2550  let ParserMatchClass = SImm9OffsetFB32Operand;
2551}
2552def simm9_offset_fb64 : Operand<i64> {
2553  let ParserMatchClass = SImm9OffsetFB64Operand;
2554}
2555def simm9_offset_fb128 : Operand<i64> {
2556  let ParserMatchClass = SImm9OffsetFB128Operand;
2557}
2558
2559def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2560                (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
2561def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2562                (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2563def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2564                (LDURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2565def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2566                (LDURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2567def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2568                (LDURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2569def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2570                (LDURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
2571def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2572               (LDURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
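// With these aliases the assembler accepts e.g. "ldr x0, [x1, #-8]" or
// "ldr x0, [x1, #3]", offsets the scaled form cannot encode, and emits the
// corresponding LDUR. The trailing 0 marks each alias as parse-only, so
// the printer still uses the canonical "ldur" spelling.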
2573
2574// zextload -> i64
2575def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2576  (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2577def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2578  (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2579
2580// load sign-extended half-word
2581defm LDURSHW
2582    : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
2583               [(set GPR32:$Rt,
2584                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
2585defm LDURSHX
2586    : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
2587              [(set GPR64:$Rt,
2588                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
2589
2590// load sign-extended byte
2591defm LDURSBW
2592    : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
2593                [(set GPR32:$Rt,
2594                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
2595defm LDURSBX
2596    : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
2597                [(set GPR64:$Rt,
2598                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
2599
2600// load sign-extended word
2601defm LDURSW
2602    : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
2603              [(set GPR64:$Rt,
2604                    (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
2605
2606// Zero- and sign-extending aliases from generic LDR* mnemonics to LDUR*.
2607def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
2608                (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2609def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
2610                (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2611def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
2612                (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2613def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
2614                (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2615def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
2616                (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2617def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
2618                (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2619def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
2620                (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2621
2622// Pre-fetch.
2623defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
2624                  [(AArch64Prefetch imm:$Rt,
2625                                  (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
2626
2627//---
2628// (unscaled immediate, unprivileged)
2629defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
2630defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;
2631
2632defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
2633defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;
2634
2635// load sign-extended half-word
2636defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
2637defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;
2638
2639// load sign-extended byte
2640defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
2641defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;
2642
2643// load sign-extended word
2644defm LDTRSW  : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;
2645
2646//---
2647// (immediate pre-indexed)
2648def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32z, "ldr">;
2649def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64z, "ldr">;
2650def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
2651def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
2652def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
2653def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
2654def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128Op, "ldr">;
2655
2656// load sign-extended half-word
2657def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
2658def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;
2659
2660// load sign-extended byte
2661def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
2662def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;
2663
2664// load zero-extended byte and half-word
2665def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
2666def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32z, "ldrh">;
2667
2668// load sign-extended word
2669def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;
2670
2671//---
2672// (immediate post-indexed)
2673def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32z, "ldr">;
2674def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64z, "ldr">;
2675def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
2676def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
2677def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
2678def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
2679def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128Op, "ldr">;
2680
2681// load sign-extended half-word
2682def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
2683def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;
2684
2685// load sign-extended byte
2686def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
2687def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;
2688
2689// load zero-extended byte and half-word
2690def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
2691def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32z, "ldrh">;
2692
2693// load sign-extended word
2694def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;
2695
2696//===----------------------------------------------------------------------===//
2697// Store instructions.
2698//===----------------------------------------------------------------------===//
2699
2700// Pair (indexed, offset)
2701// FIXME: Use dedicated range-checked addressing mode operand here.
2702defm STPW : StorePairOffset<0b00, 0, GPR32z, simm7s4, "stp">;
2703defm STPX : StorePairOffset<0b10, 0, GPR64z, simm7s8, "stp">;
2704defm STPS : StorePairOffset<0b00, 1, FPR32Op, simm7s4, "stp">;
2705defm STPD : StorePairOffset<0b01, 1, FPR64Op, simm7s8, "stp">;
2706defm STPQ : StorePairOffset<0b10, 1, FPR128Op, simm7s16, "stp">;
2707
2708// Pair (pre-indexed)
2709def STPWpre : StorePairPreIdx<0b00, 0, GPR32z, simm7s4, "stp">;
2710def STPXpre : StorePairPreIdx<0b10, 0, GPR64z, simm7s8, "stp">;
2711def STPSpre : StorePairPreIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
2712def STPDpre : StorePairPreIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
2713def STPQpre : StorePairPreIdx<0b10, 1, FPR128Op, simm7s16, "stp">;
2714
2715// Pair (post-indexed)
2716def STPWpost : StorePairPostIdx<0b00, 0, GPR32z, simm7s4, "stp">;
2717def STPXpost : StorePairPostIdx<0b10, 0, GPR64z, simm7s8, "stp">;
2718def STPSpost : StorePairPostIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
2719def STPDpost : StorePairPostIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
2720def STPQpost : StorePairPostIdx<0b10, 1, FPR128Op, simm7s16, "stp">;
2721
2722// Pair (no allocate)
2723defm STNPW : StorePairNoAlloc<0b00, 0, GPR32z, simm7s4, "stnp">;
2724defm STNPX : StorePairNoAlloc<0b10, 0, GPR64z, simm7s8, "stnp">;
2725defm STNPS : StorePairNoAlloc<0b00, 1, FPR32Op, simm7s4, "stnp">;
2726defm STNPD : StorePairNoAlloc<0b01, 1, FPR64Op, simm7s8, "stnp">;
2727defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128Op, simm7s16, "stnp">;
2728
2729def : Pat<(AArch64stp GPR64z:$Rt, GPR64z:$Rt2, (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
2730          (STPXi GPR64z:$Rt, GPR64z:$Rt2, GPR64sp:$Rn, simm7s8:$offset)>;
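// The paired-store offset is a signed 7-bit immediate scaled by the access
// size, so for X-register pairs "stp x0, x1, [sp, #16]" encodes 16/8 = 2
// and the reachable range is [-512, 504] in steps of 8.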
2731
2732//---
2733// (Register offset)
2734
2735// Integer
2736defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
2737defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
2738defm STRW  : Store32RO<0b10, 0, 0b00, GPR32, "str",  i32, store>;
2739defm STRX  : Store64RO<0b11, 0, 0b00, GPR64, "str",  i64, store>;
2740
2741
2742// Floating-point
2743defm STRB : Store8RO< 0b00,  1, 0b00, FPR8Op,   "str", untyped, store>;
2744defm STRH : Store16RO<0b01,  1, 0b00, FPR16Op,  "str", f16,     store>;
2745defm STRS : Store32RO<0b10,  1, 0b00, FPR32Op,  "str", f32,     store>;
2746defm STRD : Store64RO<0b11,  1, 0b00, FPR64Op,  "str", f64,     store>;
2747defm STRQ : Store128RO<0b00, 1, 0b10, FPR128Op, "str", f128,    store>;
2748
2749let Predicates = [UseSTRQro], AddedComplexity = 10 in {
2750  def : Pat<(store (f128 FPR128:$Rt),
2751                        (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
2752                                        ro_Wextend128:$extend)),
2753            (STRQroW FPR128:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend)>;
2754  def : Pat<(store (f128 FPR128:$Rt),
2755                        (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
2756                                        ro_Xextend128:$extend)),
2757            (STRQroX FPR128:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend128:$extend)>;
2758}
2759
2760multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
2761                                 Instruction STRW, Instruction STRX> {
2762
2763  def : Pat<(storeop GPR64:$Rt,
2764                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
2765            (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
2766                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2767
2768  def : Pat<(storeop GPR64:$Rt,
2769                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
2770            (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
2771                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2772}
2773
2774let AddedComplexity = 10 in {
2775  // truncstore i64
2776  defm : TruncStoreFrom64ROPat<ro8,  truncstorei8,  STRBBroW, STRBBroX>;
2777  defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
2778  defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW,  STRWroX>;
2779}
2780
2781multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
2782                         Instruction STRW, Instruction STRX> {
2783  def : Pat<(store (VecTy FPR:$Rt),
2784                   (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
2785            (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2786
2787  def : Pat<(store (VecTy FPR:$Rt),
2788                   (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
2789            (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2790}
2791
2792let AddedComplexity = 10 in {
2793// Match all 64-bit-wide stores whose type is compatible with FPR64
2794let Predicates = [IsLE] in {
2795  // We must use ST1 to store vectors in big-endian.
2796  defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
2797  defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
2798  defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
2799  defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
2800  defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
2801}
2802
2803defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
2804defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;
2805
2806// Match all 128-bit-wide stores whose type is compatible with FPR128
2807let Predicates = [IsLE, UseSTRQro] in {
2808  // We must use ST1 to store vectors in big-endian.
2809  defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
2810  defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
2811  defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
2812  defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
2813  defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
2814  defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
2815  defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
2816}
2817} // AddedComplexity = 10
2818
2819// Match stores from lane 0 to the appropriate subreg's store.
2820multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
2821                              ValueType VecTy, ValueType STy,
2822                              SubRegIndex SubRegIdx,
2823                              Instruction STRW, Instruction STRX> {
2824
2825  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
2826                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
2827            (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
2828                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2829
2830  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
2831                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
2832            (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
2833                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2834}
2835
2836let AddedComplexity = 19 in {
2837  defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
2838  defm : VecROStoreLane0Pat<ro16,         store, v8f16, f16, hsub, STRHroW, STRHroX>;
2839  defm : VecROStoreLane0Pat<ro32,         store, v4i32, i32, ssub, STRSroW, STRSroX>;
2840  defm : VecROStoreLane0Pat<ro32,         store, v4f32, f32, ssub, STRSroW, STRSroX>;
2841  defm : VecROStoreLane0Pat<ro64,         store, v2i64, i64, dsub, STRDroW, STRDroX>;
2842  defm : VecROStoreLane0Pat<ro64,         store, v2f64, f64, dsub, STRDroW, STRDroX>;
2843}
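// Storing lane 0 this way is profitable because extracting element 0 is
// just a subregister read: e.g. a lane-0 store from a v4f32 becomes a
// plain "str s0" of the ssub half, with no ext/umov shuffle emitted.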
2844
2845//---
2846// (unsigned immediate)
2847defm STRX : StoreUIz<0b11, 0, 0b00, GPR64z, uimm12s8, "str",
2848                   [(store GPR64z:$Rt,
2849                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
2850defm STRW : StoreUIz<0b10, 0, 0b00, GPR32z, uimm12s4, "str",
2851                    [(store GPR32z:$Rt,
2852                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
2853defm STRB : StoreUI<0b00, 1, 0b00, FPR8Op, uimm12s1, "str",
2854                    [(store FPR8Op:$Rt,
2855                            (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
2856defm STRH : StoreUI<0b01, 1, 0b00, FPR16Op, uimm12s2, "str",
2857                    [(store (f16 FPR16Op:$Rt),
2858                            (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
2859defm STRS : StoreUI<0b10, 1, 0b00, FPR32Op, uimm12s4, "str",
2860                    [(store (f32 FPR32Op:$Rt),
2861                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
2862defm STRD : StoreUI<0b11, 1, 0b00, FPR64Op, uimm12s8, "str",
2863                    [(store (f64 FPR64Op:$Rt),
2864                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
2865defm STRQ : StoreUI<0b00, 1, 0b10, FPR128Op, uimm12s16, "str", []>;
2866
2867defm STRHH : StoreUIz<0b01, 0, 0b00, GPR32z, uimm12s2, "strh",
2868                     [(truncstorei16 GPR32z:$Rt,
2869                                     (am_indexed16 GPR64sp:$Rn,
2870                                                   uimm12s2:$offset))]>;
2871defm STRBB : StoreUIz<0b00, 0, 0b00, GPR32z, uimm12s1,  "strb",
2872                     [(truncstorei8 GPR32z:$Rt,
2873                                    (am_indexed8 GPR64sp:$Rn,
2874                                                 uimm12s1:$offset))]>;
2875
2876let AddedComplexity = 10 in {
2877
2878// Match all 64-bit-wide stores whose type is compatible with FPR64
2879def : Pat<(store (v1i64 FPR64:$Rt),
2880                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2881          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2882def : Pat<(store (v1f64 FPR64:$Rt),
2883                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2884          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2885
2886let Predicates = [IsLE] in {
2887  // We must use ST1 to store vectors in big-endian.
2888  def : Pat<(store (v2f32 FPR64:$Rt),
2889                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2890            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2891  def : Pat<(store (v8i8 FPR64:$Rt),
2892                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2893            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2894  def : Pat<(store (v4i16 FPR64:$Rt),
2895                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2896            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2897  def : Pat<(store (v2i32 FPR64:$Rt),
2898                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2899            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2900  def : Pat<(store (v4f16 FPR64:$Rt),
2901                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2902            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2903}
2904
2905// Match all 128-bit-wide stores whose type is compatible with FPR128
2906def : Pat<(store (f128  FPR128:$Rt),
2907                 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2908          (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2909
2910let Predicates = [IsLE] in {
2911  // We must use ST1 to store vectors in big-endian.
2912  def : Pat<(store (v4f32 FPR128:$Rt),
2913                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2914            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2915  def : Pat<(store (v2f64 FPR128:$Rt),
2916                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2917            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2918  def : Pat<(store (v16i8 FPR128:$Rt),
2919                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2920            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2921  def : Pat<(store (v8i16 FPR128:$Rt),
2922                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2923            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2924  def : Pat<(store (v4i32 FPR128:$Rt),
2925                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2926            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2927  def : Pat<(store (v2i64 FPR128:$Rt),
2928                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2929            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2930  def : Pat<(store (v8f16 FPR128:$Rt),
2931                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2932            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2933}
2934
2935// truncstore i64
2936def : Pat<(truncstorei32 GPR64:$Rt,
2937                         (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
2938  (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
2939def : Pat<(truncstorei16 GPR64:$Rt,
2940                         (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
2941  (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
2942def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
2943  (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;
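// i.e. a truncating store of an i64 simply stores the low half through the
// matching W-register form (strb/strh/str w); EXTRACT_SUBREG sub_32 is a
// no-op register rename, so no explicit truncation is emitted.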
2944
2945} // AddedComplexity = 10
2946
2947// Match stores from lane 0 to the appropriate subreg's store.
2948multiclass VecStoreLane0Pat<Operand UIAddrMode, SDPatternOperator storeop,
2949                            ValueType VTy, ValueType STy,
2950                            SubRegIndex SubRegIdx, Operand IndexType,
2951                            Instruction STR> {
2952  def : Pat<(storeop (STy (vector_extract (VTy VecListOne128:$Vt), 0)),
2953                     (UIAddrMode GPR64sp:$Rn, IndexType:$offset)),
2954            (STR (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
2955                 GPR64sp:$Rn, IndexType:$offset)>;
2956}
2957
2958let AddedComplexity = 19 in {
2959  defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>;
2960  defm : VecStoreLane0Pat<am_indexed16,         store, v8f16, f16, hsub, uimm12s2, STRHui>;
2961  defm : VecStoreLane0Pat<am_indexed32,         store, v4i32, i32, ssub, uimm12s4, STRSui>;
2962  defm : VecStoreLane0Pat<am_indexed32,         store, v4f32, f32, ssub, uimm12s4, STRSui>;
2963  defm : VecStoreLane0Pat<am_indexed64,         store, v2i64, i64, dsub, uimm12s8, STRDui>;
2964  defm : VecStoreLane0Pat<am_indexed64,         store, v2f64, f64, dsub, uimm12s8, STRDui>;
2965}
2966
2967//---
2968// (unscaled immediate)
2969defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64z, "stur",
2970                         [(store GPR64z:$Rt,
2971                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
2972defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32z, "stur",
2973                         [(store GPR32z:$Rt,
2974                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
2975defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8Op, "stur",
2976                         [(store FPR8Op:$Rt,
2977                                 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
2978defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16Op, "stur",
2979                         [(store (f16 FPR16Op:$Rt),
2980                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
2981defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32Op, "stur",
2982                         [(store (f32 FPR32Op:$Rt),
2983                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
2984defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64Op, "stur",
2985                         [(store (f64 FPR64Op:$Rt),
2986                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
2987defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128Op, "stur",
2988                         [(store (f128 FPR128Op:$Rt),
2989                                 (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
2990defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32z, "sturh",
2991                         [(truncstorei16 GPR32z:$Rt,
2992                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
2993defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32z, "sturb",
2994                         [(truncstorei8 GPR32z:$Rt,
2995                                  (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
2996
2997// Armv8.4 Weaker Release Consistency enhancements:
2998// LDAPR & STLR with immediate-offset instructions
2999let Predicates = [HasRCPC_IMMO] in {
3000defm STLURB     : BaseStoreUnscaleV84<"stlurb",  0b00, 0b00, GPR32>;
3001defm STLURH     : BaseStoreUnscaleV84<"stlurh",  0b01, 0b00, GPR32>;
3002defm STLURW     : BaseStoreUnscaleV84<"stlur",   0b10, 0b00, GPR32>;
3003defm STLURX     : BaseStoreUnscaleV84<"stlur",   0b11, 0b00, GPR64>;
3004defm LDAPURB    : BaseLoadUnscaleV84<"ldapurb",  0b00, 0b01, GPR32>;
3005defm LDAPURSBW  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b11, GPR32>;
3006defm LDAPURSBX  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b10, GPR64>;
3007defm LDAPURH    : BaseLoadUnscaleV84<"ldapurh",  0b01, 0b01, GPR32>;
3008defm LDAPURSHW  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b11, GPR32>;
3009defm LDAPURSHX  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b10, GPR64>;
3010defm LDAPUR     : BaseLoadUnscaleV84<"ldapur",   0b10, 0b01, GPR32>;
3011defm LDAPURSW   : BaseLoadUnscaleV84<"ldapursw", 0b10, 0b10, GPR64>;
3012defm LDAPURX    : BaseLoadUnscaleV84<"ldapur",   0b11, 0b01, GPR64>;
3013}
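// Like the other unscaled forms, these take a signed 9-bit byte offset,
// e.g. "ldapur w0, [x1, #-4]", pairing acquire/release semantics with an
// immediate offset.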
3014
3015// Match all 64-bit-wide stores whose type is compatible with FPR64
3016def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3017          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3018def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3019          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3020
3021let AddedComplexity = 10 in {
3022
3023let Predicates = [IsLE] in {
3024  // We must use ST1 to store vectors in big-endian.
3025  def : Pat<(store (v2f32 FPR64:$Rt),
3026                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3027            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3028  def : Pat<(store (v8i8 FPR64:$Rt),
3029                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3030            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3031  def : Pat<(store (v4i16 FPR64:$Rt),
3032                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3033            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3034  def : Pat<(store (v2i32 FPR64:$Rt),
3035                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3036            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3037  def : Pat<(store (v4f16 FPR64:$Rt),
3038                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3039            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3040}
3041
3042// Match all 128-bit-wide stores whose type is compatible with FPR128
3043def : Pat<(store (f128 FPR128:$Rt), (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3044          (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3045
3046let Predicates = [IsLE] in {
3047  // We must use ST1 to store vectors in big-endian.
3048  def : Pat<(store (v4f32 FPR128:$Rt),
3049                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3050            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3051  def : Pat<(store (v2f64 FPR128:$Rt),
3052                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3053            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3054  def : Pat<(store (v16i8 FPR128:$Rt),
3055                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3056            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3057  def : Pat<(store (v8i16 FPR128:$Rt),
3058                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3059            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3060  def : Pat<(store (v4i32 FPR128:$Rt),
3061                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3062            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3063  def : Pat<(store (v2i64 FPR128:$Rt),
3064                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3065            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3069  def : Pat<(store (v8f16 FPR128:$Rt),
3070                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3071            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3072}
3073
3074} // AddedComplexity = 10
3075
3076// unscaled i64 truncating stores
3077def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
3078  (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
3079def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
3080  (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
3081def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
3082  (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
3083
3084// Match stores from lane 0 to the appropriate subreg's store.
3085multiclass VecStoreULane0Pat<SDPatternOperator StoreOp,
3086                             ValueType VTy, ValueType STy,
3087                             SubRegIndex SubRegIdx, Instruction STR> {
3088  defm : VecStoreLane0Pat<am_unscaled128, StoreOp, VTy, STy, SubRegIdx, simm9, STR>;
3089}
3090
3091let AddedComplexity = 19 in {
3092  defm : VecStoreULane0Pat<truncstorei16, v8i16, i32, hsub, STURHi>;
3093  defm : VecStoreULane0Pat<store,         v8f16, f16, hsub, STURHi>;
3094  defm : VecStoreULane0Pat<store,         v4i32, i32, ssub, STURSi>;
3095  defm : VecStoreULane0Pat<store,         v4f32, f32, ssub, STURSi>;
3096  defm : VecStoreULane0Pat<store,         v2i64, i64, dsub, STURDi>;
3097  defm : VecStoreULane0Pat<store,         v2f64, f64, dsub, STURDi>;
3098}
3099
3100//---
3101// STR mnemonics fall back to STUR for negative or unaligned offsets.
3102def : InstAlias<"str $Rt, [$Rn, $offset]",
3103                (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
3104def : InstAlias<"str $Rt, [$Rn, $offset]",
3105                (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
3106def : InstAlias<"str $Rt, [$Rn, $offset]",
3107                (STURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3108def : InstAlias<"str $Rt, [$Rn, $offset]",
3109                (STURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3110def : InstAlias<"str $Rt, [$Rn, $offset]",
3111                (STURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
3112def : InstAlias<"str $Rt, [$Rn, $offset]",
3113                (STURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
3114def : InstAlias<"str $Rt, [$Rn, $offset]",
3115                (STURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
3116
3117def : InstAlias<"strb $Rt, [$Rn, $offset]",
3118                (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3119def : InstAlias<"strh $Rt, [$Rn, $offset]",
3120                (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3121
3122//---
3123// (unscaled immediate, unprivileged)
3124defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
3125defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;
3126
3127defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
3128defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;
3129
3130//---
3131// (immediate pre-indexed)
3132def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32z, "str",  pre_store, i32>;
3133def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64z, "str",  pre_store, i64>;
3134def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8Op,  "str",  pre_store, untyped>;
3135def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16Op, "str",  pre_store, f16>;
3136def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32Op, "str",  pre_store, f32>;
3137def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64Op, "str",  pre_store, f64>;
3138def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128Op, "str", pre_store, f128>;
3139
3140def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32z, "strb", pre_truncsti8,  i32>;
3141def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32z, "strh", pre_truncsti16, i32>;
3142
3143// truncstore i64
3144def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3145  (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3146           simm9:$off)>;
3147def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3148  (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3149            simm9:$off)>;
3150def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3151  (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3152            simm9:$off)>;
3153
3154def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3155          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3156def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3157          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3158def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3159          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3160def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3161          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3162def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3163          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3164def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3165          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3166def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3167          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3168
3169def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3170          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3171def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3172          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3173def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3174          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3175def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3176          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3177def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3178          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3179def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3180          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3181def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3182          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
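// e.g. a pre-indexed store "str d0, [x0, #16]!" writes both the data and
// the updated base address in one instruction; the pre_store nodes matched
// above are produced by the DAG combiner's pre-index address
// transformation.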
3183
3184//---
3185// (immediate post-indexed)
3186def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32z,  "str", post_store, i32>;
3187def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64z,  "str", post_store, i64>;
3188def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8Op,   "str", post_store, untyped>;
3189def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16Op,  "str", post_store, f16>;
3190def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32Op,  "str", post_store, f32>;
3191def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64Op,  "str", post_store, f64>;
3192def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128Op, "str", post_store, f128>;
3193
3194def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32z, "strb", post_truncsti8, i32>;
3195def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32z, "strh", post_truncsti16, i32>;
3196
3197// truncstore i64
3198def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3199  (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3200            simm9:$off)>;
3201def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3202  (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3203             simm9:$off)>;
3204def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3205  (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3206             simm9:$off)>;
3207
3208def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3209          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3210def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3211          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3212def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3213          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3214def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3215          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3216def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3217          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3218def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3219          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3220def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3221          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3222
3223def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3224          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3225def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3226          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3227def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3228          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3229def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3230          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3231def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3232          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3233def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3234          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3235def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3236          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3237
3238//===----------------------------------------------------------------------===//
3239// Load/store exclusive instructions.
3240//===----------------------------------------------------------------------===//
3241
3242def LDARW  : LoadAcquire   <0b10, 1, 1, 0, 1, GPR32, "ldar">;
3243def LDARX  : LoadAcquire   <0b11, 1, 1, 0, 1, GPR64, "ldar">;
3244def LDARB  : LoadAcquire   <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
3245def LDARH  : LoadAcquire   <0b01, 1, 1, 0, 1, GPR32, "ldarh">;
3246
3247def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
3248def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
3249def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
3250def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;
3251
3252def LDXRW  : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
3253def LDXRX  : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
3254def LDXRB  : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
3255def LDXRH  : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;
3256
3257def STLRW  : StoreRelease  <0b10, 1, 0, 0, 1, GPR32, "stlr">;
3258def STLRX  : StoreRelease  <0b11, 1, 0, 0, 1, GPR64, "stlr">;
3259def STLRB  : StoreRelease  <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
3260def STLRH  : StoreRelease  <0b01, 1, 0, 0, 1, GPR32, "stlrh">;
3261
3262def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
3263def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
3264def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
3265def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;
3266
3267def STXRW  : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
3268def STXRX  : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
3269def STXRB  : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
3270def STXRH  : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;
3271
3272def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
3273def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;
3274
3275def LDXPW  : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
3276def LDXPX  : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;
3277
3278def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
3279def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;
3280
3281def STXPW  : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
3282def STXPX  : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;
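// Sketch of how these combine into a load/store-exclusive retry loop for
// an atomic increment (illustrative only; the actual expansion goes
// through atomic pseudo-instructions):
//   0: ldaxr x8, [x0]        // load-acquire exclusive
//      add   x8, x8, #1
//      stlxr w9, x8, [x0]    // store-release exclusive; w9 = 0 on success
//      cbnz  w9, 0b          // retry if the exclusive monitor was lost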
3283
3284let Predicates = [HasLOR] in {
3285  // v8.1a "Limited Order Region" extension load-acquire instructions
3286  def LDLARW  : LoadAcquire   <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
3287  def LDLARX  : LoadAcquire   <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
3288  def LDLARB  : LoadAcquire   <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
3289  def LDLARH  : LoadAcquire   <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;
3290
3291  // v8.1a "Limited Order Region" extension store-release instructions
3292  def STLLRW  : StoreRelease   <0b10, 1, 0, 0, 0, GPR32, "stllr">;
3293  def STLLRX  : StoreRelease   <0b11, 1, 0, 0, 0, GPR64, "stllr">;
3294  def STLLRB  : StoreRelease   <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
3295  def STLLRH  : StoreRelease   <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
3296}
3297
3298//===----------------------------------------------------------------------===//
3299// Floating point to integer conversion instructions.
3300//===----------------------------------------------------------------------===//

defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;

multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
  def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
  def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
  def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
  def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
  def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
  def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;

  def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
  def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
  def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
}

defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;
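
// Editorial note (illustrative, not part of the original source): the
// fixedpoint_* operands match a floating-point constant of the form 2^n,
// so a multiply by such a constant folds into the scaled conversion, e.g.
//   (i32 (int_aarch64_neon_fcvtzs (fmul f32:$Rn, 16.0)))
// selects the fixed-point form "fcvtzs w0, s0, #4", which converts
// s0 * 2^4 in a single instruction.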

multiclass FPToIntegerPats<SDNode to_int, SDNode round, string INST> {
  def : Pat<(i32 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
}

defm : FPToIntegerPats<fp_to_sint, fceil,  "FCVTPS">;
defm : FPToIntegerPats<fp_to_uint, fceil,  "FCVTPU">;
defm : FPToIntegerPats<fp_to_sint, ffloor, "FCVTMS">;
defm : FPToIntegerPats<fp_to_uint, ffloor, "FCVTMU">;
defm : FPToIntegerPats<fp_to_sint, ftrunc, "FCVTZS">;
defm : FPToIntegerPats<fp_to_uint, ftrunc, "FCVTZU">;
defm : FPToIntegerPats<fp_to_sint, fround, "FCVTAS">;
defm : FPToIntegerPats<fp_to_uint, fround, "FCVTAU">;
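
// Editorial sketch: these folds exploit the directed-rounding converts.
// For example, the two-operation sequence
//   %r = call float @llvm.floor.f32(float %x)
//   %i = fptosi float %r to i32
// collapses into the single "fcvtms w0, s0" (convert rounding toward
// minus infinity); likewise fceil -> FCVTP*, ftrunc -> FCVTZ*, and
// fround (ties away from zero) -> FCVTA*.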

let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (lround f16:$Rn)),
            (!cast<Instruction>(FCVTASUWHr) f16:$Rn)>;
  def : Pat<(i64 (lround f16:$Rn)),
            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
  def : Pat<(i64 (llround f16:$Rn)),
            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
}
def : Pat<(i32 (lround f32:$Rn)),
          (!cast<Instruction>(FCVTASUWSr) f32:$Rn)>;
def : Pat<(i32 (lround f64:$Rn)),
          (!cast<Instruction>(FCVTASUWDr) f64:$Rn)>;
def : Pat<(i64 (lround f32:$Rn)),
          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
def : Pat<(i64 (lround f64:$Rn)),
          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
def : Pat<(i64 (llround f32:$Rn)),
          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
def : Pat<(i64 (llround f64:$Rn)),
          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;

//===----------------------------------------------------------------------===//
// Scaled integer to floating point conversion instructions.
//===----------------------------------------------------------------------===//

defm SCVTF : IntegerToFP<0, "scvtf", any_sint_to_fp>;
defm UCVTF : IntegerToFP<1, "ucvtf", any_uint_to_fp>;

//===----------------------------------------------------------------------===//
// Unscaled integer to floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FMOV : UnscaledConversion<"fmov">;

// Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
def FMOVH0 : Pseudo<(outs FPR16:$Rd), (ins), [(set f16:$Rd, (fpimm0))]>,
    Sched<[WriteF]>, Requires<[HasFullFP16]>;
def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
    Sched<[WriteF]>;
def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
    Sched<[WriteF]>;
}
// Similarly add aliases
def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
    Requires<[HasFullFP16]>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;
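
// Editorial note: "fmov s0, wzr" writes the all-zero bit pattern, which
// is +0.0 in every IEEE format, so the pseudos above can be cheaply
// rematerialized instead of being spilled and reloaded.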

//===----------------------------------------------------------------------===//
// Floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FCVT : FPConversion<"fcvt">;

//===----------------------------------------------------------------------===//
// Floating point single operand instructions.
//===----------------------------------------------------------------------===//

defm FABS   : SingleOperandFPData<0b0001, "fabs", fabs>;
defm FMOV   : SingleOperandFPData<0b0000, "fmov">;
defm FNEG   : SingleOperandFPData<0b0010, "fneg", fneg>;
defm FRINTA : SingleOperandFPData<0b1100, "frinta", fround>;
defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
defm FRINTN : SingleOperandFPData<0b1000, "frintn", int_aarch64_neon_frintn>;
defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;

def : Pat<(v1f64 (int_aarch64_neon_frintn (v1f64 FPR64:$Rn))),
          (FRINTNDr FPR64:$Rn)>;

defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;

let SchedRW = [WriteFDiv] in {
defm FSQRT  : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
}

let Predicates = [HasFRInt3264] in {
  defm FRINT32Z : FRIntNNT<0b00, "frint32z">;
  defm FRINT64Z : FRIntNNT<0b10, "frint64z">;
  defm FRINT32X : FRIntNNT<0b01, "frint32x">;
  defm FRINT64X : FRIntNNT<0b11, "frint64x">;
} // HasFRInt3264

let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (lrint f16:$Rn)),
            (FCVTZSUWHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
  def : Pat<(i64 (lrint f16:$Rn)),
            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
  def : Pat<(i64 (llrint f16:$Rn)),
            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
}
def : Pat<(i32 (lrint f32:$Rn)),
          (FCVTZSUWSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i32 (lrint f64:$Rn)),
          (FCVTZSUWDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
def : Pat<(i64 (lrint f32:$Rn)),
          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i64 (lrint f64:$Rn)),
          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
def : Pat<(i64 (llrint f32:$Rn)),
          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i64 (llrint f64:$Rn)),
          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
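
// Editorial sketch: lrint/llrint must round in the current FPCR mode and
// may raise the inexact exception, which is exactly what FRINTX does; its
// result is already integral, so the trailing FCVTZS cannot round again:
//   frintx s0, s0
//   fcvtzs w0, s0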

//===----------------------------------------------------------------------===//
// Floating point two operand instructions.
//===----------------------------------------------------------------------===//

defm FADD   : TwoOperandFPData<0b0010, "fadd", fadd>;
let SchedRW = [WriteFDiv] in {
defm FDIV   : TwoOperandFPData<0b0001, "fdiv", fdiv>;
}
defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", fmaxnum>;
defm FMAX   : TwoOperandFPData<0b0100, "fmax", fmaximum>;
defm FMINNM : TwoOperandFPData<0b0111, "fminnm", fminnum>;
defm FMIN   : TwoOperandFPData<0b0101, "fmin", fminimum>;
let SchedRW = [WriteFMul] in {
defm FMUL   : TwoOperandFPData<0b0000, "fmul", fmul>;
defm FNMUL  : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
}
defm FSUB   : TwoOperandFPData<0b0011, "fsub", fsub>;

def : Pat<(v1f64 (fmaximum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminimum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;

//===----------------------------------------------------------------------===//
// Floating point three operand instructions.
//===----------------------------------------------------------------------===//

defm FMADD  : ThreeOperandFPData<0, 0, "fmadd", fma>;
defm FMSUB  : ThreeOperandFPData<0, 1, "fmsub",
     TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
     TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
     TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;

// The following def pats catch the case where the LHS of an FMA is negated.
// The TriOpFrag above catches the case where the middle operand is negated.

// N.b. FMSUB etc have the accumulator at the *end* of the operand list,
// unlike the NEON variant.

// First, handle fma(-a, b, c) = c - a*b, which maps onto FMSUB:

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, FPR16:$Ra)),
          (FMSUBHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
          (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
          (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

// Now it's time for "(-a) + (-b)*c"

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, (fneg FPR16:$Ra))),
          (FNMADDHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

// And here "(-a) + b*(-c)"

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma FPR16:$Rn, (fneg FPR16:$Rm), (fneg FPR16:$Ra))),
          (FNMADDHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma FPR32:$Rn, (fneg FPR32:$Rm), (fneg FPR32:$Ra))),
          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma FPR64:$Rn, (fneg FPR64:$Rm), (fneg FPR64:$Ra))),
          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
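
// Editorial check (worked algebra): FNMADD computes -(Ra + Rn*Rm), and
//   fma(-a, b, -c) = (-a)*b + (-c) = -(a*b + c)
//   fma(a, -b, -c) = a*(-b) + (-c) = -(a*b + c)
// so both groups of negation patterns above select the same FNMADD.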

//===----------------------------------------------------------------------===//
// Floating point comparison instructions.
//===----------------------------------------------------------------------===//

defm FCMPE : FPComparison<1, "fcmpe", AArch64strict_fcmpe>;
defm FCMP  : FPComparison<0, "fcmp", AArch64any_fcmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional comparison instructions.
//===----------------------------------------------------------------------===//

defm FCCMPE : FPCondComparison<1, "fccmpe">;
defm FCCMP  : FPCondComparison<0, "fccmp", AArch64fccmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional select instruction.
//===----------------------------------------------------------------------===//

defm FCSEL : FPCondSelect<"fcsel">;

// CSEL instructions providing f128 types need to be handled by a
// pseudo-instruction since the eventual code will need to introduce basic
// blocks and control flow.
def F128CSEL : Pseudo<(outs FPR128:$Rd),
                      (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
                      [(set (f128 FPR128:$Rd),
                            (AArch64csel FPR128:$Rn, FPR128:$Rm,
                                       (i32 imm:$cond), NZCV))]> {
  let Uses = [NZCV];
  let usesCustomInserter = 1;
  let hasNoSchedulingInfo = 1;
}
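
// Expansion sketch (editorial assumption about the custom inserter):
// there is no f128 conditional-select encoding, so the inserter builds
// the usual select diamond around a conditional branch, roughly
//   b.<cond> %true.bb            ; taken edge supplies $Rn
//   ; fallthrough edge supplies $Rm
//   %end.bb: $Rd = PHI($Rn from %true.bb, $Rm from the fallthrough)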

//===----------------------------------------------------------------------===//
// Instructions used for emitting unwind opcodes on ARM64 Windows.
//===----------------------------------------------------------------------===//
let isPseudo = 1 in {
  def SEH_StackAlloc : Pseudo<(outs), (ins i32imm:$size), []>, Sched<[]>;
  def SEH_SaveFPLR : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFPLR_X : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFReg_X :  Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SetFP : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_AddFP : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_Nop : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_PrologEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_EpilogStart : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_EpilogEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions for Windows EH
//===----------------------------------------------------------------------===//
let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1, isPseudo = 1 in {
   def CLEANUPRET : Pseudo<(outs), (ins), [(cleanupret)]>, Sched<[]>;
   let usesCustomInserter = 1 in
     def CATCHRET : Pseudo<(outs), (ins am_brcond:$dst, am_brcond:$src), [(catchret bb:$dst, bb:$src)]>,
                    Sched<[]>;
}

let hasSideEffects = 1, hasCtrlDep = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in
def CATCHPAD : Pseudo<(outs), (ins), [(catchpad)]>, Sched<[]>;

//===----------------------------------------------------------------------===//
// Floating point immediate move.
//===----------------------------------------------------------------------===//

let isReMaterializable = 1 in {
defm FMOV : FPMoveImmediate<"fmov">;
}

//===----------------------------------------------------------------------===//
// Advanced SIMD two vector instructions.
//===----------------------------------------------------------------------===//

defm UABDL   : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
                                          int_aarch64_neon_uabd>;
// Match UABDL in log2-shuffle patterns.
def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
                           (zext (v8i8 V64:$opB))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (v8i8 V64:$opA)),
                                (zext (v8i8 V64:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 V128:$opA)),
                           (zext (extract_high_v16i8 V128:$opB))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
                                (zext (extract_high_v16i8 V128:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
                           (zext (v4i16 V64:$opB))))),
          (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 V128:$opA)),
                           (zext (extract_high_v8i16 V128:$opB))))),
          (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
                           (zext (v2i32 V64:$opB))))),
          (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 V128:$opA)),
                           (zext (extract_high_v4i32 V128:$opB))))),
          (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;
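
// Editorial note: |zext(a) - zext(b)| cannot overflow the widened type,
// so the abs-of-difference trees above are exactly "uabdl". The
// xor/ashr/add variants are the same absolute value written out as
//   (x + (x >>s 15)) ^ (x >>s 15)
// i.e. abs lowered without an abs node.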

defm ABS    : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
defm CLS    : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
defm CLZ    : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
defm CMEQ   : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
defm CNT    : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
defm FABS   : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;

defm FCMEQ  : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
defm FCVTL  : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
          (FCVTLv4i16 V64:$Rn)>;
def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
                                                              (i64 4)))),
          (FCVTLv8i16 V128:$Rn)>;
def : Pat<(v2f64 (fpextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;

def : Pat<(v4f32 (fpextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;

defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
defm FCVTN  : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
          (FCVTNv4i16 V128:$Rn)>;
def : Pat<(concat_vectors V64:$Rd,
                          (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
          (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
def : Pat<(v2f32 (fpround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
def : Pat<(v4f16 (fpround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
def : Pat<(concat_vectors V64:$Rd, (v2f32 (fpround (v2f64 V128:$Rn)))),
          (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
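// Editorial note: the concat_vectors forms above are the "fcvtn2" write
// to the high half, e.g. two v2f64 values narrow into one v4f32 as
//   fcvtn  v0.2s, v1.2d
//   fcvtn2 v0.4s, v2.2d
// with no separate insert needed, since the low half is preserved.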
defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
                                        int_aarch64_neon_fcvtxn>;
defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;

def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;

def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;

defm FNEG   : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", fround>;
defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", int_aarch64_neon_frintn>;
defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;

let Predicates = [HasFRInt3264] in {
  defm FRINT32Z : FRIntNNTVector<0, 0, "frint32z">;
  defm FRINT64Z : FRIntNNTVector<0, 1, "frint64z">;
  defm FRINT32X : FRIntNNTVector<1, 0, "frint32x">;
  defm FRINT64X : FRIntNNTVector<1, 1, "frint64x">;
} // HasFRInt3264

defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
defm FSQRT  : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
defm NEG    : SIMDTwoVectorBHSD<1, 0b01011, "neg",
                               UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm NOT    : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
// Aliases for MVN -> NOT.
def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
                (NOTv8i8 V64:$Vd, V64:$Vn)>;
def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
                (NOTv16i8 V128:$Vd, V128:$Vn)>;

def : Pat<(AArch64neg (v8i8  V64:$Rn)),  (NEGv8i8  V64:$Rn)>;
def : Pat<(AArch64neg (v16i8 V128:$Rn)), (NEGv16i8 V128:$Rn)>;
def : Pat<(AArch64neg (v4i16 V64:$Rn)),  (NEGv4i16 V64:$Rn)>;
def : Pat<(AArch64neg (v8i16 V128:$Rn)), (NEGv8i16 V128:$Rn)>;
def : Pat<(AArch64neg (v2i32 V64:$Rn)),  (NEGv2i32 V64:$Rn)>;
def : Pat<(AArch64neg (v4i32 V128:$Rn)), (NEGv4i32 V128:$Rn)>;
def : Pat<(AArch64neg (v2i64 V128:$Rn)), (NEGv2i64 V128:$Rn)>;

def : Pat<(AArch64not (v8i8 V64:$Rn)),   (NOTv8i8  V64:$Rn)>;
def : Pat<(AArch64not (v16i8 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(AArch64not (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(AArch64not (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(AArch64not (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(AArch64not (v1i64 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(AArch64not (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(AArch64not (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;

def : Pat<(vnot (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(vnot (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
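
// Editorial note: bitwise NOT is element-size agnostic, so every vector
// type above reuses the byte-sized NOTv8i8/NOTv16i8 encodings.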

defm RBIT   : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", int_aarch64_neon_rbit>;
defm REV16  : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
defm REV32  : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
defm REV64  : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
       BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >;
defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>;
defm SCVTF  : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
defm SHLL   : SIMDVectorLShiftLongBySizeBHS;
defm SQABS  : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
       BinOpFrag<(add node:$LHS, (int_aarch64_neon_uaddlp node:$RHS))> >;
defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp",
                    int_aarch64_neon_uaddlp>;
defm UCVTF  : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
defm UQXTN  : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
defm XTN    : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;

def : Pat<(v4f16 (AArch64rev32 V64:$Rn)), (REV32v4i16 V64:$Rn)>;
def : Pat<(v4f16 (AArch64rev64 V64:$Rn)), (REV64v4i16 V64:$Rn)>;
def : Pat<(v8f16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
def : Pat<(v8f16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
def : Pat<(v2f32 (AArch64rev64 V64:$Rn)), (REV64v2i32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;

// Patterns for vector long shift (by element width). These need to match all
// three of zext, sext and anyext, so it is easier to pull the patterns out
// into a multiclass than to attach them to the instruction definition.
multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
  def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
            (SHLLv8i8 V64:$Rn)>;
  def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
            (SHLLv16i8 V128:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
            (SHLLv4i16 V64:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
            (SHLLv8i16 V128:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
            (SHLLv2i32 V64:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
            (SHLLv4i32 V128:$Rn)>;
}

defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
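
// Editorial note: the shift amount always equals the source element width
// (8, 16 or 32), so the extended bits are shifted out entirely and the
// result is the same for all three extends; e.g.
//   shll v0.8h, v1.8b, #8    ; (zext|sext|anyext v1.8b) << 8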

//===----------------------------------------------------------------------===//
// Advanced SIMD three vector instructions.
//===----------------------------------------------------------------------===//

defm ADD     : SIMDThreeSameVector<0, 0b10000, "add", add>;
defm ADDP    : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>;
defm CMEQ    : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE    : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT    : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI    : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS    : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST   : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD    : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>;
let Predicates = [HasNEON] in {
foreach VT = [ v2f32, v4f32, v2f64 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
let Predicates = [HasNEON, HasFullFP16] in {
foreach VT = [ v4f16, v8f16 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
defm FACGE   : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>;
defm FACGT   : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>;
defm FADDP   : SIMDThreeSameVectorFP<1,0,0b010,"faddp",int_aarch64_neon_faddp>;
defm FADD    : SIMDThreeSameVectorFP<0,0,0b010,"fadd", fadd>;
defm FCMEQ   : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE   : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT   : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FDIV    : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", fdiv>;
defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
defm FMAXNM  : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", fmaxnum>;
defm FMAXP   : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>;
defm FMAX    : SIMDThreeSameVectorFP<0,0,0b110,"fmax", fmaximum>;
defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>;
defm FMINNM  : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", fminnum>;
defm FMINP   : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
defm FMIN    : SIMDThreeSameVectorFP<0,1,0b110,"fmin", fminimum>;

// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
// instruction expects the addend first, while the fma intrinsic puts it last.
defm FMLA     : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
            TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm FMLS     : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls",
            TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
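
// Editorial example: "fmla v0.4s, v1.4s, v2.4s" computes v0 + v1*v2, with
// the addend in the tied destination (first) operand, whereas
// fma(a, b, c) = a*b + c carries the addend last; hence the
// $RHS/$MHS/$LHS reordering in the fragments above.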

// The following def pats catch the case where the LHS of an FMA is negated.
// The TriOpFrag above catches the case where the middle operand is negated.
def : Pat<(v2f32 (fma (fneg V64:$Rn), V64:$Rm, V64:$Rd)),
          (FMLSv2f32 V64:$Rd, V64:$Rn, V64:$Rm)>;

def : Pat<(v4f32 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
          (FMLSv4f32 V128:$Rd, V128:$Rn, V128:$Rm)>;

def : Pat<(v2f64 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
          (FMLSv2f64 V128:$Rd, V128:$Rn, V128:$Rm)>;

defm FMULX    : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
defm FMUL     : SIMDThreeSameVectorFP<1,0,0b011,"fmul", fmul>;
defm FRECPS   : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
defm FRSQRTS  : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
defm FSUB     : SIMDThreeSameVectorFP<0,1,0b010,"fsub", fsub>;

// MLA and MLS are generated by the MachineCombiner pass.
defm MLA      : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla", null_frag>;
defm MLS      : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls", null_frag>;

defm MUL      : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
defm PMUL     : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
defm SABA     : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
      TriOpFrag<(add node:$LHS, (int_aarch64_neon_sabd node:$MHS, node:$RHS))> >;
defm SABD     : SIMDThreeSameVectorBHS<0,0b01110,"sabd", int_aarch64_neon_sabd>;
defm SHADD    : SIMDThreeSameVectorBHS<0,0b00000,"shadd", int_aarch64_neon_shadd>;
defm SHSUB    : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
defm SMAXP    : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
defm SMAX     : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
defm SMINP    : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
defm SMIN     : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
defm SQADD    : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
defm SRHADD   : SIMDThreeSameVectorBHS<0,0b00010,"srhadd",int_aarch64_neon_srhadd>;
defm SRSHL    : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeSameVector<1,0b10000,"sub", sub>;
defm UABA     : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
      TriOpFrag<(add node:$LHS, (int_aarch64_neon_uabd node:$MHS, node:$RHS))> >;
defm UABD     : SIMDThreeSameVectorBHS<1,0b01110,"uabd", int_aarch64_neon_uabd>;
defm UHADD    : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", int_aarch64_neon_uhadd>;
defm UHSUB    : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
defm UMAXP    : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
defm UMAX     : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
defm UMINP    : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
defm UMIN     : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
defm UQADD    : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
defm URHADD   : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", int_aarch64_neon_urhadd>;
defm URSHL    : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
                                                  int_aarch64_neon_sqadd>;
defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
                                                    int_aarch64_neon_sqsub>;

// Extra saturation patterns, beyond the intrinsic matches above.
defm : SIMDThreeSameVectorExtraPatterns<"SQADD", saddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQADD", uaddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"SQSUB", ssubsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQSUB", usubsat>;

defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
                                  BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
defm BIF : SIMDLogicalThreeVector<1, 0b11, "bif">;
defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl",
    TriOpFrag<(or (and node:$LHS, node:$MHS), (and (vnot node:$LHS), node:$RHS))>>;
defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
                                  BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;
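
// Editorial note: in the BSL fragment the tied operand $LHS acts as a bit
// mask: dst = (mask & $MHS) | (~mask & $RHS), i.e. each result bit comes
// from the second source where the mask bit is 1 and the third otherwise.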


def : Pat<(AArch64bsl (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsl (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsl (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsl (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;

def : Pat<(AArch64bsl (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsl (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsl (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsl (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;

def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;

def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;

def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmls.8b\t$dst, $src1, $src2}",
                (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmls.16b\t$dst, $src1, $src2}",
                (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmls.4h\t$dst, $src1, $src2}",
                (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmls.8h\t$dst, $src1, $src2}",
                (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmls.2s\t$dst, $src1, $src2}",
                (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmls.4s\t$dst, $src1, $src2}",
                (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmls.2d\t$dst, $src1, $src2}",
                (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlo.8b\t$dst, $src1, $src2}",
                (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlo.16b\t$dst, $src1, $src2}",
                (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlo.4h\t$dst, $src1, $src2}",
                (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlo.8h\t$dst, $src1, $src2}",
                (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlo.2s\t$dst, $src1, $src2}",
                (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlo.4s\t$dst, $src1, $src2}",
                (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlo.2d\t$dst, $src1, $src2}",
                (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmle.8b\t$dst, $src1, $src2}",
                (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmle.16b\t$dst, $src1, $src2}",
                (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmle.4h\t$dst, $src1, $src2}",
                (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmle.8h\t$dst, $src1, $src2}",
                (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmle.2s\t$dst, $src1, $src2}",
                (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmle.4s\t$dst, $src1, $src2}",
                (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmle.2d\t$dst, $src1, $src2}",
                (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlt.8b\t$dst, $src1, $src2}",
                (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlt.16b\t$dst, $src1, $src2}",
                (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlt.4h\t$dst, $src1, $src2}",
                (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlt.8h\t$dst, $src1, $src2}",
                (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlt.2s\t$dst, $src1, $src2}",
                (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlt.4s\t$dst, $src1, $src2}",
                (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlt.2d\t$dst, $src1, $src2}",
                (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
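
// Editorial note: these "less-than" mnemonics have no encodings of their
// own; each alias is its greater-than counterpart with the source
// operands swapped, e.g.
//   cmls v0.8b, v1.8b, v2.8b  ==  cmhs v0.8b, v2.8b, v1.8b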

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmle.4h\t$dst, $src1, $src2}",
                (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmle.8h\t$dst, $src1, $src2}",
                (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmle.2s\t$dst, $src1, $src2}",
                (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmle.4s\t$dst, $src1, $src2}",
                (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmle.2d\t$dst, $src1, $src2}",
                (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmlt.4h\t$dst, $src1, $src2}",
                (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmlt.8h\t$dst, $src1, $src2}",
                (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmlt.2s\t$dst, $src1, $src2}",
                (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmlt.4s\t$dst, $src1, $src2}",
                (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmlt.2d\t$dst, $src1, $src2}",
                (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
                "|facle.4h\t$dst, $src1, $src2}",
                (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
                "|facle.8h\t$dst, $src1, $src2}",
                (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
                "|facle.2s\t$dst, $src1, $src2}",
                (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
                "|facle.4s\t$dst, $src1, $src2}",
                (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
                "|facle.2d\t$dst, $src1, $src2}",
                (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
                "|faclt.4h\t$dst, $src1, $src2}",
                (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
                "|faclt.8h\t$dst, $src1, $src2}",
                (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
                "|faclt.2s\t$dst, $src1, $src2}",
                (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
                "|faclt.4s\t$dst, $src1, $src2}",
                (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
                "|faclt.2d\t$dst, $src1, $src2}",
                (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions.
//===----------------------------------------------------------------------===//

defm ADD      : SIMDThreeScalarD<0, 0b10000, "add", add>;
defm CMEQ     : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE     : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT     : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI     : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS     : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST    : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD     : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FABD64 FPR64:$Rn, FPR64:$Rm)>;
let Predicates = [HasFullFP16] in {
def : Pat<(fabs (fsub f16:$Rn, f16:$Rm)), (FABD16 f16:$Rn, f16:$Rm)>;
}
def : Pat<(fabs (fsub f32:$Rn, f32:$Rm)), (FABD32 f32:$Rn, f32:$Rm)>;
def : Pat<(fabs (fsub f64:$Rn, f64:$Rm)), (FABD64 f64:$Rn, f64:$Rm)>;
defm FACGE    : SIMDThreeScalarFPCmp<1, 0, 0b101, "facge",
                                     int_aarch64_neon_facge>;
defm FACGT    : SIMDThreeScalarFPCmp<1, 1, 0b101, "facgt",
                                     int_aarch64_neon_facgt>;
defm FCMEQ    : SIMDThreeScalarFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE    : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT    : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FMULX    : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx>;
defm FRECPS   : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps>;
defm FRSQRTS  : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts>;
defm SQADD    : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeScalarHS<  0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeScalarHS<  1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
defm SRSHL    : SIMDThreeScalarD<   0, 0b01010, "srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeScalarD<   0, 0b01000, "sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeScalarD<   1, 0b10000, "sub", sub>;
defm UQADD    : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
defm URSHL    : SIMDThreeScalarD<   1, 0b01010, "urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeScalarD<   1, 0b01000, "ushl", int_aarch64_neon_ushl>;
let Predicates = [HasRDM] in {
  defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
  defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
  def : Pat<(i32 (int_aarch64_neon_sqadd
                   (i32 FPR32:$Rd),
                   (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
                                                   (i32 FPR32:$Rm))))),
            (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
  def : Pat<(i32 (int_aarch64_neon_sqsub
                   (i32 FPR32:$Rd),
                   (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
                                                   (i32 FPR32:$Rm))))),
            (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
}
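
// Editorial note (sketch): the two patterns above let the selector treat
// a sqrdmulh whose result feeds sqadd/sqsub as the fused v8.1a
// accumulating form, e.g.
//   sqrdmlah s0, s1, s2   ; instead of sqrdmulh s3, s1, s2 + sqadd s0, s0, s3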

def : InstAlias<"cmls $dst, $src1, $src2",
                (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmle $dst, $src1, $src2",
                (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlo $dst, $src1, $src2",
                (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlt $dst, $src1, $src2",
                (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions (mixed operands).
//===----------------------------------------------------------------------===//
defm SQDMULL  : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
                                       int_aarch64_neon_sqdmulls_scalar>;
defm SQDMLAL  : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
defm SQDMLSL  : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;

def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
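
// Editorial note: the same fusion for the widening form; a saturating
// doubling multiply-long feeding sqadd/sqsub becomes a single
//   sqdmlal d0, s1, s2   ; d0 = sat(d0 + sat(2 * s1 * s2))
// (and sqdmlsl for the subtracting case).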

//===----------------------------------------------------------------------===//
// Advanced SIMD two scalar instructions.
//===----------------------------------------------------------------------===//

defm ABS    : SIMDTwoScalarD<    0, 0b01011, "abs", abs>;
defm CMEQ   : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
defm FCMEQ  : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDFPTwoScalar<   0, 0, 0b11100, "fcvtas">;
defm FCVTAU : SIMDFPTwoScalar<   1, 0, 0b11100, "fcvtau">;
defm FCVTMS : SIMDFPTwoScalar<   0, 0, 0b11011, "fcvtms">;
defm FCVTMU : SIMDFPTwoScalar<   1, 0, 0b11011, "fcvtmu">;
defm FCVTNS : SIMDFPTwoScalar<   0, 0, 0b11010, "fcvtns">;
defm FCVTNU : SIMDFPTwoScalar<   1, 0, 0b11010, "fcvtnu">;
defm FCVTPS : SIMDFPTwoScalar<   0, 1, 0b11010, "fcvtps">;
defm FCVTPU : SIMDFPTwoScalar<   1, 1, 0b11010, "fcvtpu">;
def  FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
defm FCVTZS : SIMDFPTwoScalar<   0, 1, 0b11011, "fcvtzs">;
defm FCVTZU : SIMDFPTwoScalar<   1, 1, 0b11011, "fcvtzu">;
defm FRECPE : SIMDFPTwoScalar<   0, 1, 0b11101, "frecpe">;
defm FRECPX : SIMDFPTwoScalar<   0, 1, 0b11111, "frecpx">;
defm FRSQRTE : SIMDFPTwoScalar<  1, 1, 0b11101, "frsqrte">;
defm NEG    : SIMDTwoScalarD<    1, 0b01011, "neg",
                                 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm SCVTF  : SIMDFPTwoScalarCVT<   0, 0, 0b11101, "scvtf", AArch64sitof>;
defm SQABS  : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
                                     int_aarch64_neon_suqadd>;
defm UCVTF  : SIMDFPTwoScalarCVT<   1, 0, 0b11101, "ucvtf", AArch64uitof>;
defm UQXTN  : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
                                    int_aarch64_neon_usqadd>;

def : Pat<(AArch64neg (v1i64 V64:$Rn)), (NEGv1i64 V64:$Rn)>;

def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
          (FCVTASv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
          (FCVTAUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
          (FCVTMSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
          (FCVTMUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
          (FCVTNSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
          (FCVTNUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
          (FCVTPSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
          (FCVTPUv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frecpe (f16 FPR16:$Rn))),
          (FRECPEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
          (FRECPEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
          (FRECPEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
          (FRECPEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frecps (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRECPS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frecps (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRECPSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frecps (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRECPSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frecps (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRECPS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frecps (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRECPSv2f64 FPR128:$Rn, FPR128:$Rm)>;
4345
4346def : Pat<(f16 (int_aarch64_neon_frecpx (f16 FPR16:$Rn))),
4347          (FRECPXv1f16 FPR16:$Rn)>;
4348def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
4349          (FRECPXv1i32 FPR32:$Rn)>;
4350def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
4351          (FRECPXv1i64 FPR64:$Rn)>;
4352
4353def : Pat<(f16 (int_aarch64_neon_frsqrte (f16 FPR16:$Rn))),
4354          (FRSQRTEv1f16 FPR16:$Rn)>;
4355def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
4356          (FRSQRTEv1i32 FPR32:$Rn)>;
4357def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
4358          (FRSQRTEv1i64 FPR64:$Rn)>;
4359def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
4360          (FRSQRTEv1i64 FPR64:$Rn)>;
4361
4362def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
4363          (FRSQRTEv1i32 FPR32:$Rn)>;
4364def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
4365          (FRSQRTEv2f32 V64:$Rn)>;
4366def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
4367          (FRSQRTEv4f32 FPR128:$Rn)>;
4368def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
4369          (FRSQRTEv1i64 FPR64:$Rn)>;
4370def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
4371          (FRSQRTEv1i64 FPR64:$Rn)>;
4372def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
4373          (FRSQRTEv2f64 FPR128:$Rn)>;
4374
4375def : Pat<(f32 (AArch64frsqrts (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
4376          (FRSQRTS32 FPR32:$Rn, FPR32:$Rm)>;
4377def : Pat<(v2f32 (AArch64frsqrts (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
4378          (FRSQRTSv2f32 V64:$Rn, V64:$Rm)>;
4379def : Pat<(v4f32 (AArch64frsqrts (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
4380          (FRSQRTSv4f32 FPR128:$Rn, FPR128:$Rm)>;
4381def : Pat<(f64 (AArch64frsqrts (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
4382          (FRSQRTS64 FPR64:$Rn, FPR64:$Rm)>;
4383def : Pat<(v2f64 (AArch64frsqrts (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
4384          (FRSQRTSv2f64 FPR128:$Rn, FPR128:$Rm)>;
4385
// If an integer is about to be converted to a floating-point value, load it
// directly on the floating-point unit so no GPR-to-FPR transfer is needed.
// Here are the patterns for 8- and 16-bit integers to float.
// 8-bit -> float.
multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
                             SDPatternOperator loadop, Instruction UCVTF,
                             ROAddrMode ro, Instruction LDRW, Instruction LDRX,
                             SubRegIndex sub> {
  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                                 sub))>;

  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                                 sub))>;
}

defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
                         UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f32 (uint_to_fp (i32
                     (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bit -> float.
defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
                         UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bit -> float is handled in the target-specific DAG combine
// performIntToFpCombine.
// 64-bit integer to 32-bit float is not possible with a single UCVTF on the
// floating-point registers, because source and destination must have the
// same size.
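
// As an illustrative (not compiler-verified) sketch of the idea above, for
//   float f(const unsigned char *p) { return *p; }
// the zero-extending byte load can land directly in a SIMD&FP register:
//   ldr   b0, [x0]
//   ucvtf s0, s0
// avoiding a GPR load followed by a GPR-to-FPR transfer before the convert.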

// Here are the patterns for 8-, 16-, 32-, and 64-bit integers to double.
// 8-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
                         UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f64 (uint_to_fp (i32
                    (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
                         UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, load,
                         UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
// 64-bit -> double is handled in the target-specific DAG combine
// performIntToFpCombine.

//===----------------------------------------------------------------------===//
// Advanced SIMD three different-sized vector instructions.
//===----------------------------------------------------------------------===//

defm ADDHN  : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
defm SUBHN  : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
defm PMULL  : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
defm SABAL  : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
                                             int_aarch64_neon_sabd>;
defm SABDL   : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
                                          int_aarch64_neon_sabd>;
defm SADDL   : SIMDLongThreeVectorBHS<   0, 0b0000, "saddl",
            BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
defm SADDW   : SIMDWideThreeVectorBHS<   0, 0b0001, "saddw",
                 BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
defm SMLAL   : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL   : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL   : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
                                               int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
                                               int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
                                     int_aarch64_neon_sqdmull>;
defm SSUBL   : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
                 BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
defm SSUBW   : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
                 BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
defm UABAL   : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
                                              int_aarch64_neon_uabd>;
defm UADDL   : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
                 BinOpFrag<(add (zext node:$LHS), (zext node:$RHS))>>;
defm UADDW   : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
                 BinOpFrag<(add node:$LHS, (zext node:$RHS))>>;
defm UMLAL   : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL   : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL   : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
defm USUBL   : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
                 BinOpFrag<(sub (zext node:$LHS), (zext node:$RHS))>>;
defm USUBW   : SIMDWideThreeVectorBHS<   1, 0b0011, "usubw",
                 BinOpFrag<(sub node:$LHS, (zext node:$RHS))>>;

// Additional patterns for SMULL and UMULL
multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V64:$Rn, V64:$Rm)>;
}

defm : Neon_mul_widen_patterns<AArch64smull, SMULLv8i8_v8i16,
  SMULLv4i16_v4i32, SMULLv2i32_v2i64>;
defm : Neon_mul_widen_patterns<AArch64umull, UMULLv8i8_v8i16,
  UMULLv4i16_v4i32, UMULLv2i32_v2i64>;
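
// Illustrative IR shape these patterns correspond to (an assumption about
// the matcher input, not taken from this file):
//   %a.wide = sext <8 x i8> %a to <8 x i16>
//   %b.wide = sext <8 x i8> %b to <8 x i16>
//   %res    = mul <8 x i16> %a.wide, %b.wide
// DAG combining recognises this as AArch64smull, which then selects
// SMULLv8i8_v8i16 (and likewise umull for the zext case).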

// Patterns for smull2/umull2.
multiclass Neon_mul_high_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (extract_high_v16i8 V128:$Rn),
                           (extract_high_v16i8 V128:$Rm))),
             (INST8B V128:$Rn, V128:$Rm)>;
  def : Pat<(v4i32 (opnode (extract_high_v8i16 V128:$Rn),
                           (extract_high_v8i16 V128:$Rm))),
             (INST4H V128:$Rn, V128:$Rm)>;
  def : Pat<(v2i64 (opnode (extract_high_v4i32 V128:$Rn),
                           (extract_high_v4i32 V128:$Rm))),
             (INST2S V128:$Rn, V128:$Rm)>;
}

defm : Neon_mul_high_patterns<AArch64smull, SMULLv16i8_v8i16,
  SMULLv8i16_v4i32, SMULLv4i32_v2i64>;
defm : Neon_mul_high_patterns<AArch64umull, UMULLv16i8_v8i16,
  UMULLv8i16_v4i32, UMULLv4i32_v2i64>;
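
// For example, extract_high_v16i8 stands for lanes [8,15] of a 128-bit
// operand, so multiplying two such high halves selects the "2" form,
// roughly "smull2 v0.8h, v1.16b, v2.16b" (an illustrative sketch).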

// Additional patterns for SMLAL/SMLSL and UMLAL/UMLSL
multiclass Neon_mulacc_widen_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S  V128:$Rd, V64:$Rn, V64:$Rm)>;
}

defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;

// Patterns for 64-bit pmull
def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
          (PMULLv1i64 V64:$Rn, V64:$Rm)>;
def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
                                    (extractelt (v2i64 V128:$Rm), (i64 1))),
          (PMULLv2i64 V128:$Rn, V128:$Rm)>;

// CodeGen patterns for addhn and subhn instructions, which can actually be
// written in LLVM IR without too much difficulty.
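// For instance, the first ADDHN pattern below corresponds to IR of roughly
// this shape (a sketch for illustration):
//   %sum = add <8 x i16> %a, %b
//   %hi  = lshr <8 x i16> %sum, <i16 8, i16 8, i16 8, i16 8,
//                                i16 8, i16 8, i16 8, i16 8>
//   %res = trunc <8 x i16> %hi to <8 x i8>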

// ADDHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
          (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                           (i32 16))))),
          (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                           (i32 32))))),
          (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 8))))),
          (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 16))))),
          (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 32))))),
          (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

// SUBHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
          (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                           (i32 16))))),
          (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                           (i32 32))))),
          (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 8))))),
          (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 16))))),
          (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 32))))),
          (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

//----------------------------------------------------------------------------
// AdvSIMD bitwise extract from vector instruction.
//----------------------------------------------------------------------------

defm EXT : SIMDBitwiseExtract<"ext">;

def AdjustExtImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 + N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
multiclass ExtPat<ValueType VT64, ValueType VT128, int N> {
  def : Pat<(VT64 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
            (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
  def : Pat<(VT128 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
            (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
  // We use EXT to handle extract_subvector to copy the upper 64-bits of a
  // 128-bit vector.
  def : Pat<(VT64 (extract_subvector V128:$Rn, (i64 N))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
  // A 64-bit EXT of two halves of the same 128-bit register can be done as a
  // single 128-bit EXT.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 0)),
                              (extract_subvector V128:$Rn, (i64 N)),
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, imm:$imm), dsub)>;
  // A 64-bit EXT of the high half of a 128-bit register can be done using a
  // 128-bit EXT of the whole register with an adjustment to the immediate. The
  // top half of the other operand will be unset, but that doesn't matter as it
  // will not be used.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 N)),
                              V64:$Rm,
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn,
                                      (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                                      (AdjustExtImm imm:$imm)), dsub)>;
}

defm : ExtPat<v8i8, v16i8, 8>;
defm : ExtPat<v4i16, v8i16, 4>;
defm : ExtPat<v4f16, v8f16, 4>;
defm : ExtPat<v2i32, v4i32, 2>;
defm : ExtPat<v2f32, v4f32, 2>;
defm : ExtPat<v1i64, v2i64, 1>;
defm : ExtPat<v1f64, v2f64, 1>;
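
// As an illustration of the extract_subvector trick above: taking the high
// 64 bits of a v16i8 becomes "ext v0.16b, v1.16b, v1.16b, #8", a rotation by
// 8 bytes whose low dsub half is then read directly (a sketch of the intent,
// not verified compiler output).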

//----------------------------------------------------------------------------
// AdvSIMD zip vector
//----------------------------------------------------------------------------

defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;

//----------------------------------------------------------------------------
// AdvSIMD TBL/TBX instructions
//----------------------------------------------------------------------------

defm TBL : SIMDTableLookup<    0, "tbl">;
defm TBX : SIMDTableLookupTied<1, "tbx">;

def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBLv16i8One V128:$Ri, V128:$Rn)>;

def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
                  (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
                   (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;


//----------------------------------------------------------------------------
// AdvSIMD scalar CPY instruction
//----------------------------------------------------------------------------

defm CPY : SIMDScalarCPY<"cpy">;

//----------------------------------------------------------------------------
// AdvSIMD scalar pairwise instructions
//----------------------------------------------------------------------------

defm ADDP    : SIMDPairwiseScalarD<0, 0b11011, "addp">;
defm FADDP   : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
defm FMAXP   : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
defm FMINP   : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;
def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
          (FADDPv2i32p V64:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
          (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
          (FADDPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
          (FMAXNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
          (FMAXNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
          (FMAXPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
          (FMAXPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
          (FMINNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
          (FMINNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
          (FMINPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
          (FMINPv2i64p V128:$Rn)>;

//----------------------------------------------------------------------------
// AdvSIMD INS/DUP instructions
//----------------------------------------------------------------------------

def DUPv8i8gpr  : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;

def DUPv2i64lane : SIMDDup64FromElement;
def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
def DUPv8i8lane  : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;

// DUP from a 64-bit register to a 64-bit register is just a copy
def : Pat<(v1i64 (AArch64dup (i64 GPR64:$Rn))),
          (COPY_TO_REGCLASS GPR64:$Rn, FPR64)>;
def : Pat<(v1f64 (AArch64dup (f64 FPR64:$Rn))),
          (COPY_TO_REGCLASS FPR64:$Rn, FPR64)>;

def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
          (v2f32 (DUPv2i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
          (v4f32 (DUPv4i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
          (v2f64 (DUPv2i64lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
            (i64 0)))>;
def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
          (v4f16 (DUPv4i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
          (v8f16 (DUPv8i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;

def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;

def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
          (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;

// If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
// instruction even if the types don't match: we just have to remap the lane
// carefully. N.b. this trick only applies to truncations.
def VecIndex_x2 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x4 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x8 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;

multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
                            ValueType Src128VT, ValueType ScalVT,
                            Instruction DUP, SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
                                                     imm:$idx)))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
                                                     imm:$idx)))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTruncPats<v8i8,   v4i16, v8i16, i32, DUPv8i8lane,  VecIndex_x2>;
defm : DUPWithTruncPats<v8i8,   v2i32, v4i32, i32, DUPv8i8lane,  VecIndex_x4>;
defm : DUPWithTruncPats<v4i16,  v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;

defm : DUPWithTruncPats<v16i8,  v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
defm : DUPWithTruncPats<v16i8,  v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
defm : DUPWithTruncPats<v8i16,  v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;

multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
                               SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
                                                         imm:$idx))))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
                                                       imm:$idx))))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTrunci64Pats<v8i8,  DUPv8i8lane,   VecIndex_x8>;
defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane,  VecIndex_x4>;
defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane,  VecIndex_x2>;

defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;
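
// To make the index remapping concrete (an illustrative sketch): duplicating
// the i8 truncation of h[3] from a v8i16 is the same as duplicating byte
// lane b[6] of the same register, since the low byte of h[3] is b[2*3];
// VecIndex_x2/_x4/_x8 perform exactly that lane-index scaling.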

// SMOV and UMOV definitions, with some extra patterns for convenience
defm SMOV : SMov;
defm UMOV : UMov;

def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
          (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;

def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
            VectorIndexB:$idx)))), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
            VectorIndexH:$idx)))), i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;

// Extracting i8 or i16 elements will have the zero-extend transformed to
// an 'and' mask by type legalization since neither i8 nor i16 are legal types
// for AArch64. Match these patterns here since UMOV already zeroes out the high
// bits of the destination register.
def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
               (i32 0xff)),
          (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
               (i32 0xffff)),
          (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;
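
// Concretely (an illustrative sketch): for C along the lines of
//   uint32_t get(uint8x16_t v) { return vgetq_lane_u8(v, 3); }
// legalization produces (and (vector_extract ...), 0xff), and the first
// pattern above folds the mask away because "umov w0, v0.b[3]" already
// zeroes bits [31:8] of w0.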

defm INS : SIMDIns;

def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
            (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                                  (i32 FPR32:$Rn), ssub))>;
def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
            (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                                  (i32 FPR32:$Rn), ssub))>;

def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
            (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                                  (i64 FPR64:$Rn), dsub))>;

def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;

def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;

def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi32lane
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
              (i64 0)),
            dsub)>;
def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (INSvi32lane
            V128:$Rn, VectorIndexS:$imm,
            (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
            (i64 0))>;
def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
            (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
          (INSvi64lane
            V128:$Rn, VectorIndexD:$imm,
            (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
            (i64 0))>;

// Copy an element at a constant index in one vector into a constant indexed
// element of another.
// FIXME refactor to a shared class/def parameterized on vector type, vector
// index type and INS extension
def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
                   (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
                   VectorIndexB:$idx2)),
          (v16i8 (INSvi8lane
                   V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
          )>;
def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
                   (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
                   VectorIndexH:$idx2)),
          (v8i16 (INSvi16lane
                   V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
          )>;
def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
                   (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
                   VectorIndexS:$idx2)),
          (v4i32 (INSvi32lane
                   V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
          )>;
def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
                   (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
                   VectorIndexD:$idx2)),
          (v2i64 (INSvi64lane
                   V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
          )>;

multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
                                ValueType VTScal, Instruction INS> {
  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;

  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (INS V128:$src, imm:$Immd,
                 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
                                 imm:$Immd, V128:$Rn, imm:$Immn),
                            dsub)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (EXTRACT_SUBREG
                (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
                     (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
                dsub)>;
}

defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;


// Floating point vector extractions are codegen'd as either a sequence of
// subregister extractions, or a MOV (aka CPY here, alias for DUP) if
// the lane number is anything other than zero.
def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
          (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
          (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
          (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;

def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
          (f64 (CPYi64 V128:$Rn, VectorIndexD:$idx))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
          (f32 (CPYi32 V128:$Rn, VectorIndexS:$idx))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
          (f16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
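
// For example (a sketch): extracting lane 0 of a v4f32 is a free read of the
// ssub subregister, while extracting lane 1 becomes "mov s0, v0.s[1]" (the
// MOV/DUP element form that CPYi32 denotes here).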

// All concat_vectors operations are canonicalised to act on i64 vectors for
// AArch64. In the general case we need an instruction, which might as well
// be INS.
class ConcatPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
        (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
                     (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;

def : ConcatPat<v2i64, v1i64>;
def : ConcatPat<v2f64, v1f64>;
def : ConcatPat<v4i32, v2i32>;
def : ConcatPat<v4f32, v2f32>;
def : ConcatPat<v8i16, v4i16>;
def : ConcatPat<v8f16, v4f16>;
def : ConcatPat<v16i8, v8i8>;
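
// For instance, (v4f32 (concat_vectors (v2f32 a), (v2f32 b))) becomes an INS
// of b's 64 bits into the high lane of the register holding a, i.e. roughly
// "mov v0.d[1], v1.d[0]" (an illustrative sketch).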

// If the high lanes are undef, though, we can just ignore them:
class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
        (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;

def : ConcatUndefPat<v2i64, v1i64>;
def : ConcatUndefPat<v2f64, v1f64>;
def : ConcatUndefPat<v4i32, v2i32>;
def : ConcatUndefPat<v4f32, v2f32>;
def : ConcatUndefPat<v8i16, v4i16>;
def : ConcatUndefPat<v16i8, v8i8>;

//----------------------------------------------------------------------------
// AdvSIMD across lanes instructions
//----------------------------------------------------------------------------

defm ADDV    : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
defm SMAXV   : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
defm SMINV   : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
defm UMAXV   : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
defm UMINV   : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
defm SADDLV  : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
defm UADDLV  : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
defm FMAXNMV : SIMDFPAcrossLanes<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
defm FMAXV   : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
defm FMINV   : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;

// Patterns for across-vector intrinsics that have a node equivalent returning
// a vector (with only the low lane defined) instead of a scalar.
// In effect, opNode is the same as (scalar_to_vector (IntNode)).
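// For example (an illustrative sketch): for the IR-level shape
//   (i32 (vector_extract (v16i8 (AArch64uaddv V128:$Rn)), (i64 0)))
// the patterns below select e.g. ADDVv16i8v and read the result back out of
// the b/h/s subregister, so no separate lane-move instruction is needed.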
multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
                                    SDPatternOperator opNode> {
// If a lane instruction caught the vector_extract around opNode, we can
// directly match the latter to the instruction.
def : Pat<(v8i8 (opNode V64:$Rn)),
          (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
def : Pat<(v16i8 (opNode V128:$Rn)),
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
def : Pat<(v4i16 (opNode V64:$Rn)),
          (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
def : Pat<(v8i16 (opNode V128:$Rn)),
          (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
def : Pat<(v4i32 (opNode V128:$Rn)),
          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;


// If none did, fall back to the explicit patterns, consuming the vector_extract.
def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
            (i32 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (insert_subvector undef,
            (v4i16 (opNode V64:$Rn)), (i32 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
            ssub), ssub)>;

}

multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
                                          SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a sign extension after this intrinsic, consume it, as SMOV
// already performed it.
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
           (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
            (i64 0)))>;
}

multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
                                            SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a masking operation keeping only what was actually generated,
// consume it.
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), maski8_or_more)),
      (i32 (EXTRACT_SUBREG
        (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
          (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
        ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
            maski8_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
          ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), maski16_or_more)),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
            ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
            maski16_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
          ssub))>;
}

defm : SIMDAcrossLanesSignedIntrinsic<"ADDV",  AArch64saddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;
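
// To sketch the special case above: vaddv_s32 has only two lanes and there
// is no 2-lane ADDV, so "addp v0.2s, v0.2s, v0.2s" computes the sum into
// lane 0 instead (illustrative assembly, not compiler output).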
5228
5229defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
5230// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
5231def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
5232          (ADDPv2i32 V64:$Rn, V64:$Rn)>;
5233
5234defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
5235def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
5236          (SMAXPv2i32 V64:$Rn, V64:$Rn)>;
5237
5238defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
5239def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
5240          (SMINPv2i32 V64:$Rn, V64:$Rn)>;
5241
5242defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
5243def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
5244          (UMAXPv2i32 V64:$Rn, V64:$Rn)>;
5245
5246defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
5247def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
5248          (UMINPv2i32 V64:$Rn, V64:$Rn)>;
5249
5250multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
5251  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
5252        (i32 (SMOVvi16to32
5253          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5254            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
5255          (i64 0)))>;
5256def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
5257        (i32 (SMOVvi16to32
5258          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5259           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
5260          (i64 0)))>;
5261
5262def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
5263          (i32 (EXTRACT_SUBREG
5264           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5265            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
5266           ssub))>;
5267def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
5268        (i32 (EXTRACT_SUBREG
5269          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5270           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
5271          ssub))>;
5272
5273def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
5274        (i64 (EXTRACT_SUBREG
5275          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5276           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
5277          dsub))>;
5278}
5279
5280multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
5281                                                Intrinsic intOp> {
5282  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
5283        (i32 (EXTRACT_SUBREG
5284          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5285            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
5286          ssub))>;
5287def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
5288        (i32 (EXTRACT_SUBREG
5289          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5290            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
5291          ssub))>;
5292
5293def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
5294          (i32 (EXTRACT_SUBREG
5295            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5296              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
5297            ssub))>;
5298def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
5299        (i32 (EXTRACT_SUBREG
5300          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5301            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
5302          ssub))>;
5303
5304def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
5305        (i64 (EXTRACT_SUBREG
5306          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5307            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
5308          dsub))>;
5309}
5310
5311defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
5312defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;
5313
5314// The vaddlv_s32 intrinsic gets mapped to SADDLP.
5315def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
5316          (i64 (EXTRACT_SUBREG
5317            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5318              (SADDLPv2i32_v1i64 V64:$Rn), dsub),
5319            dsub))>;
5320// The vaddlv_u32 intrinsic gets mapped to UADDLP.
5321def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
5322          (i64 (EXTRACT_SUBREG
5323            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5324              (UADDLPv2i32_v1i64 V64:$Rn), dsub),
5325            dsub))>;
5326
5327//------------------------------------------------------------------------------
5328// AdvSIMD modified immediate instructions
5329//------------------------------------------------------------------------------
5330
5331// AdvSIMD BIC
5332defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
5333// AdvSIMD ORR
5334defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;
5335
5336def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
5337def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
5338def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
5339def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
5340
5341def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
5342def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
5343def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
5344def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
5345
5346def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
5347def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
5348def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
5349def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
5350
5351def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
5352def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
5353def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
5354def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

// AdvSIMD FMOV
def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
                                              "fmov", ".2d",
                       [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64,  fpimm8,
                                              "fmov", ".2s",
                       [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
                                              "fmov", ".4s",
                       [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
let Predicates = [HasNEON, HasFullFP16] in {
def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64,  fpimm8,
                                              "fmov", ".4h",
                       [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
                                              "fmov", ".8h",
                       [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
} // Predicates = [HasNEON, HasFullFP16]

// AdvSIMD MOVI

// EDIT byte mask: scalar
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVID      : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
                    [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 here.
def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
          (MOVID imm0_255:$shift)>;
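// In the byte-mask encoding each bit of imm8 selects one full byte of the
// result, MSB first. For example, imm8 = 0x55 (0b01010101) materializes:
//   movi d0, #0x00ff00ff00ff00ff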

// EDIT byte mask: 2d

// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 in the pattern.
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVIv2d_ns   : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
                                                simdimmtype10,
                                                "movi", ".2d",
                   [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;

def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;

def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;
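// imm8 = 255 sets all eight mask bits, i.e. every byte of the register, so a
// single MOVI .2d materializes the all-ones value for any vector type.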

// Set 64-bit vectors to all 0/1 by extracting from a 128-bit register as the
// extract is free and this gives better MachineCSE results.
def : Pat<(v1i64 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v2i32 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v4i16 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v8i8  immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;

def : Pat<(v1i64 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v2i32 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v4i16 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v8i8  immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MOVI      : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;

def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
// EDIT per word: 2s & 4s with MSL shifter
def MOVIv2s_msl  : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MOVIv4s_msl  : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;

// Per byte: 8b & 16b
def MOVIv8b_ns   : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64,  imm0_255,
                                                 "movi", ".8b",
                       [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;

def MOVIv16b_ns  : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
                                                 "movi", ".16b",
                       [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
}

// AdvSIMD MVNI

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MVNI      : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;

def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;

// EDIT per word: 2s & 4s with MSL shifter
let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MVNIv2s_msl   : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MVNIv4s_msl   : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
}

//----------------------------------------------------------------------------
// AdvSIMD indexed element
//----------------------------------------------------------------------------

let hasSideEffects = 0 in {
  defm FMLA  : SIMDFPIndexedTied<0, 0b0001, "fmla">;
  defm FMLS  : SIMDFPIndexedTied<0, 0b0101, "fmls">;
}

// NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
// instruction expects the addend first, while the intrinsic expects it last.

// On the other hand, there are quite a few valid combinatorial options due to
// the commutativity of multiplication and the fact that (-x) * y = x * (-y).
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;

defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;
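// For example, fmls v0.2s, v1.2s, v2.s[1] computes vd - vn * vm[1]; the DAG
// may carry the fneg on either multiplicand and in either operand order, so
// each of the four orderings above needs its own pattern.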

multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
  // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                           VectorIndexS:$idx))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (v2f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i32 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                           VectorIndexS:$idx))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (v4f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i32 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
  // (DUPLANE from 64-bit would be trivial).
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
                                           VectorIndexD:$idx))),
            (FMLSv2i64_indexed
                V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexD:$idx)>;
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
            (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
                (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;

  // 2 variants for 32-bit scalar version: extract from .2s or from .4s
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (insert_subvector undef,
                                                    (v2f32 (fneg V64:$Rm)),
                                                    (i32 0))),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;

  // 1 variant for the 64-bit scalar version: extract from .2d (extract from
  // .1d would be trivial).
  def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
                         (vector_extract (v2f64 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
                V128:$Rm, VectorIndexS:$idx)>;
}

defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;

defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
defm FMUL  : SIMDFPIndexed<0, 0b1001, "fmul", fmul>;

def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv2i32_indexed V64:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv4i32_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
          (FMULv2i64_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
            (i64 0))>;

defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;

// Generated by the MachineCombiner pass.
defm MLA   : SIMDVectorIndexedHSTied<1, 0b0000, "mla", null_frag>;
defm MLS   : SIMDVectorIndexedHSTied<1, 0b0100, "mls", null_frag>;

defm MUL   : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
                int_aarch64_neon_smull>;
defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
                                           int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
                                           int_aarch64_neon_sqsub>;
defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
                                          int_aarch64_neon_sqadd>;
defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
                                          int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
defm UMLAL   : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL   : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL   : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
                int_aarch64_neon_umull>;

// A scalar sqdmull with the second operand being a vector lane can be
// handled directly with the indexed instruction encoding.
def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                          (vector_extract (v4i32 V128:$Vm),
                                                           VectorIndexS:$idx)),
          (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
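// For example, an extract of lane 3 selects:
//   sqdmull d0, s0, v1.s[3]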

//----------------------------------------------------------------------------
// AdvSIMD scalar shift instructions
//----------------------------------------------------------------------------
defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">;
defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">;
defm SCVTF  : SIMDFPScalarRShift<0, 0b11100, "scvtf">;
defm UCVTF  : SIMDFPScalarRShift<1, 0b11100, "ucvtf">;
// Codegen patterns for the above. We don't put these directly on the
// instructions because TableGen's type inference can't handle the truth.
// Having the same base pattern for fp <--> int totally freaks it out.
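// For example, the first pattern below selects the scalar fixed-point convert:
//   fcvtzs s0, s0, #4    // f32 -> signed 32-bit fixed point, 4 fractional bits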
def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
          (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
          (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
                                            vecshiftR64:$imm)),
          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
                                            vecshiftR64:$imm)),
          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
          (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
                                            vecshiftR64:$imm)),
          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
                                            vecshiftR64:$imm)),
          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
          (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;

// Patterns for FP16 intrinsics - a register copy to/from the h sub-register is
// required, as i16 is not a legal type.

def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
            (and FPR32:$Rn, (i32 65535)),
            vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR32:$imm)),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FCVTZSh FPR16:$Rn, vecshiftR32:$imm),
            hsub))>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
          (i64 (INSERT_SUBREG
            (i64 (IMPLICIT_DEF)),
            (FCVTZSh FPR16:$Rn, vecshiftR64:$imm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR32:$imm)),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FCVTZUh FPR16:$Rn, vecshiftR32:$imm),
            hsub))>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR64:$imm)),
          (i64 (INSERT_SUBREG
            (i64 (IMPLICIT_DEF)),
            (FCVTZUh FPR16:$Rn, vecshiftR64:$imm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_facge (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FACGE16 FPR16:$Rn, FPR16:$Rm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_facgt (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FACGT16 FPR16:$Rn, FPR16:$Rm),
            hsub))>;

defm SHL      : SIMDScalarLShiftD<   0, 0b01010, "shl", AArch64vshl>;
defm SLI      : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
defm SQRSHRN  : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
                                     int_aarch64_neon_sqrshrn>;
defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
                                     int_aarch64_neon_sqrshrun>;
defm SQSHLU   : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL    : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN   : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
                                     int_aarch64_neon_sqshrn>;
defm SQSHRUN  : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
                                     int_aarch64_neon_sqshrun>;
defm SRI      : SIMDScalarRShiftDTied<   1, 0b01000, "sri">;
defm SRSHR    : SIMDScalarRShiftD<   0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA    : SIMDScalarRShiftDTied<   0, 0b00110, "srsra",
    TriOpFrag<(add node:$LHS,
                   (AArch64srshri node:$MHS, node:$RHS))>>;
defm SSHR     : SIMDScalarRShiftD<   0, 0b00000, "sshr", AArch64vashr>;
defm SSRA     : SIMDScalarRShiftDTied<   0, 0b00010, "ssra",
    TriOpFrag<(add node:$LHS,
                   (AArch64vashr node:$MHS, node:$RHS))>>;
defm UQRSHRN  : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
                                     int_aarch64_neon_uqrshrn>;
defm UQSHL    : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN   : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
                                     int_aarch64_neon_uqshrn>;
defm URSHR    : SIMDScalarRShiftD<   1, 0b00100, "urshr", AArch64urshri>;
defm URSRA    : SIMDScalarRShiftDTied<   1, 0b00110, "ursra",
    TriOpFrag<(add node:$LHS,
                   (AArch64urshri node:$MHS, node:$RHS))>>;
defm USHR     : SIMDScalarRShiftD<   1, 0b00000, "ushr", AArch64vlshr>;
defm USRA     : SIMDScalarRShiftDTied<   1, 0b00010, "usra",
    TriOpFrag<(add node:$LHS,
                   (AArch64vlshr node:$MHS, node:$RHS))>>;

//----------------------------------------------------------------------------
// AdvSIMD vector shift instructions
//----------------------------------------------------------------------------
defm FCVTZS:SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
defm FCVTZU:SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
defm SCVTF: SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
                                   int_aarch64_neon_vcvtfxs2fp>;
defm RSHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
                                         int_aarch64_neon_rshrn>;
defm SHL     : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
defm SHRN    : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
                          BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
defm SLI     : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", int_aarch64_neon_vsli>;
def : Pat<(v1i64 (int_aarch64_neon_vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                                      (i32 vecshiftL64:$imm))),
          (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
defm SQRSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
                                         int_aarch64_neon_sqrshrn>;
defm SQRSHRUN: SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
                                         int_aarch64_neon_sqrshrun>;
defm SQSHLU : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL  : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
                                         int_aarch64_neon_sqshrn>;
defm SQSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
                                         int_aarch64_neon_sqshrun>;
defm SRI     : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", int_aarch64_neon_vsri>;
def : Pat<(v1i64 (int_aarch64_neon_vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                                      (i32 vecshiftR64:$imm))),
          (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
defm SRSHR   : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA   : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
                 TriOpFrag<(add node:$LHS,
                                (AArch64srshri node:$MHS, node:$RHS))> >;
defm SSHLL   : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
                BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;

defm SSHR    : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
defm SSRA    : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
                TriOpFrag<(add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
defm UCVTF   : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
                        int_aarch64_neon_vcvtfxu2fp>;
defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
                                         int_aarch64_neon_uqrshrn>;
defm UQSHL   : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN  : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
                                         int_aarch64_neon_uqshrn>;
defm URSHR   : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
defm URSRA   : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
                TriOpFrag<(add node:$LHS,
                               (AArch64urshri node:$MHS, node:$RHS))> >;
defm USHLL   : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
                BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
defm USHR    : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
defm USRA    : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
                TriOpFrag<(add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;

// SHRN patterns for when a logical right shift was used instead of arithmetic
// (the immediate guarantees no sign bits actually end up in the result so it
// doesn't matter).
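// For example, (v8i8 (trunc (v8i16 X lshr 5))) selects:
//   shrn v0.8b, v1.8h, #5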
def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
          (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
          (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
          (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;

def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
                                 (trunc (AArch64vlshr (v8i16 V128:$Rn),
                                                    vecshiftR16Narrow:$imm)))),
          (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
                                 (trunc (AArch64vlshr (v4i32 V128:$Rn),
                                                    vecshiftR32Narrow:$imm)))),
          (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
                                 (trunc (AArch64vlshr (v2i64 V128:$Rn),
                                                    vecshiftR64Narrow:$imm)))),
          (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR64Narrow:$imm)>;

// Vector sign and zero extensions are implemented with SSHLL and USHLL.
// Anyexts are implemented as zexts.
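// For example, (v8i16 (sext v8i8)) and (v8i16 (zext v8i8)) become:
//   sshll v0.8h, v1.8b, #0    // i.e. sxtl v0.8h, v1.8b
//   ushll v0.8h, v1.8b, #0    // i.e. uxtl v0.8h, v1.8b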
def : Pat<(v8i16 (sext   (v8i8 V64:$Rn))),  (SSHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext   (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext   (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext   (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext   (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext   (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
// Also match an extend from the upper half of a 128-bit source register.
def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (sext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (SSHLLv4i32_shift V128:$Rn, (i32 0))>;

// Vector shift sxtl aliases
def : InstAlias<"sxtl.8h $dst, $src1",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.8h, $src1.8b",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.4s $dst, $src1",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.4s, $src1.4h",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.2d $dst, $src1",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.2d, $src1.2s",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift sxtl2 aliases
def : InstAlias<"sxtl2.8h $dst, $src1",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.4s $dst, $src1",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.2d $dst, $src1",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;

// Vector shift uxtl aliases
def : InstAlias<"uxtl.8h $dst, $src1",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.8h, $src1.8b",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.4s $dst, $src1",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.4s, $src1.4h",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.2d $dst, $src1",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.2d, $src1.2s",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift uxtl2 aliases
def : InstAlias<"uxtl2.8h $dst, $src1",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.4s $dst, $src1",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.2d $dst, $src1",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;

// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// These patterns are more complex because floating point loads do not
// support sign extension.
// The sign extension has to be explicitly added and is only supported for
// one step: byte-to-half, half-to-word, word-to-doubleword.
// SCVTF GPR -> FPR is 9 cycles.
// SCVTF FPR -> FPR is 4 cycles.
// (sign extension with lengthen) SXTL FPR -> FPR is 2 cycles.
// Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
// and still be faster.
// However, this is not good for code size.
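// For example, the 8-bit -> float patterns below produce roughly:
//   ldr   b0, [x0]            // load the byte straight into an FPR
//   sshll v0.8h, v0.8b, #0    // sign extend 8 -> 16
//   sshll v0.4s, v0.4h, #0    // sign extend 16 -> 32
//   scvtf s0, s0              // convert on the FP unit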
// 8-bits -> float. 2 size step-ups.
class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                              (f64
                                (EXTRACT_SUBREG
                                  (SSHLLv8i8_shift
                                    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                        INST,
                                        bsub),
                                    0),
                                  dsub)),
                               0),
                             ssub)))>,
    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;

def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
                          (LDRBroW  GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
                          (LDRBroX  GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
                          (LDURBi GPR64sp:$Rn, simm9:$offset)>;

// 16-bits -> float. 1 size step-up.
class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                  INST,
                                  hsub),
                                0),
                            ssub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW   GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX   GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;

// 32-bit to 32-bit conversions are handled in a target-specific dag combine:
// performIntToFpCombine.
// Converting a 64-bit integer to a 32-bit floating point value is not
// possible with SCVTF on floating point registers (both source and
// destination must have the same size).

// Here are the patterns for 8, 16, 32, and 64-bits to double.
// 8-bits -> double. 3 size step-ups: give up.
// 16-bits -> double. 2 size step-ups.
class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
  : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
                              (SSHLLv2i32_shift
                                 (f64
                                  (EXTRACT_SUBREG
                                    (SSHLLv4i16_shift
                                      (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                        INST,
                                        hsub),
                                     0),
                                   dsub)),
                               0),
                             dsub)))>,
    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;

def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
// 32-bits -> double. 1 size step-up.
class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
  : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
                              (SSHLLv2i32_shift
                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                  INST,
                                  ssub),
                               0),
                             dsub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
                           (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
                           (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
                           (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
                           (LDURSi GPR64sp:$Rn, simm9:$offset)>;

// 64-bit -> double conversions are handled in a target-specific dag combine:
// performIntToFpCombine.


//----------------------------------------------------------------------------
// AdvSIMD Load-Store Structure
//----------------------------------------------------------------------------
defm LD1 : SIMDLd1Multiple<"ld1">;
defm LD2 : SIMDLd2Multiple<"ld2">;
defm LD3 : SIMDLd3Multiple<"ld3">;
defm LD4 : SIMDLd4Multiple<"ld4">;

defm ST1 : SIMDSt1Multiple<"st1">;
defm ST2 : SIMDSt2Multiple<"st2">;
defm ST3 : SIMDSt3Multiple<"st3">;
defm ST4 : SIMDSt4Multiple<"st4">;

class Ld1Pat<ValueType ty, Instruction INST>
  : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;

def : Ld1Pat<v16i8, LD1Onev16b>;
def : Ld1Pat<v8i16, LD1Onev8h>;
def : Ld1Pat<v4i32, LD1Onev4s>;
def : Ld1Pat<v2i64, LD1Onev2d>;
def : Ld1Pat<v8i8,  LD1Onev8b>;
def : Ld1Pat<v4i16, LD1Onev4h>;
def : Ld1Pat<v2i32, LD1Onev2s>;
def : Ld1Pat<v1i64, LD1Onev1d>;

class St1Pat<ValueType ty, Instruction INST>
  : Pat<(store ty:$Vt, GPR64sp:$Rn),
        (INST ty:$Vt, GPR64sp:$Rn)>;

def : St1Pat<v16i8, ST1Onev16b>;
def : St1Pat<v8i16, ST1Onev8h>;
def : St1Pat<v4i32, ST1Onev4s>;
def : St1Pat<v2i64, ST1Onev2d>;
def : St1Pat<v8i8,  ST1Onev8b>;
def : St1Pat<v4i16, ST1Onev4h>;
def : St1Pat<v2i32, ST1Onev2s>;
def : St1Pat<v1i64, ST1Onev1d>;

//---
// Single-element
//---

defm LD1R          : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
defm LD2R          : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
defm LD3R          : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
defm LD4R          : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
let mayLoad = 1, hasSideEffects = 0 in {
defm LD1 : SIMDLdSingleBTied<0, 0b000,       "ld1", VecListOneb,   GPR64pi1>;
defm LD1 : SIMDLdSingleHTied<0, 0b010, 0,    "ld1", VecListOneh,   GPR64pi2>;
defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes,   GPR64pi4>;
defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned,   GPR64pi8>;
defm LD2 : SIMDLdSingleBTied<1, 0b000,       "ld2", VecListTwob,   GPR64pi2>;
defm LD2 : SIMDLdSingleHTied<1, 0b010, 0,    "ld2", VecListTwoh,   GPR64pi4>;
defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos,   GPR64pi8>;
defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod,   GPR64pi16>;
defm LD3 : SIMDLdSingleBTied<0, 0b001,       "ld3", VecListThreeb, GPR64pi3>;
defm LD3 : SIMDLdSingleHTied<0, 0b011, 0,    "ld3", VecListThreeh, GPR64pi6>;
defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
defm LD4 : SIMDLdSingleBTied<1, 0b001,       "ld4", VecListFourb,  GPR64pi4>;
defm LD4 : SIMDLdSingleHTied<1, 0b011, 0,    "ld4", VecListFourh,  GPR64pi8>;
defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours,  GPR64pi16>;
defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd,  GPR64pi32>;
}

def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
          (LD1Rv8b GPR64sp:$Rn)>;
def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
          (LD1Rv16b GPR64sp:$Rn)>;
def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;
def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
          (LD1Rv2s GPR64sp:$Rn)>;
def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
          (LD1Rv4s GPR64sp:$Rn)>;
def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
          (LD1Rv2d GPR64sp:$Rn)>;
def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
          (LD1Rv1d GPR64sp:$Rn)>;
// Grab the floating point version too
def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
          (LD1Rv2s GPR64sp:$Rn)>;
def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
          (LD1Rv4s GPR64sp:$Rn)>;
def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
          (LD1Rv2d GPR64sp:$Rn)>;
def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
          (LD1Rv1d GPR64sp:$Rn)>;
def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;

class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne128:$Rd),
           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;

def : Ld1Lane128Pat<extloadi8,  VectorIndexB, v16i8, i32, LD1i8>;
def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4i32, i32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4f32, f32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2i64, i64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2f64, f64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexH, v8f16, f16, LD1i16>;

class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne64:$Rd),
           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (EXTRACT_SUBREG
            (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
                          VecIndex:$idx, GPR64sp:$Rn),
            dsub)>;

def : Ld1Lane64Pat<extloadi8,  VectorIndexB, v8i8,  i32, LD1i8>;
def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2i32, i32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2f32, f32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexH, v4f16, f16, LD1i16>;


defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
defm LD4 : SIMDLdSt4SingleAliases<"ld4">;

// Stores
defm ST1 : SIMDStSingleB<0, 0b000,       "st1", VecListOneb, GPR64pi1>;
defm ST1 : SIMDStSingleH<0, 0b010, 0,    "st1", VecListOneh, GPR64pi2>;
defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;

let AddedComplexity = 19 in
class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane128Pat<truncstorei8,  VectorIndexB, v16i8, i32, ST1i8>;
def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
def : St1Lane128Pat<store,         VectorIndexS, v4i32, i32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexS, v4f32, f32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexD, v2i64, i64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexD, v2f64, f64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexH, v8f16, f16, ST1i16>;

let AddedComplexity = 19 in
class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane64Pat<truncstorei8,  VectorIndexB, v8i8, i32, ST1i8>;
def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
def : St1Lane64Pat<store,         VectorIndexS, v2i32, i32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexS, v2f32, f32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexH, v4f16, f16, ST1i16>;

multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                             ValueType VTy, ValueType STy, Instruction ST1,
                             int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
                        2>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;

multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                             ValueType VTy, ValueType STy, Instruction ST1,
                             int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
                         1>;
defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
                         2>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;

let mayStore = 1, hasSideEffects = 0 in {
defm ST2 : SIMDStSingleB<1, 0b000,       "st2", VecListTwob,   GPR64pi2>;
defm ST2 : SIMDStSingleH<1, 0b010, 0,    "st2", VecListTwoh,   GPR64pi4>;
defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos,   GPR64pi8>;
defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod,   GPR64pi16>;
defm ST3 : SIMDStSingleB<0, 0b001,       "st3", VecListThreeb, GPR64pi3>;
defm ST3 : SIMDStSingleH<0, 0b011, 0,    "st3", VecListThreeh, GPR64pi6>;
defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
defm ST4 : SIMDStSingleB<1, 0b001,       "st4", VecListFourb,  GPR64pi4>;
defm ST4 : SIMDStSingleH<1, 0b011, 0,    "st4", VecListFourh,  GPR64pi8>;
defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours,  GPR64pi16>;
defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd,  GPR64pi32>;
}

defm ST1 : SIMDLdSt1SingleAliases<"st1">;
defm ST2 : SIMDLdSt2SingleAliases<"st2">;
defm ST3 : SIMDLdSt3SingleAliases<"st3">;
defm ST4 : SIMDLdSt4SingleAliases<"st4">;

//----------------------------------------------------------------------------
// Crypto extensions
//----------------------------------------------------------------------------

let Predicates = [HasAES] in {
def AESErr   : AESTiedInst<0b0100, "aese",   int_aarch64_crypto_aese>;
def AESDrr   : AESTiedInst<0b0101, "aesd",   int_aarch64_crypto_aesd>;
def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
}

// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
// for AES fusion on some CPUs.
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def AESMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
                        Sched<[WriteV]>;
def AESIMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
                         Sched<[WriteV]>;
}

// Only use constrained versions of AES(I)MC instructions if they are paired with
// AESE/AESD.
def : Pat<(v16i8 (int_aarch64_crypto_aesmc
            (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
                                            (v16i8 V128:$src2))))),
          (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1),
                                             (v16i8 V128:$src2)))))>,
          Requires<[HasFuseAES]>;

def : Pat<(v16i8 (int_aarch64_crypto_aesimc
            (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
                                            (v16i8 V128:$src2))))),
          (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1),
                                              (v16i8 V128:$src2)))))>,
          Requires<[HasFuseAES]>;
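// The tied pseudos force aesmc/aesimc to reuse the aese/aesd destination so
// that, with suitable scheduling, the pair can fuse, e.g.:
//   aese  v0.16b, v1.16b
//   aesmc v0.16b, v0.16b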

let Predicates = [HasSHA2] in {
def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c",   int_aarch64_crypto_sha1c>;
def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p",   int_aarch64_crypto_sha1p>;
def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m",   int_aarch64_crypto_sha1m>;
def SHA1SU0rrr   : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
def SHA256Hrrr   : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
def SHA256H2rrr  : SHATiedInstQQV<0b101, "sha256h2",int_aarch64_crypto_sha256h2>;
def SHA256SU1rrr :SHATiedInstVVV<0b110, "sha256su1",int_aarch64_crypto_sha256su1>;

def SHA1Hrr     : SHAInstSS<    0b0000, "sha1h",    int_aarch64_crypto_sha1h>;
def SHA1SU1rr   : SHATiedInstVV<0b0001, "sha1su1",  int_aarch64_crypto_sha1su1>;
def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",int_aarch64_crypto_sha256su0>;
}

//----------------------------------------------------------------------------
// Compiler-pseudos
//----------------------------------------------------------------------------
// FIXME: Like for X86, these should go in their own separate .td file.

def def32 : PatLeaf<(i32 GPR32:$src), [{
  return isDef32(*N);
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;
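// For example, (i64 (zext (i32 (add $a, $b)))) needs no extra instruction:
//   add w0, w0, w1    // writing w0 implicitly zeroes bits [63:32] of x0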

// For an anyext, we don't care what the high bits are, so we can perform an
// INSERT_SUBREG into an IMPLICIT_DEF.
def : Pat<(i64 (anyext GPR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;

// When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
// then assert the extension has happened.
def : Pat<(i64 (zext GPR32:$src)),
          (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
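// For example, when the def is not known to zero-extend this becomes:
//   mov w0, w1        // alias of orr w0, wzr, w1; clears the upper 32 bits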
6371
6372// To sign extend, we use a signed bitfield move instruction (SBFM) on the
6373// containing super-reg.
6374def : Pat<(i64 (sext GPR32:$src)),
6375   (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
6376def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
6377def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
6378def : Pat<(i64 (sext_inreg GPR64:$src, i8)),  (SBFMXri GPR64:$src, 0, 7)>;
6379def : Pat<(i64 (sext_inreg GPR64:$src, i1)),  (SBFMXri GPR64:$src, 0, 0)>;
6380def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
6381def : Pat<(i32 (sext_inreg GPR32:$src, i8)),  (SBFMWri GPR32:$src, 0, 7)>;
6382def : Pat<(i32 (sext_inreg GPR32:$src, i1)),  (SBFMWri GPR32:$src, 0, 0)>;

def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, (i64 (i32shift_a       imm0_31:$imm)),
                              (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                              (i64 (i64shift_sext_i8 imm0_63:$imm)))>;

def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, (i64 (i32shift_a        imm0_31:$imm)),
                              (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, (i64 (i64shift_a        imm0_63:$imm)),
                              (i64 (i64shift_sext_i16 imm0_63:$imm)))>;

def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 (i64shift_a        imm0_63:$imm)),
                   (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
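// Worked example: (shl (sext_inreg GPR32:$Rn, i8), 2) should fold into the
// single SBFMWri $Rn, 30, 7 - i.e. "sbfiz w0, w0, #2, #8" - since
// i32shift_a computes (32 - 2) & 0x1f = 30 and the i8 width caps imms at 7.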

// sra patterns have an AddedComplexity of 10, so make sure we have a higher
// AddedComplexity for the following patterns since we want to match sext + sra
// patterns before we attempt to match a single sra node.
let AddedComplexity = 20 in {
// We support all sext + sra combinations that preserve at least one bit of the
// original value being sign extended, i.e. shifts of up to bitwidth-1 bits.
def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;

def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;

def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 imm0_31:$imm), 31)>;
} // AddedComplexity = 20
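// e.g. (sra (sext_inreg GPR32:$Rn, i8), 3) becomes SBFMWri $Rn, 3, 7,
// i.e. "sbfx w0, w0, #3, #5": five of the eight sign-extended bits survive,
// so the single bitfield move is exact.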

// To truncate, we can simply extract from a subregister.
def : Pat<(i32 (trunc GPR64sp:$src)),
          (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;

// __builtin_trap() uses the BRK instruction on AArch64.
def : Pat<(trap), (BRK 1)>;
def : Pat<(debugtrap), (BRK 0xF000)>, Requires<[IsWindows]>;
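// e.g. a call to llvm.debugtrap() on a Windows target emits "brk #0xf000",
// the breakpoint immediate Windows debuggers expect.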

// Multiply high patterns which multiply the lower subvector using smull/umull
// and the upper subvector with smull2/umull2. Then shuffle the high part of
// both results together.
def : Pat<(v16i8 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (SMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (SMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (SMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;

def : Pat<(v16i8 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (UMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (UMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (UMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
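// As a concrete instance, (v4i32 (mulhs $Rn, $Rm)) should lower to roughly:
//   smull  v2.2d, v0.2s, v1.2s    // low halves -> full 64-bit products
//   smull2 v3.2d, v0.4s, v1.4s    // high halves
//   uzp2   v0.4s, v2.4s, v3.4s    // keep the odd (upper) 32 bits of each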

// Conversions within AdvSIMD types in the same register size are free.
// But because we need a consistent lane ordering, in big endian many
// conversions require one or more REV instructions.
//
// Consider a simple memory load followed by a bitconvert then a store.
//   v0 = load v2i32
//   v1 = BITCAST v2i32 v0 to v4i16
//        store v4i16 v1
//
// In big endian mode every memory access has an implicit byte swap. LDR and
// STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
// is, they treat the vector as a sequence of elements to be byte-swapped.
// The two pairs of instructions are fundamentally incompatible. We've decided
// to use LD1/ST1 only, to simplify the compiler implementation.
//
// LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
// the original code sequence:
//   v0 = load v2i32
//   v1 = REV v2i32                  (implicit)
//   v2 = BITCAST v2i32 v1 to v4i16
//   v3 = REV v4i16 v2               (implicit)
//        store v4i16 v3
//
// But this is now broken - the value stored is different from the value
// loaded due to lane reordering. To fix this, on every BITCAST we must
// perform two other REVs:
//   v0 = load v2i32
//   v1 = REV v2i32                  (implicit)
//   v2 = REV v2i32
//   v3 = BITCAST v2i32 v2 to v4i16
//   v4 = REV v4i16
//   v5 = REV v4i16 v4               (implicit)
//        store v4i16 v5
//
// This means an extra two instructions, but actually in most cases the two
// REV instructions can be combined into one. For example:
//   (REV64_2s (REV64_4h X)) === (REV32_4h X)
//
// There is also no 128-bit REV instruction. This must be synthesized with an
// EXT instruction.
//
// Most bitconverts require some sort of conversion. The only exceptions are:
//   a) Identity conversions -  vNfX <-> vNiX
//   b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
//
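// The AArch64NvCast patterns below encode case (a): a "natural vector cast"
// between same-sized element types is a pure reinterpretation of the
// register, so each pattern simply re-types the operand with no REV at all.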

// Natural vector casts (64 bit)
def : Pat<(v8i8 (AArch64NvCast (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8 (AArch64NvCast (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8 (AArch64NvCast (v8i8 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8 (AArch64NvCast (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1f64 (AArch64NvCast (f64 FPR64:$src))), (v1f64 FPR64:$src)>;

def : Pat<(v8i8 (AArch64NvCast (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v2f32 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1f64 (AArch64NvCast (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;

// Natural vector casts (128 bit)
def : Pat<(v16i8 (AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v4f32 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v2f64 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i8  (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;

def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8  (bitconvert GPR64:$Xn)),
                 (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;

def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
          (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
}
def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;

def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
          (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
          (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
          (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;

let Predicates = [IsLE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
                             (v1i64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
                             (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))),
                             (v1i64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
                             (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
                             (v1i64 (REV64v2i32 FPR64:$src))>;
}
def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
                             (v2i32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))),
                             (v2i32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
                             (v2i32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
                             (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))),
                             (v4i16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
                             (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
                             (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))),
                             (v4f16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
                             (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))), (v8i8  FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))),
                             (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))),
                             (v8i8 (REV16v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))),
                             (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))),
                             (v8i8 (REV16v8i8 FPR64:$src))>;
}

let Predicates = [IsLE] in {
def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))), (f64   FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))),
                             (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))),
                             (f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))),
                             (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))),
                             (f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))),
                             (f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(f64   (bitconvert (v1i64 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v1f64 FPR64:$src))), (f64   FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
                             (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
                             (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))),
                             (v1f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
                             (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
                             (v1f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (f64   FPR64:$src))), (v1f64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
                             (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))),
                             (v2f32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
                             (v2f32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
                                            (REV64v16i8 FPR128:$src), (i32 8)))>;
}

let Predicates = [IsLE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))),
                             (v2f64 (EXTv16i8 FPR128:$src,
                                              FPR128:$src, (i32 8)))>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
                             (v2f64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
                             (v2f64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
                             (v2f64 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))),
                             (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                    (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
                             (v4f32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
                             (v4f32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
                             (v4f32 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))),
                             (v2i64 (EXTv16i8 FPR128:$src,
                                              FPR128:$src, (i32 8)))>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
                             (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
                             (v2i64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
                             (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
}
def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))),
                             (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                              (REV64v4i32 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
                             (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
                             (v4i32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
                             (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))),
                             (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
                             (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
                             (v8i16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
                             (v8i16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
                             (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
                             (v8i16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))),
                             (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
                             (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
                             (v8f16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
                             (v8f16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
                             (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
                             (v8f16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))),
                             (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
                                              (REV64v16i8 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
                             (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
                             (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
                             (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
                             (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
}

def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;

def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
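// e.g. taking the high half, (v4i16 (extract_subvector (v8i16 $Rn), 1)),
// should come out as "dup v0.2d, v0.d[1]" followed by a free dsub copy:
// the DUP moves lane 1 down so the 64-bit subregister read sees the high half.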

// A 64-bit subvector insert to the first 128-bit vector position
// is a subregister copy that needs no instruction.
multiclass InsertSubvectorUndef<ValueType Ty> {
  def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
}

defm : InsertSubvectorUndef<i32>;
defm : InsertSubvectorUndef<i64>;

// Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
// or v2f32.
def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
                    (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
           (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
                     (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
           (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
// vector_extract on 64-bit vectors gets promoted to a 128-bit vector,
// so we match on v4f32 here, not v2f32. This will also catch adding
// the low two lanes of a true v4f32 vector.
def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
                (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
          (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
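// For example, IR that sums both f64 lanes of a v2f64:
//   %lo = extractelement <2 x double> %v, i64 0
//   %hi = extractelement <2 x double> %v, i64 1
//   %s  = fadd double %lo, %hi
// should select to a single "faddp d0, v0.2d" rather than two element
// moves and a scalar fadd.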

// Scalar 64-bit shifts in FPR64 registers.
def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
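// These keep i64 shifts whose operands already live in FPR64 on the vector
// side, e.g. llvm.aarch64.neon.sshl.i64 becomes "sshl d0, d0, d1" with no
// round trip through the general-purpose register file.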

// Patterns for nontemporal/no-allocate stores.
// We have to resort to tricks to turn a single-input store into a store pair,
// because there is no single-input nontemporal store, only STNP.
let Predicates = [IsLE] in {
let AddedComplexity = 15 in {
class NTStore128Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR128:$Rt),
        (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
      (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
              (CPYi64 FPR128:$Rt, (i64 1)),
              GPR64sp:$Rn, simm7s8:$offset)>;

def : NTStore128Pat<v2i64>;
def : NTStore128Pat<v4i32>;
def : NTStore128Pat<v8i16>;
def : NTStore128Pat<v16i8>;

class NTStore64Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR64:$Rt),
        (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
      (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
              (CPYi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
              GPR64sp:$Rn, simm7s4:$offset)>;

// FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
def : NTStore64Pat<v1f64>;
def : NTStore64Pat<v1i64>;
def : NTStore64Pat<v2i32>;
def : NTStore64Pat<v4i16>;
def : NTStore64Pat<v8i8>;

def : Pat<(nontemporalstore GPR64:$Rt,
            (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
          (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
                  GPR64sp:$Rn, simm7s4:$offset)>;
} // AddedComplexity = 15
} // Predicates = [IsLE]
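// Sketch of the resulting code for the 128-bit case (register names are
// illustrative): a nontemporal store of a v2i64 value becomes
//   mov  d1, v0.d[1]
//   stnp d0, d1, [x0]
// i.e. CPYi64 extracts lane 1 so STNP can store both halves as a pair.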

// Tail call return handling. These are all compiler pseudo-instructions,
// so no encoding information or anything like that.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
  def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  // Indirect tail-call with any register allowed, used by MachineOutliner when
  // this is proven safe.
  // FIXME: If we have to add any more hacks like this, we should instead relax
  // some verifier checks for outlined functions.
  def TCRETURNriALL : Pseudo<(outs), (ins GPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
  // Indirect tail-call restricted to x16 and x17, the only registers from
  // which an indirect branch is allowed to target a "BTI c" instruction.
  def TCRETURNriBTI : Pseudo<(outs), (ins rtcGPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
}

def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>,
      Requires<[NotUseBTI]>;
def : Pat<(AArch64tcret rtcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNriBTI rtcGPR64:$dst, imm:$FPDiff)>,
      Requires<[UseBTI]>;
def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;

def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;

// Extracting lane zero is a special case where we can just use a plain
// EXTRACT_SUBREG instruction, which will become FMOV. This is easier for the
// rest of the compiler, especially the register allocator and copy propagation,
// to reason about, so is preferred when it's possible to use it.
let AddedComplexity = 10 in {
  def : Pat<(i64 (extractelt (v2i64 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, dsub)>;
  def : Pat<(i32 (extractelt (v4i32 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, ssub)>;
  def : Pat<(i32 (extractelt (v2i32 V64:$V), (i64 0))), (EXTRACT_SUBREG V64:$V, ssub)>;
}
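// e.g. (i64 (extractelt (v2i64 $V), 0)) is just a dsub subregister read;
// if the value then has to cross to a GPR it shows up as "fmov x0, d0".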

// dot_v4i8
class mul_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm, node:$offset),
          (mul (ldop (add node:$Rn, node:$offset)),
               (ldop (add node:$Rm, node:$offset)))>;
class mulz_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm),
          (mul (ldop node:$Rn), (ldop node:$Rm))>;

def load_v4i8 :
  OutPatFrag<(ops node:$R),
             (INSERT_SUBREG
              (v2i32 (IMPLICIT_DEF)),
               (i32 (COPY_TO_REGCLASS (LDRWui node:$R, (i64 0)), FPR32)),
              ssub)>;

class dot_v4i8<Instruction DOT, SDPatternOperator ldop> :
  Pat<(i32 (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 3)),
           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 2)),
           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 1)),
                (mulz_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm))))),
      (EXTRACT_SUBREG (i64 (DOT (DUPv2i32gpr WZR),
                                (load_v4i8 GPR64sp:$Rn),
                                (load_v4i8 GPR64sp:$Rm))),
                      sub_32)>, Requires<[HasDotProd]>;
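// Illustrative target of dot_v4i8 (assuming +dotprod): four byte loads from
// Rn[0..3] and Rm[0..3], widened, multiplied pairwise and summed, collapse
// into a single sdot/udot on a zeroed 2S accumulator, with the i32 result
// read back from lane 0 via the sub_32 extract above.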

// dot_v8i8
class ee_v8i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K),
          (v4i16 (extract_subvector (v8i16 (extend node:$V)), node:$K))>;

class mul_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K),
          (mulop (v4i16 (ee_v8i8<extend> node:$M, node:$K)),
                 (v4i16 (ee_v8i8<extend> node:$N, node:$K)))>;

class idot_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 0)),
                 (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 4))))),
           (i64 0)))>;

// vaddv_[su]32 is special: it lowers to ADDP Vd.2S, Vn.2S, Vm.2S with
// Vn == Vm and returns Vd.s[0].
def VADDV_32 : OutPatFrag<(ops node:$R), (ADDPv2i32 node:$R, node:$R)>;

class odot_v8i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (EXTRACT_SUBREG
              (VADDV_32
               (i64 (DOT (DUPv2i32gpr WZR),
                         (v8i8 node:$Vm),
                         (v8i8 node:$Vn)))),
              sub_32)>;

class dot_v8i8<Instruction DOT, SDPatternOperator mulop,
                    SDPatternOperator extend> :
  Pat<(idot_v8i8<mulop, extend> V64:$Vm, V64:$Vn),
      (odot_v8i8<DOT> V64:$Vm, V64:$Vn)>,
  Requires<[HasDotProd]>;

// dot_v16i8
class ee_v16i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K1, node:$K2),
          (v4i16 (extract_subvector
           (v8i16 (extend
            (v8i8 (extract_subvector node:$V, node:$K1)))), node:$K2))>;

class mul_v16i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K1, node:$K2),
          (v4i32
           (mulop (v4i16 (ee_v16i8<extend> node:$M, node:$K1, node:$K2)),
                  (v4i16 (ee_v16i8<extend> node:$N, node:$K1, node:$K2))))>;

class idot_v16i8<SDPatternOperator m, SDPatternOperator x> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 0)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 0))),
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 4)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 4)))))),
           (i64 0)))>;

class odot_v16i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (i32 (ADDVv4i32v
              (DOT (DUPv4i32gpr WZR), node:$Vm, node:$Vn)))>;

class dot_v16i8<Instruction DOT, SDPatternOperator mulop,
                SDPatternOperator extend> :
  Pat<(idot_v16i8<mulop, extend> V128:$Vm, V128:$Vn),
      (odot_v16i8<DOT> V128:$Vm, V128:$Vn)>,
  Requires<[HasDotProd]>;

let AddedComplexity = 10 in {
  def : dot_v4i8<SDOTv8i8, sextloadi8>;
  def : dot_v4i8<UDOTv8i8, zextloadi8>;
  def : dot_v8i8<SDOTv8i8, AArch64smull, sext>;
  def : dot_v8i8<UDOTv8i8, AArch64umull, zext>;
  def : dot_v16i8<SDOTv16i8, AArch64smull, sext>;
  def : dot_v16i8<UDOTv16i8, AArch64umull, zext>;

  // FIXME: add patterns to generate vector by element dot product.
  // FIXME: add SVE dot-product patterns.
}

include "AArch64InstrAtomics.td"
include "AArch64SVEInstrInfo.td"
