//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// AArch64 Instruction definitions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
//
def HasV8_1a         : Predicate<"Subtarget->hasV8_1aOps()">,
                                 AssemblerPredicate<(all_of HasV8_1aOps), "armv8.1a">;
def HasV8_2a         : Predicate<"Subtarget->hasV8_2aOps()">,
                                 AssemblerPredicate<(all_of HasV8_2aOps), "armv8.2a">;
def HasV8_3a         : Predicate<"Subtarget->hasV8_3aOps()">,
                                 AssemblerPredicate<(all_of HasV8_3aOps), "armv8.3a">;
def HasV8_4a         : Predicate<"Subtarget->hasV8_4aOps()">,
                                 AssemblerPredicate<(all_of HasV8_4aOps), "armv8.4a">;
def HasV8_5a         : Predicate<"Subtarget->hasV8_5aOps()">,
                                 AssemblerPredicate<(all_of HasV8_5aOps), "armv8.5a">;
def HasV8_6a         : Predicate<"Subtarget->hasV8_6aOps()">,
                                 AssemblerPredicate<(all_of HasV8_6aOps), "armv8.6a">;
def HasV8_7a         : Predicate<"Subtarget->hasV8_7aOps()">,
                                 AssemblerPredicate<(all_of HasV8_7aOps), "armv8.7a">;
def HasV9_0a         : Predicate<"Subtarget->hasV9_0aOps()">,
                                 AssemblerPredicate<(all_of HasV9_0aOps), "armv9-a">;
def HasV9_1a         : Predicate<"Subtarget->hasV9_1aOps()">,
                                 AssemblerPredicate<(all_of HasV9_1aOps), "armv9.1a">;
def HasV9_2a         : Predicate<"Subtarget->hasV9_2aOps()">,
                                 AssemblerPredicate<(all_of HasV9_2aOps), "armv9.2a">;
def HasV8_0r         : Predicate<"Subtarget->hasV8_0rOps()">,
                                 AssemblerPredicate<(all_of HasV8_0rOps), "armv8-r">;

def HasEL2VMSA       : Predicate<"Subtarget->hasEL2VMSA()">,
                       AssemblerPredicate<(all_of FeatureEL2VMSA), "el2vmsa">;

def HasEL3           : Predicate<"Subtarget->hasEL3()">,
                       AssemblerPredicate<(all_of FeatureEL3), "el3">;

def HasVH            : Predicate<"Subtarget->hasVH()">,
                       AssemblerPredicate<(all_of FeatureVH), "vh">;

def HasLOR           : Predicate<"Subtarget->hasLOR()">,
                       AssemblerPredicate<(all_of FeatureLOR), "lor">;

def HasPAuth         : Predicate<"Subtarget->hasPAuth()">,
                       AssemblerPredicate<(all_of FeaturePAuth), "pauth">;

def HasJS            : Predicate<"Subtarget->hasJS()">,
                       AssemblerPredicate<(all_of FeatureJS), "jsconv">;

def HasCCIDX         : Predicate<"Subtarget->hasCCIDX()">,
                       AssemblerPredicate<(all_of FeatureCCIDX), "ccidx">;

def HasComplxNum      : Predicate<"Subtarget->hasComplxNum()">,
                       AssemblerPredicate<(all_of FeatureComplxNum), "complxnum">;

def HasNV            : Predicate<"Subtarget->hasNV()">,
                       AssemblerPredicate<(all_of FeatureNV), "nv">;

def HasMPAM          : Predicate<"Subtarget->hasMPAM()">,
                       AssemblerPredicate<(all_of FeatureMPAM), "mpam">;

def HasDIT           : Predicate<"Subtarget->hasDIT()">,
                       AssemblerPredicate<(all_of FeatureDIT), "dit">;

def HasTRACEV8_4         : Predicate<"Subtarget->hasTRACEV8_4()">,
                       AssemblerPredicate<(all_of FeatureTRACEV8_4), "tracev8.4">;

def HasAM            : Predicate<"Subtarget->hasAM()">,
                       AssemblerPredicate<(all_of FeatureAM), "am">;

def HasSEL2          : Predicate<"Subtarget->hasSEL2()">,
                       AssemblerPredicate<(all_of FeatureSEL2), "sel2">;

def HasTLB_RMI          : Predicate<"Subtarget->hasTLB_RMI()">,
                       AssemblerPredicate<(all_of FeatureTLB_RMI), "tlb-rmi">;

def HasFlagM         : Predicate<"Subtarget->hasFlagM()">,
                       AssemblerPredicate<(all_of FeatureFlagM), "flagm">;

def HasRCPC_IMMO      : Predicate<"Subtarget->hasRCPCImm()">,
                       AssemblerPredicate<(all_of FeatureRCPC_IMMO), "rcpc-immo">;

def HasFPARMv8       : Predicate<"Subtarget->hasFPARMv8()">,
                               AssemblerPredicate<(all_of FeatureFPARMv8), "fp-armv8">;
def HasNEON          : Predicate<"Subtarget->hasNEON()">,
                                 AssemblerPredicate<(all_of FeatureNEON), "neon">;
def HasCrypto        : Predicate<"Subtarget->hasCrypto()">,
                                 AssemblerPredicate<(all_of FeatureCrypto), "crypto">;
def HasSM4           : Predicate<"Subtarget->hasSM4()">,
                                 AssemblerPredicate<(all_of FeatureSM4), "sm4">;
def HasSHA3          : Predicate<"Subtarget->hasSHA3()">,
                                 AssemblerPredicate<(all_of FeatureSHA3), "sha3">;
def HasSHA2          : Predicate<"Subtarget->hasSHA2()">,
                                 AssemblerPredicate<(all_of FeatureSHA2), "sha2">;
def HasAES           : Predicate<"Subtarget->hasAES()">,
                                 AssemblerPredicate<(all_of FeatureAES), "aes">;
def HasDotProd       : Predicate<"Subtarget->hasDotProd()">,
                                 AssemblerPredicate<(all_of FeatureDotProd), "dotprod">;
def HasCRC           : Predicate<"Subtarget->hasCRC()">,
                                 AssemblerPredicate<(all_of FeatureCRC), "crc">;
def HasLSE           : Predicate<"Subtarget->hasLSE()">,
                                 AssemblerPredicate<(all_of FeatureLSE), "lse">;
def HasNoLSE         : Predicate<"!Subtarget->hasLSE()">;
def HasRAS           : Predicate<"Subtarget->hasRAS()">,
                                 AssemblerPredicate<(all_of FeatureRAS), "ras">;
def HasRDM           : Predicate<"Subtarget->hasRDM()">,
                                 AssemblerPredicate<(all_of FeatureRDM), "rdm">;
def HasPerfMon       : Predicate<"Subtarget->hasPerfMon()">;
def HasFullFP16      : Predicate<"Subtarget->hasFullFP16()">,
                                 AssemblerPredicate<(all_of FeatureFullFP16), "fullfp16">;
def HasFP16FML       : Predicate<"Subtarget->hasFP16FML()">,
                                 AssemblerPredicate<(all_of FeatureFP16FML), "fp16fml">;
def HasSPE           : Predicate<"Subtarget->hasSPE()">,
                                 AssemblerPredicate<(all_of FeatureSPE), "spe">;
def HasFuseAES       : Predicate<"Subtarget->hasFuseAES()">,
                                 AssemblerPredicate<(all_of FeatureFuseAES),
                                 "fuse-aes">;
def HasSVE           : Predicate<"Subtarget->hasSVE()">,
                                 AssemblerPredicate<(all_of FeatureSVE), "sve">;
def HasSVE2          : Predicate<"Subtarget->hasSVE2()">,
                                 AssemblerPredicate<(all_of FeatureSVE2), "sve2">;
def HasSVE2AES       : Predicate<"Subtarget->hasSVE2AES()">,
                                 AssemblerPredicate<(all_of FeatureSVE2AES), "sve2-aes">;
def HasSVE2SM4       : Predicate<"Subtarget->hasSVE2SM4()">,
                                 AssemblerPredicate<(all_of FeatureSVE2SM4), "sve2-sm4">;
def HasSVE2SHA3      : Predicate<"Subtarget->hasSVE2SHA3()">,
                                 AssemblerPredicate<(all_of FeatureSVE2SHA3), "sve2-sha3">;
def HasSVE2BitPerm   : Predicate<"Subtarget->hasSVE2BitPerm()">,
                                 AssemblerPredicate<(all_of FeatureSVE2BitPerm), "sve2-bitperm">;
def HasSME           : Predicate<"Subtarget->hasSME()">,
                                 AssemblerPredicate<(all_of FeatureSME), "sme">;
def HasSMEF64        : Predicate<"Subtarget->hasSMEF64()">,
                                 AssemblerPredicate<(all_of FeatureSMEF64), "sme-f64">;
def HasSMEI64        : Predicate<"Subtarget->hasSMEI64()">,
                                 AssemblerPredicate<(all_of FeatureSMEI64), "sme-i64">;
def HasStreamingSVE  : Predicate<"Subtarget->hasStreamingSVE()">,
                                 AssemblerPredicate<(all_of FeatureStreamingSVE), "streaming-sve">;
// A subset of SVE(2) instructions are legal in Streaming SVE execution mode;
// they should be enabled if either feature has been specified.
def HasSVEorStreamingSVE
    : Predicate<"Subtarget->hasSVE() || Subtarget->hasStreamingSVE()">,
                AssemblerPredicate<(any_of FeatureSVE, FeatureStreamingSVE),
                "streaming-sve or sve">;
def HasSVE2orStreamingSVE
    : Predicate<"Subtarget->hasSVE2() || Subtarget->hasStreamingSVE()">,
                AssemblerPredicate<(any_of FeatureSVE2, FeatureStreamingSVE),
                "streaming-sve or sve2">;
// A subset of NEON instructions are legal in Streaming SVE execution mode;
// they should be enabled if either feature has been specified.
def HasNEONorStreamingSVE
    : Predicate<"Subtarget->hasNEON() || Subtarget->hasStreamingSVE()">,
                AssemblerPredicate<(any_of FeatureNEON, FeatureStreamingSVE),
                "streaming-sve or neon">;
def HasRCPC          : Predicate<"Subtarget->hasRCPC()">,
                                 AssemblerPredicate<(all_of FeatureRCPC), "rcpc">;
def HasAltNZCV       : Predicate<"Subtarget->hasAlternativeNZCV()">,
                       AssemblerPredicate<(all_of FeatureAltFPCmp), "altnzcv">;
def HasFRInt3264     : Predicate<"Subtarget->hasFRInt3264()">,
                       AssemblerPredicate<(all_of FeatureFRInt3264), "frint3264">;
def HasSB            : Predicate<"Subtarget->hasSB()">,
                       AssemblerPredicate<(all_of FeatureSB), "sb">;
def HasPredRes      : Predicate<"Subtarget->hasPredRes()">,
                       AssemblerPredicate<(all_of FeaturePredRes), "predres">;
def HasCCDP          : Predicate<"Subtarget->hasCCDP()">,
                       AssemblerPredicate<(all_of FeatureCacheDeepPersist), "ccdp">;
def HasBTI           : Predicate<"Subtarget->hasBTI()">,
                       AssemblerPredicate<(all_of FeatureBranchTargetId), "bti">;
def HasMTE           : Predicate<"Subtarget->hasMTE()">,
                       AssemblerPredicate<(all_of FeatureMTE), "mte">;
def HasTME           : Predicate<"Subtarget->hasTME()">,
                       AssemblerPredicate<(all_of FeatureTME), "tme">;
def HasETE           : Predicate<"Subtarget->hasETE()">,
                       AssemblerPredicate<(all_of FeatureETE), "ete">;
def HasTRBE          : Predicate<"Subtarget->hasTRBE()">,
                       AssemblerPredicate<(all_of FeatureTRBE), "trbe">;
def HasBF16          : Predicate<"Subtarget->hasBF16()">,
                       AssemblerPredicate<(all_of FeatureBF16), "bf16">;
def HasMatMulInt8    : Predicate<"Subtarget->hasMatMulInt8()">,
                       AssemblerPredicate<(all_of FeatureMatMulInt8), "i8mm">;
def HasMatMulFP32    : Predicate<"Subtarget->hasMatMulFP32()">,
                       AssemblerPredicate<(all_of FeatureMatMulFP32), "f32mm">;
def HasMatMulFP64    : Predicate<"Subtarget->hasMatMulFP64()">,
                       AssemblerPredicate<(all_of FeatureMatMulFP64), "f64mm">;
def HasXS            : Predicate<"Subtarget->hasXS()">,
                       AssemblerPredicate<(all_of FeatureXS), "xs">;
def HasWFxT          : Predicate<"Subtarget->hasWFxT()">,
                       AssemblerPredicate<(all_of FeatureWFxT), "wfxt">;
def HasLS64          : Predicate<"Subtarget->hasLS64()">,
                       AssemblerPredicate<(all_of FeatureLS64), "ls64">;
def HasBRBE          : Predicate<"Subtarget->hasBRBE()">,
                       AssemblerPredicate<(all_of FeatureBRBE), "brbe">;
def HasSPE_EEF       : Predicate<"Subtarget->hasSPE_EEF()">,
                       AssemblerPredicate<(all_of FeatureSPE_EEF), "spe-eef">;
def IsLE             : Predicate<"Subtarget->isLittleEndian()">;
def IsBE             : Predicate<"!Subtarget->isLittleEndian()">;
def IsWindows        : Predicate<"Subtarget->isTargetWindows()">;
def UseExperimentalZeroingPseudos
    : Predicate<"Subtarget->useExperimentalZeroingPseudos()">;
def UseAlternateSExtLoadCVTF32
    : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;

def UseNegativeImmediates
    : Predicate<"false">, AssemblerPredicate<(all_of (not FeatureNoNegativeImmediates)),
                                             "NegativeImmediates">;

def UseScalarIncVL : Predicate<"Subtarget->useScalarIncVL()">;

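// Usage sketch (hypothetical instruction, for illustration only): the
// Predicate half of each definition above gates pattern selection, while the
// AssemblerPredicate half gates the assembler/disassembler and supplies the
// quoted feature name for "instruction requires:" diagnostics.
//
//   let Predicates = [HasLSE] in
//   def HYPOTHETICAL_OP : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>,
//                         Sched<[]>;
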
def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER",
                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                       SDTCisInt<1>]>>;


//===----------------------------------------------------------------------===//
// AArch64-specific DAG Nodes.
//

// SDTBinaryArithWithFlagsOut - RES1, FLAGS = op LHS, RHS
def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
                                              [SDTCisSameAs<0, 2>,
                                               SDTCisSameAs<0, 3>,
                                               SDTCisInt<0>, SDTCisVT<1, i32>]>;

// SDTBinaryArithWithFlagsIn - RES = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
                                            [SDTCisSameAs<0, 1>,
                                             SDTCisSameAs<0, 2>,
                                             SDTCisInt<0>,
                                             SDTCisVT<3, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES1, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                            [SDTCisSameAs<0, 2>,
                                             SDTCisSameAs<0, 3>,
                                             SDTCisInt<0>,
                                             SDTCisVT<1, i32>,
                                             SDTCisVT<4, i32>]>;

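// Reading these profiles: SDTypeProfile<NumResults, NumOperands, Constraints>,
// with results numbered before operands. For SDTBinaryArithWithFlagsOut that
// means indices 0-1 are the results and 2-3 the operands, so SDTCisSameAs<0, 2>
// and SDTCisSameAs<0, 3> tie the arithmetic result to both inputs, and
// SDTCisVT<1, i32> makes the second result the i32 NZCV flags value -- exactly
// the "RES1, FLAGS = op LHS, RHS" shape described above.
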
def SDT_AArch64Brcond  : SDTypeProfile<0, 3,
                                     [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
                                      SDTCisVT<2, i32>]>;
def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
                                        SDTCisVT<2, OtherVT>]>;


def SDT_AArch64CSel  : SDTypeProfile<1, 4,
                                   [SDTCisSameAs<0, 1>,
                                    SDTCisSameAs<0, 2>,
                                    SDTCisInt<3>,
                                    SDTCisVT<4, i32>]>;
def SDT_AArch64CCMP : SDTypeProfile<1, 5,
                                    [SDTCisVT<0, i32>,
                                     SDTCisInt<1>,
                                     SDTCisSameAs<1, 2>,
                                     SDTCisInt<3>,
                                     SDTCisInt<4>,
                                     SDTCisVT<5, i32>]>;
def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
                                     [SDTCisVT<0, i32>,
                                      SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>,
                                      SDTCisInt<3>,
                                      SDTCisInt<4>,
                                      SDTCisVT<5, i32>]>;
def SDT_AArch64FCmp   : SDTypeProfile<0, 2,
                                   [SDTCisFP<0>,
                                    SDTCisSameAs<0, 1>]>;
def SDT_AArch64Dup   : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDT_AArch64DupLane   : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
def SDT_AArch64Insr  : SDTypeProfile<1, 2, [SDTCisVec<0>]>;
def SDT_AArch64Zip   : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                          SDTCisSameAs<0, 1>,
                                          SDTCisSameAs<0, 2>]>;
def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisInt<2>, SDTCisInt<3>]>;
def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                          SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDT_AArch64Dot: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                         SDTCisVec<2>, SDTCisSameAs<2,3>]>;

def SDT_AArch64vshiftinsert : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<3>,
                                                 SDTCisSameAs<0,1>,
                                                 SDTCisSameAs<0,2>]>;

def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
def SDT_AArch64fcmp  : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>]>;
def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>,
                                           SDTCisSameAs<0,3>]>;
def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;

def SDT_AArch64ITOF  : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;

def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
                                                 SDTCisPtrTy<1>]>;

def SDT_AArch64uaddlp : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;

def SDT_AArch64ldp : SDTypeProfile<2, 1, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stp : SDTypeProfile<0, 3, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stnp : SDTypeProfile<0, 3, [SDTCisVT<0, v4i32>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;

// Generates the general dynamic sequences, i.e.
//  adrp  x0, :tlsdesc:var
//  ldr   x1, [x0, #:tlsdesc_lo12:var]
//  add   x0, x0, #:tlsdesc_lo12:var
//  .tlsdesccall var
//  blr   x1
// The sequence produces no SDNode result (the TPIDR_EL0 offset is put
// directly in X0); its single operand is the thread-local variable.
def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0,1,
                                          [SDTCisPtrTy<0>]>;

def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
                                        [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
                                         SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
                                         SDTCisSameAs<1, 4>]>;

def SDT_AArch64TBL : SDTypeProfile<1, 2, [
  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>
]>;

// non-extending masked load fragment.
def nonext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
         !cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
// sign extending masked load fragments.
def asext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def),[{
  return (cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD ||
          cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD) &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def asext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def asext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def asext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;
// zero extending masked load fragments.
def zext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def zext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def zext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def zext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

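// Usage sketch (hypothetical pattern; the real consumers are the SVE/NEON
// patterns elsewhere in the backend):
//
//   def : Pat<(nxv16i8 (nonext_masked_load GPR64sp:$base, nxv16i1:$pg, undef)),
//             (LD1B_IMM $pg, $base, 0)>;
//
// Layering PatFrags like this lets a single selection pattern require
// "unindexed, non-extending, not non-temporal" (or a specific in-memory
// element type) without repeating the C++ predicate code at every use site.
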
def non_temporal_load :
   PatFrag<(ops node:$ptr, node:$pred, node:$def),
           (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
   return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
          cast<MaskedLoadSDNode>(N)->isUnindexed() &&
          cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;

// non-truncating masked store fragment.
def nontrunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         !cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;
// truncating masked store fragments.
def trunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed();
}]>;
def trunc_masked_store_i8 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def trunc_masked_store_i16 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def trunc_masked_store_i32 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;

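// The store fragments mirror the load ones. A hypothetical truncating-store
// pattern (instruction name and operand order illustrative only) would
// select on the in-memory element type the same way:
//
//   def : Pat<(trunc_masked_store_i8 (nxv8i16 ZPR:$vec), GPR64sp:$base,
//                                    (nxv8i1 PPR:$pg)),
//             (ST1B_H ZPR:$vec, PPR:$pg, GPR64sp:$base, (i64 0))>;
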
// Node definitions.
def AArch64adrp          : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
def AArch64adr           : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
def AArch64addlow        : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
def AArch64LOADgot       : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
                                SDCallSeqStart<[ SDTCisVT<0, i32>,
                                                 SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOutGlue]>;
def AArch64callseq_end   : SDNode<"ISD::CALLSEQ_END",
                                SDCallSeqEnd<[ SDTCisVT<0, i32>,
                                               SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def AArch64call          : SDNode<"AArch64ISD::CALL",
                                SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                 SDNPVariadic]>;

def AArch64call_rvmarker: SDNode<"AArch64ISD::CALL_RVMARKER",
                             SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                             [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                              SDNPVariadic]>;

def AArch64brcond        : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
                                [SDNPHasChain]>;
def AArch64cbz           : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64cbnz           : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64tbz           : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;
def AArch64tbnz           : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;


def AArch64csel          : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
def AArch64csinv         : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
def AArch64csneg         : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
def AArch64csinc         : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
def AArch64retflag       : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
                                [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def AArch64adc       : SDNode<"AArch64ISD::ADC",  SDTBinaryArithWithFlagsIn >;
def AArch64sbc       : SDNode<"AArch64ISD::SBC",  SDTBinaryArithWithFlagsIn>;
def AArch64add_flag  : SDNode<"AArch64ISD::ADDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64sub_flag  : SDNode<"AArch64ISD::SUBS",  SDTBinaryArithWithFlagsOut>;
def AArch64and_flag  : SDNode<"AArch64ISD::ANDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64adc_flag  : SDNode<"AArch64ISD::ADCS",  SDTBinaryArithWithFlagsInOut>;
def AArch64sbc_flag  : SDNode<"AArch64ISD::SBCS",  SDTBinaryArithWithFlagsInOut>;

def AArch64ccmp      : SDNode<"AArch64ISD::CCMP",  SDT_AArch64CCMP>;
def AArch64ccmn      : SDNode<"AArch64ISD::CCMN",  SDT_AArch64CCMP>;
def AArch64fccmp     : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;

def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;

def AArch64fcmp         : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
def AArch64strict_fcmp  : SDNode<"AArch64ISD::STRICT_FCMP", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64strict_fcmpe : SDNode<"AArch64ISD::STRICT_FCMPE", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64any_fcmp     : PatFrags<(ops node:$lhs, node:$rhs),
                                   [(AArch64strict_fcmp node:$lhs, node:$rhs),
                                    (AArch64fcmp node:$lhs, node:$rhs)]>;

def AArch64dup       : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
def AArch64duplane8  : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;

def AArch64insr      : SDNode<"AArch64ISD::INSR", SDT_AArch64Insr>;

def AArch64zip1      : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
def AArch64zip2      : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
def AArch64uzp1      : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
def AArch64uzp2      : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
def AArch64trn1      : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
def AArch64trn2      : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;

def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;

def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
def AArch64ext : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;

def AArch64vashr : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
def AArch64vlshr : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
def AArch64sqshli : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
def AArch64uqshli : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
def AArch64srshri : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;
def AArch64vsli : SDNode<"AArch64ISD::VSLI", SDT_AArch64vshiftinsert>;
def AArch64vsri : SDNode<"AArch64ISD::VSRI", SDT_AArch64vshiftinsert>;

def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
def AArch64bsp: SDNode<"AArch64ISD::BSP", SDT_AArch64trivec>;

def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;

def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;

def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
                        (vnot (AArch64cmeqz (and node:$LHS, node:$RHS)))>;

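// In other words, CMTST is built from generic nodes: AND the operands,
// compare the result against zero, then invert. This lets a generic
// "test any bit set" DAG select to a single CMTST instruction without a
// dedicated AArch64ISD node.
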
def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;

def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;

def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
                  [SDNPHasChain,  SDNPOptInGlue, SDNPVariadic]>;

def AArch64Prefetch        : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
                               [SDNPHasChain, SDNPSideEffect]>;

def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;

def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
                                    SDT_AArch64TLSDescCallSeq,
                                    [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
                                     SDNPVariadic]>;


def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
                                 SDT_AArch64WrapperLarge>;

def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;

def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                    SDTCisSameAs<1, 2>]>;
def AArch64smull    : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
def AArch64umull    : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;

def AArch64frecpe   : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
def AArch64frecps   : SDNode<"AArch64ISD::FRECPS", SDTFPBinOp>;
def AArch64frsqrte  : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
def AArch64frsqrts  : SDNode<"AArch64ISD::FRSQRTS", SDTFPBinOp>;

def AArch64sdot     : SDNode<"AArch64ISD::SDOT", SDT_AArch64Dot>;
def AArch64udot     : SDNode<"AArch64ISD::UDOT", SDT_AArch64Dot>;

def AArch64saddv    : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
def AArch64uaddv    : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
def AArch64sminv    : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
def AArch64uminv    : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
def AArch64smaxv    : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
def AArch64umaxv    : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;

def AArch64srhadd   : SDNode<"AArch64ISD::SRHADD", SDT_AArch64binvec>;
def AArch64urhadd   : SDNode<"AArch64ISD::URHADD", SDT_AArch64binvec>;
def AArch64shadd   : SDNode<"AArch64ISD::SHADD", SDT_AArch64binvec>;
def AArch64uhadd   : SDNode<"AArch64ISD::UHADD", SDT_AArch64binvec>;

def AArch64uabd     : PatFrags<(ops node:$lhs, node:$rhs),
                               [(abdu node:$lhs, node:$rhs),
                                (int_aarch64_neon_uabd node:$lhs, node:$rhs)]>;
def AArch64sabd     : PatFrags<(ops node:$lhs, node:$rhs),
                               [(abds node:$lhs, node:$rhs),
                                (int_aarch64_neon_sabd node:$lhs, node:$rhs)]>;

def AArch64uaddlp_n : SDNode<"AArch64ISD::UADDLP", SDT_AArch64uaddlp>;
def AArch64uaddlp   : PatFrags<(ops node:$src),
                               [(AArch64uaddlp_n node:$src),
                                (int_aarch64_neon_uaddlp node:$src)]>;

def SDT_AArch64SETTAG : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
def AArch64stg : SDNode<"AArch64ISD::STG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stzg : SDNode<"AArch64ISD::STZG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64st2g : SDNode<"AArch64ISD::ST2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stz2g : SDNode<"AArch64ISD::STZ2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def SDT_AArch64unpk : SDTypeProfile<1, 1, [
    SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>
]>;
def AArch64sunpkhi : SDNode<"AArch64ISD::SUNPKHI", SDT_AArch64unpk>;
def AArch64sunpklo : SDNode<"AArch64ISD::SUNPKLO", SDT_AArch64unpk>;
def AArch64uunpkhi : SDNode<"AArch64ISD::UUNPKHI", SDT_AArch64unpk>;
def AArch64uunpklo : SDNode<"AArch64ISD::UUNPKLO", SDT_AArch64unpk>;

def AArch64ldp : SDNode<"AArch64ISD::LDP", SDT_AArch64ldp, [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def AArch64stp : SDNode<"AArch64ISD::STP", SDT_AArch64stp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stnp : SDNode<"AArch64ISD::STNP", SDT_AArch64stnp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def AArch64tbl : SDNode<"AArch64ISD::TBL", SDT_AArch64TBL>;
def AArch64mrs : SDNode<"AArch64ISD::MRS",
                        SDTypeProfile<1, 1, [SDTCisVT<0, i64>, SDTCisVT<1, i32>]>,
                        [SDNPHasChain, SDNPOutGlue]>;
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//

// AArch64 Instruction Predicate Definitions.
// We could compute these on a per-module basis but doing so requires accessing
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
  def ForCodeSize   : Predicate<"shouldOptForSize(MF)">;
  def NotForCodeSize   : Predicate<"!shouldOptForSize(MF)">;
  // Avoid generating STRQro if it is slow, unless we're optimizing for code size.
  def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || shouldOptForSize(MF)">;

  def UseBTI : Predicate<[{ MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;
  def NotUseBTI : Predicate<[{ !MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;

  def SLSBLRMitigation : Predicate<[{ MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  def NoSLSBLRMitigation : Predicate<[{ !MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  // Toggles patterns which aren't beneficial in GlobalISel when we aren't
  // optimizing. This allows us to selectively use patterns without impacting
  // SelectionDAG's behaviour.
  // FIXME: One day there will probably be a nicer way to check for this, but
  // today is not that day.
  def OptimizedGISelOrOtherSelector : Predicate<"!MF->getFunction().hasOptNone() || MF->getProperties().hasProperty(MachineFunctionProperties::Property::FailedISel) || !MF->getProperties().hasProperty(MachineFunctionProperties::Property::Legalized)">;
}

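// Usage sketch: these per-function predicates compose with subtarget
// predicates in Requires lists, e.g. a hypothetical pattern enabled only
// when not optimizing for size:
//
//   def : Pat<(hypothetical_node GPR64:$a), (HYPOTHETICAL_OP GPR64:$a)>,
//         Requires<[NotForCodeSize, HasLSE]>;
//
// RecomputePerFunction = 1 makes TableGen emit code that re-evaluates them
// for every function instead of caching one answer per module.
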
681include "AArch64InstrFormats.td"
682include "SVEInstrFormats.td"
683include "SMEInstrFormats.td"
684
685//===----------------------------------------------------------------------===//
686
687//===----------------------------------------------------------------------===//
688// Miscellaneous instructions.
689//===----------------------------------------------------------------------===//
690
691let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
692// We set Sched to empty list because we expect these instructions to simply get
693// removed in most cases.
694def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
695                              [(AArch64callseq_start timm:$amt1, timm:$amt2)]>,
696                              Sched<[]>;
697def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
698                            [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
699                            Sched<[]>;
700} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1
701
let isReMaterializable = 1, isCodeGenOnly = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions.  When that changes, they can be
// removed, along with the AArch64Wrapper node.

let AddedComplexity = 10 in
def LOADgot : Pseudo<(outs GPR64common:$dst), (ins i64imm:$addr),
                     [(set GPR64common:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
              Sched<[WriteLDAdr]>;

// The MOVaddr instruction should match only when the add is not folded
// into a load or store address.
def MOVaddr
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
                                            tglobaladdr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrJT
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
                                             tjumptable:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrCP
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
                                             tconstpool:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrBA
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
                                             tblockaddress:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrTLS
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
                                            tglobaltlsaddr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrEXT
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
                                            texternalsym:$low))]>,
      Sched<[WriteAdrAdr]>;
// Normally AArch64addlow either gets folded into a following ldr/str,
// or together with an adrp into MOVaddr above. For cases with TLS, it
// might appear without either of them, so allow lowering it into a plain
// add.
def ADDlowTLS
    : Pseudo<(outs GPR64sp:$dst), (ins GPR64sp:$src, i64imm:$low),
             [(set GPR64sp:$dst, (AArch64addlow GPR64sp:$src,
                                            tglobaltlsaddr:$low))]>,
      Sched<[WriteAdr]>;

} // isReMaterializable, isCodeGenOnly

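// For reference, AArch64ExpandPseudo later rewrites MOVaddr into the usual
// two-instruction PC-relative materialization, e.g. for a global "var"
// (register choice illustrative):
//
//   adrp x0, var             // $hi: 4KiB page containing var
//   add  x0, x0, :lo12:var   // $low: offset within that page
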
def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
          (LOADgot tglobaltlsaddr:$addr)>;

def : Pat<(AArch64LOADgot texternalsym:$addr),
          (LOADgot texternalsym:$addr)>;

def : Pat<(AArch64LOADgot tconstpool:$addr),
          (LOADgot tconstpool:$addr)>;

// A 32-bit jump table destination is actually only 2 instructions, since we
// can use the table itself as a PC-relative base. But that optimization occurs
// after branch relaxation, so be pessimistic here.
let Size = 12, Constraints = "@earlyclobber $dst,@earlyclobber $scratch",
    isNotDuplicable = 1 in {
def JumpTableDest32 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest16 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest8 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                            (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                     Sched<[]>;
}
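
// Rough expansion sketch for JumpTableDest32 (register assignment is
// illustrative; entries are stored table-relative, so the table address
// doubles as the base that gets added back):
//
//   ldrsw $dst, [$table, $entry, lsl #2]   // signed 32-bit entry
//   add   $dst, $table, $dst               // destination = table + entry
//
// The 16- and 8-bit variants load correspondingly narrower entries.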

// Space-consuming pseudo to aid testing of placement and reachability
// algorithms. Immediate operand is the number of bytes this "instruction"
// occupies; register operands can be used to enforce dependency and constrain
// the scheduler.
let hasSideEffects = 1, mayLoad = 1, mayStore = 1 in
def SPACE : Pseudo<(outs GPR64:$Rd), (ins i32imm:$size, GPR64:$Rn),
                   [(set GPR64:$Rd, (int_aarch64_space imm:$size, GPR64:$Rn))]>,
            Sched<[]>;

let hasSideEffects = 1, isCodeGenOnly = 1 in {
  def SpeculationSafeValueX
      : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>;
  def SpeculationSafeValueW
      : Pseudo<(outs GPR32:$dst), (ins GPR32:$src), []>, Sched<[]>;
}

// SpeculationBarrierEndBB must only be used after an unconditional control
// flow, i.e. after a terminator for which isBarrier is True.
let hasSideEffects = 1, isCodeGenOnly = 1, isTerminator = 1, isBarrier = 1 in {
  def SpeculationBarrierISBDSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SpeculationBarrierSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// System instructions.
//===----------------------------------------------------------------------===//

def HINT : HintI<"hint">;
def : InstAlias<"nop",  (HINT 0b000)>;
def : InstAlias<"yield",(HINT 0b001)>;
def : InstAlias<"wfe",  (HINT 0b010)>;
def : InstAlias<"wfi",  (HINT 0b011)>;
def : InstAlias<"sev",  (HINT 0b100)>;
def : InstAlias<"sevl", (HINT 0b101)>;
def : InstAlias<"dgh",  (HINT 0b110)>;
def : InstAlias<"esb",  (HINT 0b10000)>, Requires<[HasRAS]>;
def : InstAlias<"csdb", (HINT 20)>;
// In order to be able to write readable assembly, LLVM should accept assembly
// inputs that use Branch Target Identification mnemonics, even with BTI disabled.
// However, in order to be compatible with other assemblers (e.g. GAS), LLVM
// should not emit these mnemonics unless BTI is enabled.
def : InstAlias<"bti",  (HINT 32), 0>;
def : InstAlias<"bti $op", (HINT btihint_op:$op), 0>;
def : InstAlias<"bti",  (HINT 32)>, Requires<[HasBTI]>;
def : InstAlias<"bti $op", (HINT btihint_op:$op)>, Requires<[HasBTI]>;

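// The trailing 0 on the first two aliases is the InstAlias EmitPriority: the
// parser accepts them unconditionally, but the printer never chooses them,
// which is how the "accept always, emit only with BTI" policy described
// above is implemented.
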
// v8.2a Statistical Profiling extension
def : InstAlias<"psb $op",  (HINT psbhint_op:$op)>, Requires<[HasSPE]>;

// As far as LLVM is concerned this writes to the system's exclusive monitors.
let mayLoad = 1, mayStore = 1 in
def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;

// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
// model patterns with sufficiently fine granularity.
let mayLoad = ?, mayStore = ? in {
def DMB   : CRmSystemI<barrier_op, 0b101, "dmb",
                       [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;

def DSB   : CRmSystemI<barrier_op, 0b100, "dsb",
                       [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;

def ISB   : CRmSystemI<barrier_op, 0b110, "isb",
                       [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;

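// For example, "dmb ish" encodes CRm = 0b1011 (11), so the intrinsic form
// (int_aarch64_dmb (i32 11)) and the assembly mnemonic select the same
// encoding.
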
def TSB   : CRmSystemI<barrier_op, 0b010, "tsb", []> {
  let CRm        = 0b0010;
  let Inst{12}   = 0;
  let Predicates = [HasTRACEV8_4];
}

def DSBnXS  : CRmSystemI<barrier_nxs_op, 0b001, "dsb"> {
  let CRm{1-0}   = 0b11;
  let Inst{9-8}  = 0b10;
  let Predicates = [HasXS];
}

let Predicates = [HasWFxT] in {
def WFET : RegInputSystemI<0b0000, 0b000, "wfet">;
def WFIT : RegInputSystemI<0b0000, 0b001, "wfit">;
}

// Branch Record Buffer two-word mnemonic instructions
class BRBEI<bits<3> op2, string keyword>
    : SimpleSystemI<0, (ins), "brb", keyword>, Sched<[WriteSys]> {
  let Inst{31-8} = 0b110101010000100101110010;
  let Inst{7-5} = op2;
  let Predicates = [HasBRBE];
}
def BRB_IALL: BRBEI<0b100, "\tiall">;
def BRB_INJ:  BRBEI<0b101, "\tinj">;

}

// Allow uppercase and lowercase keyword arguments for BRB IALL and BRB INJ
def : TokenAlias<"INJ", "inj">;
def : TokenAlias<"IALL", "iall">;

// ARMv8.2-A Dot Product
let Predicates = [HasDotProd] in {
defm SDOT : SIMDThreeSameVectorDot<0, 0, "sdot", AArch64sdot>;
defm UDOT : SIMDThreeSameVectorDot<1, 0, "udot", AArch64udot>;
defm SDOTlane : SIMDThreeSameVectorDotIndex<0, 0, 0b10, "sdot", AArch64sdot>;
defm UDOTlane : SIMDThreeSameVectorDotIndex<1, 0, 0b10, "udot", AArch64udot>;
}

// ARMv8.6-A BFloat
let Predicates = [HasNEON, HasBF16] in {
defm BFDOT       : SIMDThreeSameVectorBFDot<1, "bfdot">;
defm BF16DOTlane : SIMDThreeSameVectorBF16DotI<0, "bfdot">;
def BFMMLA       : SIMDThreeSameVectorBF16MatrixMul<"bfmmla">;
def BFMLALB      : SIMDBF16MLAL<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALT      : SIMDBF16MLAL<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFMLALBIdx   : SIMDBF16MLALIndex<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALTIdx   : SIMDBF16MLALIndex<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFCVTN       : SIMD_BFCVTN;
def BFCVTN2      : SIMD_BFCVTN2;

// Vector-scalar BFDOT:
// The second source operand of the 64-bit variant of BF16DOTlane is a 128-bit
// register (the instruction uses a single 32-bit lane from it), so the pattern
// is a bit tricky.
def : Pat<(v2f32 (int_aarch64_neon_bfdot
                    (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
                    (v4bf16 (bitconvert
                      (v2i32 (AArch64duplane32
                        (v4i32 (bitconvert
                          (v8bf16 (insert_subvector undef,
                            (v4bf16 V64:$Rm),
                            (i64 0))))),
                        VectorIndexS:$idx)))))),
          (BF16DOTlanev4bf16 (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
                             (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                             VectorIndexS:$idx)>;
}

let Predicates = [HasNEONorStreamingSVE, HasBF16] in {
def BFCVT : BF16ToSinglePrecision<"bfcvt">;
}

// ARMv8.6A AArch64 matrix multiplication
let Predicates = [HasMatMulInt8] in {
def  SMMLA : SIMDThreeSameVectorMatMul<0, 0, "smmla", int_aarch64_neon_smmla>;
def  UMMLA : SIMDThreeSameVectorMatMul<0, 1, "ummla", int_aarch64_neon_ummla>;
def USMMLA : SIMDThreeSameVectorMatMul<1, 0, "usmmla", int_aarch64_neon_usmmla>;
defm USDOT : SIMDThreeSameVectorDot<0, 1, "usdot", int_aarch64_neon_usdot>;
defm USDOTlane : SIMDThreeSameVectorDotIndex<0, 1, 0b10, "usdot", int_aarch64_neon_usdot>;

// sudot lane has a pattern where usdot is expected (there is no separate
// sudot intrinsic). The second operand is used in the dup operation to
// repeat the indexed element.
934class BaseSIMDSUDOTIndex<bit Q, string dst_kind, string lhs_kind,
935                         string rhs_kind, RegisterOperand RegType,
936                         ValueType AccumType, ValueType InputType>
937      : BaseSIMDThreeSameVectorDotIndex<Q, 0, 1, 0b00, "sudot", dst_kind,
938                                        lhs_kind, rhs_kind, RegType, AccumType,
939                                        InputType, null_frag> {
940  let Pattern = [(set (AccumType RegType:$dst),
941                      (AccumType (int_aarch64_neon_usdot (AccumType RegType:$Rd),
942                                 (InputType (bitconvert (AccumType
943                                    (AArch64duplane32 (v4i32 V128:$Rm),
944                                        VectorIndexS:$idx)))),
945                                 (InputType RegType:$Rn))))];
946}
947
948multiclass SIMDSUDOTIndex {
949  def v8i8  : BaseSIMDSUDOTIndex<0, ".2s", ".8b", ".4b", V64, v2i32, v8i8>;
950  def v16i8 : BaseSIMDSUDOTIndex<1, ".4s", ".16b", ".4b", V128, v4i32, v16i8>;
951}
952
953defm SUDOTlane : SIMDSUDOTIndex;
954
955}
956
957// ARMv8.2-A FP16 Fused Multiply-Add Long
958let Predicates = [HasNEON, HasFP16FML] in {
959defm FMLAL      : SIMDThreeSameVectorFML<0, 1, 0b001, "fmlal", int_aarch64_neon_fmlal>;
960defm FMLSL      : SIMDThreeSameVectorFML<0, 1, 0b101, "fmlsl", int_aarch64_neon_fmlsl>;
961defm FMLAL2     : SIMDThreeSameVectorFML<1, 0, 0b001, "fmlal2", int_aarch64_neon_fmlal2>;
962defm FMLSL2     : SIMDThreeSameVectorFML<1, 0, 0b101, "fmlsl2", int_aarch64_neon_fmlsl2>;
963defm FMLALlane  : SIMDThreeSameVectorFMLIndex<0, 0b0000, "fmlal", int_aarch64_neon_fmlal>;
964defm FMLSLlane  : SIMDThreeSameVectorFMLIndex<0, 0b0100, "fmlsl", int_aarch64_neon_fmlsl>;
965defm FMLAL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1000, "fmlal2", int_aarch64_neon_fmlal2>;
966defm FMLSL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1100, "fmlsl2", int_aarch64_neon_fmlsl2>;
967}
968
969// Armv8.2-A Crypto extensions
970let Predicates = [HasSHA3] in {
971def SHA512H   : CryptoRRRTied<0b0, 0b00, "sha512h">;
972def SHA512H2  : CryptoRRRTied<0b0, 0b01, "sha512h2">;
973def SHA512SU0 : CryptoRRTied_2D<0b0, 0b00, "sha512su0">;
974def SHA512SU1 : CryptoRRRTied_2D<0b0, 0b10, "sha512su1">;
975def RAX1      : CryptoRRR_2D<0b0,0b11, "rax1">;
976def EOR3      : CryptoRRRR_16B<0b00, "eor3">;
977def BCAX      : CryptoRRRR_16B<0b01, "bcax">;
978def XAR       : CryptoRRRi6<"xar">;
979
980class SHA3_pattern<Instruction INST, Intrinsic OpNode, ValueType VecTy>
981  : Pat<(VecTy (OpNode (VecTy V128:$Vd), (VecTy V128:$Vn), (VecTy V128:$Vm))),
982        (INST (VecTy V128:$Vd), (VecTy V128:$Vn), (VecTy V128:$Vm))>;
983
984def : Pat<(v2i64 (int_aarch64_crypto_sha512su0 (v2i64 V128:$Vn), (v2i64 V128:$Vm))),
985          (SHA512SU0 (v2i64 V128:$Vn), (v2i64 V128:$Vm))>;
986
987def : SHA3_pattern<SHA512H, int_aarch64_crypto_sha512h, v2i64>;
988def : SHA3_pattern<SHA512H2, int_aarch64_crypto_sha512h2, v2i64>;
989def : SHA3_pattern<SHA512SU1, int_aarch64_crypto_sha512su1, v2i64>;
990
991def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v16i8>;
992def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v8i16>;
993def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v4i32>;
994def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v2i64>;
995
996class EOR3_pattern<ValueType VecTy>
997  : Pat<(xor (xor (VecTy V128:$Vn), (VecTy V128:$Vm)), (VecTy V128:$Va)),
998        (EOR3 (VecTy V128:$Vn), (VecTy V128:$Vm), (VecTy V128:$Va))>;
999
1000def : EOR3_pattern<v16i8>;
1001def : EOR3_pattern<v8i16>;
1002def : EOR3_pattern<v4i32>;
1003def : EOR3_pattern<v2i64>;
1004
1005def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v16i8>;
1006def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v8i16>;
1007def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v4i32>;
1008def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v2i64>;
1009
1010def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v16i8>;
1011def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v8i16>;
1012def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v4i32>;
1013def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v2i64>;
1014
1015def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v16i8>;
1016def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v8i16>;
1017def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v4i32>;
1018def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v2i64>;
1019
1020def : Pat<(v2i64 (int_aarch64_crypto_rax1 (v2i64 V128:$Vn), (v2i64 V128:$Vm))),
1021          (RAX1 (v2i64 V128:$Vn), (v2i64 V128:$Vm))>;
1022
1023def : Pat<(v2i64 (int_aarch64_crypto_xar (v2i64 V128:$Vn), (v2i64 V128:$Vm), (i64 timm0_63:$imm))),
1024          (XAR (v2i64 V128:$Vn), (v2i64 V128:$Vm), (timm0_63:$imm))>;
1025
1026
1027} // HasSHA3
1028
1029let Predicates = [HasSM4] in {
1030def SM3TT1A   : CryptoRRRi2Tied<0b0, 0b00, "sm3tt1a">;
1031def SM3TT1B   : CryptoRRRi2Tied<0b0, 0b01, "sm3tt1b">;
1032def SM3TT2A   : CryptoRRRi2Tied<0b0, 0b10, "sm3tt2a">;
1033def SM3TT2B   : CryptoRRRi2Tied<0b0, 0b11, "sm3tt2b">;
1034def SM3SS1    : CryptoRRRR_4S<0b10, "sm3ss1">;
1035def SM3PARTW1 : CryptoRRRTied_4S<0b1, 0b00, "sm3partw1">;
1036def SM3PARTW2 : CryptoRRRTied_4S<0b1, 0b01, "sm3partw2">;
1037def SM4ENCKEY : CryptoRRR_4S<0b1, 0b10, "sm4ekey">;
1038def SM4E      : CryptoRRTied_4S<0b0, 0b01, "sm4e">;
1039
1040def : Pat<(v4i32 (int_aarch64_crypto_sm3ss1 (v4i32 V128:$Vn), (v4i32 V128:$Vm), (v4i32 V128:$Va))),
1041          (SM3SS1 (v4i32 V128:$Vn), (v4i32 V128:$Vm), (v4i32 V128:$Va))>;
1042
1043class SM3PARTW_pattern<Instruction INST, Intrinsic OpNode>
1044  : Pat<(v4i32 (OpNode (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm))),
1045        (INST (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm))>;
1046
1047class SM3TT_pattern<Instruction INST, Intrinsic OpNode>
1048  : Pat<(v4i32 (OpNode (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm), (i64 VectorIndexS_timm:$imm))),
1049        (INST (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm), (VectorIndexS_timm:$imm))>;
1050
1051class SM4_pattern<Instruction INST, Intrinsic OpNode>
1052  : Pat<(v4i32 (OpNode (v4i32 V128:$Vn), (v4i32 V128:$Vm))),
1053        (INST (v4i32 V128:$Vn), (v4i32 V128:$Vm))>;
1054
1055def : SM3PARTW_pattern<SM3PARTW1, int_aarch64_crypto_sm3partw1>;
1056def : SM3PARTW_pattern<SM3PARTW2, int_aarch64_crypto_sm3partw2>;
1057
1058def : SM3TT_pattern<SM3TT1A, int_aarch64_crypto_sm3tt1a>;
1059def : SM3TT_pattern<SM3TT1B, int_aarch64_crypto_sm3tt1b>;
1060def : SM3TT_pattern<SM3TT2A, int_aarch64_crypto_sm3tt2a>;
1061def : SM3TT_pattern<SM3TT2B, int_aarch64_crypto_sm3tt2b>;
1062
1063def : SM4_pattern<SM4ENCKEY, int_aarch64_crypto_sm4ekey>;
1064def : SM4_pattern<SM4E, int_aarch64_crypto_sm4e>;
1065} // HasSM4
1066
1067let Predicates = [HasRCPC] in {
1068  // v8.3 Release Consistent processor consistent (RCpc) support, optional in v8.2.
1069  def LDAPRB  : RCPCLoad<0b00, "ldaprb", GPR32>;
1070  def LDAPRH  : RCPCLoad<0b01, "ldaprh", GPR32>;
1071  def LDAPRW  : RCPCLoad<0b10, "ldapr", GPR32>;
1072  def LDAPRX  : RCPCLoad<0b11, "ldapr", GPR64>;
1073}
1074
1075// v8.3a complex add and multiply-accumulate. No predicate here; that is done
1076// inside the multiclass, as the FP16 versions need different predicates.
1077defm FCMLA : SIMDThreeSameVectorTiedComplexHSD<1, 0b110, complexrotateop,
1078                                               "fcmla", null_frag>;
1079defm FCADD : SIMDThreeSameVectorComplexHSD<1, 0b111, complexrotateopodd,
1080                                           "fcadd", null_frag>;
1081defm FCMLA : SIMDIndexedTiedComplexHSD<0, 1, complexrotateop, "fcmla">;
1082
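// Rotation semantics for the complex add, sketched (the i32 0/1 immediates in
// the patterns below encode rotations of #90/#270; see the Arm ARM for the
// authoritative definitions). Per complex element pair:
//   fcadd ..., #90  : re' = re(n) - im(m),  im' = im(n) + re(m)
//   fcadd ..., #270 : re' = re(n) + im(m),  im' = im(n) - re(m)
// A #0 FCMLA followed by a #90 FCMLA accumulates a full complex multiply.
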
1083let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
1084  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot90 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
1085            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 0))>;
1086  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot270 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
1087            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 1))>;
1088  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot90 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
1089            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 0))>;
1090  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot270 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
1091            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 1))>;
1092}
1093
1094let Predicates = [HasComplxNum, HasNEON] in {
1095  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot90 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
1096            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 0))>;
1097  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot270 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
1098            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 1))>;
1099  foreach Ty = [v4f32, v2f64] in {
1100    def : Pat<(Ty (int_aarch64_neon_vcadd_rot90 (Ty V128:$Rn), (Ty V128:$Rm))),
1101              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 0))>;
1102    def : Pat<(Ty (int_aarch64_neon_vcadd_rot270 (Ty V128:$Rn), (Ty V128:$Rm))),
1103              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 1))>;
1104  }
1105}
1106
1107multiclass FCMLA_PATS<ValueType ty, DAGOperand Reg> {
1108  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1109            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 0)>;
1110  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1111            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 1)>;
1112  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1113            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 2)>;
1114  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1115            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 3)>;
1116}
1117
1118multiclass FCMLA_LANE_PATS<ValueType ty, DAGOperand Reg, dag RHSDup> {
1119  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1120            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 0)>;
1121  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1122            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 1)>;
1123  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1124            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 2)>;
1125  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1126            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 3)>;
1127}
1128
1129
1130let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
1131  defm : FCMLA_PATS<v4f16, V64>;
1132  defm : FCMLA_PATS<v8f16, V128>;
1133
1134  defm : FCMLA_LANE_PATS<v4f16, V64,
1135                         (v4f16 (bitconvert (v2i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexD:$idx))))>;
1136  defm : FCMLA_LANE_PATS<v8f16, V128,
1137                         (v8f16 (bitconvert (v4i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))>;
1138}
1139let Predicates = [HasComplxNum, HasNEON] in {
1140  defm : FCMLA_PATS<v2f32, V64>;
1141  defm : FCMLA_PATS<v4f32, V128>;
1142  defm : FCMLA_PATS<v2f64, V128>;
1143
1144  defm : FCMLA_LANE_PATS<v4f32, V128,
1145                         (v4f32 (bitconvert (v2i64 (AArch64duplane64 (v2i64 V128:$Rm), VectorIndexD:$idx))))>;
1146}
1147
1148// v8.3a Pointer Authentication
1149// These instructions inhabit part of the hint space and so can be used on any
1150// armv8 target. Keeping the old HINT mnemonic when compiling without PA is
1151// important for compatibility with other assemblers (e.g. GAS) when building
1152// software that must run on CPUs both with and without PA.
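// For example, PACIASP below is literally "hint #25", which executes as a NOP
// on cores without PA, so a signed prologue/epilogue is safe everywhere
// (a sketch, not authoritative codegen output):
//   paciasp            // hint #25: sign LR using key A and SP
//   ...
//   autiasp            // hint #29: authenticate LR
//   ret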
1153let Uses = [LR], Defs = [LR] in {
1154  def PACIAZ   : SystemNoOperands<0b000, "hint\t#24">;
1155  def PACIBZ   : SystemNoOperands<0b010, "hint\t#26">;
1156  let isAuthenticated = 1 in {
1157    def AUTIAZ   : SystemNoOperands<0b100, "hint\t#28">;
1158    def AUTIBZ   : SystemNoOperands<0b110, "hint\t#30">;
1159  }
1160}
1161let Uses = [LR, SP], Defs = [LR] in {
1162  def PACIASP  : SystemNoOperands<0b001, "hint\t#25">;
1163  def PACIBSP  : SystemNoOperands<0b011, "hint\t#27">;
1164  let isAuthenticated = 1 in {
1165    def AUTIASP  : SystemNoOperands<0b101, "hint\t#29">;
1166    def AUTIBSP  : SystemNoOperands<0b111, "hint\t#31">;
1167  }
1168}
1169let Uses = [X16, X17], Defs = [X17], CRm = 0b0001 in {
1170  def PACIA1716  : SystemNoOperands<0b000, "hint\t#8">;
1171  def PACIB1716  : SystemNoOperands<0b010, "hint\t#10">;
1172  let isAuthenticated = 1 in {
1173    def AUTIA1716  : SystemNoOperands<0b100, "hint\t#12">;
1174    def AUTIB1716  : SystemNoOperands<0b110, "hint\t#14">;
1175  }
1176}
1177
1178let Uses = [LR], Defs = [LR], CRm = 0b0000 in {
1179  def XPACLRI   : SystemNoOperands<0b111, "hint\t#7">;
1180}
1181
1182// To keep assembly readable, LLVM should accept inputs that use pointer
1183// authentication mnemonics even with PA disabled. However, to stay compatible
1184// with other assemblers (e.g. GAS), LLVM should not emit these mnemonics
1185// unless PA is enabled.
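// A sketch of the mechanism: the trailing integer on InstAlias is the emit
// priority, so the 0 below makes each alias parse-only, while the duplicate
// aliases with priority 1 inside the HasPAuth block further down are also
// used when printing.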
1186def : InstAlias<"paciaz", (PACIAZ), 0>;
1187def : InstAlias<"pacibz", (PACIBZ), 0>;
1188def : InstAlias<"autiaz", (AUTIAZ), 0>;
1189def : InstAlias<"autibz", (AUTIBZ), 0>;
1190def : InstAlias<"paciasp", (PACIASP), 0>;
1191def : InstAlias<"pacibsp", (PACIBSP), 0>;
1192def : InstAlias<"autiasp", (AUTIASP), 0>;
1193def : InstAlias<"autibsp", (AUTIBSP), 0>;
1194def : InstAlias<"pacia1716", (PACIA1716), 0>;
1195def : InstAlias<"pacib1716", (PACIB1716), 0>;
1196def : InstAlias<"autia1716", (AUTIA1716), 0>;
1197def : InstAlias<"autib1716", (AUTIB1716), 0>;
1198def : InstAlias<"xpaclri", (XPACLRI), 0>;
1199
1200// These pointer authentication instructions require armv8.3a
1201let Predicates = [HasPAuth] in {
1202
1203  // When PA is enabled, a better mnemonic should be emitted.
1204  def : InstAlias<"paciaz", (PACIAZ), 1>;
1205  def : InstAlias<"pacibz", (PACIBZ), 1>;
1206  def : InstAlias<"autiaz", (AUTIAZ), 1>;
1207  def : InstAlias<"autibz", (AUTIBZ), 1>;
1208  def : InstAlias<"paciasp", (PACIASP), 1>;
1209  def : InstAlias<"pacibsp", (PACIBSP), 1>;
1210  def : InstAlias<"autiasp", (AUTIASP), 1>;
1211  def : InstAlias<"autibsp", (AUTIBSP), 1>;
1212  def : InstAlias<"pacia1716", (PACIA1716), 1>;
1213  def : InstAlias<"pacib1716", (PACIB1716), 1>;
1214  def : InstAlias<"autia1716", (AUTIA1716), 1>;
1215  def : InstAlias<"autib1716", (AUTIB1716), 1>;
1216  def : InstAlias<"xpaclri", (XPACLRI), 1>;
1217
1218  multiclass SignAuth<bits<3> prefix, bits<3> prefix_z, string asm,
1219                      SDPatternOperator op> {
1220    def IA   : SignAuthOneData<prefix, 0b00, !strconcat(asm,  "ia"), op>;
1221    def IB   : SignAuthOneData<prefix, 0b01, !strconcat(asm,  "ib"), op>;
1222    def DA   : SignAuthOneData<prefix, 0b10, !strconcat(asm,  "da"), op>;
1223    def DB   : SignAuthOneData<prefix, 0b11, !strconcat(asm,  "db"), op>;
1224    def IZA  : SignAuthZero<prefix_z,  0b00, !strconcat(asm, "iza"), op>;
1225    def DZA  : SignAuthZero<prefix_z,  0b10, !strconcat(asm, "dza"), op>;
1226    def IZB  : SignAuthZero<prefix_z,  0b01, !strconcat(asm, "izb"), op>;
1227    def DZB  : SignAuthZero<prefix_z,  0b11, !strconcat(asm, "dzb"), op>;
1228  }
1229
1230  defm PAC : SignAuth<0b000, 0b010, "pac", int_ptrauth_sign>;
1231  defm AUT : SignAuth<0b001, 0b011, "aut", null_frag>;
1232
1233  def XPACI : ClearAuth<0, "xpaci">;
1234  def XPACD : ClearAuth<1, "xpacd">;
1235
1236  def PACGA : SignAuthTwoOperand<0b1100, "pacga", int_ptrauth_sign_generic>;
1237
1238  // Combined Instructions
1239  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
1240    def BRAA    : AuthBranchTwoOperands<0, 0, "braa">;
1241    def BRAB    : AuthBranchTwoOperands<0, 1, "brab">;
1242  }
1243  let isCall = 1, Defs = [LR], Uses = [SP] in {
1244    def BLRAA   : AuthBranchTwoOperands<1, 0, "blraa">;
1245    def BLRAB   : AuthBranchTwoOperands<1, 1, "blrab">;
1246  }
1247
1248  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
1249    def BRAAZ   : AuthOneOperand<0b000, 0, "braaz">;
1250    def BRABZ   : AuthOneOperand<0b000, 1, "brabz">;
1251  }
1252  let isCall = 1, Defs = [LR], Uses = [SP] in {
1253    def BLRAAZ  : AuthOneOperand<0b001, 0, "blraaz">;
1254    def BLRABZ  : AuthOneOperand<0b001, 1, "blrabz">;
1255  }
1256
1257  let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
1258    def RETAA   : AuthReturn<0b010, 0, "retaa">;
1259    def RETAB   : AuthReturn<0b010, 1, "retab">;
1260    def ERETAA  : AuthReturn<0b100, 0, "eretaa">;
1261    def ERETAB  : AuthReturn<0b100, 1, "eretab">;
1262  }
1263
1264  defm LDRAA  : AuthLoad<0, "ldraa", simm10Scaled>;
1265  defm LDRAB  : AuthLoad<1, "ldrab", simm10Scaled>;
1266
1267}
1268
1269// v8.3a floating point conversion for JavaScript
1270let Predicates = [HasJS, HasFPARMv8], Defs = [NZCV] in
1271def FJCVTZS  : BaseFPToIntegerUnscaled<0b01, 0b11, 0b110, FPR64, GPR32,
1272                                      "fjcvtzs",
1273                                      [(set GPR32:$Rd,
1274                                         (int_aarch64_fjcvtzs FPR64:$Rn))]> {
1275  let Inst{31} = 0;
1276} // HasJS, HasFPARMv8
1277
1278// v8.4 Flag manipulation instructions
1279let Predicates = [HasFlagM], Defs = [NZCV], Uses = [NZCV] in {
1280def CFINV : SimpleSystemI<0, (ins), "cfinv", "">, Sched<[WriteSys]> {
1281  let Inst{20-5} = 0b0000001000000000;
1282}
1283def SETF8  : BaseFlagManipulation<0, 0, (ins GPR32:$Rn), "setf8", "{\t$Rn}">;
1284def SETF16 : BaseFlagManipulation<0, 1, (ins GPR32:$Rn), "setf16", "{\t$Rn}">;
1285def RMIF   : FlagRotate<(ins GPR64:$Rn, uimm6:$imm, imm0_15:$mask), "rmif",
1286                        "{\t$Rn, $imm, $mask}">;
1287} // HasFlagM
1288
1289// v8.5 flag manipulation instructions
1290let Predicates = [HasAltNZCV], Uses = [NZCV], Defs = [NZCV] in {
1291
1292def XAFLAG : PstateWriteSimple<(ins), "xaflag", "">, Sched<[WriteSys]> {
1293  let Inst{18-16} = 0b000;
1294  let Inst{11-8} = 0b0000;
1295  let Unpredictable{11-8} = 0b1111;
1296  let Inst{7-5} = 0b001;
1297}
1298
1299def AXFLAG : PstateWriteSimple<(ins), "axflag", "">, Sched<[WriteSys]> {
1300  let Inst{18-16} = 0b000;
1301  let Inst{11-8} = 0b0000;
1302  let Unpredictable{11-8} = 0b1111;
1303  let Inst{7-5} = 0b010;
1304}
1305} // HasAltNZCV
1306
1307
1308// Armv8.5-A speculation barrier
1309def SB : SimpleSystemI<0, (ins), "sb", "">, Sched<[]> {
1310  let Inst{20-5} = 0b0001100110000111;
1311  let Unpredictable{11-8} = 0b1111;
1312  let Predicates = [HasSB];
1313  let hasSideEffects = 1;
1314}
1315
1316def : InstAlias<"clrex", (CLREX 0xf)>;
1317def : InstAlias<"isb", (ISB 0xf)>;
1318def : InstAlias<"ssbb", (DSB 0)>;
1319def : InstAlias<"pssbb", (DSB 4)>;
1320def : InstAlias<"dfb", (DSB 0b1100)>, Requires<[HasV8_0r]>;
1321
1322def MRS    : MRSI;
1323def MSR    : MSRI;
1324def MSRpstateImm1 : MSRpstateImm0_1;
1325def MSRpstateImm4 : MSRpstateImm0_15;
1326
1327def : Pat<(AArch64mrs imm:$id),
1328          (MRS imm:$id)>;
1329
1330// The thread pointer (on Linux, at least, where this has been implemented) is
1331// TPIDR_EL0.
1332def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
1333                       [(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;
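// A minimal usage sketch: the pseudo is eventually lowered to a plain system
// register read, e.g.
//   mrs x0, TPIDR_EL0   // x0 = thread pointer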
1334
1335let Uses = [ X9 ], Defs = [ X16, X17, LR, NZCV ] in {
1336def HWASAN_CHECK_MEMACCESS : Pseudo<
1337  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
1338  [(int_hwasan_check_memaccess X9, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
1339  Sched<[]>;
1340}
1341
1342let Uses = [ X20 ], Defs = [ X16, X17, LR, NZCV ] in {
1343def HWASAN_CHECK_MEMACCESS_SHORTGRANULES : Pseudo<
1344  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
1345  [(int_hwasan_check_memaccess_shortgranules X20, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
1346  Sched<[]>;
1347}
1348
1349// The cycle counter PMC register is PMCCNTR_EL0.
1350let Predicates = [HasPerfMon] in
1351def : Pat<(readcyclecounter), (MRS 0xdce8)>;
1352
1353// FPCR register
1354def : Pat<(i64 (int_aarch64_get_fpcr)), (MRS 0xda20)>;
1355def : Pat<(int_aarch64_set_fpcr i64:$val), (MSR 0xda20, GPR64:$val)>;
1356
1357// Generic system instructions
1358def SYSxt  : SystemXtI<0, "sys">;
1359def SYSLxt : SystemLXtI<1, "sysl">;
1360
1361def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
1362                (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
1363                 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;
1364
1365
1366let Predicates = [HasTME] in {
1367
1368def TSTART : TMSystemI<0b0000, "tstart",
1369                      [(set GPR64:$Rt, (int_aarch64_tstart))]>;
1370
1371def TCOMMIT : TMSystemINoOperand<0b0000, "tcommit", [(int_aarch64_tcommit)]>;
1372
1373def TCANCEL : TMSystemException<0b011, "tcancel",
1374                                [(int_aarch64_tcancel timm64_0_65535:$imm)]>;
1375
1376def TTEST : TMSystemI<0b0001, "ttest", [(set GPR64:$Rt, (int_aarch64_ttest))]> {
1377  let mayLoad = 0;
1378  let mayStore = 0;
1379}
1380} // HasTME
1381
1382//===----------------------------------------------------------------------===//
1383// Move immediate instructions.
1384//===----------------------------------------------------------------------===//
1385
1386defm MOVK : InsertImmediate<0b11, "movk">;
1387defm MOVN : MoveImmediate<0b00, "movn">;
1388
1389let PostEncoderMethod = "fixMOVZ" in
1390defm MOVZ : MoveImmediate<0b10, "movz">;
1391
1392// First group of aliases covers an implicit "lsl #0".
1393def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, timm32_0_65535:$imm, 0), 0>;
1394def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, timm32_0_65535:$imm, 0), 0>;
1395def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, timm32_0_65535:$imm, 0)>;
1396def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, timm32_0_65535:$imm, 0)>;
1397def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, timm32_0_65535:$imm, 0)>;
1398def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, timm32_0_65535:$imm, 0)>;
1399
1400// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
1401def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
1402def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
1403def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
1404def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;
1405
1406def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
1407def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
1408def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
1409def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;
1410
1411def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g3:$sym, 48), 0>;
1412def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g2:$sym, 32), 0>;
1413def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g1:$sym, 16), 0>;
1414def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g0:$sym, 0), 0>;
1415
1416def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
1417def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;
1418
1419def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
1420def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;
1421
1422def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g1:$sym, 16), 0>;
1423def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g0:$sym, 0), 0>;
1424
1425// Final group of aliases covers true "mov $Rd, $imm" cases.
1426multiclass movw_mov_alias<string basename, Instruction INST, RegisterClass GPR,
1427                          int width, int shift> {
1428  def _asmoperand : AsmOperandClass {
1429    let Name = basename # width # "_lsl" # shift # "MovAlias";
1430    let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
1431                               # shift # ">";
1432    let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
1433  }
1434
1435  def _movimm : Operand<i32> {
1436    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
1437  }
1438
1439  def : InstAlias<"mov $Rd, $imm",
1440                  (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
1441}
1442
1443defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
1444defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;
1445
1446defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
1447defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
1448defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
1449defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;
1450
1451defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
1452defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;
1453
1454defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
1455defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
1456defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
1457defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;
1458
1459let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
1460    isAsCheapAsAMove = 1 in {
1461// FIXME: The following pseudo instructions are only needed because remat
1462// cannot handle multiple instructions.  When that changes, we can select
1463// directly to the real instructions and get rid of these pseudos.
1464
1465def MOVi32imm
1466    : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
1467             [(set GPR32:$dst, imm:$src)]>,
1468      Sched<[WriteImm]>;
1469def MOVi64imm
1470    : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
1471             [(set GPR64:$dst, imm:$src)]>,
1472      Sched<[WriteImm]>;
1473} // isReMaterializable, isCodeGenOnly
1474
1475// If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
1476// eventual expansion code fewer bits to worry about getting right. Marshalling
1477// the types is a little tricky though:
1478def i64imm_32bit : ImmLeaf<i64, [{
1479  return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
1480}]>;
1481
1482def s64imm_32bit : ImmLeaf<i64, [{
1483  int64_t Imm64 = static_cast<int64_t>(Imm);
1484  return Imm64 >= std::numeric_limits<int32_t>::min() &&
1485         Imm64 <= std::numeric_limits<int32_t>::max();
1486}]>;
1487
1488def trunc_imm : SDNodeXForm<imm, [{
1489  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
1490}]>;
1491
1492def gi_trunc_imm : GICustomOperandRenderer<"renderTruncImm">,
1493  GISDNodeXFormEquiv<trunc_imm>;
1494
1495let Predicates = [OptimizedGISelOrOtherSelector] in {
1496// The SUBREG_TO_REG isn't eliminated at -O0, which can result in pointless
1497// copies.
1498def : Pat<(i64 i64imm_32bit:$src),
1499          (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
1500}
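// Worked example of the pattern above (a sketch): materializing the 64-bit
// value 0xdeadbeef, which fits in 32 bits, selects to
//   MOVi32imm       %w, 0xdeadbeef
//   SUBREG_TO_REG   %x, 0, %w, sub_32   // upper 32 bits are known zero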
1501
1502// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
1503def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
1504return CurDAG->getTargetConstant(
1505  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
1506}]>;
1507
1508def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
1509return CurDAG->getTargetConstant(
1510  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
1511}]>;
1512
1513
1514def : Pat<(f32 fpimm:$in),
1515  (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
1516def : Pat<(f64 fpimm:$in),
1517  (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;
1518
1519
1520// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
1521// sequences.
1522def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
1523                             tglobaladdr:$g1, tglobaladdr:$g0),
1524          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g0, 0),
1525                                  tglobaladdr:$g1, 16),
1526                          tglobaladdr:$g2, 32),
1527                  tglobaladdr:$g3, 48)>;
1528
1529def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
1530                             tblockaddress:$g1, tblockaddress:$g0),
1531          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g0, 0),
1532                                  tblockaddress:$g1, 16),
1533                          tblockaddress:$g2, 32),
1534                  tblockaddress:$g3, 48)>;
1535
1536def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
1537                             tconstpool:$g1, tconstpool:$g0),
1538          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g0, 0),
1539                                  tconstpool:$g1, 16),
1540                          tconstpool:$g2, 32),
1541                  tconstpool:$g3, 48)>;
1542
1543def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
1544                             tjumptable:$g1, tjumptable:$g0),
1545          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g0, 0),
1546                                  tjumptable:$g1, 16),
1547                          tjumptable:$g2, 32),
1548                  tjumptable:$g3, 48)>;
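// Conceptually, each pattern above builds the 64-bit address of "sym" 16 bits
// at a time (each chunk carried by its own :abs_gN: relocation):
//   movz x0, #sym[15:0]
//   movk x0, #sym[31:16], lsl #16
//   movk x0, #sym[47:32], lsl #32
//   movk x0, #sym[63:48], lsl #48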
1549
1550
1551//===----------------------------------------------------------------------===//
1552// Arithmetic instructions.
1553//===----------------------------------------------------------------------===//
1554
1555// Add/subtract with carry.
1556defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
1557defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;
1558
1559def : InstAlias<"ngc $dst, $src",  (SBCWr  GPR32:$dst, WZR, GPR32:$src)>;
1560def : InstAlias<"ngc $dst, $src",  (SBCXr  GPR64:$dst, XZR, GPR64:$src)>;
1561def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
1562def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;
1563
1564// Add/subtract
1565defm ADD : AddSub<0, "add", "sub", add>;
1566defm SUB : AddSub<1, "sub", "add">;
1567
1568def : InstAlias<"mov $dst, $src",
1569                (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
1570def : InstAlias<"mov $dst, $src",
1571                (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
1572def : InstAlias<"mov $dst, $src",
1573                (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
1574def : InstAlias<"mov $dst, $src",
1575                (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;
1576
1577defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
1578defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;
1579
1580// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
1581def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
1582          (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
1583def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
1584          (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
1585def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
1586          (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
1587def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
1588          (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
1589def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
1590          (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
1591def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
1592          (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
1593let AddedComplexity = 1 in {
1594def : Pat<(sub GPR32sp:$R2, arith_extended_reg32_i32:$R3),
1595          (SUBSWrx GPR32sp:$R2, arith_extended_reg32_i32:$R3)>;
1596def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64_i64:$R3),
1597          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64_i64:$R3)>;
1598}
1599
1600// Because of the immediate format for add/sub-imm instructions, the
1601// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
1602// These patterns capture that transformation.
1603let AddedComplexity = 1 in {
1604def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1605          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1606def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1607          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1608def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1609          (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1610def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1611          (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1612}
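// For example, (add w1, #-5) has no legal add-immediate encoding, so it is
// selected as the equivalent "subs w0, w1, #5" (the flag results are unused).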
1613
1614// The same immediate-format transformation applies to the flag-setting
1615// node variants (AArch64add_flag / AArch64sub_flag); these patterns
1616// capture it for the ADDS/SUBS forms.
1617let AddedComplexity = 1 in {
1618def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1619          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1620def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1621          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1622def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1623          (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1624def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1625          (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1626}
1627
1628def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1629def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1630def : InstAlias<"neg $dst, $src$shift",
1631                (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1632def : InstAlias<"neg $dst, $src$shift",
1633                (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1634
1635def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1636def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1637def : InstAlias<"negs $dst, $src$shift",
1638                (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1639def : InstAlias<"negs $dst, $src$shift",
1640                (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1641
1642
1643// Unsigned/Signed divide
1644defm UDIV : Div<0, "udiv", udiv>;
1645defm SDIV : Div<1, "sdiv", sdiv>;
1646
1647def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr GPR32:$Rn, GPR32:$Rm)>;
1648def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr GPR64:$Rn, GPR64:$Rm)>;
1649def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr GPR32:$Rn, GPR32:$Rm)>;
1650def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr GPR64:$Rn, GPR64:$Rm)>;
1651
1652// Variable shift
1653defm ASRV : Shift<0b10, "asr", sra>;
1654defm LSLV : Shift<0b00, "lsl", shl>;
1655defm LSRV : Shift<0b01, "lsr", srl>;
1656defm RORV : Shift<0b11, "ror", rotr>;
1657
1658def : ShiftAlias<"asrv", ASRVWr, GPR32>;
1659def : ShiftAlias<"asrv", ASRVXr, GPR64>;
1660def : ShiftAlias<"lslv", LSLVWr, GPR32>;
1661def : ShiftAlias<"lslv", LSLVXr, GPR64>;
1662def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
1663def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
1664def : ShiftAlias<"rorv", RORVWr, GPR32>;
1665def : ShiftAlias<"rorv", RORVXr, GPR64>;
1666
1667// Multiply-add
1668let AddedComplexity = 5 in {
1669defm MADD : MulAccum<0, "madd">;
1670defm MSUB : MulAccum<1, "msub">;
1671
1672def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
1673          (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1674def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
1675          (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1676
1677def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
1678          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1679def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
1680          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1681def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
1682          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1683def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
1684          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1685} // AddedComplexity = 5
1686
1687let AddedComplexity = 5 in {
1688def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
1689def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
1690def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
1691def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;
1692
1693def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext_inreg GPR64:$Rm, i32))),
1694          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
1695def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext GPR32:$Rm))),
1696          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
1697def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
1698          (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1699def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (and GPR64:$Rm, 0xFFFFFFFF))),
1700          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
1701def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (zext GPR32:$Rm))),
1702          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
1703def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
1704          (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1705
1706def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
1707          (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1708def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
1709          (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1710
1711def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
1712          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1713def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
1714          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1715def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
1716          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1717                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1718
1719def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1720          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1721def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1722          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1723def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
1724          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1725                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1726
1727def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
1728          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1729def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
1730          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1731def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
1732                    GPR64:$Ra)),
1733          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1734                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1735
1736def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1737          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1738def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1739          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1740def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
1741                                    (s64imm_32bit:$C)))),
1742          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1743                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1744} // AddedComplexity = 5
1745
1746def : MulAccumWAlias<"mul", MADDWrrr>;
1747def : MulAccumXAlias<"mul", MADDXrrr>;
1748def : MulAccumWAlias<"mneg", MSUBWrrr>;
1749def : MulAccumXAlias<"mneg", MSUBXrrr>;
1750def : WideMulAccumAlias<"smull", SMADDLrrr>;
1751def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
1752def : WideMulAccumAlias<"umull", UMADDLrrr>;
1753def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;
1754
1755// Multiply-high
1756def SMULHrr : MulHi<0b010, "smulh", mulhs>;
1757def UMULHrr : MulHi<0b110, "umulh", mulhu>;
1758
1759// CRC32
1760def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
1761def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
1762def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
1763def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;
1764
1765def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
1766def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
1767def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
1768def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;
1769
1770// v8.1 atomic CAS
1771defm CAS   : CompareAndSwap<0, 0, "">;
1772defm CASA  : CompareAndSwap<1, 0, "a">;
1773defm CASL  : CompareAndSwap<0, 1, "l">;
1774defm CASAL : CompareAndSwap<1, 1, "al">;
1775
1776// v8.1 atomic CASP
1777defm CASP   : CompareAndSwapPair<0, 0, "">;
1778defm CASPA  : CompareAndSwapPair<1, 0, "a">;
1779defm CASPL  : CompareAndSwapPair<0, 1, "l">;
1780defm CASPAL : CompareAndSwapPair<1, 1, "al">;
1781
1782// v8.1 atomic SWP
1783defm SWP   : Swap<0, 0, "">;
1784defm SWPA  : Swap<1, 0, "a">;
1785defm SWPL  : Swap<0, 1, "l">;
1786defm SWPAL : Swap<1, 1, "al">;
1787
1788// v8.1 atomic LD<OP>(register): performs a load and then ST<OP>(register).
1789defm LDADD   : LDOPregister<0b000, "add", 0, 0, "">;
1790defm LDADDA  : LDOPregister<0b000, "add", 1, 0, "a">;
1791defm LDADDL  : LDOPregister<0b000, "add", 0, 1, "l">;
1792defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;
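// A sketch of the semantics (see the Arm ARM for the formal definition):
//   ldadd w0, w1, [x2]   // atomically: w1 = *x2; *x2 = w1 + w0
// The "a"/"l" suffixes add acquire/release ordering to the load/store halves.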
1793
1794defm LDCLR   : LDOPregister<0b001, "clr", 0, 0, "">;
1795defm LDCLRA  : LDOPregister<0b001, "clr", 1, 0, "a">;
1796defm LDCLRL  : LDOPregister<0b001, "clr", 0, 1, "l">;
1797defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;
1798
1799defm LDEOR   : LDOPregister<0b010, "eor", 0, 0, "">;
1800defm LDEORA  : LDOPregister<0b010, "eor", 1, 0, "a">;
1801defm LDEORL  : LDOPregister<0b010, "eor", 0, 1, "l">;
1802defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;
1803
1804defm LDSET   : LDOPregister<0b011, "set", 0, 0, "">;
1805defm LDSETA  : LDOPregister<0b011, "set", 1, 0, "a">;
1806defm LDSETL  : LDOPregister<0b011, "set", 0, 1, "l">;
1807defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;
1808
1809defm LDSMAX   : LDOPregister<0b100, "smax", 0, 0, "">;
1810defm LDSMAXA  : LDOPregister<0b100, "smax", 1, 0, "a">;
1811defm LDSMAXL  : LDOPregister<0b100, "smax", 0, 1, "l">;
1812defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;
1813
1814defm LDSMIN   : LDOPregister<0b101, "smin", 0, 0, "">;
1815defm LDSMINA  : LDOPregister<0b101, "smin", 1, 0, "a">;
1816defm LDSMINL  : LDOPregister<0b101, "smin", 0, 1, "l">;
1817defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;
1818
1819defm LDUMAX   : LDOPregister<0b110, "umax", 0, 0, "">;
1820defm LDUMAXA  : LDOPregister<0b110, "umax", 1, 0, "a">;
1821defm LDUMAXL  : LDOPregister<0b110, "umax", 0, 1, "l">;
1822defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;
1823
1824defm LDUMIN   : LDOPregister<0b111, "umin", 0, 0, "">;
1825defm LDUMINA  : LDOPregister<0b111, "umin", 1, 0, "a">;
1826defm LDUMINL  : LDOPregister<0b111, "umin", 0, 1, "l">;
1827defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;
1828
1829// v8.1 atomic ST<OP>(register), as aliases for "LD<OP>(register)" with Rt = XZR/WZR.
1830defm : STOPregister<"stadd","LDADD">; // STADDx
1831defm : STOPregister<"stclr","LDCLR">; // STCLRx
1832defm : STOPregister<"steor","LDEOR">; // STEORx
1833defm : STOPregister<"stset","LDSET">; // STSETx
1834defm : STOPregister<"stsmax","LDSMAX">;// STSMAXx
1835defm : STOPregister<"stsmin","LDSMIN">;// STSMINx
1836defm : STOPregister<"stumax","LDUMAX">;// STUMAXx
1837defm : STOPregister<"stumin","LDUMIN">;// STUMINx
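// For example, "stadd w0, [x2]" is encoded as "ldadd w0, wzr, [x2]", simply
// discarding the loaded value.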
1838
1839// v8.5 Memory Tagging Extension
1840let Predicates = [HasMTE] in {
1841
1842def IRG   : BaseTwoOperand<0b0100, GPR64sp, "irg", int_aarch64_irg, GPR64sp, GPR64>,
1843            Sched<[]> {
1844  let Inst{31} = 1;
1845}
1846def GMI   : BaseTwoOperand<0b0101, GPR64, "gmi", int_aarch64_gmi, GPR64sp>, Sched<[]> {
1847  let Inst{31} = 1;
1848  let isNotDuplicable = 1;
1849}
1850def ADDG  : AddSubG<0, "addg", null_frag>;
1851def SUBG  : AddSubG<1, "subg", null_frag>;
1852
1853def : InstAlias<"irg $dst, $src", (IRG GPR64sp:$dst, GPR64sp:$src, XZR), 1>;
1854
1855def SUBP : SUBP<0, "subp", int_aarch64_subp>, Sched<[]>;
1856def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]> {
1857  let Defs = [NZCV];
1858}
1859
1860def : InstAlias<"cmpp $lhs, $rhs", (SUBPS XZR, GPR64sp:$lhs, GPR64sp:$rhs), 0>;
1861
1862def LDG : MemTagLoad<"ldg", "\t$Rt, [$Rn, $offset]">;
1863
1864def : Pat<(int_aarch64_addg (am_indexedu6s128 GPR64sp:$Rn, uimm6s16:$imm6), imm0_15:$imm4),
1865          (ADDG GPR64sp:$Rn, imm0_63:$imm6, imm0_15:$imm4)>;
1866def : Pat<(int_aarch64_ldg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn,  simm9s16:$offset)),
1867          (LDG GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;
1868
1869def : InstAlias<"ldg $Rt, [$Rn]", (LDG GPR64:$Rt, GPR64sp:$Rn, 0), 1>;
1870
1871def LDGM : MemTagVector<1, "ldgm", "\t$Rt, [$Rn]",
1872                   (outs GPR64:$Rt), (ins GPR64sp:$Rn)>;
1873def STGM : MemTagVector<0, "stgm", "\t$Rt, [$Rn]",
1874                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)>;
1875def STZGM : MemTagVector<0, "stzgm", "\t$Rt, [$Rn]",
1876                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)> {
1877  let Inst{23} = 0;
1878}
1879
1880defm STG   : MemTagStore<0b00, "stg">;
1881defm STZG  : MemTagStore<0b01, "stzg">;
1882defm ST2G  : MemTagStore<0b10, "st2g">;
1883defm STZ2G : MemTagStore<0b11, "stz2g">;
1884
1885def : Pat<(AArch64stg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1886          (STGOffset $Rn, $Rm, $imm)>;
1887def : Pat<(AArch64stzg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1888          (STZGOffset $Rn, $Rm, $imm)>;
1889def : Pat<(AArch64st2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1890          (ST2GOffset $Rn, $Rm, $imm)>;
1891def : Pat<(AArch64stz2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1892          (STZ2GOffset $Rn, $Rm, $imm)>;
1893
1894defm STGP     : StorePairOffset <0b01, 0, GPR64z, simm7s16, "stgp">;
1895def  STGPpre  : StorePairPreIdx <0b01, 0, GPR64z, simm7s16, "stgp">;
1896def  STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">;
1897
1898def : Pat<(int_aarch64_stg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)),
1899          (STGOffset GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;
1900
1901def : Pat<(int_aarch64_stgp (am_indexed7s128 GPR64sp:$Rn, simm7s16:$imm), GPR64:$Rt, GPR64:$Rt2),
1902          (STGPi $Rt, $Rt2, $Rn, $imm)>;
1903
1904def IRGstack
1905    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rsp, GPR64:$Rm), []>,
1906      Sched<[]>;
1907def TAGPstack
1908    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rn, uimm6s16:$imm6, GPR64sp:$Rm, imm0_15:$imm4), []>,
1909      Sched<[]>;
1910
1911// Explicit SP in the first operand prevents ShrinkWrap optimization
1912// from leaving this instruction out of the stack frame. When IRGstack
1913// is transformed into IRG, this operand is replaced with the actual
1914// register / expression for the tagged base pointer of the current function.
1915def : Pat<(int_aarch64_irg_sp i64:$Rm), (IRGstack SP, i64:$Rm)>;
1916
1917// Large STG to be expanded into a loop. $sz is the size, $Rn is the start address.
1918// $Rn_wback is one past the end of the range. $Rm is the loop counter.
1919let isCodeGenOnly=1, mayStore=1 in {
1920def STGloop_wback
1921    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
1922             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
1923      Sched<[WriteAdr, WriteST]>;
1924
1925def STZGloop_wback
1926    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
1927             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
1928      Sched<[WriteAdr, WriteST]>;
1929
1930// Variants of the above where $Rn2 is an independent register not tied to the input register $Rn.
1931// Their purpose is to allow a FrameIndex operand as $Rn (which of course cannot be written back).
1932def STGloop
1933    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
1934             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
1935      Sched<[WriteAdr, WriteST]>;
1936
1937def STZGloop
1938    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
1939             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
1940      Sched<[WriteAdr, WriteST]>;
1941}
1942
1943} // Predicates = [HasMTE]
1944
1945//===----------------------------------------------------------------------===//
1946// Logical instructions.
1947//===----------------------------------------------------------------------===//
1948
1949// (immediate)
1950defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
1951defm AND  : LogicalImm<0b00, "and", and, "bic">;
1952defm EOR  : LogicalImm<0b10, "eor", xor, "eon">;
1953defm ORR  : LogicalImm<0b01, "orr", or, "orn">;
1954
1955// FIXME: these aliases *are* canonical sometimes (when movz can't be
1956// used). Actually, it seems to be working right now, but putting logical_immXX
1957// here is a bit dodgy on the AsmParser side too.
1958def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
1959                                          logical_imm32:$imm), 0>;
1960def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
1961                                          logical_imm64:$imm), 0>;
1962
1963
1964// (register)
1965defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
1966defm BICS : LogicalRegS<0b11, 1, "bics",
1967                        BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
1968defm AND  : LogicalReg<0b00, 0, "and", and>;
1969defm BIC  : LogicalReg<0b00, 1, "bic",
1970                       BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
1971defm EON  : LogicalReg<0b10, 1, "eon",
1972                       BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
1973defm EOR  : LogicalReg<0b10, 0, "eor", xor>;
1974defm ORN  : LogicalReg<0b01, 1, "orn",
1975                       BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
1976defm ORR  : LogicalReg<0b01, 0, "orr", or>;
1977
1978def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
1979def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;
1980
1981def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
1982def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;
1983
1984def : InstAlias<"mvn $Wd, $Wm$sh",
1985                (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
1986def : InstAlias<"mvn $Xd, $Xm$sh",
1987                (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;
1988
1989def : InstAlias<"tst $src1, $src2",
1990                (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
1991def : InstAlias<"tst $src1, $src2",
1992                (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;
1993
1994def : InstAlias<"tst $src1, $src2",
1995                        (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
1996def : InstAlias<"tst $src1, $src2",
1997                        (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;
1998
1999def : InstAlias<"tst $src1, $src2$sh",
2000               (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
2001def : InstAlias<"tst $src1, $src2$sh",
2002               (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;
2003
2004
2005def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
2006def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
2007
2008
2009//===----------------------------------------------------------------------===//
2010// One operand data processing instructions.
2011//===----------------------------------------------------------------------===//
2012
2013defm CLS    : OneOperandData<0b101, "cls">;
2014defm CLZ    : OneOperandData<0b100, "clz", ctlz>;
2015defm RBIT   : OneOperandData<0b000, "rbit", bitreverse>;
2016
2017def  REV16Wr : OneWRegData<0b001, "rev16",
2018                                  UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
2019def  REV16Xr : OneXRegData<0b001, "rev16", null_frag>;
2020
2021def : Pat<(cttz GPR32:$Rn),
2022          (CLZWr (RBITWr GPR32:$Rn))>;
2023def : Pat<(cttz GPR64:$Rn),
2024          (CLZXr (RBITXr GPR64:$Rn))>;
2025def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
2026                (i32 1))),
2027          (CLSWr GPR32:$Rn)>;
2028def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
2029                (i64 1))),
2030          (CLSXr GPR64:$Rn)>;
2031def : Pat<(int_aarch64_cls GPR32:$Rn), (CLSWr GPR32:$Rn)>;
2032def : Pat<(int_aarch64_cls64 GPR64:$Rm), (EXTRACT_SUBREG (CLSXr GPR64:$Rm), sub_32)>;
2033
2034// Unlike the other one operand instructions, the instructions with the "rev"
2035// mnemonic do *not* just differ in the size bit, but actually use different
2036// opcode bits for the different sizes.
2037def REVWr   : OneWRegData<0b010, "rev", bswap>;
2038def REVXr   : OneXRegData<0b011, "rev", bswap>;
2039def REV32Xr : OneXRegData<0b010, "rev32",
2040                                 UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;
2041
2042def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;
2043
2044// The bswap commutes with the rotr so we want a pattern for both possible
2045// orders.
2046def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
2047def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
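// Worked example for the 32-bit pattern: with w1 = 0xAABBCCDD,
//   rev16 w0, w1   // w0 = 0xBBAADDCC == rotr(bswap(0xAABBCCDD), 16)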
2048
2049//===----------------------------------------------------------------------===//
2050// Bitfield immediate extraction instruction.
2051//===----------------------------------------------------------------------===//
2052let hasSideEffects = 0 in
2053defm EXTR : ExtractImm<"extr">;
2054def : InstAlias<"ror $dst, $src, $shift",
2055            (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
2056def : InstAlias<"ror $dst, $src, $shift",
2057            (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;
2058
2059def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
2060          (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
2061def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
2062          (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;
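// For example, "ror w0, w1, #8" is just "extr w0, w1, w1, #8": extracting 32
// bits starting at bit 8 of the 64-bit value w1:w1 is a rotate right by 8.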
2063
2064//===----------------------------------------------------------------------===//
2065// Other bitfield immediate instructions.
2066//===----------------------------------------------------------------------===//
2067let hasSideEffects = 0 in {
2068defm BFM  : BitfieldImmWith2RegArgs<0b01, "bfm">;
2069defm SBFM : BitfieldImm<0b00, "sbfm">;
2070defm UBFM : BitfieldImm<0b10, "ubfm">;
2071}
2072
2073def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
2074  uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
2075  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2076}]>;
2077
2078def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
2079  uint64_t enc = 31 - N->getZExtValue();
2080  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2081}]>;
2082
2083// min(7, 31 - shift_amt)
2084def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
2085  uint64_t enc = 31 - N->getZExtValue();
2086  enc = enc > 7 ? 7 : enc;
2087  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2088}]>;
2089
2090// min(15, 31 - shift_amt)
2091def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
2092  uint64_t enc = 31 - N->getZExtValue();
2093  enc = enc > 15 ? 15 : enc;
2094  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2095}]>;
2096
2097def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
2098  uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
2099  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2100}]>;
2101
2102def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
2103  uint64_t enc = 63 - N->getZExtValue();
2104  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2105}]>;
2106
2107// min(7, 63 - shift_amt)
2108def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
2109  uint64_t enc = 63 - N->getZExtValue();
2110  enc = enc > 7 ? 7 : enc;
2111  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2112}]>;
2113
2114// min(15, 63 - shift_amt)
2115def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
2116  uint64_t enc = 63 - N->getZExtValue();
2117  enc = enc > 15 ? 15 : enc;
2118  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2119}]>;
2120
2121// min(31, 63 - shift_amt)
2122def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
2123  uint64_t enc = 63 - N->getZExtValue();
2124  enc = enc > 31 ? 31 : enc;
2125  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2126}]>;
2127
2128def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
2129          (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
2130                              (i64 (i32shift_b imm0_31:$imm)))>;
2131def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
2132          (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
2133                              (i64 (i64shift_b imm0_63:$imm)))>;
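// Worked example: the shl pattern above selects "lsl w0, w1, #4" as
// UBFMWri w0, w1, #28, #27, since i32shift_a(4) = (32 - 4) & 0x1f = 28 and
// i32shift_b(4) = 31 - 4 = 27.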
2134
2135let AddedComplexity = 10 in {
2136def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
2137          (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
2138def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
2139          (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
2140}
2141
2142def : InstAlias<"asr $dst, $src, $shift",
2143                (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
2144def : InstAlias<"asr $dst, $src, $shift",
2145                (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
2146def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
2147def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
2148def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
2149def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
2150def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
2151
2152def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
2153          (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
2154def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
2155          (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
2156
2157def : InstAlias<"lsr $dst, $src, $shift",
2158                (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
2159def : InstAlias<"lsr $dst, $src, $shift",
2160                (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
2161def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
2162def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

//===----------------------------------------------------------------------===//
// Conditional comparison instructions.
//===----------------------------------------------------------------------===//
defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;

//===----------------------------------------------------------------------===//
// Conditional select instructions.
//===----------------------------------------------------------------------===//
defm CSEL  : CondSelect<0, 0b00, "csel">;

def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;

def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;

def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;

def : Pat<(add GPR32:$val, (AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV)),
          (CSINCWr GPR32:$val, GPR32:$val, (i32 imm:$cc))>;
def : Pat<(add GPR64:$val, (zext (AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV))),
          (CSINCXr GPR64:$val, GPR64:$val, (i32 imm:$cc))>;
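// A conditionally-selected increment folds into CSINC: the two patterns above
// turn "x + (cond ? 0 : 1)" into "csinc x, x, x, cond" instead of
// materializing the 0/1 value in a separate register first.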

// The inverse of the condition code from the alias instruction is what is used
// in the aliased instruction. The parser already inverts the condition code
// for these aliases.
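// For example, "cset w0, eq" is encoded as "csinc w0, wzr, wzr, ne", which
// yields 1 when the original condition (eq) holds and 0 otherwise.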
def : InstAlias<"cset $dst, $cc",
                (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
def : InstAlias<"cset $dst, $cc",
                (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;

def : InstAlias<"csetm $dst, $cc",
                (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
def : InstAlias<"csetm $dst, $cc",
                (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;

def : InstAlias<"cinc $dst, $src, $cc",
                (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cinc $dst, $src, $cc",
                (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

def : InstAlias<"cinv $dst, $src, $cc",
                (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cinv $dst, $src, $cc",
                (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

def : InstAlias<"cneg $dst, $src, $cc",
                (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cneg $dst, $src, $cc",
                (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

//===----------------------------------------------------------------------===//
// PC-relative instructions.
//===----------------------------------------------------------------------===//
let isReMaterializable = 1 in {
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def ADR  : ADRI<0, "adr", adrlabel,
                [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>;
} // hasSideEffects = 0

def ADRP : ADRI<1, "adrp", adrplabel,
                [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
} // isReMaterializable = 1

// page address of a constant pool entry, block address
def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>;
def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>;
def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>;
def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>;
def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>;
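// ADRP materializes the 4KB page address of a symbol; the low 12 bits are
// supplied separately, typically by a following :lo12: ADD or load/store:
//   adrp x0, sym
//   add  x0, x0, :lo12:sym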

//===----------------------------------------------------------------------===//
// Unconditional branch (register) instructions.
//===----------------------------------------------------------------------===//

let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
def RET  : BranchReg<0b0010, "ret", []>;
def DRPS : SpecialReturn<0b0101, "drps">;
def ERET : SpecialReturn<0b0100, "eret">;
} // isReturn = 1, isTerminator = 1, isBarrier = 1

// Default to the LR register.
def : InstAlias<"ret", (RET LR)>;

let isCall = 1, Defs = [LR], Uses = [SP] in {
  def BLR : BranchReg<0b0001, "blr", []>;
  def BLRNoIP : Pseudo<(outs), (ins GPR64noip:$Rn), []>,
                Sched<[WriteBrReg]>,
                PseudoInstExpansion<(BLR GPR64:$Rn)>;
  def BLR_RVMARKER : Pseudo<(outs), (ins variable_ops), []>,
                     Sched<[WriteBrReg]>;
} // isCall

def : Pat<(AArch64call GPR64:$Rn),
          (BLR GPR64:$Rn)>,
      Requires<[NoSLSBLRMitigation]>;
def : Pat<(AArch64call GPR64noip:$Rn),
          (BLRNoIP GPR64noip:$Rn)>,
      Requires<[SLSBLRMitigation]>;

def : Pat<(AArch64call_rvmarker GPR64:$Rn),
          (BLR_RVMARKER GPR64:$Rn)>,
      Requires<[NoSLSBLRMitigation]>;

let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def BR  : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
} // isBranch, isTerminator, isBarrier, isIndirectBranch

// Create a separate pseudo-instruction for codegen to use so that we don't
// flag lr as used in every function. It'll be restored before the RET by the
// epilogue if it's legitimately used.
def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
                   Sched<[WriteBrReg]> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
}

// This is a directive-like pseudo-instruction. The purpose is to insert an
// R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
// (which in the usual case is a BLR).
let hasSideEffects = 1 in
def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
  let AsmString = ".tlsdesccall $sym";
}

// Pseudo instruction to tell the streamer to emit a 'B' character into the
// augmentation string.
def EMITBKEY : Pseudo<(outs), (ins), []>, Sched<[]> {}
// FIXME: maybe the scratch register used shouldn't be fixed to X1?
// FIXME: can "hasSideEffects" be dropped?
let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1,
    isCodeGenOnly = 1 in
def TLSDESC_CALLSEQ
    : Pseudo<(outs), (ins i64imm:$sym),
             [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
      Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
          (TLSDESC_CALLSEQ texternalsym:$sym)>;
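// For reference, a TLSDESC access normally expands to a sequence like:
//   adrp x0, :tlsdesc:var
//   ldr  x1, [x0, :tlsdesc_lo12:var]
//   add  x0, x0, :tlsdesc_lo12:var
//   .tlsdesccall var
//   blr  x1
// which is why the pseudo above clobbers LR, X0 and X1 and is scheduled as
// I/LD/I/BrReg.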

//===----------------------------------------------------------------------===//
// Conditional branch (immediate) instruction.
//===----------------------------------------------------------------------===//
def Bcc : BranchCond;

//===----------------------------------------------------------------------===//
// Compare-and-branch instructions.
//===----------------------------------------------------------------------===//
defm CBZ  : CmpBranch<0, "cbz", AArch64cbz>;
defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;

//===----------------------------------------------------------------------===//
// Test-bit-and-branch instructions.
//===----------------------------------------------------------------------===//
defm TBZ  : TestBranch<0, "tbz", AArch64tbz>;
defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;

//===----------------------------------------------------------------------===//
// Unconditional branch (immediate) instructions.
//===----------------------------------------------------------------------===//
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
def B  : BranchImm<0, "b", [(br bb:$addr)]>;
} // isBranch, isTerminator, isBarrier

let isCall = 1, Defs = [LR], Uses = [SP] in {
def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
} // isCall
def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;

//===----------------------------------------------------------------------===//
// Exception generation instructions.
//===----------------------------------------------------------------------===//
let isTrap = 1 in {
def BRK   : ExceptionGeneration<0b001, 0b00, "brk">;
}
def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">, Requires<[HasEL3]>;
def HLT   : ExceptionGeneration<0b010, 0b00, "hlt">;
def HVC   : ExceptionGeneration<0b000, 0b10, "hvc">;
def SMC   : ExceptionGeneration<0b000, 0b11, "smc">, Requires<[HasEL3]>;
def SVC   : ExceptionGeneration<0b000, 0b01, "svc">;

// DCPSn defaults to an immediate operand of zero if unspecified.
def : InstAlias<"dcps1", (DCPS1 0)>;
def : InstAlias<"dcps2", (DCPS2 0)>;
def : InstAlias<"dcps3", (DCPS3 0)>, Requires<[HasEL3]>;

def UDF : UDFType<0, "udf">;

//===----------------------------------------------------------------------===//
// Load instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
defm LDPW : LoadPairOffset<0b00, 0, GPR32z, simm7s4, "ldp">;
defm LDPX : LoadPairOffset<0b10, 0, GPR64z, simm7s8, "ldp">;
defm LDPS : LoadPairOffset<0b00, 1, FPR32Op, simm7s4, "ldp">;
defm LDPD : LoadPairOffset<0b01, 1, FPR64Op, simm7s8, "ldp">;
defm LDPQ : LoadPairOffset<0b10, 1, FPR128Op, simm7s16, "ldp">;

defm LDPSW : LoadPairOffset<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (pre-indexed)
def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (post-indexed)
def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;


// Pair (no allocate)
defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32z, simm7s4, "ldnp">;
defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64z, simm7s8, "ldnp">;
defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32Op, simm7s4, "ldnp">;
defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64Op, simm7s8, "ldnp">;
defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128Op, simm7s16, "ldnp">;

def : Pat<(AArch64ldp (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (LDPXi GPR64sp:$Rn, simm7s8:$offset)>;

//---
// (register offset)
//---

// Integer
defm LDRBB : Load8RO<0b00,  0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
defm LDRW  : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
defm LDRX  : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;

// Floating-point
defm LDRB : Load8RO<0b00,   1, 0b01, FPR8Op,   "ldr", untyped, load>;
defm LDRH : Load16RO<0b01,  1, 0b01, FPR16Op,  "ldr", f16, load>;
defm LDRS : Load32RO<0b10,  1, 0b01, FPR32Op,  "ldr", f32, load>;
defm LDRD : Load64RO<0b11,  1, 0b01, FPR64Op,  "ldr", f64, load>;
defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128Op, "ldr", f128, load>;

// Load sign-extended half-word
defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;

// Load sign-extended byte
defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;

// Load sign-extended word
defm LDRSW  : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;

// Pre-fetch.
defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;

// Regular loads have no alignment requirement, so it is safe to map the
// vector loads with interesting addressing modes directly.
// FIXME: We could do the same for bitconvert to floating point vectors.
multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
                              ValueType ScalTy, ValueType VecTy,
                              Instruction LOADW, Instruction LOADX,
                              SubRegIndex sub> {
  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
                           sub)>;

  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
                           sub)>;
}
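// For example, (v8i8 (scalar_to_vector (i32 (extloadi8 addr)))) selects to a
// single "ldr b0, [xN, wM, extend]": the byte is loaded directly into lane 0
// of the vector register (the remaining lanes are IMPLICIT_DEF), avoiding a
// separate GPR load and fmov.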

let AddedComplexity = 10 in {
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v8i8,  LDRBroW, LDRBroX, bsub>;
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v16i8, LDRBroW, LDRBroX, bsub>;

defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro16, load,       i32, v4f16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, load,       i32, v8f16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro32, load,       i32, v2i32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       i32, v4i32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro32, load,       f32, v2f32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       f32, v4f32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro64, load,       i64, v2i64, LDRDroW, LDRDroX, dsub>;

defm : ScalToVecROLoadPat<ro64, load,       f64, v2f64, LDRDroW, LDRDroX, dsub>;


def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend64:$extend))))),
           (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;

def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend64:$extend))))),
           (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
}

// Match all 64-bit-wide loads whose type is compatible with FPR64
multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
                        Instruction LOADW, Instruction LOADX> {

  def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
let Predicates = [IsLE] in {
  // We must do vector loads with LD1 in big-endian.
  defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v8i8,  LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4bf16, LDRDroW, LDRDroX>;
}

defm : VecROLoadPat<ro64, v1i64,  LDRDroW, LDRDroX>;
defm : VecROLoadPat<ro64, v1f64,  LDRDroW, LDRDroX>;

// Match all 128-bit-wide loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  // We must do vector loads with LD1 in big-endian.
  defm : VecROLoadPat<ro128, v2i64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v2f64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4i32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4f32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8i16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8f16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8bf16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v16i8,  LDRQroW, LDRQroX>;
}
} // AddedComplexity = 10

// zextload -> i64
multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                           sub_32)>;

  def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                           sub_32)>;
}

let AddedComplexity = 10 in {
  defm : ExtLoadTo64ROPat<ro8,  zextloadi8,  LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  zextloadi1,  LDRBBroW, LDRBBroX>;

  // extload -> zextload
  defm : ExtLoadTo64ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // extloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  extloadi1,   LDRBBroW, LDRBBroX>;
}
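// The (i64 0) operand of SUBREG_TO_REG asserts that the upper bits are zero:
// a W-register load such as LDRBBroW implicitly zeroes bits [63:32] of the X
// register, so these i64 zero-extensions cost no extra instruction.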


// extload/zextload -> i32
multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;

}

let AddedComplexity = 10 in {
  // extload -> zextload
  defm : ExtLoadTo32ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo32ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo32ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
}

//---
// (unsigned immediate)
//---
defm LDRX : LoadUI<0b11, 0, 0b01, GPR64z, uimm12s8, "ldr",
                   [(set GPR64z:$Rt,
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRW : LoadUI<0b10, 0, 0b01, GPR32z, uimm12s4, "ldr",
                   [(set GPR32z:$Rt,
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRB : LoadUI<0b00, 1, 0b01, FPR8Op, uimm12s1, "ldr",
                   [(set FPR8Op:$Rt,
                         (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
defm LDRH : LoadUI<0b01, 1, 0b01, FPR16Op, uimm12s2, "ldr",
                   [(set (f16 FPR16Op:$Rt),
                         (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
defm LDRS : LoadUI<0b10, 1, 0b01, FPR32Op, uimm12s4, "ldr",
                   [(set (f32 FPR32Op:$Rt),
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRD : LoadUI<0b11, 1, 0b01, FPR64Op, uimm12s8, "ldr",
                   [(set (f64 FPR64Op:$Rt),
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128Op, uimm12s16, "ldr",
                 [(set (f128 FPR128Op:$Rt),
                       (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;

// bf16 load pattern
def : Pat <(bf16 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;

// Regular loads have no alignment requirement, so it is safe to map the
// vector loads with interesting addressing modes directly.
// FIXME: We could do the same for bitconvert to floating point vectors.
def : Pat <(v8i8 (scalar_to_vector (i32
               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v16i8 (scalar_to_vector (i32
               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v4i16 (scalar_to_vector (i32
               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v8i16 (scalar_to_vector (i32
               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v2i32 (scalar_to_vector (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v4i32 (scalar_to_vector (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v1i64 (scalar_to_vector (i64
               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat <(v2i64 (scalar_to_vector (i64
               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                          (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;

// Match all 64-bit-wide loads whose type is compatible with FPR64
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian.
  def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4bf16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
}
def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;

// Match all 128-bit-wide loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian.
  def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8bf16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
}
def : Pat<(f128  (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
          (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;

defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
                    [(set GPR32:$Rt,
                          (zextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                     uimm12s2:$offset)))]>;
defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
                    [(set GPR32:$Rt,
                          (zextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                   uimm12s1:$offset)))]>;
// zextload -> i64
def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;

// zextloadi1 -> zextloadi8
def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;

// extload -> zextload
def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
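// AArch64 has no anyext load instructions, so the extload patterns above use
// the zero-extending forms: they are just as cheap and leave the upper bits
// in a defined state.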

// load sign-extended half-word
defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
                     [(set GPR32:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;
defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
                     [(set GPR64:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;

// load sign-extended byte
defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
                     [(set GPR32:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;
defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
                     [(set GPR64:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;

// load sign-extended word
defm LDRSW  : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
                     [(set GPR64:$Rt,
                           (sextloadi32 (am_indexed32 GPR64sp:$Rn,
                                                      uimm12s4:$offset)))]>;

// load zero-extended word
def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
      (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;

// Pre-fetch.
def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
                        [(AArch64Prefetch imm:$Rt,
                                        (am_indexed64 GPR64sp:$Rn,
                                                      uimm12s8:$offset))]>;

def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;

//---
// (literal)

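// LDR (literal) encodes a signed 19-bit word offset (imm19 scaled by 4), so a
// symbol can only be addressed this way when it is at least 4-byte aligned
// and its offset is a multiple of 4; alignedglobal below checks exactly that.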
def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
  if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
    const DataLayout &DL = MF->getDataLayout();
    Align Align = G->getGlobal()->getPointerAlignment(DL);
    return Align >= 4 && G->getOffset() % 4 == 0;
  }
  if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
    return C->getAlign() >= 4 && C->getOffset() % 4 == 0;
  return false;
}]>;

def LDRWl : LoadLiteral<0b00, 0, GPR32z, "ldr",
  [(set GPR32z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRXl : LoadLiteral<0b01, 0, GPR64z, "ldr",
  [(set GPR64z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRSl : LoadLiteral<0b00, 1, FPR32Op, "ldr",
  [(set (f32 FPR32Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRDl : LoadLiteral<0b01, 1, FPR64Op, "ldr",
  [(set (f64 FPR64Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRQl : LoadLiteral<0b10, 1, FPR128Op, "ldr",
  [(set (f128 FPR128Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;

// load sign-extended word
def LDRSWl : LoadLiteral<0b10, 0, GPR64z, "ldrsw",
  [(set GPR64z:$Rt, (sextloadi32 (AArch64adr alignedglobal:$label)))]>;

let AddedComplexity = 20 in {
def : Pat<(i64 (zextloadi32 (AArch64adr alignedglobal:$label))),
        (SUBREG_TO_REG (i64 0), (LDRWl $label), sub_32)>;
}

// prefetch
def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
//                   [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;

//---
// (unscaled immediate)
defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64z, "ldur",
                    [(set GPR64z:$Rt,
                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32z, "ldur",
                    [(set GPR32z:$Rt,
                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8Op, "ldur",
                    [(set FPR8Op:$Rt,
                          (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16Op, "ldur",
                    [(set (f16 FPR16Op:$Rt),
                          (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32Op, "ldur",
                    [(set (f32 FPR32Op:$Rt),
                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64Op, "ldur",
                    [(set (f64 FPR64Op:$Rt),
                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128Op, "ldur",
                    [(set (f128 FPR128Op:$Rt),
                          (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;

defm LDURHH
    : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
             [(set GPR32:$Rt,
                    (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURBB
    : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
             [(set GPR32:$Rt,
                    (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// Match all 64-bit-wide loads whose type is compatible with FPR64
let Predicates = [IsLE] in {
  def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
}
def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;

// Match all 128-bit-wide loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
}

// anyext -> zext
def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
// unscaled zext
def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;


//---
// LDR mnemonics fall back to LDUR for negative or unaligned offsets.

// Define new assembler match classes as we want to only match these when
// they don't otherwise match the scaled addressing mode for LDR/STR. Don't
// associate a DiagnosticType either, as we want the diagnostic for the
// canonical form (the scaled operand) to take precedence.
class SImm9OffsetOperand<int Width> : AsmOperandClass {
  let Name = "SImm9OffsetFB" # Width;
  let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
  let RenderMethod = "addImmOperands";
}

def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;

def simm9_offset_fb8 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB8Operand;
}
def simm9_offset_fb16 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB16Operand;
}
def simm9_offset_fb32 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB32Operand;
}
def simm9_offset_fb64 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB64Operand;
}
def simm9_offset_fb128 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB128Operand;
}

def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
               (LDURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
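// For example, "ldr x0, [x1, #1]" (offset not a multiple of 8) and
// "ldr x0, [x1, #-8]" (negative offset) both fall outside the scaled uimm12
// form and assemble to the corresponding LDUR encodings via these aliases.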

// zextload -> i64
def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
  (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
  (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;

// load sign-extended half-word
defm LDURSHW
    : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
               [(set GPR32:$Rt,
                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSHX
    : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
              [(set GPR64:$Rt,
                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended byte
defm LDURSBW
    : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
                [(set GPR32:$Rt,
                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSBX
    : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
                [(set GPR64:$Rt,
                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended word
defm LDURSW
    : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
              [(set GPR64:$Rt,
                    (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;

// Zero- and sign-extending aliases from generic LDR* mnemonics to LDUR*.
def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
                (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
                (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
                (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
                (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
                (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
                (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
                (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;

// Pre-fetch.
defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
                  [(AArch64Prefetch imm:$Rt,
                                  (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;

//---
// (unscaled immediate, unprivileged)
defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;

defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;

// load sign-extended half-word
defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;

// load sign-extended byte
defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;

// load sign-extended word
defm LDTRSW  : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;

//---
// (immediate pre-indexed)
def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte and half-word
def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;

//---
// (immediate post-indexed)
def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

3102def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
3103def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32z, "ldrh">;
3104
3105// load sign-extended word
3106def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;
3107
3108//===----------------------------------------------------------------------===//
3109// Store instructions.
3110//===----------------------------------------------------------------------===//
3111
3112// Pair (indexed, offset)
3113// FIXME: Use dedicated range-checked addressing mode operand here.
3114defm STPW : StorePairOffset<0b00, 0, GPR32z, simm7s4, "stp">;
3115defm STPX : StorePairOffset<0b10, 0, GPR64z, simm7s8, "stp">;
3116defm STPS : StorePairOffset<0b00, 1, FPR32Op, simm7s4, "stp">;
3117defm STPD : StorePairOffset<0b01, 1, FPR64Op, simm7s8, "stp">;
3118defm STPQ : StorePairOffset<0b10, 1, FPR128Op, simm7s16, "stp">;
3119
3120// Pair (pre-indexed)
3121def STPWpre : StorePairPreIdx<0b00, 0, GPR32z, simm7s4, "stp">;
3122def STPXpre : StorePairPreIdx<0b10, 0, GPR64z, simm7s8, "stp">;
3123def STPSpre : StorePairPreIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
3124def STPDpre : StorePairPreIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
3125def STPQpre : StorePairPreIdx<0b10, 1, FPR128Op, simm7s16, "stp">;
3126
// Pair (post-indexed)
def STPWpost : StorePairPostIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpost : StorePairPostIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpost : StorePairPostIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpost : StorePairPostIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpost : StorePairPostIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (no allocate)
defm STNPW : StorePairNoAlloc<0b00, 0, GPR32z, simm7s4, "stnp">;
defm STNPX : StorePairNoAlloc<0b10, 0, GPR64z, simm7s8, "stnp">;
defm STNPS : StorePairNoAlloc<0b00, 1, FPR32Op, simm7s4, "stnp">;
defm STNPD : StorePairNoAlloc<0b01, 1, FPR64Op, simm7s8, "stnp">;
defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128Op, simm7s16, "stnp">;

def : Pat<(AArch64stp GPR64z:$Rt, GPR64z:$Rt2, (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (STPXi GPR64z:$Rt, GPR64z:$Rt2, GPR64sp:$Rn, simm7s8:$offset)>;

def : Pat<(AArch64stnp FPR128:$Rt, FPR128:$Rt2, (am_indexed7s128 GPR64sp:$Rn, simm7s16:$offset)),
          (STNPQi FPR128:$Rt, FPR128:$Rt2, GPR64sp:$Rn, simm7s16:$offset)>;


//---
// (Register offset)

// Integer
defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
defm STRW  : Store32RO<0b10, 0, 0b00, GPR32, "str",  i32, store>;
defm STRX  : Store64RO<0b11, 0, 0b00, GPR64, "str",  i64, store>;


// Floating-point
defm STRB : Store8RO< 0b00,  1, 0b00, FPR8Op,   "str", untyped, store>;
defm STRH : Store16RO<0b01,  1, 0b00, FPR16Op,  "str", f16,     store>;
defm STRS : Store32RO<0b10,  1, 0b00, FPR32Op,  "str", f32,     store>;
defm STRD : Store64RO<0b11,  1, 0b00, FPR64Op,  "str", f64,     store>;
defm STRQ : Store128RO<0b00, 1, 0b10, FPR128Op, "str">;

let Predicates = [UseSTRQro], AddedComplexity = 10 in {
  def : Pat<(store (f128 FPR128:$Rt),
                        (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
                                        ro_Wextend128:$extend)),
            (STRQroW FPR128:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend)>;
  def : Pat<(store (f128 FPR128:$Rt),
                        (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
                                        ro_Xextend128:$extend)),
            (STRQroX FPR128:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend128:$extend)>;
}

multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
                                 Instruction STRW, Instruction STRX> {

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
  // truncstore i64
  defm : TruncStoreFrom64ROPat<ro8,  truncstorei8,  STRBBroW, STRBBroX>;
  defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
  defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW,  STRWroX>;
}

multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
                         Instruction STRW, Instruction STRX> {
  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
// Match all 64-bit-wide stores whose type is compatible with FPR64
let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4bf16, FPR64, STRDroW, STRDroX>;
}

defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;

// Match all 128-bit-wide stores whose type is compatible with FPR128
let Predicates = [IsLE, UseSTRQro] in {
  // We must use ST1 to store vectors in big-endian.
  defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8bf16, FPR128, STRQroW, STRQroX>;
}
} // AddedComplexity = 10

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
                              ValueType VecTy, ValueType STy,
                              SubRegIndex SubRegIdx,
                              Instruction STRW, Instruction STRX> {

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 19 in {
  defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro16,         store, v8f16, f16, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro32,         store, v4i32, i32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro32,         store, v4f32, f32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro64,         store, v2i64, i64, dsub, STRDroW, STRDroX>;
  defm : VecROStoreLane0Pat<ro64,         store, v2f64, f64, dsub, STRDroW, STRDroX>;
}
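// For example, storing lane 0 of a v4f32 selects to "str s0, [xN, xM]"
// directly (EXTRACT_SUBREG of lane 0 is free), rather than extracting the
// lane to a scalar register before the store.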

//---
// (unsigned immediate)
defm STRX : StoreUIz<0b11, 0, 0b00, GPR64z, uimm12s8, "str",
                   [(store GPR64z:$Rt,
                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRW : StoreUIz<0b10, 0, 0b00, GPR32z, uimm12s4, "str",
                    [(store GPR32z:$Rt,
                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRB : StoreUI<0b00, 1, 0b00, FPR8Op, uimm12s1, "str",
                    [(store FPR8Op:$Rt,
                            (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
defm STRH : StoreUI<0b01, 1, 0b00, FPR16Op, uimm12s2, "str",
                    [(store (f16 FPR16Op:$Rt),
                            (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
defm STRS : StoreUI<0b10, 1, 0b00, FPR32Op, uimm12s4, "str",
                    [(store (f32 FPR32Op:$Rt),
                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRD : StoreUI<0b11, 1, 0b00, FPR64Op, uimm12s8, "str",
                    [(store (f64 FPR64Op:$Rt),
                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRQ : StoreUI<0b00, 1, 0b10, FPR128Op, uimm12s16, "str", []>;

defm STRHH : StoreUIz<0b01, 0, 0b00, GPR32z, uimm12s2, "strh",
                     [(truncstorei16 GPR32z:$Rt,
                                     (am_indexed16 GPR64sp:$Rn,
                                                   uimm12s2:$offset))]>;
defm STRBB : StoreUIz<0b00, 0, 0b00, GPR32z, uimm12s1,  "strb",
                     [(truncstorei8 GPR32z:$Rt,
                                    (am_indexed8 GPR64sp:$Rn,
                                                 uimm12s1:$offset))]>;

// bf16 store pattern
def : Pat<(store (bf16 FPR16Op:$Rt),
                 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
          (STRHui FPR16:$Rt, GPR64sp:$Rn, uimm12s2:$offset)>;

let AddedComplexity = 10 in {

// Match all 64-bit-wide stores whose type is compatible with FPR64
def : Pat<(store (v1i64 FPR64:$Rt),
                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(store (v1f64 FPR64:$Rt),
                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4bf16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
}

// Match all 128-bit-wide stores whose type is compatible with FPR128
3332def : Pat<(store (f128  FPR128:$Rt),
3333                 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3334          (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3335
3336let Predicates = [IsLE] in {
3337  // We must use ST1 to store vectors in big-endian.
3338  def : Pat<(store (v4f32 FPR128:$Rt),
3339                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3340            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3341  def : Pat<(store (v2f64 FPR128:$Rt),
3342                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3343            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3344  def : Pat<(store (v16i8 FPR128:$Rt),
3345                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3346            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3347  def : Pat<(store (v8i16 FPR128:$Rt),
3348                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3349            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3350  def : Pat<(store (v4i32 FPR128:$Rt),
3351                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3352            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3353  def : Pat<(store (v2i64 FPR128:$Rt),
3354                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3355            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3356  def : Pat<(store (v8f16 FPR128:$Rt),
3357                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3358            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3359  def : Pat<(store (v8bf16 FPR128:$Rt),
3360                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3361            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3362}
3363
3364// truncstore i64
3365def : Pat<(truncstorei32 GPR64:$Rt,
3366                         (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
3367  (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
3368def : Pat<(truncstorei16 GPR64:$Rt,
3369                         (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
3370  (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
3371def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
3372  (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;
3373
3374} // AddedComplexity = 10
3375
3376// Match stores from lane 0 to a store of the appropriate subregister.
3377multiclass VecStoreLane0Pat<ComplexPattern UIAddrMode, SDPatternOperator storeop,
3378                            ValueType VTy, ValueType STy,
3379                            SubRegIndex SubRegIdx, Operand IndexType,
3380                            Instruction STR> {
3381  def : Pat<(storeop (STy (vector_extract (VTy VecListOne128:$Vt), 0)),
3382                     (UIAddrMode GPR64sp:$Rn, IndexType:$offset)),
3383            (STR (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
3384                 GPR64sp:$Rn, IndexType:$offset)>;
3385}
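
// For example (sketch): through the v4f32 instantiation below, a store of
// (f32 (vector_extract (v4f32 V128:$Vt), 0)) selects a single
// "str s0, [xN, #imm]" on the ssub subregister, rather than a lane move
// followed by a scalar store.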
3386
3387let AddedComplexity = 19 in {
3388  defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>;
3389  defm : VecStoreLane0Pat<am_indexed16,         store, v8f16, f16, hsub, uimm12s2, STRHui>;
3390  defm : VecStoreLane0Pat<am_indexed32,         store, v4i32, i32, ssub, uimm12s4, STRSui>;
3391  defm : VecStoreLane0Pat<am_indexed32,         store, v4f32, f32, ssub, uimm12s4, STRSui>;
3392  defm : VecStoreLane0Pat<am_indexed64,         store, v2i64, i64, dsub, uimm12s8, STRDui>;
3393  defm : VecStoreLane0Pat<am_indexed64,         store, v2f64, f64, dsub, uimm12s8, STRDui>;
3394}
3395
3396//---
3397// (unscaled immediate)
3398defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64z, "stur",
3399                         [(store GPR64z:$Rt,
3400                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
3401defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32z, "stur",
3402                         [(store GPR32z:$Rt,
3403                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
3404defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8Op, "stur",
3405                         [(store FPR8Op:$Rt,
3406                                 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
3407defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16Op, "stur",
3408                         [(store (f16 FPR16Op:$Rt),
3409                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
3410defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32Op, "stur",
3411                         [(store (f32 FPR32Op:$Rt),
3412                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
3413defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64Op, "stur",
3414                         [(store (f64 FPR64Op:$Rt),
3415                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
3416defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128Op, "stur",
3417                         [(store (f128 FPR128Op:$Rt),
3418                                 (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
3419defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32z, "sturh",
3420                         [(truncstorei16 GPR32z:$Rt,
3421                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
3422defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32z, "sturb",
3423                         [(truncstorei8 GPR32z:$Rt,
3424                                  (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
3425
3426// Armv8.4 Weaker Release Consistency enhancements
3427//         LDAPR & STLR with Immediate Offset instructions
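// These provide acquire/release semantics at a small signed immediate offset
// from the base register, e.g. (illustrative) "ldapur w0, [x1, #-8]" and
// "stlur w2, [x1, #4]".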
3428let Predicates = [HasRCPC_IMMO] in {
3429defm STLURB     : BaseStoreUnscaleV84<"stlurb",  0b00, 0b00, GPR32>;
3430defm STLURH     : BaseStoreUnscaleV84<"stlurh",  0b01, 0b00, GPR32>;
3431defm STLURW     : BaseStoreUnscaleV84<"stlur",   0b10, 0b00, GPR32>;
3432defm STLURX     : BaseStoreUnscaleV84<"stlur",   0b11, 0b00, GPR64>;
3433defm LDAPURB    : BaseLoadUnscaleV84<"ldapurb",  0b00, 0b01, GPR32>;
3434defm LDAPURSBW  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b11, GPR32>;
3435defm LDAPURSBX  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b10, GPR64>;
3436defm LDAPURH    : BaseLoadUnscaleV84<"ldapurh",  0b01, 0b01, GPR32>;
3437defm LDAPURSHW  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b11, GPR32>;
3438defm LDAPURSHX  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b10, GPR64>;
3439defm LDAPUR     : BaseLoadUnscaleV84<"ldapur",   0b10, 0b01, GPR32>;
3440defm LDAPURSW   : BaseLoadUnscaleV84<"ldapursw", 0b10, 0b10, GPR64>;
3441defm LDAPURX    : BaseLoadUnscaleV84<"ldapur",   0b11, 0b01, GPR64>;
3442}
3443
3444// Match all 64-bit-wide stores whose type is compatible with FPR64.
3445def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3446          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3447def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3448          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3449
3450let AddedComplexity = 10 in {
3451
3452let Predicates = [IsLE] in {
3453  // Big-endian targets must use ST1 to store vectors, so these patterns are LE-only.
3454  def : Pat<(store (v2f32 FPR64:$Rt),
3455                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3456            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3457  def : Pat<(store (v8i8 FPR64:$Rt),
3458                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3459            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3460  def : Pat<(store (v4i16 FPR64:$Rt),
3461                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3462            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3463  def : Pat<(store (v2i32 FPR64:$Rt),
3464                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3465            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3466  def : Pat<(store (v4f16 FPR64:$Rt),
3467                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3468            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3469  def : Pat<(store (v4bf16 FPR64:$Rt),
3470                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3471            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3472}
3473
3474// Match all 128-bit-wide stores whose type is compatible with FPR128.
3475def : Pat<(store (f128 FPR128:$Rt), (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3476          (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3477
3478let Predicates = [IsLE] in {
3479  // Big-endian targets must use ST1 to store vectors, so these patterns are LE-only.
3480  def : Pat<(store (v4f32 FPR128:$Rt),
3481                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3482            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3483  def : Pat<(store (v2f64 FPR128:$Rt),
3484                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3485            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3486  def : Pat<(store (v16i8 FPR128:$Rt),
3487                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3488            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3489  def : Pat<(store (v8i16 FPR128:$Rt),
3490                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3491            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3492  def : Pat<(store (v4i32 FPR128:$Rt),
3493                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3494            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3495  def : Pat<(store (v2i64 FPR128:$Rt),
3496                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3497            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3501  def : Pat<(store (v8f16 FPR128:$Rt),
3502                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3503            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3504  def : Pat<(store (v8bf16 FPR128:$Rt),
3505                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3506            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3507}
3508
3509} // AddedComplexity = 10
3510
3511// unscaled i64 truncating stores
3512def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
3513  (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
3514def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
3515  (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
3516def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
3517  (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
3518
3519// Match stores from lane 0 to a store of the appropriate subregister.
3520multiclass VecStoreULane0Pat<SDPatternOperator StoreOp,
3521                             ValueType VTy, ValueType STy,
3522                             SubRegIndex SubRegIdx, Instruction STR> {
3523  defm : VecStoreLane0Pat<am_unscaled128, StoreOp, VTy, STy, SubRegIdx, simm9, STR>;
3524}
3525
3526let AddedComplexity = 19 in {
3527  defm : VecStoreULane0Pat<truncstorei16, v8i16, i32, hsub, STURHi>;
3528  defm : VecStoreULane0Pat<store,         v8f16, f16, hsub, STURHi>;
3529  defm : VecStoreULane0Pat<store,         v4i32, i32, ssub, STURSi>;
3530  defm : VecStoreULane0Pat<store,         v4f32, f32, ssub, STURSi>;
3531  defm : VecStoreULane0Pat<store,         v2i64, i64, dsub, STURDi>;
3532  defm : VecStoreULane0Pat<store,         v2f64, f64, dsub, STURDi>;
3533}
3534
3535//---
3536// STR mnemonics fall back to STUR for negative or unaligned offsets.
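// For example (illustrative): "str x0, [sp, #8]" encodes as STRXui with the
// offset scaled to 8/8 = 1, but "str x0, [sp, #-8]" has no scaled encoding
// and is emitted as STURXi with the raw signed offset -8.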
3537def : InstAlias<"str $Rt, [$Rn, $offset]",
3538                (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
3539def : InstAlias<"str $Rt, [$Rn, $offset]",
3540                (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
3541def : InstAlias<"str $Rt, [$Rn, $offset]",
3542                (STURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3543def : InstAlias<"str $Rt, [$Rn, $offset]",
3544                (STURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3545def : InstAlias<"str $Rt, [$Rn, $offset]",
3546                (STURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
3547def : InstAlias<"str $Rt, [$Rn, $offset]",
3548                (STURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
3549def : InstAlias<"str $Rt, [$Rn, $offset]",
3550                (STURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
3551
3552def : InstAlias<"strb $Rt, [$Rn, $offset]",
3553                (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3554def : InstAlias<"strh $Rt, [$Rn, $offset]",
3555                (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3556
3557//---
3558// (unscaled immediate, unprivileged)
3559defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
3560defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;
3561
3562defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
3563defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;
3564
3565//---
3566// (immediate pre-indexed)
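// Pre-indexed stores write the updated address back to the base register,
// e.g. (illustrative) "str x0, [sp, #-16]!" stores x0 at sp-16 and leaves
// sp decremented by 16.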
3567def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32z, "str",  pre_store, i32>;
3568def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64z, "str",  pre_store, i64>;
3569def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8Op,  "str",  pre_store, untyped>;
3570def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16Op, "str",  pre_store, f16>;
3571def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32Op, "str",  pre_store, f32>;
3572def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64Op, "str",  pre_store, f64>;
3573def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128Op, "str", pre_store, f128>;
3574
3575def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32z, "strb", pre_truncsti8,  i32>;
3576def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32z, "strh", pre_truncsti16, i32>;
3577
3578// truncstore i64
3579def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3580  (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3581           simm9:$off)>;
3582def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3583  (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3584            simm9:$off)>;
3585def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3586  (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3587            simm9:$off)>;
3588
3589def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3590          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3591def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3592          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3593def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3594          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3595def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3596          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3597def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3598          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3599def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3600          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3601def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3602          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3603
3604def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3605          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3606def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3607          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3608def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3609          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3610def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3611          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3612def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3613          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3614def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3615          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3616def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3617          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3618
3619//---
3620// (immediate post-indexed)
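// Post-indexed stores use the unmodified base address and write back
// base + offset afterwards, e.g. (illustrative) "str x0, [sp], #16" stores
// x0 at sp and then advances sp by 16.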
3621def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32z,  "str", post_store, i32>;
3622def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64z,  "str", post_store, i64>;
3623def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8Op,   "str", post_store, untyped>;
3624def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16Op,  "str", post_store, f16>;
3625def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32Op,  "str", post_store, f32>;
3626def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64Op,  "str", post_store, f64>;
3627def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128Op, "str", post_store, f128>;
3628
3629def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32z, "strb", post_truncsti8, i32>;
3630def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32z, "strh", post_truncsti16, i32>;
3631
3632// truncstore i64
3633def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3634  (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3635            simm9:$off)>;
3636def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3637  (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3638             simm9:$off)>;
3639def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3640  (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3641             simm9:$off)>;
3642
3643def : Pat<(post_store (bf16 FPR16:$Rt), GPR64sp:$addr, simm9:$off),
3644          (STRHpost FPR16:$Rt, GPR64sp:$addr, simm9:$off)>;
3645
3646def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3647          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3648def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3649          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3650def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3651          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3652def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3653          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3654def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3655          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3656def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3657          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3658def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3659          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3660def : Pat<(post_store (v4bf16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3661          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3662
3663def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3664          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3665def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3666          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3667def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3668          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3669def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3670          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3671def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3672          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3673def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3674          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3675def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3676          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3677def : Pat<(post_store (v8bf16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3678          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3679
3680//===----------------------------------------------------------------------===//
3681// Load/store exclusive instructions.
3682//===----------------------------------------------------------------------===//
3683
3684def LDARW  : LoadAcquire   <0b10, 1, 1, 0, 1, GPR32, "ldar">;
3685def LDARX  : LoadAcquire   <0b11, 1, 1, 0, 1, GPR64, "ldar">;
3686def LDARB  : LoadAcquire   <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
3687def LDARH  : LoadAcquire   <0b01, 1, 1, 0, 1, GPR32, "ldarh">;
3688
3689def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
3690def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
3691def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
3692def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;
3693
3694def LDXRW  : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
3695def LDXRX  : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
3696def LDXRB  : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
3697def LDXRH  : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;
3698
3699def STLRW  : StoreRelease  <0b10, 1, 0, 0, 1, GPR32, "stlr">;
3700def STLRX  : StoreRelease  <0b11, 1, 0, 0, 1, GPR64, "stlr">;
3701def STLRB  : StoreRelease  <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
3702def STLRH  : StoreRelease  <0b01, 1, 0, 0, 1, GPR32, "stlrh">;
3703
3704def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
3705def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
3706def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
3707def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;
3708
3709def STXRW  : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
3710def STXRX  : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
3711def STXRB  : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
3712def STXRH  : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;
3713
3714def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
3715def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;
3716
3717def LDXPW  : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
3718def LDXPX  : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;
3719
3720def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
3721def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;
3722
3723def STXPW  : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
3724def STXPX  : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;
3725
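// A typical atomic read-modify-write loop built from these (illustrative
// sketch, not taken from this file):
//   retry:
//     ldaxr   x8, [x0]        // load-acquire exclusive
//     add     x8, x8, #1
//     stlxr   w9, x8, [x0]    // store-release exclusive; w9 == 0 on success
//     cbnz    w9, retry
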
3726let Predicates = [HasLOR] in {
3727  // v8.1a "Limited Ordering Regions" extension load-acquire instructions
3728  def LDLARW  : LoadAcquire   <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
3729  def LDLARX  : LoadAcquire   <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
3730  def LDLARB  : LoadAcquire   <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
3731  def LDLARH  : LoadAcquire   <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;
3732
3733  // v8.1a "Limited Ordering Regions" extension store-release instructions
3734  def STLLRW  : StoreRelease   <0b10, 1, 0, 0, 0, GPR32, "stllr">;
3735  def STLLRX  : StoreRelease   <0b11, 1, 0, 0, 0, GPR64, "stllr">;
3736  def STLLRB  : StoreRelease   <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
3737  def STLLRH  : StoreRelease   <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
3738}
3739
3740//===----------------------------------------------------------------------===//
3741// Scaled floating point to integer conversion instructions.
3742//===----------------------------------------------------------------------===//
3743
3744defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
3745defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
3746defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
3747defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
3748defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
3749defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
3750defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
3751defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
3752defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
3753defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
3754defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
3755defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
3756
3757// AArch64's FCVT instructions saturate when the result is out of range.
3758multiclass FPToIntegerSatPats<SDNode to_int_sat, string INST> {
3759  let Predicates = [HasFullFP16] in {
3760  def : Pat<(i32 (to_int_sat f16:$Rn, i32)),
3761            (!cast<Instruction>(INST # UWHr) f16:$Rn)>;
3762  def : Pat<(i64 (to_int_sat f16:$Rn, i64)),
3763            (!cast<Instruction>(INST # UXHr) f16:$Rn)>;
3764  }
3765  def : Pat<(i32 (to_int_sat f32:$Rn, i32)),
3766            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
3767  def : Pat<(i64 (to_int_sat f32:$Rn, i64)),
3768            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
3769  def : Pat<(i32 (to_int_sat f64:$Rn, i32)),
3770            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
3771  def : Pat<(i64 (to_int_sat f64:$Rn, i64)),
3772            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
3773
3774  let Predicates = [HasFullFP16] in {
3775  def : Pat<(i32 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i32:$scale), i32)),
3776            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
3777  def : Pat<(i64 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i64:$scale), i64)),
3778            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
3779  }
3780  def : Pat<(i32 (to_int_sat (fmul f32:$Rn, fixedpoint_f32_i32:$scale), i32)),
3781            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
3782  def : Pat<(i64 (to_int_sat (fmul f32:$Rn, fixedpoint_f32_i64:$scale), i64)),
3783            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
3784  def : Pat<(i32 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i32:$scale), i32)),
3785            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
3786  def : Pat<(i64 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i64:$scale), i64)),
3787            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
3788}
3789
3790defm : FPToIntegerSatPats<fp_to_sint_sat, "FCVTZS">;
3791defm : FPToIntegerSatPats<fp_to_uint_sat, "FCVTZU">;
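
// For example (sketch): "i32 @llvm.fptosi.sat.i32.f32(float %x)" can select a
// bare "fcvtzs w0, s0": FCVTZS already clamps out-of-range inputs to
// INT32_MIN/INT32_MAX and converts NaN to 0, which is exactly what the
// saturating node requires.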
3792
3793multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
3794  let Predicates = [HasFullFP16] in {
3795  def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
3796  def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
3797  }
3798  def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
3799  def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
3800  def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
3801  def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;
3802
3803  let Predicates = [HasFullFP16] in {
3804  def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
3805            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
3806  def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
3807            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
3808  }
3809  def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
3810            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
3811  def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
3812            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
3813  def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
3814            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
3815  def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
3816            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
3817}
3818
3819defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
3820defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;
3821
3822multiclass FPToIntegerPats<SDNode to_int, SDNode to_int_sat, SDNode round, string INST> {
3823  def : Pat<(i32 (to_int (round f32:$Rn))),
3824            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
3825  def : Pat<(i64 (to_int (round f32:$Rn))),
3826            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
3827  def : Pat<(i32 (to_int (round f64:$Rn))),
3828            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
3829  def : Pat<(i64 (to_int (round f64:$Rn))),
3830            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
3831
3832  // These instructions saturate like fp_to_[su]int_sat.
3833  let Predicates = [HasFullFP16] in {
3834  def : Pat<(i32 (to_int_sat (round f16:$Rn), i32)),
3835            (!cast<Instruction>(INST # UWHr) f16:$Rn)>;
3836  def : Pat<(i64 (to_int_sat (round f16:$Rn), i64)),
3837            (!cast<Instruction>(INST # UXHr) f16:$Rn)>;
3838  }
3839  def : Pat<(i32 (to_int_sat (round f32:$Rn), i32)),
3840            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
3841  def : Pat<(i64 (to_int_sat (round f32:$Rn), i64)),
3842            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
3843  def : Pat<(i32 (to_int_sat (round f64:$Rn), i32)),
3844            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
3845  def : Pat<(i64 (to_int_sat (round f64:$Rn), i64)),
3846            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
3847}
3848
3849defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fceil,  "FCVTPS">;
3850defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fceil,  "FCVTPU">;
3851defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, ffloor, "FCVTMS">;
3852defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, ffloor, "FCVTMU">;
3853defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, ftrunc, "FCVTZS">;
3854defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, ftrunc, "FCVTZU">;
3855defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fround, "FCVTAS">;
3856defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fround, "FCVTAU">;
3857
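// For example (sketch): (i32 (fp_to_sint (ffloor f32:$x))) folds to a single
// "fcvtms w0, s0" via the FCVTMS entry above, instead of a separate
// "frintm" followed by "fcvtzs".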
3858
3860let Predicates = [HasFullFP16] in {
3861  def : Pat<(i32 (lround f16:$Rn)),
3862            (!cast<Instruction>(FCVTASUWHr) f16:$Rn)>;
3863  def : Pat<(i64 (lround f16:$Rn)),
3864            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
3865  def : Pat<(i64 (llround f16:$Rn)),
3866            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
3867}
3868def : Pat<(i32 (lround f32:$Rn)),
3869          (!cast<Instruction>(FCVTASUWSr) f32:$Rn)>;
3870def : Pat<(i32 (lround f64:$Rn)),
3871          (!cast<Instruction>(FCVTASUWDr) f64:$Rn)>;
3872def : Pat<(i64 (lround f32:$Rn)),
3873          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
3874def : Pat<(i64 (lround f64:$Rn)),
3875          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
3876def : Pat<(i64 (llround f32:$Rn)),
3877          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
3878def : Pat<(i64 (llround f64:$Rn)),
3879          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
3880
3881//===----------------------------------------------------------------------===//
3882// Scaled integer to floating point conversion instructions.
3883//===----------------------------------------------------------------------===//
3884
3885defm SCVTF : IntegerToFP<0, "scvtf", any_sint_to_fp>;
3886defm UCVTF : IntegerToFP<1, "ucvtf", any_uint_to_fp>;
3887
3888//===----------------------------------------------------------------------===//
3889// Unscaled integer to floating point conversion instruction.
3890//===----------------------------------------------------------------------===//
3891
3892defm FMOV : UnscaledConversion<"fmov">;
3893
3894// Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
3895let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
3896def FMOVH0 : Pseudo<(outs FPR16:$Rd), (ins), [(set f16:$Rd, (fpimm0))]>,
3897    Sched<[WriteF]>, Requires<[HasFullFP16]>;
3898def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
3899    Sched<[WriteF]>;
3900def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
3901    Sched<[WriteF]>;
3902}
3903// Similarly, add asm aliases mapping "fmov ..., #0.0" to an FMOV from the zero register.
3904def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
3905    Requires<[HasFullFP16]>;
3906def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
3907def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;
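
// e.g. (illustrative) "fmov s0, #0.0" is accepted and assembles as
// "fmov s0, wzr", since 0.0 is not encodable in the FMOV (immediate) form.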
3908
3909//===----------------------------------------------------------------------===//
3910// Floating point conversion instruction.
3911//===----------------------------------------------------------------------===//
3912
3913defm FCVT : FPConversion<"fcvt">;
3914
3915//===----------------------------------------------------------------------===//
3916// Floating point single operand instructions.
3917//===----------------------------------------------------------------------===//
3918
3919defm FABS   : SingleOperandFPData<0b0001, "fabs", fabs>;
3920defm FMOV   : SingleOperandFPData<0b0000, "fmov">;
3921defm FNEG   : SingleOperandFPData<0b0010, "fneg", fneg>;
3922defm FRINTA : SingleOperandFPData<0b1100, "frinta", fround>;
3923defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
3924defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
3925defm FRINTN : SingleOperandFPData<0b1000, "frintn", froundeven>;
3926defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;
3927
3928defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
3929defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;
3930
3931let SchedRW = [WriteFDiv] in {
3932defm FSQRT  : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
3933}
3934
3935let Predicates = [HasFRInt3264] in {
3936  defm FRINT32Z : FRIntNNT<0b00, "frint32z", int_aarch64_frint32z>;
3937  defm FRINT64Z : FRIntNNT<0b10, "frint64z", int_aarch64_frint64z>;
3938  defm FRINT32X : FRIntNNT<0b01, "frint32x", int_aarch64_frint32x>;
3939  defm FRINT64X : FRIntNNT<0b11, "frint64x", int_aarch64_frint64x>;
3940} // HasFRInt3264
3941
3942let Predicates = [HasFullFP16] in {
3943  def : Pat<(i32 (lrint f16:$Rn)),
3944            (FCVTZSUWHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
3945  def : Pat<(i64 (lrint f16:$Rn)),
3946            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
3947  def : Pat<(i64 (llrint f16:$Rn)),
3948            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
3949}
3950def : Pat<(i32 (lrint f32:$Rn)),
3951          (FCVTZSUWSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
3952def : Pat<(i32 (lrint f64:$Rn)),
3953          (FCVTZSUWDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
3954def : Pat<(i64 (lrint f32:$Rn)),
3955          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
3956def : Pat<(i64 (lrint f64:$Rn)),
3957          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
3958def : Pat<(i64 (llrint f32:$Rn)),
3959          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
3960def : Pat<(i64 (llrint f64:$Rn)),
3961          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
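
// i.e. (sketch) lrint on f32 lowers to "frintx s0, s0" (round to integral
// using the current rounding mode) followed by "fcvtzs w0, s0".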
3962
3963//===----------------------------------------------------------------------===//
3964// Floating point two operand instructions.
3965//===----------------------------------------------------------------------===//
3966
3967defm FADD   : TwoOperandFPData<0b0010, "fadd", fadd>;
3968let SchedRW = [WriteFDiv] in {
3969defm FDIV   : TwoOperandFPData<0b0001, "fdiv", fdiv>;
3970}
3971defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", fmaxnum>;
3972defm FMAX   : TwoOperandFPData<0b0100, "fmax", fmaximum>;
3973defm FMINNM : TwoOperandFPData<0b0111, "fminnm", fminnum>;
3974defm FMIN   : TwoOperandFPData<0b0101, "fmin", fminimum>;
3975let SchedRW = [WriteFMul] in {
3976defm FMUL   : TwoOperandFPData<0b0000, "fmul", fmul>;
3977defm FNMUL  : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
3978}
3979defm FSUB   : TwoOperandFPData<0b0011, "fsub", fsub>;
3980
3981def : Pat<(v1f64 (fmaximum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3982          (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
3983def : Pat<(v1f64 (fminimum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3984          (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
3985def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3986          (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
3987def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3988          (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;
3989
3990//===----------------------------------------------------------------------===//
3991// Floating point three operand instructions.
3992//===----------------------------------------------------------------------===//
3993
3994defm FMADD  : ThreeOperandFPData<0, 0, "fmadd", fma>;
3995defm FMSUB  : ThreeOperandFPData<0, 1, "fmsub",
3996     TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
3997defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
3998     TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
3999defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
4000     TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;
4001
4002// The following def pats catch the case where the LHS of an FMA is negated.
4003// The TriOpFrag above catches the case where the middle operand is negated.
4004
4005// N.b. FMSUB etc have the accumulator at the *end* of the operand list,
4006// unlike the NEON variant, where the accumulator comes first.
4007
4008// Here we handle first (-b)*c + a, i.e. a - b*c, which maps onto FMSUB:
4009
4010let Predicates = [HasNEON, HasFullFP16] in
4011def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, FPR16:$Ra)),
4012          (FMSUBHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;
4013
4014def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
4015          (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
4016
4017def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
4018          (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
4019
4020// Now it's time for "(-a) + (-b)*c", which maps onto FNMADD:
4021
4022let Predicates = [HasNEON, HasFullFP16] in
4023def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, (fneg FPR16:$Ra))),
4024          (FNMADDHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;
4025
4026def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
4027          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
4028
4029def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
4030          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
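
// Worked algebra for the patterns above, with fma(a, b, c) = a*b + c:
//   fma(-a, b, c)  =  c - a*b   -> FMSUB
//   fma(-a, b, -c) = -(c + a*b) -> FNMADD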
4031
4032//===----------------------------------------------------------------------===//
4033// Floating point comparison instructions.
4034//===----------------------------------------------------------------------===//
4035
4036defm FCMPE : FPComparison<1, "fcmpe", AArch64strict_fcmpe>;
4037defm FCMP  : FPComparison<0, "fcmp", AArch64any_fcmp>;
4038
4039//===----------------------------------------------------------------------===//
4040// Floating point conditional comparison instructions.
4041//===----------------------------------------------------------------------===//
4042
4043defm FCCMPE : FPCondComparison<1, "fccmpe">;
4044defm FCCMP  : FPCondComparison<0, "fccmp", AArch64fccmp>;
4045
4046//===----------------------------------------------------------------------===//
4047// Floating point conditional select instruction.
4048//===----------------------------------------------------------------------===//
4049
4050defm FCSEL : FPCondSelect<"fcsel">;
4051
4052// CSEL instructions providing f128 types need to be handled by a
4053// pseudo-instruction since the eventual code will need to introduce basic
4054// blocks and control flow.
4055def F128CSEL : Pseudo<(outs FPR128:$Rd),
4056                      (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
4057                      [(set (f128 FPR128:$Rd),
4058                            (AArch64csel FPR128:$Rn, FPR128:$Rm,
4059                                       (i32 imm:$cond), NZCV))]> {
4060  let Uses = [NZCV];
4061  let usesCustomInserter = 1;
4062  let hasNoSchedulingInfo = 1;
4063}
4064
4065//===----------------------------------------------------------------------===//
4066// Instructions used for emitting unwind opcodes on ARM64 Windows.
4067//===----------------------------------------------------------------------===//
4068let isPseudo = 1 in {
4069  def SEH_StackAlloc : Pseudo<(outs), (ins i32imm:$size), []>, Sched<[]>;
4070  def SEH_SaveFPLR : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
4071  def SEH_SaveFPLR_X : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
4072  def SEH_SaveReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
4073  def SEH_SaveReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
4074  def SEH_SaveRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
4075  def SEH_SaveRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
4076  def SEH_SaveFReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
4077  def SEH_SaveFReg_X :  Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
4078  def SEH_SaveFRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
4079  def SEH_SaveFRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
4080  def SEH_SetFP : Pseudo<(outs), (ins), []>, Sched<[]>;
4081  def SEH_AddFP : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
4082  def SEH_Nop : Pseudo<(outs), (ins), []>, Sched<[]>;
4083  def SEH_PrologEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
4084  def SEH_EpilogStart : Pseudo<(outs), (ins), []>, Sched<[]>;
4085  def SEH_EpilogEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
4086}
4087
4088//===----------------------------------------------------------------------===//
4089// Pseudo instructions for Windows EH
4090let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
4091    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1, isPseudo = 1 in {
4092   def CLEANUPRET : Pseudo<(outs), (ins), [(cleanupret)]>, Sched<[]>;
4093   let usesCustomInserter = 1 in
4094     def CATCHRET : Pseudo<(outs), (ins am_brcond:$dst, am_brcond:$src), [(catchret bb:$dst, bb:$src)]>,
4095                    Sched<[]>;
4096}
4097
4098// Pseudo instructions for homogeneous prolog/epilog
4099let isPseudo = 1 in {
4100  // Save CSRs in order, {FPOffset}
4101  def HOM_Prolog : Pseudo<(outs), (ins variable_ops), []>, Sched<[]>;
4102  // Restore CSRs in order
4103  def HOM_Epilog : Pseudo<(outs), (ins variable_ops), []>, Sched<[]>;
4104}
4105
4106//===----------------------------------------------------------------------===//
4107// Floating point immediate move.
4108//===----------------------------------------------------------------------===//
4109
4110let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
4111defm FMOV : FPMoveImmediate<"fmov">;
4112}
4113
4114//===----------------------------------------------------------------------===//
4115// Advanced SIMD two vector instructions.
4116//===----------------------------------------------------------------------===//
4117
4118defm UABDL   : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
4119                                          AArch64uabd>;
4120// Match UABDL in log2-shuffle patterns.
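// The xor/add/ashr forms below are the generic expansion of abs(x),
// (d + (d >>s 15)) ^ (d >>s 15), which legalization may produce in place of
// a plain abs node (hence both shapes are matched).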
4121def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
4122                           (zext (v8i8 V64:$opB))))),
4123          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
4124def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
4125               (v8i16 (add (sub (zext (v8i8 V64:$opA)),
4126                                (zext (v8i8 V64:$opB))),
4127                           (AArch64vashr v8i16:$src, (i32 15))))),
4128          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
4129def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 V128:$opA)),
4130                           (zext (extract_high_v16i8 V128:$opB))))),
4131          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
4132def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
4133               (v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
4134                                (zext (extract_high_v16i8 V128:$opB))),
4135                           (AArch64vashr v8i16:$src, (i32 15))))),
4136          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
4137def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
4138                           (zext (v4i16 V64:$opB))))),
4139          (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
4140def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 V128:$opA)),
4141                           (zext (extract_high_v8i16 V128:$opB))))),
4142          (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
4143def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
4144                           (zext (v2i32 V64:$opB))))),
4145          (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
4146def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 V128:$opA)),
4147                           (zext (extract_high_v4i32 V128:$opB))))),
4148          (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;
4149
4150defm ABS    : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
4151defm CLS    : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
4152defm CLZ    : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
4153defm CMEQ   : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
4154defm CMGE   : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
4155defm CMGT   : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
4156defm CMLE   : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
4157defm CMLT   : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
4158defm CNT    : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
4159defm FABS   : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;
4160
4161defm FCMEQ  : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
4162defm FCMGE  : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
4163defm FCMGT  : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
4164defm FCMLE  : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
4165defm FCMLT  : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
4166defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
4167defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
4168defm FCVTL  : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
4169def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
4170          (FCVTLv4i16 V64:$Rn)>;
4171def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
4172                                                              (i64 4)))),
4173          (FCVTLv8i16 V128:$Rn)>;
4174def : Pat<(v2f64 (fpextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;
4175
4176def : Pat<(v4f32 (fpextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;
4177
4178defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
4179defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
4180defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
4181defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
4182defm FCVTN  : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
4183def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
4184          (FCVTNv4i16 V128:$Rn)>;
4185def : Pat<(concat_vectors V64:$Rd,
4186                          (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
4187          (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
4188def : Pat<(v2f32 (fpround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
4189def : Pat<(v4f16 (fpround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
4190def : Pat<(concat_vectors V64:$Rd, (v2f32 (fpround (v2f64 V128:$Rn)))),
4191          (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
4192defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
4193defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
4194defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
4195                                        int_aarch64_neon_fcvtxn>;
4196defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
4197defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;
4198
4199// AArch64's FCVT instructions saturate when the result is out of range.
4200multiclass SIMDTwoVectorFPToIntSatPats<SDNode to_int_sat, string INST> {
4201  def : Pat<(v4i16 (to_int_sat v4f16:$Rn, i16)),
4202            (!cast<Instruction>(INST # v4f16) v4f16:$Rn)>;
4203  def : Pat<(v8i16 (to_int_sat v8f16:$Rn, i16)),
4204            (!cast<Instruction>(INST # v8f16) v8f16:$Rn)>;
4205  def : Pat<(v2i32 (to_int_sat v2f32:$Rn, i32)),
4206            (!cast<Instruction>(INST # v2f32) v2f32:$Rn)>;
4207  def : Pat<(v4i32 (to_int_sat v4f32:$Rn, i32)),
4208            (!cast<Instruction>(INST # v4f32) v4f32:$Rn)>;
4209  def : Pat<(v2i64 (to_int_sat v2f64:$Rn, i64)),
4210            (!cast<Instruction>(INST # v2f64) v2f64:$Rn)>;
4211}
4212defm : SIMDTwoVectorFPToIntSatPats<fp_to_sint_sat, "FCVTZS">;
4213defm : SIMDTwoVectorFPToIntSatPats<fp_to_uint_sat, "FCVTZU">;
4214
4215def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
4216def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
4217def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
4218def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
4219def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;
4220
4221def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
4222def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
4223def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
4224def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
4225def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;
4226
4227defm FNEG   : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
4228defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
4229defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", fround>;
4230defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
4231defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
4232defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", froundeven>;
4233defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
4234defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
4235defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;
4236
4237let Predicates = [HasFRInt3264] in {
4238  defm FRINT32Z : FRIntNNTVector<0, 0, "frint32z", int_aarch64_neon_frint32z>;
4239  defm FRINT64Z : FRIntNNTVector<0, 1, "frint64z", int_aarch64_neon_frint64z>;
4240  defm FRINT32X : FRIntNNTVector<1, 0, "frint32x", int_aarch64_neon_frint32x>;
4241  defm FRINT64X : FRIntNNTVector<1, 1, "frint64x", int_aarch64_neon_frint64x>;
4242} // HasFRInt3264
4243
4244defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
4245defm FSQRT  : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
4246defm NEG    : SIMDTwoVectorBHSD<1, 0b01011, "neg",
4247                               UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
4248defm NOT    : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
4249// Aliases for MVN -> NOT.
4250def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
4251                (NOTv8i8 V64:$Vd, V64:$Vn)>;
4252def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
4253                (NOTv16i8 V128:$Vd, V128:$Vn)>;
4254
4255def : Pat<(vnot (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
4256def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
4257def : Pat<(vnot (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
4258def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
4259def : Pat<(vnot (v1i64 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
4260def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
4261
4262defm RBIT   : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", bitreverse>;
4263defm REV16  : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
4264defm REV32  : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
4265defm REV64  : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
4266defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
4267       BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >;
4268defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>;
4269defm SCVTF  : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
4270defm SHLL   : SIMDVectorLShiftLongBySizeBHS;
4271defm SQABS  : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
4272defm SQNEG  : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
4273defm SQXTN  : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
4274defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
4275defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
4276defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
4277       BinOpFrag<(add node:$LHS, (AArch64uaddlp node:$RHS))> >;
4278defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp", AArch64uaddlp>;
4279defm UCVTF  : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
4280defm UQXTN  : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
4281defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
4282defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
4283defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
4284defm XTN    : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;
4285
4286def : Pat<(v4f16  (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
4287def : Pat<(v4f16  (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
4288def : Pat<(v4bf16 (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
4289def : Pat<(v4bf16 (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
4290def : Pat<(v8f16  (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
4291def : Pat<(v8f16  (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
4292def : Pat<(v8bf16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
4293def : Pat<(v8bf16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
4294def : Pat<(v2f32  (AArch64rev64 V64:$Rn)),  (REV64v2i32 V64:$Rn)>;
4295def : Pat<(v4f32  (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;
4296
4297// Patterns for vector long shift (by element width). These need to match all
4298// three of zext, sext and anyext, so it is easier to pull the patterns out of
4299// the instruction definitions and share them in one multiclass.
4300multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
4301  def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
4302            (SHLLv8i8 V64:$Rn)>;
4303  def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
4304            (SHLLv16i8 V128:$Rn)>;
4305  def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
4306            (SHLLv4i16 V64:$Rn)>;
4307  def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
4308            (SHLLv8i16 V128:$Rn)>;
4309  def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
4310            (SHLLv2i32 V64:$Rn)>;
4311  def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
4312            (SHLLv4i32 V128:$Rn)>;
4313}
4314
4315defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
4316defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
4317defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
4318
4319// Constant vector values, used in the S/UQXTN patterns below.
4320def VImmFF:   PatLeaf<(AArch64NvCast (v2i64 (AArch64movi_edit (i32 85))))>;
4321def VImmFFFF: PatLeaf<(AArch64NvCast (v2i64 (AArch64movi_edit (i32 51))))>;
4322def VImm7F:   PatLeaf<(AArch64movi_shift (i32 127), (i32 0))>;
4323def VImm80:   PatLeaf<(AArch64mvni_shift (i32 127), (i32 0))>;
4324def VImm7FFF: PatLeaf<(AArch64movi_msl (i32 127), (i32 264))>;
4325def VImm8000: PatLeaf<(AArch64mvni_msl (i32 127), (i32 264))>;
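
// (Sketch of the encodings assumed above: the 64-bit MOVI "bit-to-byte
// replicate" form expands each imm8 bit into a full byte, so 85 = 0b01010101
// gives 0x00FF...00FF, i.e. 255 in each i16 lane, and 51 = 0b00110011 gives
// 0x0000FFFF...0000FFFF, i.e. 65535 in each i32 lane.)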
4326
4327// trunc(umin(X, 255)) -> UQXTN v8i8
4328def : Pat<(v8i8 (trunc (umin (v8i16 V128:$Vn), (v8i16 VImmFF)))),
4329          (UQXTNv8i8 V128:$Vn)>;
4330// trunc(umin(X, 65535)) -> UQXTN v4i16
4331def : Pat<(v4i16 (trunc (umin (v4i32 V128:$Vn), (v4i32 VImmFFFF)))),
4332          (UQXTNv4i16 V128:$Vn)>;
4333// trunc(smin(smax(X, -128), 127)) -> SQXTN
4334//  (also matched with min/max reversed)
4335def : Pat<(v8i8 (trunc (smin (smax (v8i16 V128:$Vn), (v8i16 VImm80)),
4336                             (v8i16 VImm7F)))),
4337          (SQXTNv8i8 V128:$Vn)>;
4338def : Pat<(v8i8 (trunc (smax (smin (v8i16 V128:$Vn), (v8i16 VImm7F)),
4339                             (v8i16 VImm80)))),
4340          (SQXTNv8i8 V128:$Vn)>;
4341// trunc(smin(smax(X, -32768), 32767)) -> SQXTN
4342//  (also matched with min/max reversed)
4343def : Pat<(v4i16 (trunc (smin (smax (v4i32 V128:$Vn), (v4i32 VImm8000)),
4344                              (v4i32 VImm7FFF)))),
4345          (SQXTNv4i16 V128:$Vn)>;
4346def : Pat<(v4i16 (trunc (smax (smin (v4i32 V128:$Vn), (v4i32 VImm7FFF)),
4347                              (v4i32 VImm8000)))),
4348          (SQXTNv4i16 V128:$Vn)>;
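
// For example (sketch): the C-level clamp-then-narrow idiom
//   for (int i = 0; i < 8; ++i)
//     dst[i] = (uint8_t)(src[i] > 255 ? 255 : src[i]);
// is vectorized to trunc(umin(x, 255)) and selects one "uqxtn v0.8b, v0.8h".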

//===----------------------------------------------------------------------===//
// Advanced SIMD three vector instructions.
//===----------------------------------------------------------------------===//

defm ADD     : SIMDThreeSameVector<0, 0b10000, "add", add>;
defm ADDP    : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>;
defm CMEQ    : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE    : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT    : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI    : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS    : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST   : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
foreach VT = [ v8i8, v16i8, v4i16, v8i16, v2i32, v4i32, v2i64 ] in {
def : Pat<(vnot (AArch64cmeqz VT:$Rn)), (!cast<Instruction>("CMTST"#VT) VT:$Rn, VT:$Rn)>;
}
defm FABD    : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>;
let Predicates = [HasNEON] in {
foreach VT = [ v2f32, v4f32, v2f64 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
let Predicates = [HasNEON, HasFullFP16] in {
foreach VT = [ v4f16, v8f16 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
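// For illustration only (not part of the build): the fabs(fsub) patterns
// above cover C like the following, which should select a single FABD
// rather than a separate fsub and fabs (a sketch):
//   #include <arm_neon.h>
//   float32x2_t fdist(float32x2_t a, float32x2_t b) {
//     return vabs_f32(vsub_f32(a, b));  // |a - b| per lane
//   }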
defm FACGE   : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>;
defm FACGT   : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>;
defm FADDP   : SIMDThreeSameVectorFP<1,0,0b010,"faddp",int_aarch64_neon_faddp>;
defm FADD    : SIMDThreeSameVectorFP<0,0,0b010,"fadd", fadd>;
defm FCMEQ   : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE   : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT   : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FDIV    : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", fdiv>;
defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
defm FMAXNM  : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", fmaxnum>;
defm FMAXP   : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>;
defm FMAX    : SIMDThreeSameVectorFP<0,0,0b110,"fmax", fmaximum>;
defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>;
defm FMINNM  : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", fminnum>;
defm FMINP   : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
defm FMIN    : SIMDThreeSameVectorFP<0,1,0b110,"fmin", fminimum>;

// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
// instruction expects the addend first, while the fma intrinsic puts it last.
defm FMLA     : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
            TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm FMLS     : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls",
            TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
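// For illustration only (not part of the build): the operand swap mirrors the
// ACLE intrinsic, which also takes the addend first while llvm.fma takes it
// last (a sketch):
//   #include <arm_neon.h>
//   float32x4_t mac(float32x4_t acc, float32x4_t a, float32x4_t b) {
//     return vfmaq_f32(acc, a, b);  // acc + a*b -> fmla
//   }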

defm FMULX    : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
defm FMUL     : SIMDThreeSameVectorFP<1,0,0b011,"fmul", fmul>;
defm FRECPS   : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
defm FRSQRTS  : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
defm FSUB     : SIMDThreeSameVectorFP<0,1,0b010,"fsub", fsub>;

// MLA and MLS are generated by the MachineCombiner.
defm MLA      : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla", null_frag>;
defm MLS      : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls", null_frag>;

defm MUL      : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
defm PMUL     : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
defm SABA     : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
      TriOpFrag<(add node:$LHS, (AArch64sabd node:$MHS, node:$RHS))> >;
defm SABD     : SIMDThreeSameVectorBHS<0,0b01110,"sabd", AArch64sabd>;
defm SHADD    : SIMDThreeSameVectorBHS<0,0b00000,"shadd", AArch64shadd>;
defm SHSUB    : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
defm SMAXP    : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
defm SMAX     : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
defm SMINP    : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
defm SMIN     : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
defm SQADD    : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
defm SRHADD   : SIMDThreeSameVectorBHS<0,0b00010,"srhadd", AArch64srhadd>;
defm SRSHL    : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeSameVector<1,0b10000,"sub", sub>;
defm UABA     : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
      TriOpFrag<(add node:$LHS, (AArch64uabd node:$MHS, node:$RHS))> >;
defm UABD     : SIMDThreeSameVectorBHS<1,0b01110,"uabd", AArch64uabd>;
defm UHADD    : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", AArch64uhadd>;
defm UHSUB    : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
defm UMAXP    : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
defm UMAX     : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
defm UMINP    : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
defm UMIN     : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
defm UQADD    : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
defm URHADD   : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", AArch64urhadd>;
defm URSHL    : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
                                                  int_aarch64_neon_sqadd>;
defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
                                                  int_aarch64_neon_sqsub>;

// Extra saturate patterns, beyond the intrinsic matches above
defm : SIMDThreeSameVectorExtraPatterns<"SQADD", saddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQADD", uaddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"SQSUB", ssubsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQSUB", usubsat>;

defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
                                  BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
                                  BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;

// Pseudo bitwise select pattern BSP.
// It is expanded into BSL/BIT/BIF after register allocation.
defm BSP : SIMDLogicalThreeVectorPseudo<TriOpFrag<(or (and node:$LHS, node:$MHS),
                                                      (and (vnot node:$LHS), node:$RHS))>>;
defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl">;
defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
defm BIF : SIMDLogicalThreeVectorTied<1, 0b11, "bif">;
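// For illustration only (not part of the build): the BSP fragment is the
// classic bitwise select (a & m) | (b & ~m); C like the following should
// funnel into BSP and then one of BSL/BIT/BIF (a sketch):
//   #include <arm_neon.h>
//   uint8x8_t sel(uint8x8_t m, uint8x8_t a, uint8x8_t b) {
//     return vbsl_u8(m, a, b);  // bits of a where m is set, else bits of b
//   }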

def : Pat<(AArch64bsp (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;

def : Pat<(AArch64bsp (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;

def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;

def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;

def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmls.8b\t$dst, $src1, $src2}",
                (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmls.16b\t$dst, $src1, $src2}",
                (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmls.4h\t$dst, $src1, $src2}",
                (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmls.8h\t$dst, $src1, $src2}",
                (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmls.2s\t$dst, $src1, $src2}",
                (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmls.4s\t$dst, $src1, $src2}",
                (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmls.2d\t$dst, $src1, $src2}",
                (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlo.8b\t$dst, $src1, $src2}",
                (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlo.16b\t$dst, $src1, $src2}",
                (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlo.4h\t$dst, $src1, $src2}",
                (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlo.8h\t$dst, $src1, $src2}",
                (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlo.2s\t$dst, $src1, $src2}",
                (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlo.4s\t$dst, $src1, $src2}",
                (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlo.2d\t$dst, $src1, $src2}",
                (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmle.8b\t$dst, $src1, $src2}",
                (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmle.16b\t$dst, $src1, $src2}",
                (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmle.4h\t$dst, $src1, $src2}",
                (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmle.8h\t$dst, $src1, $src2}",
                (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmle.2s\t$dst, $src1, $src2}",
                (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmle.4s\t$dst, $src1, $src2}",
                (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmle.2d\t$dst, $src1, $src2}",
                (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlt.8b\t$dst, $src1, $src2}",
                (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlt.16b\t$dst, $src1, $src2}",
                (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlt.4h\t$dst, $src1, $src2}",
                (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlt.8h\t$dst, $src1, $src2}",
                (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlt.2s\t$dst, $src1, $src2}",
                (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlt.4s\t$dst, $src1, $src2}",
                (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlt.2d\t$dst, $src1, $src2}",
                (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmle.4h\t$dst, $src1, $src2}",
                (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmle.8h\t$dst, $src1, $src2}",
                (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmle.2s\t$dst, $src1, $src2}",
                (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmle.4s\t$dst, $src1, $src2}",
                (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmle.2d\t$dst, $src1, $src2}",
                (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmlt.4h\t$dst, $src1, $src2}",
                (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmlt.8h\t$dst, $src1, $src2}",
                (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmlt.2s\t$dst, $src1, $src2}",
                (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmlt.4s\t$dst, $src1, $src2}",
                (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmlt.2d\t$dst, $src1, $src2}",
                (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
                "|facle.4h\t$dst, $src1, $src2}",
                (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
                "|facle.8h\t$dst, $src1, $src2}",
                (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
                "|facle.2s\t$dst, $src1, $src2}",
                (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
                "|facle.4s\t$dst, $src1, $src2}",
                (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
                "|facle.2d\t$dst, $src1, $src2}",
                (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
                "|faclt.4h\t$dst, $src1, $src2}",
                (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
                "|faclt.8h\t$dst, $src1, $src2}",
                (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
                "|faclt.2s\t$dst, $src1, $src2}",
                (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
                "|faclt.4s\t$dst, $src1, $src2}",
                (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
                "|faclt.2d\t$dst, $src1, $src2}",
                (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions.
//===----------------------------------------------------------------------===//

defm ADD      : SIMDThreeScalarD<0, 0b10000, "add", add>;
defm CMEQ     : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE     : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT     : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI     : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS     : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST    : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD     : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FABD64 FPR64:$Rn, FPR64:$Rm)>;
let Predicates = [HasFullFP16] in {
def : Pat<(fabs (fsub f16:$Rn, f16:$Rm)), (FABD16 f16:$Rn, f16:$Rm)>;
}
def : Pat<(fabs (fsub f32:$Rn, f32:$Rm)), (FABD32 f32:$Rn, f32:$Rm)>;
def : Pat<(fabs (fsub f64:$Rn, f64:$Rm)), (FABD64 f64:$Rn, f64:$Rm)>;
defm FACGE    : SIMDThreeScalarFPCmp<1, 0, 0b101, "facge",
                                     int_aarch64_neon_facge>;
defm FACGT    : SIMDThreeScalarFPCmp<1, 1, 0b101, "facgt",
                                     int_aarch64_neon_facgt>;
defm FCMEQ    : SIMDThreeScalarFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE    : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT    : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FMULX    : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx, HasNEONorStreamingSVE>;
defm FRECPS   : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps, HasNEONorStreamingSVE>;
defm FRSQRTS  : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts, HasNEONorStreamingSVE>;
defm SQADD    : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeScalarHS<  0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeScalarHS<  1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
defm SRSHL    : SIMDThreeScalarD<   0, 0b01010, "srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeScalarD<   0, 0b01000, "sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeScalarD<   1, 0b10000, "sub", sub>;
defm UQADD    : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
defm URSHL    : SIMDThreeScalarD<   1, 0b01010, "urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeScalarD<   1, 0b01000, "ushl", int_aarch64_neon_ushl>;
let Predicates = [HasRDM] in {
  defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
  defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
  def : Pat<(i32 (int_aarch64_neon_sqadd
                   (i32 FPR32:$Rd),
                   (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
                                                   (i32 FPR32:$Rm))))),
            (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
  def : Pat<(i32 (int_aarch64_neon_sqsub
                   (i32 FPR32:$Rd),
                   (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
                                                   (i32 FPR32:$Rm))))),
            (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
}

def : InstAlias<"cmls $dst, $src1, $src2",
                (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmle $dst, $src1, $src2",
                (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlo $dst, $src1, $src2",
                (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlt $dst, $src1, $src2",
                (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions (mixed operands).
//===----------------------------------------------------------------------===//
defm SQDMULL  : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
                                       int_aarch64_neon_sqdmulls_scalar>;
defm SQDMLAL  : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
defm SQDMLSL  : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;

def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;

//===----------------------------------------------------------------------===//
// Advanced SIMD two scalar instructions.
//===----------------------------------------------------------------------===//

defm ABS    : SIMDTwoScalarD<    0, 0b01011, "abs", abs>;
defm CMEQ   : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
defm FCMEQ  : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDFPTwoScalar<   0, 0, 0b11100, "fcvtas">;
defm FCVTAU : SIMDFPTwoScalar<   1, 0, 0b11100, "fcvtau">;
defm FCVTMS : SIMDFPTwoScalar<   0, 0, 0b11011, "fcvtms">;
defm FCVTMU : SIMDFPTwoScalar<   1, 0, 0b11011, "fcvtmu">;
defm FCVTNS : SIMDFPTwoScalar<   0, 0, 0b11010, "fcvtns">;
defm FCVTNU : SIMDFPTwoScalar<   1, 0, 0b11010, "fcvtnu">;
defm FCVTPS : SIMDFPTwoScalar<   0, 1, 0b11010, "fcvtps">;
defm FCVTPU : SIMDFPTwoScalar<   1, 1, 0b11010, "fcvtpu">;
def  FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
defm FCVTZS : SIMDFPTwoScalar<   0, 1, 0b11011, "fcvtzs">;
defm FCVTZU : SIMDFPTwoScalar<   1, 1, 0b11011, "fcvtzu">;
defm FRECPE : SIMDFPTwoScalar<   0, 1, 0b11101, "frecpe", HasNEONorStreamingSVE>;
defm FRECPX : SIMDFPTwoScalar<   0, 1, 0b11111, "frecpx", HasNEONorStreamingSVE>;
defm FRSQRTE : SIMDFPTwoScalar<  1, 1, 0b11101, "frsqrte", HasNEONorStreamingSVE>;
defm NEG    : SIMDTwoScalarD<    1, 0b01011, "neg",
                                 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm SCVTF  : SIMDFPTwoScalarCVT<   0, 0, 0b11101, "scvtf", AArch64sitof>;
defm SQABS  : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
                                     int_aarch64_neon_suqadd>;
defm UCVTF  : SIMDFPTwoScalarCVT<   1, 0, 0b11101, "ucvtf", AArch64uitof>;
defm UQXTN  : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
                                    int_aarch64_neon_usqadd>;

def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
          (FCVTASv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
          (FCVTAUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
          (FCVTMSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
          (FCVTMUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
          (FCVTNSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
          (FCVTNUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
          (FCVTPSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
          (FCVTPUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtzs (v1f64 FPR64:$Rn))),
          (FCVTZSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtzu (v1f64 FPR64:$Rn))),
          (FCVTZUv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frecpe (f16 FPR16:$Rn))),
          (FRECPEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
          (FRECPEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
          (FRECPEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
          (FRECPEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frecps (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRECPS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frecps (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRECPSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frecps (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRECPSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frecps (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRECPS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frecps (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRECPSv2f64 FPR128:$Rn, FPR128:$Rm)>;

def : Pat<(f16 (int_aarch64_neon_frecpx (f16 FPR16:$Rn))),
          (FRECPXv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
          (FRECPXv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
          (FRECPXv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frsqrte (f16 FPR16:$Rn))),
          (FRSQRTEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
          (FRSQRTEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
          (FRSQRTEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
          (FRSQRTEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frsqrts (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRSQRTS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frsqrts (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRSQRTSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frsqrts (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRSQRTSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frsqrts (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRSQRTS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frsqrts (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRSQRTSv2f64 FPR128:$Rn, FPR128:$Rm)>;

// Some float -> int -> float conversion patterns where we want to keep the
// int values in FP registers, using the corresponding NEON instructions to
// avoid the more costly int <-> fp register transfers.
let Predicates = [HasNEON] in {
def : Pat<(f64 (sint_to_fp (i64 (fp_to_sint f64:$Rn)))),
          (SCVTFv1i64 (i64 (FCVTZSv1i64 f64:$Rn)))>;
def : Pat<(f32 (sint_to_fp (i32 (fp_to_sint f32:$Rn)))),
          (SCVTFv1i32 (i32 (FCVTZSv1i32 f32:$Rn)))>;
def : Pat<(f64 (uint_to_fp (i64 (fp_to_uint f64:$Rn)))),
          (UCVTFv1i64 (i64 (FCVTZUv1i64 f64:$Rn)))>;
def : Pat<(f32 (uint_to_fp (i32 (fp_to_uint f32:$Rn)))),
          (UCVTFv1i32 (i32 (FCVTZUv1i32 f32:$Rn)))>;

let Predicates = [HasFullFP16] in {
def : Pat<(f16 (sint_to_fp (i32 (fp_to_sint f16:$Rn)))),
          (SCVTFv1i16 (f16 (FCVTZSv1f16 f16:$Rn)))>;
def : Pat<(f16 (uint_to_fp (i32 (fp_to_uint f16:$Rn)))),
          (UCVTFv1i16 (f16 (FCVTZUv1f16 f16:$Rn)))>;
}
}
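// For illustration only (not part of the build): C like the following
// round-trips through an integer and, with the patterns above, is expected to
// stay entirely in FP registers, e.g. fcvtzs d0, d0 then scvtf d0, d0
// (a sketch):
//   double truncate_to_integer(double x) { return (double)(long long)x; }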

// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// Here are the patterns for 8- and 16-bit to float.
// 8-bit -> float.
multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
                             SDPatternOperator loadop, Instruction UCVTF,
                             ROAddrMode ro, Instruction LDRW, Instruction LDRX,
                             SubRegIndex sub> {
  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                                 sub))>;

  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                                 sub))>;
}

defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
                         UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f32 (uint_to_fp (i32
                     (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bit -> float.
defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
                         UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
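// For illustration only (not part of the build): with the patterns above, C
// like the following is expected to load the byte straight into a SIMD
// register and convert there, avoiding a GPR -> FPR transfer, e.g.
// ldr b0, [x0] then ucvtf s0, s0 (a sketch):
//   float byte_to_float(const unsigned char *p) { return (float)p[0]; }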
// The 32-bit cases are handled in the target-specific DAG combine:
// performIntToFpCombine.
// 64-bit integer to 32-bit floating point is not possible with
// UCVTF on floating point registers (both source and destination
// must have the same size).

// Here are the patterns for 8-, 16-, 32-, and 64-bit to double.
// 8-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
                         UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f64 (uint_to_fp (i32
                    (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
                         UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, load,
                         UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
// 64-bit -> double is handled in the target-specific DAG combine:
// performIntToFpCombine.

//===----------------------------------------------------------------------===//
// Advanced SIMD three different-sized vector instructions.
//===----------------------------------------------------------------------===//

defm ADDHN  : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
defm SUBHN  : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
defm PMULL  : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
defm SABAL  : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
                                             AArch64sabd>;
defm SABDL   : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
                                          AArch64sabd>;
defm SADDL   : SIMDLongThreeVectorBHS<   0, 0b0000, "saddl",
            BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
defm SADDW   : SIMDWideThreeVectorBHS<   0, 0b0001, "saddw",
                 BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
defm SMLAL   : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL   : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL   : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
                                               int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
                                               int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
                                     int_aarch64_neon_sqdmull>;
defm SSUBL   : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
                 BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
defm SSUBW   : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
                 BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
defm UABAL   : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
                                              AArch64uabd>;
defm UADDL   : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
                 BinOpFrag<(add (zanyext node:$LHS), (zanyext node:$RHS))>>;
defm UADDW   : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
                 BinOpFrag<(add node:$LHS, (zanyext node:$RHS))>>;
defm UMLAL   : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL   : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL   : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
defm USUBL   : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
                 BinOpFrag<(sub (zanyext node:$LHS), (zanyext node:$RHS))>>;
defm USUBW   : SIMDWideThreeVectorBHS<   1, 0b0011, "usubw",
                 BinOpFrag<(sub node:$LHS, (zanyext node:$RHS))>>;

// Additional patterns for [SU]ML[AS]L
multiclass Neon_mul_acc_widen_patterns<SDPatternOperator opnode, SDPatternOperator vecopnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v4i16 (opnode
                    V64:$Ra,
                    (v4i16 (extract_subvector
                            (vecopnode (v8i8 V64:$Rn),(v8i8 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v8i16 (INST8B
                                     (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
  def : Pat<(v2i32 (opnode
                    V64:$Ra,
                    (v2i32 (extract_subvector
                            (vecopnode (v4i16 V64:$Rn),(v4i16 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v4i32 (INST4H
                                     (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
  def : Pat<(v1i64 (opnode
                    V64:$Ra,
                    (v1i64 (extract_subvector
                            (vecopnode (v2i32 V64:$Rn),(v2i32 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v2i64 (INST2S
                                     (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
}

defm : Neon_mul_acc_widen_patterns<add, int_aarch64_neon_umull,
     UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<add, int_aarch64_neon_smull,
     SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<sub, int_aarch64_neon_umull,
     UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<sub, int_aarch64_neon_smull,
     SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;

// Additional patterns for SMULL and UMULL
multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V64:$Rn, V64:$Rm)>;
}

defm : Neon_mul_widen_patterns<AArch64smull, SMULLv8i8_v8i16,
  SMULLv4i16_v4i32, SMULLv2i32_v2i64>;
defm : Neon_mul_widen_patterns<AArch64umull, UMULLv8i8_v8i16,
  UMULLv4i16_v4i32, UMULLv2i32_v2i64>;

// Patterns for smull2/umull2.
multiclass Neon_mul_high_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (extract_high_v16i8 V128:$Rn),
                           (extract_high_v16i8 V128:$Rm))),
             (INST8B V128:$Rn, V128:$Rm)>;
  def : Pat<(v4i32 (opnode (extract_high_v8i16 V128:$Rn),
                           (extract_high_v8i16 V128:$Rm))),
             (INST4H V128:$Rn, V128:$Rm)>;
  def : Pat<(v2i64 (opnode (extract_high_v4i32 V128:$Rn),
                           (extract_high_v4i32 V128:$Rm))),
             (INST2S V128:$Rn, V128:$Rm)>;
}

defm : Neon_mul_high_patterns<AArch64smull, SMULLv16i8_v8i16,
  SMULLv8i16_v4i32, SMULLv4i32_v2i64>;
defm : Neon_mul_high_patterns<AArch64umull, UMULLv16i8_v8i16,
  UMULLv8i16_v4i32, UMULLv4i32_v2i64>;

// Additional patterns for SMLAL/SMLSL and UMLAL/UMLSL
multiclass Neon_mulacc_widen_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S  V128:$Rd, V64:$Rn, V64:$Rm)>;
}

defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
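// For illustration only (not part of the build): the widening
// multiply-accumulate fragments above cover the ACLE intrinsics directly, so
// C like the following should select UMLAL (a sketch):
//   #include <arm_neon.h>
//   uint32x4_t mac(uint32x4_t acc, uint16x4_t a, uint16x4_t b) {
//     return vmlal_u16(acc, a, b);  // acc + zext(a) * zext(b)
//   }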

// Patterns for 64-bit pmull
def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
          (PMULLv1i64 V64:$Rn, V64:$Rm)>;
def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
                                    (extractelt (v2i64 V128:$Rm), (i64 1))),
          (PMULLv2i64 V128:$Rn, V128:$Rm)>;

// CodeGen patterns for addhn and subhn instructions, which can actually be
// written in LLVM IR without too much difficulty.

// ADDHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
          (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                           (i32 16))))),
          (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                           (i32 32))))),
          (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 8))))),
          (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 16))))),
          (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 32))))),
          (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

// SUBHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
          (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                           (i32 16))))),
          (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                           (i32 32))))),
          (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 8))))),
          (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 16))))),
          (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 32))))),
          (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
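// For illustration only (not part of the build): vaddhn_u16 lowers to exactly
// the add+lshr+trunc IR matched above, so C like the following should select
// ADDHN (a sketch):
//   #include <arm_neon.h>
//   uint8x8_t narrow_high(uint16x8_t a, uint16x8_t b) {
//     return vaddhn_u16(a, b);  // trunc((a + b) >> 8) per lane
//   }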

//----------------------------------------------------------------------------
// AdvSIMD bitwise extract from vector instruction.
//----------------------------------------------------------------------------

defm EXT : SIMDBitwiseExtract<"ext">;

def AdjustExtImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 + N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
multiclass ExtPat<ValueType VT64, ValueType VT128, int N> {
  def : Pat<(VT64 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
            (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
  def : Pat<(VT128 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
            (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
  // We use EXT to handle extract_subvector to copy the upper 64 bits of a
  // 128-bit vector.
  def : Pat<(VT64 (extract_subvector V128:$Rn, (i64 N))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
  // A 64-bit EXT of two halves of the same 128-bit register can be done as a
  // single 128-bit EXT.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 0)),
                              (extract_subvector V128:$Rn, (i64 N)),
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, imm:$imm), dsub)>;
  // A 64-bit EXT of the high half of a 128-bit register can be done using a
  // 128-bit EXT of the whole register with an adjustment to the immediate. The
  // top half of the other operand will be unset, but that doesn't matter as it
  // will not be used.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 N)),
                              V64:$Rm,
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn,
                                      (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                                      (AdjustExtImm imm:$imm)), dsub)>;
}

defm : ExtPat<v8i8, v16i8, 8>;
defm : ExtPat<v4i16, v8i16, 4>;
defm : ExtPat<v4f16, v8f16, 4>;
defm : ExtPat<v4bf16, v8bf16, 4>;
defm : ExtPat<v2i32, v4i32, 2>;
defm : ExtPat<v2f32, v4f32, 2>;
defm : ExtPat<v1i64, v2i64, 1>;
defm : ExtPat<v1f64, v2f64, 1>;
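// For illustration only (not part of the build): per the comments above, a
// 64-bit EXT of the two halves of one 128-bit register stays a single EXT,
// so C like the following is expected to select one EXTv16i8 (a sketch):
//   #include <arm_neon.h>
//   uint8x8_t ext_halves(uint8x16_t v) {
//     return vext_u8(vget_low_u8(v), vget_high_u8(v), 3);
//   }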

//----------------------------------------------------------------------------
// AdvSIMD zip vector
//----------------------------------------------------------------------------

defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;

//----------------------------------------------------------------------------
// AdvSIMD TBL/TBX instructions
//----------------------------------------------------------------------------

defm TBL : SIMDTableLookup<    0, "tbl">;
defm TBX : SIMDTableLookupTied<1, "tbx">;

def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBLv16i8One V128:$Ri, V128:$Rn)>;

def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
                  (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
                   (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;


//----------------------------------------------------------------------------
// AdvSIMD scalar CPY instruction
//----------------------------------------------------------------------------

defm CPY : SIMDScalarCPY<"mov">;

//----------------------------------------------------------------------------
// AdvSIMD scalar pairwise instructions
//----------------------------------------------------------------------------

defm ADDP    : SIMDPairwiseScalarD<0, 0b11011, "addp">;
defm FADDP   : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
defm FMAXP   : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
defm FMINP   : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;

let Predicates = [HasFullFP16] in {
def : Pat<(f16 (vecreduce_fadd (v8f16 V128:$Rn))),
            (FADDPv2i16p
              (EXTRACT_SUBREG
                 (FADDPv8f16 (FADDPv8f16 V128:$Rn, (v8f16 (IMPLICIT_DEF))), (v8f16 (IMPLICIT_DEF))),
               dsub))>;
def : Pat<(f16 (vecreduce_fadd (v4f16 V64:$Rn))),
          (FADDPv2i16p (FADDPv4f16 V64:$Rn, (v4f16 (IMPLICIT_DEF))))>;
}
def : Pat<(f32 (vecreduce_fadd (v4f32 V128:$Rn))),
          (FADDPv2i32p
            (EXTRACT_SUBREG
              (FADDPv4f32 V128:$Rn, (v4f32 (IMPLICIT_DEF))),
             dsub))>;
def : Pat<(f32 (vecreduce_fadd (v2f32 V64:$Rn))),
          (FADDPv2i32p V64:$Rn)>;
def : Pat<(f64 (vecreduce_fadd (v2f64 V128:$Rn))),
          (FADDPv2i64p V128:$Rn)>;

def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
          (FADDPv2i32p V64:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
          (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
          (FADDPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
          (FMAXNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
          (FMAXNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
          (FMAXPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
          (FMAXPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
          (FMINNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
          (FMINNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
          (FMINPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
          (FMINPv2i64p V128:$Rn)>;
5357
5358//----------------------------------------------------------------------------
5359// AdvSIMD INS/DUP instructions
5360//----------------------------------------------------------------------------
5361
5362def DUPv8i8gpr  : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
5363def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
5364def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
5365def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
5366def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
5367def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
5368def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;
5369
5370def DUPv2i64lane : SIMDDup64FromElement;
5371def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
5372def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
5373def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
5374def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
5375def DUPv8i8lane  : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
5376def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;
5377
5378// DUP from a 64-bit register to a 64-bit register is just a copy
5379def : Pat<(v1i64 (AArch64dup (i64 GPR64:$Rn))),
5380          (COPY_TO_REGCLASS GPR64:$Rn, FPR64)>;
5381def : Pat<(v1f64 (AArch64dup (f64 FPR64:$Rn))),
5382          (COPY_TO_REGCLASS FPR64:$Rn, FPR64)>;
5383
5384def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
5385          (v2f32 (DUPv2i32lane
5386            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
5387            (i64 0)))>;
5388def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
5389          (v4f32 (DUPv4i32lane
5390            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
5391            (i64 0)))>;
5392def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
5393          (v2f64 (DUPv2i64lane
5394            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
5395            (i64 0)))>;
5396def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
5397          (v4f16 (DUPv4i16lane
5398            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5399            (i64 0)))>;
5400def : Pat<(v4bf16 (AArch64dup (bf16 FPR16:$Rn))),
5401          (v4bf16 (DUPv4i16lane
5402            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5403            (i64 0)))>;
5404def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
5405          (v8f16 (DUPv8i16lane
5406            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5407            (i64 0)))>;
5408def : Pat<(v8bf16 (AArch64dup (bf16 FPR16:$Rn))),
5409          (v8bf16 (DUPv8i16lane
5410            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5411            (i64 0)))>;
5412
5413def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
5414          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
5415def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
5416          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;
5417
5418def : Pat<(v4bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
5419          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
5420def : Pat<(v8bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
5421          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;
5422
5423def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
5424          (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
5425def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
5426         (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
5427def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
5428          (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;
5429
5430// If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
5431// instruction even if the types don't match: we just have to remap the lane
5432// carefully. N.b. this trick only applies to truncations.
5433def VecIndex_x2 : SDNodeXForm<imm, [{
5434  return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
5435}]>;
5436def VecIndex_x4 : SDNodeXForm<imm, [{
5437  return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
5438}]>;
5439def VecIndex_x8 : SDNodeXForm<imm, [{
5440  return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
5441}]>;
5442
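// A sketch of the index arithmetic: one lane of the wider source covers
// several lanes of the narrower result, so the lane number scales by the
// width ratio. E.g. duplicating the truncated h[3] of a v8i16 reads its low
// byte, which on a little-endian layout is b[2*3] = b[6]:
//   dup v0.8b, v1.b[6]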
5443multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
5444                            ValueType Src128VT, ValueType ScalVT,
5445                            Instruction DUP, SDNodeXForm IdxXFORM> {
5446  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
5447                                                     imm:$idx)))),
5448            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
5449
5450  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
5451                                                     imm:$idx)))),
5452            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
5453}
5454
5455defm : DUPWithTruncPats<v8i8,   v4i16, v8i16, i32, DUPv8i8lane,  VecIndex_x2>;
5456defm : DUPWithTruncPats<v8i8,   v2i32, v4i32, i32, DUPv8i8lane,  VecIndex_x4>;
5457defm : DUPWithTruncPats<v4i16,  v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;
5458
5459defm : DUPWithTruncPats<v16i8,  v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
5460defm : DUPWithTruncPats<v16i8,  v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
5461defm : DUPWithTruncPats<v8i16,  v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;
5462
5463multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
5464                               SDNodeXForm IdxXFORM> {
5465  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
5466                                                         imm:$idx))))),
5467            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
5468
5469  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
5470                                                       imm:$idx))))),
5471            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
5472}
5473
5474defm : DUPWithTrunci64Pats<v8i8,  DUPv8i8lane,   VecIndex_x8>;
5475defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane,  VecIndex_x4>;
5476defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane,  VecIndex_x2>;
5477
5478defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
5479defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
5480defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;
5481
5482// SMOV and UMOV definitions, with some extra patterns for convenience
5483defm SMOV : SMov;
5484defm UMOV : UMov;
5485
5486def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
5487          (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
5488def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
5489          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
5490def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx), i16),
5491          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
5492def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx), i16),
5493          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
5496def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
5497          (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;
5498
5499def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
5500            VectorIndexB:$idx)))), i8),
5501          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
5502def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
5503            VectorIndexH:$idx)))), i16),
5504          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
5505
5506// Extracting i8 or i16 elements will have the zero-extend transformed to
5507// an 'and' mask by type legalization, since neither i8 nor i16 is a legal
5508// type for AArch64. Match these patterns here since UMOV already zeroes out
5509// the high bits of the destination register.
5510def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
5511               (i32 0xff)),
5512          (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
5513def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
5514               (i32 0xffff)),
5515          (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;
5516
5517def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
5518            VectorIndexB:$idx)))), (i64 0xff))),
5519          (SUBREG_TO_REG (i64 0), (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx)), sub_32)>;
5520def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
5521            VectorIndexH:$idx)))), (i64 0xffff))),
5522          (SUBREG_TO_REG (i64 0), (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx)), sub_32)>;
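// For example, masking an extracted byte with 0xff folds away entirely:
//   umov w8, v0.b[5]
// already zeroes bits [31:8] of w8, and for the i64 forms SUBREG_TO_REG
// models the implicit zeroing of the upper 32 bits of the X register.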
5523
5524defm INS : SIMDIns;
5525
5526def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
5527          (SUBREG_TO_REG (i32 0),
5528                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5529def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
5530          (SUBREG_TO_REG (i32 0),
5531                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5532
5533def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
5534          (SUBREG_TO_REG (i32 0),
5535                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5536def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
5537          (SUBREG_TO_REG (i32 0),
5538                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5539
5540def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
5541          (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5542def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
5543          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5544
5545def : Pat<(v4bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
5546          (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5547def : Pat<(v8bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
5548          (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5549
5550def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
5551            (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
5552                                  (i32 FPR32:$Rn), ssub))>;
5553def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
5554            (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
5555                                  (i32 FPR32:$Rn), ssub))>;
5556
5557def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
5558            (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
5559                                  (i64 FPR64:$Rn), dsub))>;
5560
5571def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
5572          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
5573def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
5574          (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
5575
5576def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
5577          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;
5578
5579def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
5580            (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
5581          (EXTRACT_SUBREG
5582            (INSvi16lane
5583              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
5584              VectorIndexS:$imm,
5585              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
5586              (i64 0)),
5587            dsub)>;
5588
5589def : Pat<(vector_insert (v8f16 v8f16:$Rn), (f16 fpimm0),
5590            (i64 VectorIndexH:$imm)),
5591          (INSvi16gpr V128:$Rn, VectorIndexH:$imm, WZR)>;
5592def : Pat<(vector_insert v4f32:$Rn, (f32 fpimm0),
5593            (i64 VectorIndexS:$imm)),
5594          (INSvi32gpr V128:$Rn, VectorIndexS:$imm, WZR)>;
5595def : Pat<(vector_insert v2f64:$Rn, (f64 fpimm0),
5596            (i64 VectorIndexD:$imm)),
5597          (INSvi64gpr V128:$Rn, VectorIndexD:$imm, XZR)>;
5598
5599def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
5600            (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
5601          (INSvi16lane
5602            V128:$Rn, VectorIndexH:$imm,
5603            (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
5604            (i64 0))>;
5605
5606def : Pat<(v4bf16 (vector_insert (v4bf16 V64:$Rn),
5607            (bf16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
5608          (EXTRACT_SUBREG
5609            (INSvi16lane
5610              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
5611              VectorIndexS:$imm,
5612              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
5613              (i64 0)),
5614            dsub)>;
5615
5616def : Pat<(v8bf16 (vector_insert (v8bf16 V128:$Rn),
5617            (bf16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
5618          (INSvi16lane
5619            V128:$Rn, VectorIndexH:$imm,
5620            (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
5621            (i64 0))>;
5622
5623def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
5624            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
5625          (EXTRACT_SUBREG
5626            (INSvi32lane
5627              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
5628              VectorIndexS:$imm,
5629              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
5630              (i64 0)),
5631            dsub)>;
5632def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
5633            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
5634          (INSvi32lane
5635            V128:$Rn, VectorIndexS:$imm,
5636            (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
5637            (i64 0))>;
5638def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
5639            (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
5640          (INSvi64lane
5641            V128:$Rn, VectorIndexD:$imm,
5642            (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
5643            (i64 0))>;
5644
5645// Copy an element at a constant index in one vector into a constant-indexed
5646// element of another.
5647// FIXME: refactor to a shared class/def parameterized on vector type, vector
5648// index type and INS extension.
5649def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
5650                   (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
5651                   VectorIndexB:$idx2)),
5652          (v16i8 (INSvi8lane
5653                   V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
5654          )>;
5655def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
5656                   (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
5657                   VectorIndexH:$idx2)),
5658          (v8i16 (INSvi16lane
5659                   V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
5660          )>;
5661def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
5662                   (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
5663                   VectorIndexS:$idx2)),
5664          (v4i32 (INSvi32lane
5665                   V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
5666          )>;
5667def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
5668                   (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
5669                   VectorIndexD:$idx2)),
5670          (v2i64 (INSvi64lane
5671                   V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
5672          )>;
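// Illustrative mapping (assuming the usual ACLE lowering): an element copy
// such as vcopyq_laneq_s8(d, 3, s, 5) becomes a single element move,
//   ins v0.b[3], v1.b[5]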
5673
5674multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
5675                                ValueType VTScal, Instruction INS> {
5676  def : Pat<(VT128 (vector_insert V128:$src,
5677                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
5678                        imm:$Immd)),
5679            (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;
5680
5681  def : Pat<(VT128 (vector_insert V128:$src,
5682                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
5683                        imm:$Immd)),
5684            (INS V128:$src, imm:$Immd,
5685                 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;
5686
5687  def : Pat<(VT64 (vector_insert V64:$src,
5688                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
5689                        imm:$Immd)),
5690            (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
5691                                 imm:$Immd, V128:$Rn, imm:$Immn),
5692                            dsub)>;
5693
5694  def : Pat<(VT64 (vector_insert V64:$src,
5695                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
5696                        imm:$Immd)),
5697            (EXTRACT_SUBREG
5698                (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
5699                     (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
5700                dsub)>;
5701}
5702
5703defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
5704defm : Neon_INS_elt_pattern<v8bf16, v4bf16, bf16, INSvi16lane>;
5705defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
5706defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;
5707
5708
5709// Floating point vector extractions are codegen'd as either a sequence of
5710// subregister extractions, or a MOV (aka CPY here, alias for DUP) if
5711// the lane number is anything other than zero.
5712def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
5713          (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
5714def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
5715          (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
5716def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
5717          (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
5718def : Pat<(vector_extract (v8bf16 V128:$Rn), 0),
5719          (bf16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
5720
5721
5722def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
5723          (f64 (CPYi64 V128:$Rn, VectorIndexD:$idx))>;
5724def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
5725          (f32 (CPYi32 V128:$Rn, VectorIndexS:$idx))>;
5726def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
5727          (f16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
5728def : Pat<(vector_extract (v8bf16 V128:$Rn), VectorIndexH:$idx),
5729          (bf16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
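// So, for illustration: extracting lane 0 of a v4f32 is free, since s0 is
// simply the low 32 bits of q0, while extracting any other lane needs a
// real instruction, e.g. for lane 1:
//   mov s0, v0.s[1]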
5730
5731// All concat_vectors operations are canonicalised to act on i64 vectors for
5732// AArch64. In the general case we need an instruction, which might just as
5733// well be INS.
5734class ConcatPat<ValueType DstTy, ValueType SrcTy>
5735  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
5736        (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
5737                     (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;
5738
5739def : ConcatPat<v2i64, v1i64>;
5740def : ConcatPat<v2f64, v1f64>;
5741def : ConcatPat<v4i32, v2i32>;
5742def : ConcatPat<v4f32, v2f32>;
5743def : ConcatPat<v8i16, v4i16>;
5744def : ConcatPat<v8f16, v4f16>;
5745def : ConcatPat<v8bf16, v4bf16>;
5746def : ConcatPat<v16i8, v8i8>;
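// Concretely, e.g. (v4i32 (concat_vectors (v2i32 lo), (v2i32 hi))) becomes
// roughly one element move once lo sits in the low half of the register:
//   mov v0.d[1], v1.d[0]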
5747
5748// If the high lanes are undef, though, we can just ignore them:
5749class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
5750  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
5751        (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;
5752
5753def : ConcatUndefPat<v2i64, v1i64>;
5754def : ConcatUndefPat<v2f64, v1f64>;
5755def : ConcatUndefPat<v4i32, v2i32>;
5756def : ConcatUndefPat<v4f32, v2f32>;
5757def : ConcatUndefPat<v8i16, v4i16>;
5758def : ConcatUndefPat<v16i8, v8i8>;
5759
5760//----------------------------------------------------------------------------
5761// AdvSIMD across lanes instructions
5762//----------------------------------------------------------------------------
5763
5764defm ADDV    : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
5765defm SMAXV   : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
5766defm SMINV   : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
5767defm UMAXV   : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
5768defm UMINV   : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
5769defm SADDLV  : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
5770defm UADDLV  : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
5771defm FMAXNMV : SIMDFPAcrossLanes<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
5772defm FMAXV   : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
5773defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
5774defm FMINV   : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;
5775
5776// Patterns for uaddv(uaddlp(x)) ==> uaddlv
5777def : Pat<(i32 (vector_extract (v8i16 (insert_subvector undef,
5778            (v4i16 (AArch64uaddv (v4i16 (AArch64uaddlp (v8i8 V64:$op))))),
5779            (i64 0))), (i64 0))),
5780          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
5781           (UADDLVv8i8v V64:$op), hsub), ssub)>;
5782def : Pat<(i32 (vector_extract (v8i16 (AArch64uaddv (v8i16 (AArch64uaddlp
5783           (v16i8 V128:$op))))), (i64 0))),
5784          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
5785           (UADDLVv16i8v V128:$op), hsub), ssub)>;
5786def : Pat<(v4i32 (AArch64uaddv (v4i32 (AArch64uaddlp (v8i16 V128:$op))))),
5787          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), (UADDLVv8i16v V128:$op), ssub)>;
5788
5789// Patterns for addp(uaddlp(x)) ==> uaddlv
5790def : Pat<(v2i32 (AArch64uaddv (v2i32 (AArch64uaddlp (v4i16 V64:$op))))),
5791          (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)), (UADDLVv4i16v V64:$op), ssub)>;
5792def : Pat<(v2i64 (AArch64uaddv (v2i64 (AArch64uaddlp (v4i32 V128:$op))))),
5793          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (UADDLVv4i32v V128:$op), dsub)>;
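// For example, the first pattern above turns a zero-extending v8i8 sum
// reduction into a single instruction,
//   uaddlv h0, v0.8b
// instead of a UADDLP/ADDV pair.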
5794
5795// Patterns for across-vector intrinsics that have a node equivalent which
5796// returns a vector (with only the low lane defined) instead of a scalar.
5797// In effect, opNode is the same as (scalar_to_vector (IntNode)).
5798multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
5799                                    SDPatternOperator opNode> {
5800// If a lane instruction caught the vector_extract around opNode, we can
5801// directly match the latter to the instruction.
5802def : Pat<(v8i8 (opNode V64:$Rn)),
5803          (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
5804           (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
5805def : Pat<(v16i8 (opNode V128:$Rn)),
5806          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5807           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
5808def : Pat<(v4i16 (opNode V64:$Rn)),
5809          (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
5810           (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
5811def : Pat<(v8i16 (opNode V128:$Rn)),
5812          (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
5813           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
5814def : Pat<(v4i32 (opNode V128:$Rn)),
5815          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
5816           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;
5817
5818
5819// If none did, fall back to the explicit patterns, consuming the vector_extract.
5820def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
5821            (i64 0)), (i64 0))),
5822          (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
5823            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
5824            bsub), ssub)>;
5825def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
5826          (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5827            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
5828            bsub), ssub)>;
5829def : Pat<(i32 (vector_extract (insert_subvector undef,
5830            (v4i16 (opNode V64:$Rn)), (i64 0)), (i64 0))),
5831          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
5832            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
5833            hsub), ssub)>;
5834def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
5835          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
5836            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
5837            hsub), ssub)>;
5838def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
5839          (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
5840            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
5841            ssub), ssub)>;
5842
5843}
5844
5845multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
5846                                          SDPatternOperator opNode>
5847    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
5848// If there is a sign extension after this intrinsic, consume it, as SMOV
5849// already performed it.
5850def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
5851            (opNode (v8i8 V64:$Rn)), (i64 0)), (i64 0))), i8)),
5852          (i32 (SMOVvi8to32
5853            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5854              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
5855            (i64 0)))>;
5856def : Pat<(i32 (sext_inreg (i32 (vector_extract
5857            (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
5858          (i32 (SMOVvi8to32
5859            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5860             (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
5861            (i64 0)))>;
5862def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
5863            (opNode (v4i16 V64:$Rn)), (i64 0)), (i64 0))), i16)),
5864          (i32 (SMOVvi16to32
5865           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5866            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
5867           (i64 0)))>;
5868def : Pat<(i32 (sext_inreg (i32 (vector_extract
5869            (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
5870          (i32 (SMOVvi16to32
5871            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5872             (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
5873            (i64 0)))>;
5874}
5875
5876multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
5877                                            SDPatternOperator opNode>
5878    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
5879// If there is a masking operation keeping only what was actually
5880// generated, consume it.
5881def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
5882            (opNode (v8i8 V64:$Rn)), (i64 0)), (i64 0))), maski8_or_more)),
5883      (i32 (EXTRACT_SUBREG
5884        (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5885          (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
5886        ssub))>;
5887def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
5888            maski8_or_more)),
5889        (i32 (EXTRACT_SUBREG
5890          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5891            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
5892          ssub))>;
5893def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
5894            (opNode (v4i16 V64:$Rn)), (i64 0)), (i64 0))), maski16_or_more)),
5895          (i32 (EXTRACT_SUBREG
5896            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5897              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
5898            ssub))>;
5899def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
5900            maski16_or_more)),
5901        (i32 (EXTRACT_SUBREG
5902          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5903            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
5904          ssub))>;
5905}
5906
5907defm : SIMDAcrossLanesSignedIntrinsic<"ADDV",  AArch64saddv>;
5908// vaddv_[su]32 is special: lower to ADDP Vd.2S, Vn.2S, Vm.2S with Vn == Vm, then return Vd.s[0].
5909def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
5910          (ADDPv2i32 V64:$Rn, V64:$Rn)>;
5911
5912defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
5913// vaddv_[su]32 is special: lower to ADDP Vd.2S, Vn.2S, Vm.2S with Vn == Vm, then return Vd.s[0].
5914def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
5915          (ADDPv2i32 V64:$Rn, V64:$Rn)>;
5916
5917defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
5918def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
5919          (SMAXPv2i32 V64:$Rn, V64:$Rn)>;
5920
5921defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
5922def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
5923          (SMINPv2i32 V64:$Rn, V64:$Rn)>;
5924
5925defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
5926def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
5927          (UMAXPv2i32 V64:$Rn, V64:$Rn)>;
5928
5929defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
5930def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
5931          (UMINPv2i32 V64:$Rn, V64:$Rn)>;
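// The v2i32 reductions above need no across-lanes instruction: with only two
// lanes, a single pairwise operation with both source operands equal does the
// job, e.g.
//   addp v0.2s, v1.2s, v1.2s
// after which lane 0 holds s[0] + s[1].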
5932
5933multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
5934  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
5935        (i32 (SMOVvi16to32
5936          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5937            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
5938          (i64 0)))>;
5939def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
5940        (i32 (SMOVvi16to32
5941          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5942           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
5943          (i64 0)))>;
5944
5945def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
5946          (i32 (EXTRACT_SUBREG
5947           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5948            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
5949           ssub))>;
5950def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
5951        (i32 (EXTRACT_SUBREG
5952          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5953           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
5954          ssub))>;
5955
5956def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
5957        (i64 (EXTRACT_SUBREG
5958          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5959           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
5960          dsub))>;
5961}
5962
5963multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
5964                                                Intrinsic intOp> {
5965  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
5966        (i32 (EXTRACT_SUBREG
5967          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5968            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
5969          ssub))>;
5970def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
5971        (i32 (EXTRACT_SUBREG
5972          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5973            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
5974          ssub))>;
5975
5976def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
5977          (i32 (EXTRACT_SUBREG
5978            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5979              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
5980            ssub))>;
5981def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
5982        (i32 (EXTRACT_SUBREG
5983          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5984            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
5985          ssub))>;
5986
5987def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
5988        (i64 (EXTRACT_SUBREG
5989          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5990            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
5991          dsub))>;
5992}
5993
5994defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
5995defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;
5996
5997// The vaddlv_s32 intrinsic gets mapped to SADDLP.
5998def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
5999          (i64 (EXTRACT_SUBREG
6000            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6001              (SADDLPv2i32_v1i64 V64:$Rn), dsub),
6002            dsub))>;
6003// The vaddlv_u32 intrinsic gets mapped to UADDLP.
6004def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
6005          (i64 (EXTRACT_SUBREG
6006            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6007              (UADDLPv2i32_v1i64 V64:$Rn), dsub),
6008            dsub))>;
6009
6010//----------------------------------------------------------------------------
6011// AdvSIMD modified immediate instructions
6012//----------------------------------------------------------------------------
6013
6014// AdvSIMD BIC
6015defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
6016// AdvSIMD ORR
6017defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;
6018
6019def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
6020def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
6021def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
6022def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
6023
6024def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
6025def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
6026def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
6027def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
6028
6029def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
6030def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
6031def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
6032def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
6033
6034def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
6035def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
6036def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
6037def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
6038
6039// AdvSIMD FMOV
6040def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
6041                                              "fmov", ".2d",
6042                       [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6043def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64,  fpimm8,
6044                                              "fmov", ".2s",
6045                       [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6046def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
6047                                              "fmov", ".4s",
6048                       [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6049let Predicates = [HasNEON, HasFullFP16] in {
6050def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64,  fpimm8,
6051                                              "fmov", ".4h",
6052                       [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6053def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
6054                                              "fmov", ".8h",
6055                       [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6056} // Predicates = [HasNEON, HasFullFP16]
6057
6058// AdvSIMD MOVI
6059
6060// EDIT byte mask: scalar
6061let isReMaterializable = 1, isAsCheapAsAMove = 1 in
6062def MOVID      : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
6063                    [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
6064// The movi_edit node has the immediate value already encoded, so we use
6065// a plain imm0_255 here.
6066def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
6067          (MOVID imm0_255:$shift)>;
6068
6069// EDIT byte mask: 2d
6070
6071// The movi_edit node has the immediate value already encoded, so we use
6072// a plain imm0_255 in the pattern
6073let isReMaterializable = 1, isAsCheapAsAMove = 1 in
6074def MOVIv2d_ns   : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
6075                                                simdimmtype10,
6076                                                "movi", ".2d",
6077                   [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;
6078
6079def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
6080def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
6081def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
6082def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;
6083
6084def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
6085def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
6086def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
6087def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;
6088
6089// Set 64-bit vectors to all 0/1 by extracting from a 128-bit register as the
6090// extract is free and this gives better MachineCSE results.
6091def : Pat<(v1i64 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
6092def : Pat<(v2i32 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
6093def : Pat<(v4i16 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
6094def : Pat<(v8i8  immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
6095
6096def : Pat<(v1i64 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
6097def : Pat<(v2i32 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
6098def : Pat<(v4i16 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
6099def : Pat<(v8i8  immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
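// Both materializations are a single instruction either way, e.g.
//   movi v0.2d, #0                    // all zeros
//   movi v0.2d, #0xffffffffffffffff   // all ones (imm8 = 0xff)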
6100
6101// EDIT per word & halfword: 2s, 4h, 4s, & 8h
6102let isReMaterializable = 1, isAsCheapAsAMove = 1 in
6103defm MOVI      : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;
6104
6105def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
6106def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
6107def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
6108def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
6109
6110def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
6111def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
6112def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
6113def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
6114
6115def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
6116          (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
6117def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
6118          (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
6119def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
6120          (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
6121def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
6122          (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;
6123
6124let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
6125// EDIT per word: 2s & 4s with MSL shifter
6126def MOVIv2s_msl  : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
6127                      [(set (v2i32 V64:$Rd),
6128                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
6129def MOVIv4s_msl  : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
6130                      [(set (v4i32 V128:$Rd),
6131                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
6132
6133// Per byte: 8b & 16b
6134def MOVIv8b_ns   : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64,  imm0_255,
6135                                                 "movi", ".8b",
6136                       [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;
6137
6138def MOVIv16b_ns  : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
6139                                                 "movi", ".16b",
6140                       [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
6141}
6142
6143// AdvSIMD MVNI
6144
6145// EDIT per word & halfword: 2s, 4h, 4s, & 8h
6146let isReMaterializable = 1, isAsCheapAsAMove = 1 in
6147defm MVNI      : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;
6148
6149def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
6150def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
6151def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
6152def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
6153
6154def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
6155def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
6156def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
6157def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
6158
6159def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
6160          (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
6161def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
6162          (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
6163def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
6164          (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
6165def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
6166          (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;
6167
6168// EDIT per word: 2s & 4s with MSL shifter
6169let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
6170def MVNIv2s_msl   : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
6171                      [(set (v2i32 V64:$Rd),
6172                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
6173def MVNIv4s_msl   : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
6174                      [(set (v4i32 V128:$Rd),
6175                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
6176}
6177
6178//----------------------------------------------------------------------------
6179// AdvSIMD indexed element
6180//----------------------------------------------------------------------------
6181
6182let hasSideEffects = 0 in {
6183  defm FMLA  : SIMDFPIndexedTied<0, 0b0001, "fmla">;
6184  defm FMLS  : SIMDFPIndexedTied<0, 0b0101, "fmls">;
6185}
6186
6187// NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
6188// instruction expects the addend first, while the intrinsic expects it last.
6189
6190// In addition, there are quite a few valid combinatorial options due to
6191// the commutativity of multiplication and the fact that (-x) * y = x * (-y).
6192defm : SIMDFPIndexedTiedPatterns<"FMLA",
6193           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
6194defm : SIMDFPIndexedTiedPatterns<"FMLA",
6195           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;
6196
6197defm : SIMDFPIndexedTiedPatterns<"FMLS",
6198           TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
6199defm : SIMDFPIndexedTiedPatterns<"FMLS",
6200           TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
6201defm : SIMDFPIndexedTiedPatterns<"FMLS",
6202           TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
6203defm : SIMDFPIndexedTiedPatterns<"FMLS",
6204           TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;
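// Taken together, the four FMLS fragments above cover every placement the
// DAG may present for the negation, using the identity
// fma(-x, y, z) = fma(x, -y, z) and the commutativity of the first two fma
// operands.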
6205
6206multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
6207  // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
6208  // and DUP scalar.
6209  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
6210                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
6211                                           VectorIndexS:$idx))),
6212            (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
6213  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
6214                           (v2f32 (AArch64duplane32
6215                                      (v4f32 (insert_subvector undef,
6216                                                 (v2f32 (fneg V64:$Rm)),
6217                                                 (i64 0))),
6218                                      VectorIndexS:$idx)))),
6219            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
6220                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
6221                               VectorIndexS:$idx)>;
6222  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
6223                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
6224            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
6225                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
6226
6227  // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
6228  // and DUP scalar.
6229  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
6230                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
6231                                           VectorIndexS:$idx))),
6232            (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
6233                               VectorIndexS:$idx)>;
6234  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
6235                           (v4f32 (AArch64duplane32
6236                                      (v4f32 (insert_subvector undef,
6237                                                 (v2f32 (fneg V64:$Rm)),
6238                                                 (i64 0))),
6239                                      VectorIndexS:$idx)))),
6240            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
6241                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
6242                               VectorIndexS:$idx)>;
6243  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
6244                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
6245            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
6246                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
6247
6248  // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
6249  // (DUPLANE from 64-bit would be trivial).
6250  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
6251                           (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
6252                                           VectorIndexD:$idx))),
6253            (FMLSv2i64_indexed
6254                V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexD:$idx)>;
6255  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
6256                           (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
6257            (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
6258                (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;
6259
6260  // 2 variants for 32-bit scalar version: extract from .2s or from .4s
6261  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
6262                         (vector_extract (v4f32 (fneg V128:$Rm)),
6263                                         VectorIndexS:$idx))),
6264            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
6265                V128:$Rm, VectorIndexS:$idx)>;
6266  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
6267                         (vector_extract (v4f32 (insert_subvector undef,
6268                                                    (v2f32 (fneg V64:$Rm)),
6269                                                    (i64 0))),
6270                                         VectorIndexS:$idx))),
6271            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
6272                (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;
6273
6274  // 1 variant for 64-bit scalar version: extract from .1d or from .2d
6275  def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
6276                         (vector_extract (v2f64 (fneg V128:$Rm)),
6277                                         VectorIndexD:$idx))),
6278            (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
6279                V128:$Rm, VectorIndexD:$idx)>;
6280}
6281
6282defm : FMLSIndexedAfterNegPatterns<
6283           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
6284defm : FMLSIndexedAfterNegPatterns<
6285           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;
6286
6287defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
6288defm FMUL  : SIMDFPIndexed<0, 0b1001, "fmul", fmul>;
6289
6290def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
6291          (FMULv2i32_indexed V64:$Rn,
6292            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
6293            (i64 0))>;
6294def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
6295          (FMULv4i32_indexed V128:$Rn,
6296            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
6297            (i64 0))>;
6298def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
6299          (FMULv2i64_indexed V128:$Rn,
6300            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
6301            (i64 0))>;
6302
6303defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
6304defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
6305
6306defm SQDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqdmulh_lane,
6307                                     int_aarch64_neon_sqdmulh_laneq>;
6308defm SQRDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqrdmulh_lane,
6309                                      int_aarch64_neon_sqrdmulh_laneq>;
6310
6311// Generated by MachineCombine
6312defm MLA   : SIMDVectorIndexedHSTied<1, 0b0000, "mla", null_frag>;
6313defm MLS   : SIMDVectorIndexedHSTied<1, 0b0100, "mls", null_frag>;
6314
6315defm MUL   : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
6316defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
6317    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
6318defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
6319    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
6320defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
6321                int_aarch64_neon_smull>;
6322defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
6323                                           int_aarch64_neon_sqadd>;
6324defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
6325                                           int_aarch64_neon_sqsub>;
6326defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
6327                                          int_aarch64_neon_sqadd>;
6328defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
6329                                          int_aarch64_neon_sqsub>;
6330defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
6331defm UMLAL   : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
6332    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
6333defm UMLSL   : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
6334    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
6335defm UMULL   : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
6336                int_aarch64_neon_umull>;
6337
6338// A scalar sqdmull with the second operand being a vector lane can be
6339// handled directly with the indexed instruction encoding.
6340def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
6341                                          (vector_extract (v4i32 V128:$Vm),
6342                                                           VectorIndexS:$idx)),
6343          (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
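// e.g. (assuming the usual ACLE lowering) vqdmulls_lane_s32(a, v, 1) can
// select
//   sqdmull d0, s1, v2.s[1]
// directly, without first extracting the lane to a scalar register.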
6344
6345//----------------------------------------------------------------------------
6346// AdvSIMD scalar shift instructions
6347//----------------------------------------------------------------------------
6348defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">;
6349defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">;
6350defm SCVTF  : SIMDFPScalarRShift<0, 0b11100, "scvtf">;
6351defm UCVTF  : SIMDFPScalarRShift<1, 0b11100, "ucvtf">;
6352// Codegen patterns for the above. We don't put these directly on the
6353// instructions because TableGen's type inference can't handle the truth.
6354// Having the same base pattern for fp <--> int totally freaks it out.
6355def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
6356          (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
6357def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
6358          (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
6359def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
6360          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
6361def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
6362          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
6363def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
6364                                            vecshiftR64:$imm)),
6365          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
6366def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
6367                                            vecshiftR64:$imm)),
6368          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
6369def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
6370          (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
6371def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
6372          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6373def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
6374                                            vecshiftR64:$imm)),
6375          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6376def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
6377          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6378def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
6379                                            vecshiftR64:$imm)),
6380          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6381def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
6382          (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
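// For illustration (assuming the usual ACLE lowering), the fixed-point
// conversions map to the immediate-shift forms, e.g.
//   vcvts_n_s32_f32(x, 3)  ==>  fcvtzs s0, s0, #3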
6383
6384// Patterns for FP16 intrinsics - these require a reg copy to/from FPR16, as i16 is not a supported type.

def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
            (and FPR32:$Rn, (i32 65535)),
            vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR32:$imm)),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FCVTZSh FPR16:$Rn, vecshiftR32:$imm),
            hsub))>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
          (i64 (INSERT_SUBREG
            (i64 (IMPLICIT_DEF)),
            (FCVTZSh FPR16:$Rn, vecshiftR64:$imm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR32:$imm)),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FCVTZUh FPR16:$Rn, vecshiftR32:$imm),
            hsub))>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR64:$imm)),
          (i64 (INSERT_SUBREG
            (i64 (IMPLICIT_DEF)),
            (FCVTZUh FPR16:$Rn, vecshiftR64:$imm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_facge (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FACGE16 FPR16:$Rn, FPR16:$Rm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_facgt (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FACGT16 FPR16:$Rn, FPR16:$Rm),
            hsub))>;

defm SHL      : SIMDScalarLShiftD<   0, 0b01010, "shl", AArch64vshl>;
defm SLI      : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
defm SQRSHRN  : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
                                     int_aarch64_neon_sqrshrn>;
defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
                                     int_aarch64_neon_sqrshrun>;
defm SQSHLU   : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL    : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN   : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
                                     int_aarch64_neon_sqshrn>;
defm SQSHRUN  : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
                                     int_aarch64_neon_sqshrun>;
defm SRI      : SIMDScalarRShiftDTied<   1, 0b01000, "sri">;
defm SRSHR    : SIMDScalarRShiftD<   0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA    : SIMDScalarRShiftDTied<   0, 0b00110, "srsra",
    TriOpFrag<(add node:$LHS,
                   (AArch64srshri node:$MHS, node:$RHS))>>;
defm SSHR     : SIMDScalarRShiftD<   0, 0b00000, "sshr", AArch64vashr>;
defm SSRA     : SIMDScalarRShiftDTied<   0, 0b00010, "ssra",
    TriOpFrag<(add node:$LHS,
                   (AArch64vashr node:$MHS, node:$RHS))>>;
defm UQRSHRN  : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
                                     int_aarch64_neon_uqrshrn>;
defm UQSHL    : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN   : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
                                     int_aarch64_neon_uqshrn>;
defm URSHR    : SIMDScalarRShiftD<   1, 0b00100, "urshr", AArch64urshri>;
defm URSRA    : SIMDScalarRShiftDTied<   1, 0b00110, "ursra",
    TriOpFrag<(add node:$LHS,
                   (AArch64urshri node:$MHS, node:$RHS))>>;
defm USHR     : SIMDScalarRShiftD<   1, 0b00000, "ushr", AArch64vlshr>;
defm USRA     : SIMDScalarRShiftDTied<   1, 0b00010, "usra",
    TriOpFrag<(add node:$LHS,
                   (AArch64vlshr node:$MHS, node:$RHS))>>;
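// For example, the scalar USRA pattern above matches an explicit
// shift-and-accumulate DAG, (add V64:$Rd, (AArch64vlshr V64:$Rn, imm)),
// and emits "usra d0, d1, #imm" (a sketch; register numbers illustrative).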

//----------------------------------------------------------------------------
// AdvSIMD vector shift instructions
//----------------------------------------------------------------------------
defm FCVTZS : SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
defm FCVTZU : SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
defm SCVTF  : SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
                                   int_aarch64_neon_vcvtfxs2fp>;
defm RSHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
                                         int_aarch64_neon_rshrn>;
defm SHL     : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
defm SHRN    : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
                          BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
defm SLI     : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", AArch64vsli>;
def : Pat<(v1i64 (AArch64vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                                      (i32 vecshiftL64:$imm))),
          (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
defm SQRSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
                                         int_aarch64_neon_sqrshrn>;
defm SQRSHRUN: SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
                                         int_aarch64_neon_sqrshrun>;
defm SQSHLU : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL  : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
                                         int_aarch64_neon_sqshrn>;
defm SQSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
                                         int_aarch64_neon_sqshrun>;
defm SRI     : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", AArch64vsri>;
def : Pat<(v1i64 (AArch64vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                                      (i32 vecshiftR64:$imm))),
          (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
defm SRSHR   : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA   : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
                 TriOpFrag<(add node:$LHS,
                                (AArch64srshri node:$MHS, node:$RHS))> >;
defm SSHLL   : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
                BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;

defm SSHR    : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
defm SSRA    : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
                TriOpFrag<(add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
defm UCVTF   : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
                        int_aarch64_neon_vcvtfxu2fp>;
defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
                                         int_aarch64_neon_uqrshrn>;
defm UQSHL   : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN  : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
                                         int_aarch64_neon_uqshrn>;
defm URSHR   : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
defm URSRA   : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
                TriOpFrag<(add node:$LHS,
                               (AArch64urshri node:$MHS, node:$RHS))> >;
defm USHLL   : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
                BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
defm USHR    : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
defm USRA    : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
                TriOpFrag<(add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;

// SHRN patterns for when a logical right shift was used instead of arithmetic
// (the immediate guarantees no sign bits actually end up in the result so it
// doesn't matter).
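// To see why: for the 16-to-8-bit narrow the shift amount is 1..8, so the
// truncated result keeps source bits [imm+7 : imm]. The positions an
// arithmetic shift would fill with sign copies all lie above bit 15-imm >= 7,
// outside the low byte kept by the truncate, so lshr and ashr produce the
// same low byte (the same reasoning applies to 32- and 64-bit sources).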
def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
          (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
          (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
          (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;

def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
                                 (trunc (AArch64vlshr (v8i16 V128:$Rn),
                                                    vecshiftR16Narrow:$imm)))),
          (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
                                 (trunc (AArch64vlshr (v4i32 V128:$Rn),
                                                    vecshiftR32Narrow:$imm)))),
          (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
                                 (trunc (AArch64vlshr (v2i64 V128:$Rn),
                                                    vecshiftR64Narrow:$imm)))),
          (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR64Narrow:$imm)>;

// Vector sign and zero extensions are implemented with SSHLL and USHLL.
// Anyexts are implemented as zexts.
def : Pat<(v8i16 (sext   (v8i8 V64:$Rn))),  (SSHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext   (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext   (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext   (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext   (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext   (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
// Also match an extend from the upper half of a 128-bit source register.
def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (sext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (SSHLLv4i32_shift V128:$Rn, (i32 0))>;
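// For example, (v8i16 (sext (v8i8 (extract_subvector V128:$Rn, (i64 8)))))
// selects the v16i8 form, which prints as "sshll2 v0.8h, v0.16b, #0" and
// reads the high half directly (a sketch; no separate extract is emitted).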

// Vector shift sxtl aliases
def : InstAlias<"sxtl.8h $dst, $src1",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.8h, $src1.8b",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.4s $dst, $src1",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.4s, $src1.4h",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.2d $dst, $src1",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.2d, $src1.2s",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift sxtl2 aliases
def : InstAlias<"sxtl2.8h $dst, $src1",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.4s $dst, $src1",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.2d $dst, $src1",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;

// Vector shift uxtl aliases
def : InstAlias<"uxtl.8h $dst, $src1",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.8h, $src1.8b",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.4s $dst, $src1",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.4s, $src1.4h",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.2d $dst, $src1",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.2d, $src1.2s",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift uxtl2 aliases
def : InstAlias<"uxtl2.8h $dst, $src1",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.4s $dst, $src1",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.2d $dst, $src1",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;

// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// These patterns are more complex because floating point loads do not
// support sign extension.
// The sign extension has to be explicitly added and is only supported for
// one step: byte-to-half, half-to-word, word-to-doubleword.
// SCVTF GPR -> FPR is 9 cycles.
// SCVTF FPR -> FPR is 4 cycles.
// (sign extension with lengthen) SXTL FPR -> FPR is 2 cycles.
// Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
// and still be faster.
// However, this is not good for code size.
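// For example, the 8-bit-to-float pattern below expands to a sketch like:
//   ldr   b0, [x0]            // load the byte straight into a SIMD register
//   sshll v0.8h, v0.8b, #0    // sign extend byte -> half
//   sshll v0.4s, v0.4h, #0    // sign extend half -> word
//   scvtf s0, s0              // convert on the FP unit
// instead of "ldrsb w8, [x0]; scvtf s0, w8" (register numbers illustrative).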
// 8-bits -> float. 2 sizes step-up.
class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                              (f64
                                (EXTRACT_SUBREG
                                  (SSHLLv8i8_shift
                                    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                        INST,
                                        bsub),
                                    0),
                                  dsub)),
                               0),
                             ssub)))>,
    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;

def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
                          (LDRBroW  GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
                          (LDRBroX  GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
                          (LDURBi GPR64sp:$Rn, simm9:$offset)>;

// 16-bits -> float. 1 size step-up.
class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                  INST,
                                  hsub),
                                0),
                            ssub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW   GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX   GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;

// 32-bits to 32-bits are handled in target specific dag combine:
// performIntToFpCombine.
// A 64-bit integer to 32-bit floating point conversion is not possible with
// SCVTF on floating point registers (both source and destination
// must have the same size).

// Here are the patterns for 8, 16, 32, and 64-bits to double.
// 8-bits -> double. 3 size step-up: give up.
// 16-bits -> double. 2 size steps up.
class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
  : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
                              (SSHLLv2i32_shift
                                 (f64
                                  (EXTRACT_SUBREG
                                    (SSHLLv4i16_shift
                                      (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                        INST,
                                        hsub),
                                     0),
                                   dsub)),
                               0),
                             dsub)))>,
    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;

def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
// 32-bits -> double. 1 size step-up.
class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
  : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
                              (SSHLLv2i32_shift
                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                  INST,
                                  ssub),
                               0),
                             dsub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
                           (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
                           (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
                           (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
                           (LDURSi GPR64sp:$Rn, simm9:$offset)>;

// 64-bits -> double are handled in target specific dag combine:
// performIntToFpCombine.


//----------------------------------------------------------------------------
// AdvSIMD Load-Store Structure
//----------------------------------------------------------------------------
defm LD1 : SIMDLd1Multiple<"ld1">;
defm LD2 : SIMDLd2Multiple<"ld2">;
defm LD3 : SIMDLd3Multiple<"ld3">;
defm LD4 : SIMDLd4Multiple<"ld4">;

defm ST1 : SIMDSt1Multiple<"st1">;
defm ST2 : SIMDSt2Multiple<"st2">;
defm ST3 : SIMDSt3Multiple<"st3">;
defm ST4 : SIMDSt4Multiple<"st4">;

class Ld1Pat<ValueType ty, Instruction INST>
  : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;

def : Ld1Pat<v16i8, LD1Onev16b>;
def : Ld1Pat<v8i16, LD1Onev8h>;
def : Ld1Pat<v4i32, LD1Onev4s>;
def : Ld1Pat<v2i64, LD1Onev2d>;
def : Ld1Pat<v8i8,  LD1Onev8b>;
def : Ld1Pat<v4i16, LD1Onev4h>;
def : Ld1Pat<v2i32, LD1Onev2s>;
def : Ld1Pat<v1i64, LD1Onev1d>;

class St1Pat<ValueType ty, Instruction INST>
  : Pat<(store ty:$Vt, GPR64sp:$Rn),
        (INST ty:$Vt, GPR64sp:$Rn)>;

def : St1Pat<v16i8, ST1Onev16b>;
def : St1Pat<v8i16, ST1Onev8h>;
def : St1Pat<v4i32, ST1Onev4s>;
def : St1Pat<v2i64, ST1Onev2d>;
def : St1Pat<v8i8,  ST1Onev8b>;
def : St1Pat<v4i16, ST1Onev4h>;
def : St1Pat<v2i32, ST1Onev2s>;
def : St1Pat<v1i64, ST1Onev1d>;

//---
// Single-element
//---

defm LD1R          : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
defm LD2R          : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
defm LD3R          : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
defm LD4R          : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
let mayLoad = 1, hasSideEffects = 0 in {
defm LD1 : SIMDLdSingleBTied<0, 0b000,       "ld1", VecListOneb,   GPR64pi1>;
defm LD1 : SIMDLdSingleHTied<0, 0b010, 0,    "ld1", VecListOneh,   GPR64pi2>;
defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes,   GPR64pi4>;
defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned,   GPR64pi8>;
defm LD2 : SIMDLdSingleBTied<1, 0b000,       "ld2", VecListTwob,   GPR64pi2>;
defm LD2 : SIMDLdSingleHTied<1, 0b010, 0,    "ld2", VecListTwoh,   GPR64pi4>;
defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos,   GPR64pi8>;
defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod,   GPR64pi16>;
defm LD3 : SIMDLdSingleBTied<0, 0b001,       "ld3", VecListThreeb, GPR64pi3>;
defm LD3 : SIMDLdSingleHTied<0, 0b011, 0,    "ld3", VecListThreeh, GPR64pi6>;
defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
defm LD4 : SIMDLdSingleBTied<1, 0b001,       "ld4", VecListFourb,  GPR64pi4>;
defm LD4 : SIMDLdSingleHTied<1, 0b011, 0,    "ld4", VecListFourh,  GPR64pi8>;
defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours,  GPR64pi16>;
defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd,  GPR64pi32>;
}

def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
          (LD1Rv8b GPR64sp:$Rn)>;
def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
          (LD1Rv16b GPR64sp:$Rn)>;
def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;
def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
          (LD1Rv2s GPR64sp:$Rn)>;
def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
          (LD1Rv4s GPR64sp:$Rn)>;
def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
          (LD1Rv2d GPR64sp:$Rn)>;
def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
          (LD1Rv1d GPR64sp:$Rn)>;
// Grab the floating point version too
def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
          (LD1Rv2s GPR64sp:$Rn)>;
def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
          (LD1Rv4s GPR64sp:$Rn)>;
def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
          (LD1Rv2d GPR64sp:$Rn)>;
def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
          (LD1Rv1d GPR64sp:$Rn)>;
def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;
def : Pat<(v4bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;

class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne128:$Rd),
           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;

def : Ld1Lane128Pat<extloadi8,  VectorIndexB, v16i8, i32, LD1i8>;
def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4i32, i32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4f32, f32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2i64, i64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2f64, f64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexH, v8f16, f16, LD1i16>;
def : Ld1Lane128Pat<load,       VectorIndexH, v8bf16, bf16, LD1i16>;

// Generate LD1 for extload if the memory type does not match the
// destination type, for example:
//
//   (v4i32 (insert_vector_elt (load anyext from i8) idx))
//
// In this case, the index must be adjusted to match the LD1 lane type.
//
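// Worked example (a sketch of the transforms below): inserting an i8 extload
// into lane 1 of a v4i32 uses LD1i8 at byte lane 1 * 4 = 4, the first byte of
// the .s[1] lane on little-endian. The other bytes of that lane keep whatever
// was there, which is fine because an anyextending load leaves them undefined.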
class Ld1Lane128IdxOpPat<SDPatternOperator scalar_load, Operand
                    VecIndex, ValueType VTy, ValueType STy,
                    Instruction LD1, SDNodeXForm IdxOp>
  : Pat<(vector_insert (VTy VecListOne128:$Rd),
                       (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (LD1 VecListOne128:$Rd, (IdxOp VecIndex:$idx), GPR64sp:$Rn)>;

def VectorIndexStoH : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() * 2, SDLoc(N), MVT::i64);
}]>;
def VectorIndexStoB : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() * 4, SDLoc(N), MVT::i64);
}]>;
def VectorIndexHtoB : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() * 2, SDLoc(N), MVT::i64);
}]>;

def : Ld1Lane128IdxOpPat<extloadi16, VectorIndexS, v4i32, i32, LD1i16, VectorIndexStoH>;
def : Ld1Lane128IdxOpPat<extloadi8, VectorIndexS, v4i32, i32, LD1i8, VectorIndexStoB>;
def : Ld1Lane128IdxOpPat<extloadi8, VectorIndexH, v8i16, i32, LD1i8, VectorIndexHtoB>;

// Same as above, but the first element is populated using
// scalar_to_vector + insert_subvector instead of insert_vector_elt.
class Ld1Lane128FirstElm<ValueType ResultTy, ValueType VecTy,
                        SDPatternOperator ExtLoad, Instruction LD1>
  : Pat<(ResultTy (scalar_to_vector (i32 (ExtLoad GPR64sp:$Rn)))),
          (ResultTy (EXTRACT_SUBREG
            (LD1 (VecTy (IMPLICIT_DEF)), 0, GPR64sp:$Rn), dsub))>;

def : Ld1Lane128FirstElm<v2i32, v8i16, extloadi16, LD1i16>;
def : Ld1Lane128FirstElm<v2i32, v16i8, extloadi8, LD1i8>;
def : Ld1Lane128FirstElm<v4i16, v16i8, extloadi8, LD1i8>;

class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne64:$Rd),
           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (EXTRACT_SUBREG
            (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
                          VecIndex:$idx, GPR64sp:$Rn),
            dsub)>;

def : Ld1Lane64Pat<extloadi8,  VectorIndexB, v8i8,  i32, LD1i8>;
def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2i32, i32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2f32, f32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexH, v4f16, f16, LD1i16>;
def : Ld1Lane64Pat<load,       VectorIndexH, v4bf16, bf16, LD1i16>;


defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
defm LD4 : SIMDLdSt4SingleAliases<"ld4">;

// Stores
defm ST1 : SIMDStSingleB<0, 0b000,       "st1", VecListOneb, GPR64pi1>;
defm ST1 : SIMDStSingleH<0, 0b010, 0,    "st1", VecListOneh, GPR64pi2>;
defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;

let AddedComplexity = 19 in
class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane128Pat<truncstorei8,  VectorIndexB, v16i8, i32, ST1i8>;
def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
def : St1Lane128Pat<store,         VectorIndexS, v4i32, i32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexS, v4f32, f32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexD, v2i64, i64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexD, v2f64, f64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexH, v8f16, f16, ST1i16>;
def : St1Lane128Pat<store,         VectorIndexH, v8bf16, bf16, ST1i16>;

let AddedComplexity = 19 in
class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane64Pat<truncstorei8,  VectorIndexB, v8i8, i32, ST1i8>;
def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
def : St1Lane64Pat<store,         VectorIndexS, v2i32, i32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexS, v2f32, f32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexH, v4f16, f16, ST1i16>;
def : St1Lane64Pat<store,         VectorIndexH, v4bf16, bf16, ST1i16>;

multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                             ValueType VTy, ValueType STy, Instruction ST1,
                             int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
                        2>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
defm : St1LanePost64Pat<post_store, VectorIndexH, v4bf16, bf16, ST1i16_POST, 2>;

multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                             ValueType VTy, ValueType STy, Instruction ST1,
                             int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
                         1>;
defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
                         2>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
defm : St1LanePost128Pat<post_store, VectorIndexH, v8bf16, bf16, ST1i16_POST, 2>;

let mayStore = 1, hasSideEffects = 0 in {
defm ST2 : SIMDStSingleB<1, 0b000,       "st2", VecListTwob,   GPR64pi2>;
defm ST2 : SIMDStSingleH<1, 0b010, 0,    "st2", VecListTwoh,   GPR64pi4>;
defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos,   GPR64pi8>;
defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod,   GPR64pi16>;
defm ST3 : SIMDStSingleB<0, 0b001,       "st3", VecListThreeb, GPR64pi3>;
defm ST3 : SIMDStSingleH<0, 0b011, 0,    "st3", VecListThreeh, GPR64pi6>;
defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
defm ST4 : SIMDStSingleB<1, 0b001,       "st4", VecListFourb,  GPR64pi4>;
defm ST4 : SIMDStSingleH<1, 0b011, 0,    "st4", VecListFourh,  GPR64pi8>;
defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours,  GPR64pi16>;
defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd,  GPR64pi32>;
}

defm ST1 : SIMDLdSt1SingleAliases<"st1">;
defm ST2 : SIMDLdSt2SingleAliases<"st2">;
defm ST3 : SIMDLdSt3SingleAliases<"st3">;
defm ST4 : SIMDLdSt4SingleAliases<"st4">;

//----------------------------------------------------------------------------
// Crypto extensions
//----------------------------------------------------------------------------

let Predicates = [HasAES] in {
def AESErr   : AESTiedInst<0b0100, "aese",   int_aarch64_crypto_aese>;
def AESDrr   : AESTiedInst<0b0101, "aesd",   int_aarch64_crypto_aesd>;
def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
}

// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
// for AES fusion on some CPUs.
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def AESMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
                        Sched<[WriteVq]>;
def AESIMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
                         Sched<[WriteVq]>;
}

// Only use constrained versions of AES(I)MC instructions if they are paired with
// AESE/AESD.
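// For example, keeping "aese v0.16b, v1.16b" and "aesmc v0.16b, v0.16b" on
// the same register lets fusion-capable cores issue the pair as one macro-op
// (a sketch of the intent; actual fusion is microarchitecture-specific).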
def : Pat<(v16i8 (int_aarch64_crypto_aesmc
            (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
                                            (v16i8 V128:$src2))))),
          (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1),
                                             (v16i8 V128:$src2)))))>,
          Requires<[HasFuseAES]>;

def : Pat<(v16i8 (int_aarch64_crypto_aesimc
            (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
                                            (v16i8 V128:$src2))))),
          (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1),
                                              (v16i8 V128:$src2)))))>,
          Requires<[HasFuseAES]>;

let Predicates = [HasSHA2] in {
def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c",   int_aarch64_crypto_sha1c>;
def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p",   int_aarch64_crypto_sha1p>;
def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m",   int_aarch64_crypto_sha1m>;
def SHA1SU0rrr   : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
def SHA256Hrrr   : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
def SHA256H2rrr  : SHATiedInstQQV<0b101, "sha256h2",int_aarch64_crypto_sha256h2>;
def SHA256SU1rrr : SHATiedInstVVV<0b110, "sha256su1",int_aarch64_crypto_sha256su1>;

def SHA1Hrr     : SHAInstSS<    0b0000, "sha1h",    int_aarch64_crypto_sha1h>;
def SHA1SU1rr   : SHATiedInstVV<0b0001, "sha1su1",  int_aarch64_crypto_sha1su1>;
def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",int_aarch64_crypto_sha256su0>;
}

//----------------------------------------------------------------------------
// Compiler-pseudos
//----------------------------------------------------------------------------
// FIXME: Like for X86, these should go in their own separate .td file.

def def32 : PatLeaf<(i32 GPR32:$src), [{
  return isDef32(*N);
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;
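// For example, if $src is defined by "add w8, w9, w10", the zext to i64 is
// free: a 32-bit arithmetic result already has bits [63:32] cleared, so only
// a subregister copy is needed (a sketch; isDef32 checks the defining node).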

// For an anyext, we don't care what the high bits are, so we can perform an
// INSERT_SUBREG into an IMPLICIT_DEF.
def : Pat<(i64 (anyext GPR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;

// When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
// then assert the extension has happened.
def : Pat<(i64 (zext GPR32:$src)),
          (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
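// For example, (i64 (zext GPR32:$src)) emits "mov w8, w0" (an ORR with WZR);
// writing a W register zeroes the upper 32 bits, and SUBREG_TO_REG records
// that fact for later passes (a sketch; register numbers illustrative).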
7109
7110// To sign extend, we use a signed bitfield move instruction (SBFM) on the
7111// containing super-reg.
7112def : Pat<(i64 (sext GPR32:$src)),
7113   (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
7114def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
7115def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
7116def : Pat<(i64 (sext_inreg GPR64:$src, i8)),  (SBFMXri GPR64:$src, 0, 7)>;
7117def : Pat<(i64 (sext_inreg GPR64:$src, i1)),  (SBFMXri GPR64:$src, 0, 0)>;
7118def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
7119def : Pat<(i32 (sext_inreg GPR32:$src, i8)),  (SBFMWri GPR32:$src, 0, 7)>;
7120def : Pat<(i32 (sext_inreg GPR32:$src, i1)),  (SBFMWri GPR32:$src, 0, 0)>;
7121
7122def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
7123          (SBFMWri GPR32:$Rn, (i64 (i32shift_a       imm0_31:$imm)),
7124                              (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
7125def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
7126          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
7127                              (i64 (i64shift_sext_i8 imm0_63:$imm)))>;
7128
7129def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
7130          (SBFMWri GPR32:$Rn, (i64 (i32shift_a        imm0_31:$imm)),
7131                              (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
7132def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
7133          (SBFMXri GPR64:$Rn, (i64 (i64shift_a        imm0_63:$imm)),
7134                              (i64 (i64shift_sext_i16 imm0_63:$imm)))>;
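// Worked example of the transforms above: (shl (sext_inreg GPR32:$Rn, i8), 3)
// becomes SBFMWri with immr = (32 - 3) % 32 = 29 and imms = 7, which the
// assembler prints as "sbfiz w0, w0, #3, #8" (registers illustrative).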

def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 (i64shift_a        imm0_63:$imm)),
                   (i64 (i64shift_sext_i32 imm0_63:$imm)))>;

// sra patterns have an AddedComplexity of 10, so make sure we have a higher
// AddedComplexity for the following patterns since we want to match sext + sra
// patterns before we attempt to match a single sra node.
let AddedComplexity = 20 in {
// We support all sext + sra combinations which preserve at least one bit of the
// original value which is to be sign extended. E.g. we support shifts up to
// bitwidth-1 bits.
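// For example, (sra (sext_inreg GPR32:$Rn, i8), 3) is matched below as
// SBFMWri $Rn, 3, 7, i.e. "sbfx w0, w0, #3, #5": only a 5-bit signed field
// survives the shift (registers illustrative).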
def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;

def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;

def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 imm0_31:$imm), 31)>;
} // AddedComplexity = 20

// To truncate, we can simply extract from a subregister.
def : Pat<(i32 (trunc GPR64sp:$src)),
          (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;

// __builtin_trap() uses the BRK instruction on AArch64.
def : Pat<(trap), (BRK 1)>;
def : Pat<(debugtrap), (BRK 0xF000)>;

def ubsan_trap_xform : SDNodeXForm<timm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() | ('U' << 8), SDLoc(N), MVT::i32);
}]>;

def ubsan_trap_imm : TImmLeaf<i32, [{
  return isUInt<8>(Imm);
}], ubsan_trap_xform>;

def : Pat<(ubsantrap ubsan_trap_imm:$kind), (BRK ubsan_trap_imm:$kind)>;
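// For example, a ubsantrap with kind 0x2A encodes as BRK #0x552A: the 8-bit
// kind is OR'd with 'U' (0x55) shifted into the upper byte by
// ubsan_trap_xform above, so a handler can recognize UBSan traps.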

// Multiply high patterns which multiply the lower subvector using smull/umull
// and the upper subvector with smull2/umull2. Then shuffle together the high
// parts of both results.
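// For example, (v4i32 (mulhs V128:$Rn, V128:$Rm)) becomes (a sketch):
//   smull  v2.2d, v0.2s, v1.2s    // low halves -> 64-bit products
//   smull2 v3.2d, v0.4s, v1.4s    // high halves -> 64-bit products
//   uzp2   v0.4s, v2.4s, v3.4s    // keep the odd (high) 32-bit words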
def : Pat<(v16i8 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (SMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (SMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (SMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;

def : Pat<(v16i8 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (UMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (UMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (UMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;

// Conversions within AdvSIMD types in the same register size are free.
// But because we need a consistent lane ordering, in big endian many
// conversions require one or more REV instructions.
//
// Consider a simple memory load followed by a bitconvert then a store.
//   v0 = load v2i32
//   v1 = BITCAST v2i32 v0 to v4i16
//        store v4i16 v1
7224//
7225// In big endian mode every memory access has an implicit byte swap. LDR and
7226// STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
7227// is, they treat the vector as a sequence of elements to be byte-swapped.
7228// The two pairs of instructions are fundamentally incompatible. We've decided
7229// to use LD1/ST1 only to simplify compiler implementation.
7230//
7231// LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
7232// the original code sequence:
7233//   v0 = load v2i32
7234//   v1 = REV v2i32                  (implicit)
7235//   v2 = BITCAST v2i32 v1 to v4i16
7236//   v3 = REV v4i16 v2               (implicit)
7237//        store v4i16 v3
7238//
7239// But this is now broken - the value stored is different to the value loaded
7240// due to lane reordering. To fix this, on every BITCAST we must perform two
7241// other REVs:
7242//   v0 = load v2i32
7243//   v1 = REV v2i32                  (implicit)
7244//   v2 = REV v2i32
7245//   v3 = BITCAST v2i32 v2 to v4i16
7246//   v4 = REV v4i16
7247//   v5 = REV v4i16 v4               (implicit)
7248//        store v4i16 v5
7249//
7250// This means an extra two instructions, but actually in most cases the two REV
7251// instructions can be combined into one. For example:
7252//   (REV64_2s (REV64_4h X)) === (REV32_4h X)
7253//
7254// There is also no 128-bit REV instruction. This must be synthesized with an
7255// EXT instruction.
7256//
7257// Most bitconverts require some sort of conversion. The only exceptions are:
7258//   a) Identity conversions -  vNfX <-> vNiX
7259//   b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
7260//
7261
7262// Natural vector casts (64 bit)
7263def : Pat<(v8i8 (AArch64NvCast (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
7264def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
7265def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
7266def : Pat<(v4bf16 (AArch64NvCast (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
7267def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
7268def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
7269def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
7270
7271def : Pat<(v8i8 (AArch64NvCast (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
7272def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
7273def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
7274def : Pat<(v4bf16 (AArch64NvCast (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;
7275def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
7276def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
7277
7278def : Pat<(v8i8 (AArch64NvCast (v8i8 FPR64:$src))), (v8i8 FPR64:$src)>;
7279def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
7280def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
7281def : Pat<(v4bf16 (AArch64NvCast (v8i8 FPR64:$src))), (v4bf16 FPR64:$src)>;
7282def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
7283def : Pat<(v2f32 (AArch64NvCast (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
7284def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
7285
7286def : Pat<(v8i8 (AArch64NvCast (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
7287def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
7288def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
7289def : Pat<(v4bf16 (AArch64NvCast (f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
7290def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
7291def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
7292def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
7293def : Pat<(v1f64 (AArch64NvCast (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
7294
7295def : Pat<(v8i8 (AArch64NvCast (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
7296def : Pat<(v4i16 (AArch64NvCast (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
7297def : Pat<(v2i32 (AArch64NvCast (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
7298def : Pat<(v2f32 (AArch64NvCast (v2f32 FPR64:$src))), (v2f32 FPR64:$src)>;
7299def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
7300def : Pat<(v1f64 (AArch64NvCast (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
7301
7302// Natural vector casts (128 bit)
7303def : Pat<(v16i8 (AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
7304def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
7305def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
7306def : Pat<(v8bf16 (AArch64NvCast (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7307def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>;
7308def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
7309def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
7310def : Pat<(v2f64 (AArch64NvCast (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
7311
7312def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
7313def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>;
7314def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
7315def : Pat<(v8bf16 (AArch64NvCast (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;
7316def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
7317def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
7318def : Pat<(v4f32 (AArch64NvCast (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
7319def : Pat<(v2f64 (AArch64NvCast (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
7320
7321def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>;
7322def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
7323def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
7324def : Pat<(v8bf16 (AArch64NvCast (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
7325def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
7326def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
7327def : Pat<(v4f32 (AArch64NvCast (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
7328def : Pat<(v2f64 (AArch64NvCast (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
7329
7330def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
7331def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
7332def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
7333def : Pat<(v8bf16 (AArch64NvCast (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7334def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
7335def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>;
7336def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
7337def : Pat<(v2f64 (AArch64NvCast (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
7338
7339def : Pat<(v16i8 (AArch64NvCast (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
7340def : Pat<(v8i16 (AArch64NvCast (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
7341def : Pat<(v4i32 (AArch64NvCast (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
7342def : Pat<(v4f32 (AArch64NvCast (v4f32 FPR128:$src))), (v4f32 FPR128:$src)>;
7343def : Pat<(v2i64 (AArch64NvCast (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
7344def : Pat<(v8f16 (AArch64NvCast (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
7345def : Pat<(v8bf16 (AArch64NvCast (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7346def : Pat<(v2f64 (AArch64NvCast (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
7347
7348def : Pat<(v16i8 (AArch64NvCast (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
7349def : Pat<(v8i16 (AArch64NvCast (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
7350def : Pat<(v4i32 (AArch64NvCast (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
7351def : Pat<(v2i64 (AArch64NvCast (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
7352def : Pat<(v2f64 (AArch64NvCast (v2f64 FPR128:$src))), (v2f64 FPR128:$src)>;
7353def : Pat<(v8f16 (AArch64NvCast (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
7354def : Pat<(v8bf16 (AArch64NvCast (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7355def : Pat<(v4f32 (AArch64NvCast (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
7356
let Predicates = [IsLE] in {
def : Pat<(v8i8  (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4bf16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;

def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8  (bitconvert GPR64:$Xn)),
                 (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4bf16 (bitconvert GPR64:$Xn)),
                  (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;

def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
          (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
}
def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;

def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
          (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
          (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
          (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;

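// Bitconverts that stay within an FPR64 are no-ops on little-endian targets.
// On big-endian targets they become a REV whose container size is the larger
// of the two element sizes and whose arrangement is the smaller one, e.g.
// v4i16 <-> v8i8 uses REV16v8i8 and v2i32 <-> v4i16 uses REV32v4i16.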
let Predicates = [IsLE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
                             (v1i64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
                             (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))),
                             (v1i64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
                             (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))),
                             (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
                             (v1i64 (REV64v2i32 FPR64:$src))>;
}
def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))), (v2i32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
                             (v2i32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))),
                             (v2i32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
                             (v2i32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))),
                             (v2i32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
                             (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))),
                             (v4i16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
                             (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v4bf16 FPR64:$src))), (v4i16 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;

def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
                             (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))),
                             (v4f16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
                             (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;

def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))),
                             (v4bf16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))),
                             (v4bf16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))),
                             (v4bf16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))),
                             (v4bf16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))),
                             (v4bf16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))),
                             (v4bf16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))), (v8i8  FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))),
                             (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))),
                             (v8i8 (REV16v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))),
                             (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))),
                             (v8i8 (REV16v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))),
                             (v8i8 (REV16v8i8 FPR64:$src))>;
}

let Predicates = [IsLE] in {
def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))), (f64   FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))),
                             (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))),
                             (f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))),
                             (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))),
                             (f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))),
                             (f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))),
                             (f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(f64   (bitconvert (v1i64 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v1f64 FPR64:$src))), (f64   FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))), (v1f64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
                             (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
                             (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))),
                             (v1f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
                             (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
                             (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))),
                             (v1f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (f64   FPR64:$src))), (v1f64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))), (v2f32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
                             (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))),
                             (v2f32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
                             (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))),
                             (v2f32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;

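// The same applies to the 128-bit registers, with one twist: REV can reverse
// at most within a 64-bit doubleword, so bitconverts involving f128 (whose
// byte swap spans all 16 bytes) additionally exchange the two doublewords
// with EXT #8, i.e. (EXTv16i8 $src, $src, (i32 8)).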
let Predicates = [IsLE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
                                            (REV64v16i8 FPR128:$src), (i32 8)))>;
}

let Predicates = [IsLE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))),
                             (v2f64 (EXTv16i8 FPR128:$src,
                                              FPR128:$src, (i32 8)))>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
                             (v2f64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
                             (v2f64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
                             (v2f64 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))),
                             (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                    (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
                             (v4f32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
                             (v4f32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
                             (v4f32 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))), (v2i64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))),
                             (v2i64 (EXTv16i8 FPR128:$src,
                                              FPR128:$src, (i32 8)))>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
                             (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
                             (v2i64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
                             (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
}
def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))), (v4i32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))),
                             (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                              (REV64v4i32 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
                             (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
                             (v4i32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
                             (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))),
                             (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
                             (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
                             (v8i16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
                             (v8i16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
                             (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
                             (v8i16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v8bf16 FPR128:$src))), (v8i16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;

def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))),
                             (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
                             (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
                             (v8f16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
                             (v8f16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
                             (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
                             (v8f16 (REV32v8i16 FPR128:$src))>;

def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))),
                             (v8bf16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))),
                             (v8bf16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))),
                             (v8bf16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))),
                             (v8bf16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))),
                             (v8bf16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))),
                             (v8bf16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))), (v16i8 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))),
                             (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
                                              (REV64v16i8 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
                             (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
                             (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
                             (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
                             (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
}

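// Extracting the low 64-bit half of a 128-bit vector is just a dsub
// subregister copy. The high half is extracted by broadcasting lane 1 of
// the .2d view with DUP and taking the low half of the result.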
def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4bf16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;

def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;

// A 64-bit subvector insert to the first 128-bit vector position
// is a subregister copy that needs no instruction.
multiclass InsertSubvectorUndef<ValueType Ty> {
  def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4bf16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
}

defm : InsertSubvectorUndef<i32>;
defm : InsertSubvectorUndef<i64>;

// Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
// or v2f32.
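// e.g. (i64 (add (extractelt (v2i64 V), 0), (extractelt (v2i64 V), 1)))
// becomes a single "addp d0, v0.2d".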
def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
                    (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
           (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
                     (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
           (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
// vector_extract on 64-bit vectors gets promoted to a 128-bit vector,
// so we match on v4f32 here, not v2f32. This will also catch adding
// the low two lanes of a true v4f32 vector.
def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
                (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
          (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
def : Pat<(fadd (vector_extract (v8f16 FPR128:$Rn), (i64 0)),
                (vector_extract (v8f16 FPR128:$Rn), (i64 1))),
          (f16 (FADDPv2i16p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;

// Scalar 64-bit shifts in FPR64 registers.
def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;

// Patterns for nontemporal/no-allocate stores.
// We have to resort to tricks to turn a single-input store into a store pair,
// because there is no single-input nontemporal store, only STNP.
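// For example, a v2i64 nontemporal store is emitted roughly as:
//   mov  d1, v0.d[1]
//   stnp d0, d1, [x0, #offset]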
let Predicates = [IsLE] in {
let AddedComplexity = 15 in {
class NTStore128Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR128:$Rt),
        (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
      (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
              (CPYi64 FPR128:$Rt, (i64 1)),
              GPR64sp:$Rn, simm7s8:$offset)>;

def : NTStore128Pat<v2i64>;
def : NTStore128Pat<v4i32>;
def : NTStore128Pat<v8i16>;
def : NTStore128Pat<v16i8>;

class NTStore64Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR64:$Rt),
        (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
      (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
              (CPYi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
              GPR64sp:$Rn, simm7s4:$offset)>;

// FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
def : NTStore64Pat<v1f64>;
def : NTStore64Pat<v1i64>;
def : NTStore64Pat<v2i32>;
def : NTStore64Pat<v4i16>;
def : NTStore64Pat<v8i8>;

def : Pat<(nontemporalstore GPR64:$Rt,
            (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
          (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
                  GPR64sp:$Rn, simm7s4:$offset)>;
} // AddedComplexity=15
} // Predicates = [IsLE]

// Tail call return handling. These are all compiler pseudo-instructions,
// so no encoding information or anything like that.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
  def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  // Indirect tail-call with any register allowed, used by the MachineOutliner
  // when this is proven safe.
  // FIXME: If we have to add any more hacks like this, we should instead relax
  // some verifier checks for outlined functions.
  def TCRETURNriALL : Pseudo<(outs), (ins GPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
  // Indirect tail-call limited to the registers (x16 and x17) that are
  // allowed to tail-call a "BTI c" instruction.
  def TCRETURNriBTI : Pseudo<(outs), (ins rtcGPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
}

def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>,
      Requires<[NotUseBTI]>;
def : Pat<(AArch64tcret rtcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNriBTI rtcGPR64:$dst, imm:$FPDiff)>,
      Requires<[UseBTI]>;
def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;

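// MOVMCSym materializes the value of an MCSymbol into a GPR; it is the
// selection target for the AArch64LocalRecover node below.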
def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;

// Extracting lane zero is a special case where we can just use a plain
// EXTRACT_SUBREG instruction, which will become FMOV. This is easier for the
// rest of the compiler, especially the register allocator and copy propagation,
// to reason about, so is preferred when it's possible to use it.
let AddedComplexity = 10 in {
  def : Pat<(i64 (extractelt (v2i64 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, dsub)>;
  def : Pat<(i32 (extractelt (v4i32 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, ssub)>;
  def : Pat<(i32 (extractelt (v2i32 V64:$V), (i64 0))), (EXTRACT_SUBREG V64:$V, ssub)>;
}

// dot_v4i8
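// The fragments below match a fully unrolled dot product of four i8 values
// loaded byte-by-byte from GPR64sp:$Rn and GPR64sp:$Rm, and reimplement it
// as one 32-bit word load per operand feeding an SDOT/UDOT whose accumulator
// is zeroed by a DUP of WZR; the i32 result is read back from lane 0.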
class mul_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm, node:$offset),
          (mul (ldop (add node:$Rn, node:$offset)),
               (ldop (add node:$Rm, node:$offset)))>;
class mulz_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm),
          (mul (ldop node:$Rn), (ldop node:$Rm))>;

def load_v4i8 :
  OutPatFrag<(ops node:$R),
             (INSERT_SUBREG
              (v2i32 (IMPLICIT_DEF)),
               (i32 (COPY_TO_REGCLASS (LDRWui node:$R, (i64 0)), FPR32)),
              ssub)>;

class dot_v4i8<Instruction DOT, SDPatternOperator ldop> :
  Pat<(i32 (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 3)),
           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 2)),
           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 1)),
                (mulz_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm))))),
      (EXTRACT_SUBREG (i64 (DOT (DUPv2i32gpr WZR),
                                (load_v4i8 GPR64sp:$Rn),
                                (load_v4i8 GPR64sp:$Rm))),
                      sub_32)>, Requires<[HasDotProd]>;

// dot_v8i8
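// Likewise for v8i8: match the widening multiplies of the low and high
// halves summed by an AArch64uaddv add-across, and select a single
// SDOT/UDOT followed by an ADDP that folds the two accumulator lanes.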
class ee_v8i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K),
          (v4i16 (extract_subvector (v8i16 (extend node:$V)), node:$K))>;

class mul_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K),
          (mulop (v4i16 (ee_v8i8<extend> node:$M, node:$K)),
                 (v4i16 (ee_v8i8<extend> node:$N, node:$K)))>;

class idot_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 0)),
                 (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 4))))),
           (i64 0)))>;

// vaddv_[su]32 is special: it lowers to ADDP Vd.2S, Vn.2S, Vm.2S with
// Vn == Vm, and the result is read from Vd.s[0].
def VADDV_32 : OutPatFrag<(ops node:$R), (ADDPv2i32 node:$R, node:$R)>;

class odot_v8i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (EXTRACT_SUBREG
              (VADDV_32
               (i64 (DOT (DUPv2i32gpr WZR),
                         (v8i8 node:$Vm),
                         (v8i8 node:$Vn)))),
              sub_32)>;

class dot_v8i8<Instruction DOT, SDPatternOperator mulop,
               SDPatternOperator extend> :
  Pat<(idot_v8i8<mulop, extend> V64:$Vm, V64:$Vn),
      (odot_v8i8<DOT> V64:$Vm, V64:$Vn)>,
  Requires<[HasDotProd]>;

// dot_v16i8
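// The v16i8 variant matches the same structure split across four v4i16
// quarters and selects a v4i32 SDOT/UDOT plus an ADDV reduction.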
class ee_v16i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K1, node:$K2),
          (v4i16 (extract_subvector
           (v8i16 (extend
            (v8i8 (extract_subvector node:$V, node:$K1)))), node:$K2))>;

class mul_v16i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K1, node:$K2),
          (v4i32
           (mulop (v4i16 (ee_v16i8<extend> node:$M, node:$K1, node:$K2)),
                  (v4i16 (ee_v16i8<extend> node:$N, node:$K1, node:$K2))))>;

class idot_v16i8<SDPatternOperator m, SDPatternOperator x> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 0)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 0))),
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 4)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 4)))))),
           (i64 0)))>;

class odot_v16i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (i32 (ADDVv4i32v
              (DOT (DUPv4i32gpr WZR), node:$Vm, node:$Vn)))>;

class dot_v16i8<Instruction DOT, SDPatternOperator mulop,
                SDPatternOperator extend> :
  Pat<(idot_v16i8<mulop, extend> V128:$Vm, V128:$Vn),
      (odot_v16i8<DOT> V128:$Vm, V128:$Vn)>,
  Requires<[HasDotProd]>;

let AddedComplexity = 10 in {
  def : dot_v4i8<SDOTv8i8, sextloadi8>;
  def : dot_v4i8<UDOTv8i8, zextloadi8>;
  def : dot_v8i8<SDOTv8i8, AArch64smull, sext>;
  def : dot_v8i8<UDOTv8i8, AArch64umull, zext>;
  def : dot_v16i8<SDOTv16i8, AArch64smull, sext>;
  def : dot_v16i8<UDOTv16i8, AArch64umull, zext>;

  // FIXME: add patterns to generate vector by element dot product.
  // FIXME: add SVE dot-product patterns.
}

// Custom DAG nodes and isel rules to make a 64-byte block out of eight GPRs,
// so that it can be used as input to inline asm, and vice versa.
def LS64_BUILD : SDNode<"AArch64ISD::LS64_BUILD", SDTypeProfile<1, 8, []>>;
def LS64_EXTRACT : SDNode<"AArch64ISD::LS64_EXTRACT", SDTypeProfile<1, 2, []>>;
def : Pat<(i64x8 (LS64_BUILD GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3,
                             GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7)),
          (REG_SEQUENCE GPR64x8Class,
              $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3,
              $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7)>;
foreach i = 0-7 in {
  def : Pat<(i64 (LS64_EXTRACT (i64x8 GPR64x8:$val), (i32 i))),
            (EXTRACT_SUBREG $val, !cast<SubRegIndex>("x8sub_"#i))>;
}

let Predicates = [HasLS64] in {
  def LD64B: LoadStore64B<0b101, "ld64b", (ins GPR64sp:$Rn),
                                          (outs GPR64x8:$Rt)>;
  def ST64B: LoadStore64B<0b001, "st64b", (ins GPR64x8:$Rt, GPR64sp:$Rn),
                                          (outs)>;
  def ST64BV:   Store64BV<0b011, "st64bv">;
  def ST64BV0:  Store64BV<0b010, "st64bv0">;

  class ST64BPattern<Intrinsic intrinsic, Instruction instruction>
    : Pat<(intrinsic GPR64sp:$addr, GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3, GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7),
          (instruction (REG_SEQUENCE GPR64x8Class, $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3, $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7), $addr)>;

  def : ST64BPattern<int_aarch64_st64b, ST64B>;
  def : ST64BPattern<int_aarch64_st64bv, ST64BV>;
  def : ST64BPattern<int_aarch64_st64bv0, ST64BV0>;
}

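// Pseudo for storing the Swift async context into the frame record; it is
// expanded after register allocation and may clobber x16/x17 in the
// process, hence the Defs.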
let Defs = [X16, X17], mayStore = 1, isCodeGenOnly = 1 in
def StoreSwiftAsyncContext
      : Pseudo<(outs), (ins GPR64:$ctx, GPR64sp:$base, simm9:$offset),
               []>, Sched<[]>;

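// ASSERT_ZEXT_BOOL records that the operand is already a zero-extended i1
// (only bit 0 may be set); it generates no code, so selection simply
// forwards the register operand.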
def AArch64AssertZExtBool : SDNode<"AArch64ISD::ASSERT_ZEXT_BOOL", SDT_assert>;
def : Pat<(AArch64AssertZExtBool GPR32:$op),
          (i32 GPR32:$op)>;

include "AArch64InstrAtomics.td"
include "AArch64SVEInstrInfo.td"
include "AArch64SMEInstrInfo.td"
include "AArch64InstrGISel.td"
