//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// AArch64 Instruction definitions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
//
def HasV8_1a         : Predicate<"Subtarget->hasV8_1aOps()">,
                                 AssemblerPredicate<(all_of HasV8_1aOps), "armv8.1a">;
def HasV8_2a         : Predicate<"Subtarget->hasV8_2aOps()">,
                                 AssemblerPredicate<(all_of HasV8_2aOps), "armv8.2a">;
def HasV8_3a         : Predicate<"Subtarget->hasV8_3aOps()">,
                                 AssemblerPredicate<(all_of HasV8_3aOps), "armv8.3a">;
def HasV8_4a         : Predicate<"Subtarget->hasV8_4aOps()">,
                                 AssemblerPredicate<(all_of HasV8_4aOps), "armv8.4a">;
def HasV8_5a         : Predicate<"Subtarget->hasV8_5aOps()">,
                                 AssemblerPredicate<(all_of HasV8_5aOps), "armv8.5a">;
def HasV8_6a         : Predicate<"Subtarget->hasV8_6aOps()">,
                                 AssemblerPredicate<(all_of HasV8_6aOps), "armv8.6a">;
def HasV8_7a         : Predicate<"Subtarget->hasV8_7aOps()">,
                                 AssemblerPredicate<(all_of HasV8_7aOps), "armv8.7a">;
def HasV9_0a         : Predicate<"Subtarget->hasV9_0aOps()">,
                                 AssemblerPredicate<(all_of HasV9_0aOps), "armv9-a">;
def HasV9_1a         : Predicate<"Subtarget->hasV9_1aOps()">,
                                 AssemblerPredicate<(all_of HasV9_1aOps), "armv9.1a">;
def HasV9_2a         : Predicate<"Subtarget->hasV9_2aOps()">,
                                 AssemblerPredicate<(all_of HasV9_2aOps), "armv9.2a">;
def HasV9_3a         : Predicate<"Subtarget->hasV9_3aOps()">,
                                 AssemblerPredicate<(all_of HasV9_3aOps), "armv9.3a">;
def HasV8_0r         : Predicate<"Subtarget->hasV8_0rOps()">,
                                 AssemblerPredicate<(all_of HasV8_0rOps), "armv8-r">;

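// Illustrative sketch only (HypotheticalInst is not a real instruction):
// definitions are gated on one of these predicates by wrapping them in a
// Predicates list, e.g.
//   let Predicates = [HasV8_2a] in
//   def HypotheticalInst : ...;
// The C++ expression guards instruction selection, while the
// AssemblerPredicate feature set guards assembly parsing and supplies the
// quoted name used in "instruction requires:" diagnostics.
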
def HasEL2VMSA       : Predicate<"Subtarget->hasEL2VMSA()">,
                       AssemblerPredicate<(all_of FeatureEL2VMSA), "el2vmsa">;

def HasEL3           : Predicate<"Subtarget->hasEL3()">,
                       AssemblerPredicate<(all_of FeatureEL3), "el3">;

def HasVH            : Predicate<"Subtarget->hasVH()">,
                       AssemblerPredicate<(all_of FeatureVH), "vh">;

def HasLOR           : Predicate<"Subtarget->hasLOR()">,
                       AssemblerPredicate<(all_of FeatureLOR), "lor">;

def HasPAuth         : Predicate<"Subtarget->hasPAuth()">,
                       AssemblerPredicate<(all_of FeaturePAuth), "pauth">;

def HasJS            : Predicate<"Subtarget->hasJS()">,
                       AssemblerPredicate<(all_of FeatureJS), "jsconv">;

def HasCCIDX         : Predicate<"Subtarget->hasCCIDX()">,
                       AssemblerPredicate<(all_of FeatureCCIDX), "ccidx">;

def HasComplxNum     : Predicate<"Subtarget->hasComplxNum()">,
                       AssemblerPredicate<(all_of FeatureComplxNum), "complxnum">;

def HasNV            : Predicate<"Subtarget->hasNV()">,
                       AssemblerPredicate<(all_of FeatureNV), "nv">;

def HasMPAM          : Predicate<"Subtarget->hasMPAM()">,
                       AssemblerPredicate<(all_of FeatureMPAM), "mpam">;

def HasDIT           : Predicate<"Subtarget->hasDIT()">,
                       AssemblerPredicate<(all_of FeatureDIT), "dit">;

def HasTRACEV8_4     : Predicate<"Subtarget->hasTRACEV8_4()">,
                       AssemblerPredicate<(all_of FeatureTRACEV8_4), "tracev8.4">;

def HasAM            : Predicate<"Subtarget->hasAM()">,
                       AssemblerPredicate<(all_of FeatureAM), "am">;

def HasSEL2          : Predicate<"Subtarget->hasSEL2()">,
                       AssemblerPredicate<(all_of FeatureSEL2), "sel2">;

def HasTLB_RMI       : Predicate<"Subtarget->hasTLB_RMI()">,
                       AssemblerPredicate<(all_of FeatureTLB_RMI), "tlb-rmi">;

def HasFlagM         : Predicate<"Subtarget->hasFlagM()">,
                       AssemblerPredicate<(all_of FeatureFlagM), "flagm">;

def HasRCPC_IMMO     : Predicate<"Subtarget->hasRCPCImm()">,
                       AssemblerPredicate<(all_of FeatureRCPC_IMMO), "rcpc-immo">;

def HasFPARMv8       : Predicate<"Subtarget->hasFPARMv8()">,
                                 AssemblerPredicate<(all_of FeatureFPARMv8), "fp-armv8">;
def HasNEON          : Predicate<"Subtarget->hasNEON()">,
                                 AssemblerPredicate<(all_of FeatureNEON), "neon">;
def HasCrypto        : Predicate<"Subtarget->hasCrypto()">,
                                 AssemblerPredicate<(all_of FeatureCrypto), "crypto">;
def HasSM4           : Predicate<"Subtarget->hasSM4()">,
                                 AssemblerPredicate<(all_of FeatureSM4), "sm4">;
def HasSHA3          : Predicate<"Subtarget->hasSHA3()">,
                                 AssemblerPredicate<(all_of FeatureSHA3), "sha3">;
def HasSHA2          : Predicate<"Subtarget->hasSHA2()">,
                                 AssemblerPredicate<(all_of FeatureSHA2), "sha2">;
def HasAES           : Predicate<"Subtarget->hasAES()">,
                                 AssemblerPredicate<(all_of FeatureAES), "aes">;
def HasDotProd       : Predicate<"Subtarget->hasDotProd()">,
                                 AssemblerPredicate<(all_of FeatureDotProd), "dotprod">;
def HasCRC           : Predicate<"Subtarget->hasCRC()">,
                                 AssemblerPredicate<(all_of FeatureCRC), "crc">;
def HasLSE           : Predicate<"Subtarget->hasLSE()">,
                                 AssemblerPredicate<(all_of FeatureLSE), "lse">;
def HasNoLSE         : Predicate<"!Subtarget->hasLSE()">;
def HasRAS           : Predicate<"Subtarget->hasRAS()">,
                                 AssemblerPredicate<(all_of FeatureRAS), "ras">;
def HasRDM           : Predicate<"Subtarget->hasRDM()">,
                                 AssemblerPredicate<(all_of FeatureRDM), "rdm">;
def HasPerfMon       : Predicate<"Subtarget->hasPerfMon()">;
def HasFullFP16      : Predicate<"Subtarget->hasFullFP16()">,
                                 AssemblerPredicate<(all_of FeatureFullFP16), "fullfp16">;
def HasFP16FML       : Predicate<"Subtarget->hasFP16FML()">,
                                 AssemblerPredicate<(all_of FeatureFP16FML), "fp16fml">;
def HasSPE           : Predicate<"Subtarget->hasSPE()">,
                                 AssemblerPredicate<(all_of FeatureSPE), "spe">;
def HasFuseAES       : Predicate<"Subtarget->hasFuseAES()">,
                                 AssemblerPredicate<(all_of FeatureFuseAES),
                                 "fuse-aes">;
def HasSVE           : Predicate<"Subtarget->hasSVE()">,
                                 AssemblerPredicate<(all_of FeatureSVE), "sve">;
def HasSVE2          : Predicate<"Subtarget->hasSVE2()">,
                                 AssemblerPredicate<(all_of FeatureSVE2), "sve2">;
def HasSVE2AES       : Predicate<"Subtarget->hasSVE2AES()">,
                                 AssemblerPredicate<(all_of FeatureSVE2AES), "sve2-aes">;
def HasSVE2SM4       : Predicate<"Subtarget->hasSVE2SM4()">,
                                 AssemblerPredicate<(all_of FeatureSVE2SM4), "sve2-sm4">;
def HasSVE2SHA3      : Predicate<"Subtarget->hasSVE2SHA3()">,
                                 AssemblerPredicate<(all_of FeatureSVE2SHA3), "sve2-sha3">;
def HasSVE2BitPerm   : Predicate<"Subtarget->hasSVE2BitPerm()">,
                                 AssemblerPredicate<(all_of FeatureSVE2BitPerm), "sve2-bitperm">;
def HasSME           : Predicate<"Subtarget->hasSME()">,
                                 AssemblerPredicate<(all_of FeatureSME), "sme">;
def HasSMEF64        : Predicate<"Subtarget->hasSMEF64()">,
                                 AssemblerPredicate<(all_of FeatureSMEF64), "sme-f64">;
def HasSMEI64        : Predicate<"Subtarget->hasSMEI64()">,
                                 AssemblerPredicate<(all_of FeatureSMEI64), "sme-i64">;
def HasStreamingSVE  : Predicate<"Subtarget->hasStreamingSVE()">,
                                 AssemblerPredicate<(all_of FeatureStreamingSVE), "sme">;
// A subset of SVE(2) instructions is legal in Streaming SVE execution mode;
// these instructions should be enabled if either feature has been specified.
def HasSVEorStreamingSVE
    : Predicate<"Subtarget->hasSVE() || Subtarget->hasStreamingSVE()">,
                AssemblerPredicate<(any_of FeatureSVE, FeatureStreamingSVE),
                "sve or sme">;
def HasSVE2orStreamingSVE
    : Predicate<"Subtarget->hasSVE2() || Subtarget->hasStreamingSVE()">,
                AssemblerPredicate<(any_of FeatureSVE2, FeatureStreamingSVE),
                "sve2 or sme">;
// A subset of NEON instructions is legal in Streaming SVE execution mode;
// these instructions should be enabled if either feature has been specified.
def HasNEONorStreamingSVE
    : Predicate<"Subtarget->hasNEON() || Subtarget->hasStreamingSVE()">,
                AssemblerPredicate<(any_of FeatureNEON, FeatureStreamingSVE),
                "neon or sme">;
def HasRCPC          : Predicate<"Subtarget->hasRCPC()">,
                                 AssemblerPredicate<(all_of FeatureRCPC), "rcpc">;
def HasAltNZCV       : Predicate<"Subtarget->hasAlternativeNZCV()">,
                       AssemblerPredicate<(all_of FeatureAltFPCmp), "altnzcv">;
def HasFRInt3264     : Predicate<"Subtarget->hasFRInt3264()">,
                       AssemblerPredicate<(all_of FeatureFRInt3264), "frint3264">;
def HasSB            : Predicate<"Subtarget->hasSB()">,
                       AssemblerPredicate<(all_of FeatureSB), "sb">;
def HasPredRes       : Predicate<"Subtarget->hasPredRes()">,
                       AssemblerPredicate<(all_of FeaturePredRes), "predres">;
def HasCCDP          : Predicate<"Subtarget->hasCCDP()">,
                       AssemblerPredicate<(all_of FeatureCacheDeepPersist), "ccdp">;
def HasBTI           : Predicate<"Subtarget->hasBTI()">,
                       AssemblerPredicate<(all_of FeatureBranchTargetId), "bti">;
def HasMTE           : Predicate<"Subtarget->hasMTE()">,
                       AssemblerPredicate<(all_of FeatureMTE), "mte">;
def HasTME           : Predicate<"Subtarget->hasTME()">,
                       AssemblerPredicate<(all_of FeatureTME), "tme">;
def HasETE           : Predicate<"Subtarget->hasETE()">,
                       AssemblerPredicate<(all_of FeatureETE), "ete">;
def HasTRBE          : Predicate<"Subtarget->hasTRBE()">,
                       AssemblerPredicate<(all_of FeatureTRBE), "trbe">;
def HasBF16          : Predicate<"Subtarget->hasBF16()">,
                       AssemblerPredicate<(all_of FeatureBF16), "bf16">;
def HasMatMulInt8    : Predicate<"Subtarget->hasMatMulInt8()">,
                       AssemblerPredicate<(all_of FeatureMatMulInt8), "i8mm">;
def HasMatMulFP32    : Predicate<"Subtarget->hasMatMulFP32()">,
                       AssemblerPredicate<(all_of FeatureMatMulFP32), "f32mm">;
def HasMatMulFP64    : Predicate<"Subtarget->hasMatMulFP64()">,
                       AssemblerPredicate<(all_of FeatureMatMulFP64), "f64mm">;
def HasXS            : Predicate<"Subtarget->hasXS()">,
                       AssemblerPredicate<(all_of FeatureXS), "xs">;
def HasWFxT          : Predicate<"Subtarget->hasWFxT()">,
                       AssemblerPredicate<(all_of FeatureWFxT), "wfxt">;
def HasLS64          : Predicate<"Subtarget->hasLS64()">,
                       AssemblerPredicate<(all_of FeatureLS64), "ls64">;
def HasBRBE          : Predicate<"Subtarget->hasBRBE()">,
                       AssemblerPredicate<(all_of FeatureBRBE), "brbe">;
def HasSPE_EEF       : Predicate<"Subtarget->hasSPE_EEF()">,
                       AssemblerPredicate<(all_of FeatureSPE_EEF), "spe-eef">;
def HasHBC           : Predicate<"Subtarget->hasHBC()">,
                       AssemblerPredicate<(all_of FeatureHBC), "hbc">;
def HasMOPS          : Predicate<"Subtarget->hasMOPS()">,
                       AssemblerPredicate<(all_of FeatureMOPS), "mops">;
def IsLE             : Predicate<"Subtarget->isLittleEndian()">;
def IsBE             : Predicate<"!Subtarget->isLittleEndian()">;
def IsWindows        : Predicate<"Subtarget->isTargetWindows()">;
def UseExperimentalZeroingPseudos
    : Predicate<"Subtarget->useExperimentalZeroingPseudos()">;
def UseAlternateSExtLoadCVTF32
    : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;

def UseNegativeImmediates
    : Predicate<"false">, AssemblerPredicate<(all_of (not FeatureNoNegativeImmediates)),
                                             "NegativeImmediates">;

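// With negative-immediate conversion enabled (the default), the assembler may
// rewrite an unencodable immediate into the complementary instruction, e.g.
// "add w0, w1, #-8" into "sub w0, w1, #8" (illustrative example); building
// with the "no-neg-immediates" subtarget feature disables this.
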
def UseScalarIncVL : Predicate<"Subtarget->useScalarIncVL()">;

def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER",
                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                       SDTCisInt<1>]>>;


//===----------------------------------------------------------------------===//
// AArch64-specific DAG Nodes.
//

// SDTBinaryArithWithFlagsOut - RES1, FLAGS = op LHS, RHS
def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
                                              [SDTCisSameAs<0, 2>,
                                               SDTCisSameAs<0, 3>,
                                               SDTCisInt<0>, SDTCisVT<1, i32>]>;
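// Anatomy of the profile above: SDTypeProfile<2, 2, ...> declares two results
// and two operands, numbered consecutively (results 0-1, operands 2-3), so
// SDTCisVT<1, i32> pins the FLAGS result to i32 (NZCV) and the SDTCisSameAs
// constraints tie RES1 and both operands to a single integer type.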

// SDTBinaryArithWithFlagsIn - RES = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
                                            [SDTCisSameAs<0, 1>,
                                             SDTCisSameAs<0, 2>,
                                             SDTCisInt<0>,
                                             SDTCisVT<3, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES1, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                            [SDTCisSameAs<0, 2>,
                                             SDTCisSameAs<0, 3>,
                                             SDTCisInt<0>,
                                             SDTCisVT<1, i32>,
                                             SDTCisVT<4, i32>]>;

def SDT_AArch64Brcond  : SDTypeProfile<0, 3,
                                     [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
                                      SDTCisVT<2, i32>]>;
def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
                                        SDTCisVT<2, OtherVT>]>;


def SDT_AArch64CSel  : SDTypeProfile<1, 4,
                                   [SDTCisSameAs<0, 1>,
                                    SDTCisSameAs<0, 2>,
                                    SDTCisInt<3>,
                                    SDTCisVT<4, i32>]>;
def SDT_AArch64CCMP : SDTypeProfile<1, 5,
                                    [SDTCisVT<0, i32>,
                                     SDTCisInt<1>,
                                     SDTCisSameAs<1, 2>,
                                     SDTCisInt<3>,
                                     SDTCisInt<4>,
                                     SDTCisVT<5, i32>]>;
def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
                                     [SDTCisVT<0, i32>,
                                      SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>,
                                      SDTCisInt<3>,
                                      SDTCisInt<4>,
                                      SDTCisVT<5, i32>]>;
def SDT_AArch64FCmp   : SDTypeProfile<0, 2,
                                   [SDTCisFP<0>,
                                    SDTCisSameAs<0, 1>]>;
def SDT_AArch64Dup   : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDT_AArch64DupLane   : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
def SDT_AArch64Insr  : SDTypeProfile<1, 2, [SDTCisVec<0>]>;
def SDT_AArch64Zip   : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                          SDTCisSameAs<0, 1>,
                                          SDTCisSameAs<0, 2>]>;
def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisInt<2>, SDTCisInt<3>]>;
def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                          SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDT_AArch64Dot: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                         SDTCisVec<2>, SDTCisSameAs<2,3>]>;

def SDT_AArch64vshiftinsert : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<3>,
                                                 SDTCisSameAs<0,1>,
                                                 SDTCisSameAs<0,2>]>;

def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
def SDT_AArch64fcmp  : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>]>;
def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>,
                                           SDTCisSameAs<0,3>]>;
def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;

def SDT_AArch64ITOF  : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;

def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
                                                 SDTCisPtrTy<1>]>;

def SDT_AArch64uaddlp : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;

def SDT_AArch64ldp : SDTypeProfile<2, 1, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stp : SDTypeProfile<0, 3, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stnp : SDTypeProfile<0, 3, [SDTCisVT<0, v4i32>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;

// Generates the general dynamic sequences, i.e.
//  adrp  x0, :tlsdesc:var
//  ldr   x1, [x0, #:tlsdesc_lo12:var]
//  add   x0, x0, #:tlsdesc_lo12:var
//  .tlsdesccall var
//  blr   x1

// (the TPIDR_EL0 offset is put directly in X0, hence no "result" here)
// The profile has no results and a single operand: the variable.
def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;

def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
                                        [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
                                         SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
                                         SDTCisSameAs<1, 4>]>;

def SDT_AArch64TBL : SDTypeProfile<1, 2, [
  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>
]>;

// non-extending masked load fragment.
def nonext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
         !cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
// sign extending masked load fragments.
def asext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return (cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD ||
          cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD) &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def asext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def asext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def asext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;
// zero extending masked load fragments.
def zext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def zext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def zext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def zext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
         cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;

// non-truncating masked store fragment.
def nontrunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         !cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;
// truncating masked store fragments.
def trunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed();
}]>;
def trunc_masked_store_i8 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def trunc_masked_store_i16 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def trunc_masked_store_i32 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;

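// Taken together, these fragments partition unindexed masked loads and stores
// by extension/truncation kind, element type, and temporal hint, so each
// SVE load/store pattern matches exactly one case; e.g. a masked load that
// sign-extends from i32 elements is matched only by asext_masked_load_i32.
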
// top16Zero - answer true if the upper 16 bits of $src are 0, false otherwise
def top16Zero: PatLeaf<(i32 GPR32:$src), [{
  return SDValue(N,0)->getValueType(0) == MVT::i32 &&
         CurDAG->MaskedValueIsZero(SDValue(N,0), APInt::getHighBitsSet(32, 16));
  }]>;

// top32Zero - answer true if the upper 32 bits of $src are 0, false otherwise
def top32Zero: PatLeaf<(i64 GPR64:$src), [{
  return SDValue(N,0)->getValueType(0) == MVT::i64 &&
         CurDAG->MaskedValueIsZero(SDValue(N,0), APInt::getHighBitsSet(64, 32));
  }]>;
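// For example, a GPR64 value produced by (zext i32) satisfies top32Zero,
// letting patterns use the cheaper 32-bit form of an operation on it.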

// Node definitions.
def AArch64adrp          : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
def AArch64adr           : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
def AArch64addlow        : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
def AArch64LOADgot       : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
                                SDCallSeqStart<[ SDTCisVT<0, i32>,
                                                 SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOutGlue]>;
def AArch64callseq_end   : SDNode<"ISD::CALLSEQ_END",
                                SDCallSeqEnd<[ SDTCisVT<0, i32>,
                                               SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def AArch64call          : SDNode<"AArch64ISD::CALL",
                                SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                 SDNPVariadic]>;

def AArch64call_bti      : SDNode<"AArch64ISD::CALL_BTI",
                                SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                 SDNPVariadic]>;

def AArch64call_rvmarker : SDNode<"AArch64ISD::CALL_RVMARKER",
                             SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                             [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                              SDNPVariadic]>;

def AArch64brcond        : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
                                [SDNPHasChain]>;
def AArch64cbz           : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64cbnz          : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64tbz           : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;
def AArch64tbnz          : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;


def AArch64csel          : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
def AArch64csinv         : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
def AArch64csneg         : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
def AArch64csinc         : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
def AArch64retflag       : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
                                [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def AArch64adc       : SDNode<"AArch64ISD::ADC",  SDTBinaryArithWithFlagsIn>;
def AArch64sbc       : SDNode<"AArch64ISD::SBC",  SDTBinaryArithWithFlagsIn>;
def AArch64add_flag  : SDNode<"AArch64ISD::ADDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64sub_flag  : SDNode<"AArch64ISD::SUBS",  SDTBinaryArithWithFlagsOut>;
def AArch64and_flag  : SDNode<"AArch64ISD::ANDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64adc_flag  : SDNode<"AArch64ISD::ADCS",  SDTBinaryArithWithFlagsInOut>;
def AArch64sbc_flag  : SDNode<"AArch64ISD::SBCS",  SDTBinaryArithWithFlagsInOut>;

def AArch64ccmp      : SDNode<"AArch64ISD::CCMP",  SDT_AArch64CCMP>;
def AArch64ccmn      : SDNode<"AArch64ISD::CCMN",  SDT_AArch64CCMP>;
def AArch64fccmp     : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;

def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;

def AArch64fcmp         : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
def AArch64strict_fcmp  : SDNode<"AArch64ISD::STRICT_FCMP", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64strict_fcmpe : SDNode<"AArch64ISD::STRICT_FCMPE", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64any_fcmp     : PatFrags<(ops node:$lhs, node:$rhs),
                                   [(AArch64strict_fcmp node:$lhs, node:$rhs),
                                    (AArch64fcmp node:$lhs, node:$rhs)]>;

def AArch64dup       : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
def AArch64duplane8  : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;

def AArch64insr      : SDNode<"AArch64ISD::INSR", SDT_AArch64Insr>;

def AArch64zip1      : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
def AArch64zip2      : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
def AArch64uzp1      : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
def AArch64uzp2      : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
def AArch64trn1      : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
def AArch64trn2      : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;

def AArch64movi_edit  : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
def AArch64movi_msl   : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
def AArch64mvni_msl   : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
def AArch64movi       : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
def AArch64fmov       : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;

def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
def AArch64ext   : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;

def AArch64vashr   : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
def AArch64vlshr   : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
def AArch64vshl    : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
def AArch64sqshli  : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
def AArch64uqshli  : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
def AArch64srshri  : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
def AArch64urshri  : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;
def AArch64vsli    : SDNode<"AArch64ISD::VSLI", SDT_AArch64vshiftinsert>;
def AArch64vsri    : SDNode<"AArch64ISD::VSRI", SDT_AArch64vshiftinsert>;

def AArch64bit : SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
def AArch64bsp : SDNode<"AArch64ISD::BSP", SDT_AArch64trivec>;

def AArch64cmeq : SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
def AArch64cmge : SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
def AArch64cmgt : SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
def AArch64cmhi : SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
def AArch64cmhs : SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;

def AArch64fcmeq : SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
def AArch64fcmge : SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
def AArch64fcmgt : SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;

def AArch64cmeqz : SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
def AArch64cmgez : SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
def AArch64cmgtz : SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
def AArch64cmlez : SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
def AArch64cmltz : SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
                           (vnot (AArch64cmeqz (and node:$LHS, node:$RHS)))>;
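// i.e. CMTST(LHS, RHS) is modelled as NOT(CMEQz(AND(LHS, RHS))): a lane is
// set to all-ones iff (LHS & RHS) is non-zero in that lane.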

def AArch64fcmeqz : SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
def AArch64fcmgez : SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
def AArch64fcmgtz : SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
def AArch64fcmlez : SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
def AArch64fcmltz : SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;

def AArch64bici : SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
def AArch64orri : SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;

def AArch64tcret : SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
                  [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

def AArch64Prefetch        : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
                               [SDNPHasChain, SDNPSideEffect]>;

def AArch64sitof : SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
def AArch64uitof : SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;

def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
                                    SDT_AArch64TLSDescCallSeq,
                                    [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
                                     SDNPVariadic]>;


def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
                                 SDT_AArch64WrapperLarge>;

def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;

def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                    SDTCisSameAs<1, 2>]>;
def AArch64smull    : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
def AArch64umull    : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;

def AArch64frecpe   : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
def AArch64frecps   : SDNode<"AArch64ISD::FRECPS", SDTFPBinOp>;
def AArch64frsqrte  : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
def AArch64frsqrts  : SDNode<"AArch64ISD::FRSQRTS", SDTFPBinOp>;

def AArch64sdot     : SDNode<"AArch64ISD::SDOT", SDT_AArch64Dot>;
def AArch64udot     : SDNode<"AArch64ISD::UDOT", SDT_AArch64Dot>;

def AArch64saddv    : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
def AArch64uaddv    : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
def AArch64sminv    : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
def AArch64uminv    : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
def AArch64smaxv    : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
def AArch64umaxv    : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;

def AArch64srhadd   : SDNode<"AArch64ISD::SRHADD", SDT_AArch64binvec>;
def AArch64urhadd   : SDNode<"AArch64ISD::URHADD", SDT_AArch64binvec>;
def AArch64shadd    : SDNode<"AArch64ISD::SHADD", SDT_AArch64binvec>;
def AArch64uhadd    : SDNode<"AArch64ISD::UHADD", SDT_AArch64binvec>;

def AArch64uabd     : PatFrags<(ops node:$lhs, node:$rhs),
                               [(abdu node:$lhs, node:$rhs),
                                (int_aarch64_neon_uabd node:$lhs, node:$rhs)]>;
def AArch64sabd     : PatFrags<(ops node:$lhs, node:$rhs),
                               [(abds node:$lhs, node:$rhs),
                                (int_aarch64_neon_sabd node:$lhs, node:$rhs)]>;

def AArch64uaddlp_n : SDNode<"AArch64ISD::UADDLP", SDT_AArch64uaddlp>;
def AArch64uaddlp   : PatFrags<(ops node:$src),
                               [(AArch64uaddlp_n node:$src),
                                (int_aarch64_neon_uaddlp node:$src)]>;

def SDT_AArch64SETTAG : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
def AArch64stg   : SDNode<"AArch64ISD::STG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stzg  : SDNode<"AArch64ISD::STZG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64st2g  : SDNode<"AArch64ISD::ST2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stz2g : SDNode<"AArch64ISD::STZ2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def SDT_AArch64unpk : SDTypeProfile<1, 1, [
    SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>
]>;
def AArch64sunpkhi : SDNode<"AArch64ISD::SUNPKHI", SDT_AArch64unpk>;
def AArch64sunpklo : SDNode<"AArch64ISD::SUNPKLO", SDT_AArch64unpk>;
def AArch64uunpkhi : SDNode<"AArch64ISD::UUNPKHI", SDT_AArch64unpk>;
def AArch64uunpklo : SDNode<"AArch64ISD::UUNPKLO", SDT_AArch64unpk>;

def AArch64ldp  : SDNode<"AArch64ISD::LDP", SDT_AArch64ldp, [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def AArch64stp  : SDNode<"AArch64ISD::STP", SDT_AArch64stp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stnp : SDNode<"AArch64ISD::STNP", SDT_AArch64stnp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def AArch64tbl : SDNode<"AArch64ISD::TBL", SDT_AArch64TBL>;
def AArch64mrs : SDNode<"AArch64ISD::MRS",
                        SDTypeProfile<1, 1, [SDTCisVT<0, i64>, SDTCisVT<1, i32>]>,
                        [SDNPHasChain, SDNPOutGlue]>;
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
// We could compute these on a per-module basis but doing so requires accessing
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
  def ForCodeSize    : Predicate<"shouldOptForSize(MF)">;
  def NotForCodeSize : Predicate<"!shouldOptForSize(MF)">;
  // Avoid generating STRQro if it is slow, unless we're optimizing for code size.
  def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || shouldOptForSize(MF)">;

  def UseBTI : Predicate<[{ MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;
  def NotUseBTI : Predicate<[{ !MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;

  def SLSBLRMitigation : Predicate<[{ MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  def NoSLSBLRMitigation : Predicate<[{ !MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  // Toggles patterns which aren't beneficial in GlobalISel when we aren't
  // optimizing. This allows us to selectively use patterns without impacting
  // SelectionDAG's behaviour.
  // FIXME: One day there will probably be a nicer way to check for this, but
  // today is not that day.
  def OptimizedGISelOrOtherSelector : Predicate<"!MF->getFunction().hasOptNone() || MF->getProperties().hasProperty(MachineFunctionProperties::Property::FailedISel) || !MF->getProperties().hasProperty(MachineFunctionProperties::Property::Legalized)">;
}

include "AArch64InstrFormats.td"
include "SVEInstrFormats.td"
include "SMEInstrFormats.td"

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Miscellaneous instructions.
//===----------------------------------------------------------------------===//

let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
// We set Sched to an empty list because we expect these instructions to
// simply get removed in most cases.
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(AArch64callseq_start timm:$amt1, timm:$amt2)]>,
                              Sched<[]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
                            Sched<[]>;
} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1

let isReMaterializable = 1, isCodeGenOnly = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions.  When that changes, they can be
// removed, along with the AArch64Wrapper node.

let AddedComplexity = 10 in
def LOADgot : Pseudo<(outs GPR64common:$dst), (ins i64imm:$addr),
                     [(set GPR64common:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
              Sched<[WriteLDAdr]>;

// The MOVaddr instruction should match only when the add is not folded
// into a load or store address.
def MOVaddr
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
                                            tglobaladdr:$low))]>,
      Sched<[WriteAdrAdr]>;
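// A minimal sketch of the expected post-ISel expansion of MOVaddr
// (the pseudo covers the ADRP + ADDlow pair):
//   adrp x0, sym
//   add  x0, x0, :lo12:sym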
def MOVaddrJT
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
                                             tjumptable:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrCP
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
                                             tconstpool:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrBA
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
                                             tblockaddress:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrTLS
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
                                            tglobaltlsaddr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrEXT
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
                                            texternalsym:$low))]>,
      Sched<[WriteAdrAdr]>;
// Normally AArch64addlow either gets folded into a following ldr/str,
// or together with an adrp into MOVaddr above. For cases with TLS, it
// might appear without either of them, so allow lowering it into a plain
// add.
def ADDlowTLS
    : Pseudo<(outs GPR64sp:$dst), (ins GPR64sp:$src, i64imm:$low),
             [(set GPR64sp:$dst, (AArch64addlow GPR64sp:$src,
                                            tglobaltlsaddr:$low))]>,
      Sched<[WriteAdr]>;

} // isReMaterializable, isCodeGenOnly

def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
          (LOADgot tglobaltlsaddr:$addr)>;

def : Pat<(AArch64LOADgot texternalsym:$addr),
          (LOADgot texternalsym:$addr)>;

def : Pat<(AArch64LOADgot tconstpool:$addr),
          (LOADgot tconstpool:$addr)>;

// In general these get lowered into a sequence of three 4-byte instructions.
// The 32-bit jump table destination actually needs only two instructions,
// since we can use the table itself as a PC-relative base, but optimization
// occurs after branch relaxation, so be pessimistic.
let Size = 12, Constraints = "@earlyclobber $dst,@earlyclobber $scratch",
    isNotDuplicable = 1 in {
def JumpTableDest32 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest16 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest8 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                            (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                     Sched<[]>;
}
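// A rough sketch of the intended 32-bit expansion (the authoritative lowering
// lives in the AArch64 AsmPrinter): the table entry is a self-relative
// offset, so
//   ldrsw $scratch, [$table, $entry, lsl #2]
//   add   $dst, $table, $scratch
// suffices, while the narrower 8/16-bit entries need a third instruction to
// materialize the base.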

// Space-consuming pseudo to aid testing of placement and reachability
// algorithms. Immediate operand is the number of bytes this "instruction"
// occupies; register operands can be used to enforce dependency and constrain
// the scheduler.
let hasSideEffects = 1, mayLoad = 1, mayStore = 1 in
def SPACE : Pseudo<(outs GPR64:$Rd), (ins i32imm:$size, GPR64:$Rn),
                   [(set GPR64:$Rd, (int_aarch64_space imm:$size, GPR64:$Rn))]>,
            Sched<[]>;

let hasSideEffects = 1, isCodeGenOnly = 1 in {
  def SpeculationSafeValueX
      : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>;
  def SpeculationSafeValueW
      : Pseudo<(outs GPR32:$dst), (ins GPR32:$src), []>, Sched<[]>;
}

// SpeculationBarrierEndBB must only be used after an unconditional control
// flow, i.e. after a terminator for which isBarrier is True.
let hasSideEffects = 1, isCodeGenOnly = 1, isTerminator = 1, isBarrier = 1 in {
  // This gets lowered to a pair of 4-byte instructions.
  let Size = 8 in
  def SpeculationBarrierISBDSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
  // This gets lowered to a 4-byte instruction.
  let Size = 4 in
  def SpeculationBarrierSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
}
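// The ISBDSB variant is expected to lower to "dsb sy; isb", and the SB
// variant (for targets with the sb feature) to a single "sb", matching the
// Size values above.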

//===----------------------------------------------------------------------===//
// System instructions.
//===----------------------------------------------------------------------===//

def HINT : HintI<"hint">;
def : InstAlias<"nop",  (HINT 0b000)>;
def : InstAlias<"yield",(HINT 0b001)>;
def : InstAlias<"wfe",  (HINT 0b010)>;
def : InstAlias<"wfi",  (HINT 0b011)>;
def : InstAlias<"sev",  (HINT 0b100)>;
def : InstAlias<"sevl", (HINT 0b101)>;
def : InstAlias<"dgh",  (HINT 0b110)>;
def : InstAlias<"esb",  (HINT 0b10000)>, Requires<[HasRAS]>;
def : InstAlias<"csdb", (HINT 20)>;
// In order to be able to write readable assembly, LLVM should accept assembly
// inputs that use Branch Target Identification mnemonics, even with BTI disabled.
// However, in order to be compatible with other assemblers (e.g. GAS), LLVM
// should not emit these mnemonics unless BTI is enabled.
def : InstAlias<"bti",  (HINT 32), 0>;
def : InstAlias<"bti $op", (HINT btihint_op:$op), 0>;
def : InstAlias<"bti",  (HINT 32)>, Requires<[HasBTI]>;
def : InstAlias<"bti $op", (HINT btihint_op:$op)>, Requires<[HasBTI]>;
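// For reference, the BTI aliases map onto the HINT space: "bti" is HINT #32,
// and "bti c", "bti j" and "bti jc" are HINT #34, #36 and #38 respectively
// via btihint_op.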

// v8.2a Statistical Profiling extension
def : InstAlias<"psb $op",  (HINT psbhint_op:$op)>, Requires<[HasSPE]>;

// As far as LLVM is concerned this writes to the system's exclusive monitors.
let mayLoad = 1, mayStore = 1 in
def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;

// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
// model patterns with sufficiently fine granularity.
let mayLoad = ?, mayStore = ? in {
def DMB   : CRmSystemI<barrier_op, 0b101, "dmb",
                       [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;

def DSB   : CRmSystemI<barrier_op, 0b100, "dsb",
                       [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;

def ISB   : CRmSystemI<barrier_op, 0b110, "isb",
                       [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;

def TSB   : CRmSystemI<barrier_op, 0b010, "tsb", []> {
  let CRm        = 0b0010;
  let Inst{12}   = 0;
  let Predicates = [HasTRACEV8_4];
}

def DSBnXS  : CRmSystemI<barrier_nxs_op, 0b001, "dsb"> {
  let CRm{1-0}   = 0b11;
  let Inst{9-8}  = 0b10;
  let Predicates = [HasXS];
}

let Predicates = [HasWFxT] in {
def WFET : RegInputSystemI<0b0000, 0b000, "wfet">;
def WFIT : RegInputSystemI<0b0000, 0b001, "wfit">;
}

// Branch Record Buffer two-word mnemonic instructions
class BRBEI<bits<3> op2, string keyword>
    : SimpleSystemI<0, (ins), "brb", keyword>, Sched<[WriteSys]> {
  let Inst{31-8} = 0b110101010000100101110010;
  let Inst{7-5} = op2;
  let Predicates = [HasBRBE];
}
def BRB_IALL: BRBEI<0b100, "\tiall">;
def BRB_INJ:  BRBEI<0b101, "\tinj">;

} // mayLoad = ?, mayStore = ?

// Allow uppercase and lowercase keyword arguments for BRB IALL and BRB INJ
def : TokenAlias<"INJ", "inj">;
def : TokenAlias<"IALL", "iall">;

// ARMv8.2-A Dot Product
let Predicates = [HasDotProd] in {
defm SDOT : SIMDThreeSameVectorDot<0, 0, "sdot", AArch64sdot>;
defm UDOT : SIMDThreeSameVectorDot<1, 0, "udot", AArch64udot>;
defm SDOTlane : SIMDThreeSameVectorDotIndex<0, 0, 0b10, "sdot", AArch64sdot>;
defm UDOTlane : SIMDThreeSameVectorDotIndex<1, 0, 0b10, "udot", AArch64udot>;
}

// ARMv8.6-A BFloat
let Predicates = [HasNEON, HasBF16] in {
defm BFDOT       : SIMDThreeSameVectorBFDot<1, "bfdot">;
defm BF16DOTlane : SIMDThreeSameVectorBF16DotI<0, "bfdot">;
def BFMMLA       : SIMDThreeSameVectorBF16MatrixMul<"bfmmla">;
def BFMLALB      : SIMDBF16MLAL<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALT      : SIMDBF16MLAL<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFMLALBIdx   : SIMDBF16MLALIndex<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALTIdx   : SIMDBF16MLALIndex<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFCVTN       : SIMD_BFCVTN;
def BFCVTN2      : SIMD_BFCVTN2;

// Vector-scalar BFDOT:
// The second source operand of the 64-bit variant of BF16DOTlane is a 128-bit
// register (the instruction uses a single 32-bit lane from it), so the pattern
// is a bit tricky.
def : Pat<(v2f32 (int_aarch64_neon_bfdot
                    (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
                    (v4bf16 (bitconvert
                      (v2i32 (AArch64duplane32
                        (v4i32 (bitconvert
                          (v8bf16 (insert_subvector undef,
                            (v4bf16 V64:$Rm),
                            (i64 0))))),
                        VectorIndexS:$idx)))))),
          (BF16DOTlanev4bf16 (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
                             (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                             VectorIndexS:$idx)>;
}

let Predicates = [HasNEONorStreamingSVE, HasBF16] in {
def BFCVT : BF16ToSinglePrecision<"bfcvt">;
}

// ARMv8.6-A AArch64 matrix multiplication
let Predicates = [HasMatMulInt8] in {
def  SMMLA : SIMDThreeSameVectorMatMul<0, 0, "smmla", int_aarch64_neon_smmla>;
def  UMMLA : SIMDThreeSameVectorMatMul<0, 1, "ummla", int_aarch64_neon_ummla>;
def USMMLA : SIMDThreeSameVectorMatMul<1, 0, "usmmla", int_aarch64_neon_usmmla>;
defm USDOT : SIMDThreeSameVectorDot<0, 1, "usdot", int_aarch64_neon_usdot>;
defm USDOTlane : SIMDThreeSameVectorDotIndex<0, 1, 0b10, "usdot", int_aarch64_neon_usdot>;

// sudot lane has a pattern where usdot is expected (there is no sudot).
// The second operand is used in the dup operation to repeat the indexed
// element.
class BaseSIMDSUDOTIndex<bit Q, string dst_kind, string lhs_kind,
                         string rhs_kind, RegisterOperand RegType,
                         ValueType AccumType, ValueType InputType>
      : BaseSIMDThreeSameVectorDotIndex<Q, 0, 1, 0b00, "sudot", dst_kind,
                                        lhs_kind, rhs_kind, RegType, AccumType,
                                        InputType, null_frag> {
  let Pattern = [(set (AccumType RegType:$dst),
                      (AccumType (int_aarch64_neon_usdot (AccumType RegType:$Rd),
                                 (InputType (bitconvert (AccumType
                                    (AArch64duplane32 (v4i32 V128:$Rm),
                                        VectorIndexS:$idx)))),
                                 (InputType RegType:$Rn))))];
}

multiclass SIMDSUDOTIndex {
  def v8i8  : BaseSIMDSUDOTIndex<0, ".2s", ".8b", ".4b", V64, v2i32, v8i8>;
  def v16i8 : BaseSIMDSUDOTIndex<1, ".4s", ".16b", ".4b", V128, v4i32, v16i8>;
}

defm SUDOTlane : SIMDSUDOTIndex;

}

// ARMv8.2-A FP16 Fused Multiply-Add Long
let Predicates = [HasNEON, HasFP16FML] in {
defm FMLAL      : SIMDThreeSameVectorFML<0, 1, 0b001, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSL      : SIMDThreeSameVectorFML<0, 1, 0b101, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2     : SIMDThreeSameVectorFML<1, 0, 0b001, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2     : SIMDThreeSameVectorFML<1, 0, 0b101, "fmlsl2", int_aarch64_neon_fmlsl2>;
defm FMLALlane  : SIMDThreeSameVectorFMLIndex<0, 0b0000, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSLlane  : SIMDThreeSameVectorFMLIndex<0, 0b0100, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1000, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1100, "fmlsl2", int_aarch64_neon_fmlsl2>;
}

// Armv8.2-A Crypto extensions
let Predicates = [HasSHA3] in {
def SHA512H   : CryptoRRRTied<0b0, 0b00, "sha512h">;
def SHA512H2  : CryptoRRRTied<0b0, 0b01, "sha512h2">;
def SHA512SU0 : CryptoRRTied_2D<0b0, 0b00, "sha512su0">;
def SHA512SU1 : CryptoRRRTied_2D<0b0, 0b10, "sha512su1">;
def RAX1      : CryptoRRR_2D<0b0, 0b11, "rax1">;
def EOR3      : CryptoRRRR_16B<0b00, "eor3">;
def BCAX      : CryptoRRRR_16B<0b01, "bcax">;
def XAR       : CryptoRRRi6<"xar">;

class SHA3_pattern<Instruction INST, Intrinsic OpNode, ValueType VecTy>
  : Pat<(VecTy (OpNode (VecTy V128:$Vd), (VecTy V128:$Vn), (VecTy V128:$Vm))),
        (INST (VecTy V128:$Vd), (VecTy V128:$Vn), (VecTy V128:$Vm))>;

def : Pat<(v2i64 (int_aarch64_crypto_sha512su0 (v2i64 V128:$Vn), (v2i64 V128:$Vm))),
          (SHA512SU0 (v2i64 V128:$Vn), (v2i64 V128:$Vm))>;

def : SHA3_pattern<SHA512H, int_aarch64_crypto_sha512h, v2i64>;
def : SHA3_pattern<SHA512H2, int_aarch64_crypto_sha512h2, v2i64>;
def : SHA3_pattern<SHA512SU1, int_aarch64_crypto_sha512su1, v2i64>;

def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v16i8>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v8i16>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v4i32>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v2i64>;

class EOR3_pattern<ValueType VecTy>
  : Pat<(xor (xor (VecTy V128:$Vn), (VecTy V128:$Vm)), (VecTy V128:$Va)),
        (EOR3 (VecTy V128:$Vn), (VecTy V128:$Vm), (VecTy V128:$Va))>;

def : EOR3_pattern<v16i8>;
def : EOR3_pattern<v8i16>;
def : EOR3_pattern<v4i32>;
def : EOR3_pattern<v2i64>;
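// EOR3 above folds a three-way exclusive OR into one instruction, e.g.
//   eor3 v0.16b, v1.16b, v2.16b, v3.16b   // v0 = v1 ^ v2 ^ v3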

def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v16i8>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v8i16>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v4i32>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v2i64>;

def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v16i8>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v8i16>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v4i32>;
def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v2i64>;

def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v16i8>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v8i16>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v4i32>;
def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v2i64>;

def : Pat<(v2i64 (int_aarch64_crypto_rax1 (v2i64 V128:$Vn), (v2i64 V128:$Vm))),
          (RAX1 (v2i64 V128:$Vn), (v2i64 V128:$Vm))>;

def : Pat<(v2i64 (int_aarch64_crypto_xar (v2i64 V128:$Vn), (v2i64 V128:$Vm), (i64 timm0_63:$imm))),
          (XAR (v2i64 V128:$Vn), (v2i64 V128:$Vm), (timm0_63:$imm))>;
1053
1054
1055} // HasSHA3
1056
1057let Predicates = [HasSM4] in {
1058def SM3TT1A   : CryptoRRRi2Tied<0b0, 0b00, "sm3tt1a">;
1059def SM3TT1B   : CryptoRRRi2Tied<0b0, 0b01, "sm3tt1b">;
1060def SM3TT2A   : CryptoRRRi2Tied<0b0, 0b10, "sm3tt2a">;
1061def SM3TT2B   : CryptoRRRi2Tied<0b0, 0b11, "sm3tt2b">;
1062def SM3SS1    : CryptoRRRR_4S<0b10, "sm3ss1">;
1063def SM3PARTW1 : CryptoRRRTied_4S<0b1, 0b00, "sm3partw1">;
1064def SM3PARTW2 : CryptoRRRTied_4S<0b1, 0b01, "sm3partw2">;
1065def SM4ENCKEY : CryptoRRR_4S<0b1, 0b10, "sm4ekey">;
1066def SM4E      : CryptoRRTied_4S<0b0, 0b01, "sm4e">;
1067
1068def : Pat<(v4i32 (int_aarch64_crypto_sm3ss1 (v4i32 V128:$Vn), (v4i32 V128:$Vm), (v4i32 V128:$Va))),
1069          (SM3SS1 (v4i32 V128:$Vn), (v4i32 V128:$Vm), (v4i32 V128:$Va))>;
1070
1071class SM3PARTW_pattern<Instruction INST, Intrinsic OpNode>
1072  : Pat<(v4i32 (OpNode (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm))),
1073        (INST (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm))>;
1074
1075class SM3TT_pattern<Instruction INST, Intrinsic OpNode>
1076  : Pat<(v4i32 (OpNode (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm), (i64 VectorIndexS_timm:$imm) )),
1077        (INST (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm), (VectorIndexS_timm:$imm))>;
1078
1079class SM4_pattern<Instruction INST, Intrinsic OpNode>
1080  : Pat<(v4i32 (OpNode (v4i32 V128:$Vn), (v4i32 V128:$Vm))),
1081        (INST (v4i32 V128:$Vn), (v4i32 V128:$Vm))>;
1082
1083def : SM3PARTW_pattern<SM3PARTW1, int_aarch64_crypto_sm3partw1>;
1084def : SM3PARTW_pattern<SM3PARTW2, int_aarch64_crypto_sm3partw2>;
1085
1086def : SM3TT_pattern<SM3TT1A, int_aarch64_crypto_sm3tt1a>;
1087def : SM3TT_pattern<SM3TT1B, int_aarch64_crypto_sm3tt1b>;
1088def : SM3TT_pattern<SM3TT2A, int_aarch64_crypto_sm3tt2a>;
1089def : SM3TT_pattern<SM3TT2B, int_aarch64_crypto_sm3tt2b>;
1090
1091def : SM4_pattern<SM4ENCKEY, int_aarch64_crypto_sm4ekey>;
1092def : SM4_pattern<SM4E, int_aarch64_crypto_sm4e>;
1093} // HasSM4
1094
1095let Predicates = [HasRCPC] in {
1096  // v8.3 Release Consistent processor consistent (RCpc) support, optional in v8.2.
1097  def LDAPRB  : RCPCLoad<0b00, "ldaprb", GPR32>;
1098  def LDAPRH  : RCPCLoad<0b01, "ldaprh", GPR32>;
1099  def LDAPRW  : RCPCLoad<0b10, "ldapr", GPR32>;
1100  def LDAPRX  : RCPCLoad<0b11, "ldapr", GPR64>;
1101}
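// Illustrative note: LDAPR is a load-acquire with the weaker RCpc ordering, so
//   ldapr w0, [x1]
// may, unlike "ldar w0, [x1]", be reordered with an earlier STLR to a
// different address; that is still sufficient for C/C++ memory_order_acquire.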
1102
1103// v8.3a complex add and multiply-accumulate. No predicate here; that is done
1104// inside the multiclass, as the FP16 versions need different predicates.
1105defm FCMLA : SIMDThreeSameVectorTiedComplexHSD<1, 0b110, complexrotateop,
1106                                               "fcmla", null_frag>;
1107defm FCADD : SIMDThreeSameVectorComplexHSD<1, 0b111, complexrotateopodd,
1108                                           "fcadd", null_frag>;
1109defm FCMLA : SIMDIndexedTiedComplexHSD<0, 1, complexrotateop, "fcmla">;
1110
1111let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
1112  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot90 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
1113            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 0))>;
1114  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot270 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
1115            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 1))>;
1116  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot90 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
1117            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 0))>;
1118  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot270 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
1119            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 1))>;
1120}
1121
1122let Predicates = [HasComplxNum, HasNEON] in {
1123  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot90 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
1124            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 0))>;
1125  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot270 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
1126            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 1))>;
1127  foreach Ty = [v4f32, v2f64] in {
1128    def : Pat<(Ty (int_aarch64_neon_vcadd_rot90 (Ty V128:$Rn), (Ty V128:$Rm))),
1129              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 0))>;
1130    def : Pat<(Ty (int_aarch64_neon_vcadd_rot270 (Ty V128:$Rn), (Ty V128:$Rm))),
1131              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 1))>;
1132  }
1133}
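// Illustrative mapping for the rotation operand used above: the trailing
// immediate selects the rotation, so
//   fcadd v0.2s, v1.2s, v2.2s, #90     // rotation operand (i32 0)
//   fcadd v0.2s, v1.2s, v2.2s, #270    // rotation operand (i32 1)
// which is why vcadd_rot90/vcadd_rot270 pass 0 and 1 respectively.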
1134
1135multiclass FCMLA_PATS<ValueType ty, DAGOperand Reg> {
1136  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1137            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 0)>;
1138  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1139            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 1)>;
1140  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1141            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 2)>;
1142  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1143            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 3)>;
1144}
1145
1146multiclass FCMLA_LANE_PATS<ValueType ty, DAGOperand Reg, dag RHSDup> {
1147  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1148            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 0)>;
1149  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1150            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 1)>;
1151  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1152            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 2)>;
1153  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1154            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 3)>;
1155}
1156
1157
1158let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
1159  defm : FCMLA_PATS<v4f16, V64>;
1160  defm : FCMLA_PATS<v8f16, V128>;
1161
1162  defm : FCMLA_LANE_PATS<v4f16, V64,
1163                         (v4f16 (bitconvert (v2i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexD:$idx))))>;
1164  defm : FCMLA_LANE_PATS<v8f16, V128,
1165                         (v8f16 (bitconvert (v4i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))>;
1166}
1167let Predicates = [HasComplxNum, HasNEON] in {
1168  defm : FCMLA_PATS<v2f32, V64>;
1169  defm : FCMLA_PATS<v4f32, V128>;
1170  defm : FCMLA_PATS<v2f64, V128>;
1171
1172  defm : FCMLA_LANE_PATS<v4f32, V128,
1173                         (v4f32 (bitconvert (v2i64 (AArch64duplane64 (v2i64 V128:$Rm), VectorIndexD:$idx))))>;
1174}
1175
1176// v8.3a Pointer Authentication
1177// These instructions inhabit part of the hint space and so can be used for
1178// armv8 targets. Keeping the old HINT mnemonic when compiling without PA is
1179// important for compatibility with other assemblers (e.g. GAS) when building
1180// software that must run on CPUs both with and without PA.
1181let Uses = [LR], Defs = [LR] in {
1182  def PACIAZ   : SystemNoOperands<0b000, "hint\t#24">;
1183  def PACIBZ   : SystemNoOperands<0b010, "hint\t#26">;
1184  let isAuthenticated = 1 in {
1185    def AUTIAZ   : SystemNoOperands<0b100, "hint\t#28">;
1186    def AUTIBZ   : SystemNoOperands<0b110, "hint\t#30">;
1187  }
1188}
1189let Uses = [LR, SP], Defs = [LR] in {
1190  def PACIASP  : SystemNoOperands<0b001, "hint\t#25">;
1191  def PACIBSP  : SystemNoOperands<0b011, "hint\t#27">;
1192  let isAuthenticated = 1 in {
1193    def AUTIASP  : SystemNoOperands<0b101, "hint\t#29">;
1194    def AUTIBSP  : SystemNoOperands<0b111, "hint\t#31">;
1195  }
1196}
1197let Uses = [X16, X17], Defs = [X17], CRm = 0b0001 in {
1198  def PACIA1716  : SystemNoOperands<0b000, "hint\t#8">;
1199  def PACIB1716  : SystemNoOperands<0b010, "hint\t#10">;
1200  let isAuthenticated = 1 in {
1201    def AUTIA1716  : SystemNoOperands<0b100, "hint\t#12">;
1202    def AUTIB1716  : SystemNoOperands<0b110, "hint\t#14">;
1203  }
1204}
1205
1206let Uses = [LR], Defs = [LR], CRm = 0b0000 in {
1207  def XPACLRI   : SystemNoOperands<0b111, "hint\t#7">;
1208}
1209
1210// To keep assembly readable, LLVM should accept inputs that use pointer
1211// authentication mnemonics even with PA disabled. However, to stay
1212// compatible with other assemblers (e.g. GAS), LLVM should not emit these
1213// mnemonics unless PA is enabled.
1214def : InstAlias<"paciaz", (PACIAZ), 0>;
1215def : InstAlias<"pacibz", (PACIBZ), 0>;
1216def : InstAlias<"autiaz", (AUTIAZ), 0>;
1217def : InstAlias<"autibz", (AUTIBZ), 0>;
1218def : InstAlias<"paciasp", (PACIASP), 0>;
1219def : InstAlias<"pacibsp", (PACIBSP), 0>;
1220def : InstAlias<"autiasp", (AUTIASP), 0>;
1221def : InstAlias<"autibsp", (AUTIBSP), 0>;
1222def : InstAlias<"pacia1716", (PACIA1716), 0>;
1223def : InstAlias<"pacib1716", (PACIB1716), 0>;
1224def : InstAlias<"autia1716", (AUTIA1716), 0>;
1225def : InstAlias<"autib1716", (AUTIB1716), 0>;
1226def : InstAlias<"xpaclri", (XPACLRI), 0>;
1227
1228// These pointer authentication instructions require armv8.3a
1229let Predicates = [HasPAuth] in {
1230
1231  // When PA is enabled, a better mnemonic should be emitted.
1232  def : InstAlias<"paciaz", (PACIAZ), 1>;
1233  def : InstAlias<"pacibz", (PACIBZ), 1>;
1234  def : InstAlias<"autiaz", (AUTIAZ), 1>;
1235  def : InstAlias<"autibz", (AUTIBZ), 1>;
1236  def : InstAlias<"paciasp", (PACIASP), 1>;
1237  def : InstAlias<"pacibsp", (PACIBSP), 1>;
1238  def : InstAlias<"autiasp", (AUTIASP), 1>;
1239  def : InstAlias<"autibsp", (AUTIBSP), 1>;
1240  def : InstAlias<"pacia1716", (PACIA1716), 1>;
1241  def : InstAlias<"pacib1716", (PACIB1716), 1>;
1242  def : InstAlias<"autia1716", (AUTIA1716), 1>;
1243  def : InstAlias<"autib1716", (AUTIB1716), 1>;
1244  def : InstAlias<"xpaclri", (XPACLRI), 1>;
1245
1246  multiclass SignAuth<bits<3> prefix, bits<3> prefix_z, string asm,
1247                      SDPatternOperator op> {
1248    def IA   : SignAuthOneData<prefix, 0b00, !strconcat(asm,  "ia"), op>;
1249    def IB   : SignAuthOneData<prefix, 0b01, !strconcat(asm,  "ib"), op>;
1250    def DA   : SignAuthOneData<prefix, 0b10, !strconcat(asm,  "da"), op>;
1251    def DB   : SignAuthOneData<prefix, 0b11, !strconcat(asm,  "db"), op>;
1252    def IZA  : SignAuthZero<prefix_z,  0b00, !strconcat(asm, "iza"), op>;
1253    def DZA  : SignAuthZero<prefix_z,  0b10, !strconcat(asm, "dza"), op>;
1254    def IZB  : SignAuthZero<prefix_z,  0b01, !strconcat(asm, "izb"), op>;
1255    def DZB  : SignAuthZero<prefix_z,  0b11, !strconcat(asm, "dzb"), op>;
1256  }
1257
1258  defm PAC : SignAuth<0b000, 0b010, "pac", int_ptrauth_sign>;
1259  defm AUT : SignAuth<0b001, 0b011, "aut", null_frag>;
1260
1261  def XPACI : ClearAuth<0, "xpaci">;
1262  def XPACD : ClearAuth<1, "xpacd">;
1263
1264  def PACGA : SignAuthTwoOperand<0b1100, "pacga", int_ptrauth_sign_generic>;
1265
1266  // Combined Instructions
1267  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1  in {
1268    def BRAA    : AuthBranchTwoOperands<0, 0, "braa">;
1269    def BRAB    : AuthBranchTwoOperands<0, 1, "brab">;
1270  }
1271  let isCall = 1, Defs = [LR], Uses = [SP] in {
1272    def BLRAA   : AuthBranchTwoOperands<1, 0, "blraa">;
1273    def BLRAB   : AuthBranchTwoOperands<1, 1, "blrab">;
1274  }
1275
1276  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1  in {
1277    def BRAAZ   : AuthOneOperand<0b000, 0, "braaz">;
1278    def BRABZ   : AuthOneOperand<0b000, 1, "brabz">;
1279  }
1280  let isCall = 1, Defs = [LR], Uses = [SP] in {
1281    def BLRAAZ  : AuthOneOperand<0b001, 0, "blraaz">;
1282    def BLRABZ  : AuthOneOperand<0b001, 1, "blrabz">;
1283  }
1284
1285  let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
1286    def RETAA   : AuthReturn<0b010, 0, "retaa">;
1287    def RETAB   : AuthReturn<0b010, 1, "retab">;
1288    def ERETAA  : AuthReturn<0b100, 0, "eretaa">;
1289    def ERETAB  : AuthReturn<0b100, 1, "eretab">;
1290  }
1291
1292  defm LDRAA  : AuthLoad<0, "ldraa", simm10Scaled>;
1293  defm LDRAB  : AuthLoad<1, "ldrab", simm10Scaled>;
1294
1295}
1296
1297// v8.3a floating point conversion for JavaScript
1298let Predicates = [HasJS, HasFPARMv8], Defs = [NZCV] in
1299def FJCVTZS  : BaseFPToIntegerUnscaled<0b01, 0b11, 0b110, FPR64, GPR32,
1300                                      "fjcvtzs",
1301                                      [(set GPR32:$Rd,
1302                                         (int_aarch64_fjcvtzs FPR64:$Rn))]> {
1303  let Inst{31} = 0;
1304} // HasJS, HasFPARMv8
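// Usage sketch: FJCVTZS performs the JavaScript double-to-int32 conversion,
// wrapping modulo 2^32 where FCVTZS would saturate, e.g.
//   fjcvtzs w0, d0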
1305
1306// v8.4 Flag manipulation instructions
1307let Predicates = [HasFlagM], Defs = [NZCV], Uses = [NZCV] in {
1308def CFINV : SimpleSystemI<0, (ins), "cfinv", "">, Sched<[WriteSys]> {
1309  let Inst{20-5} = 0b0000001000000000;
1310}
1311def SETF8  : BaseFlagManipulation<0, 0, (ins GPR32:$Rn), "setf8", "{\t$Rn}">;
1312def SETF16 : BaseFlagManipulation<0, 1, (ins GPR32:$Rn), "setf16", "{\t$Rn}">;
1313def RMIF   : FlagRotate<(ins GPR64:$Rn, uimm6:$imm, imm0_15:$mask), "rmif",
1314                        "{\t$Rn, $imm, $mask}">;
1315} // HasFlagM
1316
1317// v8.5 flag manipulation instructions
1318let Predicates = [HasAltNZCV], Uses = [NZCV], Defs = [NZCV] in {
1319
1320def XAFLAG : PstateWriteSimple<(ins), "xaflag", "">, Sched<[WriteSys]> {
1321  let Inst{18-16} = 0b000;
1322  let Inst{11-8} = 0b0000;
1323  let Unpredictable{11-8} = 0b1111;
1324  let Inst{7-5} = 0b001;
1325}
1326
1327def AXFLAG : PstateWriteSimple<(ins), "axflag", "">, Sched<[WriteSys]> {
1328  let Inst{18-16} = 0b000;
1329  let Inst{11-8} = 0b0000;
1330  let Unpredictable{11-8} = 0b1111;
1331  let Inst{7-5} = 0b010;
1332}
1333} // HasAltNZCV
1334
1335
1336// Armv8.5-A speculation barrier
1337def SB : SimpleSystemI<0, (ins), "sb", "">, Sched<[]> {
1338  let Inst{20-5} = 0b0001100110000111;
1339  let Unpredictable{11-8} = 0b1111;
1340  let Predicates = [HasSB];
1341  let hasSideEffects = 1;
1342}
1343
1344def : InstAlias<"clrex", (CLREX 0xf)>;
1345def : InstAlias<"isb", (ISB 0xf)>;
1346def : InstAlias<"ssbb", (DSB 0)>;
1347def : InstAlias<"pssbb", (DSB 4)>;
1348def : InstAlias<"dfb", (DSB 0b1100)>, Requires<[HasV8_0r]>;
1349
1350def MRS    : MRSI;
1351def MSR    : MSRI;
1352def MSRpstateImm1 : MSRpstateImm0_1;
1353def MSRpstateImm4 : MSRpstateImm0_15;
1354
1355def : Pat<(AArch64mrs imm:$id),
1356          (MRS imm:$id)>;
1357
1358// The thread pointer (on Linux, at least, where this has been implemented) is
1359// TPIDR_EL0.
1360def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
1361                       [(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;
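// On targets using TPIDR_EL0 (e.g. Linux), the pseudo above expands to a plain
// system-register read:
//   mrs x0, TPIDR_EL0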
1362
1363let Uses = [ X9 ], Defs = [ X16, X17, LR, NZCV ] in {
1364def HWASAN_CHECK_MEMACCESS : Pseudo<
1365  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
1366  [(int_hwasan_check_memaccess X9, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
1367  Sched<[]>;
1368}
1369
1370let Uses = [ X20 ], Defs = [ X16, X17, LR, NZCV ] in {
1371def HWASAN_CHECK_MEMACCESS_SHORTGRANULES : Pseudo<
1372  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
1373  [(int_hwasan_check_memaccess_shortgranules X20, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
1374  Sched<[]>;
1375}
1376
1377// The cycle counter PMC register is PMCCNTR_EL0.
1378let Predicates = [HasPerfMon] in
1379def : Pat<(readcyclecounter), (MRS 0xdce8)>;
1380
1381// FPCR register
1382def : Pat<(i64 (int_aarch64_get_fpcr)), (MRS 0xda20)>;
1383def : Pat<(int_aarch64_set_fpcr i64:$val), (MSR 0xda20, GPR64:$val)>;
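// The magic constants in the three patterns above are system-register
// encodings: 0xdce8 is PMCCNTR_EL0 and 0xda20 is FPCR, so e.g. the get_fpcr
// pattern ultimately emits
//   mrs x0, FPCR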
1384
1385// Generic system instructions
1386def SYSxt  : SystemXtI<0, "sys">;
1387def SYSLxt : SystemLXtI<1, "sysl">;
1388
1389def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
1390                (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
1391                 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;
1392
1393
1394let Predicates = [HasTME] in {
1395
1396def TSTART : TMSystemI<0b0000, "tstart",
1397                      [(set GPR64:$Rt, (int_aarch64_tstart))]>;
1398
1399def TCOMMIT : TMSystemINoOperand<0b0000, "tcommit", [(int_aarch64_tcommit)]>;
1400
1401def TCANCEL : TMSystemException<0b011, "tcancel",
1402                                [(int_aarch64_tcancel timm64_0_65535:$imm)]>;
1403
1404def TTEST : TMSystemI<0b0001, "ttest", [(set GPR64:$Rt, (int_aarch64_ttest))]> {
1405  let mayLoad = 0;
1406  let mayStore = 0;
1407}
1408} // HasTME
1409
1410//===----------------------------------------------------------------------===//
1411// Move immediate instructions.
1412//===----------------------------------------------------------------------===//
1413
1414defm MOVK : InsertImmediate<0b11, "movk">;
1415defm MOVN : MoveImmediate<0b00, "movn">;
1416
1417let PostEncoderMethod = "fixMOVZ" in
1418defm MOVZ : MoveImmediate<0b10, "movz">;
1419
1420// First group of aliases covers an implicit "lsl #0".
1421def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, timm32_0_65535:$imm, 0), 0>;
1422def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, timm32_0_65535:$imm, 0), 0>;
1423def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, timm32_0_65535:$imm, 0)>;
1424def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, timm32_0_65535:$imm, 0)>;
1425def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, timm32_0_65535:$imm, 0)>;
1426def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, timm32_0_65535:$imm, 0)>;
1427
1428// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
1429def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
1430def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
1431def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
1432def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;
1433
1434def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
1435def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
1436def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
1437def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;
1438
1439def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g3:$sym, 48), 0>;
1440def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g2:$sym, 32), 0>;
1441def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g1:$sym, 16), 0>;
1442def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g0:$sym, 0), 0>;
1443
1444def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
1445def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;
1446
1447def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
1448def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;
1449
1450def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g1:$sym, 16), 0>;
1451def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g0:$sym, 0), 0>;
1452
1453// Final group of aliases covers true "mov $Rd, $imm" cases.
1454multiclass movw_mov_alias<string basename,Instruction INST, RegisterClass GPR,
1455                          int width, int shift> {
1456  def _asmoperand : AsmOperandClass {
1457    let Name = basename # width # "_lsl" # shift # "MovAlias";
1458    let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
1459                               # shift # ">";
1460    let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
1461  }
1462
1463  def _movimm : Operand<i32> {
1464    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
1465  }
1466
1467  def : InstAlias<"mov $Rd, $imm",
1468                  (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
1469}
1470
1471defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
1472defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;
1473
1474defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
1475defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
1476defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
1477defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;
1478
1479defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
1480defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;
1481
1482defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
1483defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
1484defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
1485defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;
1486
1487let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
1488    isAsCheapAsAMove = 1 in {
1489// FIXME: The following pseudo instructions are only needed because remat
1490// cannot handle multiple instructions.  When that changes, we can select
1491// directly to the real instructions and get rid of these pseudos.
1492
1493def MOVi32imm
1494    : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
1495             [(set GPR32:$dst, imm:$src)]>,
1496      Sched<[WriteImm]>;
1497def MOVi64imm
1498    : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
1499             [(set GPR64:$dst, imm:$src)]>,
1500      Sched<[WriteImm]>;
1501} // isReMaterializable, isCodeGenOnly
1502
1503// If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
1504// eventual expansion code fewer bits to worry about getting right. Marshalling
1505// the types is a little tricky though:
1506def i64imm_32bit : ImmLeaf<i64, [{
1507  return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
1508}]>;
1509
1510def s64imm_32bit : ImmLeaf<i64, [{
1511  int64_t Imm64 = static_cast<int64_t>(Imm);
1512  return Imm64 >= std::numeric_limits<int32_t>::min() &&
1513         Imm64 <= std::numeric_limits<int32_t>::max();
1514}]>;
1515
1516def trunc_imm : SDNodeXForm<imm, [{
1517  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
1518}]>;
1519
1520def gi_trunc_imm : GICustomOperandRenderer<"renderTruncImm">,
1521  GISDNodeXFormEquiv<trunc_imm>;
1522
1523let Predicates = [OptimizedGISelOrOtherSelector] in {
1524// The SUBREG_TO_REG isn't eliminated at -O0, which can result in pointless
1525// copies.
1526def : Pat<(i64 i64imm_32bit:$src),
1527          (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
1528}
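// Worked example: writes to a W register implicitly zero bits [63:32], so an
// i64 constant with a clear top half, e.g. 0x12345, can be built as
//   movz w0, #0x2345
//   movk w0, #0x1, lsl #16     // w0 = 0x12345; upper 32 bits of x0 are zero
// with SUBREG_TO_REG merely relabelling the result as an X register.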
1529
1530// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
1531def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
1532return CurDAG->getTargetConstant(
1533  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
1534}]>;
1535
1536def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
1537return CurDAG->getTargetConstant(
1538  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
1539}]>;
1540
1541
1542def : Pat<(f32 fpimm:$in),
1543  (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
1544def : Pat<(f64 fpimm:$in),
1545  (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;
1546
1547
1548// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
1549// sequences.
1550def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
1551                             tglobaladdr:$g1, tglobaladdr:$g0),
1552          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g0, 0),
1553                                  tglobaladdr:$g1, 16),
1554                          tglobaladdr:$g2, 32),
1555                  tglobaladdr:$g3, 48)>;
1556
1557def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
1558                             tblockaddress:$g1, tblockaddress:$g0),
1559          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g0, 0),
1560                                  tblockaddress:$g1, 16),
1561                          tblockaddress:$g2, 32),
1562                  tblockaddress:$g3, 48)>;
1563
1564def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
1565                             tconstpool:$g1, tconstpool:$g0),
1566          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g0, 0),
1567                                  tconstpool:$g1, 16),
1568                          tconstpool:$g2, 32),
1569                  tconstpool:$g3, 48)>;
1570
1571def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
1572                             tjumptable:$g1, tjumptable:$g0),
1573          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g0, 0),
1574                                  tjumptable:$g1, 16),
1575                          tjumptable:$g2, 32),
1576                  tjumptable:$g3, 48)>;
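// The same MOVZ + 3x MOVK chunking materializes any 64-bit value 16 bits at a
// time, e.g. for an arbitrary illustrative constant:
//   movz x0, #0x7788
//   movk x0, #0x5566, lsl #16
//   movk x0, #0x3344, lsl #32
//   movk x0, #0x1122, lsl #48    // x0 = 0x1122334455667788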
1577
1578
1579//===----------------------------------------------------------------------===//
1580// Arithmetic instructions.
1581//===----------------------------------------------------------------------===//
1582
1583// Add/subtract with carry.
1584defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
1585defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;
1586
1587def : InstAlias<"ngc $dst, $src",  (SBCWr  GPR32:$dst, WZR, GPR32:$src)>;
1588def : InstAlias<"ngc $dst, $src",  (SBCXr  GPR64:$dst, XZR, GPR64:$src)>;
1589def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
1590def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;
1591
1592// Add/subtract
1593defm ADD : AddSub<0, "add", "sub", add>;
1594defm SUB : AddSub<1, "sub", "add">;
1595
1596def : InstAlias<"mov $dst, $src",
1597                (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
1598def : InstAlias<"mov $dst, $src",
1599                (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
1600def : InstAlias<"mov $dst, $src",
1601                (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
1602def : InstAlias<"mov $dst, $src",
1603                (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;
1604
1605defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
1606defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;
1607
1608// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
1609def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
1610          (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
1611def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
1612          (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
1613def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
1614          (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
1615def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
1616          (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
1617def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
1618          (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
1619def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
1620          (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
1621let AddedComplexity = 1 in {
1622def : Pat<(sub GPR32sp:$R2, arith_extended_reg32_i32:$R3),
1623          (SUBSWrx GPR32sp:$R2, arith_extended_reg32_i32:$R3)>;
1624def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64_i64:$R3),
1625          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64_i64:$R3)>;
1626}
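// Illustrative payoff of the SUBS selection above: "cmp" is just SUBS with a
// zero-register destination, so
//   sub w2, w0, w1
//   cmp w0, w1
// can share a single "subs w2, w0, w1" once the sub is selected as SUBSWrr.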
1627
1628// Because of the immediate format for add/sub-imm instructions, the
1629// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
1630//  These patterns capture that transformation.
1631let AddedComplexity = 1 in {
1632def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1633          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1634def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1635          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1636def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1637          (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1638def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1639          (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1640}
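// For example, the patterns above rewrite the unencodable immediate in
//   add w0, w0, #-8
// into the equivalent, encodable
//   subs w0, w0, #8     // SUBSWri; see the CSE note on SUBS above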
1641
1642// The same immediate-format restriction applies to the flag-setting forms,
1643// e.g. (add_flag x, -1) must become (SUBS{W,X}ri x, 1).
1644// These patterns capture that transformation.
1645let AddedComplexity = 1 in {
1646def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1647          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1648def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1649          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1650def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1651          (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1652def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1653          (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1654}
1655
1656def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1657def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1658def : InstAlias<"neg $dst, $src$shift",
1659                (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1660def : InstAlias<"neg $dst, $src$shift",
1661                (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1662
1663def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1664def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1665def : InstAlias<"negs $dst, $src$shift",
1666                (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1667def : InstAlias<"negs $dst, $src$shift",
1668                (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1669
1670
1671// Unsigned/Signed divide
1672defm UDIV : Div<0, "udiv", udiv>;
1673defm SDIV : Div<1, "sdiv", sdiv>;
1674
1675def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr GPR32:$Rn, GPR32:$Rm)>;
1676def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr GPR64:$Rn, GPR64:$Rm)>;
1677def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr GPR32:$Rn, GPR32:$Rm)>;
1678def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr GPR64:$Rn, GPR64:$Rm)>;
1679
1680// Variable shift
1681defm ASRV : Shift<0b10, "asr", sra>;
1682defm LSLV : Shift<0b00, "lsl", shl>;
1683defm LSRV : Shift<0b01, "lsr", srl>;
1684defm RORV : Shift<0b11, "ror", rotr>;
1685
1686def : ShiftAlias<"asrv", ASRVWr, GPR32>;
1687def : ShiftAlias<"asrv", ASRVXr, GPR64>;
1688def : ShiftAlias<"lslv", LSLVWr, GPR32>;
1689def : ShiftAlias<"lslv", LSLVXr, GPR64>;
1690def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
1691def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
1692def : ShiftAlias<"rorv", RORVWr, GPR32>;
1693def : ShiftAlias<"rorv", RORVXr, GPR64>;
1694
1695// Multiply-add
1696let AddedComplexity = 5 in {
1697defm MADD : MulAccum<0, "madd">;
1698defm MSUB : MulAccum<1, "msub">;
1699
1700def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
1701          (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1702def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
1703          (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1704
1705def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
1706          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1707def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
1708          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1709def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
1710          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1711def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
1712          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1713} // AddedComplexity = 5
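// The plain multiply forms above are accumulate-with-zero, e.g.
//   mul  w0, w1, w2    // madd w0, w1, w2, wzr
//   mneg w0, w1, w2    // msub w0, w1, w2, wzr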
1714
1715let AddedComplexity = 5 in {
1716def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
1717def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
1718def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
1719def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;
1720
1721def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext_inreg GPR64:$Rm, i32))),
1722          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
1723def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext GPR32:$Rm))),
1724          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
1725def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
1726          (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1727def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (and GPR64:$Rm, 0xFFFFFFFF))),
1728          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
1729def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (zext GPR32:$Rm))),
1730          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
1731def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
1732          (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1733
1734def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
1735          (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1736def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
1737          (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1738
1739def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
1740          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1741def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
1742          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1743def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
1744          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1745                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1746
1747def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1748          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1749def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1750          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1751def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
1752          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1753                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1754
1755def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
1756          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1757def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
1758          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1759def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
1760                    GPR64:$Ra)),
1761          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1762                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1763
1764def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1765          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1766def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1767          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1768def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
1769                                    (s64imm_32bit:$C)))),
1770          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1771                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1772} // AddedComplexity = 5
1773
1774def : MulAccumWAlias<"mul", MADDWrrr>;
1775def : MulAccumXAlias<"mul", MADDXrrr>;
1776def : MulAccumWAlias<"mneg", MSUBWrrr>;
1777def : MulAccumXAlias<"mneg", MSUBXrrr>;
1778def : WideMulAccumAlias<"smull", SMADDLrrr>;
1779def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
1780def : WideMulAccumAlias<"umull", UMADDLrrr>;
1781def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;
1782
1783// Multiply-high
1784def SMULHrr : MulHi<0b010, "smulh", mulhs>;
1785def UMULHrr : MulHi<0b110, "umulh", mulhu>;
1786
1787// CRC32
1788def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
1789def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
1790def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
1791def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;
1792
1793def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
1794def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
1795def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
1796def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;
1797
1798// v8.1 atomic CAS
1799defm CAS   : CompareAndSwap<0, 0, "">;
1800defm CASA  : CompareAndSwap<1, 0, "a">;
1801defm CASL  : CompareAndSwap<0, 1, "l">;
1802defm CASAL : CompareAndSwap<1, 1, "al">;
1803
1804// v8.1 atomic CASP
1805defm CASP   : CompareAndSwapPair<0, 0, "">;
1806defm CASPA  : CompareAndSwapPair<1, 0, "a">;
1807defm CASPL  : CompareAndSwapPair<0, 1, "l">;
1808defm CASPAL : CompareAndSwapPair<1, 1, "al">;
1809
1810// v8.1 atomic SWP
1811defm SWP   : Swap<0, 0, "">;
1812defm SWPA  : Swap<1, 0, "a">;
1813defm SWPL  : Swap<0, 1, "l">;
1814defm SWPAL : Swap<1, 1, "al">;
1815
1816// v8.1 atomic LD<OP>(register): atomically loads the old value into Rt and stores the <OP> result back.
1817defm LDADD   : LDOPregister<0b000, "add", 0, 0, "">;
1818defm LDADDA  : LDOPregister<0b000, "add", 1, 0, "a">;
1819defm LDADDL  : LDOPregister<0b000, "add", 0, 1, "l">;
1820defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;
1821
1822defm LDCLR   : LDOPregister<0b001, "clr", 0, 0, "">;
1823defm LDCLRA  : LDOPregister<0b001, "clr", 1, 0, "a">;
1824defm LDCLRL  : LDOPregister<0b001, "clr", 0, 1, "l">;
1825defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;
1826
1827defm LDEOR   : LDOPregister<0b010, "eor", 0, 0, "">;
1828defm LDEORA  : LDOPregister<0b010, "eor", 1, 0, "a">;
1829defm LDEORL  : LDOPregister<0b010, "eor", 0, 1, "l">;
1830defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;
1831
1832defm LDSET   : LDOPregister<0b011, "set", 0, 0, "">;
1833defm LDSETA  : LDOPregister<0b011, "set", 1, 0, "a">;
1834defm LDSETL  : LDOPregister<0b011, "set", 0, 1, "l">;
1835defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;
1836
1837defm LDSMAX   : LDOPregister<0b100, "smax", 0, 0, "">;
1838defm LDSMAXA  : LDOPregister<0b100, "smax", 1, 0, "a">;
1839defm LDSMAXL  : LDOPregister<0b100, "smax", 0, 1, "l">;
1840defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;
1841
1842defm LDSMIN   : LDOPregister<0b101, "smin", 0, 0, "">;
1843defm LDSMINA  : LDOPregister<0b101, "smin", 1, 0, "a">;
1844defm LDSMINL  : LDOPregister<0b101, "smin", 0, 1, "l">;
1845defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;
1846
1847defm LDUMAX   : LDOPregister<0b110, "umax", 0, 0, "">;
1848defm LDUMAXA  : LDOPregister<0b110, "umax", 1, 0, "a">;
1849defm LDUMAXL  : LDOPregister<0b110, "umax", 0, 1, "l">;
1850defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;
1851
1852defm LDUMIN   : LDOPregister<0b111, "umin", 0, 0, "">;
1853defm LDUMINA  : LDOPregister<0b111, "umin", 1, 0, "a">;
1854defm LDUMINL  : LDOPregister<0b111, "umin", 0, 1, "l">;
1855defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;
1856
1857// v8.1 atomic ST<OP>(register), defined as aliases for "LD<OP>(register)" with Rt = XZR/WZR.
1858defm : STOPregister<"stadd","LDADD">; // STADDx
1859defm : STOPregister<"stclr","LDCLR">; // STCLRx
1860defm : STOPregister<"steor","LDEOR">; // STEORx
1861defm : STOPregister<"stset","LDSET">; // STSETx
1862defm : STOPregister<"stsmax","LDSMAX">;// STSMAXx
1863defm : STOPregister<"stsmin","LDSMIN">;// STSMINx
1864defm : STOPregister<"stumax","LDUMAX">;// STUMAXx
1865defm : STOPregister<"stumin","LDUMIN">;// STUMINx
1866
1867// v8.5 Memory Tagging Extension
1868let Predicates = [HasMTE] in {
1869
1870def IRG   : BaseTwoOperand<0b0100, GPR64sp, "irg", int_aarch64_irg, GPR64sp, GPR64>,
1871            Sched<[]>{
1872  let Inst{31} = 1;
1873}
1874def GMI   : BaseTwoOperand<0b0101, GPR64, "gmi", int_aarch64_gmi, GPR64sp>, Sched<[]>{
1875  let Inst{31} = 1;
1876  let isNotDuplicable = 1;
1877}
1878def ADDG  : AddSubG<0, "addg", null_frag>;
1879def SUBG  : AddSubG<1, "subg", null_frag>;
1880
1881def : InstAlias<"irg $dst, $src", (IRG GPR64sp:$dst, GPR64sp:$src, XZR), 1>;
1882
1883def SUBP : SUBP<0, "subp", int_aarch64_subp>, Sched<[]>;
1884def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]>{
1885  let Defs = [NZCV];
1886}
1887
1888def : InstAlias<"cmpp $lhs, $rhs", (SUBPS XZR, GPR64sp:$lhs, GPR64sp:$rhs), 0>;
1889
1890def LDG : MemTagLoad<"ldg", "\t$Rt, [$Rn, $offset]">;
1891
1892def : Pat<(int_aarch64_addg (am_indexedu6s128 GPR64sp:$Rn, uimm6s16:$imm6), imm0_15:$imm4),
1893          (ADDG GPR64sp:$Rn, imm0_63:$imm6, imm0_15:$imm4)>;
1894def : Pat<(int_aarch64_ldg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn,  simm9s16:$offset)),
1895          (LDG GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;
1896
1897def : InstAlias<"ldg $Rt, [$Rn]", (LDG GPR64:$Rt, GPR64sp:$Rn, 0), 1>;
1898
1899def LDGM : MemTagVector<1, "ldgm", "\t$Rt, [$Rn]",
1900                   (outs GPR64:$Rt), (ins GPR64sp:$Rn)>;
1901def STGM : MemTagVector<0, "stgm", "\t$Rt, [$Rn]",
1902                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)>;
1903def STZGM : MemTagVector<0, "stzgm", "\t$Rt, [$Rn]",
1904                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)> {
1905  let Inst{23} = 0;
1906}
1907
1908defm STG   : MemTagStore<0b00, "stg">;
1909defm STZG  : MemTagStore<0b01, "stzg">;
1910defm ST2G  : MemTagStore<0b10, "st2g">;
1911defm STZ2G : MemTagStore<0b11, "stz2g">;
1912
1913def : Pat<(AArch64stg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1914          (STGOffset $Rn, $Rm, $imm)>;
1915def : Pat<(AArch64stzg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1916          (STZGOffset $Rn, $Rm, $imm)>;
1917def : Pat<(AArch64st2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1918          (ST2GOffset $Rn, $Rm, $imm)>;
1919def : Pat<(AArch64stz2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1920          (STZ2GOffset $Rn, $Rm, $imm)>;
1921
1922defm STGP     : StorePairOffset <0b01, 0, GPR64z, simm7s16, "stgp">;
1923def  STGPpre  : StorePairPreIdx <0b01, 0, GPR64z, simm7s16, "stgp">;
1924def  STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">;
1925
1926def : Pat<(int_aarch64_stg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)),
1927          (STGOffset GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;
1928
1929def : Pat<(int_aarch64_stgp (am_indexed7s128 GPR64sp:$Rn, simm7s16:$imm), GPR64:$Rt, GPR64:$Rt2),
1930          (STGPi $Rt, $Rt2, $Rn, $imm)>;
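// Minimal usage sketch for the tagging instructions above (registers
// illustrative):
//   irg x0, sp         // x0 = sp with a fresh random logical tag
//   stg x0, [x0]       // write x0's tag to the 16-byte granule at x0
//   ldr x1, [x0]       // access is tag-checked when MTE checking is enabled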
1931
1932def IRGstack
1933    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rsp, GPR64:$Rm), []>,
1934      Sched<[]>;
1935def TAGPstack
1936    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rn, uimm6s16:$imm6, GPR64sp:$Rm, imm0_15:$imm4), []>,
1937      Sched<[]>;
1938
1939// Explicit SP in the first operand prevents ShrinkWrap optimization
1940// from leaving this instruction out of the stack frame. When IRGstack
1941// is transformed into IRG, this operand is replaced with the actual
1942// register / expression for the tagged base pointer of the current function.
1943def : Pat<(int_aarch64_irg_sp i64:$Rm), (IRGstack SP, i64:$Rm)>;
1944
1945// Large STG to be expanded into a loop. $sz is the size, $Rn is the start
1946// address. $Rn_wback is one past the end of the range. $Rm is the loop counter.
1947let isCodeGenOnly=1, mayStore=1 in {
1948def STGloop_wback
1949    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
1950             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
1951      Sched<[WriteAdr, WriteST]>;
1952
1953def STZGloop_wback
1954    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
1955             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
1956      Sched<[WriteAdr, WriteST]>;
1957
1958// Variants of the above where $Rn2 is an independent register not tied to the input register $Rn.
1959// Their purpose is to allow a FrameIndex operand as $Rn (which of course cannot be written back).
1960def STGloop
1961    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
1962             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
1963      Sched<[WriteAdr, WriteST]>;
1964
1965def STZGloop
1966    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
1967             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
1968      Sched<[WriteAdr, WriteST]>;
1969}
1970
1971} // Predicates = [HasMTE]
1972
1973//===----------------------------------------------------------------------===//
1974// Logical instructions.
1975//===----------------------------------------------------------------------===//
1976
1977// (immediate)
1978defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
1979defm AND  : LogicalImm<0b00, "and", and, "bic">;
1980defm EOR  : LogicalImm<0b10, "eor", xor, "eon">;
1981defm ORR  : LogicalImm<0b01, "orr", or, "orn">;
1982
1983// FIXME: these aliases *are* canonical sometimes (when movz can't be
1984// used). Actually, it seems to be working right now, but putting logical_immXX
1985// here is a bit dodgy on the AsmParser side too.
1986def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
1987                                          logical_imm32:$imm), 0>;
1988def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
1989                                          logical_imm64:$imm), 0>;
1990
1991
1992// (register)
1993defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
1994defm BICS : LogicalRegS<0b11, 1, "bics",
1995                        BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
1996defm AND  : LogicalReg<0b00, 0, "and", and>;
1997defm BIC  : LogicalReg<0b00, 1, "bic",
1998                       BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
1999defm EON  : LogicalReg<0b10, 1, "eon",
2000                       BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
2001defm EOR  : LogicalReg<0b10, 0, "eor", xor>;
2002defm ORN  : LogicalReg<0b01, 1, "orn",
2003                       BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
2004defm ORR  : LogicalReg<0b01, 0, "orr", or>;
2005
2006def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
2007def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;
2008
2009def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
2010def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;
2011
2012def : InstAlias<"mvn $Wd, $Wm$sh",
2013                (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
2014def : InstAlias<"mvn $Xd, $Xm$sh",
2015                (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;
2016
2017def : InstAlias<"tst $src1, $src2",
2018                (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
2019def : InstAlias<"tst $src1, $src2",
2020                (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;
2021
2022def : InstAlias<"tst $src1, $src2",
2023                        (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
2024def : InstAlias<"tst $src1, $src2",
2025                        (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;
2026
2027def : InstAlias<"tst $src1, $src2$sh",
2028               (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
2029def : InstAlias<"tst $src1, $src2$sh",
2030               (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;
2031
2032
2033def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
2034def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
2035
2036
2037//===----------------------------------------------------------------------===//
2038// One operand data processing instructions.
2039//===----------------------------------------------------------------------===//
2040
2041defm CLS    : OneOperandData<0b101, "cls">;
2042defm CLZ    : OneOperandData<0b100, "clz", ctlz>;
2043defm RBIT   : OneOperandData<0b000, "rbit", bitreverse>;
2044
2045def  REV16Wr : OneWRegData<0b001, "rev16",
2046                                  UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
2047def  REV16Xr : OneXRegData<0b001, "rev16", null_frag>;
2048
2049def : Pat<(cttz GPR32:$Rn),
2050          (CLZWr (RBITWr GPR32:$Rn))>;
2051def : Pat<(cttz GPR64:$Rn),
2052          (CLZXr (RBITXr GPR64:$Rn))>;
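// I.e. count-trailing-zeros is selected as bit-reverse plus
// count-leading-zeros:
//   rbit w8, w0
//   clz  w0, w8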
2053def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
2054                (i32 1))),
2055          (CLSWr GPR32:$Rn)>;
2056def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
2057                (i64 1))),
2058          (CLSXr GPR64:$Rn)>;
2059def : Pat<(int_aarch64_cls GPR32:$Rn), (CLSWr GPR32:$Rn)>;
2060def : Pat<(int_aarch64_cls64 GPR64:$Rm), (EXTRACT_SUBREG (CLSXr GPR64:$Rm), sub_32)>;
2061
2062// Unlike the other one-operand instructions, the instructions with the "rev"
2063// mnemonic do *not* just differ in the size bit; they actually use different
2064// opcode bits for the different sizes.
2065def REVWr   : OneWRegData<0b010, "rev", bswap>;
2066def REVXr   : OneXRegData<0b011, "rev", bswap>;
2067def REV32Xr : OneXRegData<0b010, "rev32",
2068                                 UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;
2069
2070def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;
2071
2072// The bswap commutes with the rotr, so we want a pattern for both possible
2073// orders.
2074def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
2075def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
2076
2077// Match (srl (bswap x), C) -> revC if the upper bswap bits are known zero.
2078def : Pat<(srl (bswap top16Zero:$Rn), (i64 16)), (REV16Wr GPR32:$Rn)>;
2079def : Pat<(srl (bswap top32Zero:$Rn), (i64 32)), (REV32Xr GPR64:$Rn)>;
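// Worked example of the rev16 equivalence above, for w0 = 0xAABBCCDD:
//   bswap     -> 0xDDCCBBAA
//   rotr #16  -> 0xBBAADDCC
// which is exactly rev16 (a byte swap within each halfword) of the input.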
2080
2081//===----------------------------------------------------------------------===//
2082// Bitfield immediate extraction instruction.
2083//===----------------------------------------------------------------------===//
2084let hasSideEffects = 0 in
2085defm EXTR : ExtractImm<"extr">;
2086def : InstAlias<"ror $dst, $src, $shift",
2087            (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
2088def : InstAlias<"ror $dst, $src, $shift",
2089            (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;
2090
2091def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
2092          (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
2093def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
2094          (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;
2095
2096//===----------------------------------------------------------------------===//
2097// Other bitfield immediate instructions.
2098//===----------------------------------------------------------------------===//
2099let hasSideEffects = 0 in {
2100defm BFM  : BitfieldImmWith2RegArgs<0b01, "bfm">;
2101defm SBFM : BitfieldImm<0b00, "sbfm">;
2102defm UBFM : BitfieldImm<0b10, "ubfm">;
2103}
2104
2105def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
2106  uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
2107  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2108}]>;
2109
2110def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
2111  uint64_t enc = 31 - N->getZExtValue();
2112  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2113}]>;
2114
2115// min(7, 31 - shift_amt)
2116def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
2117  uint64_t enc = 31 - N->getZExtValue();
2118  enc = enc > 7 ? 7 : enc;
2119  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2120}]>;
2121
2122// min(15, 31 - shift_amt)
2123def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
2124  uint64_t enc = 31 - N->getZExtValue();
2125  enc = enc > 15 ? 15 : enc;
2126  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2127}]>;
2128
2129def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
2130  uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
2131  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2132}]>;
2133
2134def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
2135  uint64_t enc = 63 - N->getZExtValue();
2136  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2137}]>;
2138
2139// min(7, 63 - shift_amt)
2140def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
2141  uint64_t enc = 63 - N->getZExtValue();
2142  enc = enc > 7 ? 7 : enc;
2143  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2144}]>;
2145
2146// min(15, 63 - shift_amt)
2147def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
2148  uint64_t enc = 63 - N->getZExtValue();
2149  enc = enc > 15 ? 15 : enc;
2150  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2151}]>;
2152
2153// min(31, 63 - shift_amt)
2154def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
2155  uint64_t enc = 63 - N->getZExtValue();
2156  enc = enc > 31 ? 31 : enc;
2157  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2158}]>;
2159
2160def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
2161          (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
2162                              (i64 (i32shift_b imm0_31:$imm)))>;
2163def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
2164          (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
2165                              (i64 (i64shift_b imm0_63:$imm)))>;
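// Worked example for the i32 case: "lsl w0, w0, #3" is encoded as
//   ubfm w0, w0, #29, #28
// since i32shift_a yields (32 - 3) & 0x1f = 29 and i32shift_b yields 31 - 3 = 28.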
2166
2167let AddedComplexity = 10 in {
2168def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
2169          (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
2170def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
2171          (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
2172}
2173
2174def : InstAlias<"asr $dst, $src, $shift",
2175                (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
2176def : InstAlias<"asr $dst, $src, $shift",
2177                (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
2178def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
2179def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
2180def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
2181def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
2182def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
2183
2184def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
2185          (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
2186def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
2187          (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
2188
2189def : InstAlias<"lsr $dst, $src, $shift",
2190                (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
2191def : InstAlias<"lsr $dst, $src, $shift",
2192                (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
2193def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
2194def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
2195def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
2196def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
2197def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
2198
2199//===----------------------------------------------------------------------===//
2200// Conditional comparison instructions.
2201//===----------------------------------------------------------------------===//
2202defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
2203defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;
2204
2205//===----------------------------------------------------------------------===//
2206// Conditional select instructions.
2207//===----------------------------------------------------------------------===//
2208defm CSEL  : CondSelect<0, 0b00, "csel">;
2209
2210def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
2211defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
2212defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
2213defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;
2214
2215def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
2216          (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
2217def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
2218          (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
2219def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
2220          (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
2221def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
2222          (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
2223def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
2224          (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
2225def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
2226          (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
2227
2228def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
2229          (CSINCWr WZR, WZR, (i32 imm:$cc))>;
2230def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
2231          (CSINCXr XZR, XZR, (i32 imm:$cc))>;
2232def : Pat<(AArch64csel GPR32:$tval, (i32 1), (i32 imm:$cc), NZCV),
2233          (CSINCWr GPR32:$tval, WZR, (i32 imm:$cc))>;
2234def : Pat<(AArch64csel GPR64:$tval, (i64 1), (i32 imm:$cc), NZCV),
2235          (CSINCXr GPR64:$tval, XZR, (i32 imm:$cc))>;
2236def : Pat<(AArch64csel (i32 1), GPR32:$fval, (i32 imm:$cc), NZCV),
2237          (CSINCWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
2238def : Pat<(AArch64csel (i64 1), GPR64:$fval, (i32 imm:$cc), NZCV),
2239          (CSINCXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
2240def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
2241          (CSINVWr WZR, WZR, (i32 imm:$cc))>;
2242def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
2243          (CSINVXr XZR, XZR, (i32 imm:$cc))>;
2244def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
2245          (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
2246def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
2247          (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
2248def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
2249          (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
2250def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
2251          (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
2252
2253def : Pat<(add GPR32:$val, (AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV)),
2254          (CSINCWr GPR32:$val, GPR32:$val, (i32 imm:$cc))>;
2255def : Pat<(add GPR64:$val, (zext (AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV))),
2256          (CSINCXr GPR64:$val, GPR64:$val, (i32 imm:$cc))>;
2257
2258// The condition code used by the aliased instruction is the inverse of the
2259// one written in the alias. The parser already inverts the condition code
2260// for these aliases.
2261def : InstAlias<"cset $dst, $cc",
2262                (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
2263def : InstAlias<"cset $dst, $cc",
2264                (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
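// For example, "cset w0, eq" assembles to "csinc w0, wzr, wzr, ne", yielding
// 0 + 1 when EQ holds and 0 otherwise.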
2265
2266def : InstAlias<"csetm $dst, $cc",
2267                (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
2268def : InstAlias<"csetm $dst, $cc",
2269                (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
2270
2271def : InstAlias<"cinc $dst, $src, $cc",
2272                (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
2273def : InstAlias<"cinc $dst, $src, $cc",
2274                (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
2275
2276def : InstAlias<"cinv $dst, $src, $cc",
2277                (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
2278def : InstAlias<"cinv $dst, $src, $cc",
2279                (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
2280
2281def : InstAlias<"cneg $dst, $src, $cc",
2282                (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
2283def : InstAlias<"cneg $dst, $src, $cc",
2284                (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
2285
2286//===----------------------------------------------------------------------===//
2287// PC-relative instructions.
2288//===----------------------------------------------------------------------===//
2289let isReMaterializable = 1 in {
2290let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
2291def ADR  : ADRI<0, "adr", adrlabel,
2292                [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>;
2293} // hasSideEffects = 0
2294
2295def ADRP : ADRI<1, "adrp", adrplabel,
2296                [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
2297} // isReMaterializable = 1
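// ADR materializes a PC-relative address within +/-1MiB; ADRP materializes
// the 4KiB page base of a symbol within +/-4GiB, with the low 12 bits
// supplied separately by a :lo12: ADD or load/store offset.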
2298
2299// Direct and page addresses of constant pool entries, block addresses, etc.
2300def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>;
2301def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>;
2302def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>;
2303def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>;
2304def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
2305def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
2306def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>;
2307
2308//===----------------------------------------------------------------------===//
2309// Unconditional branch (register) instructions.
2310//===----------------------------------------------------------------------===//
2311
2312let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
2313def RET  : BranchReg<0b0010, "ret", []>;
2314def DRPS : SpecialReturn<0b0101, "drps">;
2315def ERET : SpecialReturn<0b0100, "eret">;
2316} // isReturn = 1, isTerminator = 1, isBarrier = 1
2317
2318// Default to the LR register.
2319def : InstAlias<"ret", (RET LR)>;
2320
2321let isCall = 1, Defs = [LR], Uses = [SP] in {
2322  def BLR : BranchReg<0b0001, "blr", []>;
2323  def BLRNoIP : Pseudo<(outs), (ins GPR64noip:$Rn), []>,
2324                Sched<[WriteBrReg]>,
2325                PseudoInstExpansion<(BLR GPR64:$Rn)>;
2326  def BLR_RVMARKER : Pseudo<(outs), (ins variable_ops), []>,
2327                     Sched<[WriteBrReg]>;
2328  def BLR_BTI : Pseudo<(outs), (ins variable_ops), []>,
2329                Sched<[WriteBrReg]>;
2330} // isCall
2331
2332def : Pat<(AArch64call GPR64:$Rn),
2333          (BLR GPR64:$Rn)>,
2334      Requires<[NoSLSBLRMitigation]>;
2335def : Pat<(AArch64call GPR64noip:$Rn),
2336          (BLRNoIP GPR64noip:$Rn)>,
2337      Requires<[SLSBLRMitigation]>;
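// Note on the SLS patterns above: with straight-line-speculation hardening of
// BLR enabled, indirect calls are later rewritten to go through a thunk that
// may clobber X16/X17, so BLRNoIP constrains the callee pointer to GPR64noip,
// which excludes those registers and LR.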
2338
2339def : Pat<(AArch64call_rvmarker (i64 tglobaladdr:$rvfunc), GPR64:$Rn),
2340          (BLR_RVMARKER tglobaladdr:$rvfunc, GPR64:$Rn)>,
2341      Requires<[NoSLSBLRMitigation]>;
2342
2343def : Pat<(AArch64call_bti GPR64:$Rn),
2344          (BLR_BTI GPR64:$Rn)>,
2345      Requires<[NoSLSBLRMitigation]>;
2346
2347let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
2348def BR  : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
2349} // isBranch, isTerminator, isBarrier, isIndirectBranch
2350
2351// Create a separate pseudo-instruction for codegen to use so that we don't
2352// flag LR as used in every function. It'll be restored before the RET by the
2353// epilogue if it's legitimately used.
2354def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
2355                   Sched<[WriteBrReg]> {
2356  let isTerminator = 1;
2357  let isBarrier = 1;
2358  let isReturn = 1;
2359}
2360
2361// This is a directive-like pseudo-instruction. The purpose is to insert an
2362// R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
2363// (which in the usual case is a BLR).
2364let hasSideEffects = 1 in
2365def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
2366  let AsmString = ".tlsdesccall $sym";
2367}
2368
2369// Pseudo instruction to tell the streamer to emit a 'B' character into the
2370// augmentation string.
2371def EMITBKEY : Pseudo<(outs), (ins), []>, Sched<[]> {}
2372
2373// FIXME: maybe the scratch register used shouldn't be fixed to X1?
2374// FIXME: can "hasSideEffects" be dropped?
2375// This gets lowered to an instruction sequence which takes 16 bytes.
2376let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1, Size = 16,
2377    isCodeGenOnly = 1 in
2378def TLSDESC_CALLSEQ
2379    : Pseudo<(outs), (ins i64imm:$sym),
2380             [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
2381      Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
2382def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
2383          (TLSDESC_CALLSEQ texternalsym:$sym)>;
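// For reference, the 16-byte sequence this expands to is the standard AArch64
// TLS descriptor call, roughly:
//   adrp x0, :tlsdesc:var
//   ldr  x1, [x0, :tlsdesc_lo12:var]
//   add  x0, x0, :tlsdesc_lo12:var
//   .tlsdesccall var
//   blr  x1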
2384
2385//===----------------------------------------------------------------------===//
2386// Conditional branch (immediate) instruction.
2387//===----------------------------------------------------------------------===//
2388def Bcc : BranchCond<0, "b">;
2389
2390// Armv8.8-A variant form which hints to the branch predictor that
2391// this branch is very likely to go the same way nearly all the time
2392// (even though it is not known at compile time _which_ way that is).
2393def BCcc : BranchCond<1, "bc">, Requires<[HasHBC]>;
2394
2395//===----------------------------------------------------------------------===//
2396// Compare-and-branch instructions.
2397//===----------------------------------------------------------------------===//
2398defm CBZ  : CmpBranch<0, "cbz", AArch64cbz>;
2399defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;
2400
2401//===----------------------------------------------------------------------===//
2402// Test-bit-and-branch instructions.
2403//===----------------------------------------------------------------------===//
2404defm TBZ  : TestBranch<0, "tbz", AArch64tbz>;
2405defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;
2406
2407//===----------------------------------------------------------------------===//
2408// Unconditional branch (immediate) instructions.
2409//===----------------------------------------------------------------------===//
2410let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
2411def B  : BranchImm<0, "b", [(br bb:$addr)]>;
2412} // isBranch, isTerminator, isBarrier
2413
2414let isCall = 1, Defs = [LR], Uses = [SP] in {
2415def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
2416} // isCall
2417def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;
2418
2419//===----------------------------------------------------------------------===//
2420// Exception generation instructions.
2421//===----------------------------------------------------------------------===//
2422let isTrap = 1 in {
2423def BRK   : ExceptionGeneration<0b001, 0b00, "brk">;
2424}
2425def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
2426def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
2427def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">, Requires<[HasEL3]>;
2428def HLT   : ExceptionGeneration<0b010, 0b00, "hlt">;
2429def HVC   : ExceptionGeneration<0b000, 0b10, "hvc">;
2430def SMC   : ExceptionGeneration<0b000, 0b11, "smc">, Requires<[HasEL3]>;
2431def SVC   : ExceptionGeneration<0b000, 0b01, "svc">;
2432
2433// DCPSn defaults to an immediate operand of zero if unspecified.
2434def : InstAlias<"dcps1", (DCPS1 0)>;
2435def : InstAlias<"dcps2", (DCPS2 0)>;
2436def : InstAlias<"dcps3", (DCPS3 0)>, Requires<[HasEL3]>;
2437
2438def UDF : UDFType<0, "udf">;
2439
2440//===----------------------------------------------------------------------===//
2441// Load instructions.
2442//===----------------------------------------------------------------------===//
2443
2444// Pair (indexed, offset)
2445defm LDPW : LoadPairOffset<0b00, 0, GPR32z, simm7s4, "ldp">;
2446defm LDPX : LoadPairOffset<0b10, 0, GPR64z, simm7s8, "ldp">;
2447defm LDPS : LoadPairOffset<0b00, 1, FPR32Op, simm7s4, "ldp">;
2448defm LDPD : LoadPairOffset<0b01, 1, FPR64Op, simm7s8, "ldp">;
2449defm LDPQ : LoadPairOffset<0b10, 1, FPR128Op, simm7s16, "ldp">;
2450
2451defm LDPSW : LoadPairOffset<0b01, 0, GPR64z, simm7s4, "ldpsw">;
2452
2453// Pair (pre-indexed)
2454def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
2455def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
2456def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
2457def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
2458def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;
2459
2460def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;
2461
2462// Pair (post-indexed)
2463def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
2464def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
2465def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
2466def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
2467def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;
2468
2469def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;
2470
2471
2472// Pair (no allocate)
2473defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32z, simm7s4, "ldnp">;
2474defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64z, simm7s8, "ldnp">;
2475defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32Op, simm7s4, "ldnp">;
2476defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64Op, simm7s8, "ldnp">;
2477defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128Op, simm7s16, "ldnp">;
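// LDNP carries a non-temporal hint: the loaded data is unlikely to be reused
// soon, so the memory system need not allocate it in the caches.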
2478
2479def : Pat<(AArch64ldp (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
2480          (LDPXi GPR64sp:$Rn, simm7s8:$offset)>;
2481
2482//---
2483// (register offset)
2484//---
2485
2486// Integer
2487defm LDRBB : Load8RO<0b00,  0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
2488defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
2489defm LDRW  : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
2490defm LDRX  : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;
2491
2492// Floating-point
2493defm LDRB : Load8RO<0b00,   1, 0b01, FPR8Op,   "ldr", untyped, load>;
2494defm LDRH : Load16RO<0b01,  1, 0b01, FPR16Op,  "ldr", f16, load>;
2495defm LDRS : Load32RO<0b10,  1, 0b01, FPR32Op,  "ldr", f32, load>;
2496defm LDRD : Load64RO<0b11,  1, 0b01, FPR64Op,  "ldr", f64, load>;
2497defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128Op, "ldr", f128, load>;
2498
2499// Load sign-extended half-word
2500defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
2501defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;
2502
2503// Load sign-extended byte
2504defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
2505defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;
2506
2507// Load sign-extended word
2508defm LDRSW  : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;
2509
2510// Pre-fetch.
2511defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;
2512
2513// Regular loads have no alignment requirement.
2514// Thus, it is safe to directly map vector loads with interesting
2515// addressing modes onto them.
2516// FIXME: We could do the same for bitconvert to floating point vectors.
2517multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
2518                              ValueType ScalTy, ValueType VecTy,
2519                              Instruction LOADW, Instruction LOADX,
2520                              SubRegIndex sub> {
2521  def : Pat<(VecTy (scalar_to_vector (ScalTy
2522              (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
2523            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
2524                           (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
2525                           sub)>;
2526
2527  def : Pat<(VecTy (scalar_to_vector (ScalTy
2528              (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
2529            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
2530                           (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
2531                           sub)>;
2532}
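// Each instantiation below expands to two patterns (one per W/X
// register-offset form) that load a scalar and place it in lane 0 of an
// otherwise undefined vector via INSERT_SUBREG.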
2533
2534let AddedComplexity = 10 in {
2535defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v8i8,  LDRBroW, LDRBroX, bsub>;
2536defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v16i8, LDRBroW, LDRBroX, bsub>;
2537
2538defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
2539defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;
2540
2541defm : ScalToVecROLoadPat<ro16, load,       i32, v4f16, LDRHroW, LDRHroX, hsub>;
2542defm : ScalToVecROLoadPat<ro16, load,       i32, v8f16, LDRHroW, LDRHroX, hsub>;
2543
2544defm : ScalToVecROLoadPat<ro32, load,       i32, v2i32, LDRSroW, LDRSroX, ssub>;
2545defm : ScalToVecROLoadPat<ro32, load,       i32, v4i32, LDRSroW, LDRSroX, ssub>;
2546
2547defm : ScalToVecROLoadPat<ro32, load,       f32, v2f32, LDRSroW, LDRSroX, ssub>;
2548defm : ScalToVecROLoadPat<ro32, load,       f32, v4f32, LDRSroW, LDRSroX, ssub>;
2549
2550defm : ScalToVecROLoadPat<ro64, load,       i64, v2i64, LDRDroW, LDRDroX, dsub>;
2551
2552defm : ScalToVecROLoadPat<ro64, load,       f64, v2f64, LDRDroW, LDRDroX, dsub>;
2553
2554
2555def : Pat <(v1i64 (scalar_to_vector (i64
2556                      (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
2557                                           ro_Wextend64:$extend))))),
2558           (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
2559
2560def : Pat <(v1i64 (scalar_to_vector (i64
2561                      (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
2562                                           ro_Xextend64:$extend))))),
2563           (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
2564}
2565
2566// Match all 64-bit-wide loads whose type is compatible with FPR64
2567multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
2568                        Instruction LOADW, Instruction LOADX> {
2569
2570  def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
2571            (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2572
2573  def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
2574            (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2575}
2576
2577let AddedComplexity = 10 in {
2578let Predicates = [IsLE] in {
2579  // We must do vector loads with LD1 in big-endian.
2580  defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
2581  defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
2582  defm : VecROLoadPat<ro64, v8i8,  LDRDroW, LDRDroX>;
2583  defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
2584  defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
2585  defm : VecROLoadPat<ro64, v4bf16, LDRDroW, LDRDroX>;
2586}
2587
2588defm : VecROLoadPat<ro64, v1i64,  LDRDroW, LDRDroX>;
2589defm : VecROLoadPat<ro64, v1f64,  LDRDroW, LDRDroX>;
2590
2591// Match all 128-bit-wide loads whose type is compatible with FPR128
2592let Predicates = [IsLE] in {
2593  // We must do vector loads with LD1 in big-endian.
2594  defm : VecROLoadPat<ro128, v2i64,  LDRQroW, LDRQroX>;
2595  defm : VecROLoadPat<ro128, v2f64,  LDRQroW, LDRQroX>;
2596  defm : VecROLoadPat<ro128, v4i32,  LDRQroW, LDRQroX>;
2597  defm : VecROLoadPat<ro128, v4f32,  LDRQroW, LDRQroX>;
2598  defm : VecROLoadPat<ro128, v8i16,  LDRQroW, LDRQroX>;
2599  defm : VecROLoadPat<ro128, v8f16,  LDRQroW, LDRQroX>;
2600  defm : VecROLoadPat<ro128, v8bf16,  LDRQroW, LDRQroX>;
2601  defm : VecROLoadPat<ro128, v16i8,  LDRQroW, LDRQroX>;
2602}
2603} // AddedComplexity = 10
2604
2605// zextload -> i64
2606multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
2607                            Instruction INSTW, Instruction INSTX> {
2608  def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
2609            (SUBREG_TO_REG (i64 0),
2610                           (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
2611                           sub_32)>;
2612
2613  def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
2614            (SUBREG_TO_REG (i64 0),
2615                           (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
2616                           sub_32)>;
2617}
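// SUBREG_TO_REG is used because a 32-bit load already zeroes bits [63:32] of
// the X register, making the i64 zero-extension free; the wrapper merely
// asserts that the high half is zero.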
2618
2619let AddedComplexity = 10 in {
2620  defm : ExtLoadTo64ROPat<ro8,  zextloadi8,  LDRBBroW, LDRBBroX>;
2621  defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
2622  defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW,  LDRWroX>;
2623
2624  // zextloadi1 -> zextloadi8
2625  defm : ExtLoadTo64ROPat<ro8,  zextloadi1,  LDRBBroW, LDRBBroX>;
2626
2627  // extload -> zextload
2628  defm : ExtLoadTo64ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
2629  defm : ExtLoadTo64ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
2630  defm : ExtLoadTo64ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;
2631
2632  // extloadi1 -> zextloadi8
2633  defm : ExtLoadTo64ROPat<ro8,  extloadi1,   LDRBBroW, LDRBBroX>;
2634}
2635
2636
2637// extload/zextload -> i32
2638multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
2639                            Instruction INSTW, Instruction INSTX> {
2640  def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
2641            (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2642
2643  def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
2644            (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2645
2646}
2647
2648let AddedComplexity = 10 in {
2649  // extload -> zextload
2650  defm : ExtLoadTo32ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
2651  defm : ExtLoadTo32ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
2652  defm : ExtLoadTo32ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;
2653
2654  // zextloadi1 -> zextloadi8
2655  defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
2656}
2657
2658//---
2659// (unsigned immediate)
2660//---
2661defm LDRX : LoadUI<0b11, 0, 0b01, GPR64z, uimm12s8, "ldr",
2662                   [(set GPR64z:$Rt,
2663                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
2664defm LDRW : LoadUI<0b10, 0, 0b01, GPR32z, uimm12s4, "ldr",
2665                   [(set GPR32z:$Rt,
2666                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
2667defm LDRB : LoadUI<0b00, 1, 0b01, FPR8Op, uimm12s1, "ldr",
2668                   [(set FPR8Op:$Rt,
2669                         (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
2670defm LDRH : LoadUI<0b01, 1, 0b01, FPR16Op, uimm12s2, "ldr",
2671                   [(set (f16 FPR16Op:$Rt),
2672                         (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
2673defm LDRS : LoadUI<0b10, 1, 0b01, FPR32Op, uimm12s4, "ldr",
2674                   [(set (f32 FPR32Op:$Rt),
2675                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
2676defm LDRD : LoadUI<0b11, 1, 0b01, FPR64Op, uimm12s8, "ldr",
2677                   [(set (f64 FPR64Op:$Rt),
2678                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
2679defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128Op, uimm12s16, "ldr",
2680                 [(set (f128 FPR128Op:$Rt),
2681                       (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;
2682
2683// bf16 load pattern
2684def : Pat <(bf16 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2685           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
2686
2687// Regular loads have no alignment requirement.
2688// Thus, it is safe to directly map vector loads with interesting
2689// addressing modes onto them.
2690// FIXME: We could do the same for bitconvert to floating point vectors.
2691def : Pat <(v8i8 (scalar_to_vector (i32
2692               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
2693           (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
2694                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
2695def : Pat <(v16i8 (scalar_to_vector (i32
2696               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
2697           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
2698                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
2699def : Pat <(v4i16 (scalar_to_vector (i32
2700               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
2701           (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
2702                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
2703def : Pat <(v8i16 (scalar_to_vector (i32
2704               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
2705           (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
2706                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
2707def : Pat <(v2i32 (scalar_to_vector (i32
2708               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
2709           (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
2710                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
2711def : Pat <(v4i32 (scalar_to_vector (i32
2712               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
2713           (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
2714                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
2715def : Pat <(v1i64 (scalar_to_vector (i64
2716               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
2717           (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2718def : Pat <(v2i64 (scalar_to_vector (i64
2719               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
2720           (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
2721                          (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;
2722
2723// Match all 64-bit-wide loads whose type is compatible with FPR64
2724let Predicates = [IsLE] in {
2725  // We must use LD1 to perform vector loads in big-endian.
2726  def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2727            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2728  def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2729            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2730  def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2731            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2732  def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2733            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2734  def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2735            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2736  def : Pat<(v4bf16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2737            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2738}
2739def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2740          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2741def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2742          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2743
2744// Match all 128-bit-wide loads whose type is compatible with FPR128
2745let Predicates = [IsLE] in {
2746  // We must use LD1 to perform vector loads in big-endian.
2747  def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2748            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2749  def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2750            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2751  def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2752            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2753  def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2754            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2755  def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2756            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2757  def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2758            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2759  def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2760            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2761  def : Pat<(v8bf16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2762            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2763}
2764def : Pat<(f128  (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2765          (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2766
2767defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
2768                    [(set GPR32:$Rt,
2769                          (zextloadi16 (am_indexed16 GPR64sp:$Rn,
2770                                                     uimm12s2:$offset)))]>;
2771defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
2772                    [(set GPR32:$Rt,
2773                          (zextloadi8 (am_indexed8 GPR64sp:$Rn,
2774                                                   uimm12s1:$offset)))]>;
2775// zextload -> i64
2776def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2777    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2778def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2779    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
2780
2781// zextloadi1 -> zextloadi8
2782def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2783          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2784def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2785    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2786
2787// extload -> zextload
2788def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2789          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
2790def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2791          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2792def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2793          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2794def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
2795    (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
2796def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2797    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
2798def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2799    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2800def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2801    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2802
2803// load sign-extended half-word
2804defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
2805                     [(set GPR32:$Rt,
2806                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
2807                                                      uimm12s2:$offset)))]>;
2808defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
2809                     [(set GPR64:$Rt,
2810                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
2811                                                      uimm12s2:$offset)))]>;
2812
2813// load sign-extended byte
2814defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
2815                     [(set GPR32:$Rt,
2816                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
2817                                                    uimm12s1:$offset)))]>;
2818defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
2819                     [(set GPR64:$Rt,
2820                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
2821                                                    uimm12s1:$offset)))]>;
2822
2823// load sign-extended word
2824defm LDRSW  : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
2825                     [(set GPR64:$Rt,
2826                           (sextloadi32 (am_indexed32 GPR64sp:$Rn,
2827                                                      uimm12s4:$offset)))]>;
2828
2829// load zero-extended word
2830def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
2831      (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
2832
2833// Pre-fetch.
2834def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
2835                        [(AArch64Prefetch imm:$Rt,
2836                                        (am_indexed64 GPR64sp:$Rn,
2837                                                      uimm12s8:$offset))]>;
2838
2839def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;
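// The prfop operand names encode <type><target><policy>; e.g.
// "prfm pldl1keep, [x0]" requests a load prefetch that keeps the line in L1.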
2840
2841//---
2842// (literal)
2843
2844def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
2845  if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
2846    const DataLayout &DL = MF->getDataLayout();
2847    Align Align = G->getGlobal()->getPointerAlignment(DL);
2848    return Align >= 4 && G->getOffset() % 4 == 0;
2849  }
2850  if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
2851    return C->getAlign() >= 4 && C->getOffset() % 4 == 0;
2852  return false;
2853}]>;
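// The 4-byte alignment requirement mirrors the LDR (literal) encoding: the
// label offset is a signed 19-bit word offset (imm19 scaled by 4), so only
// 4-byte-aligned targets are reachable.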
2854
2855def LDRWl : LoadLiteral<0b00, 0, GPR32z, "ldr",
2856  [(set GPR32z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
2857def LDRXl : LoadLiteral<0b01, 0, GPR64z, "ldr",
2858  [(set GPR64z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
2859def LDRSl : LoadLiteral<0b00, 1, FPR32Op, "ldr",
2860  [(set (f32 FPR32Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2861def LDRDl : LoadLiteral<0b01, 1, FPR64Op, "ldr",
2862  [(set (f64 FPR64Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2863def LDRQl : LoadLiteral<0b10, 1, FPR128Op, "ldr",
2864  [(set (f128 FPR128Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2865
2866// load sign-extended word
2867def LDRSWl : LoadLiteral<0b10, 0, GPR64z, "ldrsw",
2868  [(set GPR64z:$Rt, (sextloadi32 (AArch64adr alignedglobal:$label)))]>;
2869
2870let AddedComplexity = 20 in {
2871def : Pat<(i64 (zextloadi32 (AArch64adr alignedglobal:$label))),
2872        (SUBREG_TO_REG (i64 0), (LDRWl $label), sub_32)>;
2873}
2874
2875// prefetch
2876def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
2877//                   [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;
2878
2879//---
2880// (unscaled immediate)
2881defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64z, "ldur",
2882                    [(set GPR64z:$Rt,
2883                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
2884defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32z, "ldur",
2885                    [(set GPR32z:$Rt,
2886                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
2887defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8Op, "ldur",
2888                    [(set FPR8Op:$Rt,
2889                          (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
2890defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16Op, "ldur",
2891                    [(set (f16 FPR16Op:$Rt),
2892                          (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
2893defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32Op, "ldur",
2894                    [(set (f32 FPR32Op:$Rt),
2895                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
2896defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64Op, "ldur",
2897                    [(set (f64 FPR64Op:$Rt),
2898                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
2899defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128Op, "ldur",
2900                    [(set (f128 FPR128Op:$Rt),
2901                          (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;
2902
2903defm LDURHH
2904    : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
2905             [(set GPR32:$Rt,
2906                    (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
2907defm LDURBB
2908    : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
2909             [(set GPR32:$Rt,
2910                    (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
2911
2912// Match all 64-bit-wide loads whose type is compatible with FPR64
2913let Predicates = [IsLE] in {
2914  def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2915            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2916  def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2917            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2918  def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2919            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2920  def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2921            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2922  def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2923            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2924}
2925def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2926          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2927def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2928          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2929
2930// Match all 128-bit-wide loads whose type is compatible with FPR128
2931let Predicates = [IsLE] in {
2932  def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2933            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2934  def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2935            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2936  def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2937            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2938  def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2939            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2940  def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2941            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2942  def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2943            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2944  def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2945            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2946}
2947
2948// anyext -> zext
2949def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2950          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
2951def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2952          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2953def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2954          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2955def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
2956    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2957def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2958    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2959def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2960    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2961def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2962    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2963// unscaled zext
2964def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2965          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
2966def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2967          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2968def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2969          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2970def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
2971    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2972def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2973    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2974def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2975    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2976def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2977    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2978
2979
2980//---
2981// LDR mnemonics fall back to LDUR for negative or unaligned offsets.
2982
2983// Define new assembler match classes as we want to only match these when
2984// they don't otherwise match the scaled addressing mode for LDR/STR. Don't
2985// associate a DiagnosticType either, as we want the diagnostic for the
2986// canonical form (the scaled operand) to take precedence.
2987class SImm9OffsetOperand<int Width> : AsmOperandClass {
2988  let Name = "SImm9OffsetFB" # Width;
2989  let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
2990  let RenderMethod = "addImmOperands";
2991}
2992
2993def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
2994def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
2995def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
2996def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
2997def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;
2998
2999def simm9_offset_fb8 : Operand<i64> {
3000  let ParserMatchClass = SImm9OffsetFB8Operand;
3001}
3002def simm9_offset_fb16 : Operand<i64> {
3003  let ParserMatchClass = SImm9OffsetFB16Operand;
3004}
3005def simm9_offset_fb32 : Operand<i64> {
3006  let ParserMatchClass = SImm9OffsetFB32Operand;
3007}
3008def simm9_offset_fb64 : Operand<i64> {
3009  let ParserMatchClass = SImm9OffsetFB64Operand;
3010}
3011def simm9_offset_fb128 : Operand<i64> {
3012  let ParserMatchClass = SImm9OffsetFB128Operand;
3013}
3014
3015def : InstAlias<"ldr $Rt, [$Rn, $offset]",
3016                (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
3017def : InstAlias<"ldr $Rt, [$Rn, $offset]",
3018                (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
3019def : InstAlias<"ldr $Rt, [$Rn, $offset]",
3020                (LDURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3021def : InstAlias<"ldr $Rt, [$Rn, $offset]",
3022                (LDURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3023def : InstAlias<"ldr $Rt, [$Rn, $offset]",
3024                (LDURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
3025def : InstAlias<"ldr $Rt, [$Rn, $offset]",
3026                (LDURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
3027def : InstAlias<"ldr $Rt, [$Rn, $offset]",
3028               (LDURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
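// For example, "ldr x0, [x1, #-8]" cannot be encoded with the scaled
// unsigned-offset LDRXui form, so these aliases let it match the unscaled
// LDURXi form instead.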
3029
3030// zextload -> i64
3031def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
3032  (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
3033def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
3034  (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
3035
3036// load sign-extended half-word
3037defm LDURSHW
3038    : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
3039               [(set GPR32:$Rt,
3040                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
3041defm LDURSHX
3042    : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
3043              [(set GPR64:$Rt,
3044                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
3045
3046// load sign-extended byte
3047defm LDURSBW
3048    : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
3049                [(set GPR32:$Rt,
3050                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
3051defm LDURSBX
3052    : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
3053                [(set GPR64:$Rt,
3054                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
3055
3056// load sign-extended word
3057defm LDURSW
3058    : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
3059              [(set GPR64:$Rt,
3060                    (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
3061
3062// Zero- and sign-extending aliases from generic LDR* mnemonics to LDUR*.
3063def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
3064                (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3065def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
3066                (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3067def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
3068                (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3069def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
3070                (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3071def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
3072                (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3073def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
3074                (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3075def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
3076                (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
3077
3078// Pre-fetch.
3079defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
3080                  [(AArch64Prefetch imm:$Rt,
3081                                  (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
3082
3083//---
3084// (unscaled immediate, unprivileged)
3085defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
3086defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;
3087
3088defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
3089defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;
3090
3091// load sign-extended half-word
3092defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
3093defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;
3094
3095// load sign-extended byte
3096defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
3097defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;
3098
3099// load sign-extended word
3100defm LDTRSW  : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;
3101
3102//---
3103// (immediate pre-indexed)
3104def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32z, "ldr">;
3105def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64z, "ldr">;
3106def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
3107def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
3108def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
3109def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
3110def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128Op, "ldr">;
3111
3112// load sign-extended half-word
3113def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
3114def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;
3115
3116// load sign-extended byte
3117def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
3118def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;
3119
3120// load zero-extended byte
3121def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
3122def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32z, "ldrh">;
3123
3124// load sign-extended word
3125def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;
3126
3127//---
3128// (immediate post-indexed)
3129def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32z, "ldr">;
3130def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64z, "ldr">;
3131def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
3132def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
3133def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
3134def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
3135def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128Op, "ldr">;
3136
3137// load sign-extended half-word
3138def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
3139def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;
3140
3141// load sign-extended byte
3142def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
3143def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;
3144
3145// load zero-extended byte
3146def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
3147def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32z, "ldrh">;
3148
3149// load sign-extended word
3150def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;
3151
3152//===----------------------------------------------------------------------===//
3153// Store instructions.
3154//===----------------------------------------------------------------------===//
3155
3156// Pair (indexed, offset)
3157// FIXME: Use dedicated range-checked addressing mode operand here.
3158defm STPW : StorePairOffset<0b00, 0, GPR32z, simm7s4, "stp">;
3159defm STPX : StorePairOffset<0b10, 0, GPR64z, simm7s8, "stp">;
3160defm STPS : StorePairOffset<0b00, 1, FPR32Op, simm7s4, "stp">;
3161defm STPD : StorePairOffset<0b01, 1, FPR64Op, simm7s8, "stp">;
3162defm STPQ : StorePairOffset<0b10, 1, FPR128Op, simm7s16, "stp">;
3163
3164// Pair (pre-indexed)
3165def STPWpre : StorePairPreIdx<0b00, 0, GPR32z, simm7s4, "stp">;
3166def STPXpre : StorePairPreIdx<0b10, 0, GPR64z, simm7s8, "stp">;
3167def STPSpre : StorePairPreIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
3168def STPDpre : StorePairPreIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
3169def STPQpre : StorePairPreIdx<0b10, 1, FPR128Op, simm7s16, "stp">;
3170
3171// Pair (post-indexed)
3172def STPWpost : StorePairPostIdx<0b00, 0, GPR32z, simm7s4, "stp">;
3173def STPXpost : StorePairPostIdx<0b10, 0, GPR64z, simm7s8, "stp">;
3174def STPSpost : StorePairPostIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
3175def STPDpost : StorePairPostIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
3176def STPQpost : StorePairPostIdx<0b10, 1, FPR128Op, simm7s16, "stp">;
3177
3178// Pair (no allocate)
3179defm STNPW : StorePairNoAlloc<0b00, 0, GPR32z, simm7s4, "stnp">;
3180defm STNPX : StorePairNoAlloc<0b10, 0, GPR64z, simm7s8, "stnp">;
3181defm STNPS : StorePairNoAlloc<0b00, 1, FPR32Op, simm7s4, "stnp">;
3182defm STNPD : StorePairNoAlloc<0b01, 1, FPR64Op, simm7s8, "stnp">;
3183defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128Op, simm7s16, "stnp">;
3184
3185def : Pat<(AArch64stp GPR64z:$Rt, GPR64z:$Rt2, (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
3186          (STPXi GPR64z:$Rt, GPR64z:$Rt2, GPR64sp:$Rn, simm7s8:$offset)>;
3187
3188def : Pat<(AArch64stnp FPR128:$Rt, FPR128:$Rt2, (am_indexed7s128 GPR64sp:$Rn, simm7s16:$offset)),
3189          (STNPQi FPR128:$Rt, FPR128:$Rt2, GPR64sp:$Rn, simm7s16:$offset)>;
3190
3191
3192//---
3193// (Register offset)
3194
3195// Integer
3196defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
3197defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
3198defm STRW  : Store32RO<0b10, 0, 0b00, GPR32, "str",  i32, store>;
3199defm STRX  : Store64RO<0b11, 0, 0b00, GPR64, "str",  i64, store>;
3200
3201
3202// Floating-point
3203defm STRB : Store8RO< 0b00,  1, 0b00, FPR8Op,   "str", untyped, store>;
3204defm STRH : Store16RO<0b01,  1, 0b00, FPR16Op,  "str", f16,     store>;
3205defm STRS : Store32RO<0b10,  1, 0b00, FPR32Op,  "str", f32,     store>;
3206defm STRD : Store64RO<0b11,  1, 0b00, FPR64Op,  "str", f64,     store>;
3207defm STRQ : Store128RO<0b00, 1, 0b10, FPR128Op, "str">;
3208
3209let Predicates = [UseSTRQro], AddedComplexity = 10 in {
3210  def : Pat<(store (f128 FPR128:$Rt),
3211                        (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
3212                                        ro_Wextend128:$extend)),
3213            (STRQroW FPR128:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend)>;
3214  def : Pat<(store (f128 FPR128:$Rt),
3215                        (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
3216                                        ro_Xextend128:$extend)),
3217            (STRQroX FPR128:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend128:$extend)>;
3218}
3219
3220multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
3221                                 Instruction STRW, Instruction STRX> {
3222
3223  def : Pat<(storeop GPR64:$Rt,
3224                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
3225            (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
3226                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
3227
3228  def : Pat<(storeop GPR64:$Rt,
3229                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
3230            (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
3231                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
3232}
3233
3234let AddedComplexity = 10 in {
3235  // truncstore i64
3236  defm : TruncStoreFrom64ROPat<ro8,  truncstorei8,  STRBBroW, STRBBroX>;
3237  defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
3238  defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW,  STRWroX>;
3239}
3240
3241multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
3242                         Instruction STRW, Instruction STRX> {
3243  def : Pat<(store (VecTy FPR:$Rt),
3244                   (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
3245            (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
3246
3247  def : Pat<(store (VecTy FPR:$Rt),
3248                   (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
3249            (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
3250}
3251
3252let AddedComplexity = 10 in {
3253// Match all 64-bit-wide stores whose type is compatible with FPR64
3254let Predicates = [IsLE] in {
3255  // We must use ST1 to store vectors in big-endian.
3256  defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
3257  defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
3258  defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
3259  defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
3260  defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
3261  defm : VecROStorePat<ro64, v4bf16, FPR64, STRDroW, STRDroX>;
3262}
3263
3264defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
3265defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;
3266
3267// Match all store 128 bits width whose type is compatible with FPR128
3268let Predicates = [IsLE, UseSTRQro] in {
3269  // We must use ST1 to store vectors in big-endian.
3270  defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
3271  defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
3272  defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
3273  defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
3274  defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
3275  defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
3276  defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
3277  defm : VecROStorePat<ro128, v8bf16, FPR128, STRQroW, STRQroX>;
3278}
3279} // AddedComplexity = 10
3280
3281// Match stores from lane 0 to the appropriate subreg's store.
3282multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
3283                              ValueType VecTy, ValueType STy,
3284                              SubRegIndex SubRegIdx,
3285                              Instruction STRW, Instruction STRX> {
3286
3287  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
3288                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
3289            (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
3290                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
3291
3292  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
3293                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
3294            (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
3295                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
3296}
3297
3298let AddedComplexity = 19 in {
3299  defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
3300  defm : VecROStoreLane0Pat<ro16,         store, v8f16, f16, hsub, STRHroW, STRHroX>;
3301  defm : VecROStoreLane0Pat<ro32,         store, v4i32, i32, ssub, STRSroW, STRSroX>;
3302  defm : VecROStoreLane0Pat<ro32,         store, v4f32, f32, ssub, STRSroW, STRSroX>;
3303  defm : VecROStoreLane0Pat<ro64,         store, v2i64, i64, dsub, STRDroW, STRDroX>;
3304  defm : VecROStoreLane0Pat<ro64,         store, v2f64, f64, dsub, STRDroW, STRDroX>;
3305}
3306
3307//---
3308// (unsigned immediate)
3309defm STRX : StoreUIz<0b11, 0, 0b00, GPR64z, uimm12s8, "str",
3310                   [(store GPR64z:$Rt,
3311                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
3312defm STRW : StoreUIz<0b10, 0, 0b00, GPR32z, uimm12s4, "str",
3313                    [(store GPR32z:$Rt,
3314                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
3315defm STRB : StoreUI<0b00, 1, 0b00, FPR8Op, uimm12s1, "str",
3316                    [(store FPR8Op:$Rt,
3317                            (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
3318defm STRH : StoreUI<0b01, 1, 0b00, FPR16Op, uimm12s2, "str",
3319                    [(store (f16 FPR16Op:$Rt),
3320                            (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
3321defm STRS : StoreUI<0b10, 1, 0b00, FPR32Op, uimm12s4, "str",
3322                    [(store (f32 FPR32Op:$Rt),
3323                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
3324defm STRD : StoreUI<0b11, 1, 0b00, FPR64Op, uimm12s8, "str",
3325                    [(store (f64 FPR64Op:$Rt),
3326                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
3327defm STRQ : StoreUI<0b00, 1, 0b10, FPR128Op, uimm12s16, "str", []>;
3328
3329defm STRHH : StoreUIz<0b01, 0, 0b00, GPR32z, uimm12s2, "strh",
3330                     [(truncstorei16 GPR32z:$Rt,
3331                                     (am_indexed16 GPR64sp:$Rn,
3332                                                   uimm12s2:$offset))]>;
3333defm STRBB : StoreUIz<0b00, 0, 0b00, GPR32z, uimm12s1,  "strb",
3334                     [(truncstorei8 GPR32z:$Rt,
3335                                    (am_indexed8 GPR64sp:$Rn,
3336                                                 uimm12s1:$offset))]>;

// bf16 store pattern
def : Pat<(store (bf16 FPR16Op:$Rt),
                 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
          (STRHui FPR16:$Rt, GPR64sp:$Rn, uimm12s2:$offset)>;

let AddedComplexity = 10 in {

// Match all 64-bit-wide stores whose type is compatible with FPR64.
def : Pat<(store (v1i64 FPR64:$Rt),
                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(store (v1f64 FPR64:$Rt),
                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;

let Predicates = [IsLE] in {
  // Big-endian vector stores must use ST1, so these patterns are
  // little-endian only.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4bf16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
}

// Match all 128-bit-wide stores whose type is compatible with FPR128.
def : Pat<(store (f128  FPR128:$Rt),
                 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
          (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;

let Predicates = [IsLE] in {
  // Big-endian vector stores must use ST1, so these patterns are
  // little-endian only.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8bf16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
}

// truncstore i64
def : Pat<(truncstorei32 GPR64:$Rt,
                         (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
  (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt,
                         (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
  (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
  (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;
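// In other words, a truncating store of an i64 just stores the low
// subregister: e.g. (truncstorei32 GPR64:$Rt, ...) becomes an ordinary
// 32-bit STR of the sub_32 half of $Rt.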

} // AddedComplexity = 10

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecStoreLane0Pat<ComplexPattern UIAddrMode, SDPatternOperator storeop,
                            ValueType VTy, ValueType STy,
                            SubRegIndex SubRegIdx, Operand IndexType,
                            Instruction STR> {
  def : Pat<(storeop (STy (vector_extract (VTy VecListOne128:$Vt), 0)),
                     (UIAddrMode GPR64sp:$Rn, IndexType:$offset)),
            (STR (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                 GPR64sp:$Rn, IndexType:$offset)>;
}

let AddedComplexity = 19 in {
  defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>;
  defm : VecStoreLane0Pat<am_indexed16,         store, v8f16, f16, hsub, uimm12s2, STRHui>;
  defm : VecStoreLane0Pat<am_indexed32,         store, v4i32, i32, ssub, uimm12s4, STRSui>;
  defm : VecStoreLane0Pat<am_indexed32,         store, v4f32, f32, ssub, uimm12s4, STRSui>;
  defm : VecStoreLane0Pat<am_indexed64,         store, v2i64, i64, dsub, uimm12s8, STRDui>;
  defm : VecStoreLane0Pat<am_indexed64,         store, v2f64, f64, dsub, uimm12s8, STRDui>;
}

//---
// (unscaled immediate)
defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64z, "stur",
                         [(store GPR64z:$Rt,
                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32z, "stur",
                         [(store GPR32z:$Rt,
                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8Op, "stur",
                         [(store FPR8Op:$Rt,
                                 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16Op, "stur",
                         [(store (f16 FPR16Op:$Rt),
                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32Op, "stur",
                         [(store (f32 FPR32Op:$Rt),
                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64Op, "stur",
                         [(store (f64 FPR64Op:$Rt),
                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128Op, "stur",
                         [(store (f128 FPR128Op:$Rt),
                                 (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32z, "sturh",
                         [(truncstorei16 GPR32z:$Rt,
                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32z, "sturb",
                         [(truncstorei8 GPR32z:$Rt,
                                  (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
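// The unscaled STUR forms instead take a signed 9-bit byte offset in the
// range -256..255 that is never scaled by the access size, e.g.
// "stur x0, [x1, #-8]" or "stur w0, [x1, #3]".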

// Armv8.4 Weaker Release Consistency enhancements
//         LDAPR & STLR with Immediate Offset instructions
let Predicates = [HasRCPC_IMMO] in {
defm STLURB     : BaseStoreUnscaleV84<"stlurb",  0b00, 0b00, GPR32>;
defm STLURH     : BaseStoreUnscaleV84<"stlurh",  0b01, 0b00, GPR32>;
defm STLURW     : BaseStoreUnscaleV84<"stlur",   0b10, 0b00, GPR32>;
defm STLURX     : BaseStoreUnscaleV84<"stlur",   0b11, 0b00, GPR64>;
defm LDAPURB    : BaseLoadUnscaleV84<"ldapurb",  0b00, 0b01, GPR32>;
defm LDAPURSBW  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b11, GPR32>;
defm LDAPURSBX  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b10, GPR64>;
defm LDAPURH    : BaseLoadUnscaleV84<"ldapurh",  0b01, 0b01, GPR32>;
defm LDAPURSHW  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b11, GPR32>;
defm LDAPURSHX  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b10, GPR64>;
defm LDAPUR     : BaseLoadUnscaleV84<"ldapur",   0b10, 0b01, GPR32>;
defm LDAPURSW   : BaseLoadUnscaleV84<"ldapursw", 0b10, 0b10, GPR64>;
defm LDAPURX    : BaseLoadUnscaleV84<"ldapur",   0b11, 0b01, GPR64>;
}
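// E.g. "ldapur w0, [x1, #-8]" performs an RCpc load-acquire with a simm9
// offset, saving a separate address computation before the acquire load.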

// Match all 64-bit-wide stores whose type is compatible with FPR64.
def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let AddedComplexity = 10 in {

let Predicates = [IsLE] in {
  // Big-endian vector stores must use ST1, so these patterns are
  // little-endian only.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4bf16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

// Match all 128-bit-wide stores whose type is compatible with FPR128.
def : Pat<(store (f128 FPR128:$Rt), (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
          (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let Predicates = [IsLE] in {
  // Big-endian vector stores must use ST1, so these patterns are
  // little-endian only.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8bf16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

} // AddedComplexity = 10

// unscaled i64 truncating stores
def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
  (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
  (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
  (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecStoreULane0Pat<SDPatternOperator StoreOp,
                             ValueType VTy, ValueType STy,
                             SubRegIndex SubRegIdx, Instruction STR> {
  defm : VecStoreLane0Pat<am_unscaled128, StoreOp, VTy, STy, SubRegIdx, simm9, STR>;
}

let AddedComplexity = 19 in {
  defm : VecStoreULane0Pat<truncstorei16, v8i16, i32, hsub, STURHi>;
  defm : VecStoreULane0Pat<store,         v8f16, f16, hsub, STURHi>;
  defm : VecStoreULane0Pat<store,         v4i32, i32, ssub, STURSi>;
  defm : VecStoreULane0Pat<store,         v4f32, f32, ssub, STURSi>;
  defm : VecStoreULane0Pat<store,         v2i64, i64, dsub, STURDi>;
  defm : VecStoreULane0Pat<store,         v2f64, f64, dsub, STURDi>;
}

//---
// STR mnemonics fall back to STUR for negative or unaligned offsets.
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;

def : InstAlias<"strb $Rt, [$Rn, $offset]",
                (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"strh $Rt, [$Rn, $offset]",
                (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
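// E.g. "str x0, [x1, #-8]" has no legal scaled-uimm12 encoding, so these
// aliases let the assembler emit it as "stur x0, [x1, #-8]" instead.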

//---
// (unscaled immediate, unprivileged)
defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;

defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;

//---
// (immediate pre-indexed)
def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32z, "str",  pre_store, i32>;
def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64z, "str",  pre_store, i64>;
def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8Op,  "str",  pre_store, untyped>;
def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16Op, "str",  pre_store, f16>;
def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32Op, "str",  pre_store, f32>;
def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64Op, "str",  pre_store, f64>;
def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128Op, "str", pre_store, f128>;

def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32z, "strb", pre_truncsti8,  i32>;
def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32z, "strh", pre_truncsti16, i32>;
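// Pre-indexed stores write the updated address back to the base register:
// e.g. "str x0, [sp, #-16]!" stores at sp-16 and then sets sp = sp - 16.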

// truncstore i64
def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
           simm9:$off)>;
def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
            simm9:$off)>;
def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
            simm9:$off)>;

def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;

//---
// (immediate post-indexed)
def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32z,  "str", post_store, i32>;
def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64z,  "str", post_store, i64>;
def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8Op,   "str", post_store, untyped>;
def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16Op,  "str", post_store, f16>;
def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32Op,  "str", post_store, f32>;
def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64Op,  "str", post_store, f64>;
def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128Op, "str", post_store, f128>;

def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32z, "strb", post_truncsti8, i32>;
def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32z, "strh", post_truncsti16, i32>;
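// Post-indexed stores use the unmodified base address and then update it:
// e.g. "str x0, [sp], #16" stores at sp and then sets sp = sp + 16.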

// truncstore i64
def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
            simm9:$off)>;
def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
             simm9:$off)>;
def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
             simm9:$off)>;

def : Pat<(post_store (bf16 FPR16:$Rt), GPR64sp:$addr, simm9:$off),
          (STRHpost FPR16:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4bf16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8bf16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;

//===----------------------------------------------------------------------===//
// Load/store exclusive instructions.
//===----------------------------------------------------------------------===//

def LDARW  : LoadAcquire   <0b10, 1, 1, 0, 1, GPR32, "ldar">;
def LDARX  : LoadAcquire   <0b11, 1, 1, 0, 1, GPR64, "ldar">;
def LDARB  : LoadAcquire   <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
def LDARH  : LoadAcquire   <0b01, 1, 1, 0, 1, GPR32, "ldarh">;

def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;

def LDXRW  : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
def LDXRX  : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
def LDXRB  : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
def LDXRH  : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;

def STLRW  : StoreRelease  <0b10, 1, 0, 0, 1, GPR32, "stlr">;
def STLRX  : StoreRelease  <0b11, 1, 0, 0, 1, GPR64, "stlr">;
def STLRB  : StoreRelease  <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
def STLRH  : StoreRelease  <0b01, 1, 0, 0, 1, GPR32, "stlrh">;

def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;

def STXRW  : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
def STXRX  : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
def STXRB  : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
def STXRH  : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;

def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;

def LDXPW  : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
def LDXPX  : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;

def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;

def STXPW  : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
def STXPX  : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;
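// These are typically used in load-linked/store-conditional sequences, e.g.
// an atomic increment is roughly:
//   retry:
//     ldaxr x8, [x0]          // load-acquire exclusive
//     add   x8, x8, #1
//     stlxr w9, x8, [x0]      // store-release exclusive; w9 == 0 on success
//     cbnz  w9, retry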

let Predicates = [HasLOR] in {
  // v8.1a "Limited Ordering Regions" extension load-acquire instructions
  def LDLARW  : LoadAcquire   <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
  def LDLARX  : LoadAcquire   <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
  def LDLARB  : LoadAcquire   <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
  def LDLARH  : LoadAcquire   <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;

  // v8.1a "Limited Ordering Regions" extension store-release instructions
  def STLLRW  : StoreRelease   <0b10, 1, 0, 0, 0, GPR32, "stllr">;
  def STLLRX  : StoreRelease   <0b11, 1, 0, 0, 0, GPR64, "stllr">;
  def STLLRB  : StoreRelease   <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
  def STLLRH  : StoreRelease   <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
}

//===----------------------------------------------------------------------===//
// Floating point to integer conversion instructions (scaled and unscaled).
//===----------------------------------------------------------------------===//

defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;

// AArch64's FCVT* conversions saturate when the result is out of range.
multiclass FPToIntegerSatPats<SDNode to_int_sat, string INST> {
  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (to_int_sat f16:$Rn, i32)),
            (!cast<Instruction>(INST # UWHr) f16:$Rn)>;
  def : Pat<(i64 (to_int_sat f16:$Rn, i64)),
            (!cast<Instruction>(INST # UXHr) f16:$Rn)>;
  }
  def : Pat<(i32 (to_int_sat f32:$Rn, i32)),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int_sat f32:$Rn, i64)),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int_sat f64:$Rn, i32)),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int_sat f64:$Rn, i64)),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;

  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i32:$scale), i32)),
            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
  def : Pat<(i64 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i64:$scale), i64)),
            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
  }
  def : Pat<(i32 (to_int_sat (fmul f32:$Rn, fixedpoint_f32_i32:$scale), i32)),
            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
  def : Pat<(i64 (to_int_sat (fmul f32:$Rn, fixedpoint_f32_i64:$scale), i64)),
            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
  def : Pat<(i32 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i32:$scale), i32)),
            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
  def : Pat<(i64 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i64:$scale), i64)),
            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
}

defm : FPToIntegerSatPats<fp_to_sint_sat, "FCVTZS">;
defm : FPToIntegerSatPats<fp_to_uint_sat, "FCVTZU">;
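// E.g. fp_to_sint_sat of 1.0e20f to i32 selects a single FCVTZS and yields
// INT32_MAX with no extra clamping code. The fixed-point forms fold a
// multiply by a power of two into the conversion: e.g.
// (fp_to_sint_sat (fmul f, 16.0), i32) becomes "fcvtzs w0, s0, #4".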

multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
  def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
  }
  def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
  def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
  def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
  def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;

  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
  }
  def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
  def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
}

defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;

multiclass FPToIntegerPats<SDNode to_int, SDNode to_int_sat, SDNode round, string INST> {
  def : Pat<(i32 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;

  // These instructions saturate like fp_to_[su]int_sat.
  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (to_int_sat (round f16:$Rn), i32)),
            (!cast<Instruction>(INST # UWHr) f16:$Rn)>;
  def : Pat<(i64 (to_int_sat (round f16:$Rn), i64)),
            (!cast<Instruction>(INST # UXHr) f16:$Rn)>;
  }
  def : Pat<(i32 (to_int_sat (round f32:$Rn), i32)),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int_sat (round f32:$Rn), i64)),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int_sat (round f64:$Rn), i32)),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int_sat (round f64:$Rn), i64)),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
}

defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fceil,  "FCVTPS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fceil,  "FCVTPU">;
defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, ffloor, "FCVTMS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, ffloor, "FCVTMU">;
defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, ftrunc, "FCVTZS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, ftrunc, "FCVTZU">;
defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fround, "FCVTAS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fround, "FCVTAU">;
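// E.g. (i32 (fp_to_sint (fceil f32:$Rn))) selects a single
// "fcvtps w0, s0" (convert rounding toward +Inf) instead of a separate
// frintp followed by fcvtzs.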

let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (lround f16:$Rn)),
            (!cast<Instruction>(FCVTASUWHr) f16:$Rn)>;
  def : Pat<(i64 (lround f16:$Rn)),
            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
  def : Pat<(i64 (llround f16:$Rn)),
            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
}
def : Pat<(i32 (lround f32:$Rn)),
          (!cast<Instruction>(FCVTASUWSr) f32:$Rn)>;
def : Pat<(i32 (lround f64:$Rn)),
          (!cast<Instruction>(FCVTASUWDr) f64:$Rn)>;
def : Pat<(i64 (lround f32:$Rn)),
          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
def : Pat<(i64 (lround f64:$Rn)),
          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
def : Pat<(i64 (llround f32:$Rn)),
          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
def : Pat<(i64 (llround f64:$Rn)),
          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
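// lround/llround round halfway cases away from zero, which is exactly
// FCVTAS's rounding mode: e.g. lround(2.5) == 3 and lround(-2.5) == -3.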

//===----------------------------------------------------------------------===//
// Scaled integer to floating point conversion instructions.
//===----------------------------------------------------------------------===//

defm SCVTF : IntegerToFP<0, "scvtf", any_sint_to_fp>;
defm UCVTF : IntegerToFP<1, "ucvtf", any_uint_to_fp>;

//===----------------------------------------------------------------------===//
// Unscaled integer to floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FMOV : UnscaledConversion<"fmov">;

// Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable.
let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
def FMOVH0 : Pseudo<(outs FPR16:$Rd), (ins), [(set f16:$Rd, (fpimm0))]>,
    Sched<[WriteF]>, Requires<[HasFullFP16]>;
def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
    Sched<[WriteF]>;
def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
    Sched<[WriteF]>;
}
// Similarly, add aliases.
def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
    Requires<[HasFullFP16]>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;

//===----------------------------------------------------------------------===//
// Floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FCVT : FPConversion<"fcvt">;

//===----------------------------------------------------------------------===//
// Floating point single operand instructions.
//===----------------------------------------------------------------------===//

defm FABS   : SingleOperandFPData<0b0001, "fabs", fabs>;
defm FMOV   : SingleOperandFPData<0b0000, "fmov">;
defm FNEG   : SingleOperandFPData<0b0010, "fneg", fneg>;
defm FRINTA : SingleOperandFPData<0b1100, "frinta", fround>;
defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
defm FRINTN : SingleOperandFPData<0b1000, "frintn", froundeven>;
defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;

defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;

let SchedRW = [WriteFDiv] in {
defm FSQRT  : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
}

let Predicates = [HasFRInt3264] in {
  defm FRINT32Z : FRIntNNT<0b00, "frint32z", int_aarch64_frint32z>;
  defm FRINT64Z : FRIntNNT<0b10, "frint64z", int_aarch64_frint64z>;
  defm FRINT32X : FRIntNNT<0b01, "frint32x", int_aarch64_frint32x>;
  defm FRINT64X : FRIntNNT<0b11, "frint64x", int_aarch64_frint64x>;
} // HasFRInt3264

let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (lrint f16:$Rn)),
            (FCVTZSUWHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
  def : Pat<(i64 (lrint f16:$Rn)),
            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
  def : Pat<(i64 (llrint f16:$Rn)),
            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
}
def : Pat<(i32 (lrint f32:$Rn)),
          (FCVTZSUWSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i32 (lrint f64:$Rn)),
          (FCVTZSUWDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
def : Pat<(i64 (lrint f32:$Rn)),
          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i64 (lrint f64:$Rn)),
          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
def : Pat<(i64 (llrint f32:$Rn)),
          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i64 (llrint f64:$Rn)),
          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;

//===----------------------------------------------------------------------===//
// Floating point two operand instructions.
//===----------------------------------------------------------------------===//

defm FADD   : TwoOperandFPData<0b0010, "fadd", fadd>;
let SchedRW = [WriteFDiv] in {
defm FDIV   : TwoOperandFPData<0b0001, "fdiv", fdiv>;
}
defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", fmaxnum>;
defm FMAX   : TwoOperandFPData<0b0100, "fmax", fmaximum>;
defm FMINNM : TwoOperandFPData<0b0111, "fminnm", fminnum>;
defm FMIN   : TwoOperandFPData<0b0101, "fmin", fminimum>;
let SchedRW = [WriteFMul] in {
defm FMUL   : TwoOperandFPData<0b0000, "fmul", fmul>;
defm FNMUL  : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
}
defm FSUB   : TwoOperandFPData<0b0011, "fsub", fsub>;

def : Pat<(v1f64 (fmaximum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminimum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;

//===----------------------------------------------------------------------===//
// Floating point three operand instructions.
//===----------------------------------------------------------------------===//

defm FMADD  : ThreeOperandFPData<0, 0, "fmadd", fma>;
defm FMSUB  : ThreeOperandFPData<0, 1, "fmsub",
     TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
     TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
     TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;

// The following def pats catch the case where the LHS of an FMA is negated.
// The TriOpFrag above catches the case where the middle operand is negated.

// N.b. FMSUB etc have the accumulator at the *end* of (ins), unlike
// the NEON variant.
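//
// For reference, the scalar FMA family computes (with Ra the accumulator):
//   FMADD  d = Ra + Rn*Rm       FMSUB  d = Ra - Rn*Rm
//   FNMADD d = -Ra - Rn*Rm      FNMSUB d = -Ra + Rn*Rm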

// Here we first handle the case "a + (-b)*c", which maps to FMSUB:

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, FPR16:$Ra)),
          (FMSUBHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
          (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
          (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

// Now it's time for "(-a) + (-b)*c", which maps to FNMADD:

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, (fneg FPR16:$Ra))),
          (FNMADDHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

//===----------------------------------------------------------------------===//
// Floating point comparison instructions.
//===----------------------------------------------------------------------===//

defm FCMPE : FPComparison<1, "fcmpe", AArch64strict_fcmpe>;
defm FCMP  : FPComparison<0, "fcmp", AArch64any_fcmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional comparison instructions.
//===----------------------------------------------------------------------===//

defm FCCMPE : FPCondComparison<1, "fccmpe">;
defm FCCMP  : FPCondComparison<0, "fccmp", AArch64fccmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional select instruction.
//===----------------------------------------------------------------------===//

defm FCSEL : FPCondSelect<"fcsel">;

// CSEL instructions providing f128 types need to be handled by a
// pseudo-instruction since the eventual code will need to introduce basic
// blocks and control flow.
def F128CSEL : Pseudo<(outs FPR128:$Rd),
                      (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
                      [(set (f128 FPR128:$Rd),
                            (AArch64csel FPR128:$Rn, FPR128:$Rm,
                                       (i32 imm:$cond), NZCV))]> {
  let Uses = [NZCV];
  let usesCustomInserter = 1;
  let hasNoSchedulingInfo = 1;
}

//===----------------------------------------------------------------------===//
// Instructions used for emitting unwind opcodes on ARM64 Windows.
//===----------------------------------------------------------------------===//
let isPseudo = 1 in {
  def SEH_StackAlloc : Pseudo<(outs), (ins i32imm:$size), []>, Sched<[]>;
  def SEH_SaveFPLR : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFPLR_X : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFReg_X :  Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SetFP : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_AddFP : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_Nop : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_PrologEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_EpilogStart : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_EpilogEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions for Windows EH
//===----------------------------------------------------------------------===//
let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1, isPseudo = 1 in {
   def CLEANUPRET : Pseudo<(outs), (ins), [(cleanupret)]>, Sched<[]>;
   let usesCustomInserter = 1 in
     def CATCHRET : Pseudo<(outs), (ins am_brcond:$dst, am_brcond:$src), [(catchret bb:$dst, bb:$src)]>,
                    Sched<[]>;
}

// Pseudo instructions for homogeneous prolog/epilog
let isPseudo = 1 in {
  // Save CSRs in order, {FPOffset}
  def HOM_Prolog : Pseudo<(outs), (ins variable_ops), []>, Sched<[]>;
  // Restore CSRs in order
  def HOM_Epilog : Pseudo<(outs), (ins variable_ops), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// Floating point immediate move.
//===----------------------------------------------------------------------===//

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
defm FMOV : FPMoveImmediate<"fmov">;
}

//===----------------------------------------------------------------------===//
// Advanced SIMD two vector instructions.
//===----------------------------------------------------------------------===//

defm UABDL   : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
                                          AArch64uabd>;
// Match UABDL in log2-shuffle patterns.
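// The xor/add forms below match the expanded abs idiom,
// abs(x) == ((x + (x >>s 15)) ^ (x >>s 15)) for i16 lanes, which
// legalization may produce in place of a plain (abs ...) node.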
def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
                           (zext (v8i8 V64:$opB))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (v8i8 V64:$opA)),
                                (zext (v8i8 V64:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 V128:$opA)),
                           (zext (extract_high_v16i8 V128:$opB))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
                                (zext (extract_high_v16i8 V128:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
                           (zext (v4i16 V64:$opB))))),
          (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 V128:$opA)),
                           (zext (extract_high_v8i16 V128:$opB))))),
          (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
                           (zext (v2i32 V64:$opB))))),
          (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 V128:$opA)),
                           (zext (extract_high_v4i32 V128:$opB))))),
          (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;

defm ABS    : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
defm CLS    : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
defm CLZ    : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
defm CMEQ   : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
defm CNT    : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
defm FABS   : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;

def : Pat<(v8i8 (AArch64vashr (v8i8 V64:$Rn), (i32 7))),
          (CMLTv8i8rz V64:$Rn)>;
def : Pat<(v4i16 (AArch64vashr (v4i16 V64:$Rn), (i32 15))),
          (CMLTv4i16rz V64:$Rn)>;
def : Pat<(v2i32 (AArch64vashr (v2i32 V64:$Rn), (i32 31))),
          (CMLTv2i32rz V64:$Rn)>;
def : Pat<(v16i8 (AArch64vashr (v16i8 V128:$Rn), (i32 7))),
          (CMLTv16i8rz V128:$Rn)>;
def : Pat<(v8i16 (AArch64vashr (v8i16 V128:$Rn), (i32 15))),
          (CMLTv8i16rz V128:$Rn)>;
def : Pat<(v4i32 (AArch64vashr (v4i32 V128:$Rn), (i32 31))),
          (CMLTv4i32rz V128:$Rn)>;
def : Pat<(v2i64 (AArch64vashr (v2i64 V128:$Rn), (i32 63))),
          (CMLTv2i64rz V128:$Rn)>;

defm FCMEQ  : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
defm FCVTL  : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
          (FCVTLv4i16 V64:$Rn)>;
def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
                                                              (i64 4)))),
          (FCVTLv8i16 V128:$Rn)>;
def : Pat<(v2f64 (fpextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;

def : Pat<(v4f32 (fpextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;

defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
defm FCVTN  : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
          (FCVTNv4i16 V128:$Rn)>;
def : Pat<(concat_vectors V64:$Rd,
                          (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
          (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
def : Pat<(v2f32 (fpround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
def : Pat<(v4f16 (fpround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
def : Pat<(concat_vectors V64:$Rd, (v2f32 (fpround (v2f64 V128:$Rn)))),
          (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
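// The concat_vectors and extract_high forms above select the FCVTL2/FCVTN2
// variants, which widen from, or narrow into, the high 64-bit half of the
// vector register.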
4251defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
4252defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
4253defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
4254                                        int_aarch64_neon_fcvtxn>;
4255defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
4256defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;
4257
4258// AArch64's FCVT instructions saturate when out of range.
4259multiclass SIMDTwoVectorFPToIntSatPats<SDNode to_int_sat, string INST> {
4260  def : Pat<(v4i16 (to_int_sat v4f16:$Rn, i16)),
4261            (!cast<Instruction>(INST # v4f16) v4f16:$Rn)>;
4262  def : Pat<(v8i16 (to_int_sat v8f16:$Rn, i16)),
4263            (!cast<Instruction>(INST # v8f16) v8f16:$Rn)>;
4264  def : Pat<(v2i32 (to_int_sat v2f32:$Rn, i32)),
4265            (!cast<Instruction>(INST # v2f32) v2f32:$Rn)>;
4266  def : Pat<(v4i32 (to_int_sat v4f32:$Rn, i32)),
4267            (!cast<Instruction>(INST # v4f32) v4f32:$Rn)>;
4268  def : Pat<(v2i64 (to_int_sat v2f64:$Rn, i64)),
4269            (!cast<Instruction>(INST # v2f64) v2f64:$Rn)>;
4270}
4271defm : SIMDTwoVectorFPToIntSatPats<fp_to_sint_sat, "FCVTZS">;
4272defm : SIMDTwoVectorFPToIntSatPats<fp_to_uint_sat, "FCVTZU">;
4273
4274def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
4275def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
4276def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
4277def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
4278def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;
4279
4280def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
4281def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
4282def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
4283def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
4284def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;
4285
4286defm FNEG   : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
4287defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
4288defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", fround>;
4289defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
4290defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
4291defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", froundeven>;
4292defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
4293defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
4294defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;
4295
4296let Predicates = [HasFRInt3264] in {
4297  defm FRINT32Z : FRIntNNTVector<0, 0, "frint32z", int_aarch64_neon_frint32z>;
4298  defm FRINT64Z : FRIntNNTVector<0, 1, "frint64z", int_aarch64_neon_frint64z>;
4299  defm FRINT32X : FRIntNNTVector<1, 0, "frint32x", int_aarch64_neon_frint32x>;
4300  defm FRINT64X : FRIntNNTVector<1, 1, "frint64x", int_aarch64_neon_frint64x>;
4301} // HasFRInt3264
4302
4303defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
4304defm FSQRT  : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
4305defm NEG    : SIMDTwoVectorBHSD<1, 0b01011, "neg",
4306                               UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
4307defm NOT    : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
4308// Aliases for MVN -> NOT.
4309def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
4310                (NOTv8i8 V64:$Vd, V64:$Vn)>;
4311def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
4312                (NOTv16i8 V128:$Vd, V128:$Vn)>;
4313
4314def : Pat<(vnot (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
4315def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
4316def : Pat<(vnot (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
4317def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
4318def : Pat<(vnot (v1i64 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
4319def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;

defm RBIT   : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", bitreverse>;
defm REV16  : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
defm REV32  : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
defm REV64  : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
       BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >;
defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>;
defm SCVTF  : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
defm SHLL   : SIMDVectorLShiftLongBySizeBHS;
defm SQABS  : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
       BinOpFrag<(add node:$LHS, (AArch64uaddlp node:$RHS))> >;
defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp", AArch64uaddlp>;
defm UCVTF  : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
defm UQXTN  : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
defm XTN    : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;

def : Pat<(v4f16  (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
def : Pat<(v4f16  (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
def : Pat<(v4bf16 (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
def : Pat<(v4bf16 (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
def : Pat<(v8f16  (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
def : Pat<(v8f16  (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
def : Pat<(v8bf16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
def : Pat<(v8bf16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
def : Pat<(v2f32  (AArch64rev64 V64:$Rn)),  (REV64v2i32 V64:$Rn)>;
def : Pat<(v4f32  (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;

// Patterns for vector long shift (by element width). These need to match all
// three of zext, sext and anyext so it's easier to pull the patterns out of the
// definition.
multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
  def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
            (SHLLv8i8 V64:$Rn)>;
  def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
            (SHLLv16i8 V128:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
            (SHLLv4i16 V64:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
            (SHLLv8i16 V128:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
            (SHLLv2i32 V64:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
            (SHLLv4i32 V128:$Rn)>;
}

defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
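// For example, "shll v0.8h, v1.8b, #8" widens each byte and then shifts it
// left by the element width; since the shift fills the low bits with zeroes,
// the result is the same whether the extension was zext, sext or anyext,
// which is why all three instantiations above can share one multiclass.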

// Constant vector values, used in the S/UQXTN patterns below.
def VImmFF:   PatLeaf<(AArch64NvCast (v2i64 (AArch64movi_edit (i32 85))))>;
def VImmFFFF: PatLeaf<(AArch64NvCast (v2i64 (AArch64movi_edit (i32 51))))>;
def VImm7F:   PatLeaf<(AArch64movi_shift (i32 127), (i32 0))>;
def VImm80:   PatLeaf<(AArch64mvni_shift (i32 127), (i32 0))>;
def VImm7FFF: PatLeaf<(AArch64movi_msl (i32 127), (i32 264))>;
def VImm8000: PatLeaf<(AArch64mvni_msl (i32 127), (i32 264))>;
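// Note on the immediate encodings above (descriptive, following the standard
// MOVI/MVNI immediate forms): VImmFF and VImmFFFF use the per-byte MOVI,
// where each bit of the 8-bit immediate expands to a 0xFF or 0x00 byte, so
// 85 (0b01010101) gives 0x00FF in each 16-bit lane and 51 (0b00110011) gives
// 0x0000FFFF in each 32-bit lane; VImm7F/VImm80 are MOVI/MVNI #0x7f, giving
// 127 and -128 per lane; VImm7FFF/VImm8000 use the "shift ones" (MSL #8)
// form, giving 32767 and -32768 per 32-bit lane.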

// trunc(umin(X, 255)) -> UQXTN v8i8
def : Pat<(v8i8 (trunc (umin (v8i16 V128:$Vn), (v8i16 VImmFF)))),
          (UQXTNv8i8 V128:$Vn)>;
// trunc(umin(X, 65535)) -> UQXTN v4i16
def : Pat<(v4i16 (trunc (umin (v4i32 V128:$Vn), (v4i32 VImmFFFF)))),
          (UQXTNv4i16 V128:$Vn)>;
// trunc(smin(smax(X, -128), 127)) -> SQXTN
//  (also matched with the min/max order reversed)
def : Pat<(v8i8 (trunc (smin (smax (v8i16 V128:$Vn), (v8i16 VImm80)),
                             (v8i16 VImm7F)))),
          (SQXTNv8i8 V128:$Vn)>;
def : Pat<(v8i8 (trunc (smax (smin (v8i16 V128:$Vn), (v8i16 VImm7F)),
                             (v8i16 VImm80)))),
          (SQXTNv8i8 V128:$Vn)>;
// trunc(smin(smax(X, -32768), 32767)) -> SQXTN
//  (also matched with the min/max order reversed)
def : Pat<(v4i16 (trunc (smin (smax (v4i32 V128:$Vn), (v4i32 VImm8000)),
                              (v4i32 VImm7FFF)))),
          (SQXTNv4i16 V128:$Vn)>;
def : Pat<(v4i16 (trunc (smax (smin (v4i32 V128:$Vn), (v4i32 VImm7FFF)),
                              (v4i32 VImm8000)))),
          (SQXTNv4i16 V128:$Vn)>;

// concat_vectors(Vd, trunc(smin(smax(Vn, -128), 127))) ~> SQXTN2(Vd, Vn)
// (also matched with the min/max order reversed)
def : Pat<(v16i8 (concat_vectors
                 (v8i8 V64:$Vd),
                 (v8i8 (trunc (smin (smax (v8i16 V128:$Vn), (v8i16 VImm80)),
                                          (v8i16 VImm7F)))))),
          (SQXTNv16i8 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;
def : Pat<(v16i8 (concat_vectors
                 (v8i8 V64:$Vd),
                 (v8i8 (trunc (smax (smin (v8i16 V128:$Vn), (v8i16 VImm7F)),
                                          (v8i16 VImm80)))))),
          (SQXTNv16i8 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;

// concat_vectors(Vd, trunc(smin(smax(Vn, -32768), 32767))) ~> SQXTN2(Vd, Vn)
// (also matched with the min/max order reversed)
def : Pat<(v8i16 (concat_vectors
                 (v4i16 V64:$Vd),
                 (v4i16 (trunc (smin (smax (v4i32 V128:$Vn), (v4i32 VImm8000)),
                                           (v4i32 VImm7FFF)))))),
          (SQXTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;
def : Pat<(v8i16 (concat_vectors
                 (v4i16 V64:$Vd),
                 (v4i16 (trunc (smax (smin (v4i32 V128:$Vn), (v4i32 VImm7FFF)),
                                           (v4i32 VImm8000)))))),
          (SQXTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three vector instructions.
//===----------------------------------------------------------------------===//

defm ADD     : SIMDThreeSameVector<0, 0b10000, "add", add>;
defm ADDP    : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>;
defm CMEQ    : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE    : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT    : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI    : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS    : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST   : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
foreach VT = [ v8i8, v16i8, v4i16, v8i16, v2i32, v4i32, v2i64 ] in {
def : Pat<(vnot (AArch64cmeqz VT:$Rn)), (!cast<Instruction>("CMTST"#VT) VT:$Rn, VT:$Rn)>;
}
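// CMTST with both operands equal tests each lane for "x & x != 0", i.e. for
// being non-zero, which is exactly the negation of the compare-against-zero
// matched here.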
defm FABD    : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>;
let Predicates = [HasNEON] in {
foreach VT = [ v2f32, v4f32, v2f64 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
let Predicates = [HasNEON, HasFullFP16] in {
foreach VT = [ v4f16, v8f16 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
defm FACGE   : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>;
defm FACGT   : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>;
defm FADDP   : SIMDThreeSameVectorFP<1,0,0b010,"faddp",int_aarch64_neon_faddp>;
defm FADD    : SIMDThreeSameVectorFP<0,0,0b010,"fadd", fadd>;
defm FCMEQ   : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE   : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT   : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FDIV    : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", fdiv>;
defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
defm FMAXNM  : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", fmaxnum>;
defm FMAXP   : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>;
defm FMAX    : SIMDThreeSameVectorFP<0,0,0b110,"fmax", fmaximum>;
defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>;
defm FMINNM  : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", fminnum>;
defm FMINP   : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
defm FMIN    : SIMDThreeSameVectorFP<0,1,0b110,"fmin", fminimum>;

// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
// instruction expects the addend first, while the fma intrinsic puts it last.
defm FMLA     : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
            TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm FMLS     : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls",
            TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
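// For example, "fmla v0.4s, v1.4s, v2.4s" computes v0 + v1 * v2, i.e.
// fma(v1, v2, v0); the fragments above rotate the operands so that the tied
// accumulator ($Rd, the LHS) lines up with fma's final (addend) operand.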

defm FMULX    : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
defm FMUL     : SIMDThreeSameVectorFP<1,0,0b011,"fmul", fmul>;
defm FRECPS   : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
defm FRSQRTS  : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
defm FSUB     : SIMDThreeSameVectorFP<0,1,0b010,"fsub", fsub>;

// MLA and MLS are generated by the MachineCombiner.
defm MLA      : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla", null_frag>;
defm MLS      : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls", null_frag>;

defm MUL      : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
defm PMUL     : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
defm SABA     : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
      TriOpFrag<(add node:$LHS, (AArch64sabd node:$MHS, node:$RHS))> >;
defm SABD     : SIMDThreeSameVectorBHS<0,0b01110,"sabd", AArch64sabd>;
defm SHADD    : SIMDThreeSameVectorBHS<0,0b00000,"shadd", AArch64shadd>;
defm SHSUB    : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
defm SMAXP    : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
defm SMAX     : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
defm SMINP    : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
defm SMIN     : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
defm SQADD    : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
defm SRHADD   : SIMDThreeSameVectorBHS<0,0b00010,"srhadd", AArch64srhadd>;
defm SRSHL    : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeSameVector<1,0b10000,"sub", sub>;
defm UABA     : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
      TriOpFrag<(add node:$LHS, (AArch64uabd node:$MHS, node:$RHS))> >;
defm UABD     : SIMDThreeSameVectorBHS<1,0b01110,"uabd", AArch64uabd>;
defm UHADD    : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", AArch64uhadd>;
defm UHSUB    : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
defm UMAXP    : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
defm UMAX     : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
defm UMINP    : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
defm UMIN     : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
defm UQADD    : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
defm URHADD   : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", AArch64urhadd>;
defm URSHL    : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
                                                  int_aarch64_neon_sqrdmlah>;
defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
                                                    int_aarch64_neon_sqrdmlsh>;

// Extra saturation patterns, beyond the intrinsic matches above.
defm : SIMDThreeSameVectorExtraPatterns<"SQADD", saddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQADD", uaddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"SQSUB", ssubsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQSUB", usubsat>;

defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
                                  BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
                                  BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;

// Pseudo bitwise select pattern BSP.
// It is expanded into BSL/BIT/BIF after register allocation.
defm BSP : SIMDLogicalThreeVectorPseudo<TriOpFrag<(or (and node:$LHS, node:$MHS),
                                                      (and (vnot node:$LHS), node:$RHS))>>;
defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl">;
defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
defm BIF : SIMDLogicalThreeVectorTied<1, 0b11, "bif">;
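// The three real opcodes differ only in which operand is tied to the
// destination: BSL uses the destination as the select mask, while BIT/BIF
// use it as one of the two data inputs (inserting where the mask is true or
// false, respectively). Keeping BSP a pseudo until after register allocation
// lets the expansion pick whichever form avoids a copy.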

def : Pat<(AArch64bsp (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;

def : Pat<(AArch64bsp (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;

def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;

def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
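// The "less-than" comparison mnemonics below are assembler-only aliases (the
// trailing 0 disables them for printing); each one is the corresponding
// "greater-than" instruction with its two source operands swapped.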

def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmls.8b\t$dst, $src1, $src2}",
                (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmls.16b\t$dst, $src1, $src2}",
                (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmls.4h\t$dst, $src1, $src2}",
                (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmls.8h\t$dst, $src1, $src2}",
                (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmls.2s\t$dst, $src1, $src2}",
                (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmls.4s\t$dst, $src1, $src2}",
                (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmls.2d\t$dst, $src1, $src2}",
                (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlo.8b\t$dst, $src1, $src2}",
                (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlo.16b\t$dst, $src1, $src2}",
                (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlo.4h\t$dst, $src1, $src2}",
                (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlo.8h\t$dst, $src1, $src2}",
                (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlo.2s\t$dst, $src1, $src2}",
                (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlo.4s\t$dst, $src1, $src2}",
                (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlo.2d\t$dst, $src1, $src2}",
                (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmle.8b\t$dst, $src1, $src2}",
                (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmle.16b\t$dst, $src1, $src2}",
                (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmle.4h\t$dst, $src1, $src2}",
                (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmle.8h\t$dst, $src1, $src2}",
                (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmle.2s\t$dst, $src1, $src2}",
                (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmle.4s\t$dst, $src1, $src2}",
                (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmle.2d\t$dst, $src1, $src2}",
                (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlt.8b\t$dst, $src1, $src2}",
                (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlt.16b\t$dst, $src1, $src2}",
                (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlt.4h\t$dst, $src1, $src2}",
                (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlt.8h\t$dst, $src1, $src2}",
                (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlt.2s\t$dst, $src1, $src2}",
                (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlt.4s\t$dst, $src1, $src2}",
                (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlt.2d\t$dst, $src1, $src2}",
                (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmle.4h\t$dst, $src1, $src2}",
                (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmle.8h\t$dst, $src1, $src2}",
                (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmle.2s\t$dst, $src1, $src2}",
                (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmle.4s\t$dst, $src1, $src2}",
                (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmle.2d\t$dst, $src1, $src2}",
                (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmlt.4h\t$dst, $src1, $src2}",
                (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmlt.8h\t$dst, $src1, $src2}",
                (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmlt.2s\t$dst, $src1, $src2}",
                (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmlt.4s\t$dst, $src1, $src2}",
                (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmlt.2d\t$dst, $src1, $src2}",
                (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
                "|facle.4h\t$dst, $src1, $src2}",
                (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
                "|facle.8h\t$dst, $src1, $src2}",
                (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
                "|facle.2s\t$dst, $src1, $src2}",
                (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
                "|facle.4s\t$dst, $src1, $src2}",
                (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
                "|facle.2d\t$dst, $src1, $src2}",
                (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
                "|faclt.4h\t$dst, $src1, $src2}",
                (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
                "|faclt.8h\t$dst, $src1, $src2}",
                (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
                "|faclt.2s\t$dst, $src1, $src2}",
                (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
                "|faclt.4s\t$dst, $src1, $src2}",
                (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
                "|faclt.2d\t$dst, $src1, $src2}",
                (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions.
//===----------------------------------------------------------------------===//

defm ADD      : SIMDThreeScalarD<0, 0b10000, "add", add>;
defm CMEQ     : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE     : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT     : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI     : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS     : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST    : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD     : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FABD64 FPR64:$Rn, FPR64:$Rm)>;
let Predicates = [HasFullFP16] in {
def : Pat<(fabs (fsub f16:$Rn, f16:$Rm)), (FABD16 f16:$Rn, f16:$Rm)>;
}
def : Pat<(fabs (fsub f32:$Rn, f32:$Rm)), (FABD32 f32:$Rn, f32:$Rm)>;
def : Pat<(fabs (fsub f64:$Rn, f64:$Rm)), (FABD64 f64:$Rn, f64:$Rm)>;
defm FACGE    : SIMDThreeScalarFPCmp<1, 0, 0b101, "facge",
                                     int_aarch64_neon_facge>;
defm FACGT    : SIMDThreeScalarFPCmp<1, 1, 0b101, "facgt",
                                     int_aarch64_neon_facgt>;
defm FCMEQ    : SIMDThreeScalarFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE    : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT    : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FMULX    : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx, HasNEONorStreamingSVE>;
defm FRECPS   : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps, HasNEONorStreamingSVE>;
defm FRSQRTS  : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts, HasNEONorStreamingSVE>;
defm SQADD    : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeScalarHS<  0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeScalarHS<  1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
defm SRSHL    : SIMDThreeScalarD<   0, 0b01010, "srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeScalarD<   0, 0b01000, "sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeScalarD<   1, 0b10000, "sub", sub>;
defm UQADD    : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
defm URSHL    : SIMDThreeScalarD<   1, 0b01010, "urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeScalarD<   1, 0b01000, "ushl", int_aarch64_neon_ushl>;
let Predicates = [HasRDM] in {
  defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
  defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
  def : Pat<(i32 (int_aarch64_neon_sqrdmlah (i32 FPR32:$Rd), (i32 FPR32:$Rn),
                                            (i32 FPR32:$Rm))),
            (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
  def : Pat<(i32 (int_aarch64_neon_sqrdmlsh (i32 FPR32:$Rd), (i32 FPR32:$Rn),
                                            (i32 FPR32:$Rm))),
            (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
}

def : InstAlias<"cmls $dst, $src1, $src2",
                (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmle $dst, $src1, $src2",
                (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlo $dst, $src1, $src2",
                (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlt $dst, $src1, $src2",
                (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions (mixed operands).
//===----------------------------------------------------------------------===//
defm SQDMULL  : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
                                       int_aarch64_neon_sqdmulls_scalar>;
defm SQDMLAL  : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
defm SQDMLSL  : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;

def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;

//===----------------------------------------------------------------------===//
// Advanced SIMD two scalar instructions.
//===----------------------------------------------------------------------===//

defm ABS    : SIMDTwoScalarD<    0, 0b01011, "abs", abs>;
defm CMEQ   : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
defm FCMEQ  : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDFPTwoScalar<   0, 0, 0b11100, "fcvtas">;
defm FCVTAU : SIMDFPTwoScalar<   1, 0, 0b11100, "fcvtau">;
defm FCVTMS : SIMDFPTwoScalar<   0, 0, 0b11011, "fcvtms">;
defm FCVTMU : SIMDFPTwoScalar<   1, 0, 0b11011, "fcvtmu">;
defm FCVTNS : SIMDFPTwoScalar<   0, 0, 0b11010, "fcvtns">;
defm FCVTNU : SIMDFPTwoScalar<   1, 0, 0b11010, "fcvtnu">;
defm FCVTPS : SIMDFPTwoScalar<   0, 1, 0b11010, "fcvtps">;
defm FCVTPU : SIMDFPTwoScalar<   1, 1, 0b11010, "fcvtpu">;
def  FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
defm FCVTZS : SIMDFPTwoScalar<   0, 1, 0b11011, "fcvtzs">;
defm FCVTZU : SIMDFPTwoScalar<   1, 1, 0b11011, "fcvtzu">;
defm FRECPE : SIMDFPTwoScalar<   0, 1, 0b11101, "frecpe", HasNEONorStreamingSVE>;
defm FRECPX : SIMDFPTwoScalar<   0, 1, 0b11111, "frecpx", HasNEONorStreamingSVE>;
defm FRSQRTE : SIMDFPTwoScalar<  1, 1, 0b11101, "frsqrte", HasNEONorStreamingSVE>;
defm NEG    : SIMDTwoScalarD<    1, 0b01011, "neg",
                                 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm SCVTF  : SIMDFPTwoScalarCVT<   0, 0, 0b11101, "scvtf", AArch64sitof>;
defm SQABS  : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
                                     int_aarch64_neon_suqadd>;
defm UCVTF  : SIMDFPTwoScalarCVT<   1, 0, 0b11101, "ucvtf", AArch64uitof>;
defm UQXTN  : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
                                    int_aarch64_neon_usqadd>;

def : Pat<(v1i64 (AArch64vashr (v1i64 V64:$Rn), (i32 63))),
          (CMLTv1i64rz V64:$Rn)>;
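// An arithmetic shift right by 63 replicates the sign bit across the 64-bit
// lane, which is exactly what "cmlt #0" produces.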

def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
          (FCVTASv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
          (FCVTAUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
          (FCVTMSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
          (FCVTMUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
          (FCVTNSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
          (FCVTNUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
          (FCVTPSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
          (FCVTPUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtzs (v1f64 FPR64:$Rn))),
          (FCVTZSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtzu (v1f64 FPR64:$Rn))),
          (FCVTZUv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frecpe (f16 FPR16:$Rn))),
          (FRECPEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
          (FRECPEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
          (FRECPEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
          (FRECPEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frecps (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRECPS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frecps (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRECPSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frecps (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRECPSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frecps (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRECPS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frecps (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRECPSv2f64 FPR128:$Rn, FPR128:$Rm)>;

def : Pat<(f16 (int_aarch64_neon_frecpx (f16 FPR16:$Rn))),
          (FRECPXv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
          (FRECPXv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
          (FRECPXv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frsqrte (f16 FPR16:$Rn))),
          (FRSQRTEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
          (FRSQRTEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
          (FRSQRTEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
          (FRSQRTEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frsqrts (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRSQRTS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frsqrts (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRSQRTSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frsqrts (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRSQRTSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frsqrts (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRSQRTS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frsqrts (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRSQRTSv2f64 FPR128:$Rn, FPR128:$Rm)>;

// Some float -> int -> float conversion patterns where we want to keep the
// int values in FP registers, using the corresponding NEON instructions to
// avoid more costly int <-> fp register transfers.
let Predicates = [HasNEON] in {
def : Pat<(f64 (sint_to_fp (i64 (fp_to_sint f64:$Rn)))),
          (SCVTFv1i64 (i64 (FCVTZSv1i64 f64:$Rn)))>;
def : Pat<(f32 (sint_to_fp (i32 (fp_to_sint f32:$Rn)))),
          (SCVTFv1i32 (i32 (FCVTZSv1i32 f32:$Rn)))>;
def : Pat<(f64 (uint_to_fp (i64 (fp_to_uint f64:$Rn)))),
          (UCVTFv1i64 (i64 (FCVTZUv1i64 f64:$Rn)))>;
def : Pat<(f32 (uint_to_fp (i32 (fp_to_uint f32:$Rn)))),
          (UCVTFv1i32 (i32 (FCVTZUv1i32 f32:$Rn)))>;

let Predicates = [HasFullFP16] in {
def : Pat<(f16 (sint_to_fp (i32 (fp_to_sint f16:$Rn)))),
          (SCVTFv1i16 (f16 (FCVTZSv1f16 f16:$Rn)))>;
def : Pat<(f16 (uint_to_fp (i32 (fp_to_uint f16:$Rn)))),
          (UCVTFv1i16 (f16 (FCVTZUv1f16 f16:$Rn)))>;
}
}
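// For example (an illustrative C-level round trip, assuming the default
// codegen):
//   double f(double x) { return (double)(long)x; }
// becomes "fcvtzs d0, d0" followed by "scvtf d0, d0", never leaving the FP
// register file.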

// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// Here are the patterns for 8 and 16-bits to float.
// 8-bits -> float.
multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
                             SDPatternOperator loadop, Instruction UCVTF,
                             ROAddrMode ro, Instruction LDRW, Instruction LDRX,
                             SubRegIndex sub> {
  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                                 sub))>;

  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                                 sub))>;
}

defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
                         UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f32 (uint_to_fp (i32
                     (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
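// For example (an illustrative C-level sketch, assuming the default codegen):
//   float f(const unsigned char *p) { return *p; }
// can load straight into a vector register and convert in place:
//   ldr b0, [x0]
//   ucvtf s0, s0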
// 16-bits -> float.
defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
                         UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bit loads are handled in the target-specific DAG combine
// performIntToFpCombine.
// Converting a 64-bit integer to a 32-bit float is not possible with
// UCVTF on floating point registers (source and destination
// must have the same size).

// Here are the patterns for 8, 16, 32, and 64-bits to double.
// 8-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
                         UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f64 (uint_to_fp (i32
                    (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
                         UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, load,
                         UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
// 64-bit -> double is handled in the target-specific DAG combine
// performIntToFpCombine.

//===----------------------------------------------------------------------===//
// Advanced SIMD three different-sized vector instructions.
//===----------------------------------------------------------------------===//

defm ADDHN  : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
defm SUBHN  : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
defm PMULL  : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
defm SABAL  : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
                                             AArch64sabd>;
defm SABDL   : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
                                          AArch64sabd>;
defm SADDL   : SIMDLongThreeVectorBHS<   0, 0b0000, "saddl",
            BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
defm SADDW   : SIMDWideThreeVectorBHS<   0, 0b0001, "saddw",
                 BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
defm SMLAL   : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL   : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL   : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
                                               int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
                                               int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
                                     int_aarch64_neon_sqdmull>;
defm SSUBL   : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
                 BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
defm SSUBW   : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
                 BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
defm UABAL   : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
                                              AArch64uabd>;
defm UADDL   : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
                 BinOpFrag<(add (zanyext node:$LHS), (zanyext node:$RHS))>>;
defm UADDW   : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
                 BinOpFrag<(add node:$LHS, (zanyext node:$RHS))>>;
defm UMLAL   : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL   : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL   : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
defm USUBL   : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
                 BinOpFrag<(sub (zanyext node:$LHS), (zanyext node:$RHS))>>;
defm USUBW   : SIMDWideThreeVectorBHS<   1, 0b0011, "usubw",
                 BinOpFrag<(sub node:$LHS, (zanyext node:$RHS))>>;

// Additional patterns for [SU]ML[AS]L
multiclass Neon_mul_acc_widen_patterns<SDPatternOperator opnode, SDPatternOperator vecopnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v4i16 (opnode
                    V64:$Ra,
                    (v4i16 (extract_subvector
                            (vecopnode (v8i8 V64:$Rn),(v8i8 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v8i16 (INST8B
                                     (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
  def : Pat<(v2i32 (opnode
                    V64:$Ra,
                    (v2i32 (extract_subvector
                            (vecopnode (v4i16 V64:$Rn),(v4i16 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v4i32 (INST4H
                                     (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
  def : Pat<(v1i64 (opnode
                    V64:$Ra,
                    (v1i64 (extract_subvector
                            (vecopnode (v2i32 V64:$Rn),(v2i32 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v2i64 (INST2S
                                     (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
}

defm : Neon_mul_acc_widen_patterns<add, int_aarch64_neon_umull,
     UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<add, int_aarch64_neon_smull,
     SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<sub, int_aarch64_neon_umull,
     UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<sub, int_aarch64_neon_smull,
     SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
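// Note that when only the low half of the widened multiply feeds the
// accumulate, the patterns above still use the full 128-bit [SU]ML[AS]L:
// the 64-bit accumulator is placed in dsub and dsub of the result is
// extracted; the high half is computed but simply never used.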

// Additional patterns for SMULL and UMULL
multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V64:$Rn, V64:$Rm)>;
}

defm : Neon_mul_widen_patterns<AArch64smull, SMULLv8i8_v8i16,
  SMULLv4i16_v4i32, SMULLv2i32_v2i64>;
defm : Neon_mul_widen_patterns<AArch64umull, UMULLv8i8_v8i16,
  UMULLv4i16_v4i32, UMULLv2i32_v2i64>;

// Patterns for smull2/umull2.
multiclass Neon_mul_high_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (extract_high_v16i8 V128:$Rn),
                           (extract_high_v16i8 V128:$Rm))),
             (INST8B V128:$Rn, V128:$Rm)>;
  def : Pat<(v4i32 (opnode (extract_high_v8i16 V128:$Rn),
                           (extract_high_v8i16 V128:$Rm))),
             (INST4H V128:$Rn, V128:$Rm)>;
  def : Pat<(v2i64 (opnode (extract_high_v4i32 V128:$Rn),
                           (extract_high_v4i32 V128:$Rm))),
             (INST2S V128:$Rn, V128:$Rm)>;
}

defm : Neon_mul_high_patterns<AArch64smull, SMULLv16i8_v8i16,
  SMULLv8i16_v4i32, SMULLv4i32_v2i64>;
defm : Neon_mul_high_patterns<AArch64umull, UMULLv16i8_v8i16,
  UMULLv8i16_v4i32, UMULLv4i32_v2i64>;

// Additional patterns for SMLAL/SMLSL and UMLAL/UMLSL
multiclass Neon_mulacc_widen_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S  V128:$Rd, V64:$Rn, V64:$Rm)>;
}

defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;

// Patterns for 64-bit pmull
def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
          (PMULLv1i64 V64:$Rn, V64:$Rm)>;
def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
                                    (extractelt (v2i64 V128:$Rm), (i64 1))),
          (PMULLv2i64 V128:$Rn, V128:$Rm)>;

// CodeGen patterns for addhn and subhn instructions, which can actually be
// written in LLVM IR without too much difficulty.
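// For example (illustrative IR for the first ADDHN pattern below):
//   %sum = add <8 x i16> %a, %b
//   %hi  = lshr <8 x i16> %sum, <i16 8, i16 8, i16 8, i16 8,
//                                i16 8, i16 8, i16 8, i16 8>
//   %res = trunc <8 x i16> %hi to <8 x i8>
// selects to "addhn v0.8b, v1.8h, v2.8h".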

// ADDHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
          (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                           (i32 16))))),
          (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                           (i32 32))))),
          (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 8))))),
          (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 16))))),
          (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 32))))),
          (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

// SUBHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
          (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                           (i32 16))))),
          (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                           (i32 32))))),
          (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 8))))),
          (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 16))))),
          (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 32))))),
          (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

//----------------------------------------------------------------------------
// AdvSIMD bitwise extract from vector instruction.
//----------------------------------------------------------------------------

defm EXT : SIMDBitwiseExtract<"ext">;

def AdjustExtImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 + N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
multiclass ExtPat<ValueType VT64, ValueType VT128, int N> {
  def : Pat<(VT64 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
            (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
  def : Pat<(VT128 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
            (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
  // We use EXT to handle extract_subvector that copies the upper 64 bits of a
  // 128-bit vector.
5319  def : Pat<(VT64 (extract_subvector V128:$Rn, (i64 N))),
5320            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
5321  // A 64-bit EXT of two halves of the same 128-bit register can be done as a
5322  // single 128-bit EXT.
5323  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 0)),
5324                              (extract_subvector V128:$Rn, (i64 N)),
5325                              (i32 imm:$imm))),
5326            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, imm:$imm), dsub)>;
5327  // A 64-bit EXT of the high half of a 128-bit register can be done using a
5328  // 128-bit EXT of the whole register with an adjustment to the immediate. The
5329  // top half of the other operand will be unset, but that doesn't matter as it
5330  // will not be used.
5331  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 N)),
5332                              V64:$Rm,
5333                              (i32 imm:$imm))),
5334            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn,
5335                                      (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
5336                                      (AdjustExtImm imm:$imm)), dsub)>;
5337}
5338
5339defm : ExtPat<v8i8, v16i8, 8>;
5340defm : ExtPat<v4i16, v8i16, 4>;
5341defm : ExtPat<v4f16, v8f16, 4>;
5342defm : ExtPat<v4bf16, v8bf16, 4>;
5343defm : ExtPat<v2i32, v4i32, 2>;
5344defm : ExtPat<v2f32, v4f32, 2>;
5345defm : ExtPat<v1i64, v2i64, 1>;
5346defm : ExtPat<v1f64, v2f64, 1>;
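
// For illustration only (hypothetical helper, not part of this file): with
// Clang's arm_neon.h, the extract_subvector case above corresponds to
//
//   #include <arm_neon.h>
//   uint8x8_t high_half(uint8x16_t v) { return vget_high_u8(v); }
//
// which these patterns can select to "ext v0.16b, v0.16b, v0.16b, #8" plus a
// d-subregister copy. The AdjustExtImm case rewrites a 64-bit EXT whose first
// operand is such a high half into a 128-bit EXT of the whole register, e.g.
// immediate 3 becomes 8 + 3 = 11.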
5347
5348//----------------------------------------------------------------------------
5349// AdvSIMD zip vector
5350//----------------------------------------------------------------------------
5351
5352defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
5353defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
5354defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
5355defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
5356defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
5357defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;
5358
5359def : Pat<(v16i8 (concat_vectors (v8i8 (trunc (v8i16 V128:$Vn))),
5360                                 (v8i8 (trunc (v8i16 V128:$Vm))))),
5361          (UZP1v16i8 V128:$Vn, V128:$Vm)>;
5362def : Pat<(v8i16 (concat_vectors (v4i16 (trunc (v4i32 V128:$Vn))),
5363                                 (v4i16 (trunc (v4i32 V128:$Vm))))),
5364          (UZP1v8i16 V128:$Vn, V128:$Vm)>;
5365def : Pat<(v4i32 (concat_vectors (v2i32 (trunc (v2i64 V128:$Vn))),
5366                                 (v2i32 (trunc (v2i64 V128:$Vm))))),
5367          (UZP1v4i32 V128:$Vn, V128:$Vm)>;
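
// For illustration only (hypothetical helper, not part of this file): the
// UZP1 patterns above fire for narrowing concatenations such as
//
//   #include <arm_neon.h>
//   uint8x16_t narrow2(uint16x8_t a, uint16x8_t b) {
//     return vcombine_u8(vmovn_u16(a), vmovn_u16(b));
//   }
//
// which can then be a single "uzp1 v0.16b, v0.16b, v1.16b" (the low byte of
// every halfword sits at the even byte indices) instead of two XTNs and an
// insert.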
5368
5369//----------------------------------------------------------------------------
5370// AdvSIMD TBL/TBX instructions
5371//----------------------------------------------------------------------------
5372
5373defm TBL : SIMDTableLookup<    0, "tbl">;
5374defm TBX : SIMDTableLookupTied<1, "tbx">;
5375
5376def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
5377          (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
5378def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
5379          (TBLv16i8One V128:$Ri, V128:$Rn)>;
5380
5381def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
5382                  (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
5383          (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
5384def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
5385                   (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
5386          (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;
5387
5388
5389//----------------------------------------------------------------------------
5390// AdvSIMD scalar DUP instruction
5391//----------------------------------------------------------------------------
5392
5393defm DUP : SIMDScalarDUP<"mov">;
5394
5395//----------------------------------------------------------------------------
5396// AdvSIMD scalar pairwise instructions
5397//----------------------------------------------------------------------------
5398
5399defm ADDP    : SIMDPairwiseScalarD<0, 0b11011, "addp">;
5400defm FADDP   : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
5401defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
5402defm FMAXP   : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
5403defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
5404defm FMINP   : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;
5405
5406let Predicates = [HasFullFP16] in {
5407def : Pat<(f16 (vecreduce_fadd (v8f16 V128:$Rn))),
5408            (FADDPv2i16p
5409              (EXTRACT_SUBREG
5410                 (FADDPv8f16 (FADDPv8f16 V128:$Rn, (v8f16 (IMPLICIT_DEF))), (v8f16 (IMPLICIT_DEF))),
5411               dsub))>;
5412def : Pat<(f16 (vecreduce_fadd (v4f16 V64:$Rn))),
5413          (FADDPv2i16p (FADDPv4f16 V64:$Rn, (v4f16 (IMPLICIT_DEF))))>;
5414}
5415def : Pat<(f32 (vecreduce_fadd (v4f32 V128:$Rn))),
5416          (FADDPv2i32p
5417            (EXTRACT_SUBREG
5418              (FADDPv4f32 V128:$Rn, (v4f32 (IMPLICIT_DEF))),
5419             dsub))>;
5420def : Pat<(f32 (vecreduce_fadd (v2f32 V64:$Rn))),
5421          (FADDPv2i32p V64:$Rn)>;
5422def : Pat<(f64 (vecreduce_fadd (v2f64 V128:$Rn))),
5423          (FADDPv2i64p V128:$Rn)>;
5424
5425def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
5426          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
5427def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
5428          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
5429def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
5430          (FADDPv2i32p V64:$Rn)>;
5431def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
5432          (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
5433def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
5434          (FADDPv2i64p V128:$Rn)>;
5435def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
5436          (FMAXNMPv2i32p V64:$Rn)>;
5437def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
5438          (FMAXNMPv2i64p V128:$Rn)>;
5439def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
5440          (FMAXPv2i32p V64:$Rn)>;
5441def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
5442          (FMAXPv2i64p V128:$Rn)>;
5443def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
5444          (FMINNMPv2i32p V64:$Rn)>;
5445def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
5446          (FMINNMPv2i64p V128:$Rn)>;
5447def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
5448          (FMINPv2i32p V64:$Rn)>;
5449def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
5450          (FMINPv2i64p V128:$Rn)>;
5451
5452//----------------------------------------------------------------------------
5453// AdvSIMD INS/DUP instructions
5454//----------------------------------------------------------------------------
5455
5456def DUPv8i8gpr  : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
5457def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
5458def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
5459def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
5460def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
5461def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
5462def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;
5463
5464def DUPv2i64lane : SIMDDup64FromElement;
5465def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
5466def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
5467def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
5468def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
5469def DUPv8i8lane  : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
5470def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;
5471
5472// DUP from a 64-bit register to a 64-bit register is just a copy
5473def : Pat<(v1i64 (AArch64dup (i64 GPR64:$Rn))),
5474          (COPY_TO_REGCLASS GPR64:$Rn, FPR64)>;
5475def : Pat<(v1f64 (AArch64dup (f64 FPR64:$Rn))),
5476          (COPY_TO_REGCLASS FPR64:$Rn, FPR64)>;
5477
5478def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
5479          (v2f32 (DUPv2i32lane
5480            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
5481            (i64 0)))>;
5482def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
5483          (v4f32 (DUPv4i32lane
5484            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
5485            (i64 0)))>;
5486def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
5487          (v2f64 (DUPv2i64lane
5488            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
5489            (i64 0)))>;
5490def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
5491          (v4f16 (DUPv4i16lane
5492            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5493            (i64 0)))>;
5494def : Pat<(v4bf16 (AArch64dup (bf16 FPR16:$Rn))),
5495          (v4bf16 (DUPv4i16lane
5496            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5497            (i64 0)))>;
5498def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
5499          (v8f16 (DUPv8i16lane
5500            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5501            (i64 0)))>;
5502def : Pat<(v8bf16 (AArch64dup (bf16 FPR16:$Rn))),
5503          (v8bf16 (DUPv8i16lane
5504            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5505            (i64 0)))>;
5506
5507def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
5508          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
5509def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
5510          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;
5511
5512def : Pat<(v4bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
5513          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
5514def : Pat<(v8bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
5515          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;
5516
5517def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
5518          (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
5519def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
5520         (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
5521def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
5522          (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;
5523
5524// If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
5525// instruction even if the types don't match: we just have to remap the lane
5526// carefully. N.b. this trick only applies to truncations.
5527def VecIndex_x2 : SDNodeXForm<imm, [{
5528  return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
5529}]>;
5530def VecIndex_x4 : SDNodeXForm<imm, [{
5531  return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
5532}]>;
5533def VecIndex_x8 : SDNodeXForm<imm, [{
5534  return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
5535}]>;
5536
5537multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
5538                            ValueType Src128VT, ValueType ScalVT,
5539                            Instruction DUP, SDNodeXForm IdxXFORM> {
5540  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
5541                                                     imm:$idx)))),
5542            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
5543
5544  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
5545                                                     imm:$idx)))),
5546            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
5547}
5548
5549defm : DUPWithTruncPats<v8i8,   v4i16, v8i16, i32, DUPv8i8lane,  VecIndex_x2>;
5550defm : DUPWithTruncPats<v8i8,   v2i32, v4i32, i32, DUPv8i8lane,  VecIndex_x4>;
5551defm : DUPWithTruncPats<v4i16,  v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;
5552
5553defm : DUPWithTruncPats<v16i8,  v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
5554defm : DUPWithTruncPats<v16i8,  v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
5555defm : DUPWithTruncPats<v8i16,  v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;
5556
5557multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
5558                               SDNodeXForm IdxXFORM> {
5559  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
5560                                                         imm:$idx))))),
5561            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
5562
5563  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
5564                                                       imm:$idx))))),
5565            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
5566}
5567
5568defm : DUPWithTrunci64Pats<v8i8,  DUPv8i8lane,   VecIndex_x8>;
5569defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane,  VecIndex_x4>;
5570defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane,  VecIndex_x2>;
5571
5572defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
5573defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
5574defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;
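
// For illustration only (hypothetical helper, not part of this file): the
// remapped-lane DUP means that broadcasting a truncated lane, e.g.
//
//   #include <arm_neon.h>
//   uint8x8_t bcast(uint16x8_t v) {
//     return vdup_n_u8((uint8_t)vgetq_lane_u16(v, 3));
//   }
//
// can be a single "dup v0.8b, v0.b[6]": the index is doubled (VecIndex_x2)
// because each .h lane spans two .b lanes and truncation wants the low byte.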
5575
5576// SMOV and UMOV definitions, with some extra patterns for convenience
5577defm SMOV : SMov;
5578defm UMOV : UMov;
5579
5580def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
5581          (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
5582def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
5583          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
5584def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
5585          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
5586def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
5587          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
5590def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
5591          (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;
5592
5593def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
5594            VectorIndexB:$idx)))), i8),
5595          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
5596def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
5597            VectorIndexH:$idx)))), i16),
5598          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
5599
5600// Extracting i8 or i16 elements will have the zero-extend transformed to
5601// an 'and' mask by type legalization since neither i8 nor i16 is a legal type
5602// for AArch64. Match these patterns here since UMOV already zeroes out the
5603// high bits of the destination register.
5604def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
5605               (i32 0xff)),
5606          (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
5607def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
5608               (i32 0xffff)),
5609          (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;
5610
5611def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
5612            VectorIndexB:$idx)))), (i64 0xff))),
5613          (SUBREG_TO_REG (i64 0), (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx)), sub_32)>;
5614def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
5615            VectorIndexH:$idx)))), (i64 0xffff))),
5616          (SUBREG_TO_REG (i64 0), (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx)), sub_32)>;
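
// For illustration only (hypothetical helper, not part of this file): a
// zero-extending lane read such as
//
//   #include <arm_neon.h>
//   unsigned get3(uint8x16_t v) { return vgetq_lane_u8(v, 3); }
//
// reaches selection as (and (vector_extract ...), 0xff) after legalization,
// and the patterns above fold the mask into a single "umov w0, v0.b[3]".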
5617
5618defm INS : SIMDIns;
5619
5620def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
5621          (SUBREG_TO_REG (i32 0),
5622                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5623def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
5624          (SUBREG_TO_REG (i32 0),
5625                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5626
5627def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
5628          (SUBREG_TO_REG (i32 0),
5629                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5630def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
5631          (SUBREG_TO_REG (i32 0),
5632                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5633
5634def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
5635          (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5636def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
5637          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5638
5639def : Pat<(v4bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
5640          (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5641def : Pat<(v8bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
5642          (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5643
5644def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
5645            (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
5646                                  (i32 FPR32:$Rn), ssub))>;
5647def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
5648            (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
5649                                  (i32 FPR32:$Rn), ssub))>;
5650
5651def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
5652            (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
5653                                  (i64 FPR64:$Rn), dsub))>;
5654
5665def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
5666          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
5667def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
5668          (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
5669
5670def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
5671          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;
5672
5673def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
5674            (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
5675          (EXTRACT_SUBREG
5676            (INSvi16lane
5677              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
5678              VectorIndexS:$imm,
5679              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
5680              (i64 0)),
5681            dsub)>;
5682
5683def : Pat<(vector_insert (v8f16 v8f16:$Rn), (f16 fpimm0),
5684            (i64 VectorIndexH:$imm)),
5685          (INSvi16gpr V128:$Rn, VectorIndexH:$imm, WZR)>;
5686def : Pat<(vector_insert v4f32:$Rn, (f32 fpimm0),
5687            (i64 VectorIndexS:$imm)),
5688          (INSvi32gpr V128:$Rn, VectorIndexS:$imm, WZR)>;
5689def : Pat<(vector_insert v2f64:$Rn, (f64 fpimm0),
5690            (i64 VectorIndexD:$imm)),
5691          (INSvi64gpr V128:$Rn, VectorIndexD:$imm, XZR)>;
5692
5693def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
5694            (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
5695          (INSvi16lane
5696            V128:$Rn, VectorIndexH:$imm,
5697            (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
5698            (i64 0))>;
5699
5700def : Pat<(v4bf16 (vector_insert (v4bf16 V64:$Rn),
5701            (bf16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
5702          (EXTRACT_SUBREG
5703            (INSvi16lane
5704              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
5705              VectorIndexS:$imm,
5706              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
5707              (i64 0)),
5708            dsub)>;
5709
5710def : Pat<(v8bf16 (vector_insert (v8bf16 V128:$Rn),
5711            (bf16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
5712          (INSvi16lane
5713            V128:$Rn, VectorIndexH:$imm,
5714            (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
5715            (i64 0))>;
5716
5717def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
5718            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
5719          (EXTRACT_SUBREG
5720            (INSvi32lane
5721              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
5722              VectorIndexS:$imm,
5723              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
5724              (i64 0)),
5725            dsub)>;
5726def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
5727            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
5728          (INSvi32lane
5729            V128:$Rn, VectorIndexS:$imm,
5730            (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
5731            (i64 0))>;
5732def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
5733            (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
5734          (INSvi64lane
5735            V128:$Rn, VectorIndexD:$imm,
5736            (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
5737            (i64 0))>;
5738
5739// Copy an element at a constant index in one vector into a constant indexed
5740// element of another.
5741// FIXME: refactor to a shared class/def parameterized on vector type, vector
5742// index type and INS extension.
5743def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
5744                   (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
5745                   VectorIndexB:$idx2)),
5746          (v16i8 (INSvi8lane
5747                   V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
5748          )>;
5749def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
5750                   (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
5751                   VectorIndexH:$idx2)),
5752          (v8i16 (INSvi16lane
5753                   V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
5754          )>;
5755def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
5756                   (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
5757                   VectorIndexS:$idx2)),
5758          (v4i32 (INSvi32lane
5759                   V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
5760          )>;
5761def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
5762                   (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
5763                   VectorIndexD:$idx2)),
5764          (v2i64 (INSvi64lane
5765                   V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
5766          )>;
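
// For illustration only (hypothetical helper, not part of this file): the
// kind of lane-to-lane copy described here looks like
//
//   #include <arm_neon.h>
//   uint8x16_t copy52(uint8x16_t d, uint8x16_t s) {
//     return vcopyq_laneq_u8(d, 5, s, 2);
//   }
//
// and can be selected to a single "ins v0.b[5], v1.b[2]" (alias
// "mov v0.b[5], v1.b[2]").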
5767
5768multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
5769                                ValueType VTScal, Instruction INS> {
5770  def : Pat<(VT128 (vector_insert V128:$src,
5771                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
5772                        imm:$Immd)),
5773            (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;
5774
5775  def : Pat<(VT128 (vector_insert V128:$src,
5776                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
5777                        imm:$Immd)),
5778            (INS V128:$src, imm:$Immd,
5779                 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;
5780
5781  def : Pat<(VT64 (vector_insert V64:$src,
5782                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
5783                        imm:$Immd)),
5784            (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
5785                                 imm:$Immd, V128:$Rn, imm:$Immn),
5786                            dsub)>;
5787
5788  def : Pat<(VT64 (vector_insert V64:$src,
5789                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
5790                        imm:$Immd)),
5791            (EXTRACT_SUBREG
5792                (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
5793                     (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
5794                dsub)>;
5795}
5796
5797defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
5798defm : Neon_INS_elt_pattern<v8bf16, v4bf16, bf16, INSvi16lane>;
5799defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
5800defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;
5801
5802
5803// Floating point vector extractions are codegen'd either as a plain
5804// subregister extraction (for lane 0) or as a MOV (aka DUP here) when the
5805// lane number is anything other than zero.
5806def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
5807          (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
5808def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
5809          (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
5810def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
5811          (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
5812def : Pat<(vector_extract (v8bf16 V128:$Rn), 0),
5813          (bf16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
5814
5815
5816def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
5817          (f64 (DUPi64 V128:$Rn, VectorIndexD:$idx))>;
5818def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
5819          (f32 (DUPi32 V128:$Rn, VectorIndexS:$idx))>;
5820def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
5821          (f16 (DUPi16 V128:$Rn, VectorIndexH:$idx))>;
5822def : Pat<(vector_extract (v8bf16 V128:$Rn), VectorIndexH:$idx),
5823          (bf16 (DUPi16 V128:$Rn, VectorIndexH:$idx))>;
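
// For illustration only (hypothetical helpers, not part of this file):
//
//   #include <arm_neon.h>
//   float lane0(float32x4_t v) { return vgetq_lane_f32(v, 0); }
//   float lane2(float32x4_t v) { return vgetq_lane_f32(v, 2); }
//
// lane0 is free (the result already lives in s0 of the source register),
// while lane2 needs the DUP form above, i.e. "mov s0, v0.s[2]".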
5824
5825// All concat_vectors operations are canonicalised to act on i64 vectors for
5826// AArch64. In the general case we need an instruction, which might just as well be
5827// INS.
5828class ConcatPat<ValueType DstTy, ValueType SrcTy>
5829  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
5830        (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
5831                     (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;
5832
5833def : ConcatPat<v2i64, v1i64>;
5834def : ConcatPat<v2f64, v1f64>;
5835def : ConcatPat<v4i32, v2i32>;
5836def : ConcatPat<v4f32, v2f32>;
5837def : ConcatPat<v8i16, v4i16>;
5838def : ConcatPat<v8f16, v4f16>;
5839def : ConcatPat<v8bf16, v4bf16>;
5840def : ConcatPat<v16i8, v8i8>;
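
// For illustration only (hypothetical helper, not part of this file): a full
// concatenation such as
//
//   #include <arm_neon.h>
//   float32x4_t cat(float32x2_t lo, float32x2_t hi) {
//     return vcombine_f32(lo, hi);
//   }
//
// ends up as a single INS, "mov v0.d[1], v1.d[0]", once both halves are in
// d registers, per the ConcatPat expansion above.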
5841
5842// If the high lanes are undef, though, we can just ignore them:
5843class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
5844  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
5845        (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;
5846
5847def : ConcatUndefPat<v2i64, v1i64>;
5848def : ConcatUndefPat<v2f64, v1f64>;
5849def : ConcatUndefPat<v4i32, v2i32>;
5850def : ConcatUndefPat<v4f32, v2f32>;
5851def : ConcatUndefPat<v8i16, v4i16>;
5852def : ConcatUndefPat<v16i8, v8i8>;
5853
5854//----------------------------------------------------------------------------
5855// AdvSIMD across lanes instructions
5856//----------------------------------------------------------------------------
5857
5858defm ADDV    : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
5859defm SMAXV   : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
5860defm SMINV   : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
5861defm UMAXV   : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
5862defm UMINV   : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
5863defm SADDLV  : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
5864defm UADDLV  : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
5865defm FMAXNMV : SIMDFPAcrossLanes<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
5866defm FMAXV   : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
5867defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
5868defm FMINV   : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;
5869
5870// Patterns for uaddv(uaddlp(x)) ==> uaddlv
5871def : Pat<(i32 (vector_extract (v8i16 (insert_subvector undef,
5872            (v4i16 (AArch64uaddv (v4i16 (AArch64uaddlp (v8i8 V64:$op))))),
5873            (i64 0))), (i64 0))),
5874          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
5875           (UADDLVv8i8v V64:$op), hsub), ssub)>;
5876def : Pat<(i32 (vector_extract (v8i16 (AArch64uaddv (v8i16 (AArch64uaddlp
5877           (v16i8 V128:$op))))), (i64 0))),
5878          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
5879           (UADDLVv16i8v V128:$op), hsub), ssub)>;
5880def : Pat<(v4i32 (AArch64uaddv (v4i32 (AArch64uaddlp (v8i16 V128:$op))))),
5881          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), (UADDLVv8i16v V128:$op), ssub)>;
5882
5883// Patterns for addp(uaddlp(x)) ==> uaddlv
5884def : Pat<(v2i32 (AArch64uaddv (v2i32 (AArch64uaddlp (v4i16 V64:$op))))),
5885          (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)), (UADDLVv4i16v V64:$op), ssub)>;
5886def : Pat<(v2i64 (AArch64uaddv (v2i64 (AArch64uaddlp (v4i32 V128:$op))))),
5887          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (UADDLVv4i32v V128:$op), dsub)>;
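
// For illustration only (hypothetical helper, not part of this file): the
// (uaddv (uaddlp x)) shape typically comes from a widening horizontal sum,
// e.g.
//
//   #include <arm_neon.h>
//   unsigned hsum(uint8x16_t v) { return vaddlvq_u8(v); }
//
// or an autovectorized byte-sum loop, and is selected to a single
// "uaddlv h0, v0.16b" rather than a uaddlp followed by an addv.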
5888
5889// Patterns for across-vector intrinsics that have a node equivalent returning
5890// a vector (with only the low lane defined) instead of a scalar.
5891// In effect, opNode is the same as (scalar_to_vector (IntNode)).
5892multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
5893                                    SDPatternOperator opNode> {
5894// If a lane instruction caught the vector_extract around opNode, we can
5895// directly match the latter to the instruction.
5896def : Pat<(v8i8 (opNode V64:$Rn)),
5897          (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
5898           (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
5899def : Pat<(v16i8 (opNode V128:$Rn)),
5900          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5901           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
5902def : Pat<(v4i16 (opNode V64:$Rn)),
5903          (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
5904           (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
5905def : Pat<(v8i16 (opNode V128:$Rn)),
5906          (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
5907           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
5908def : Pat<(v4i32 (opNode V128:$Rn)),
5909          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
5910           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;
5911
5912
5913// If none did, fall back to the explicit patterns, consuming the vector_extract.
5914def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
5915            (i64 0)), (i64 0))),
5916          (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
5917            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
5918            bsub), ssub)>;
5919def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
5920          (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5921            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
5922            bsub), ssub)>;
5923def : Pat<(i32 (vector_extract (insert_subvector undef,
5924            (v4i16 (opNode V64:$Rn)), (i64 0)), (i64 0))),
5925          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
5926            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
5927            hsub), ssub)>;
5928def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
5929          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
5930            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
5931            hsub), ssub)>;
5932def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
5933          (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
5934            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
5935            ssub), ssub)>;
5936
5937}
5938
5939multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
5940                                          SDPatternOperator opNode>
5941    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
5942// If there is a sign extension after this intrinsic, consume it, as SMOV
5943// already performed it.
5944def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
5945            (opNode (v8i8 V64:$Rn)), (i64 0)), (i64 0))), i8)),
5946          (i32 (SMOVvi8to32
5947            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5948              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
5949            (i64 0)))>;
5950def : Pat<(i32 (sext_inreg (i32 (vector_extract
5951            (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
5952          (i32 (SMOVvi8to32
5953            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5954             (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
5955            (i64 0)))>;
5956def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
5957            (opNode (v4i16 V64:$Rn)), (i64 0)), (i64 0))), i16)),
5958          (i32 (SMOVvi16to32
5959           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5960            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
5961           (i64 0)))>;
5962def : Pat<(i32 (sext_inreg (i32 (vector_extract
5963            (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
5964          (i32 (SMOVvi16to32
5965            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5966             (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
5967            (i64 0)))>;
5968}
5969
5970multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
5971                                            SDPatternOperator opNode>
5972    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
5973// If there is a masking operation keeping only what has actually been
5974// generated, consume it.
5975def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
5976            (opNode (v8i8 V64:$Rn)), (i64 0)), (i64 0))), maski8_or_more)),
5977      (i32 (EXTRACT_SUBREG
5978        (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5979          (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
5980        ssub))>;
5981def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
5982            maski8_or_more)),
5983        (i32 (EXTRACT_SUBREG
5984          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5985            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
5986          ssub))>;
5987def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
5988            (opNode (v4i16 V64:$Rn)), (i64 0)), (i64 0))), maski16_or_more)),
5989          (i32 (EXTRACT_SUBREG
5990            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5991              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
5992            ssub))>;
5993def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
5994            maski16_or_more)),
5995        (i32 (EXTRACT_SUBREG
5996          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
5997            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
5998          ssub))>;
5999}
6000
6001defm : SIMDAcrossLanesSignedIntrinsic<"ADDV",  AArch64saddv>;
6002// vaddv_[su]32 is special: lower to ADDP Vd.2S, Vn.2S, Vm.2S (Vn == Vm), return Vd.s[0]
6003def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
6004          (ADDPv2i32 V64:$Rn, V64:$Rn)>;
6005
6006defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
6007// vaddv_[su]32 is special: lower to ADDP Vd.2S, Vn.2S, Vm.2S (Vn == Vm), return Vd.s[0]
6008def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
6009          (ADDPv2i32 V64:$Rn, V64:$Rn)>;
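
// For illustration only (hypothetical helpers, not part of this file):
//
//   #include <arm_neon.h>
//   int sum8(int8x8_t v)       { return vaddv_s8(v);  }  // addv b0, v0.8b
//                                                        // smov w0, v0.b[0]
//   unsigned sum2(uint32x2_t v) { return vaddv_u32(v); } // addp v0.2s, v0.2s, v0.2s
//                                                        // fmov w0, s0
//
// The sign-extending read in sum8 is folded by the SMOV patterns above, and
// the 2x32-bit case uses the pairwise ADDP trick because ADDV has no .2s form.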
6010
6011defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
6012def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
6013          (SMAXPv2i32 V64:$Rn, V64:$Rn)>;
6014
6015defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
6016def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
6017          (SMINPv2i32 V64:$Rn, V64:$Rn)>;
6018
6019defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
6020def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
6021          (UMAXPv2i32 V64:$Rn, V64:$Rn)>;
6022
6023defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
6024def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
6025          (UMINPv2i32 V64:$Rn, V64:$Rn)>;
6026
6027multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
6028  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
6029        (i32 (SMOVvi16to32
6030          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6031            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
6032          (i64 0)))>;
6033def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
6034        (i32 (SMOVvi16to32
6035          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6036           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
6037          (i64 0)))>;
6038
6039def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
6040          (i32 (EXTRACT_SUBREG
6041           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6042            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
6043           ssub))>;
6044def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
6045        (i32 (EXTRACT_SUBREG
6046          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6047           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
6048          ssub))>;
6049
6050def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
6051        (i64 (EXTRACT_SUBREG
6052          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6053           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
6054          dsub))>;
6055}
6056
6057multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
6058                                                Intrinsic intOp> {
6059  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
6060        (i32 (EXTRACT_SUBREG
6061          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6062            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
6063          ssub))>;
6064def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
6065        (i32 (EXTRACT_SUBREG
6066          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6067            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
6068          ssub))>;
6069
6070def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
6071          (i32 (EXTRACT_SUBREG
6072            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6073              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
6074            ssub))>;
6075def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
6076        (i32 (EXTRACT_SUBREG
6077          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6078            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
6079          ssub))>;
6080
6081def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
6082        (i64 (EXTRACT_SUBREG
6083          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6084            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
6085          dsub))>;
6086}
6087
6088defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
6089defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;
6090
6091// The vaddlv_s32 intrinsic gets mapped to SADDLP.
6092def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
6093          (i64 (EXTRACT_SUBREG
6094            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6095              (SADDLPv2i32_v1i64 V64:$Rn), dsub),
6096            dsub))>;
6097// The vaddlv_u32 intrinsic gets mapped to UADDLP.
6098def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
6099          (i64 (EXTRACT_SUBREG
6100            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
6101              (UADDLPv2i32_v1i64 V64:$Rn), dsub),
6102            dsub))>;
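
// For illustration only (hypothetical helper, not part of this file): since
// SADDLV/UADDLV have no .2s form either,
//
//   #include <arm_neon.h>
//   int64_t hadd2w(int32x2_t v) { return vaddlv_s32(v); }
//
// becomes a single pairwise widening add, "saddlp v0.1d, v0.2s", whose
// d-register result is the final answer.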
6103
6104//----------------------------------------------------------------------------
6105// AdvSIMD modified immediate instructions
6106//----------------------------------------------------------------------------
6107
6108// AdvSIMD BIC
6109defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
6110// AdvSIMD ORR
6111defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;
6112
6113def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
6114def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
6115def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
6116def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
6117
6118def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
6119def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
6120def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
6121def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
6122
6123def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
6124def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
6125def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
6126def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
6127
6128def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
6129def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
6130def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
6131def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
6132
6133// AdvSIMD FMOV
6134def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
6135                                              "fmov", ".2d",
6136                       [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6137def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64,  fpimm8,
6138                                              "fmov", ".2s",
6139                       [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6140def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
6141                                              "fmov", ".4s",
6142                       [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6143let Predicates = [HasNEON, HasFullFP16] in {
6144def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64,  fpimm8,
6145                                              "fmov", ".4h",
6146                       [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6147def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
6148                                              "fmov", ".8h",
6149                       [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
6150} // Predicates = [HasNEON, HasFullFP16]
6151
6152// AdvSIMD MOVI
6153
6154// EDIT byte mask: scalar
6155let isReMaterializable = 1, isAsCheapAsAMove = 1 in
6156def MOVID      : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
6157                    [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
6158// The movi_edit node has the immediate value already encoded, so we use
6159// a plain imm0_255 here.
6160def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
6161          (MOVID imm0_255:$shift)>;
6162
6163// EDIT byte mask: 2d
6164
6165// The movi_edit node has the immediate value already encoded, so we use
6166// a plain imm0_255 in the pattern
6167let isReMaterializable = 1, isAsCheapAsAMove = 1 in
6168def MOVIv2d_ns   : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
6169                                                simdimmtype10,
6170                                                "movi", ".2d",
6171                   [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;
6172
6173def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
6174def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
6175def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
6176def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;
6177
6178def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
6179def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
6180def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
6181def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;
6182
6183// Set 64-bit vectors to all 0/1 by extracting from a 128-bit register as the
6184// extract is free and this gives better MachineCSE results.
6185def : Pat<(v1i64 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
6186def : Pat<(v2i32 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
6187def : Pat<(v4i16 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
6188def : Pat<(v8i8  immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
6189
6190def : Pat<(v1i64 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
6191def : Pat<(v2i32 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
6192def : Pat<(v4i16 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
6193def : Pat<(v8i8  immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
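
// Worked example (for illustration): in the byte-mask encoding, each of the
// eight bits of imm8 expands to one full byte of the 64-bit pattern, so
// imm8 = 0 yields 0x0000000000000000 and imm8 = 255 yields
// 0xFFFFFFFFFFFFFFFF. That is why (i32 0) and (i32 255) above materialize
// the all-zeros and all-ones vectors, e.g. "movi v0.2d, #0xffffffffffffffff".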
6194
6195// EDIT per word & halfword: 2s, 4h, 4s, & 8h
6196let isReMaterializable = 1, isAsCheapAsAMove = 1 in
6197defm MOVI      : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;
6198
6199def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
6200def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
6201def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
6202def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
6203
6204def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
6205def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
6206def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
6207def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
6208
6209def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
6210          (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
6211def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
6212          (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
6213def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
6214          (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
6215def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
6216          (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;
6217
6218let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
6219// EDIT per word: 2s & 4s with MSL shifter
6220def MOVIv2s_msl  : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
6221                      [(set (v2i32 V64:$Rd),
6222                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
6223def MOVIv4s_msl  : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
6224                      [(set (v4i32 V128:$Rd),
6225                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
6226
6227// Per byte: 8b & 16b
6228def MOVIv8b_ns   : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64,  imm0_255,
6229                                                 "movi", ".8b",
6230                       [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;
6231
6232def MOVIv16b_ns  : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
6233                                                 "movi", ".16b",
6234                       [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
6235}
6236
6237// AdvSIMD MVNI
6238
6239// EDIT per word & halfword: 2s, 4h, 4s, & 8h
6240let isReMaterializable = 1, isAsCheapAsAMove = 1 in
6241defm MVNI      : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;
6242
6243def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
6244def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
6245def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
6246def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
6247
6248def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
6249def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
6250def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
6251def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
6252
6253def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
6254          (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
6255def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
6256          (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
6257def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
6258          (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
6259def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
6260          (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;
6261
6262// EDIT per word: 2s & 4s with MSL shifter
6263let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
6264def MVNIv2s_msl   : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
6265                      [(set (v2i32 V64:$Rd),
6266                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
6267def MVNIv4s_msl   : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
6268                      [(set (v4i32 V128:$Rd),
6269                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
6270}
6271
6272//----------------------------------------------------------------------------
6273// AdvSIMD indexed element
6274//----------------------------------------------------------------------------
6275
6276let hasSideEffects = 0 in {
6277  defm FMLA  : SIMDFPIndexedTied<0, 0b0001, "fmla">;
6278  defm FMLS  : SIMDFPIndexedTied<0, 0b0101, "fmls">;
6279}
6280
6281// NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
6282// instruction expects the addend first, while the intrinsic expects it last.
6283
6284// On the other hand, there are quite a few valid combinatorial options due to
6285// the commutativity of multiplication and the fact that (-x) * y = x * (-y).
6286defm : SIMDFPIndexedTiedPatterns<"FMLA",
6287           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
6288defm : SIMDFPIndexedTiedPatterns<"FMLA",
6289           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;
6290
6291defm : SIMDFPIndexedTiedPatterns<"FMLS",
6292           TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
6293defm : SIMDFPIndexedTiedPatterns<"FMLS",
6294           TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
6295defm : SIMDFPIndexedTiedPatterns<"FMLS",
6296           TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
6297defm : SIMDFPIndexedTiedPatterns<"FMLS",
6298           TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;
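
// For illustration only (hypothetical helper, not part of this file): all of
// these shapes come out as FMLS, e.g.
//
//   #include <arm_neon.h>
//   float32x4_t fmls_lane(float32x4_t acc, float32x4_t a, float32x4_t b) {
//     return vfmaq_laneq_f32(acc, vnegq_f32(a), b, 1);  // acc + (-a) * b[1]
//   }
//
// can select to "fmls v0.4s, v1.4s, v2.s[1]" regardless of which multiplicand
// carries the negation, since (-x) * y == x * (-y).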
6299
6300multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
6301  // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
6302  // and DUP scalar.
6303  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
6304                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
6305                                           VectorIndexS:$idx))),
6306            (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
6307  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
6308                           (v2f32 (AArch64duplane32
6309                                      (v4f32 (insert_subvector undef,
6310                                                 (v2f32 (fneg V64:$Rm)),
6311                                                 (i64 0))),
6312                                      VectorIndexS:$idx)))),
6313            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
6314                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
6315                               VectorIndexS:$idx)>;
6316  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
6317                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
6318            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
6319                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
6320
6321  // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
6322  // and DUP scalar.
6323  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
6324                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
6325                                           VectorIndexS:$idx))),
6326            (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
6327                               VectorIndexS:$idx)>;
6328  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
6329                           (v4f32 (AArch64duplane32
6330                                      (v4f32 (insert_subvector undef,
6331                                                 (v2f32 (fneg V64:$Rm)),
6332                                                 (i64 0))),
6333                                      VectorIndexS:$idx)))),
6334            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
6335                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
6336                               VectorIndexS:$idx)>;
6337  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
6338                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
6339            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
6340                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
6341
6342  // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
6343  // (DUPLANE from 64-bit would be trivial).
6344  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
6345                           (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
6346                                           VectorIndexD:$idx))),
6347            (FMLSv2i64_indexed
6348                V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexD:$idx)>;
6349  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
6350                           (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
6351            (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
6352                (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;
6353
6354  // 2 variants for 32-bit scalar version: extract from .2s or from .4s
6355  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
6356                         (vector_extract (v4f32 (fneg V128:$Rm)),
6357                                         VectorIndexS:$idx))),
6358            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
6359                V128:$Rm, VectorIndexS:$idx)>;
6360  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
6361                         (vector_extract (v4f32 (insert_subvector undef,
6362                                                    (v2f32 (fneg V64:$Rm)),
6363                                                    (i64 0))),
6364                                         VectorIndexS:$idx))),
6365            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
6366                (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;
6367
6368  // 1 variant for 64-bit scalar version: extract from .1d or from .2d
6369  def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
6370                         (vector_extract (v2f64 (fneg V128:$Rm)),
6371                                         VectorIndexS:$idx))),
6372            (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
6373                V128:$Rm, VectorIndexS:$idx)>;
6374}
6375
6376defm : FMLSIndexedAfterNegPatterns<
6377           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
6378defm : FMLSIndexedAfterNegPatterns<
6379           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;
6380
6381defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
6382defm FMUL  : SIMDFPIndexed<0, 0b1001, "fmul", fmul>;
6383
6384def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
6385          (FMULv2i32_indexed V64:$Rn,
6386            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
6387            (i64 0))>;
6388def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
6389          (FMULv4i32_indexed V128:$Rn,
6390            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
6391            (i64 0))>;
6392def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
6393          (FMULv2i64_indexed V128:$Rn,
6394            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
6395            (i64 0))>;
6396
6397defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
6398defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
6399
6400defm SQDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqdmulh_lane,
6401                                     int_aarch64_neon_sqdmulh_laneq>;
6402defm SQRDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqrdmulh_lane,
6403                                      int_aarch64_neon_sqrdmulh_laneq>;
6404
6405// Generated by MachineCombine
6406defm MLA   : SIMDVectorIndexedHSTied<1, 0b0000, "mla", null_frag>;
6407defm MLS   : SIMDVectorIndexedHSTied<1, 0b0100, "mls", null_frag>;
6408
6409defm MUL   : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
6410defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
6411    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
6412defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
6413    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
6414defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
6415                int_aarch64_neon_smull>;
6416defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
6417                                           int_aarch64_neon_sqadd>;
6418defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
6419                                           int_aarch64_neon_sqsub>;
6420defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
6421                                          int_aarch64_neon_sqrdmlah>;
6422defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
6423                                          int_aarch64_neon_sqrdmlsh>;
6424defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
6425defm UMLAL   : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
6426    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
6427defm UMLSL   : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
6428    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
6429defm UMULL   : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
6430                int_aarch64_neon_umull>;
6431
6432// A scalar sqdmull with the second operand being a vector lane can be
6433// handled directly with the indexed instruction encoding.
6434def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
6435                                          (vector_extract (v4i32 V128:$Vm),
6436                                                           VectorIndexS:$idx)),
6437          (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
6438
6439  // Match an add node, and also treat an 'or' node as an 'add' if the or'ed
6440  // operands have no common bits set.
6441def add_and_or_is_add : PatFrags<(ops node:$lhs, node:$rhs),
6442                         [(add node:$lhs, node:$rhs), (or node:$lhs, node:$rhs)],[{
6443   if (N->getOpcode() == ISD::ADD)
6444     return true;
6445   return CurDAG->haveNoCommonBitsSet(N->getOperand(0), N->getOperand(1));
6446}]> {
6447  let GISelPredicateCode = [{
6448     // Only handle G_ADD for now. FIXME: build the capability to compute
6449     // whether the operands of G_OR have common bits set or not.
6450     return MI.getOpcode() == TargetOpcode::G_ADD;
6451  }];
6452}
6453
6454
6455//----------------------------------------------------------------------------
6456// AdvSIMD scalar shift instructions
6457//----------------------------------------------------------------------------
6458defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">;
6459defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">;
6460defm SCVTF  : SIMDFPScalarRShift<0, 0b11100, "scvtf">;
6461defm UCVTF  : SIMDFPScalarRShift<1, 0b11100, "ucvtf">;
6462  // Codegen patterns for the above. We don't put these directly on the
6463  // instructions because TableGen's type inference can't cope: sharing one
6464  // base pattern between the fp -> int and int -> fp directions confuses it.
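// (Illustrative note: (int_aarch64_neon_vcvtfp2fxs f32:$Rn, 3) converts $Rn to
// signed fixed point with 3 fractional bits, roughly (i32)($Rn * 8.0), and
// selects to "fcvtzs s0, s0, #3".)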
6465def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
6466          (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
6467def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
6468          (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
6469def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
6470          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
6471def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
6472          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
6473def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
6474                                            vecshiftR64:$imm)),
6475          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
6476def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
6477                                            vecshiftR64:$imm)),
6478          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
6479def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
6480          (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
6481def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
6482          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6483def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
6484                                            vecshiftR64:$imm)),
6485          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6486def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
6487          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6488def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
6489                                            vecshiftR64:$imm)),
6490          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6491def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
6492          (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
6493
6494  // Patterns for FP16 intrinsics - a reg copy to/from FPR16 is required, as i16 is not a legal type.
6495
6496def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
6497          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6498def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
6499          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6500def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
6501          (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
6502def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
6503            (and FPR32:$Rn, (i32 65535)),
6504            vecshiftR16:$imm)),
6505          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6506def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
6507          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6508def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
6509          (UCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
6510def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR32:$imm)),
6511          (i32 (INSERT_SUBREG
6512            (i32 (IMPLICIT_DEF)),
6513            (FCVTZSh FPR16:$Rn, vecshiftR32:$imm),
6514            hsub))>;
6515def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
6516          (i64 (INSERT_SUBREG
6517            (i64 (IMPLICIT_DEF)),
6518            (FCVTZSh FPR16:$Rn, vecshiftR64:$imm),
6519            hsub))>;
6520def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR32:$imm)),
6521          (i32 (INSERT_SUBREG
6522            (i32 (IMPLICIT_DEF)),
6523            (FCVTZUh FPR16:$Rn, vecshiftR32:$imm),
6524            hsub))>;
6525def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR64:$imm)),
6526          (i64 (INSERT_SUBREG
6527            (i64 (IMPLICIT_DEF)),
6528            (FCVTZUh FPR16:$Rn, vecshiftR64:$imm),
6529            hsub))>;
6530def : Pat<(i32 (int_aarch64_neon_facge (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
6531          (i32 (INSERT_SUBREG
6532            (i32 (IMPLICIT_DEF)),
6533            (FACGE16 FPR16:$Rn, FPR16:$Rm),
6534            hsub))>;
6535def : Pat<(i32 (int_aarch64_neon_facgt (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
6536          (i32 (INSERT_SUBREG
6537            (i32 (IMPLICIT_DEF)),
6538            (FACGT16 FPR16:$Rn, FPR16:$Rm),
6539            hsub))>;
6540
6541defm SHL      : SIMDScalarLShiftD<   0, 0b01010, "shl", AArch64vshl>;
6542defm SLI      : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
6543defm SQRSHRN  : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
6544                                     int_aarch64_neon_sqrshrn>;
6545defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
6546                                     int_aarch64_neon_sqrshrun>;
6547defm SQSHLU   : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
6548defm SQSHL    : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
6549defm SQSHRN   : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
6550                                     int_aarch64_neon_sqshrn>;
6551defm SQSHRUN  : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
6552                                     int_aarch64_neon_sqshrun>;
6553defm SRI      : SIMDScalarRShiftDTied<   1, 0b01000, "sri">;
6554defm SRSHR    : SIMDScalarRShiftD<   0, 0b00100, "srshr", AArch64srshri>;
6555defm SRSRA    : SIMDScalarRShiftDTied<   0, 0b00110, "srsra",
6556    TriOpFrag<(add node:$LHS,
6557                   (AArch64srshri node:$MHS, node:$RHS))>>;
6558defm SSHR     : SIMDScalarRShiftD<   0, 0b00000, "sshr", AArch64vashr>;
6559defm SSRA     : SIMDScalarRShiftDTied<   0, 0b00010, "ssra",
6560    TriOpFrag<(add_and_or_is_add node:$LHS,
6561                   (AArch64vashr node:$MHS, node:$RHS))>>;
6562defm UQRSHRN  : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
6563                                     int_aarch64_neon_uqrshrn>;
6564defm UQSHL    : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
6565defm UQSHRN   : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
6566                                     int_aarch64_neon_uqshrn>;
6567defm URSHR    : SIMDScalarRShiftD<   1, 0b00100, "urshr", AArch64urshri>;
6568defm URSRA    : SIMDScalarRShiftDTied<   1, 0b00110, "ursra",
6569    TriOpFrag<(add node:$LHS,
6570                   (AArch64urshri node:$MHS, node:$RHS))>>;
6571defm USHR     : SIMDScalarRShiftD<   1, 0b00000, "ushr", AArch64vlshr>;
6572defm USRA     : SIMDScalarRShiftDTied<   1, 0b00010, "usra",
6573    TriOpFrag<(add_and_or_is_add node:$LHS,
6574                   (AArch64vlshr node:$MHS, node:$RHS))>>;
6575
6576//----------------------------------------------------------------------------
6577// AdvSIMD vector shift instructions
6578//----------------------------------------------------------------------------
6579defm FCVTZS:SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
6580defm FCVTZU:SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
6581defm SCVTF: SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
6582                                   int_aarch64_neon_vcvtfxs2fp>;
6583defm RSHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
6584                                         int_aarch64_neon_rshrn>;
6585defm SHL     : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
6586defm SHRN    : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
6587                          BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
6588defm SLI     : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", AArch64vsli>;
6589def : Pat<(v1i64 (AArch64vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
6590                                      (i32 vecshiftL64:$imm))),
6591          (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
6592defm SQRSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
6593                                         int_aarch64_neon_sqrshrn>;
6594defm SQRSHRUN: SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
6595                                         int_aarch64_neon_sqrshrun>;
6596defm SQSHLU : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
6597defm SQSHL  : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
6598defm SQSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
6599                                         int_aarch64_neon_sqshrn>;
6600defm SQSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
6601                                         int_aarch64_neon_sqshrun>;
6602defm SRI     : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", AArch64vsri>;
6603def : Pat<(v1i64 (AArch64vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
6604                                      (i32 vecshiftR64:$imm))),
6605          (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
6606defm SRSHR   : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
6607defm SRSRA   : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
6608                 TriOpFrag<(add node:$LHS,
6609                                (AArch64srshri node:$MHS, node:$RHS))> >;
6610defm SSHLL   : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
6611                BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;
6612
6613defm SSHR    : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
6614defm SSRA    : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
6615                TriOpFrag<(add_and_or_is_add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
6616defm UCVTF   : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
6617                        int_aarch64_neon_vcvtfxu2fp>;
6618defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
6619                                         int_aarch64_neon_uqrshrn>;
6620defm UQSHL   : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
6621defm UQSHRN  : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
6622                                         int_aarch64_neon_uqshrn>;
6623defm URSHR   : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
6624defm URSRA   : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
6625                TriOpFrag<(add node:$LHS,
6626                               (AArch64urshri node:$MHS, node:$RHS))> >;
6627defm USHLL   : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
6628                BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
6629defm USHR    : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
6630defm USRA    : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
6631                TriOpFrag<(add_and_or_is_add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;
6632
6633// RADDHN patterns for when RSHRN shifts by half the size of the vector element
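// (RSHRN by esize/2 computes (x + (1 << (esize/2 - 1))) >> (esize/2), which is
// exactly what RADDHN of x with a zero vector computes.)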
6634def : Pat<(v8i8 (int_aarch64_neon_rshrn (v8i16 V128:$Vn), (i32 8))),
6635          (RADDHNv8i16_v8i8 V128:$Vn, (v8i16 (MOVIv2d_ns (i32 0))))>;
6636def : Pat<(v4i16 (int_aarch64_neon_rshrn (v4i32 V128:$Vn), (i32 16))),
6637          (RADDHNv4i32_v4i16 V128:$Vn, (v4i32 (MOVIv2d_ns (i32 0))))>;
6638def : Pat<(v2i32 (int_aarch64_neon_rshrn (v2i64 V128:$Vn), (i32 32))),
6639          (RADDHNv2i64_v2i32 V128:$Vn, (v2i64 (MOVIv2d_ns (i32 0))))>;
6640
6641// RADDHN2 patterns for when RSHRN shifts by half the size of the vector element
6642def : Pat<(v16i8 (concat_vectors
6643                 (v8i8 V64:$Vd),
6644                 (v8i8 (int_aarch64_neon_rshrn (v8i16 V128:$Vn), (i32 8))))),
6645          (RADDHNv8i16_v16i8
6646                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
6647                 (v8i16 (MOVIv2d_ns (i32 0))))>;
6648def : Pat<(v8i16 (concat_vectors
6649                 (v4i16 V64:$Vd),
6650                 (v4i16 (int_aarch64_neon_rshrn (v4i32 V128:$Vn), (i32 16))))),
6651          (RADDHNv4i32_v8i16
6652                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
6653                 (v4i32 (MOVIv2d_ns (i32 0))))>;
6654def : Pat<(v4i32 (concat_vectors
6655                 (v2i32 V64:$Vd),
6656                 (v2i32 (int_aarch64_neon_rshrn (v2i64 V128:$Vn), (i32 32))))),
6657          (RADDHNv2i64_v4i32
6658                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
6659                 (v2i64 (MOVIv2d_ns (i32 0))))>;
6660
6661// SHRN patterns for when a logical right shift was used instead of arithmetic
6662// (the immediate guarantees no sign bits actually end up in the result so it
6663// doesn't matter).
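// (E.g. for a v8i16 source with imm in [1,8] the result keeps bits
// [imm, imm+7] of each element; the shifted-in bits - the only ones that
// differ between logical and arithmetic shifts - sit at positions 16-imm
// and above and are truncated away.)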
6664def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
6665          (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
6666def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
6667          (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
6668def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
6669          (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;
6670
6671def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
6672                                 (trunc (AArch64vlshr (v8i16 V128:$Rn),
6673                                                    vecshiftR16Narrow:$imm)))),
6674          (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6675                           V128:$Rn, vecshiftR16Narrow:$imm)>;
6676def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
6677                                 (trunc (AArch64vlshr (v4i32 V128:$Rn),
6678                                                    vecshiftR32Narrow:$imm)))),
6679          (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6680                           V128:$Rn, vecshiftR32Narrow:$imm)>;
6681def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
6682                                 (trunc (AArch64vlshr (v2i64 V128:$Rn),
6683                                                    vecshiftR64Narrow:$imm)))),
6684          (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6685                           V128:$Rn, vecshiftR64Narrow:$imm)>;
6686
6687  // Vector sign and zero extensions are implemented with SSHLL and USHLL.
6688// Anyexts are implemented as zexts.
6689def : Pat<(v8i16 (sext   (v8i8 V64:$Rn))),  (SSHLLv8i8_shift  V64:$Rn, (i32 0))>;
6690def : Pat<(v8i16 (zext   (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
6691def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
6692def : Pat<(v4i32 (sext   (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
6693def : Pat<(v4i32 (zext   (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
6694def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
6695def : Pat<(v2i64 (sext   (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
6696def : Pat<(v2i64 (zext   (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
6697def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
6698  // Also match an extend from the upper half of a 128-bit source register.
6699def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6700          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
6701def : Pat<(v8i16 (zext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6702          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
6703def : Pat<(v8i16 (sext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6704          (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
6705def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6706          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
6707def : Pat<(v4i32 (zext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6708          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
6709def : Pat<(v4i32 (sext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6710          (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
6711def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6712          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
6713def : Pat<(v2i64 (zext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6714          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
6715def : Pat<(v2i64 (sext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6716          (SSHLLv4i32_shift V128:$Rn, (i32 0))>;
6717
6718// Vector shift sxtl aliases
6719def : InstAlias<"sxtl.8h $dst, $src1",
6720                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6721def : InstAlias<"sxtl $dst.8h, $src1.8b",
6722                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6723def : InstAlias<"sxtl.4s $dst, $src1",
6724                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6725def : InstAlias<"sxtl $dst.4s, $src1.4h",
6726                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6727def : InstAlias<"sxtl.2d $dst, $src1",
6728                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6729def : InstAlias<"sxtl $dst.2d, $src1.2s",
6730                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6731
6732// Vector shift sxtl2 aliases
6733def : InstAlias<"sxtl2.8h $dst, $src1",
6734                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6735def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
6736                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6737def : InstAlias<"sxtl2.4s $dst, $src1",
6738                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6739def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
6740                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6741def : InstAlias<"sxtl2.2d $dst, $src1",
6742                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6743def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
6744                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6745
6746// Vector shift uxtl aliases
6747def : InstAlias<"uxtl.8h $dst, $src1",
6748                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6749def : InstAlias<"uxtl $dst.8h, $src1.8b",
6750                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6751def : InstAlias<"uxtl.4s $dst, $src1",
6752                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6753def : InstAlias<"uxtl $dst.4s, $src1.4h",
6754                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6755def : InstAlias<"uxtl.2d $dst, $src1",
6756                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6757def : InstAlias<"uxtl $dst.2d, $src1.2s",
6758                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6759
6760// Vector shift uxtl2 aliases
6761def : InstAlias<"uxtl2.8h $dst, $src1",
6762                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6763def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
6764                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6765def : InstAlias<"uxtl2.4s $dst, $src1",
6766                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6767def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
6768                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6769def : InstAlias<"uxtl2.2d $dst, $src1",
6770                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6771def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
6772                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6773
6774// If an integer is about to be converted to a floating point value,
6775// just load it on the floating point unit.
6776// These patterns are more complex because floating point loads do not
6777// support sign extension.
6778// The sign extension has to be explicitly added and is only supported for
6779// one step: byte-to-half, half-to-word, word-to-doubleword.
6780// SCVTF GPR -> FPR is 9 cycles.
6781  // SCVTF FPR -> FPR is 4 cycles.
6782  // (sign extension with lengthening) SXTL FPR -> FPR is 2 cycles.
6783  // Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
6784  // and still be faster.
6785// However, this is not good for code size.
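// As an illustrative sketch, an sextloadi8 feeding an f32 convert becomes:
//   ldr   b0, [x0]           // integer load done on the FP unit
//   sshll v0.8h, v0.8b, #0   // sign extend byte -> half
//   sshll v0.4s, v0.4h, #0   // sign extend half -> word
//   scvtf s0, s0             // FPR -> FPR conversion
// rather than an ldrsb plus a GPR -> FPR scvtf.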
6786// 8-bits -> float. 2 sizes step-up.
6787class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
6788  : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
6789        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
6790                            (SSHLLv4i16_shift
6791                              (f64
6792                                (EXTRACT_SUBREG
6793                                  (SSHLLv8i8_shift
6794                                    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6795                                        INST,
6796                                        bsub),
6797                                    0),
6798                                  dsub)),
6799                               0),
6800                             ssub)))>,
6801    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
6802
6803def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
6804                          (LDRBroW  GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
6805def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
6806                          (LDRBroX  GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
6807def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
6808                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
6809def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
6810                          (LDURBi GPR64sp:$Rn, simm9:$offset)>;
6811
6812// 16-bits -> float. 1 size step-up.
6813class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
6814  : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
6815        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
6816                            (SSHLLv4i16_shift
6817                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6818                                  INST,
6819                                  hsub),
6820                                0),
6821                            ssub)))>, Requires<[NotForCodeSize]>;
6822
6823def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
6824                           (LDRHroW   GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
6825def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
6826                           (LDRHroX   GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
6827def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
6828                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
6829def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
6830                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
6831
6832  // 32-bit to 32-bit conversions are handled in the target-specific dag combine:
6833  // performIntToFpCombine.
6834  // 64-bit integer to 32-bit floating point is not possible with
6835  // SCVTF on floating point registers (the source and destination
6836  // must have the same size).
6837
6838// Here are the patterns for 8, 16, 32, and 64-bits to double.
6839  // 8-bits -> double. 3 sizes step-up: give up.
6840  // 16-bits -> double. 2 sizes step-up.
6841class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
6842  : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
6843           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
6844                              (SSHLLv2i32_shift
6845                                 (f64
6846                                  (EXTRACT_SUBREG
6847                                    (SSHLLv4i16_shift
6848                                      (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6849                                        INST,
6850                                        hsub),
6851                                     0),
6852                                   dsub)),
6853                               0),
6854                             dsub)))>,
6855    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
6856
6857def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
6858                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
6859def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
6860                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
6861def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
6862                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
6863def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
6864                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
6865// 32-bits -> double. 1 size step-up.
6866class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
6867  : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
6868           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
6869                              (SSHLLv2i32_shift
6870                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6871                                  INST,
6872                                  ssub),
6873                               0),
6874                             dsub)))>, Requires<[NotForCodeSize]>;
6875
6876def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
6877                           (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
6878def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
6879                           (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
6880def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
6881                           (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
6882def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
6883                           (LDURSi GPR64sp:$Rn, simm9:$offset)>;
6884
6885  // 64-bit -> double conversions are handled in the target-specific dag combine:
6886  // performIntToFpCombine.
6887
6888
6889//----------------------------------------------------------------------------
6890// AdvSIMD Load-Store Structure
6891//----------------------------------------------------------------------------
6892defm LD1 : SIMDLd1Multiple<"ld1">;
6893defm LD2 : SIMDLd2Multiple<"ld2">;
6894defm LD3 : SIMDLd3Multiple<"ld3">;
6895defm LD4 : SIMDLd4Multiple<"ld4">;
6896
6897defm ST1 : SIMDSt1Multiple<"st1">;
6898defm ST2 : SIMDSt2Multiple<"st2">;
6899defm ST3 : SIMDSt3Multiple<"st3">;
6900defm ST4 : SIMDSt4Multiple<"st4">;
6901
6902class Ld1Pat<ValueType ty, Instruction INST>
6903  : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;
6904
6905def : Ld1Pat<v16i8, LD1Onev16b>;
6906def : Ld1Pat<v8i16, LD1Onev8h>;
6907def : Ld1Pat<v4i32, LD1Onev4s>;
6908def : Ld1Pat<v2i64, LD1Onev2d>;
6909def : Ld1Pat<v8i8,  LD1Onev8b>;
6910def : Ld1Pat<v4i16, LD1Onev4h>;
6911def : Ld1Pat<v2i32, LD1Onev2s>;
6912def : Ld1Pat<v1i64, LD1Onev1d>;
6913
6914class St1Pat<ValueType ty, Instruction INST>
6915  : Pat<(store ty:$Vt, GPR64sp:$Rn),
6916        (INST ty:$Vt, GPR64sp:$Rn)>;
6917
6918def : St1Pat<v16i8, ST1Onev16b>;
6919def : St1Pat<v8i16, ST1Onev8h>;
6920def : St1Pat<v4i32, ST1Onev4s>;
6921def : St1Pat<v2i64, ST1Onev2d>;
6922def : St1Pat<v8i8,  ST1Onev8b>;
6923def : St1Pat<v4i16, ST1Onev4h>;
6924def : St1Pat<v2i32, ST1Onev2s>;
6925def : St1Pat<v1i64, ST1Onev1d>;
6926
6927//---
6928// Single-element
6929//---
6930
6931defm LD1R          : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
6932defm LD2R          : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
6933defm LD3R          : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
6934defm LD4R          : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
6935let mayLoad = 1, hasSideEffects = 0 in {
6936defm LD1 : SIMDLdSingleBTied<0, 0b000,       "ld1", VecListOneb,   GPR64pi1>;
6937defm LD1 : SIMDLdSingleHTied<0, 0b010, 0,    "ld1", VecListOneh,   GPR64pi2>;
6938defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes,   GPR64pi4>;
6939defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned,   GPR64pi8>;
6940defm LD2 : SIMDLdSingleBTied<1, 0b000,       "ld2", VecListTwob,   GPR64pi2>;
6941defm LD2 : SIMDLdSingleHTied<1, 0b010, 0,    "ld2", VecListTwoh,   GPR64pi4>;
6942defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos,   GPR64pi8>;
6943defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod,   GPR64pi16>;
6944defm LD3 : SIMDLdSingleBTied<0, 0b001,       "ld3", VecListThreeb, GPR64pi3>;
6945defm LD3 : SIMDLdSingleHTied<0, 0b011, 0,    "ld3", VecListThreeh, GPR64pi6>;
6946defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
6947defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
6948defm LD4 : SIMDLdSingleBTied<1, 0b001,       "ld4", VecListFourb,  GPR64pi4>;
6949defm LD4 : SIMDLdSingleHTied<1, 0b011, 0,    "ld4", VecListFourh,  GPR64pi8>;
6950defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours,  GPR64pi16>;
6951defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd,  GPR64pi32>;
6952}
6953
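// An AArch64dup of a loaded scalar becomes a single replicating load; e.g.
// the v8i8 pattern below selects to "ld1r { v0.8b }, [x0]".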
6954def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
6955          (LD1Rv8b GPR64sp:$Rn)>;
6956def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
6957          (LD1Rv16b GPR64sp:$Rn)>;
6958def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
6959          (LD1Rv4h GPR64sp:$Rn)>;
6960def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
6961          (LD1Rv8h GPR64sp:$Rn)>;
6962def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
6963          (LD1Rv2s GPR64sp:$Rn)>;
6964def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
6965          (LD1Rv4s GPR64sp:$Rn)>;
6966def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
6967          (LD1Rv2d GPR64sp:$Rn)>;
6968def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
6969          (LD1Rv1d GPR64sp:$Rn)>;
6970// Grab the floating point version too
6971def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
6972          (LD1Rv2s GPR64sp:$Rn)>;
6973def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
6974          (LD1Rv4s GPR64sp:$Rn)>;
6975def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
6976          (LD1Rv2d GPR64sp:$Rn)>;
6977def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
6978          (LD1Rv1d GPR64sp:$Rn)>;
6979def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
6980          (LD1Rv4h GPR64sp:$Rn)>;
6981def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
6982          (LD1Rv8h GPR64sp:$Rn)>;
6983def : Pat<(v4bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
6984          (LD1Rv4h GPR64sp:$Rn)>;
6985def : Pat<(v8bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
6986          (LD1Rv8h GPR64sp:$Rn)>;
6987
6988class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
6989                    ValueType VTy, ValueType STy, Instruction LD1>
6990  : Pat<(vector_insert (VTy VecListOne128:$Rd),
6991           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
6992        (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;
6993
6994def : Ld1Lane128Pat<extloadi8,  VectorIndexB, v16i8, i32, LD1i8>;
6995def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
6996def : Ld1Lane128Pat<load,       VectorIndexS, v4i32, i32, LD1i32>;
6997def : Ld1Lane128Pat<load,       VectorIndexS, v4f32, f32, LD1i32>;
6998def : Ld1Lane128Pat<load,       VectorIndexD, v2i64, i64, LD1i64>;
6999def : Ld1Lane128Pat<load,       VectorIndexD, v2f64, f64, LD1i64>;
7000def : Ld1Lane128Pat<load,       VectorIndexH, v8f16, f16, LD1i16>;
7001def : Ld1Lane128Pat<load,       VectorIndexH, v8bf16, bf16, LD1i16>;
7002
7003// Generate LD1 for extload if memory type does not match the
7004// destination type, for example:
7005//
7006//   (v4i32 (insert_vector_elt (load anyext from i8) idx))
7007//
7008  // In this case, the index must be adjusted to match the LD1 lane type.
7009//
7010class Ld1Lane128IdxOpPat<SDPatternOperator scalar_load, Operand
7011                    VecIndex, ValueType VTy, ValueType STy,
7012                    Instruction LD1, SDNodeXForm IdxOp>
7013  : Pat<(vector_insert (VTy VecListOne128:$Rd),
7014                       (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
7015        (LD1 VecListOne128:$Rd, (IdxOp VecIndex:$idx), GPR64sp:$Rn)>;
7016
7017def VectorIndexStoH : SDNodeXForm<imm, [{
7018  return CurDAG->getTargetConstant(N->getZExtValue() * 2, SDLoc(N), MVT::i64);
7019}]>;
7020def VectorIndexStoB : SDNodeXForm<imm, [{
7021  return CurDAG->getTargetConstant(N->getZExtValue() * 4, SDLoc(N), MVT::i64);
7022}]>;
7023def VectorIndexHtoB : SDNodeXForm<imm, [{
7024  return CurDAG->getTargetConstant(N->getZExtValue() * 2, SDLoc(N), MVT::i64);
7025}]>;
7026
7027def : Ld1Lane128IdxOpPat<extloadi16, VectorIndexS, v4i32, i32, LD1i16, VectorIndexStoH>;
7028def : Ld1Lane128IdxOpPat<extloadi8, VectorIndexS, v4i32, i32, LD1i8, VectorIndexStoB>;
7029def : Ld1Lane128IdxOpPat<extloadi8, VectorIndexH, v8i16, i32, LD1i8, VectorIndexHtoB>;
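// (E.g. an extloaded i8 inserted at lane 1 of a v4i32 uses LD1i8 with index
// 1 * 4 = 4, the corresponding byte lane of the v16i8 view.)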
7030
7031// Same as above, but the first element is populated using
7032// scalar_to_vector + insert_subvector instead of insert_vector_elt.
7033class Ld1Lane128FirstElm<ValueType ResultTy, ValueType VecTy,
7034                        SDPatternOperator ExtLoad, Instruction LD1>
7035  : Pat<(ResultTy (scalar_to_vector (i32 (ExtLoad GPR64sp:$Rn)))),
7036          (ResultTy (EXTRACT_SUBREG
7037            (LD1 (VecTy (IMPLICIT_DEF)), 0, GPR64sp:$Rn), dsub))>;
7038
7039def : Ld1Lane128FirstElm<v2i32, v8i16, extloadi16, LD1i16>;
7040def : Ld1Lane128FirstElm<v2i32, v16i8, extloadi8, LD1i8>;
7041def : Ld1Lane128FirstElm<v4i16, v16i8, extloadi8, LD1i8>;
7042
7043class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
7044                   ValueType VTy, ValueType STy, Instruction LD1>
7045  : Pat<(vector_insert (VTy VecListOne64:$Rd),
7046           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
7047        (EXTRACT_SUBREG
7048            (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
7049                          VecIndex:$idx, GPR64sp:$Rn),
7050            dsub)>;
7051
7052def : Ld1Lane64Pat<extloadi8,  VectorIndexB, v8i8,  i32, LD1i8>;
7053def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
7054def : Ld1Lane64Pat<load,       VectorIndexS, v2i32, i32, LD1i32>;
7055def : Ld1Lane64Pat<load,       VectorIndexS, v2f32, f32, LD1i32>;
7056def : Ld1Lane64Pat<load,       VectorIndexH, v4f16, f16, LD1i16>;
7057def : Ld1Lane64Pat<load,       VectorIndexH, v4bf16, bf16, LD1i16>;
7058
7059
7060defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
7061defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
7062defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
7063defm LD4 : SIMDLdSt4SingleAliases<"ld4">;
7064
7065// Stores
7066defm ST1 : SIMDStSingleB<0, 0b000,       "st1", VecListOneb, GPR64pi1>;
7067defm ST1 : SIMDStSingleH<0, 0b010, 0,    "st1", VecListOneh, GPR64pi2>;
7068defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
7069defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;
7070
7071let AddedComplexity = 19 in
7072class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
7073                    ValueType VTy, ValueType STy, Instruction ST1>
7074  : Pat<(scalar_store
7075             (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
7076             GPR64sp:$Rn),
7077        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;
7078
7079def : St1Lane128Pat<truncstorei8,  VectorIndexB, v16i8, i32, ST1i8>;
7080def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
7081def : St1Lane128Pat<store,         VectorIndexS, v4i32, i32, ST1i32>;
7082def : St1Lane128Pat<store,         VectorIndexS, v4f32, f32, ST1i32>;
7083def : St1Lane128Pat<store,         VectorIndexD, v2i64, i64, ST1i64>;
7084def : St1Lane128Pat<store,         VectorIndexD, v2f64, f64, ST1i64>;
7085def : St1Lane128Pat<store,         VectorIndexH, v8f16, f16, ST1i16>;
7086def : St1Lane128Pat<store,         VectorIndexH, v8bf16, bf16, ST1i16>;
7087
7088let AddedComplexity = 19 in
7089class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
7090                   ValueType VTy, ValueType STy, Instruction ST1>
7091  : Pat<(scalar_store
7092             (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
7093             GPR64sp:$Rn),
7094        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
7095             VecIndex:$idx, GPR64sp:$Rn)>;
7096
7097def : St1Lane64Pat<truncstorei8,  VectorIndexB, v8i8, i32, ST1i8>;
7098def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
7099def : St1Lane64Pat<store,         VectorIndexS, v2i32, i32, ST1i32>;
7100def : St1Lane64Pat<store,         VectorIndexS, v2f32, f32, ST1i32>;
7101def : St1Lane64Pat<store,         VectorIndexH, v4f16, f16, ST1i16>;
7102def : St1Lane64Pat<store,         VectorIndexH, v4bf16, bf16, ST1i16>;
7103
7104multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
7105                             ValueType VTy, ValueType STy, Instruction ST1,
7106                             int offset> {
7107  def : Pat<(scalar_store
7108              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
7109              GPR64sp:$Rn, offset),
7110        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
7111             VecIndex:$idx, GPR64sp:$Rn, XZR)>;
7112
7113  def : Pat<(scalar_store
7114              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
7115              GPR64sp:$Rn, GPR64:$Rm),
7116        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
7117             VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
7118}
7119
7120defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
7121defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
7122                        2>;
7123defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
7124defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
7125defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
7126defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
7127defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
7128defm : St1LanePost64Pat<post_store, VectorIndexH, v4bf16, bf16, ST1i16_POST, 2>;
7129
7130multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
7131                             ValueType VTy, ValueType STy, Instruction ST1,
7132                             int offset> {
7133  def : Pat<(scalar_store
7134              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
7135              GPR64sp:$Rn, offset),
7136        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;
7137
7138  def : Pat<(scalar_store
7139              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
7140              GPR64sp:$Rn, GPR64:$Rm),
7141        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
7142}
7143
7144defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
7145                         1>;
7146defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
7147                         2>;
7148defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
7149defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
7150defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
7151defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
7152defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
7153defm : St1LanePost128Pat<post_store, VectorIndexH, v8bf16, bf16, ST1i16_POST, 2>;
7154
7155let mayStore = 1, hasSideEffects = 0 in {
7156defm ST2 : SIMDStSingleB<1, 0b000,       "st2", VecListTwob,   GPR64pi2>;
7157defm ST2 : SIMDStSingleH<1, 0b010, 0,    "st2", VecListTwoh,   GPR64pi4>;
7158defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos,   GPR64pi8>;
7159defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod,   GPR64pi16>;
7160defm ST3 : SIMDStSingleB<0, 0b001,       "st3", VecListThreeb, GPR64pi3>;
7161defm ST3 : SIMDStSingleH<0, 0b011, 0,    "st3", VecListThreeh, GPR64pi6>;
7162defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
7163defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
7164defm ST4 : SIMDStSingleB<1, 0b001,       "st4", VecListFourb,  GPR64pi4>;
7165defm ST4 : SIMDStSingleH<1, 0b011, 0,    "st4", VecListFourh,  GPR64pi8>;
7166defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours,  GPR64pi16>;
7167defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd,  GPR64pi32>;
7168}
7169
7170defm ST1 : SIMDLdSt1SingleAliases<"st1">;
7171defm ST2 : SIMDLdSt2SingleAliases<"st2">;
7172defm ST3 : SIMDLdSt3SingleAliases<"st3">;
7173defm ST4 : SIMDLdSt4SingleAliases<"st4">;
7174
7175//----------------------------------------------------------------------------
7176// Crypto extensions
7177//----------------------------------------------------------------------------
7178
7179let Predicates = [HasAES] in {
7180def AESErr   : AESTiedInst<0b0100, "aese",   int_aarch64_crypto_aese>;
7181def AESDrr   : AESTiedInst<0b0101, "aesd",   int_aarch64_crypto_aesd>;
7182def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
7183def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
7184}
7185
7186// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
7187// for AES fusion on some CPUs.
7188let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
7189def AESMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
7190                        Sched<[WriteVq]>;
7191def AESIMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
7192                         Sched<[WriteVq]>;
7193}
7194
7195// Only use constrained versions of AES(I)MC instructions if they are paired with
7196// AESE/AESD.
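// (The tied pseudos keep the AES(I)MC source and destination in the same
// register, producing the back-to-back aese/aesmc register shape that such
// cores can fuse.)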
7197def : Pat<(v16i8 (int_aarch64_crypto_aesmc
7198            (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
7199                                            (v16i8 V128:$src2))))),
7200          (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1),
7201                                             (v16i8 V128:$src2)))))>,
7202          Requires<[HasFuseAES]>;
7203
7204def : Pat<(v16i8 (int_aarch64_crypto_aesimc
7205            (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
7206                                            (v16i8 V128:$src2))))),
7207          (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1),
7208                                              (v16i8 V128:$src2)))))>,
7209          Requires<[HasFuseAES]>;
7210
7211let Predicates = [HasSHA2] in {
7212def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c",   int_aarch64_crypto_sha1c>;
7213def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p",   int_aarch64_crypto_sha1p>;
7214def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m",   int_aarch64_crypto_sha1m>;
7215def SHA1SU0rrr   : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
7216def SHA256Hrrr   : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
7217def SHA256H2rrr  : SHATiedInstQQV<0b101, "sha256h2",int_aarch64_crypto_sha256h2>;
7218def SHA256SU1rrr :SHATiedInstVVV<0b110, "sha256su1",int_aarch64_crypto_sha256su1>;
7219
7220def SHA1Hrr     : SHAInstSS<    0b0000, "sha1h",    int_aarch64_crypto_sha1h>;
7221def SHA1SU1rr   : SHATiedInstVV<0b0001, "sha1su1",  int_aarch64_crypto_sha1su1>;
7222def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",int_aarch64_crypto_sha256su0>;
7223}
7224
7225//----------------------------------------------------------------------------
7226// Compiler-pseudos
7227//----------------------------------------------------------------------------
7228// FIXME: Like for X86, these should go in their own separate .td file.
7229
7230def def32 : PatLeaf<(i32 GPR32:$src), [{
7231  return isDef32(*N);
7232}]>;
7233
7234// In the case of a 32-bit def that is known to implicitly zero-extend,
7235// we can use a SUBREG_TO_REG.
7236def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;
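// E.g. a 32-bit ADDWrr already zeroes bits [63:32] of the full X register,
// so the zext above needs no instruction at all.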
7237
7238// For an anyext, we don't care what the high bits are, so we can perform an
7239  // INSERT_SUBREG into an IMPLICIT_DEF.
7240def : Pat<(i64 (anyext GPR32:$src)),
7241          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;
7242
7243// When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
7244// then assert the extension has happened.
7245def : Pat<(i64 (zext GPR32:$src)),
7246          (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
7247
7248// To sign extend, we use a signed bitfield move instruction (SBFM) on the
7249// containing super-reg.
7250def : Pat<(i64 (sext GPR32:$src)),
7251   (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
7252def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
7253def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
7254def : Pat<(i64 (sext_inreg GPR64:$src, i8)),  (SBFMXri GPR64:$src, 0, 7)>;
7255def : Pat<(i64 (sext_inreg GPR64:$src, i1)),  (SBFMXri GPR64:$src, 0, 0)>;
7256def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
7257def : Pat<(i32 (sext_inreg GPR32:$src, i8)),  (SBFMWri GPR32:$src, 0, 7)>;
7258def : Pat<(i32 (sext_inreg GPR32:$src, i1)),  (SBFMWri GPR32:$src, 0, 0)>;
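// (The SBFM encodings with immr = 0 and imms = 31/15/7 above are the
// canonical forms of the sxtw/sxth/sxtb aliases.)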
7259
7260def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
7261          (SBFMWri GPR32:$Rn, (i64 (i32shift_a       imm0_31:$imm)),
7262                              (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
7263def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
7264          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
7265                              (i64 (i64shift_sext_i8 imm0_63:$imm)))>;
7266
7267def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
7268          (SBFMWri GPR32:$Rn, (i64 (i32shift_a        imm0_31:$imm)),
7269                              (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
7270def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
7271          (SBFMXri GPR64:$Rn, (i64 (i64shift_a        imm0_63:$imm)),
7272                              (i64 (i64shift_sext_i16 imm0_63:$imm)))>;
7273
7274def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
7275          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
7276                   (i64 (i64shift_a        imm0_63:$imm)),
7277                   (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
7278
7279// sra patterns have an AddedComplexity of 10, so make sure we have a higher
7280// AddedComplexity for the following patterns since we want to match sext + sra
7281// patterns before we attempt to match a single sra node.
7282let AddedComplexity = 20 in {
7283// We support all sext + sra combinations which preserve at least one bit of the
7284// original value which is to be sign extended. E.g. we support shifts up to
7285// bitwidth-1 bits.
7286def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
7287          (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
7288def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
7289          (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;
7290
7291def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
7292          (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
7293def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
7294          (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;
7295
7296def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
7297          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
7298                   (i64 imm0_31:$imm), 31)>;
7299} // AddedComplexity = 20
7300
7301// To truncate, we can simply extract from a subregister.
7302def : Pat<(i32 (trunc GPR64sp:$src)),
7303          (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;
7304
7305// __builtin_trap() uses the BRK instruction on AArch64.
7306def : Pat<(trap), (BRK 1)>;
7307def : Pat<(debugtrap), (BRK 0xF000)>;
7308
7309def ubsan_trap_xform : SDNodeXForm<timm, [{
7310  return CurDAG->getTargetConstant(N->getZExtValue() | ('U' << 8), SDLoc(N), MVT::i32);
7311}]>;
7312
7313def ubsan_trap_imm : TImmLeaf<i32, [{
7314  return isUInt<8>(Imm);
7315}], ubsan_trap_xform>;
7316
7317def : Pat<(ubsantrap ubsan_trap_imm:$kind), (BRK ubsan_trap_imm:$kind)>;
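// E.g. (ubsantrap 42) becomes BRK #0x552a, i.e. ('U' << 8) | 42, so the UBSan
// check kind stays recoverable from the immediate.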
7318
7319// Multiply high patterns which multiply the lower subvector using smull/umull
7320  // and the upper subvector with smull2/umull2. Then shuffle the high
7321  // parts of both results together.
7322def : Pat<(v16i8 (mulhs V128:$Rn, V128:$Rm)),
7323          (UZP2v16i8
7324           (SMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
7325                            (EXTRACT_SUBREG V128:$Rm, dsub)),
7326           (SMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
7327def : Pat<(v8i16 (mulhs V128:$Rn, V128:$Rm)),
7328          (UZP2v8i16
7329           (SMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
7330                             (EXTRACT_SUBREG V128:$Rm, dsub)),
7331           (SMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
7332def : Pat<(v4i32 (mulhs V128:$Rn, V128:$Rm)),
7333          (UZP2v4i32
7334           (SMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
7335                             (EXTRACT_SUBREG V128:$Rm, dsub)),
7336           (SMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
7337
7338def : Pat<(v16i8 (mulhu V128:$Rn, V128:$Rm)),
7339          (UZP2v16i8
7340           (UMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
7341                            (EXTRACT_SUBREG V128:$Rm, dsub)),
7342           (UMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
7343def : Pat<(v8i16 (mulhu V128:$Rn, V128:$Rm)),
7344          (UZP2v8i16
7345           (UMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
7346                             (EXTRACT_SUBREG V128:$Rm, dsub)),
7347           (UMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
7348def : Pat<(v4i32 (mulhu V128:$Rn, V128:$Rm)),
7349          (UZP2v4i32
7350           (UMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
7351                             (EXTRACT_SUBREG V128:$Rm, dsub)),
7352           (UMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
7353
7354// Conversions within AdvSIMD types in the same register size are free.
7355// But because we need a consistent lane ordering, in big endian many
7356// conversions require one or more REV instructions.
7357//
7358// Consider a simple memory load followed by a bitconvert then a store.
7359//   v0 = load v2i32
7360//   v1 = BITCAST v2i32 v0 to v4i16
7361  //        store v4i16 v1
7362//
7363// In big endian mode every memory access has an implicit byte swap. LDR and
7364// STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
7365// is, they treat the vector as a sequence of elements to be byte-swapped.
7366// The two pairs of instructions are fundamentally incompatible. We've decided
7367// to use LD1/ST1 only to simplify compiler implementation.
7368//
7369// LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
7370// the original code sequence:
7371//   v0 = load v2i32
7372//   v1 = REV v2i32                  (implicit)
7373//   v2 = BITCAST v2i32 v1 to v4i16
7374//   v3 = REV v4i16 v2               (implicit)
7375//        store v4i16 v3
7376//
7377  // But this is now broken - the value stored is different from the value loaded
7378// due to lane reordering. To fix this, on every BITCAST we must perform two
7379// other REVs:
7380//   v0 = load v2i32
7381//   v1 = REV v2i32                  (implicit)
7382  //   v2 = REV v2i32 v1
7383  //   v3 = BITCAST v2i32 v2 to v4i16
7384  //   v4 = REV v4i16 v3
7385//   v5 = REV v4i16 v4               (implicit)
7386//        store v4i16 v5
7387//
7388// This means an extra two instructions, but actually in most cases the two REV
7389// instructions can be combined into one. For example:
7390//   (REV64_2s (REV64_4h X)) === (REV32_4h X)
7391//
7392// There is also no 128-bit REV instruction. This must be synthesized with an
7393// EXT instruction.
7394//
7395// Most bitconverts require some sort of conversion. The only exceptions are:
7396  //   a) Identity conversions - vNfX <-> vNiX
7397//   b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
7398//
7399
7400// Natural vector casts (64 bit)
7401def : Pat<(v8i8 (AArch64NvCast (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
7402def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
7403def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
7404def : Pat<(v4bf16 (AArch64NvCast (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
7405def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
7406def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
7407def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
7408
7409def : Pat<(v8i8 (AArch64NvCast (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
7410def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
7411def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
7412def : Pat<(v4bf16 (AArch64NvCast (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;
7413def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
7414def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
7415
7416def : Pat<(v8i8 (AArch64NvCast (v8i8 FPR64:$src))), (v8i8 FPR64:$src)>;
7417def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
7418def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
7419def : Pat<(v4bf16 (AArch64NvCast (v8i8 FPR64:$src))), (v4bf16 FPR64:$src)>;
7420def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
7421def : Pat<(v2f32 (AArch64NvCast (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
7422def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
7423
7424def : Pat<(v8i8 (AArch64NvCast (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
7425def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
7426def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
7427def : Pat<(v4bf16 (AArch64NvCast (f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
7428def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
7429def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
7430def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
7431def : Pat<(v1f64 (AArch64NvCast (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
7432
7433def : Pat<(v8i8 (AArch64NvCast (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
7434def : Pat<(v4i16 (AArch64NvCast (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
7435def : Pat<(v2i32 (AArch64NvCast (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
7436def : Pat<(v2f32 (AArch64NvCast (v2f32 FPR64:$src))), (v2f32 FPR64:$src)>;
7437def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
7438def : Pat<(v1f64 (AArch64NvCast (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
7439
7440// Natural vector casts (128 bit)
7441def : Pat<(v16i8 (AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
7442def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
7443def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
7444def : Pat<(v8bf16 (AArch64NvCast (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7445def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>;
7446def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
7447def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
7448def : Pat<(v2f64 (AArch64NvCast (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
7449
7450def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
7451def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>;
7452def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
7453def : Pat<(v8bf16 (AArch64NvCast (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;
7454def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
7455def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
7456def : Pat<(v4f32 (AArch64NvCast (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
7457def : Pat<(v2f64 (AArch64NvCast (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
7458
7459def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>;
7460def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
7461def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
7462def : Pat<(v8bf16 (AArch64NvCast (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
7463def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
7464def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
7465def : Pat<(v4f32 (AArch64NvCast (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
7466def : Pat<(v2f64 (AArch64NvCast (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
7467
7468def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
7469def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
7470def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
7471def : Pat<(v8bf16 (AArch64NvCast (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7472def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
7473def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>;
7474def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
7475def : Pat<(v2f64 (AArch64NvCast (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
7476
7477def : Pat<(v16i8 (AArch64NvCast (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
7478def : Pat<(v8i16 (AArch64NvCast (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
7479def : Pat<(v4i32 (AArch64NvCast (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
7480def : Pat<(v4f32 (AArch64NvCast (v4f32 FPR128:$src))), (v4f32 FPR128:$src)>;
7481def : Pat<(v2i64 (AArch64NvCast (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
7482def : Pat<(v8f16 (AArch64NvCast (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
7483def : Pat<(v8bf16 (AArch64NvCast (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7484def : Pat<(v2f64 (AArch64NvCast (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
7485
7486def : Pat<(v16i8 (AArch64NvCast (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
7487def : Pat<(v8i16 (AArch64NvCast (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
7488def : Pat<(v4i32 (AArch64NvCast (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
7489def : Pat<(v2i64 (AArch64NvCast (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
7490def : Pat<(v2f64 (AArch64NvCast (v2f64 FPR128:$src))), (v2f64 FPR128:$src)>;
7491def : Pat<(v8f16 (AArch64NvCast (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
7492def : Pat<(v8bf16 (AArch64NvCast (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7493def : Pat<(v4f32 (AArch64NvCast (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
7494
7495let Predicates = [IsLE] in {
7496def : Pat<(v8i8  (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7497def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7498def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7499def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7500def : Pat<(v4bf16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7501def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7502
7503def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
7504          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7505def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
7506          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7507def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
7508          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7509def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
7510          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7511def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
7512          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7513def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
7514          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7515def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
7516          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7517}
7518let Predicates = [IsBE] in {
7519def : Pat<(v8i8  (bitconvert GPR64:$Xn)),
7520                 (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7521def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
7522                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7523def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
7524                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7525def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
7526                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7527def : Pat<(v4bf16 (bitconvert GPR64:$Xn)),
7528                  (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7529def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
7530                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7531
7532def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
7533          (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7534def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
7535          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7536def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
7537          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7538def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
7539          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7540def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
7541          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7542def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
7543          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7544}
7545def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7546def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7547def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
7548          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7549def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
7550          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7551def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
7552          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7553def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;
7554
7555def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
7556          (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
7557def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
7558          (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
7559def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
7560          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7561def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
7562          (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
7563def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
7564          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7565
7566def : Pat<(f16 (bitconvert (bf16 FPR16:$src))), (f16 FPR16:$src)>;
7567def : Pat<(bf16 (bitconvert (f16 FPR16:$src))), (bf16 FPR16:$src)>;
7568
7569let Predicates = [IsLE] in {
7570def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
7571def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
7572def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))), (v1i64 FPR64:$src)>;
7573def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
7574def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))), (v1i64 FPR64:$src)>;
7575def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
7576}
7577let Predicates = [IsBE] in {
7578def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
7579                             (v1i64 (REV64v2i32 FPR64:$src))>;
7580def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
7581                             (v1i64 (REV64v4i16 FPR64:$src))>;
7582def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))),
7583                             (v1i64 (REV64v8i8 FPR64:$src))>;
7584def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
7585                             (v1i64 (REV64v4i16 FPR64:$src))>;
7586def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))),
7587                             (v1i64 (REV64v4i16 FPR64:$src))>;
7588def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
7589                             (v1i64 (REV64v2i32 FPR64:$src))>;
7590}
7591def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
7592def : Pat<(v1i64 (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;
7593
7594let Predicates = [IsLE] in {
7595def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
7596def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
7597def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))), (v2i32 FPR64:$src)>;
7598def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
7599def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
7600def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
7601def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))), (v2i32 FPR64:$src)>;
7602}
7603let Predicates = [IsBE] in {
7604def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
7605                             (v2i32 (REV64v2i32 FPR64:$src))>;
7606def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
7607                             (v2i32 (REV32v4i16 FPR64:$src))>;
7608def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))),
7609                             (v2i32 (REV32v8i8 FPR64:$src))>;
7610def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))),
7611                             (v2i32 (REV64v2i32 FPR64:$src))>;
7612def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
7613                             (v2i32 (REV64v2i32 FPR64:$src))>;
7614def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
7615                             (v2i32 (REV32v4i16 FPR64:$src))>;
7616def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))),
7617                             (v2i32 (REV32v4i16 FPR64:$src))>;
7618}
7619def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
7620
7621let Predicates = [IsLE] in {
7622def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
7623def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
7624def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))), (v4i16 FPR64:$src)>;
7625def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
7626def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
7627def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
7628}
7629let Predicates = [IsBE] in {
7630def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
7631                             (v4i16 (REV64v4i16 FPR64:$src))>;
7632def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
7633                             (v4i16 (REV32v4i16 FPR64:$src))>;
7634def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))),
7635                             (v4i16 (REV16v8i8 FPR64:$src))>;
7636def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))),
7637                             (v4i16 (REV64v4i16 FPR64:$src))>;
7638def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
7639                             (v4i16 (REV32v4i16 FPR64:$src))>;
7640def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
7641                             (v4i16 (REV64v4i16 FPR64:$src))>;
7642}
7643def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
7644def : Pat<(v4i16 (bitconvert (v4bf16 FPR64:$src))), (v4i16 FPR64:$src)>;
7645
7646let Predicates = [IsLE] in {
7647def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
7648def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
7649def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))), (v4f16 FPR64:$src)>;
7650def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))), (v4f16 FPR64:$src)>;
7651def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
7652def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;
7653
7654def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))), (v4bf16 FPR64:$src)>;
7655def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
7656def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))), (v4bf16 FPR64:$src)>;
7657def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))), (v4bf16 FPR64:$src)>;
7658def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))), (v4bf16 FPR64:$src)>;
7659def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
7660}
7661let Predicates = [IsBE] in {
7662def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
7663                             (v4f16 (REV64v4i16 FPR64:$src))>;
7664def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
7665                             (v4f16 (REV32v4i16 FPR64:$src))>;
7666def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))),
7667                             (v4f16 (REV16v8i8 FPR64:$src))>;
7668def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))),
7669                             (v4f16 (REV64v4i16 FPR64:$src))>;
7670def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
7671                             (v4f16 (REV32v4i16 FPR64:$src))>;
7672def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
7673                             (v4f16 (REV64v4i16 FPR64:$src))>;
7674
7675def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))),
7676                             (v4bf16 (REV64v4i16 FPR64:$src))>;
7677def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))),
7678                             (v4bf16 (REV32v4i16 FPR64:$src))>;
7679def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))),
7680                             (v4bf16 (REV16v8i8 FPR64:$src))>;
7681def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))),
7682                             (v4bf16 (REV64v4i16 FPR64:$src))>;
7683def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))),
7684                             (v4bf16 (REV32v4i16 FPR64:$src))>;
7685def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))),
7686                             (v4bf16 (REV64v4i16 FPR64:$src))>;
7687}
7688def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
7689def : Pat<(v4bf16 (bitconvert (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;
7690
7691let Predicates = [IsLE] in {
7692def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))), (v8i8  FPR64:$src)>;
7693def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))), (v8i8  FPR64:$src)>;
7694def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))), (v8i8  FPR64:$src)>;
7695def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))), (v8i8  FPR64:$src)>;
7696def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))), (v8i8  FPR64:$src)>;
7697def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))), (v8i8  FPR64:$src)>;
7698def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))), (v8i8  FPR64:$src)>;
7699def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))), (v8i8  FPR64:$src)>;
7700}
7701let Predicates = [IsBE] in {
7702def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))),
7703                             (v8i8 (REV64v8i8 FPR64:$src))>;
7704def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))),
7705                             (v8i8 (REV32v8i8 FPR64:$src))>;
7706def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))),
7707                             (v8i8 (REV16v8i8 FPR64:$src))>;
7708def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))),
7709                             (v8i8 (REV64v8i8 FPR64:$src))>;
7710def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))),
7711                             (v8i8 (REV32v8i8 FPR64:$src))>;
7712def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))),
7713                             (v8i8 (REV64v8i8 FPR64:$src))>;
7714def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))),
7715                             (v8i8 (REV16v8i8 FPR64:$src))>;
7716def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))),
7717                             (v8i8 (REV16v8i8 FPR64:$src))>;
7718}
7719
7720let Predicates = [IsLE] in {
7721def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))), (f64   FPR64:$src)>;
7722def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))), (f64   FPR64:$src)>;
7723def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))), (f64   FPR64:$src)>;
7724def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))), (f64   FPR64:$src)>;
7725def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))), (f64   FPR64:$src)>;
7726def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))), (f64   FPR64:$src)>;
7727}
7728let Predicates = [IsBE] in {
7729def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))),
7730                             (f64 (REV64v2i32 FPR64:$src))>;
7731def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))),
7732                             (f64 (REV64v4i16 FPR64:$src))>;
7733def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))),
7734                             (f64 (REV64v2i32 FPR64:$src))>;
7735def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))),
7736                             (f64 (REV64v8i8 FPR64:$src))>;
7737def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))),
7738                             (f64 (REV64v4i16 FPR64:$src))>;
7739def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))),
7740                             (f64 (REV64v4i16 FPR64:$src))>;
7741}
7742def : Pat<(f64   (bitconvert (v1i64 FPR64:$src))), (f64   FPR64:$src)>;
7743def : Pat<(f64   (bitconvert (v1f64 FPR64:$src))), (f64   FPR64:$src)>;
7744
7745let Predicates = [IsLE] in {
7746def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
7747def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
7748def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))), (v1f64 FPR64:$src)>;
7749def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
7750def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
7751def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))), (v1f64 FPR64:$src)>;
7752}
7753let Predicates = [IsBE] in {
7754def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
7755                             (v1f64 (REV64v2i32 FPR64:$src))>;
7756def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
7757                             (v1f64 (REV64v4i16 FPR64:$src))>;
7758def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))),
7759                             (v1f64 (REV64v8i8 FPR64:$src))>;
7760def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
7761                             (v1f64 (REV64v2i32 FPR64:$src))>;
7762def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
7763                             (v1f64 (REV64v4i16 FPR64:$src))>;
7764def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))),
7765                             (v1f64 (REV64v4i16 FPR64:$src))>;
7766}
7767def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
7768def : Pat<(v1f64 (bitconvert (f64   FPR64:$src))), (v1f64 FPR64:$src)>;
7769
7770let Predicates = [IsLE] in {
7771def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
7772def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
7773def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))), (v2f32 FPR64:$src)>;
7774def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
7775def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
7776def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
7777def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))), (v2f32 FPR64:$src)>;
7778}
7779let Predicates = [IsBE] in {
7780def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
7781                             (v2f32 (REV64v2i32 FPR64:$src))>;
7782def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
7783                             (v2f32 (REV32v4i16 FPR64:$src))>;
7784def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))),
7785                             (v2f32 (REV32v8i8 FPR64:$src))>;
7786def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
7787                             (v2f32 (REV64v2i32 FPR64:$src))>;
7788def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))),
7789                             (v2f32 (REV64v2i32 FPR64:$src))>;
7790def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
7791                             (v2f32 (REV32v4i16 FPR64:$src))>;
7792def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))),
7793                             (v2f32 (REV32v4i16 FPR64:$src))>;
7794}
7795def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
7796
7797let Predicates = [IsLE] in {
7798def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
7799def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
7800def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
7801def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
7802def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
7803def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
7804def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))), (f128 FPR128:$src)>;
7805def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
7806}
7807let Predicates = [IsBE] in {
7808def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
7809                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
7810def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
7811                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
7812                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
7813def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
7814                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
7815                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
7816def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
7817                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
7818                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
7819def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))),
7820                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
7821                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
7822def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
7823                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
7824def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
7825                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
7826                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
7827def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
7828                            (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
7829                                            (REV64v16i8 FPR128:$src), (i32 8)))>;
7830}
7831
7832let Predicates = [IsLE] in {
7833def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))), (v2f64 FPR128:$src)>;
7834def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
7835def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
7836def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
7837def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))), (v2f64 FPR128:$src)>;
7838def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
7839def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
7840}
7841let Predicates = [IsBE] in {
7842def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))),
7843                             (v2f64 (EXTv16i8 FPR128:$src,
7844                                              FPR128:$src, (i32 8)))>;
7845def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
7846                             (v2f64 (REV64v4i32 FPR128:$src))>;
7847def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
7848                             (v2f64 (REV64v8i16 FPR128:$src))>;
7849def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
7850                             (v2f64 (REV64v8i16 FPR128:$src))>;
7851def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))),
7852                             (v2f64 (REV64v8i16 FPR128:$src))>;
7853def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
7854                             (v2f64 (REV64v16i8 FPR128:$src))>;
7855def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
7856                             (v2f64 (REV64v4i32 FPR128:$src))>;
7857}
7858def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
7859
7860let Predicates = [IsLE] in {
7861def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))), (v4f32 FPR128:$src)>;
7862def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
7863def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
7864def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))), (v4f32 FPR128:$src)>;
7865def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
7866def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
7867def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
7868}
7869let Predicates = [IsBE] in {
7870def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))),
7871                             (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
7872                                    (REV64v4i32 FPR128:$src), (i32 8)))>;
7873def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
7874                             (v4f32 (REV32v8i16 FPR128:$src))>;
7875def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
7876                             (v4f32 (REV32v8i16 FPR128:$src))>;
7877def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))),
7878                             (v4f32 (REV32v8i16 FPR128:$src))>;
7879def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
7880                             (v4f32 (REV32v16i8 FPR128:$src))>;
7881def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
7882                             (v4f32 (REV64v4i32 FPR128:$src))>;
7883def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
7884                             (v4f32 (REV64v4i32 FPR128:$src))>;
7885}
7886def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
7887
7888let Predicates = [IsLE] in {
7889def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))), (v2i64 FPR128:$src)>;
7890def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
7891def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
7892def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
7893def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
7894def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
7895def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))), (v2i64 FPR128:$src)>;
7896}
7897let Predicates = [IsBE] in {
7898def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))),
7899                             (v2i64 (EXTv16i8 FPR128:$src,
7900                                              FPR128:$src, (i32 8)))>;
7901def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
7902                             (v2i64 (REV64v4i32 FPR128:$src))>;
7903def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
7904                             (v2i64 (REV64v8i16 FPR128:$src))>;
7905def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
7906                             (v2i64 (REV64v16i8 FPR128:$src))>;
7907def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
7908                             (v2i64 (REV64v4i32 FPR128:$src))>;
7909def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
7910                             (v2i64 (REV64v8i16 FPR128:$src))>;
7911def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))),
7912                             (v2i64 (REV64v8i16 FPR128:$src))>;
7913}
7914def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
7915
7916let Predicates = [IsLE] in {
7917def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))), (v4i32 FPR128:$src)>;
7918def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
7919def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
7920def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
7921def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
7922def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
7923def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))), (v4i32 FPR128:$src)>;
7924}
7925let Predicates = [IsBE] in {
7926def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))),
7927                             (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
7928                                              (REV64v4i32 FPR128:$src),
7929                                              (i32 8)))>;
7930def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
7931                             (v4i32 (REV64v4i32 FPR128:$src))>;
7932def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
7933                             (v4i32 (REV32v8i16 FPR128:$src))>;
7934def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
7935                             (v4i32 (REV32v16i8 FPR128:$src))>;
7936def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
7937                             (v4i32 (REV64v4i32 FPR128:$src))>;
7938def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
7939                             (v4i32 (REV32v8i16 FPR128:$src))>;
7940def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))),
7941                             (v4i32 (REV32v8i16 FPR128:$src))>;
7942}
7943def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
7944
7945let Predicates = [IsLE] in {
7946def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))), (v8i16 FPR128:$src)>;
7947def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
7948def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
7949def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
7950def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
7951def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
7952}
7953let Predicates = [IsBE] in {
7954def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))),
7955                             (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
7956                                              (REV64v8i16 FPR128:$src),
7957                                              (i32 8)))>;
7958def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
7959                             (v8i16 (REV64v8i16 FPR128:$src))>;
7960def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
7961                             (v8i16 (REV32v8i16 FPR128:$src))>;
7962def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
7963                             (v8i16 (REV16v16i8 FPR128:$src))>;
7964def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
7965                             (v8i16 (REV64v8i16 FPR128:$src))>;
7966def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
7967                             (v8i16 (REV32v8i16 FPR128:$src))>;
7968}
7969def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
7970def : Pat<(v8i16 (bitconvert (v8bf16 FPR128:$src))), (v8i16 FPR128:$src)>;
7971
7972let Predicates = [IsLE] in {
7973def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))), (v8f16 FPR128:$src)>;
7974def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
7975def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
7976def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
7977def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
7978def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
7979
7980def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))), (v8bf16 FPR128:$src)>;
7981def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7982def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7983def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
7984def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7985def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7986}
7987let Predicates = [IsBE] in {
7988def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))),
7989                             (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
7990                                              (REV64v8i16 FPR128:$src),
7991                                              (i32 8)))>;
7992def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
7993                             (v8f16 (REV64v8i16 FPR128:$src))>;
7994def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
7995                             (v8f16 (REV32v8i16 FPR128:$src))>;
7996def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
7997                             (v8f16 (REV16v16i8 FPR128:$src))>;
7998def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
7999                             (v8f16 (REV64v8i16 FPR128:$src))>;
8000def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
8001                             (v8f16 (REV32v8i16 FPR128:$src))>;
8002
8003def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))),
8004                             (v8bf16 (EXTv16i8 (REV64v8i16 FPR128:$src),
8005                                              (REV64v8i16 FPR128:$src),
8006                                              (i32 8)))>;
8007def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))),
8008                             (v8bf16 (REV64v8i16 FPR128:$src))>;
8009def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))),
8010                             (v8bf16 (REV32v8i16 FPR128:$src))>;
8011def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))),
8012                             (v8bf16 (REV16v16i8 FPR128:$src))>;
8013def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))),
8014                             (v8bf16 (REV64v8i16 FPR128:$src))>;
8015def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))),
8016                             (v8bf16 (REV32v8i16 FPR128:$src))>;
8017}
8018def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
8019def : Pat<(v8bf16 (bitconvert (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;
8020
8021let Predicates = [IsLE] in {
8022def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))), (v16i8 FPR128:$src)>;
8023def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
8024def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
8025def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
8026def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
8027def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
8028def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
8029def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))), (v16i8 FPR128:$src)>;
8030}
8031let Predicates = [IsBE] in {
8032def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))),
8033                             (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
8034                                              (REV64v16i8 FPR128:$src),
8035                                              (i32 8)))>;
8036def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
8037                             (v16i8 (REV64v16i8 FPR128:$src))>;
8038def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
8039                             (v16i8 (REV32v16i8 FPR128:$src))>;
8040def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
8041                             (v16i8 (REV16v16i8 FPR128:$src))>;
8042def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
8043                             (v16i8 (REV64v16i8 FPR128:$src))>;
8044def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
8045                             (v16i8 (REV32v16i8 FPR128:$src))>;
8046def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
8047                             (v16i8 (REV16v16i8 FPR128:$src))>;
8048def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))),
8049                             (v16i8 (REV16v16i8 FPR128:$src))>;
8050}
8051
8052def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
8053           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8054def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
8055           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8056def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
8057           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8058def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
8059           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8060def : Pat<(v4bf16 (extract_subvector V128:$Rn, (i64 0))),
8061           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8062def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
8063           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8064def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
8065           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8066def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
8067           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8068
8069def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
8070          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
8071def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
8072          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
8073def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
8074          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
8075def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
8076          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
8077
8078// A 64-bit subvector insert to the first 128-bit vector position
8079// is a subregister copy that needs no instruction.
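// For example (illustrative): inserting a v2f32 value into the low half of an
// undef v4f32 matches the dsub INSERT_SUBREG below, so no instruction at all
// is emitted for it.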
8080multiclass InsertSubvectorUndef<ValueType Ty> {
8081  def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (Ty 0)),
8082            (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8083  def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (Ty 0)),
8084            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8085  def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (Ty 0)),
8086            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8087  def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (Ty 0)),
8088            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8089  def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (Ty 0)),
8090            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8091  def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (Ty 0)),
8092            (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8093  def : Pat<(insert_subvector undef, (v4bf16 FPR64:$src), (Ty 0)),
8094            (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8095  def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (Ty 0)),
8096            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8097}
8098
8099defm : InsertSubvectorUndef<i32>;
8100defm : InsertSubvectorUndef<i64>;
8101
8102// Use pair-wise add instructions when summing up the lanes for v2f64, v2i64,
8103// v2f32 and the f16 equivalent.
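// For example (illustrative), summing both lanes of a v2i64 becomes a single
//   addp d0, v0.2d
// rather than two lane moves plus a scalar add.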
8104def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
8105                    (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
8106           (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
8107def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
8108                     (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
8109           (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
8110// vector_extract on 64-bit vectors gets promoted to a 128-bit vector,
8111// so we match on v4f32 here, not v2f32. This will also catch adding
8112// the low two lanes of a true v4f32 vector.
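// For example (illustrative), an IR-level fadd of lanes 0 and 1 of a
// <2 x float> value reaches instruction selection with the vector promoted to
// v4f32, so the v4f32 pattern below matches it and emits "faddp s0, v0.2s".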
8113def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
8114                (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
8115          (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
8116def : Pat<(fadd (vector_extract (v8f16 FPR128:$Rn), (i64 0)),
8117                (vector_extract (v8f16 FPR128:$Rn), (i64 1))),
8118          (f16 (FADDPv2i16p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
8119
8120// Scalar 64-bit shifts in FPR64 registers.
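// For example (illustrative), int_aarch64_neon_ushl applied to two i64 values
// already held in D registers selects to a single "ushl d0, d0, d1".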
8121def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
8122          (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
8123def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
8124          (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
8125def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
8126          (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
8127def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
8128          (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
8129
8130// Patterns for nontemporal/no-allocate stores.
8131// We have to resort to tricks to turn a single-input store into a store pair,
8132// because there is no single-input nontemporal store, only STNP.
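// For example (illustrative), a nontemporal store of a v2i64 value in q0
// becomes the pair:
//   mov  d1, v0.d[1]
//   stnp d0, d1, [x0]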
8133let Predicates = [IsLE] in {
8134let AddedComplexity = 15 in {
8135class NTStore128Pat<ValueType VT> :
8136  Pat<(nontemporalstore (VT FPR128:$Rt),
8137        (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
8138      (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
8139              (DUPi64 FPR128:$Rt, (i64 1)),
8140              GPR64sp:$Rn, simm7s8:$offset)>;
8141
8142def : NTStore128Pat<v2i64>;
8143def : NTStore128Pat<v4i32>;
8144def : NTStore128Pat<v8i16>;
8145def : NTStore128Pat<v16i8>;
8146
8147class NTStore64Pat<ValueType VT> :
8148  Pat<(nontemporalstore (VT FPR64:$Rt),
8149        (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
8150      (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
8151              (DUPi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
8152              GPR64sp:$Rn, simm7s4:$offset)>;
8153
8154// FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
8155def : NTStore64Pat<v1f64>;
8156def : NTStore64Pat<v1i64>;
8157def : NTStore64Pat<v2i32>;
8158def : NTStore64Pat<v4i16>;
8159def : NTStore64Pat<v8i8>;
8160
8161def : Pat<(nontemporalstore GPR64:$Rt,
8162            (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
8163          (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
8164                  (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
8165                  GPR64sp:$Rn, simm7s4:$offset)>;
8166} // AddedComplexity=15
8167} // Predicates = [IsLE]
8168
8169// Tail call return handling. These are all compiler pseudo-instructions,
8170// so they carry no encoding or other machine-level information.
8171let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
8172  def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
8173                   Sched<[WriteBrReg]>;
8174  def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
8175                   Sched<[WriteBrReg]>;
8176  // Indirect tail-call with any register allowed, used by MachineOutliner when
8177  // this is proven safe.
8178  // FIXME: If we have to add any more hacks like this, we should instead relax
8179  // some verifier checks for outlined functions.
8180  def TCRETURNriALL : Pseudo<(outs), (ins GPR64:$dst, i32imm:$FPDiff), []>,
8181                      Sched<[WriteBrReg]>;
8182  // Indirect tail-call restricted to the registers (x16 and x17) that are
8183  // allowed to tail-call to a "BTI c" instruction.
8184  def TCRETURNriBTI : Pseudo<(outs), (ins rtcGPR64:$dst, i32imm:$FPDiff), []>,
8185                      Sched<[WriteBrReg]>;
8186}
8187
8188def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
8189          (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>,
8190      Requires<[NotUseBTI]>;
8191def : Pat<(AArch64tcret rtcGPR64:$dst, (i32 timm:$FPDiff)),
8192          (TCRETURNriBTI rtcGPR64:$dst, imm:$FPDiff)>,
8193      Requires<[UseBTI]>;
8194def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
8195          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
8196def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
8197          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
8198
8199def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
8200def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;
8201
8202// Extracting lane zero is a special case where we can just use a plain
8203// EXTRACT_SUBREG instruction, which will become FMOV. This is easier for the
8204// rest of the compiler, especially the register allocator and copy propagation,
8205// to reason about, so it is preferred whenever possible.
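// For example (illustrative): extracting lane 0 of a v4i32 becomes a plain
// ssub subregister copy, which materializes (when the value is needed in a
// GPR) as a single "fmov w0, s0".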
8206let AddedComplexity = 10 in {
8207  def : Pat<(i64 (extractelt (v2i64 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, dsub)>;
8208  def : Pat<(i32 (extractelt (v4i32 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, ssub)>;
8209  def : Pat<(i32 (extractelt (v2i32 V64:$V), (i64 0))), (EXTRACT_SUBREG V64:$V, ssub)>;
8210}
8211
8212// dot_v4i8
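// The PatFrags below recognize an i32 dot product built from two v4i8 vectors
// loaded a byte at a time and widened: four [sz]ext loads at offsets 0..3 from
// each base pointer, multiplied pairwise and summed. With +dotprod available,
// the whole tree collapses to one [SU]DOT against a zeroed accumulator, with
// each v4i8 reloaded as a single 32-bit word.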
8213class mul_v4i8<SDPatternOperator ldop> :
8214  PatFrag<(ops node:$Rn, node:$Rm, node:$offset),
8215          (mul (ldop (add node:$Rn, node:$offset)),
8216               (ldop (add node:$Rm, node:$offset)))>;
8217class mulz_v4i8<SDPatternOperator ldop> :
8218  PatFrag<(ops node:$Rn, node:$Rm),
8219          (mul (ldop node:$Rn), (ldop node:$Rm))>;
8220
8221def load_v4i8 :
8222  OutPatFrag<(ops node:$R),
8223             (INSERT_SUBREG
8224              (v2i32 (IMPLICIT_DEF)),
8225               (i32 (COPY_TO_REGCLASS (LDRWui node:$R, (i64 0)), FPR32)),
8226              ssub)>;
8227
8228class dot_v4i8<Instruction DOT, SDPatternOperator ldop> :
8229  Pat<(i32 (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 3)),
8230           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 2)),
8231           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 1)),
8232                (mulz_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm))))),
8233      (EXTRACT_SUBREG (i64 (DOT (DUPv2i32gpr WZR),
8234                                (load_v4i8 GPR64sp:$Rn),
8235                                (load_v4i8 GPR64sp:$Rm))),
8236                      sub_32)>, Requires<[HasDotProd]>;
8237
8238// dot_v8i8
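// Likewise for v8i8: a reduction (via AArch64uaddv) of the widening multiplies
// of two v8i8 vectors becomes one [SU]DOT into a zeroed v2i32 accumulator,
// followed by a pair-wise ADDP to fold the two accumulator lanes together.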
8239class ee_v8i8<SDPatternOperator extend> :
8240  PatFrag<(ops node:$V, node:$K),
8241          (v4i16 (extract_subvector (v8i16 (extend node:$V)), node:$K))>;
8242
8243class mul_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
8244  PatFrag<(ops node:$M, node:$N, node:$K),
8245          (mulop (v4i16 (ee_v8i8<extend> node:$M, node:$K)),
8246                 (v4i16 (ee_v8i8<extend> node:$N, node:$K)))>;
8247
8248class idot_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
8249  PatFrag<(ops node:$M, node:$N),
8250          (i32 (extractelt
8251           (v4i32 (AArch64uaddv
8252            (add (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 0)),
8253                 (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 4))))),
8254           (i64 0)))>;
8255
8256// vaddv_[su]32 is special: lowers to "ADDP Vd.2S, Vn.2S, Vm.2S" with Vn == Vm; result is Vd.s[0].
8257def VADDV_32 : OutPatFrag<(ops node:$R), (ADDPv2i32 node:$R, node:$R)>;
8258
8259class odot_v8i8<Instruction DOT> :
8260  OutPatFrag<(ops node:$Vm, node:$Vn),
8261             (EXTRACT_SUBREG
8262              (VADDV_32
8263               (i64 (DOT (DUPv2i32gpr WZR),
8264                         (v8i8 node:$Vm),
8265                         (v8i8 node:$Vn)))),
8266              sub_32)>;
8267
8268class dot_v8i8<Instruction DOT, SDPatternOperator mulop,
8269                    SDPatternOperator extend> :
8270  Pat<(idot_v8i8<mulop, extend> V64:$Vm, V64:$Vn),
8271      (odot_v8i8<DOT> V64:$Vm, V64:$Vn)>,
8272  Requires<[HasDotProd]>;
8273
8274// dot_v16i8
8275class ee_v16i8<SDPatternOperator extend> :
8276  PatFrag<(ops node:$V, node:$K1, node:$K2),
8277          (v4i16 (extract_subvector
8278           (v8i16 (extend
8279            (v8i8 (extract_subvector node:$V, node:$K1)))), node:$K2))>;
8280
8281class mul_v16i8<SDPatternOperator mulop, SDPatternOperator extend> :
8282  PatFrag<(ops node:$M, node:$N, node:$K1, node:$K2),
8283          (v4i32
8284           (mulop (v4i16 (ee_v16i8<extend> node:$M, node:$K1, node:$K2)),
8285                  (v4i16 (ee_v16i8<extend> node:$N, node:$K1, node:$K2))))>;
8286
8287class idot_v16i8<SDPatternOperator m, SDPatternOperator x> :
8288  PatFrag<(ops node:$M, node:$N),
8289          (i32 (extractelt
8290           (v4i32 (AArch64uaddv
8291            (add
8292             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 0)),
8293                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 0))),
8294             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 4)),
8295                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 4)))))),
8296           (i64 0)))>;
8297
8298class odot_v16i8<Instruction DOT> :
8299  OutPatFrag<(ops node:$Vm, node:$Vn),
8300             (i32 (ADDVv4i32v
8301              (DOT (DUPv4i32gpr WZR), node:$Vm, node:$Vn)))>;
8302
8303class dot_v16i8<Instruction DOT, SDPatternOperator mulop,
8304                SDPatternOperator extend> :
8305  Pat<(idot_v16i8<mulop, extend> V128:$Vm, V128:$Vn),
8306      (odot_v16i8<DOT> V128:$Vm, V128:$Vn)>,
8307  Requires<[HasDotProd]>;
8308
8309let AddedComplexity = 10 in {
8310  def : dot_v4i8<SDOTv8i8, sextloadi8>;
8311  def : dot_v4i8<UDOTv8i8, zextloadi8>;
8312  def : dot_v8i8<SDOTv8i8, AArch64smull, sext>;
8313  def : dot_v8i8<UDOTv8i8, AArch64umull, zext>;
8314  def : dot_v16i8<SDOTv16i8, AArch64smull, sext>;
8315  def : dot_v16i8<UDOTv16i8, AArch64umull, zext>;
8316
8317  // FIXME: add patterns to generate vector by element dot product.
8318  // FIXME: add SVE dot-product patterns.
8319}
8320
8321// Custom DAG nodes and isel rules to make a 64-byte block out of eight GPRs,
8322// so that it can be used as input to inline asm, and vice versa.
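// (For reference: ACLE exposes these through the __arm_ld64b/__arm_st64b
// builtins operating on data512_t, a struct of eight uint64_t, which is what
// is modelled as i64x8 here.)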
8323def LS64_BUILD : SDNode<"AArch64ISD::LS64_BUILD", SDTypeProfile<1, 8, []>>;
8324def LS64_EXTRACT : SDNode<"AArch64ISD::LS64_EXTRACT", SDTypeProfile<1, 2, []>>;
8325def : Pat<(i64x8 (LS64_BUILD GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3,
8326                             GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7)),
8327          (REG_SEQUENCE GPR64x8Class,
8328              $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3,
8329              $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7)>;
8330foreach i = 0-7 in {
8331  def : Pat<(i64 (LS64_EXTRACT (i64x8 GPR64x8:$val), (i32 i))),
8332            (EXTRACT_SUBREG $val, !cast<SubRegIndex>("x8sub_"#i))>;
8333}
8334
8335let Predicates = [HasLS64] in {
8336  def LD64B: LoadStore64B<0b101, "ld64b", (ins GPR64sp:$Rn),
8337                                          (outs GPR64x8:$Rt)>;
8338  def ST64B: LoadStore64B<0b001, "st64b", (ins GPR64x8:$Rt, GPR64sp:$Rn),
8339                                          (outs)>;
8340  def ST64BV:   Store64BV<0b011, "st64bv">;
8341  def ST64BV0:  Store64BV<0b010, "st64bv0">;
8342
8343  class ST64BPattern<Intrinsic intrinsic, Instruction instruction>
8344    : Pat<(intrinsic GPR64sp:$addr, GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3, GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7),
8345          (instruction (REG_SEQUENCE GPR64x8Class, $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3, $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7), $addr)>;
8346
8347  def : ST64BPattern<int_aarch64_st64b, ST64B>;
8348  def : ST64BPattern<int_aarch64_st64bv, ST64BV>;
8349  def : ST64BPattern<int_aarch64_st64bv0, ST64BV0>;
8350}
8351
8352let Predicates = [HasMOPS] in {
8353  let Defs = [NZCV] in {
8354    defm CPYFP : MOPSMemoryCopyInsns<0b00, "cpyfp">;
8355
8356    defm CPYP : MOPSMemoryMoveInsns<0b00, "cpyp">;
8357
8358    defm SETP : MOPSMemorySetInsns<0b00, "setp">;
8359  }
8360  let Uses = [NZCV] in {
8361    defm CPYFM : MOPSMemoryCopyInsns<0b01, "cpyfm">;
8362    defm CPYFE : MOPSMemoryCopyInsns<0b10, "cpyfe">;
8363
8364    defm CPYM : MOPSMemoryMoveInsns<0b01, "cpym">;
8365    defm CPYE : MOPSMemoryMoveInsns<0b10, "cpye">;
8366
8367    defm SETM : MOPSMemorySetInsns<0b01, "setm">;
8368    defm SETE : MOPSMemorySetInsns<0b10, "sete">;
8369  }
8370}
8371let Predicates = [HasMOPS, HasMTE] in {
8372  let Defs = [NZCV] in {
8373    defm SETGP     : MOPSMemorySetTaggingInsns<0b00, "setgp">;
8374  }
8375  let Uses = [NZCV] in {
8376    defm SETGM     : MOPSMemorySetTaggingInsns<0b01, "setgm">;
8377    // Can't use SETGE because it's a reserved name in TargetSelectionDAG.td
8378    defm MOPSSETGE : MOPSMemorySetTaggingInsns<0b10, "setge">;
8379  }
8380}
8381
8382// MOPS Node operands: 0: Dst, 1: Src or Value, 2: Size, 3: Chain
8383// MOPS Node results: 0: Dst writeback, 1: Size writeback, 2: Chain
8384def SDT_AArch64mops : SDTypeProfile<2, 3, [ SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2> ]>;
8385def AArch64mops_memset : SDNode<"AArch64ISD::MOPS_MEMSET", SDT_AArch64mops>;
8386def AArch64mops_memset_tagging : SDNode<"AArch64ISD::MOPS_MEMSET_TAGGING", SDT_AArch64mops>;
8387def AArch64mops_memcopy : SDNode<"AArch64ISD::MOPS_MEMCOPY", SDT_AArch64mops>;
8388def AArch64mops_memmove : SDNode<"AArch64ISD::MOPS_MEMMOVE", SDT_AArch64mops>;
8389
8390// MOPS operations always contain three 4-byte instructions
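// For example (illustrative), a memcpy lowered through MOPS_MEMCOPY expands to
// the prologue/main/epilogue triple:
//   cpyfp [x0]!, [x1]!, x2!
//   cpyfm [x0]!, [x1]!, x2!
//   cpyfe [x0]!, [x1]!, x2!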
8391let Predicates = [HasMOPS], Defs = [NZCV], Size = 12, mayStore = 1 in {
8392  let mayLoad = 1 in {
8393    def MOPSMemoryCopyPseudo : Pseudo<(outs GPR64common:$Rd_wb, GPR64common:$Rs_wb, GPR64:$Rn_wb),
8394                                      (ins GPR64common:$Rd, GPR64common:$Rs, GPR64:$Rn),
8395                                      [], "$Rd = $Rd_wb,$Rs = $Rs_wb,$Rn = $Rn_wb">, Sched<[]>;
8396    def MOPSMemoryMovePseudo : Pseudo<(outs GPR64common:$Rd_wb, GPR64common:$Rs_wb, GPR64:$Rn_wb),
8397                                      (ins GPR64common:$Rd, GPR64common:$Rs, GPR64:$Rn),
8398                                      [], "$Rd = $Rd_wb,$Rs = $Rs_wb,$Rn = $Rn_wb">, Sched<[]>;
8399  }
8400  let mayLoad = 0 in {
8401    def MOPSMemorySetPseudo  : Pseudo<(outs GPR64common:$Rd_wb, GPR64:$Rn_wb),
8402                                      (ins GPR64common:$Rd, GPR64:$Rn, GPR64:$Rm),
8403                                      [], "$Rd = $Rd_wb,$Rn = $Rn_wb">, Sched<[]>;
8404  }
8405}
8406let Predicates = [HasMOPS, HasMTE], Defs = [NZCV], Size = 12, mayLoad = 0, mayStore = 1 in {
8407  def MOPSMemorySetTaggingPseudo : Pseudo<(outs GPR64common:$Rd_wb, GPR64:$Rn_wb),
8408                                          (ins GPR64common:$Rd, GPR64:$Rn, GPR64:$Rm),
8409                                          [], "$Rd = $Rd_wb,$Rn = $Rn_wb">, Sched<[]>;
8410}
8411
8412// This gets lowered into an instruction sequence of 20 bytes
8413let Defs = [X16, X17], mayStore = 1, isCodeGenOnly = 1, Size = 20 in
8414def StoreSwiftAsyncContext
8415      : Pseudo<(outs), (ins GPR64:$ctx, GPR64sp:$base, simm9:$offset),
8416               []>, Sched<[]>;
8417
8418def AArch64AssertZExtBool : SDNode<"AArch64ISD::ASSERT_ZEXT_BOOL", SDT_assert>;
8419def : Pat<(AArch64AssertZExtBool GPR32:$op),
8420          (i32 GPR32:$op)>;
8421
8422include "AArch64InstrAtomics.td"
8423include "AArch64SVEInstrInfo.td"
8424include "AArch64SMEInstrInfo.td"
8425include "AArch64InstrGISel.td"
8426