//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// AArch64 Instruction definitions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
//
def HasV8_1a         : Predicate<"Subtarget->hasV8_1aOps()">,
                                 AssemblerPredicateWithAll<(all_of HasV8_1aOps), "armv8.1a">;
def HasV8_2a         : Predicate<"Subtarget->hasV8_2aOps()">,
                                 AssemblerPredicateWithAll<(all_of HasV8_2aOps), "armv8.2a">;
def HasV8_3a         : Predicate<"Subtarget->hasV8_3aOps()">,
                                 AssemblerPredicateWithAll<(all_of HasV8_3aOps), "armv8.3a">;
def HasV8_4a         : Predicate<"Subtarget->hasV8_4aOps()">,
                                 AssemblerPredicateWithAll<(all_of HasV8_4aOps), "armv8.4a">;
def HasV8_5a         : Predicate<"Subtarget->hasV8_5aOps()">,
                                 AssemblerPredicateWithAll<(all_of HasV8_5aOps), "armv8.5a">;
def HasV8_6a         : Predicate<"Subtarget->hasV8_6aOps()">,
                                 AssemblerPredicateWithAll<(all_of HasV8_6aOps), "armv8.6a">;
def HasV8_7a         : Predicate<"Subtarget->hasV8_7aOps()">,
                                 AssemblerPredicateWithAll<(all_of HasV8_7aOps), "armv8.7a">;
def HasV9_0a         : Predicate<"Subtarget->hasV9_0aOps()">,
                                 AssemblerPredicateWithAll<(all_of HasV9_0aOps), "armv9-a">;
def HasV9_1a         : Predicate<"Subtarget->hasV9_1aOps()">,
                                 AssemblerPredicateWithAll<(all_of HasV9_1aOps), "armv9.1a">;
def HasV9_2a         : Predicate<"Subtarget->hasV9_2aOps()">,
                                 AssemblerPredicateWithAll<(all_of HasV9_2aOps), "armv9.2a">;
def HasV9_3a         : Predicate<"Subtarget->hasV9_3aOps()">,
                                 AssemblerPredicateWithAll<(all_of HasV9_3aOps), "armv9.3a">;
def HasV8_0r         : Predicate<"Subtarget->hasV8_0rOps()">,
                                 AssemblerPredicateWithAll<(all_of HasV8_0rOps), "armv8-r">;

def HasEL2VMSA       : Predicate<"Subtarget->hasEL2VMSA()">,
                       AssemblerPredicateWithAll<(all_of FeatureEL2VMSA), "el2vmsa">;

def HasEL3           : Predicate<"Subtarget->hasEL3()">,
                       AssemblerPredicateWithAll<(all_of FeatureEL3), "el3">;

def HasVH            : Predicate<"Subtarget->hasVH()">,
                       AssemblerPredicateWithAll<(all_of FeatureVH), "vh">;

def HasLOR           : Predicate<"Subtarget->hasLOR()">,
                       AssemblerPredicateWithAll<(all_of FeatureLOR), "lor">;

def HasPAuth         : Predicate<"Subtarget->hasPAuth()">,
                       AssemblerPredicateWithAll<(all_of FeaturePAuth), "pauth">;

def HasJS            : Predicate<"Subtarget->hasJS()">,
                       AssemblerPredicateWithAll<(all_of FeatureJS), "jsconv">;

def HasCCIDX         : Predicate<"Subtarget->hasCCIDX()">,
                       AssemblerPredicateWithAll<(all_of FeatureCCIDX), "ccidx">;

def HasComplxNum     : Predicate<"Subtarget->hasComplxNum()">,
                       AssemblerPredicateWithAll<(all_of FeatureComplxNum), "complxnum">;

def HasNV            : Predicate<"Subtarget->hasNV()">,
                       AssemblerPredicateWithAll<(all_of FeatureNV), "nv">;

def HasMPAM          : Predicate<"Subtarget->hasMPAM()">,
                       AssemblerPredicateWithAll<(all_of FeatureMPAM), "mpam">;

def HasDIT           : Predicate<"Subtarget->hasDIT()">,
                       AssemblerPredicateWithAll<(all_of FeatureDIT), "dit">;

def HasTRACEV8_4     : Predicate<"Subtarget->hasTRACEV8_4()">,
                       AssemblerPredicateWithAll<(all_of FeatureTRACEV8_4), "tracev8.4">;

def HasAM            : Predicate<"Subtarget->hasAM()">,
                       AssemblerPredicateWithAll<(all_of FeatureAM), "am">;

def HasSEL2          : Predicate<"Subtarget->hasSEL2()">,
                       AssemblerPredicateWithAll<(all_of FeatureSEL2), "sel2">;

def HasTLB_RMI       : Predicate<"Subtarget->hasTLB_RMI()">,
                       AssemblerPredicateWithAll<(all_of FeatureTLB_RMI), "tlb-rmi">;

def HasFlagM         : Predicate<"Subtarget->hasFlagM()">,
                       AssemblerPredicateWithAll<(all_of FeatureFlagM), "flagm">;

def HasRCPC_IMMO     : Predicate<"Subtarget->hasRCPCImm()">,
                       AssemblerPredicateWithAll<(all_of FeatureRCPC_IMMO), "rcpc-immo">;

def HasFPARMv8       : Predicate<"Subtarget->hasFPARMv8()">,
                                 AssemblerPredicateWithAll<(all_of FeatureFPARMv8), "fp-armv8">;
def HasNEON          : Predicate<"Subtarget->hasNEON()">,
                                 AssemblerPredicateWithAll<(all_of FeatureNEON), "neon">;
def HasCrypto        : Predicate<"Subtarget->hasCrypto()">,
                                 AssemblerPredicateWithAll<(all_of FeatureCrypto), "crypto">;
def HasSM4           : Predicate<"Subtarget->hasSM4()">,
                                 AssemblerPredicateWithAll<(all_of FeatureSM4), "sm4">;
def HasSHA3          : Predicate<"Subtarget->hasSHA3()">,
                                 AssemblerPredicateWithAll<(all_of FeatureSHA3), "sha3">;
def HasSHA2          : Predicate<"Subtarget->hasSHA2()">,
                                 AssemblerPredicateWithAll<(all_of FeatureSHA2), "sha2">;
def HasAES           : Predicate<"Subtarget->hasAES()">,
                                 AssemblerPredicateWithAll<(all_of FeatureAES), "aes">;
def HasDotProd       : Predicate<"Subtarget->hasDotProd()">,
                                 AssemblerPredicateWithAll<(all_of FeatureDotProd), "dotprod">;
def HasCRC           : Predicate<"Subtarget->hasCRC()">,
                                 AssemblerPredicateWithAll<(all_of FeatureCRC), "crc">;
def HasLSE           : Predicate<"Subtarget->hasLSE()">,
                                 AssemblerPredicateWithAll<(all_of FeatureLSE), "lse">;
def HasNoLSE         : Predicate<"!Subtarget->hasLSE()">;
def HasRAS           : Predicate<"Subtarget->hasRAS()">,
                                 AssemblerPredicateWithAll<(all_of FeatureRAS), "ras">;
def HasRDM           : Predicate<"Subtarget->hasRDM()">,
                                 AssemblerPredicateWithAll<(all_of FeatureRDM), "rdm">;
def HasPerfMon       : Predicate<"Subtarget->hasPerfMon()">;
def HasFullFP16      : Predicate<"Subtarget->hasFullFP16()">,
                                 AssemblerPredicateWithAll<(all_of FeatureFullFP16), "fullfp16">;
def HasFP16FML       : Predicate<"Subtarget->hasFP16FML()">,
                                 AssemblerPredicateWithAll<(all_of FeatureFP16FML), "fp16fml">;
def HasSPE           : Predicate<"Subtarget->hasSPE()">,
                                 AssemblerPredicateWithAll<(all_of FeatureSPE), "spe">;
def HasFuseAES       : Predicate<"Subtarget->hasFuseAES()">,
                                 AssemblerPredicateWithAll<(all_of FeatureFuseAES),
                                 "fuse-aes">;
def HasSVE           : Predicate<"Subtarget->hasSVE()">,
                                 AssemblerPredicateWithAll<(all_of FeatureSVE), "sve">;
def HasSVE2          : Predicate<"Subtarget->hasSVE2()">,
                                 AssemblerPredicateWithAll<(all_of FeatureSVE2), "sve2">;
def HasSVE2AES       : Predicate<"Subtarget->hasSVE2AES()">,
                                 AssemblerPredicateWithAll<(all_of FeatureSVE2AES), "sve2-aes">;
def HasSVE2SM4       : Predicate<"Subtarget->hasSVE2SM4()">,
                                 AssemblerPredicateWithAll<(all_of FeatureSVE2SM4), "sve2-sm4">;
def HasSVE2SHA3      : Predicate<"Subtarget->hasSVE2SHA3()">,
                                 AssemblerPredicateWithAll<(all_of FeatureSVE2SHA3), "sve2-sha3">;
def HasSVE2BitPerm   : Predicate<"Subtarget->hasSVE2BitPerm()">,
                                 AssemblerPredicateWithAll<(all_of FeatureSVE2BitPerm), "sve2-bitperm">;
def HasSME           : Predicate<"Subtarget->hasSME()">,
                                 AssemblerPredicateWithAll<(all_of FeatureSME), "sme">;
def HasSMEF64        : Predicate<"Subtarget->hasSMEF64()">,
                                 AssemblerPredicateWithAll<(all_of FeatureSMEF64), "sme-f64">;
def HasSMEI64        : Predicate<"Subtarget->hasSMEI64()">,
                                 AssemblerPredicateWithAll<(all_of FeatureSMEI64), "sme-i64">;
// A subset of SVE(2) instructions are legal in Streaming SVE execution mode;
// they should be enabled if either feature has been specified.
def HasSVEorSME
    : Predicate<"Subtarget->hasSVE() || Subtarget->hasSME()">,
                AssemblerPredicateWithAll<(any_of FeatureSVE, FeatureSME),
                "sve or sme">;
def HasSVE2orSME
    : Predicate<"Subtarget->hasSVE2() || Subtarget->hasSME()">,
                AssemblerPredicateWithAll<(any_of FeatureSVE2, FeatureSME),
                "sve2 or sme">;
// A subset of NEON instructions are legal in Streaming SVE execution mode;
// they should be enabled if either feature has been specified.
def HasNEONorSME
    : Predicate<"Subtarget->hasNEON() || Subtarget->hasSME()">,
                AssemblerPredicateWithAll<(any_of FeatureNEON, FeatureSME),
                "neon or sme">;
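// Illustrative sketch (editorial addition, not a definition from this file):
// the combined predicates gate definitions exactly like single-feature ones,
// e.g.
//   let Predicates = [HasSVEorSME] in
//   defm SOME_SHARED_INST : ...;  // SOME_SHARED_INST is a hypothetical name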
def HasRCPC          : Predicate<"Subtarget->hasRCPC()">,
                                 AssemblerPredicateWithAll<(all_of FeatureRCPC), "rcpc">;
def HasLDAPR         : Predicate<"Subtarget->hasLDAPR()">,
                                 AssemblerPredicateWithAll<(all_of FeatureLDAPR), "ldapr">;
def HasAltNZCV       : Predicate<"Subtarget->hasAlternativeNZCV()">,
                       AssemblerPredicateWithAll<(all_of FeatureAltFPCmp), "altnzcv">;
def HasFRInt3264     : Predicate<"Subtarget->hasFRInt3264()">,
                       AssemblerPredicateWithAll<(all_of FeatureFRInt3264), "frint3264">;
def HasSB            : Predicate<"Subtarget->hasSB()">,
                       AssemblerPredicateWithAll<(all_of FeatureSB), "sb">;
def HasPredRes       : Predicate<"Subtarget->hasPredRes()">,
                       AssemblerPredicateWithAll<(all_of FeaturePredRes), "predres">;
def HasCCDP          : Predicate<"Subtarget->hasCCDP()">,
                       AssemblerPredicateWithAll<(all_of FeatureCacheDeepPersist), "ccdp">;
def HasBTI           : Predicate<"Subtarget->hasBTI()">,
                       AssemblerPredicateWithAll<(all_of FeatureBranchTargetId), "bti">;
def HasMTE           : Predicate<"Subtarget->hasMTE()">,
                       AssemblerPredicateWithAll<(all_of FeatureMTE), "mte">;
def HasTME           : Predicate<"Subtarget->hasTME()">,
                       AssemblerPredicateWithAll<(all_of FeatureTME), "tme">;
def HasETE           : Predicate<"Subtarget->hasETE()">,
                       AssemblerPredicateWithAll<(all_of FeatureETE), "ete">;
def HasTRBE          : Predicate<"Subtarget->hasTRBE()">,
                       AssemblerPredicateWithAll<(all_of FeatureTRBE), "trbe">;
def HasBF16          : Predicate<"Subtarget->hasBF16()">,
                       AssemblerPredicateWithAll<(all_of FeatureBF16), "bf16">;
def HasMatMulInt8    : Predicate<"Subtarget->hasMatMulInt8()">,
                       AssemblerPredicateWithAll<(all_of FeatureMatMulInt8), "i8mm">;
def HasMatMulFP32    : Predicate<"Subtarget->hasMatMulFP32()">,
                       AssemblerPredicateWithAll<(all_of FeatureMatMulFP32), "f32mm">;
def HasMatMulFP64    : Predicate<"Subtarget->hasMatMulFP64()">,
                       AssemblerPredicateWithAll<(all_of FeatureMatMulFP64), "f64mm">;
def HasXS            : Predicate<"Subtarget->hasXS()">,
                       AssemblerPredicateWithAll<(all_of FeatureXS), "xs">;
def HasWFxT          : Predicate<"Subtarget->hasWFxT()">,
                       AssemblerPredicateWithAll<(all_of FeatureWFxT), "wfxt">;
def HasLS64          : Predicate<"Subtarget->hasLS64()">,
                       AssemblerPredicateWithAll<(all_of FeatureLS64), "ls64">;
def HasBRBE          : Predicate<"Subtarget->hasBRBE()">,
                       AssemblerPredicateWithAll<(all_of FeatureBRBE), "brbe">;
def HasSPE_EEF       : Predicate<"Subtarget->hasSPE_EEF()">,
                       AssemblerPredicateWithAll<(all_of FeatureSPE_EEF), "spe-eef">;
def HasHBC           : Predicate<"Subtarget->hasHBC()">,
                       AssemblerPredicateWithAll<(all_of FeatureHBC), "hbc">;
def HasMOPS          : Predicate<"Subtarget->hasMOPS()">,
                       AssemblerPredicateWithAll<(all_of FeatureMOPS), "mops">;
def IsLE             : Predicate<"Subtarget->isLittleEndian()">;
def IsBE             : Predicate<"!Subtarget->isLittleEndian()">;
def IsWindows        : Predicate<"Subtarget->isTargetWindows()">;
def UseExperimentalZeroingPseudos
    : Predicate<"Subtarget->useExperimentalZeroingPseudos()">;
def UseAlternateSExtLoadCVTF32
    : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;

def UseNegativeImmediates
    : Predicate<"false">, AssemblerPredicate<(all_of (not FeatureNoNegativeImmediates)),
                                             "NegativeImmediates">;
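// (Editorial note, hedged: when FeatureNoNegativeImmediates is absent, the
// assembler may rewrite a negative immediate into the complementary
// instruction, e.g. accepting "add w0, w1, #-8" as "sub w0, w1, #8".)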

def UseScalarIncVL : Predicate<"Subtarget->useScalarIncVL()">;

def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER",
                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                       SDTCisInt<1>]>>;


//===----------------------------------------------------------------------===//
// AArch64-specific DAG Nodes.
//

// SDTBinaryArithWithFlagsOut - RES1, FLAGS = op LHS, RHS
def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
                                              [SDTCisSameAs<0, 2>,
                                               SDTCisSameAs<0, 3>,
                                               SDTCisInt<0>, SDTCisVT<1, i32>]>;

// SDTBinaryArithWithFlagsIn - RES = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
                                            [SDTCisSameAs<0, 1>,
                                             SDTCisSameAs<0, 2>,
                                             SDTCisInt<0>,
                                             SDTCisVT<3, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES1, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                            [SDTCisSameAs<0, 2>,
                                             SDTCisSameAs<0, 3>,
                                             SDTCisInt<0>,
                                             SDTCisVT<1, i32>,
                                             SDTCisVT<4, i32>]>;
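// For reference: SDTypeProfile<NumResults, NumOperands, Constraints> numbers
// its constraint indices across results first, then operands. In
// SDTBinaryArithWithFlagsOut above, indices 0-1 are RES1/FLAGS and 2-3 are
// LHS/RHS, so SDTCisSameAs<0, 2> ties the result type to the LHS type.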

def SDT_AArch64Brcond  : SDTypeProfile<0, 3,
                                     [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
                                      SDTCisVT<2, i32>]>;
def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
                                        SDTCisVT<2, OtherVT>]>;


def SDT_AArch64CSel  : SDTypeProfile<1, 4,
                                   [SDTCisSameAs<0, 1>,
                                    SDTCisSameAs<0, 2>,
                                    SDTCisInt<3>,
                                    SDTCisVT<4, i32>]>;
def SDT_AArch64CCMP : SDTypeProfile<1, 5,
                                    [SDTCisVT<0, i32>,
                                     SDTCisInt<1>,
                                     SDTCisSameAs<1, 2>,
                                     SDTCisInt<3>,
                                     SDTCisInt<4>,
                                     SDTCisVT<5, i32>]>;
def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
                                     [SDTCisVT<0, i32>,
                                      SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>,
                                      SDTCisInt<3>,
                                      SDTCisInt<4>,
                                      SDTCisVT<5, i32>]>;
def SDT_AArch64FCmp   : SDTypeProfile<0, 2,
                                   [SDTCisFP<0>,
                                    SDTCisSameAs<0, 1>]>;
def SDT_AArch64Dup   : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDT_AArch64DupLane   : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
def SDT_AArch64Insr  : SDTypeProfile<1, 2, [SDTCisVec<0>]>;
def SDT_AArch64Zip   : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                          SDTCisSameAs<0, 1>,
                                          SDTCisSameAs<0, 2>]>;
def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisInt<2>, SDTCisInt<3>]>;
def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                          SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDT_AArch64Dot: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                         SDTCisVec<2>, SDTCisSameAs<2,3>]>;

def SDT_AArch64vshiftinsert : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<3>,
                                                 SDTCisSameAs<0,1>,
                                                 SDTCisSameAs<0,2>]>;

def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
def SDT_AArch64fcmp  : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>]>;
def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>,
                                           SDTCisSameAs<0,3>]>;
def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;

def SDT_AArch64ITOF  : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;

def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
                                                 SDTCisPtrTy<1>]>;

def SDT_AArch64uaddlp : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;

def SDT_AArch64ldp : SDTypeProfile<2, 1, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stp : SDTypeProfile<0, 3, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stnp : SDTypeProfile<0, 3, [SDTCisVT<0, v4i32>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;

// Generates the general-dynamic TLS access sequence, i.e.
//  adrp  x0, :tlsdesc:var
//  ldr   x1, [x0, #:tlsdesc_lo12:var]
//  add   x0, x0, #:tlsdesc_lo12:var
//  .tlsdesccall var
//  blr   x1

// (the TPIDR_EL0 offset is put directly in X0, hence no "result" here)
// The profile's single operand is the variable being accessed.
def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0,1,
                                          [SDTCisPtrTy<0>]>;

def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
                                        [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
                                         SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
                                         SDTCisSameAs<1, 4>]>;

def SDT_AArch64TBL : SDTypeProfile<1, 2, [
  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>
]>;

// Non-extending masked load fragment.
def nonext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
         !cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
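// Illustrative sketch (editorial, hypothetical pattern): fragments like the
// one above are consumed by SVE load-selection patterns of roughly this
// shape, where LD1B_IMM stands in for whichever load instruction applies:
//   def : Pat<(nxv16i8 (nonext_masked_load GPR64:$ptr, (nxv16i1 PPR:$pg),
//                                          (nxv16i8 undef))),
//             (LD1B_IMM $pg, $ptr, 0)>;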
// Any/Zero extending masked load fragments.
def azext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def),[{
  return (cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD ||
          cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD) &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def azext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (azext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def azext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (azext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def azext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (azext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;
// Sign extending masked load fragments.
def sext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def sext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (sext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def sext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (sext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def sext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (sext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
         cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;

// Non-truncating masked store fragment.
def nontrunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         !cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;
// Truncating masked store fragments.
def trunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed();
}]>;
def trunc_masked_store_i8 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def trunc_masked_store_i16 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def trunc_masked_store_i32 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;

multiclass masked_gather_scatter<PatFrags GatherScatterOp> {
  // offsets = (signed)Index << sizeof(elt)
  def NAME#_signed_scaled :
    PatFrag<(ops node:$val, node:$pred, node:$ptr, node:$idx),
            (GatherScatterOp node:$val, node:$pred, node:$ptr, node:$idx),[{
    auto MGS = cast<MaskedGatherScatterSDNode>(N);
    bool Signed = MGS->isIndexSigned() ||
        MGS->getIndex().getValueType().getVectorElementType() == MVT::i64;
    return Signed && MGS->isIndexScaled();
  }]>;
  // offsets = (signed)Index
  def NAME#_signed_unscaled :
    PatFrag<(ops node:$val, node:$pred, node:$ptr, node:$idx),
            (GatherScatterOp node:$val, node:$pred, node:$ptr, node:$idx),[{
    auto MGS = cast<MaskedGatherScatterSDNode>(N);
    bool Signed = MGS->isIndexSigned() ||
        MGS->getIndex().getValueType().getVectorElementType() == MVT::i64;
    return Signed && !MGS->isIndexScaled();
  }]>;
  // offsets = (unsigned)Index << sizeof(elt)
  def NAME#_unsigned_scaled :
    PatFrag<(ops node:$val, node:$pred, node:$ptr, node:$idx),
            (GatherScatterOp node:$val, node:$pred, node:$ptr, node:$idx),[{
    auto MGS = cast<MaskedGatherScatterSDNode>(N);
    bool Signed = MGS->isIndexSigned() ||
        MGS->getIndex().getValueType().getVectorElementType() == MVT::i64;
    return !Signed && MGS->isIndexScaled();
  }]>;
  // offsets = (unsigned)Index
  def NAME#_unsigned_unscaled :
    PatFrag<(ops node:$val, node:$pred, node:$ptr, node:$idx),
            (GatherScatterOp node:$val, node:$pred, node:$ptr, node:$idx),[{
    auto MGS = cast<MaskedGatherScatterSDNode>(N);
    bool Signed = MGS->isIndexSigned() ||
        MGS->getIndex().getValueType().getVectorElementType() == MVT::i64;
    return !Signed && !MGS->isIndexScaled();
  }]>;
}
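// Each defm below therefore expands to four suffixed PatFrags, e.g.
// nonext_masked_gather_signed_scaled, nonext_masked_gather_signed_unscaled,
// nonext_masked_gather_unsigned_scaled and
// nonext_masked_gather_unsigned_unscaled.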

defm nonext_masked_gather    : masked_gather_scatter<nonext_masked_gather>;
defm azext_masked_gather_i8  : masked_gather_scatter<azext_masked_gather_i8>;
defm azext_masked_gather_i16 : masked_gather_scatter<azext_masked_gather_i16>;
defm azext_masked_gather_i32 : masked_gather_scatter<azext_masked_gather_i32>;
defm sext_masked_gather_i8   : masked_gather_scatter<sext_masked_gather_i8>;
defm sext_masked_gather_i16  : masked_gather_scatter<sext_masked_gather_i16>;
defm sext_masked_gather_i32  : masked_gather_scatter<sext_masked_gather_i32>;

defm nontrunc_masked_scatter  : masked_gather_scatter<nontrunc_masked_scatter>;
defm trunc_masked_scatter_i8  : masked_gather_scatter<trunc_masked_scatter_i8>;
defm trunc_masked_scatter_i16 : masked_gather_scatter<trunc_masked_scatter_i16>;
defm trunc_masked_scatter_i32 : masked_gather_scatter<trunc_masked_scatter_i32>;

// top16Zero - answer true if the upper 16 bits of $src are 0, false otherwise
def top16Zero: PatLeaf<(i32 GPR32:$src), [{
  return SDValue(N,0)->getValueType(0) == MVT::i32 &&
         CurDAG->MaskedValueIsZero(SDValue(N,0), APInt::getHighBitsSet(32, 16));
  }]>;

// top32Zero - answer true if the upper 32 bits of $src are 0, false otherwise
def top32Zero: PatLeaf<(i64 GPR64:$src), [{
  return SDValue(N,0)->getValueType(0) == MVT::i64 &&
         CurDAG->MaskedValueIsZero(SDValue(N,0), APInt::getHighBitsSet(64, 32));
  }]>;
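// Illustrative sketch (editorial; close to, but not quoted from, the actual
// users of these leaves): top32Zero lets a 64-bit multiply whose operands are
// known to fit in 32 bits be selected as a widening multiply, e.g.
//   def : Pat<(i64 (mul top32Zero:$Rn, top32Zero:$Rm)),
//             (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32),
//                        (EXTRACT_SUBREG $Rm, sub_32), XZR)>;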

// Node definitions.
def AArch64adrp          : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
def AArch64adr           : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
def AArch64addlow        : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
def AArch64LOADgot       : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
                                SDCallSeqStart<[ SDTCisVT<0, i32>,
                                                 SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOutGlue]>;
def AArch64callseq_end   : SDNode<"ISD::CALLSEQ_END",
                                SDCallSeqEnd<[ SDTCisVT<0, i32>,
                                               SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def AArch64call          : SDNode<"AArch64ISD::CALL",
                                SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                 SDNPVariadic]>;

def AArch64call_bti      : SDNode<"AArch64ISD::CALL_BTI",
                                SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                 SDNPVariadic]>;

def AArch64call_rvmarker: SDNode<"AArch64ISD::CALL_RVMARKER",
                             SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                             [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                              SDNPVariadic]>;

def AArch64brcond        : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
                                [SDNPHasChain]>;
def AArch64cbz           : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64cbnz          : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64tbz           : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;
def AArch64tbnz          : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;


def AArch64csel          : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
def AArch64csinv         : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
def AArch64csneg         : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
def AArch64csinc         : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
def AArch64retflag       : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
                                [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def AArch64adc       : SDNode<"AArch64ISD::ADC",  SDTBinaryArithWithFlagsIn>;
def AArch64sbc       : SDNode<"AArch64ISD::SBC",  SDTBinaryArithWithFlagsIn>;
def AArch64add_flag  : SDNode<"AArch64ISD::ADDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64sub_flag  : SDNode<"AArch64ISD::SUBS",  SDTBinaryArithWithFlagsOut>;
def AArch64and_flag  : SDNode<"AArch64ISD::ANDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64adc_flag  : SDNode<"AArch64ISD::ADCS",  SDTBinaryArithWithFlagsInOut>;
def AArch64sbc_flag  : SDNode<"AArch64ISD::SBCS",  SDTBinaryArithWithFlagsInOut>;

def AArch64ccmp      : SDNode<"AArch64ISD::CCMP",  SDT_AArch64CCMP>;
def AArch64ccmn      : SDNode<"AArch64ISD::CCMN",  SDT_AArch64CCMP>;
def AArch64fccmp     : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;

def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;

def AArch64fcmp         : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
def AArch64strict_fcmp  : SDNode<"AArch64ISD::STRICT_FCMP", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64strict_fcmpe : SDNode<"AArch64ISD::STRICT_FCMPE", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64any_fcmp     : PatFrags<(ops node:$lhs, node:$rhs),
                                   [(AArch64strict_fcmp node:$lhs, node:$rhs),
                                    (AArch64fcmp node:$lhs, node:$rhs)]>;

def AArch64dup       : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
def AArch64duplane8  : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;
def AArch64duplane128 : SDNode<"AArch64ISD::DUPLANE128", SDT_AArch64DupLane>;

def AArch64insr      : SDNode<"AArch64ISD::INSR", SDT_AArch64Insr>;

def AArch64zip1      : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
def AArch64zip2      : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
def AArch64uzp1      : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
def AArch64uzp2      : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
def AArch64trn1      : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
def AArch64trn2      : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;

def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;

def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
def AArch64ext : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;

def AArch64vashr : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
def AArch64vlshr : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
def AArch64sqshli : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
def AArch64uqshli : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
def AArch64srshri : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;
def AArch64vsli : SDNode<"AArch64ISD::VSLI", SDT_AArch64vshiftinsert>;
def AArch64vsri : SDNode<"AArch64ISD::VSRI", SDT_AArch64vshiftinsert>;

def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
def AArch64bsp: SDNode<"AArch64ISD::BSP", SDT_AArch64trivec>;

def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;

def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;

def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
                        (vnot (AArch64cmeqz (and node:$LHS, node:$RHS)))>;
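// i.e. per lane, cmtst(LHS, RHS) == ((LHS & RHS) != 0), expressed above as
// ~cmeqz(LHS & RHS).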

def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;

def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;

def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
                  [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

def AArch64Prefetch        : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
                               [SDNPHasChain, SDNPSideEffect]>;

def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;

def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
                                    SDT_AArch64TLSDescCallSeq,
                                    [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
                                     SDNPVariadic]>;


def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
                                 SDT_AArch64WrapperLarge>;

def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;

def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                    SDTCisSameAs<1, 2>]>;
def AArch64smull    : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull,
                             [SDNPCommutative]>;
def AArch64umull    : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull,
                             [SDNPCommutative]>;

def AArch64frecpe   : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
def AArch64frecps   : SDNode<"AArch64ISD::FRECPS", SDTFPBinOp>;
def AArch64frsqrte  : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
def AArch64frsqrts  : SDNode<"AArch64ISD::FRSQRTS", SDTFPBinOp>;

def AArch64sdot     : SDNode<"AArch64ISD::SDOT", SDT_AArch64Dot>;
def AArch64udot     : SDNode<"AArch64ISD::UDOT", SDT_AArch64Dot>;

def AArch64saddv    : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
def AArch64uaddv    : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
def AArch64sminv    : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
def AArch64uminv    : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
def AArch64smaxv    : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
def AArch64umaxv    : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;

def AArch64uabd     : PatFrags<(ops node:$lhs, node:$rhs),
                               [(abdu node:$lhs, node:$rhs),
                                (int_aarch64_neon_uabd node:$lhs, node:$rhs)]>;
def AArch64sabd     : PatFrags<(ops node:$lhs, node:$rhs),
                               [(abds node:$lhs, node:$rhs),
                                (int_aarch64_neon_sabd node:$lhs, node:$rhs)]>;

def AArch64addp_n   : SDNode<"AArch64ISD::ADDP", SDT_AArch64Zip>;
def AArch64uaddlp_n : SDNode<"AArch64ISD::UADDLP", SDT_AArch64uaddlp>;
def AArch64saddlp_n : SDNode<"AArch64ISD::SADDLP", SDT_AArch64uaddlp>;
def AArch64addp     : PatFrags<(ops node:$Rn, node:$Rm),
                               [(AArch64addp_n node:$Rn, node:$Rm),
                                (int_aarch64_neon_addp node:$Rn, node:$Rm)]>;
def AArch64uaddlp   : PatFrags<(ops node:$src),
                               [(AArch64uaddlp_n node:$src),
                                (int_aarch64_neon_uaddlp node:$src)]>;
def AArch64saddlp   : PatFrags<(ops node:$src),
                               [(AArch64saddlp_n node:$src),
                                (int_aarch64_neon_saddlp node:$src)]>;
def AArch64faddp    : PatFrags<(ops node:$Rn, node:$Rm),
                               [(AArch64addp_n node:$Rn, node:$Rm),
                                (int_aarch64_neon_faddp node:$Rn, node:$Rm)]>;

def SDT_AArch64SETTAG : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
def AArch64stg : SDNode<"AArch64ISD::STG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stzg : SDNode<"AArch64ISD::STZG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64st2g : SDNode<"AArch64ISD::ST2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stz2g : SDNode<"AArch64ISD::STZ2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def SDT_AArch64unpk : SDTypeProfile<1, 1, [
    SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>
]>;
def AArch64sunpkhi : SDNode<"AArch64ISD::SUNPKHI", SDT_AArch64unpk>;
def AArch64sunpklo : SDNode<"AArch64ISD::SUNPKLO", SDT_AArch64unpk>;
def AArch64uunpkhi : SDNode<"AArch64ISD::UUNPKHI", SDT_AArch64unpk>;
def AArch64uunpklo : SDNode<"AArch64ISD::UUNPKLO", SDT_AArch64unpk>;

def AArch64ldp : SDNode<"AArch64ISD::LDP", SDT_AArch64ldp, [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def AArch64stp : SDNode<"AArch64ISD::STP", SDT_AArch64stp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stnp : SDNode<"AArch64ISD::STNP", SDT_AArch64stnp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def AArch64tbl : SDNode<"AArch64ISD::TBL", SDT_AArch64TBL>;
def AArch64mrs : SDNode<"AArch64ISD::MRS",
                        SDTypeProfile<1, 1, [SDTCisVT<0, i64>, SDTCisVT<1, i32>]>,
                        [SDNPHasChain, SDNPOutGlue]>;

// Match an 'add' node, and also treat an 'or' node as an 'add' if the or'ed
// operands have no common bits.
def add_and_or_is_add : PatFrags<(ops node:$lhs, node:$rhs),
                         [(add node:$lhs, node:$rhs), (or node:$lhs, node:$rhs)],[{
   if (N->getOpcode() == ISD::ADD)
     return true;
   return CurDAG->haveNoCommonBitsSet(N->getOperand(0), N->getOperand(1));
}]> {
  let GISelPredicateCode = [{
     // Only handle G_ADD for now. FIXME: build the capability to compute
     // whether the operands of G_OR have common bits set or not.
     return MI.getOpcode() == TargetOpcode::G_ADD;
  }];
}
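// Worked example: in (or (shl x, (i64 4)), (and y, (i64 0xf))) the operands
// have disjoint known bits, so the 'or' is arithmetically an 'add' and can be
// matched by add-based patterns and addressing modes.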

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//

// AArch64 Instruction Predicate Definitions.
// We could compute these on a per-module basis, but doing so requires
// accessing the Function object through the <Target>Subtarget, and objections
// were raised to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
  def ForCodeSize   : Predicate<"shouldOptForSize(MF)">;
  def NotForCodeSize   : Predicate<"!shouldOptForSize(MF)">;
  // Avoid generating STRQro if it is slow, unless we're optimizing for code size.
  def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || shouldOptForSize(MF)">;

  def UseBTI : Predicate<[{ MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;
  def NotUseBTI : Predicate<[{ !MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;

  def SLSBLRMitigation : Predicate<[{ MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  def NoSLSBLRMitigation : Predicate<[{ !MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  // Toggles patterns which aren't beneficial in GlobalISel when we aren't
  // optimizing. This allows us to selectively use patterns without impacting
  // SelectionDAG's behaviour.
  // FIXME: One day there will probably be a nicer way to check for this, but
  // today is not that day.
  def OptimizedGISelOrOtherSelector : Predicate<"!MF->getFunction().hasOptNone() || MF->getProperties().hasProperty(MachineFunctionProperties::Property::FailedISel) || !MF->getProperties().hasProperty(MachineFunctionProperties::Property::Legalized)">;
}
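// (Editorial note: RecomputePerFunction = 1 makes the predicates above be
// re-evaluated for each MachineFunction instead of being cached once per
// subtarget, which is what allows them to query MF at all.)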

include "AArch64InstrFormats.td"
include "SVEInstrFormats.td"
include "SMEInstrFormats.td"

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Miscellaneous instructions.
//===----------------------------------------------------------------------===//

let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
// We set Sched to an empty list because we expect these instructions to
// simply be removed in most cases.
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(AArch64callseq_start timm:$amt1, timm:$amt2)]>,
                              Sched<[]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
                            Sched<[]>;
} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1
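// (Editorial note: the two pseudos above bracket each call's
// outgoing-argument sequence; $amt1/$amt2 carry the byte counts from
// CALLSEQ_START/CALLSEQ_END, and the pseudos are typically eliminated during
// frame lowering, hence the empty Sched list.)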

let isReMaterializable = 1, isCodeGenOnly = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions.  When that changes, they can be
// removed, along with the AArch64Wrapper node.

let AddedComplexity = 10 in
def LOADgot : Pseudo<(outs GPR64common:$dst), (ins i64imm:$addr),
                     [(set GPR64common:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
              Sched<[WriteLDAdr]>;

// The MOVaddr instruction should match only when the add is not folded
// into a load or store address.
def MOVaddr
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
                                            tglobaladdr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrJT
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
                                             tjumptable:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrCP
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
                                             tconstpool:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrBA
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
                                             tblockaddress:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrTLS
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
                                            tglobaltlsaddr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrEXT
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
                                            texternalsym:$low))]>,
      Sched<[WriteAdrAdr]>;
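// (Editorial note: after pseudo expansion each MOVaddr* above becomes an
// ADRP/ADD pair,
//   adrp x0, sym
//   add  x0, x0, :lo12:sym
// which is why remat must treat the pair as a single unit; see the FIXME at
// the top of this block.)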
// Normally AArch64addlow either gets folded into a following ldr/str,
// or together with an adrp into MOVaddr above. For cases with TLS, it
// might appear without either of them, so allow lowering it into a plain
// add.
def ADDlowTLS
    : Pseudo<(outs GPR64sp:$dst), (ins GPR64sp:$src, i64imm:$low),
             [(set GPR64sp:$dst, (AArch64addlow GPR64sp:$src,
                                            tglobaltlsaddr:$low))]>,
      Sched<[WriteAdr]>;

} // isReMaterializable, isCodeGenOnly

def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
          (LOADgot tglobaltlsaddr:$addr)>;

def : Pat<(AArch64LOADgot texternalsym:$addr),
          (LOADgot texternalsym:$addr)>;

def : Pat<(AArch64LOADgot tconstpool:$addr),
          (LOADgot tconstpool:$addr)>;

// In general these get lowered into a sequence of three 4-byte instructions.
// A 32-bit jump table destination actually needs only two instructions, since
// we can use the table itself as a PC-relative base; but that optimization
// occurs after branch relaxation, so be pessimistic here.
let Size = 12, Constraints = "@earlyclobber $dst,@earlyclobber $scratch",
    isNotDuplicable = 1 in {
def JumpTableDest32 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest16 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest8 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                            (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                     Sched<[]>;
}
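// (Editorial sketch of the assumed expansion, for orientation only:
// JumpTableDest32 becomes roughly
//   adr   $scratch, .LJTI<n>
//   ldrsw $dst, [$scratch, $entry, lsl #2]
//   add   $dst, $dst, $scratch
// with ldrh/ldrb-based loads in the 16- and 8-bit variants.)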

// Space-consuming pseudo to aid testing of placement and reachability
// algorithms. Immediate operand is the number of bytes this "instruction"
// occupies; register operands can be used to enforce dependency and constrain
// the scheduler.
let hasSideEffects = 1, mayLoad = 1, mayStore = 1 in
def SPACE : Pseudo<(outs GPR64:$Rd), (ins i32imm:$size, GPR64:$Rn),
                   [(set GPR64:$Rd, (int_aarch64_space imm:$size, GPR64:$Rn))]>,
            Sched<[]>;

let hasSideEffects = 1, isCodeGenOnly = 1 in {
  def SpeculationSafeValueX
      : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>;
  def SpeculationSafeValueW
      : Pseudo<(outs GPR32:$dst), (ins GPR32:$src), []>, Sched<[]>;
}

// SpeculationBarrierEndBB must only be used after an unconditional control
// flow, i.e. after a terminator for which isBarrier is True.
let hasSideEffects = 1, isCodeGenOnly = 1, isTerminator = 1, isBarrier = 1 in {
  // This gets lowered to a pair of 4-byte instructions (DSB + ISB).
  let Size = 8 in
  def SpeculationBarrierISBDSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
  // This gets lowered to a single 4-byte SB instruction.
  let Size = 4 in
  def SpeculationBarrierSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// System instructions.
//===----------------------------------------------------------------------===//

def HINT : HintI<"hint">;
def : InstAlias<"nop",  (HINT 0b000)>;
def : InstAlias<"yield",(HINT 0b001)>;
def : InstAlias<"wfe",  (HINT 0b010)>;
def : InstAlias<"wfi",  (HINT 0b011)>;
def : InstAlias<"sev",  (HINT 0b100)>;
def : InstAlias<"sevl", (HINT 0b101)>;
def : InstAlias<"dgh",  (HINT 0b110)>;
def : InstAlias<"esb",  (HINT 0b10000)>, Requires<[HasRAS]>;
def : InstAlias<"csdb", (HINT 20)>;
// In order to be able to write readable assembly, LLVM should accept assembly
// inputs that use Branch Target Identification mnemonics, even with BTI disabled.
// However, in order to be compatible with other assemblers (e.g. GAS), LLVM
// should not emit these mnemonics unless BTI is enabled.
def : InstAlias<"bti",  (HINT 32), 0>;
def : InstAlias<"bti $op", (HINT btihint_op:$op), 0>;
def : InstAlias<"bti",  (HINT 32)>, Requires<[HasBTI]>;
def : InstAlias<"bti $op", (HINT btihint_op:$op)>, Requires<[HasBTI]>;
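// (Editorial note: the trailing 0 on the first pair sets EmitPriority to
// zero, so the assembler always accepts the BTI mnemonics while the printer
// only emits them when HasBTI holds, per the second pair.)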

// v8.2a Statistical Profiling extension
def : InstAlias<"psb $op",  (HINT psbhint_op:$op)>, Requires<[HasSPE]>;

// As far as LLVM is concerned this writes to the system's exclusive monitors.
let mayLoad = 1, mayStore = 1 in
def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;

// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
// model patterns with sufficiently fine granularity.
let mayLoad = ?, mayStore = ? in {
def DMB   : CRmSystemI<barrier_op, 0b101, "dmb",
                       [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;

def DSB   : CRmSystemI<barrier_op, 0b100, "dsb",
                       [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;

def ISB   : CRmSystemI<barrier_op, 0b110, "isb",
                       [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;

def TSB   : CRmSystemI<barrier_op, 0b010, "tsb", []> {
  let CRm        = 0b0010;
  let Inst{12}   = 0;
  let Predicates = [HasTRACEV8_4];
}

def DSBnXS  : CRmSystemI<barrier_nxs_op, 0b001, "dsb"> {
  let CRm{1-0}   = 0b11;
  let Inst{9-8}  = 0b10;
  let Predicates = [HasXS];
}

let Predicates = [HasWFxT] in {
def WFET : RegInputSystemI<0b0000, 0b000, "wfet">;
def WFIT : RegInputSystemI<0b0000, 0b001, "wfit">;
}

// Branch Record Buffer two-word mnemonic instructions
class BRBEI<bits<3> op2, string keyword>
    : SimpleSystemI<0, (ins), "brb", keyword>, Sched<[WriteSys]> {
  let Inst{31-8} = 0b110101010000100101110010;
  let Inst{7-5} = op2;
  let Predicates = [HasBRBE];
}
def BRB_IALL: BRBEI<0b100, "\tiall">;
def BRB_INJ:  BRBEI<0b101, "\tinj">;

}

// Allow uppercase and lowercase keyword arguments for BRB IALL and BRB INJ
def : TokenAlias<"INJ", "inj">;
def : TokenAlias<"IALL", "iall">;

// ARMv8.2-A Dot Product
let Predicates = [HasDotProd] in {
defm SDOT : SIMDThreeSameVectorDot<0, 0, "sdot", AArch64sdot>;
defm UDOT : SIMDThreeSameVectorDot<1, 0, "udot", AArch64udot>;
defm SDOTlane : SIMDThreeSameVectorDotIndex<0, 0, 0b10, "sdot", AArch64sdot>;
defm UDOTlane : SIMDThreeSameVectorDotIndex<1, 0, 0b10, "udot", AArch64udot>;
}

// ARMv8.6-A BFloat
let Predicates = [HasNEON, HasBF16] in {
defm BFDOT       : SIMDThreeSameVectorBFDot<1, "bfdot">;
defm BF16DOTlane : SIMDThreeSameVectorBF16DotI<0, "bfdot">;
def BFMMLA       : SIMDThreeSameVectorBF16MatrixMul<"bfmmla">;
def BFMLALB      : SIMDBF16MLAL<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALT      : SIMDBF16MLAL<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFMLALBIdx   : SIMDBF16MLALIndex<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALTIdx   : SIMDBF16MLALIndex<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFCVTN       : SIMD_BFCVTN;
def BFCVTN2      : SIMD_BFCVTN2;

// Vector-scalar BFDOT:
// The second source operand of the 64-bit variant of BF16DOTlane is a 128-bit
// register (the instruction uses a single 32-bit lane from it), so the pattern
// is a bit tricky.
def : Pat<(v2f32 (int_aarch64_neon_bfdot
                    (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
                    (v4bf16 (bitconvert
                      (v2i32 (AArch64duplane32
                        (v4i32 (bitconvert
                          (v8bf16 (insert_subvector undef,
                            (v4bf16 V64:$Rm),
                            (i64 0))))),
                        VectorIndexS:$idx)))))),
          (BF16DOTlanev4bf16 (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
                             (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                             VectorIndexS:$idx)>;
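// (Editorial note: SUBREG_TO_REG places the 64-bit $Rm in the low half of a
// 128-bit register so that VectorIndexS:$idx can select one of its 32-bit
// lanes, matching the operand the instruction actually encodes.)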
}

let Predicates = [HasNEONorSME, HasBF16] in {
def BFCVT : BF16ToSinglePrecision<"bfcvt">;
}

// ARMv8.6-A AArch64 matrix multiplication
let Predicates = [HasMatMulInt8] in {
def  SMMLA : SIMDThreeSameVectorMatMul<0, 0, "smmla", int_aarch64_neon_smmla>;
def  UMMLA : SIMDThreeSameVectorMatMul<0, 1, "ummla", int_aarch64_neon_ummla>;
def USMMLA : SIMDThreeSameVectorMatMul<1, 0, "usmmla", int_aarch64_neon_usmmla>;
defm USDOT : SIMDThreeSameVectorDot<0, 1, "usdot", int_aarch64_neon_usdot>;
defm USDOTlane : SIMDThreeSameVectorDotIndex<0, 1, 0b10, "usdot", int_aarch64_neon_usdot>;

// sudot lane has a pattern where usdot is expected (there is no sudot
// intrinsic): sudot(acc, s, u[i]) is implemented as usdot(acc, dup(u[i]), s).
// The second operand is used in the dup operation to repeat the indexed
// element.
1039class BaseSIMDSUDOTIndex<bit Q, string dst_kind, string lhs_kind,
1040                         string rhs_kind, RegisterOperand RegType,
1041                         ValueType AccumType, ValueType InputType>
1042      : BaseSIMDThreeSameVectorDotIndex<Q, 0, 1, 0b00, "sudot", dst_kind,
1043                                        lhs_kind, rhs_kind, RegType, AccumType,
1044                                        InputType, null_frag> {
1045  let Pattern = [(set (AccumType RegType:$dst),
1046                      (AccumType (int_aarch64_neon_usdot (AccumType RegType:$Rd),
1047                                 (InputType (bitconvert (AccumType
1048                                    (AArch64duplane32 (v4i32 V128:$Rm),
1049                                        VectorIndexS:$idx)))),
1050                                 (InputType RegType:$Rn))))];
1051}
1052
1053multiclass SIMDSUDOTIndex {
1054  def v8i8  : BaseSIMDSUDOTIndex<0, ".2s", ".8b", ".4b", V64, v2i32, v8i8>;
1055  def v16i8 : BaseSIMDSUDOTIndex<1, ".4s", ".16b", ".4b", V128, v4i32, v16i8>;
1056}
1057
1058defm SUDOTlane : SIMDSUDOTIndex;
1059
1060}
1061
1062// ARMv8.2-A FP16 Fused Multiply-Add Long
1063let Predicates = [HasNEON, HasFP16FML] in {
1064defm FMLAL      : SIMDThreeSameVectorFML<0, 1, 0b001, "fmlal", int_aarch64_neon_fmlal>;
1065defm FMLSL      : SIMDThreeSameVectorFML<0, 1, 0b101, "fmlsl", int_aarch64_neon_fmlsl>;
1066defm FMLAL2     : SIMDThreeSameVectorFML<1, 0, 0b001, "fmlal2", int_aarch64_neon_fmlal2>;
1067defm FMLSL2     : SIMDThreeSameVectorFML<1, 0, 0b101, "fmlsl2", int_aarch64_neon_fmlsl2>;
1068defm FMLALlane  : SIMDThreeSameVectorFMLIndex<0, 0b0000, "fmlal", int_aarch64_neon_fmlal>;
1069defm FMLSLlane  : SIMDThreeSameVectorFMLIndex<0, 0b0100, "fmlsl", int_aarch64_neon_fmlsl>;
1070defm FMLAL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1000, "fmlal2", int_aarch64_neon_fmlal2>;
1071defm FMLSL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1100, "fmlsl2", int_aarch64_neon_fmlsl2>;
1072}
1073
1074// Armv8.2-A Crypto extensions
1075let Predicates = [HasSHA3] in {
1076def SHA512H   : CryptoRRRTied<0b0, 0b00, "sha512h">;
1077def SHA512H2  : CryptoRRRTied<0b0, 0b01, "sha512h2">;
1078def SHA512SU0 : CryptoRRTied_2D<0b0, 0b00, "sha512su0">;
1079def SHA512SU1 : CryptoRRRTied_2D<0b0, 0b10, "sha512su1">;
def RAX1      : CryptoRRR_2D<0b0, 0b11, "rax1">;
1081def EOR3      : CryptoRRRR_16B<0b00, "eor3">;
1082def BCAX      : CryptoRRRR_16B<0b01, "bcax">;
1083def XAR       : CryptoRRRi6<"xar">;
1084
1085class SHA3_pattern<Instruction INST, Intrinsic OpNode, ValueType VecTy>
1086  : Pat<(VecTy (OpNode (VecTy V128:$Vd), (VecTy V128:$Vn), (VecTy V128:$Vm))),
1087        (INST (VecTy V128:$Vd), (VecTy V128:$Vn), (VecTy V128:$Vm))>;
1088
1089def : Pat<(v2i64 (int_aarch64_crypto_sha512su0 (v2i64 V128:$Vn), (v2i64 V128:$Vm))),
1090          (SHA512SU0 (v2i64 V128:$Vn), (v2i64 V128:$Vm))>;
1091
1092def : SHA3_pattern<SHA512H, int_aarch64_crypto_sha512h, v2i64>;
1093def : SHA3_pattern<SHA512H2, int_aarch64_crypto_sha512h2, v2i64>;
1094def : SHA3_pattern<SHA512SU1, int_aarch64_crypto_sha512su1, v2i64>;
1095
1096def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v16i8>;
1097def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v8i16>;
1098def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v4i32>;
1099def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v2i64>;
1100
1101class EOR3_pattern<ValueType VecTy>
1102  : Pat<(xor (xor (VecTy V128:$Vn), (VecTy V128:$Vm)), (VecTy V128:$Va)),
1103        (EOR3 (VecTy V128:$Vn), (VecTy V128:$Vm), (VecTy V128:$Va))>;
1104
1105def : EOR3_pattern<v16i8>;
1106def : EOR3_pattern<v8i16>;
1107def : EOR3_pattern<v4i32>;
1108def : EOR3_pattern<v2i64>;
1109
1110class BCAX_pattern<ValueType VecTy>
1111  : Pat<(xor (VecTy V128:$Vn), (and (VecTy V128:$Vm), (vnot (VecTy V128:$Va)))),
1112        (BCAX (VecTy V128:$Vn), (VecTy V128:$Vm), (VecTy V128:$Va))>;
1113
1114def : BCAX_pattern<v16i8>;
1115def : BCAX_pattern<v8i16>;
1116def : BCAX_pattern<v4i32>;
1117def : BCAX_pattern<v2i64>;
1118
1119def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v16i8>;
1120def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v8i16>;
1121def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v4i32>;
1122def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v2i64>;
1123
1124def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v16i8>;
1125def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v8i16>;
1126def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v4i32>;
1127def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v2i64>;
1128
1129def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v16i8>;
1130def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v8i16>;
1131def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v4i32>;
1132def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v2i64>;
1133
1134def : Pat<(v2i64 (int_aarch64_crypto_rax1 (v2i64 V128:$Vn), (v2i64 V128:$Vm))),
1135          (RAX1 (v2i64 V128:$Vn), (v2i64 V128:$Vm))>;
1136
1137def : Pat<(v2i64 (int_aarch64_crypto_xar (v2i64 V128:$Vn), (v2i64 V128:$Vm), (i64 timm0_63:$imm))),
1138          (XAR (v2i64 V128:$Vn), (v2i64 V128:$Vm), (timm0_63:$imm))>;
1139
1140
1141} // HasSHA3
1142
1143let Predicates = [HasSM4] in {
1144def SM3TT1A   : CryptoRRRi2Tied<0b0, 0b00, "sm3tt1a">;
1145def SM3TT1B   : CryptoRRRi2Tied<0b0, 0b01, "sm3tt1b">;
1146def SM3TT2A   : CryptoRRRi2Tied<0b0, 0b10, "sm3tt2a">;
1147def SM3TT2B   : CryptoRRRi2Tied<0b0, 0b11, "sm3tt2b">;
1148def SM3SS1    : CryptoRRRR_4S<0b10, "sm3ss1">;
1149def SM3PARTW1 : CryptoRRRTied_4S<0b1, 0b00, "sm3partw1">;
1150def SM3PARTW2 : CryptoRRRTied_4S<0b1, 0b01, "sm3partw2">;
1151def SM4ENCKEY : CryptoRRR_4S<0b1, 0b10, "sm4ekey">;
1152def SM4E      : CryptoRRTied_4S<0b0, 0b01, "sm4e">;
1153
1154def : Pat<(v4i32 (int_aarch64_crypto_sm3ss1 (v4i32 V128:$Vn), (v4i32 V128:$Vm), (v4i32 V128:$Va))),
1155          (SM3SS1 (v4i32 V128:$Vn), (v4i32 V128:$Vm), (v4i32 V128:$Va))>;
1156
1157class SM3PARTW_pattern<Instruction INST, Intrinsic OpNode>
1158  : Pat<(v4i32 (OpNode (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm))),
1159        (INST (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm))>;
1160
1161class SM3TT_pattern<Instruction INST, Intrinsic OpNode>
1162  : Pat<(v4i32 (OpNode (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm), (i64 VectorIndexS_timm:$imm) )),
1163        (INST (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm), (VectorIndexS_timm:$imm))>;
1164
1165class SM4_pattern<Instruction INST, Intrinsic OpNode>
1166  : Pat<(v4i32 (OpNode (v4i32 V128:$Vn), (v4i32 V128:$Vm))),
1167        (INST (v4i32 V128:$Vn), (v4i32 V128:$Vm))>;
1168
1169def : SM3PARTW_pattern<SM3PARTW1, int_aarch64_crypto_sm3partw1>;
1170def : SM3PARTW_pattern<SM3PARTW2, int_aarch64_crypto_sm3partw2>;
1171
1172def : SM3TT_pattern<SM3TT1A, int_aarch64_crypto_sm3tt1a>;
1173def : SM3TT_pattern<SM3TT1B, int_aarch64_crypto_sm3tt1b>;
1174def : SM3TT_pattern<SM3TT2A, int_aarch64_crypto_sm3tt2a>;
1175def : SM3TT_pattern<SM3TT2B, int_aarch64_crypto_sm3tt2b>;
1176
1177def : SM4_pattern<SM4ENCKEY, int_aarch64_crypto_sm4ekey>;
1178def : SM4_pattern<SM4E, int_aarch64_crypto_sm4e>;
1179} // HasSM4
1180
1181let Predicates = [HasRCPC] in {
  // v8.3 Release Consistent processor consistent (RCpc) support, optional in
  // v8.2.
1183  def LDAPRB  : RCPCLoad<0b00, "ldaprb", GPR32>;
1184  def LDAPRH  : RCPCLoad<0b01, "ldaprh", GPR32>;
1185  def LDAPRW  : RCPCLoad<0b10, "ldapr", GPR32>;
1186  def LDAPRX  : RCPCLoad<0b11, "ldapr", GPR64>;
1187}
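
// For reference: "ldapr w0, [x1]" (illustrative operands) is a load-acquire
// like "ldar w0, [x1]", but in the weaker RCpc model it may be observed out of
// order with respect to an earlier store-release to a different address.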
1188
// v8.3a complex add and multiply-accumulate. No predicate here; that is done
// inside the multiclass, as the FP16 versions need different predicates.
1191defm FCMLA : SIMDThreeSameVectorTiedComplexHSD<1, 0b110, complexrotateop,
1192                                               "fcmla", null_frag>;
1193defm FCADD : SIMDThreeSameVectorComplexHSD<1, 0b111, complexrotateopodd,
1194                                           "fcadd", null_frag>;
1195defm FCMLA : SIMDIndexedTiedComplexHSD<0, 1, complexrotateop, "fcmla">;
1196
1197let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
1198  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot90 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
1199            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 0))>;
1200  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot270 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
1201            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 1))>;
1202  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot90 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
1203            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 0))>;
1204  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot270 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
1205            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 1))>;
1206}
1207
1208let Predicates = [HasComplxNum, HasNEON] in {
1209  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot90 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
1210            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 0))>;
1211  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot270 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
1212            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 1))>;
1213  foreach Ty = [v4f32, v2f64] in {
1214    def : Pat<(Ty (int_aarch64_neon_vcadd_rot90 (Ty V128:$Rn), (Ty V128:$Rm))),
1215              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 0))>;
1216    def : Pat<(Ty (int_aarch64_neon_vcadd_rot270 (Ty V128:$Rn), (Ty V128:$Rm))),
1217              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 1))>;
1218  }
1219}
1220
1221multiclass FCMLA_PATS<ValueType ty, DAGOperand Reg> {
1222  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1223            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 0)>;
1224  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1225            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 1)>;
1226  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1227            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 2)>;
1228  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1229            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 3)>;
1230}
1231
1232multiclass FCMLA_LANE_PATS<ValueType ty, DAGOperand Reg, dag RHSDup> {
1233  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1234            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 0)>;
1235  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1236            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 1)>;
1237  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1238            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 2)>;
1239  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1240            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 3)>;
1241}
1242
1243
1244let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
1245  defm : FCMLA_PATS<v4f16, V64>;
1246  defm : FCMLA_PATS<v8f16, V128>;
1247
1248  defm : FCMLA_LANE_PATS<v4f16, V64,
1249                         (v4f16 (bitconvert (v2i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexD:$idx))))>;
1250  defm : FCMLA_LANE_PATS<v8f16, V128,
1251                         (v8f16 (bitconvert (v4i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))>;
1252}
1253let Predicates = [HasComplxNum, HasNEON] in {
1254  defm : FCMLA_PATS<v2f32, V64>;
1255  defm : FCMLA_PATS<v4f32, V128>;
1256  defm : FCMLA_PATS<v2f64, V128>;
1257
1258  defm : FCMLA_LANE_PATS<v4f32, V128,
1259                         (v4f32 (bitconvert (v2i64 (AArch64duplane64 (v2i64 V128:$Rm), VectorIndexD:$idx))))>;
1260}
1261
1262// v8.3a Pointer Authentication
1263// These instructions inhabit part of the hint space and so can be used for
1264// armv8 targets. Keeping the old HINT mnemonic when compiling without PA is
// important for compatibility with other assemblers (e.g. GAS) when building
// software that must run both on CPUs that implement PA and on CPUs that
// don't.
1267let Uses = [LR], Defs = [LR] in {
1268  def PACIAZ   : SystemNoOperands<0b000, "hint\t#24">;
1269  def PACIBZ   : SystemNoOperands<0b010, "hint\t#26">;
1270  let isAuthenticated = 1 in {
1271    def AUTIAZ   : SystemNoOperands<0b100, "hint\t#28">;
1272    def AUTIBZ   : SystemNoOperands<0b110, "hint\t#30">;
1273  }
1274}
1275let Uses = [LR, SP], Defs = [LR] in {
1276  def PACIASP  : SystemNoOperands<0b001, "hint\t#25">;
1277  def PACIBSP  : SystemNoOperands<0b011, "hint\t#27">;
1278  let isAuthenticated = 1 in {
1279    def AUTIASP  : SystemNoOperands<0b101, "hint\t#29">;
1280    def AUTIBSP  : SystemNoOperands<0b111, "hint\t#31">;
1281  }
1282}
1283let Uses = [X16, X17], Defs = [X17], CRm = 0b0001 in {
1284  def PACIA1716  : SystemNoOperands<0b000, "hint\t#8">;
1285  def PACIB1716  : SystemNoOperands<0b010, "hint\t#10">;
1286  let isAuthenticated = 1 in {
1287    def AUTIA1716  : SystemNoOperands<0b100, "hint\t#12">;
1288    def AUTIB1716  : SystemNoOperands<0b110, "hint\t#14">;
1289  }
1290}
1291
1292let Uses = [LR], Defs = [LR], CRm = 0b0000 in {
1293  def XPACLRI   : SystemNoOperands<0b111, "hint\t#7">;
1294}
1295
// To allow readable assembly to be written, LLVM should accept assembly
// inputs that use pointer authentication mnemonics even with PA disabled.
// However, to stay compatible with other assemblers (e.g. GAS), LLVM should
// not emit these mnemonics unless PA is enabled.
1300def : InstAlias<"paciaz", (PACIAZ), 0>;
1301def : InstAlias<"pacibz", (PACIBZ), 0>;
1302def : InstAlias<"autiaz", (AUTIAZ), 0>;
1303def : InstAlias<"autibz", (AUTIBZ), 0>;
1304def : InstAlias<"paciasp", (PACIASP), 0>;
1305def : InstAlias<"pacibsp", (PACIBSP), 0>;
1306def : InstAlias<"autiasp", (AUTIASP), 0>;
1307def : InstAlias<"autibsp", (AUTIBSP), 0>;
1308def : InstAlias<"pacia1716", (PACIA1716), 0>;
1309def : InstAlias<"pacib1716", (PACIB1716), 0>;
1310def : InstAlias<"autia1716", (AUTIA1716), 0>;
1311def : InstAlias<"autib1716", (AUTIB1716), 0>;
1312def : InstAlias<"xpaclri", (XPACLRI), 0>;
1313
1314// These pointer authentication instructions require armv8.3a
1315let Predicates = [HasPAuth] in {
1316
1317  // When PA is enabled, a better mnemonic should be emitted.
1318  def : InstAlias<"paciaz", (PACIAZ), 1>;
1319  def : InstAlias<"pacibz", (PACIBZ), 1>;
1320  def : InstAlias<"autiaz", (AUTIAZ), 1>;
1321  def : InstAlias<"autibz", (AUTIBZ), 1>;
1322  def : InstAlias<"paciasp", (PACIASP), 1>;
1323  def : InstAlias<"pacibsp", (PACIBSP), 1>;
1324  def : InstAlias<"autiasp", (AUTIASP), 1>;
1325  def : InstAlias<"autibsp", (AUTIBSP), 1>;
1326  def : InstAlias<"pacia1716", (PACIA1716), 1>;
1327  def : InstAlias<"pacib1716", (PACIB1716), 1>;
1328  def : InstAlias<"autia1716", (AUTIA1716), 1>;
1329  def : InstAlias<"autib1716", (AUTIB1716), 1>;
1330  def : InstAlias<"xpaclri", (XPACLRI), 1>;
1331
1332  multiclass SignAuth<bits<3> prefix, bits<3> prefix_z, string asm,
1333                      SDPatternOperator op> {
1334    def IA   : SignAuthOneData<prefix, 0b00, !strconcat(asm,  "ia"), op>;
1335    def IB   : SignAuthOneData<prefix, 0b01, !strconcat(asm,  "ib"), op>;
1336    def DA   : SignAuthOneData<prefix, 0b10, !strconcat(asm,  "da"), op>;
1337    def DB   : SignAuthOneData<prefix, 0b11, !strconcat(asm,  "db"), op>;
1338    def IZA  : SignAuthZero<prefix_z,  0b00, !strconcat(asm, "iza"), op>;
1339    def DZA  : SignAuthZero<prefix_z,  0b10, !strconcat(asm, "dza"), op>;
1340    def IZB  : SignAuthZero<prefix_z,  0b01, !strconcat(asm, "izb"), op>;
1341    def DZB  : SignAuthZero<prefix_z,  0b11, !strconcat(asm, "dzb"), op>;
1342  }
1343
1344  defm PAC : SignAuth<0b000, 0b010, "pac", int_ptrauth_sign>;
1345  defm AUT : SignAuth<0b001, 0b011, "aut", null_frag>;
1346
1347  def XPACI : ClearAuth<0, "xpaci">;
1348  def XPACD : ClearAuth<1, "xpacd">;
1349
1350  def PACGA : SignAuthTwoOperand<0b1100, "pacga", int_ptrauth_sign_generic>;
1351
1352  // Combined Instructions
1353  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1  in {
1354    def BRAA    : AuthBranchTwoOperands<0, 0, "braa">;
1355    def BRAB    : AuthBranchTwoOperands<0, 1, "brab">;
1356  }
1357  let isCall = 1, Defs = [LR], Uses = [SP] in {
1358    def BLRAA   : AuthBranchTwoOperands<1, 0, "blraa">;
1359    def BLRAB   : AuthBranchTwoOperands<1, 1, "blrab">;
1360  }
1361
1362  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1  in {
1363    def BRAAZ   : AuthOneOperand<0b000, 0, "braaz">;
1364    def BRABZ   : AuthOneOperand<0b000, 1, "brabz">;
1365  }
1366  let isCall = 1, Defs = [LR], Uses = [SP] in {
1367    def BLRAAZ  : AuthOneOperand<0b001, 0, "blraaz">;
1368    def BLRABZ  : AuthOneOperand<0b001, 1, "blrabz">;
1369  }
1370
1371  let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
1372    def RETAA   : AuthReturn<0b010, 0, "retaa">;
1373    def RETAB   : AuthReturn<0b010, 1, "retab">;
1374    def ERETAA  : AuthReturn<0b100, 0, "eretaa">;
1375    def ERETAB  : AuthReturn<0b100, 1, "eretab">;
1376  }
1377
1378  defm LDRAA  : AuthLoad<0, "ldraa", simm10Scaled>;
1379  defm LDRAB  : AuthLoad<1, "ldrab", simm10Scaled>;
1380
1381}
1382
// v8.3a floating-point conversion for JavaScript
1384let Predicates = [HasJS, HasFPARMv8], Defs = [NZCV] in
1385def FJCVTZS  : BaseFPToIntegerUnscaled<0b01, 0b11, 0b110, FPR64, GPR32,
1386                                      "fjcvtzs",
1387                                      [(set GPR32:$Rd,
1388                                         (int_aarch64_fjcvtzs FPR64:$Rn))]> {
1389  let Inst{31} = 0;
1390} // HasJS, HasFPARMv8
1391
1392// v8.4 Flag manipulation instructions
1393let Predicates = [HasFlagM], Defs = [NZCV], Uses = [NZCV] in {
1394def CFINV : SimpleSystemI<0, (ins), "cfinv", "">, Sched<[WriteSys]> {
1395  let Inst{20-5} = 0b0000001000000000;
1396}
1397def SETF8  : BaseFlagManipulation<0, 0, (ins GPR32:$Rn), "setf8", "{\t$Rn}">;
1398def SETF16 : BaseFlagManipulation<0, 1, (ins GPR32:$Rn), "setf16", "{\t$Rn}">;
1399def RMIF   : FlagRotate<(ins GPR64:$Rn, uimm6:$imm, imm0_15:$mask), "rmif",
1400                        "{\t$Rn, $imm, $mask}">;
1401} // HasFlagM
1402
// v8.5 Flag manipulation instructions
1404let Predicates = [HasAltNZCV], Uses = [NZCV], Defs = [NZCV] in {
1405
1406def XAFLAG : PstateWriteSimple<(ins), "xaflag", "">, Sched<[WriteSys]> {
1407  let Inst{18-16} = 0b000;
1408  let Inst{11-8} = 0b0000;
1409  let Unpredictable{11-8} = 0b1111;
1410  let Inst{7-5} = 0b001;
1411}
1412
1413def AXFLAG : PstateWriteSimple<(ins), "axflag", "">, Sched<[WriteSys]> {
1414  let Inst{18-16} = 0b000;
1415  let Inst{11-8} = 0b0000;
1416  let Unpredictable{11-8} = 0b1111;
1417  let Inst{7-5} = 0b010;
1418}
1419} // HasAltNZCV
1420
1421
1422// Armv8.5-A speculation barrier
1423def SB : SimpleSystemI<0, (ins), "sb", "">, Sched<[]> {
1424  let Inst{20-5} = 0b0001100110000111;
1425  let Unpredictable{11-8} = 0b1111;
1426  let Predicates = [HasSB];
1427  let hasSideEffects = 1;
1428}
1429
1430def : InstAlias<"clrex", (CLREX 0xf)>;
1431def : InstAlias<"isb", (ISB 0xf)>;
1432def : InstAlias<"ssbb", (DSB 0)>;
1433def : InstAlias<"pssbb", (DSB 4)>;
1434def : InstAlias<"dfb", (DSB 0b1100)>, Requires<[HasV8_0r]>;
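// Note that ssbb, pssbb and dfb above are all encoded in the DSB CRm space
// (CRm = 0, 4 and 12 respectively).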
1435
1436def MRS    : MRSI;
1437def MSR    : MSRI;
1438def MSRpstateImm1 : MSRpstateImm0_1;
1439def MSRpstateImm4 : MSRpstateImm0_15;
1440
1441def : Pat<(AArch64mrs imm:$id),
1442          (MRS imm:$id)>;
1443
1444// The thread pointer (on Linux, at least, where this has been implemented) is
1445// TPIDR_EL0.
1446def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
1447                       [(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;
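// This pseudo is expected to expand to a plain system-register read, e.g.
// "mrs x0, TPIDR_EL0" (illustrative destination register).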
1448
1449let Uses = [ X9 ], Defs = [ X16, X17, LR, NZCV ] in {
1450def HWASAN_CHECK_MEMACCESS : Pseudo<
1451  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
1452  [(int_hwasan_check_memaccess X9, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
1453  Sched<[]>;
1454}
1455
1456let Uses = [ X20 ], Defs = [ X16, X17, LR, NZCV ] in {
1457def HWASAN_CHECK_MEMACCESS_SHORTGRANULES : Pseudo<
1458  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
1459  [(int_hwasan_check_memaccess_shortgranules X20, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
1460  Sched<[]>;
1461}
1462
1463// The cycle counter PMC register is PMCCNTR_EL0.
1464let Predicates = [HasPerfMon] in
1465def : Pat<(readcyclecounter), (MRS 0xdce8)>;
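// (0xdce8 is the MRS encoding of PMCCNTR_EL0: op0=3, op1=3, CRn=9, CRm=13,
// op2=0.)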
1466
1467// FPCR register
1468def : Pat<(i64 (int_aarch64_get_fpcr)), (MRS 0xda20)>;
1469def : Pat<(int_aarch64_set_fpcr i64:$val), (MSR 0xda20, GPR64:$val)>;
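// (0xda20 is likewise the encoding of FPCR: op0=3, op1=3, CRn=4, CRm=4,
// op2=0.)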
1470
1471// Generic system instructions
1472def SYSxt  : SystemXtI<0, "sys">;
1473def SYSLxt : SystemLXtI<1, "sysl">;
1474
1475def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
1476                (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
1477                 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;
1478
1479
1480let Predicates = [HasTME] in {
1481
1482def TSTART : TMSystemI<0b0000, "tstart",
1483                      [(set GPR64:$Rt, (int_aarch64_tstart))]>;
1484
1485def TCOMMIT : TMSystemINoOperand<0b0000, "tcommit", [(int_aarch64_tcommit)]>;
1486
1487def TCANCEL : TMSystemException<0b011, "tcancel",
1488                                [(int_aarch64_tcancel timm64_0_65535:$imm)]>;
1489
1490def TTEST : TMSystemI<0b0001, "ttest", [(set GPR64:$Rt, (int_aarch64_ttest))]> {
1491  let mayLoad = 0;
1492  let mayStore = 0;
1493}
1494} // HasTME
1495
1496//===----------------------------------------------------------------------===//
1497// Move immediate instructions.
1498//===----------------------------------------------------------------------===//
1499
1500defm MOVK : InsertImmediate<0b11, "movk">;
1501defm MOVN : MoveImmediate<0b00, "movn">;
1502
1503let PostEncoderMethod = "fixMOVZ" in
1504defm MOVZ : MoveImmediate<0b10, "movz">;
1505
1506// First group of aliases covers an implicit "lsl #0".
1507def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, timm32_0_65535:$imm, 0), 0>;
1508def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, timm32_0_65535:$imm, 0), 0>;
1509def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, timm32_0_65535:$imm, 0)>;
1510def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, timm32_0_65535:$imm, 0)>;
1511def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, timm32_0_65535:$imm, 0)>;
1512def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, timm32_0_65535:$imm, 0)>;
1513
1514// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
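// For example, "movz x0, #:abs_g3:sym" selects the g3 variant with an
// implicit "lsl #48" (illustrative operands).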
1515def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
1516def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
1517def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
1518def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;
1519
1520def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
1521def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
1522def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
1523def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;
1524
1525def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g3:$sym, 48), 0>;
1526def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g2:$sym, 32), 0>;
1527def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g1:$sym, 16), 0>;
1528def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g0:$sym, 0), 0>;
1529
1530def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
1531def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;
1532
1533def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
1534def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;
1535
1536def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g1:$sym, 16), 0>;
1537def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g0:$sym, 0), 0>;
1538
1539// Final group of aliases covers true "mov $Rd, $imm" cases.
multiclass movw_mov_alias<string basename, Instruction INST, RegisterClass GPR,
1541                          int width, int shift> {
1542  def _asmoperand : AsmOperandClass {
1543    let Name = basename # width # "_lsl" # shift # "MovAlias";
1544    let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
1545                               # shift # ">";
1546    let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
1547  }
1548
1549  def _movimm : Operand<i32> {
1550    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
1551  }
1552
1553  def : InstAlias<"mov $Rd, $imm",
1554                  (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
1555}
1556
1557defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
1558defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;
1559
1560defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
1561defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
1562defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
1563defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;
1564
1565defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
1566defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;
1567
1568defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
1569defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
1570defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
1571defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;
1572
1573let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
1574    isAsCheapAsAMove = 1 in {
1575// FIXME: The following pseudo instructions are only needed because remat
1576// cannot handle multiple instructions.  When that changes, we can select
1577// directly to the real instructions and get rid of these pseudos.
1578
1579def MOVi32imm
1580    : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
1581             [(set GPR32:$dst, imm:$src)]>,
1582      Sched<[WriteImm]>;
1583def MOVi64imm
1584    : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
1585             [(set GPR64:$dst, imm:$src)]>,
1586      Sched<[WriteImm]>;
1587} // isReMaterializable, isCodeGenOnly
1588
1589// If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
1590// eventual expansion code fewer bits to worry about getting right. Marshalling
1591// the types is a little tricky though:
1592def i64imm_32bit : ImmLeaf<i64, [{
1593  return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
1594}]>;
1595
1596def s64imm_32bit : ImmLeaf<i64, [{
1597  int64_t Imm64 = static_cast<int64_t>(Imm);
1598  return Imm64 >= std::numeric_limits<int32_t>::min() &&
1599         Imm64 <= std::numeric_limits<int32_t>::max();
1600}]>;
1601
1602def trunc_imm : SDNodeXForm<imm, [{
1603  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
1604}]>;
1605
1606def gi_trunc_imm : GICustomOperandRenderer<"renderTruncImm">,
1607  GISDNodeXFormEquiv<trunc_imm>;
1608
1609let Predicates = [OptimizedGISelOrOtherSelector] in {
1610// The SUBREG_TO_REG isn't eliminated at -O0, which can result in pointless
1611// copies.
1612def : Pat<(i64 i64imm_32bit:$src),
1613          (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
1614}
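
// The 32-bit move suffices because a write to a W register implicitly zeroes
// bits [63:32] of the corresponding X register; e.g. (illustrative)
// "mov w0, #0x12345678" leaves x0 = 0x0000000012345678.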
1615
1616// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
1617def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
1618return CurDAG->getTargetConstant(
1619  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
1620}]>;
1621
1622def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
1623return CurDAG->getTargetConstant(
1624  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
1625}]>;
1626
1627
1628def : Pat<(f32 fpimm:$in),
1629  (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
1630def : Pat<(f64 fpimm:$in),
1631  (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;
1632
1633
1634// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
1635// sequences.
1636def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
1637                             tglobaladdr:$g1, tglobaladdr:$g0),
1638          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g0, 0),
1639                                  tglobaladdr:$g1, 16),
1640                          tglobaladdr:$g2, 32),
1641                  tglobaladdr:$g3, 48)>;
1642
1643def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
1644                             tblockaddress:$g1, tblockaddress:$g0),
1645          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g0, 0),
1646                                  tblockaddress:$g1, 16),
1647                          tblockaddress:$g2, 32),
1648                  tblockaddress:$g3, 48)>;
1649
1650def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
1651                             tconstpool:$g1, tconstpool:$g0),
1652          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g0, 0),
1653                                  tconstpool:$g1, 16),
1654                          tconstpool:$g2, 32),
1655                  tconstpool:$g3, 48)>;
1656
1657def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
1658                             tjumptable:$g1, tjumptable:$g0),
1659          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g0, 0),
1660                                  tjumptable:$g1, 16),
1661                          tjumptable:$g2, 32),
1662                  tjumptable:$g3, 48)>;
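
// Each of these lowers to a four-instruction sequence along the lines of
// (illustrative relocation specifiers):
//   movz x0, #:abs_g0_nc:sym
//   movk x0, #:abs_g1_nc:sym
//   movk x0, #:abs_g2_nc:sym
//   movk x0, #:abs_g3:sym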
1663
1664
1665//===----------------------------------------------------------------------===//
1666// Arithmetic instructions.
1667//===----------------------------------------------------------------------===//
1668
1669// Add/subtract with carry.
1670defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
1671defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;
1672
1673def : InstAlias<"ngc $dst, $src",  (SBCWr  GPR32:$dst, WZR, GPR32:$src)>;
1674def : InstAlias<"ngc $dst, $src",  (SBCXr  GPR64:$dst, XZR, GPR64:$src)>;
1675def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
1676def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;
1677
1678// Add/subtract
1679defm ADD : AddSub<0, "add", "sub", add>;
1680defm SUB : AddSub<1, "sub", "add">;
1681
1682def : InstAlias<"mov $dst, $src",
1683                (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
1684def : InstAlias<"mov $dst, $src",
1685                (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
1686def : InstAlias<"mov $dst, $src",
1687                (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
1688def : InstAlias<"mov $dst, $src",
1689                (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;
1690
1691defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
1692defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;
1693
1694def copyFromSP: PatLeaf<(i64 GPR64:$src), [{
1695  return N->getOpcode() == ISD::CopyFromReg &&
1696         cast<RegisterSDNode>(N->getOperand(1))->getReg() == AArch64::SP;
1697}]>;
1698
1699// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
1700def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
1701          (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
1702def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
1703          (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
1704def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
1705          (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
1706def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
1707          (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
1708def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
1709          (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
1710def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
1711          (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
1712let AddedComplexity = 1 in {
1713def : Pat<(sub GPR32sp:$R2, arith_extended_reg32_i32:$R3),
1714          (SUBSWrx GPR32sp:$R2, arith_extended_reg32_i32:$R3)>;
1715def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64_i64:$R3),
1716          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64_i64:$R3)>;
1717def : Pat<(sub copyFromSP:$R2, (arith_uxtx GPR64:$R3, arith_extendlsl64:$imm)),
1718          (SUBXrx64 GPR64sp:$R2, GPR64:$R3, arith_extendlsl64:$imm)>;
1719}
1720
1721// Because of the immediate format for add/sub-imm instructions, the
1722// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
// These patterns capture that transformation.
1724let AddedComplexity = 1 in {
1725def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1726          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1727def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1728          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1729def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1730          (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1731def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1732          (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1733}
1734
// The same transformation applies to the flag-setting forms: (add_flag x, -1)
// becomes (SUBS{W,X}ri x, 1), and (sub_flag x, -1) becomes (ADDS{W,X}ri x, 1).
1738let AddedComplexity = 1 in {
1739def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1740          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1741def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1742          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1743def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1744          (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1745def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1746          (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1747}
1748
1749def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1750def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1751def : InstAlias<"neg $dst, $src$shift",
1752                (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1753def : InstAlias<"neg $dst, $src$shift",
1754                (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1755
1756def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1757def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1758def : InstAlias<"negs $dst, $src$shift",
1759                (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1760def : InstAlias<"negs $dst, $src$shift",
1761                (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1762
1763
1764// Unsigned/Signed divide
1765defm UDIV : Div<0, "udiv", udiv>;
1766defm SDIV : Div<1, "sdiv", sdiv>;
1767
1768def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr GPR32:$Rn, GPR32:$Rm)>;
1769def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr GPR64:$Rn, GPR64:$Rm)>;
1770def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr GPR32:$Rn, GPR32:$Rm)>;
1771def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr GPR64:$Rn, GPR64:$Rm)>;
1772
1773// Variable shift
1774defm ASRV : Shift<0b10, "asr", sra>;
1775defm LSLV : Shift<0b00, "lsl", shl>;
1776defm LSRV : Shift<0b01, "lsr", srl>;
1777defm RORV : Shift<0b11, "ror", rotr>;
1778
1779def : ShiftAlias<"asrv", ASRVWr, GPR32>;
1780def : ShiftAlias<"asrv", ASRVXr, GPR64>;
1781def : ShiftAlias<"lslv", LSLVWr, GPR32>;
1782def : ShiftAlias<"lslv", LSLVXr, GPR64>;
1783def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
1784def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
1785def : ShiftAlias<"rorv", RORVWr, GPR32>;
1786def : ShiftAlias<"rorv", RORVXr, GPR64>;
1787
1788// Multiply-add
1789let AddedComplexity = 5 in {
1790defm MADD : MulAccum<0, "madd">;
1791defm MSUB : MulAccum<1, "msub">;
1792
1793def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
1794          (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1795def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
1796          (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1797
1798def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
1799          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1800def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
1801          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1802def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
1803          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1804def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
1805          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1806} // AddedComplexity = 5
1807
1808let AddedComplexity = 5 in {
1809def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
1810def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
1811def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
1812def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;
1813
1814def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext_inreg GPR64:$Rm, i32))),
1815          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
1816def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext GPR32:$Rm))),
1817          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
1818def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
1819          (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1820def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (and GPR64:$Rm, 0xFFFFFFFF))),
1821          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
1822def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (zext GPR32:$Rm))),
1823          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
1824def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
1825          (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1826
1827def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
1828          (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1829def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
1830          (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1831
1832def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
1833          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1834def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
1835          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1836def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
1837          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1838                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1839
1840def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1841          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1842def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1843          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1844def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
1845          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1846                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1847
1848def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
1849          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1850def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
1851          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1852def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
1853                    GPR64:$Ra)),
1854          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1855                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1856
1857def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1858          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1859def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1860          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1861def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
1862                                    (s64imm_32bit:$C)))),
1863          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1864                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1865} // AddedComplexity = 5
1866
1867def : MulAccumWAlias<"mul", MADDWrrr>;
1868def : MulAccumXAlias<"mul", MADDXrrr>;
1869def : MulAccumWAlias<"mneg", MSUBWrrr>;
1870def : MulAccumXAlias<"mneg", MSUBXrrr>;
1871def : WideMulAccumAlias<"smull", SMADDLrrr>;
1872def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
1873def : WideMulAccumAlias<"umull", UMADDLrrr>;
1874def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;
1875
1876// Multiply-high
1877def SMULHrr : MulHi<0b010, "smulh", mulhs>;
1878def UMULHrr : MulHi<0b110, "umulh", mulhu>;
1879
1880// CRC32
1881def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
1882def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
1883def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
1884def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;
1885
1886def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
1887def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
1888def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
1889def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;
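
// The crc32{b,h,w,x} forms use the CRC-32 polynomial 0x04C11DB7; the
// crc32c{b,h,w,x} forms use the Castagnoli polynomial 0x1EDC6F41.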
1890
1891// v8.1 atomic CAS
1892defm CAS   : CompareAndSwap<0, 0, "">;
1893defm CASA  : CompareAndSwap<1, 0, "a">;
1894defm CASL  : CompareAndSwap<0, 1, "l">;
1895defm CASAL : CompareAndSwap<1, 1, "al">;
1896
1897// v8.1 atomic CASP
1898defm CASP   : CompareAndSwapPair<0, 0, "">;
1899defm CASPA  : CompareAndSwapPair<1, 0, "a">;
1900defm CASPL  : CompareAndSwapPair<0, 1, "l">;
1901defm CASPAL : CompareAndSwapPair<1, 1, "al">;
1902
1903// v8.1 atomic SWP
1904defm SWP   : Swap<0, 0, "">;
1905defm SWPA  : Swap<1, 0, "a">;
1906defm SWPL  : Swap<0, 1, "l">;
1907defm SWPAL : Swap<1, 1, "al">;
1908
// v8.1 atomic LD<OP>(register): atomically loads a value, applies <OP> with
// the source register operand, and stores the result back; the original
// memory value is returned in Rt.
1910defm LDADD   : LDOPregister<0b000, "add", 0, 0, "">;
1911defm LDADDA  : LDOPregister<0b000, "add", 1, 0, "a">;
1912defm LDADDL  : LDOPregister<0b000, "add", 0, 1, "l">;
1913defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;
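
// For reference (illustrative operands): "ldaddal w0, w1, [x2]" atomically
// performs { w1 = *x2; *x2 = w1 + w0 } with acquire and release ordering.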
1914
1915defm LDCLR   : LDOPregister<0b001, "clr", 0, 0, "">;
1916defm LDCLRA  : LDOPregister<0b001, "clr", 1, 0, "a">;
1917defm LDCLRL  : LDOPregister<0b001, "clr", 0, 1, "l">;
1918defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;
1919
1920defm LDEOR   : LDOPregister<0b010, "eor", 0, 0, "">;
1921defm LDEORA  : LDOPregister<0b010, "eor", 1, 0, "a">;
1922defm LDEORL  : LDOPregister<0b010, "eor", 0, 1, "l">;
1923defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;
1924
1925defm LDSET   : LDOPregister<0b011, "set", 0, 0, "">;
1926defm LDSETA  : LDOPregister<0b011, "set", 1, 0, "a">;
1927defm LDSETL  : LDOPregister<0b011, "set", 0, 1, "l">;
1928defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;
1929
1930defm LDSMAX   : LDOPregister<0b100, "smax", 0, 0, "">;
1931defm LDSMAXA  : LDOPregister<0b100, "smax", 1, 0, "a">;
1932defm LDSMAXL  : LDOPregister<0b100, "smax", 0, 1, "l">;
1933defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;
1934
1935defm LDSMIN   : LDOPregister<0b101, "smin", 0, 0, "">;
1936defm LDSMINA  : LDOPregister<0b101, "smin", 1, 0, "a">;
1937defm LDSMINL  : LDOPregister<0b101, "smin", 0, 1, "l">;
1938defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;
1939
1940defm LDUMAX   : LDOPregister<0b110, "umax", 0, 0, "">;
1941defm LDUMAXA  : LDOPregister<0b110, "umax", 1, 0, "a">;
1942defm LDUMAXL  : LDOPregister<0b110, "umax", 0, 1, "l">;
1943defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;
1944
1945defm LDUMIN   : LDOPregister<0b111, "umin", 0, 0, "">;
1946defm LDUMINA  : LDOPregister<0b111, "umin", 1, 0, "a">;
1947defm LDUMINL  : LDOPregister<0b111, "umin", 0, 1, "l">;
1948defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;
1949
// v8.1 atomic ST<OP>(register), defined as aliases for LD<OP>(register) with
// Rt = XZR/WZR (the loaded result is discarded).
1951defm : STOPregister<"stadd","LDADD">; // STADDx
1952defm : STOPregister<"stclr","LDCLR">; // STCLRx
1953defm : STOPregister<"steor","LDEOR">; // STEORx
1954defm : STOPregister<"stset","LDSET">; // STSETx
1955defm : STOPregister<"stsmax","LDSMAX">;// STSMAXx
1956defm : STOPregister<"stsmin","LDSMIN">;// STSMINx
1957defm : STOPregister<"stumax","LDUMAX">;// STUMAXx
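
// For example, "stadd w0, [x1]" is the same encoding as
// "ldadd w0, wzr, [x1]"; the loaded value is simply discarded.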
1958defm : STOPregister<"stumin","LDUMIN">;// STUMINx
1959
1960// v8.5 Memory Tagging Extension
1961let Predicates = [HasMTE] in {
1962
1963def IRG   : BaseTwoOperand<0b0100, GPR64sp, "irg", int_aarch64_irg, GPR64sp, GPR64>,
1964            Sched<[]>{
1965  let Inst{31} = 1;
1966}
1967def GMI   : BaseTwoOperand<0b0101, GPR64, "gmi", int_aarch64_gmi, GPR64sp>, Sched<[]>{
1968  let Inst{31} = 1;
1969  let isNotDuplicable = 1;
1970}
1971def ADDG  : AddSubG<0, "addg", null_frag>;
1972def SUBG  : AddSubG<1, "subg", null_frag>;
1973
1974def : InstAlias<"irg $dst, $src", (IRG GPR64sp:$dst, GPR64sp:$src, XZR), 1>;
1975
1976def SUBP : SUBP<0, "subp", int_aarch64_subp>, Sched<[]>;
1977def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]>{
1978  let Defs = [NZCV];
1979}
1980
1981def : InstAlias<"cmpp $lhs, $rhs", (SUBPS XZR, GPR64sp:$lhs, GPR64sp:$rhs), 0>;
1982
1983def LDG : MemTagLoad<"ldg", "\t$Rt, [$Rn, $offset]">;
1984
1985def : Pat<(int_aarch64_addg (am_indexedu6s128 GPR64sp:$Rn, uimm6s16:$imm6), imm0_15:$imm4),
1986          (ADDG GPR64sp:$Rn, imm0_63:$imm6, imm0_15:$imm4)>;
1987def : Pat<(int_aarch64_ldg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn,  simm9s16:$offset)),
1988          (LDG GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;
1989
1990def : InstAlias<"ldg $Rt, [$Rn]", (LDG GPR64:$Rt, GPR64sp:$Rn, 0), 1>;
1991
1992def LDGM : MemTagVector<1, "ldgm", "\t$Rt, [$Rn]",
1993                   (outs GPR64:$Rt), (ins GPR64sp:$Rn)>;
1994def STGM : MemTagVector<0, "stgm", "\t$Rt, [$Rn]",
1995                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)>;
1996def STZGM : MemTagVector<0, "stzgm", "\t$Rt, [$Rn]",
1997                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)> {
1998  let Inst{23} = 0;
1999}
2000
2001defm STG   : MemTagStore<0b00, "stg">;
2002defm STZG  : MemTagStore<0b01, "stzg">;
2003defm ST2G  : MemTagStore<0b10, "st2g">;
2004defm STZ2G : MemTagStore<0b11, "stz2g">;
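
// For reference (illustrative operands): "stg x0, [x1]" writes the allocation
// tag from the tag bits of x0 to the 16-byte granule at [x1]; the "z" forms
// also zero the granule's data, and the "2" forms cover two granules.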
2005
2006def : Pat<(AArch64stg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
2007          (STGOffset $Rn, $Rm, $imm)>;
2008def : Pat<(AArch64stzg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
2009          (STZGOffset $Rn, $Rm, $imm)>;
2010def : Pat<(AArch64st2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
2011          (ST2GOffset $Rn, $Rm, $imm)>;
2012def : Pat<(AArch64stz2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
2013          (STZ2GOffset $Rn, $Rm, $imm)>;
2014
2015defm STGP     : StorePairOffset <0b01, 0, GPR64z, simm7s16, "stgp">;
2016def  STGPpre  : StorePairPreIdx <0b01, 0, GPR64z, simm7s16, "stgp">;
2017def  STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">;
2018
2019def : Pat<(int_aarch64_stg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)),
2020          (STGOffset GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;
2021
2022def : Pat<(int_aarch64_stgp (am_indexed7s128 GPR64sp:$Rn, simm7s16:$imm), GPR64:$Rt, GPR64:$Rt2),
2023          (STGPi $Rt, $Rt2, $Rn, $imm)>;
2024
2025def IRGstack
2026    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rsp, GPR64:$Rm), []>,
2027      Sched<[]>;
2028def TAGPstack
2029    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rn, uimm6s16:$imm6, GPR64sp:$Rm, imm0_15:$imm4), []>,
2030      Sched<[]>;
2031
2032// Explicit SP in the first operand prevents ShrinkWrap optimization
2033// from leaving this instruction out of the stack frame. When IRGstack
2034// is transformed into IRG, this operand is replaced with the actual
2035// register / expression for the tagged base pointer of the current function.
2036def : Pat<(int_aarch64_irg_sp i64:$Rm), (IRGstack SP, i64:$Rm)>;
2037
// Large STG to be expanded into a loop. $sz is the size, $Rn is the start
// address. $Rn_wback is one past the end of the range. $Rm is the loop
// counter.
2040let isCodeGenOnly=1, mayStore=1 in {
2041def STGloop_wback
2042    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
2043             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
2044      Sched<[WriteAdr, WriteST]>;
2045
2046def STZGloop_wback
2047    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
2048             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
2049      Sched<[WriteAdr, WriteST]>;
2050
// Variants of the above where $Rn2 is an independent register not tied to the
// input register $Rn. Their purpose is to allow a FrameIndex operand as $Rn
// (which of course cannot be written back).
2053def STGloop
2054    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
2055             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
2056      Sched<[WriteAdr, WriteST]>;
2057
2058def STZGloop
2059    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
2060             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
2061      Sched<[WriteAdr, WriteST]>;
2062}
2063
2064} // Predicates = [HasMTE]
2065
2066//===----------------------------------------------------------------------===//
2067// Logical instructions.
2068//===----------------------------------------------------------------------===//
2069
2070// (immediate)
2071defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
2072defm AND  : LogicalImm<0b00, "and", and, "bic">;
2073defm EOR  : LogicalImm<0b10, "eor", xor, "eon">;
2074defm ORR  : LogicalImm<0b01, "orr", or, "orn">;
2075
2076// FIXME: these aliases *are* canonical sometimes (when movz can't be
2077// used). Actually, it seems to be working right now, but putting logical_immXX
2078// here is a bit dodgy on the AsmParser side too.
2079def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
2080                                          logical_imm32:$imm), 0>;
2081def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
2082                                          logical_imm64:$imm), 0>;
2083
2084
2085// (register)
2086defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
2087defm BICS : LogicalRegS<0b11, 1, "bics",
2088                        BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
2089defm AND  : LogicalReg<0b00, 0, "and", and>;
2090defm BIC  : LogicalReg<0b00, 1, "bic",
2091                       BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
2092defm EON  : LogicalReg<0b10, 1, "eon",
2093                       BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
2094defm EOR  : LogicalReg<0b10, 0, "eor", xor>;
2095defm ORN  : LogicalReg<0b01, 1, "orn",
2096                       BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
2097defm ORR  : LogicalReg<0b01, 0, "orr", or>;
2098
2099def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
2100def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;
2101
2102def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
2103def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;
2104
2105def : InstAlias<"mvn $Wd, $Wm$sh",
2106                (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
2107def : InstAlias<"mvn $Xd, $Xm$sh",
2108                (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;
2109
2110def : InstAlias<"tst $src1, $src2",
2111                (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
2112def : InstAlias<"tst $src1, $src2",
2113                (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;
2114
2115def : InstAlias<"tst $src1, $src2",
2116                        (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
2117def : InstAlias<"tst $src1, $src2",
2118                        (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;
2119
2120def : InstAlias<"tst $src1, $src2$sh",
2121               (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
2122def : InstAlias<"tst $src1, $src2$sh",
2123               (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;
2124
2125
2126def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
2127def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
2128
2129
2130//===----------------------------------------------------------------------===//
2131// One operand data processing instructions.
2132//===----------------------------------------------------------------------===//
2133
2134defm CLS    : OneOperandData<0b101, "cls">;
2135defm CLZ    : OneOperandData<0b100, "clz", ctlz>;
2136defm RBIT   : OneOperandData<0b000, "rbit", bitreverse>;
2137
2138def  REV16Wr : OneWRegData<0b001, "rev16",
2139                                  UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
2140def  REV16Xr : OneXRegData<0b001, "rev16", null_frag>;
2141
2142def : Pat<(cttz GPR32:$Rn),
2143          (CLZWr (RBITWr GPR32:$Rn))>;
2144def : Pat<(cttz GPR64:$Rn),
2145          (CLZXr (RBITXr GPR64:$Rn))>;
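// The following patterns match clz((((x >> 31) ^ x) << 1) | 1) (and the
// 64-bit equivalent), a branch-free expansion of cls(x), the count of
// leading sign bits.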
2146def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
2147                (i32 1))),
2148          (CLSWr GPR32:$Rn)>;
2149def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
2150                (i64 1))),
2151          (CLSXr GPR64:$Rn)>;
2152def : Pat<(int_aarch64_cls GPR32:$Rn), (CLSWr GPR32:$Rn)>;
2153def : Pat<(int_aarch64_cls64 GPR64:$Rm), (EXTRACT_SUBREG (CLSXr GPR64:$Rm), sub_32)>;
2154
// Unlike the other one-operand instructions, the instructions with the "rev"
// mnemonic do *not* just differ in the size bit, but actually use different
// opcode bits for the different sizes.
2158def REVWr   : OneWRegData<0b010, "rev", bswap>;
2159def REVXr   : OneXRegData<0b011, "rev", bswap>;
2160def REV32Xr : OneXRegData<0b010, "rev32",
2161                                 UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;
2162
2163def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;
2164
2165// The bswap commutes with the rotr so we want a pattern for both possible
2166// orders.
2167def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
2168def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
2169
2170// Match (srl (bswap x), C) -> revC if the upper bswap bits are known zero.
2171def : Pat<(srl (bswap top16Zero:$Rn), (i64 16)), (REV16Wr GPR32:$Rn)>;
2172def : Pat<(srl (bswap top32Zero:$Rn), (i64 32)), (REV32Xr GPR64:$Rn)>;
2173
2174def : Pat<(or (and (srl GPR64:$Rn, (i64 8)), (i64 0x00ff00ff00ff00ff)),
2175              (and (shl GPR64:$Rn, (i64 8)), (i64 0xff00ff00ff00ff00))),
2176          (REV16Xr GPR64:$Rn)>;

//===----------------------------------------------------------------------===//
// Bitfield immediate extraction instruction.
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in
defm EXTR : ExtractImm<"extr">;
def : InstAlias<"ror $dst, $src, $shift",
            (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
def : InstAlias<"ror $dst, $src, $shift",
            (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;

def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
          (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
          (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;

//===----------------------------------------------------------------------===//
// Other bitfield immediate instructions.
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in {
defm BFM  : BitfieldImmWith2RegArgs<0b01, "bfm">;
defm SBFM : BitfieldImm<0b00, "sbfm">;
defm UBFM : BitfieldImm<0b10, "ubfm">;
}

def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(7, 31 - shift_amt)
def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  enc = enc > 7 ? 7 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(15, 31 - shift_amt)
def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  enc = enc > 15 ? 15 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(7, 63 - shift_amt)
def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 7 ? 7 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(15, 63 - shift_amt)
def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 15 ? 15 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(31, 63 - shift_amt)
def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 31 ? 31 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
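// The *shift_a/*shift_b transforms compute the immr/imms fields of the
// equivalent bitfield-move instruction: for a left shift by s on a register
// of the given size, immr = (size - s) mod size and imms = size - 1 - s. The
// *_sext variants additionally clamp imms so that only the defined bits of a
// sign-extended i8/i16/i32 source are copied, as an SBFIZ-style
// shift-of-sign-extend selection requires.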

def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
          (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                              (i64 (i32shift_b imm0_31:$imm)))>;
def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
          (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                              (i64 (i64shift_b imm0_63:$imm)))>;
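// For example, "lsl w0, w1, #7" is selected as "ubfm w0, w1, #25, #24":
// i32shift_a yields immr = (32 - 7) & 31 = 25 and i32shift_b yields
// imms = 31 - 7 = 24, the standard LSL-to-UBFM encoding.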

let AddedComplexity = 10 in {
def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
}

def : InstAlias<"asr $dst, $src, $shift",
                (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
def : InstAlias<"asr $dst, $src, $shift",
                (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
          (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
          (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;

def : InstAlias<"lsr $dst, $src, $shift",
                (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
def : InstAlias<"lsr $dst, $src, $shift",
                (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

//===----------------------------------------------------------------------===//
// Conditional comparison instructions.
//===----------------------------------------------------------------------===//
defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;

//===----------------------------------------------------------------------===//
// Conditional select instructions.
//===----------------------------------------------------------------------===//
defm CSEL  : CondSelect<0, 0b00, "csel">;

def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;

def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;

def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;

def : Pat<(add GPR32:$val, (AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV)),
          (CSINCWr GPR32:$val, GPR32:$val, (i32 imm:$cc))>;
def : Pat<(add GPR64:$val, (zext (AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV))),
          (CSINCXr GPR64:$val, GPR64:$val, (i32 imm:$cc))>;
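// The two patterns above fold the increment into the select: CSINC returns
// its first operand when the condition holds and its second operand plus one
// otherwise, so "val + (cc ? 0 : 1)" collapses to a single CSINC with both
// register operands set to val.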

// The aliased instruction uses the inverse of the condition code written in
// the alias. The parser already inverts the condition code for these
// aliases.
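// For example, "cset w0, eq" (w0 = Z ? 1 : 0) is encoded as
// "csinc w0, wzr, wzr, ne": CSINC produces wzr + 1 exactly when the inverted
// condition "ne" does not hold.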
2358def : InstAlias<"cset $dst, $cc",
2359                (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
2360def : InstAlias<"cset $dst, $cc",
2361                (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
2362
2363def : InstAlias<"csetm $dst, $cc",
2364                (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
2365def : InstAlias<"csetm $dst, $cc",
2366                (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
2367
2368def : InstAlias<"cinc $dst, $src, $cc",
2369                (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
2370def : InstAlias<"cinc $dst, $src, $cc",
2371                (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
2372
2373def : InstAlias<"cinv $dst, $src, $cc",
2374                (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
2375def : InstAlias<"cinv $dst, $src, $cc",
2376                (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
2377
2378def : InstAlias<"cneg $dst, $src, $cc",
2379                (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
2380def : InstAlias<"cneg $dst, $src, $cc",
2381                (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
2382
2383//===----------------------------------------------------------------------===//
2384// PC-relative instructions.
2385//===----------------------------------------------------------------------===//
2386let isReMaterializable = 1 in {
2387let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
2388def ADR  : ADRI<0, "adr", adrlabel,
2389                [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>;
2390} // hasSideEffects = 0
2391
2392def ADRP : ADRI<1, "adrp", adrplabel,
2393                [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
2394} // isReMaterializable = 1
2395
2396// page address of a constant pool entry, block address
2397def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>;
2398def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>;
2399def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>;
2400def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>;
2401def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
2402def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
2403def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>;
2404
2405//===----------------------------------------------------------------------===//
2406// Unconditional branch (register) instructions.
2407//===----------------------------------------------------------------------===//
2408
2409let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
2410def RET  : BranchReg<0b0010, "ret", []>;
2411def DRPS : SpecialReturn<0b0101, "drps">;
2412def ERET : SpecialReturn<0b0100, "eret">;
2413} // isReturn = 1, isTerminator = 1, isBarrier = 1
2414
2415// Default to the LR register.
2416def : InstAlias<"ret", (RET LR)>;
2417
2418let isCall = 1, Defs = [LR], Uses = [SP] in {
2419  def BLR : BranchReg<0b0001, "blr", []>;
2420  def BLRNoIP : Pseudo<(outs), (ins GPR64noip:$Rn), []>,
2421                Sched<[WriteBrReg]>,
2422                PseudoInstExpansion<(BLR GPR64:$Rn)>;
2423  def BLR_RVMARKER : Pseudo<(outs), (ins variable_ops), []>,
2424                     Sched<[WriteBrReg]>;
2425  def BLR_BTI : Pseudo<(outs), (ins variable_ops), []>,
2426                Sched<[WriteBrReg]>;
2427} // isCall
2428
2429def : Pat<(AArch64call GPR64:$Rn),
2430          (BLR GPR64:$Rn)>,
2431      Requires<[NoSLSBLRMitigation]>;
2432def : Pat<(AArch64call GPR64noip:$Rn),
2433          (BLRNoIP GPR64noip:$Rn)>,
2434      Requires<[SLSBLRMitigation]>;
2435
2436def : Pat<(AArch64call_rvmarker (i64 tglobaladdr:$rvfunc), GPR64:$Rn),
2437          (BLR_RVMARKER tglobaladdr:$rvfunc, GPR64:$Rn)>,
2438      Requires<[NoSLSBLRMitigation]>;
2439
2440def : Pat<(AArch64call_bti GPR64:$Rn),
2441          (BLR_BTI GPR64:$Rn)>,
2442      Requires<[NoSLSBLRMitigation]>;
2443
2444let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
2445def BR  : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
2446} // isBranch, isTerminator, isBarrier, isIndirectBranch
2447
2448// Create a separate pseudo-instruction for codegen to use so that we don't
2449// flag lr as used in every function. It'll be restored before the RET by the
2450// epilogue if it's legitimately used.
2451def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
2452                   Sched<[WriteBrReg]> {
2453  let isTerminator = 1;
2454  let isBarrier = 1;
2455  let isReturn = 1;
2456}
2457
2458// This is a directive-like pseudo-instruction. The purpose is to insert an
2459// R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
2460// (which in the usual case is a BLR).
2461let hasSideEffects = 1 in
2462def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
2463  let AsmString = ".tlsdesccall $sym";
2464}
2465
2466// Pseudo instruction to tell the streamer to emit a 'B' character into the
2467// augmentation string.
2468def EMITBKEY : Pseudo<(outs), (ins), []>, Sched<[]> {}
2469
2470// Pseudo instruction to tell the streamer to emit a 'G' character into the
2471// augmentation string.
2472def EMITMTETAGGED : Pseudo<(outs), (ins), []>, Sched<[]> {}
2473
2474// FIXME: maybe the scratch register used shouldn't be fixed to X1?
2475// FIXME: can "hasSideEffects be dropped?
2476// This gets lowered to an instruction sequence which takes 16 bytes
2477let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1, Size = 16,
2478    isCodeGenOnly = 1 in
2479def TLSDESC_CALLSEQ
2480    : Pseudo<(outs), (ins i64imm:$sym),
2481             [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
2482      Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
2483def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
2484          (TLSDESC_CALLSEQ texternalsym:$sym)>;
2485
2486//===----------------------------------------------------------------------===//
2487// Conditional branch (immediate) instruction.
2488//===----------------------------------------------------------------------===//
2489def Bcc : BranchCond<0, "b">;
2490
2491// Armv8.8-A variant form which hints to the branch predictor that
2492// this branch is very likely to go the same way nearly all the time
2493// (even though it is not known at compile time _which_ way that is).
2494def BCcc : BranchCond<1, "bc">, Requires<[HasHBC]>;
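// For example (assembly sketch), "bc.eq .Ltarget" behaves like
// "b.eq .Ltarget" but additionally carries the "branch consistent" hint.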

//===----------------------------------------------------------------------===//
// Compare-and-branch instructions.
//===----------------------------------------------------------------------===//
defm CBZ  : CmpBranch<0, "cbz", AArch64cbz>;
defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;

//===----------------------------------------------------------------------===//
// Test-bit-and-branch instructions.
//===----------------------------------------------------------------------===//
defm TBZ  : TestBranch<0, "tbz", AArch64tbz>;
defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;

//===----------------------------------------------------------------------===//
// Unconditional branch (immediate) instructions.
//===----------------------------------------------------------------------===//
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
def B  : BranchImm<0, "b", [(br bb:$addr)]>;
} // isBranch, isTerminator, isBarrier

let isCall = 1, Defs = [LR], Uses = [SP] in {
def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
} // isCall
def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;

//===----------------------------------------------------------------------===//
// Exception generation instructions.
//===----------------------------------------------------------------------===//
let isTrap = 1 in {
def BRK   : ExceptionGeneration<0b001, 0b00, "brk",
                                [(int_aarch64_break timm32_0_65535:$imm)]>;
}
def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">, Requires<[HasEL3]>;
def HLT   : ExceptionGeneration<0b010, 0b00, "hlt">;
def HVC   : ExceptionGeneration<0b000, 0b10, "hvc">;
def SMC   : ExceptionGeneration<0b000, 0b11, "smc">, Requires<[HasEL3]>;
def SVC   : ExceptionGeneration<0b000, 0b01, "svc">;

// DCPSn defaults to an immediate operand of zero if unspecified.
def : InstAlias<"dcps1", (DCPS1 0)>;
def : InstAlias<"dcps2", (DCPS2 0)>;
def : InstAlias<"dcps3", (DCPS3 0)>, Requires<[HasEL3]>;

def UDF : UDFType<0, "udf">;

//===----------------------------------------------------------------------===//
// Load instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
defm LDPW : LoadPairOffset<0b00, 0, GPR32z, simm7s4, "ldp">;
defm LDPX : LoadPairOffset<0b10, 0, GPR64z, simm7s8, "ldp">;
defm LDPS : LoadPairOffset<0b00, 1, FPR32Op, simm7s4, "ldp">;
defm LDPD : LoadPairOffset<0b01, 1, FPR64Op, simm7s8, "ldp">;
defm LDPQ : LoadPairOffset<0b10, 1, FPR128Op, simm7s16, "ldp">;

defm LDPSW : LoadPairOffset<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (pre-indexed)
def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (post-indexed)
def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (no allocate)
defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32z, simm7s4, "ldnp">;
defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64z, simm7s8, "ldnp">;
defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32Op, simm7s4, "ldnp">;
defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64Op, simm7s8, "ldnp">;
defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128Op, simm7s16, "ldnp">;

def : Pat<(AArch64ldp (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (LDPXi GPR64sp:$Rn, simm7s8:$offset)>;

//---
// (register offset)
//---

// Integer
defm LDRBB : Load8RO<0b00,  0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
defm LDRW  : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
defm LDRX  : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;

// Floating-point
defm LDRB : Load8RO<0b00,   1, 0b01, FPR8Op,   "ldr", untyped, load>;
defm LDRH : Load16RO<0b01,  1, 0b01, FPR16Op,  "ldr", f16, load>;
defm LDRS : Load32RO<0b10,  1, 0b01, FPR32Op,  "ldr", f32, load>;
defm LDRD : Load64RO<0b11,  1, 0b01, FPR64Op,  "ldr", f64, load>;
defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128Op, "ldr", f128, load>;

// Load sign-extended half-word
defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;

// Load sign-extended byte
defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;

// Load sign-extended word
defm LDRSW  : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;

// Pre-fetch.
defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;

// Regular loads carry no alignment requirement, so it is safe to map them
// directly onto the vector loads with interesting addressing modes.
// FIXME: We could do the same for bitconvert to floating point vectors.
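// For example, materializing a v8i8 whose lane 0 comes from a byte load can
// use the FP/SIMD LDRB form directly instead of an integer load followed by
// a GPR-to-vector copy.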
multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
                              ValueType ScalTy, ValueType VecTy,
                              Instruction LOADW, Instruction LOADX,
                              SubRegIndex sub> {
  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
                           sub)>;

  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
                           sub)>;
}

let AddedComplexity = 10 in {
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v8i8,  LDRBroW, LDRBroX, bsub>;
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v16i8, LDRBroW, LDRBroX, bsub>;

defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro16, load,       i32, v4f16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, load,       i32, v8f16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro32, load,       i32, v2i32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       i32, v4i32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro32, load,       f32, v2f32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       f32, v4f32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro64, load,       i64, v2i64, LDRDroW, LDRDroX, dsub>;

defm : ScalToVecROLoadPat<ro64, load,       f64, v2f64, LDRDroW, LDRDroX, dsub>;

def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend64:$extend))))),
           (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;

def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend64:$extend))))),
           (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
}

// Match all 64-bit-wide loads whose type is compatible with FPR64.
multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
                        Instruction LOADW, Instruction LOADX> {

  def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
let Predicates = [IsLE] in {
  // We must do vector loads with LD1 in big-endian.
  defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v8i8,  LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4bf16, LDRDroW, LDRDroX>;
}

defm : VecROLoadPat<ro64, v1i64,  LDRDroW, LDRDroX>;
defm : VecROLoadPat<ro64, v1f64,  LDRDroW, LDRDroX>;

// Match all 128-bit-wide loads whose type is compatible with FPR128.
let Predicates = [IsLE] in {
  // We must do vector loads with LD1 in big-endian.
  defm : VecROLoadPat<ro128, v2i64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v2f64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4i32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4f32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8i16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8f16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8bf16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v16i8,  LDRQroW, LDRQroX>;
}
} // AddedComplexity = 10

// zextload -> i64
multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                           sub_32)>;

  def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                           sub_32)>;
}

let AddedComplexity = 10 in {
  defm : ExtLoadTo64ROPat<ro8,  zextloadi8,  LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  zextloadi1,  LDRBBroW, LDRBBroX>;

  // extload -> zextload
  defm : ExtLoadTo64ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // extloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  extloadi1,   LDRBBroW, LDRBBroX>;
}

// extload/zextload -> i32
multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
  // extload -> zextload
  defm : ExtLoadTo32ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo32ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo32ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
}

//---
// (unsigned immediate)
//---
defm LDRX : LoadUI<0b11, 0, 0b01, GPR64z, uimm12s8, "ldr",
                   [(set GPR64z:$Rt,
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRW : LoadUI<0b10, 0, 0b01, GPR32z, uimm12s4, "ldr",
                   [(set GPR32z:$Rt,
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRB : LoadUI<0b00, 1, 0b01, FPR8Op, uimm12s1, "ldr",
                   [(set FPR8Op:$Rt,
                         (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
defm LDRH : LoadUI<0b01, 1, 0b01, FPR16Op, uimm12s2, "ldr",
                   [(set (f16 FPR16Op:$Rt),
                         (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
defm LDRS : LoadUI<0b10, 1, 0b01, FPR32Op, uimm12s4, "ldr",
                   [(set (f32 FPR32Op:$Rt),
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRD : LoadUI<0b11, 1, 0b01, FPR64Op, uimm12s8, "ldr",
                   [(set (f64 FPR64Op:$Rt),
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128Op, uimm12s16, "ldr",
                 [(set (f128 FPR128Op:$Rt),
                       (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;

// bf16 load pattern
def : Pat <(bf16 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;

// Regular loads carry no alignment requirement, so it is safe to map them
// directly onto the vector loads with interesting addressing modes.
// FIXME: We could do the same for bitconvert to floating point vectors.
def : Pat <(v8i8 (scalar_to_vector (i32
               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v16i8 (scalar_to_vector (i32
               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v4i16 (scalar_to_vector (i32
               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v8i16 (scalar_to_vector (i32
               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v2i32 (scalar_to_vector (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v4i32 (scalar_to_vector (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v1i64 (scalar_to_vector (i64
               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat <(v2i64 (scalar_to_vector (i64
               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                          (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;

// Match all 64-bit-wide loads whose type is compatible with FPR64.
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian.
  def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4bf16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
}
def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;

// Match all 128-bit-wide loads whose type is compatible with FPR128.
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian.
  def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8bf16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
}
def : Pat<(f128  (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
          (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;

defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
                    [(set GPR32:$Rt,
                          (zextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                     uimm12s2:$offset)))]>;
defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
                    [(set GPR32:$Rt,
                          (zextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                   uimm12s1:$offset)))]>;
// zextload -> i64
def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;

// zextloadi1 -> zextloadi8
def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
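// Widening an i1 load to a byte load is safe here because the code generator
// only ever stores an i1 as 0 or 1 in a full byte, so the zero-extended byte
// already holds the correct value.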

// extload -> zextload
def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;

// load sign-extended half-word
defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
                     [(set GPR32:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;
defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
                     [(set GPR64:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;

// load sign-extended byte
defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
                     [(set GPR32:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;
defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
                     [(set GPR64:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;

// load sign-extended word
defm LDRSW  : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
                     [(set GPR64:$Rt,
                           (sextloadi32 (am_indexed32 GPR64sp:$Rn,
                                                      uimm12s4:$offset)))]>;

// load zero-extended word
def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
      (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;

// Pre-fetch.
def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
                        [(AArch64Prefetch imm:$Rt,
                                        (am_indexed64 GPR64sp:$Rn,
                                                      uimm12s8:$offset))]>;

def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;

//---
// (literal)

def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
  if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
    const DataLayout &DL = MF->getDataLayout();
    Align Align = G->getGlobal()->getPointerAlignment(DL);
    return Align >= 4 && G->getOffset() % 4 == 0;
  }
  if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
    return C->getAlign() >= 4 && C->getOffset() % 4 == 0;
  return false;
}]>;
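// LDR (literal) encodes a 19-bit offset that is scaled by 4, so it can only
// reach 4-byte-aligned targets; the predicate above therefore checks both
// the symbol's alignment and its constant offset.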

def LDRWl : LoadLiteral<0b00, 0, GPR32z, "ldr",
  [(set GPR32z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRXl : LoadLiteral<0b01, 0, GPR64z, "ldr",
  [(set GPR64z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRSl : LoadLiteral<0b00, 1, FPR32Op, "ldr",
  [(set (f32 FPR32Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRDl : LoadLiteral<0b01, 1, FPR64Op, "ldr",
  [(set (f64 FPR64Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRQl : LoadLiteral<0b10, 1, FPR128Op, "ldr",
  [(set (f128 FPR128Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;

// load sign-extended word
def LDRSWl : LoadLiteral<0b10, 0, GPR64z, "ldrsw",
  [(set GPR64z:$Rt, (sextloadi32 (AArch64adr alignedglobal:$label)))]>;

let AddedComplexity = 20 in {
def : Pat<(i64 (zextloadi32 (AArch64adr alignedglobal:$label))),
        (SUBREG_TO_REG (i64 0), (LDRWl $label), sub_32)>;
}

// prefetch
def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
//                   [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;

//---
// (unscaled immediate)
defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64z, "ldur",
                    [(set GPR64z:$Rt,
                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32z, "ldur",
                    [(set GPR32z:$Rt,
                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8Op, "ldur",
                    [(set FPR8Op:$Rt,
                          (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16Op, "ldur",
                    [(set (f16 FPR16Op:$Rt),
                          (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32Op, "ldur",
                    [(set (f32 FPR32Op:$Rt),
                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64Op, "ldur",
                    [(set (f64 FPR64Op:$Rt),
                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128Op, "ldur",
                    [(set (f128 FPR128Op:$Rt),
                          (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;

defm LDURHH
    : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
             [(set GPR32:$Rt,
                    (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURBB
    : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
             [(set GPR32:$Rt,
                    (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// Match all 64-bit-wide loads whose type is compatible with FPR64.
let Predicates = [IsLE] in {
  def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
}
def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;

// Match all 128-bit-wide loads whose type is compatible with FPR128.
let Predicates = [IsLE] in {
  def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
}

// anyext -> zext
def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
// unscaled zext
def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;

//---
// LDR mnemonics fall back to LDUR for negative or unaligned offsets.

// Define new assembler match classes as we want to only match these when
// they don't otherwise match the scaled addressing mode for LDR/STR. Don't
// associate a DiagnosticType either, as we want the diagnostic for the
// canonical form (the scaled operand) to take precedence.
class SImm9OffsetOperand<int Width> : AsmOperandClass {
  let Name = "SImm9OffsetFB" # Width;
  let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
  let RenderMethod = "addImmOperands";
}

def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;

def simm9_offset_fb8 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB8Operand;
}
def simm9_offset_fb16 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB16Operand;
}
def simm9_offset_fb32 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB32Operand;
}
def simm9_offset_fb64 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB64Operand;
}
def simm9_offset_fb128 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB128Operand;
}

def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
               (LDURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
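// For example, "ldr x0, [x1, #-8]" has no valid scaled unsigned-offset
// encoding, so the aliases above let it assemble to "ldur x0, [x1, #-8]"
// instead.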

// zextload -> i64
def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
  (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
  (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;

// load sign-extended half-word
defm LDURSHW
    : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
               [(set GPR32:$Rt,
                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSHX
    : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
              [(set GPR64:$Rt,
                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended byte
defm LDURSBW
    : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
                [(set GPR32:$Rt,
                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSBX
    : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
                [(set GPR64:$Rt,
                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended word
defm LDURSW
    : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
              [(set GPR64:$Rt,
                    (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;

// Zero- and sign-extending aliases from generic LDR* mnemonics to LDUR*.
def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
                (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
                (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
                (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
                (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
                (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
                (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
                (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;

// Pre-fetch.
defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
                  [(AArch64Prefetch imm:$Rt,
                                  (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;

//---
// (unscaled immediate, unprivileged)
defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;

defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;

// load sign-extended half-word
defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;

// load sign-extended byte
defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;

// load sign-extended word
defm LDTRSW  : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;

//---
// (immediate pre-indexed)
def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte and half-word
def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;

//---
// (immediate post-indexed)
def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte and half-word
def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;

//===----------------------------------------------------------------------===//
// Store instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
// FIXME: Use dedicated range-checked addressing mode operand here.
defm STPW : StorePairOffset<0b00, 0, GPR32z, simm7s4, "stp">;
defm STPX : StorePairOffset<0b10, 0, GPR64z, simm7s8, "stp">;
defm STPS : StorePairOffset<0b00, 1, FPR32Op, simm7s4, "stp">;
defm STPD : StorePairOffset<0b01, 1, FPR64Op, simm7s8, "stp">;
defm STPQ : StorePairOffset<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (pre-indexed)
def STPWpre : StorePairPreIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpre : StorePairPreIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpre : StorePairPreIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpre : StorePairPreIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpre : StorePairPreIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (post-indexed)
def STPWpost : StorePairPostIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpost : StorePairPostIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpost : StorePairPostIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpost : StorePairPostIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpost : StorePairPostIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (no allocate)
defm STNPW : StorePairNoAlloc<0b00, 0, GPR32z, simm7s4, "stnp">;
defm STNPX : StorePairNoAlloc<0b10, 0, GPR64z, simm7s8, "stnp">;
defm STNPS : StorePairNoAlloc<0b00, 1, FPR32Op, simm7s4, "stnp">;
defm STNPD : StorePairNoAlloc<0b01, 1, FPR64Op, simm7s8, "stnp">;
defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128Op, simm7s16, "stnp">;

def : Pat<(AArch64stp GPR64z:$Rt, GPR64z:$Rt2, (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (STPXi GPR64z:$Rt, GPR64z:$Rt2, GPR64sp:$Rn, simm7s8:$offset)>;

def : Pat<(AArch64stnp FPR128:$Rt, FPR128:$Rt2, (am_indexed7s128 GPR64sp:$Rn, simm7s16:$offset)),
          (STNPQi FPR128:$Rt, FPR128:$Rt2, GPR64sp:$Rn, simm7s16:$offset)>;

//---
// (Register offset)

// Integer
defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
defm STRW  : Store32RO<0b10, 0, 0b00, GPR32, "str",  i32, store>;
defm STRX  : Store64RO<0b11, 0, 0b00, GPR64, "str",  i64, store>;

// Floating-point
defm STRB : Store8RO< 0b00,  1, 0b00, FPR8Op,   "str", untyped, store>;
defm STRH : Store16RO<0b01,  1, 0b00, FPR16Op,  "str", f16,     store>;
defm STRS : Store32RO<0b10,  1, 0b00, FPR32Op,  "str", f32,     store>;
defm STRD : Store64RO<0b11,  1, 0b00, FPR64Op,  "str", f64,     store>;
defm STRQ : Store128RO<0b00, 1, 0b10, FPR128Op, "str">;

let Predicates = [UseSTRQro], AddedComplexity = 10 in {
  def : Pat<(store (f128 FPR128:$Rt),
                        (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
                                        ro_Wextend128:$extend)),
            (STRQroW FPR128:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend)>;
  def : Pat<(store (f128 FPR128:$Rt),
                        (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
                                        ro_Xextend128:$extend)),
            (STRQroX FPR128:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend128:$extend)>;
}
3321
3322multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
3323                                 Instruction STRW, Instruction STRX> {
3324
3325  def : Pat<(storeop GPR64:$Rt,
3326                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
3327            (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
3328                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
3329
3330  def : Pat<(storeop GPR64:$Rt,
3331                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
3332            (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
3333                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
3334}

let AddedComplexity = 10 in {
  // truncstore i64
  defm : TruncStoreFrom64ROPat<ro8,  truncstorei8,  STRBBroW, STRBBroX>;
  defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
  defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW,  STRWroX>;
}

multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
                         Instruction STRW, Instruction STRX> {
  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
// Match all 64-bit-wide stores whose type is compatible with FPR64.
let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4bf16, FPR64, STRDroW, STRDroX>;
}

defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;

// Match all 128-bit-wide stores whose type is compatible with FPR128.
let Predicates = [IsLE, UseSTRQro] in {
  // We must use ST1 to store vectors in big-endian.
  defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8bf16, FPR128, STRQroW, STRQroX>;
}
} // AddedComplexity = 10

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
                              ValueType VecTy, ValueType STy,
                              SubRegIndex SubRegIdx,
                              Instruction STRW, Instruction STRX> {

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 19 in {
  defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro16,         store, v8f16, f16, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro32,         store, v4i32, i32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro32,         store, v4f32, f32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro64,         store, v2i64, i64, dsub, STRDroW, STRDroX>;
  defm : VecROStoreLane0Pat<ro64,         store, v2f64, f64, dsub, STRDroW, STRDroX>;
}
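// Storing lane 0 needs no lane extract: the element already occupies the low
// bits of the vector register, so e.g. storing lane 0 of a v4f32 is simply
//   str s0, [x0, x1]
// on the ssub subregister.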

//---
// (unsigned immediate)
defm STRX : StoreUIz<0b11, 0, 0b00, GPR64z, uimm12s8, "str",
                    [(store GPR64z:$Rt,
                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRW : StoreUIz<0b10, 0, 0b00, GPR32z, uimm12s4, "str",
                    [(store GPR32z:$Rt,
                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRB : StoreUI<0b00, 1, 0b00, FPR8Op, uimm12s1, "str",
                    [(store FPR8Op:$Rt,
                            (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
defm STRH : StoreUI<0b01, 1, 0b00, FPR16Op, uimm12s2, "str",
                    [(store (f16 FPR16Op:$Rt),
                            (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
defm STRS : StoreUI<0b10, 1, 0b00, FPR32Op, uimm12s4, "str",
                    [(store (f32 FPR32Op:$Rt),
                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRD : StoreUI<0b11, 1, 0b00, FPR64Op, uimm12s8, "str",
                    [(store (f64 FPR64Op:$Rt),
                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRQ : StoreUI<0b00, 1, 0b10, FPR128Op, uimm12s16, "str", []>;

defm STRHH : StoreUIz<0b01, 0, 0b00, GPR32z, uimm12s2, "strh",
                     [(truncstorei16 GPR32z:$Rt,
                                     (am_indexed16 GPR64sp:$Rn,
                                                   uimm12s2:$offset))]>;
defm STRBB : StoreUIz<0b00, 0, 0b00, GPR32z, uimm12s1,  "strb",
                     [(truncstorei8 GPR32z:$Rt,
                                    (am_indexed8 GPR64sp:$Rn,
                                                 uimm12s1:$offset))]>;

// bf16 store pattern
def : Pat<(store (bf16 FPR16Op:$Rt),
                 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
          (STRHui FPR16:$Rt, GPR64sp:$Rn, uimm12s2:$offset)>;

let AddedComplexity = 10 in {

// Match all 64-bit-wide stores whose type is compatible with FPR64.
def : Pat<(store (v1i64 FPR64:$Rt),
                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(store (v1f64 FPR64:$Rt),
                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4bf16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
}

// Match all 128-bit-wide stores whose type is compatible with FPR128.
def : Pat<(store (f128  FPR128:$Rt),
                 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
          (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8bf16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
}

// truncstore i64
def : Pat<(truncstorei32 GPR64:$Rt,
                         (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
  (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt,
                         (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
  (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
  (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;

} // AddedComplexity = 10

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecStoreLane0Pat<ComplexPattern UIAddrMode, SDPatternOperator storeop,
                            ValueType VTy, ValueType STy,
                            SubRegIndex SubRegIdx, Operand IndexType,
                            Instruction STR> {
  def : Pat<(storeop (STy (vector_extract (VTy VecListOne128:$Vt), 0)),
                     (UIAddrMode GPR64sp:$Rn, IndexType:$offset)),
            (STR (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                 GPR64sp:$Rn, IndexType:$offset)>;
}

let AddedComplexity = 19 in {
  defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>;
  defm : VecStoreLane0Pat<am_indexed16,         store, v8f16, f16, hsub, uimm12s2, STRHui>;
  defm : VecStoreLane0Pat<am_indexed32,         store, v4i32, i32, ssub, uimm12s4, STRSui>;
  defm : VecStoreLane0Pat<am_indexed32,         store, v4f32, f32, ssub, uimm12s4, STRSui>;
  defm : VecStoreLane0Pat<am_indexed64,         store, v2i64, i64, dsub, uimm12s8, STRDui>;
  defm : VecStoreLane0Pat<am_indexed64,         store, v2f64, f64, dsub, uimm12s8, STRDui>;
}

//---
// (unscaled immediate)
defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64z, "stur",
                         [(store GPR64z:$Rt,
                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32z, "stur",
                         [(store GPR32z:$Rt,
                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8Op, "stur",
                         [(store FPR8Op:$Rt,
                                 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16Op, "stur",
                         [(store (f16 FPR16Op:$Rt),
                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32Op, "stur",
                         [(store (f32 FPR32Op:$Rt),
                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64Op, "stur",
                         [(store (f64 FPR64Op:$Rt),
                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128Op, "stur",
                         [(store (f128 FPR128Op:$Rt),
                                 (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32z, "sturh",
                         [(truncstorei16 GPR32z:$Rt,
                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32z, "sturb",
                         [(truncstorei8 GPR32z:$Rt,
                                  (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;

// Armv8.4 Weaker Release Consistency enhancements
//         LDAPR & STLR with Immediate Offset instructions
let Predicates = [HasRCPC_IMMO] in {
defm STLURB     : BaseStoreUnscaleV84<"stlurb",  0b00, 0b00, GPR32>;
defm STLURH     : BaseStoreUnscaleV84<"stlurh",  0b01, 0b00, GPR32>;
defm STLURW     : BaseStoreUnscaleV84<"stlur",   0b10, 0b00, GPR32>;
defm STLURX     : BaseStoreUnscaleV84<"stlur",   0b11, 0b00, GPR64>;
defm LDAPURB    : BaseLoadUnscaleV84<"ldapurb",  0b00, 0b01, GPR32>;
defm LDAPURSBW  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b11, GPR32>;
defm LDAPURSBX  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b10, GPR64>;
defm LDAPURH    : BaseLoadUnscaleV84<"ldapurh",  0b01, 0b01, GPR32>;
defm LDAPURSHW  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b11, GPR32>;
defm LDAPURSHX  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b10, GPR64>;
defm LDAPUR     : BaseLoadUnscaleV84<"ldapur",   0b10, 0b01, GPR32>;
defm LDAPURSW   : BaseLoadUnscaleV84<"ldapursw", 0b10, 0b10, GPR64>;
defm LDAPURX    : BaseLoadUnscaleV84<"ldapur",   0b11, 0b01, GPR64>;
}
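// These take a 9-bit signed byte offset, so an acquire load or release store
// at a small displacement from the base needs no separate address add, e.g.
//   stlur w0, [x1, #-4]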

// Match all 64-bit-wide stores whose type is compatible with FPR64.
def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let AddedComplexity = 10 in {

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4bf16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

// Match all 128-bit-wide stores whose type is compatible with FPR128.
def : Pat<(store (f128 FPR128:$Rt), (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
          (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8bf16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

} // AddedComplexity = 10

// unscaled i64 truncating stores
def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
  (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
  (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
  (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecStoreULane0Pat<SDPatternOperator StoreOp,
                             ValueType VTy, ValueType STy,
                             SubRegIndex SubRegIdx, Instruction STR> {
  defm : VecStoreLane0Pat<am_unscaled128, StoreOp, VTy, STy, SubRegIdx, simm9, STR>;
}

let AddedComplexity = 19 in {
  defm : VecStoreULane0Pat<truncstorei16, v8i16, i32, hsub, STURHi>;
  defm : VecStoreULane0Pat<store,         v8f16, f16, hsub, STURHi>;
  defm : VecStoreULane0Pat<store,         v4i32, i32, ssub, STURSi>;
  defm : VecStoreULane0Pat<store,         v4f32, f32, ssub, STURSi>;
  defm : VecStoreULane0Pat<store,         v2i64, i64, dsub, STURDi>;
  defm : VecStoreULane0Pat<store,         v2f64, f64, dsub, STURDi>;
}

//---
// STR mnemonics fall back to STUR for negative or unaligned offsets.
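// For example, "str x0, [x1, #1]" cannot be encoded as a scaled uimm12 form
// (the offset must be a multiple of 8), so the assembler accepts it via these
// aliases and emits the equivalent "stur x0, [x1, #1]".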
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;

def : InstAlias<"strb $Rt, [$Rn, $offset]",
                (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"strh $Rt, [$Rn, $offset]",
                (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;

//---
// (unscaled immediate, unprivileged)
defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;

defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;
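// The STTR* variants are unprivileged: executed at EL1 they access memory
// with EL0 permissions. They have no selection patterns here, so they are
// only reachable from hand-written assembly.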

//---
// (immediate pre-indexed)
def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32z, "str",  pre_store, i32>;
def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64z, "str",  pre_store, i64>;
def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8Op,  "str",  pre_store, untyped>;
def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16Op, "str",  pre_store, f16>;
def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32Op, "str",  pre_store, f32>;
def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64Op, "str",  pre_store, f64>;
def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128Op, "str", pre_store, f128>;

def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32z, "strb", pre_truncsti8,  i32>;
def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32z, "strh", pre_truncsti16, i32>;

// truncstore i64
def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
           simm9:$off)>;
def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
            simm9:$off)>;
def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
            simm9:$off)>;

def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;

//---
// (immediate post-indexed)
def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32z,  "str", post_store, i32>;
def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64z,  "str", post_store, i64>;
def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8Op,   "str", post_store, untyped>;
def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16Op,  "str", post_store, f16>;
def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32Op,  "str", post_store, f32>;
def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64Op,  "str", post_store, f64>;
def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128Op, "str", post_store, f128>;

def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32z, "strb", post_truncsti8, i32>;
def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32z, "strh", post_truncsti16, i32>;

// truncstore i64
def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
            simm9:$off)>;
def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
             simm9:$off)>;
def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
             simm9:$off)>;

def : Pat<(post_store (bf16 FPR16:$Rt), GPR64sp:$addr, simm9:$off),
          (STRHpost FPR16:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4bf16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8bf16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;

//===----------------------------------------------------------------------===//
// Load/store exclusive instructions.
//===----------------------------------------------------------------------===//

def LDARW  : LoadAcquire   <0b10, 1, 1, 0, 1, GPR32, "ldar">;
def LDARX  : LoadAcquire   <0b11, 1, 1, 0, 1, GPR64, "ldar">;
def LDARB  : LoadAcquire   <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
def LDARH  : LoadAcquire   <0b01, 1, 1, 0, 1, GPR32, "ldarh">;

def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;

def LDXRW  : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
def LDXRX  : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
def LDXRB  : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
def LDXRH  : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;

def STLRW  : StoreRelease  <0b10, 1, 0, 0, 1, GPR32, "stlr">;
def STLRX  : StoreRelease  <0b11, 1, 0, 0, 1, GPR64, "stlr">;
def STLRB  : StoreRelease  <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
def STLRH  : StoreRelease  <0b01, 1, 0, 0, 1, GPR32, "stlrh">;

def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;

def STXRW  : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
def STXRX  : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
def STXRB  : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
def STXRH  : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;

def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;

def LDXPW  : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
def LDXPX  : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;

def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;

def STXPW  : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
def STXPX  : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;
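// Exclusive loads/stores build atomic read-modify-write loops; the exclusive
// store writes a status register (0 on success). A typical increment loop:
//   1: ldaxr x0, [x2]
//      add   x0, x0, #1
//      stlxr w1, x0, [x2]
//      cbnz  w1, 1b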

let Predicates = [HasLOR] in {
  // v8.1a "Limited Order Region" extension load-acquire instructions
  def LDLARW  : LoadAcquire   <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
  def LDLARX  : LoadAcquire   <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
  def LDLARB  : LoadAcquire   <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
  def LDLARH  : LoadAcquire   <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;

  // v8.1a "Limited Order Region" extension store-release instructions
  def STLLRW  : StoreRelease   <0b10, 1, 0, 0, 0, GPR32, "stllr">;
  def STLLRX  : StoreRelease   <0b11, 1, 0, 0, 0, GPR64, "stllr">;
  def STLLRB  : StoreRelease   <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
  def STLLRH  : StoreRelease   <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
}

//===----------------------------------------------------------------------===//
// Scaled floating point to integer conversion instructions.
//===----------------------------------------------------------------------===//

defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
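// The scaled forms produce a fixed-point result: "fcvtzs w0, s0, #8" computes
// w0 = (i32)(s0 * 2^8) with round-toward-zero, which is why the fixed-point
// patterns below match an fmul by a power-of-two scale operand.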

// AArch64's FCVT instructions saturate when out of range.
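// e.g. (i32 (fp_to_sint_sat f32:$Rn, i32)) selects a single "fcvtzs w0, s0";
// no extra clamping code is needed because the instruction itself clamps the
// result to the integer range.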
multiclass FPToIntegerSatPats<SDNode to_int_sat, string INST> {
  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (to_int_sat f16:$Rn, i32)),
            (!cast<Instruction>(INST # UWHr) f16:$Rn)>;
  def : Pat<(i64 (to_int_sat f16:$Rn, i64)),
            (!cast<Instruction>(INST # UXHr) f16:$Rn)>;
  }
  def : Pat<(i32 (to_int_sat f32:$Rn, i32)),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int_sat f32:$Rn, i64)),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int_sat f64:$Rn, i32)),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int_sat f64:$Rn, i64)),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;

  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i32:$scale), i32)),
            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
  def : Pat<(i64 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i64:$scale), i64)),
            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
  }
  def : Pat<(i32 (to_int_sat (fmul f32:$Rn, fixedpoint_f32_i32:$scale), i32)),
            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
  def : Pat<(i64 (to_int_sat (fmul f32:$Rn, fixedpoint_f32_i64:$scale), i64)),
            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
  def : Pat<(i32 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i32:$scale), i32)),
            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
  def : Pat<(i64 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i64:$scale), i64)),
            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
}

defm : FPToIntegerSatPats<fp_to_sint_sat, "FCVTZS">;
defm : FPToIntegerSatPats<fp_to_uint_sat, "FCVTZU">;

multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
  def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
  }
  def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
  def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
  def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
  def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;

  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
  }
  def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
  def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
}

defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;

multiclass FPToIntegerPats<SDNode to_int, SDNode to_int_sat, SDNode round, string INST> {
  def : Pat<(i32 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;

  // These instructions saturate like fp_to_[su]int_sat.
  let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (to_int_sat (round f16:$Rn), i32)),
            (!cast<Instruction>(INST # UWHr) f16:$Rn)>;
  def : Pat<(i64 (to_int_sat (round f16:$Rn), i64)),
            (!cast<Instruction>(INST # UXHr) f16:$Rn)>;
  }
  def : Pat<(i32 (to_int_sat (round f32:$Rn), i32)),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int_sat (round f32:$Rn), i64)),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int_sat (round f64:$Rn), i32)),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int_sat (round f64:$Rn), i64)),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
}

defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fceil,  "FCVTPS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fceil,  "FCVTPU">;
defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, ffloor, "FCVTMS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, ffloor, "FCVTMU">;
defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, ftrunc, "FCVTZS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, ftrunc, "FCVTZU">;
defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fround, "FCVTAS">;
defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fround, "FCVTAU">;
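// These fold an explicit rounding step into the conversion's rounding mode,
// e.g. (i32 (fp_to_sint (ffloor f32:$Rn))) becomes a single "fcvtms w0, s0"
// instead of a frintm followed by fcvtzs.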

let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (any_lround f16:$Rn)),
            (!cast<Instruction>(FCVTASUWHr) f16:$Rn)>;
  def : Pat<(i64 (any_lround f16:$Rn)),
            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
  def : Pat<(i64 (any_llround f16:$Rn)),
            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
}
def : Pat<(i32 (any_lround f32:$Rn)),
          (!cast<Instruction>(FCVTASUWSr) f32:$Rn)>;
def : Pat<(i32 (any_lround f64:$Rn)),
          (!cast<Instruction>(FCVTASUWDr) f64:$Rn)>;
def : Pat<(i64 (any_lround f32:$Rn)),
          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
def : Pat<(i64 (any_lround f64:$Rn)),
          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
def : Pat<(i64 (any_llround f32:$Rn)),
          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
def : Pat<(i64 (any_llround f64:$Rn)),
          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
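// lround/llround round to nearest with ties away from zero, which is exactly
// the rounding behaviour of FCVTAS, so each call folds to one instruction.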

//===----------------------------------------------------------------------===//
// Scaled integer to floating point conversion instructions.
//===----------------------------------------------------------------------===//

defm SCVTF : IntegerToFP<0, "scvtf", any_sint_to_fp>;
defm UCVTF : IntegerToFP<1, "ucvtf", any_uint_to_fp>;

//===----------------------------------------------------------------------===//
// Unscaled integer to floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FMOV : UnscaledConversion<"fmov">;

// Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
def FMOVH0 : Pseudo<(outs FPR16:$Rd), (ins), [(set f16:$Rd, (fpimm0))]>,
    Sched<[WriteF]>, Requires<[HasFullFP16]>;
def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
    Sched<[WriteF]>;
def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
    Sched<[WriteF]>;
}
// Similarly add aliases
def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
    Requires<[HasFullFP16]>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;

// Pattern for FP16 immediates
let Predicates = [HasFullFP16] in {
  def : Pat<(f16 fpimm:$in),
    (FMOVWHr (MOVi32imm (bitcast_fpimm_to_i32 f16:$in)))>;
}

//===----------------------------------------------------------------------===//
// Floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FCVT : FPConversion<"fcvt">;

//===----------------------------------------------------------------------===//
// Floating point single operand instructions.
//===----------------------------------------------------------------------===//

defm FABS   : SingleOperandFPDataNoException<0b0001, "fabs", fabs>;
defm FMOV   : SingleOperandFPDataNoException<0b0000, "fmov">;
defm FNEG   : SingleOperandFPDataNoException<0b0010, "fneg", fneg>;
defm FRINTA : SingleOperandFPData<0b1100, "frinta", any_fround>;
defm FRINTI : SingleOperandFPData<0b1111, "frinti", any_fnearbyint>;
defm FRINTM : SingleOperandFPData<0b1010, "frintm", any_ffloor>;
defm FRINTN : SingleOperandFPData<0b1000, "frintn", any_froundeven>;
defm FRINTP : SingleOperandFPData<0b1001, "frintp", any_fceil>;

defm FRINTX : SingleOperandFPData<0b1110, "frintx", any_frint>;
defm FRINTZ : SingleOperandFPData<0b1011, "frintz", any_ftrunc>;

let SchedRW = [WriteFDiv] in {
defm FSQRT  : SingleOperandFPData<0b0011, "fsqrt", any_fsqrt>;
}

let Predicates = [HasFRInt3264] in {
  defm FRINT32Z : FRIntNNT<0b00, "frint32z", int_aarch64_frint32z>;
  defm FRINT64Z : FRIntNNT<0b10, "frint64z", int_aarch64_frint64z>;
  defm FRINT32X : FRIntNNT<0b01, "frint32x", int_aarch64_frint32x>;
  defm FRINT64X : FRIntNNT<0b11, "frint64x", int_aarch64_frint64x>;
} // HasFRInt3264

// Emitting strict_lrint as two instructions is valid as any exceptions that
// occur will happen in exactly one of the instructions (e.g. if the input is
// not an integer the inexact exception will happen in the FRINTX but not then
// in the FCVTZS as the output of FRINTX is an integer).
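// e.g. (i32 (lrint f32:$Rn)) becomes:
//   frintx s0, s0    ; round to integral, may raise Inexact
//   fcvtzs w0, s0    ; exact conversion of the now-integral value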
let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (any_lrint f16:$Rn)),
            (FCVTZSUWHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
  def : Pat<(i64 (any_lrint f16:$Rn)),
            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
  def : Pat<(i64 (any_llrint f16:$Rn)),
            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
}
def : Pat<(i32 (any_lrint f32:$Rn)),
          (FCVTZSUWSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i32 (any_lrint f64:$Rn)),
          (FCVTZSUWDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
def : Pat<(i64 (any_lrint f32:$Rn)),
          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i64 (any_lrint f64:$Rn)),
          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
def : Pat<(i64 (any_llrint f32:$Rn)),
          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i64 (any_llrint f64:$Rn)),
          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;

//===----------------------------------------------------------------------===//
// Floating point two operand instructions.
//===----------------------------------------------------------------------===//

defm FADD   : TwoOperandFPData<0b0010, "fadd", any_fadd>;
let SchedRW = [WriteFDiv] in {
defm FDIV   : TwoOperandFPData<0b0001, "fdiv", any_fdiv>;
}
defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", any_fmaxnum>;
defm FMAX   : TwoOperandFPData<0b0100, "fmax", any_fmaximum>;
defm FMINNM : TwoOperandFPData<0b0111, "fminnm", any_fminnum>;
defm FMIN   : TwoOperandFPData<0b0101, "fmin", any_fminimum>;
let SchedRW = [WriteFMul] in {
defm FMUL   : TwoOperandFPData<0b0000, "fmul", any_fmul>;
defm FNMUL  : TwoOperandFPDataNeg<0b1000, "fnmul", any_fmul>;
}
defm FSUB   : TwoOperandFPData<0b0011, "fsub", any_fsub>;

def : Pat<(v1f64 (fmaximum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminimum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;

//===----------------------------------------------------------------------===//
// Floating point three operand instructions.
//===----------------------------------------------------------------------===//

defm FMADD  : ThreeOperandFPData<0, 0, "fmadd", any_fma>;
defm FMSUB  : ThreeOperandFPData<0, 1, "fmsub",
     TriOpFrag<(any_fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
     TriOpFrag<(fneg (any_fma node:$LHS, node:$MHS, node:$RHS))> >;
defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
     TriOpFrag<(any_fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;

// The following def pats catch the case where the LHS of an FMA is negated.
// The TriOpFrags above catch the case where the middle operand is negated.

// N.b. FMSUB etc have the accumulator at the *end* of (ins), unlike
// the NEON variant.

// Here we handle first "a + (-b)*c", which maps onto FMSUB:
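// fma(-b, c, a) = a - b*c, which is exactly FMSUB; when the accumulator is
// negated as well, fma(-b, c, -a) = -(a + b*c), which is FNMADD (see the
// second group of patterns below).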

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, FPR16:$Ra)),
          (FMSUBHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
          (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
          (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

// Now it's time for "(-a) + (-b)*c"

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, (fneg FPR16:$Ra))),
          (FNMADDHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

//===----------------------------------------------------------------------===//
// Floating point comparison instructions.
//===----------------------------------------------------------------------===//

defm FCMPE : FPComparison<1, "fcmpe", AArch64strict_fcmpe>;
defm FCMP  : FPComparison<0, "fcmp", AArch64any_fcmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional comparison instructions.
//===----------------------------------------------------------------------===//

defm FCCMPE : FPCondComparison<1, "fccmpe">;
defm FCCMP  : FPCondComparison<0, "fccmp", AArch64fccmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional select instruction.
//===----------------------------------------------------------------------===//

defm FCSEL : FPCondSelect<"fcsel">;

// CSEL instructions providing f128 types need to be handled by a
// pseudo-instruction since the eventual code will need to introduce basic
// blocks and control flow.
def F128CSEL : Pseudo<(outs FPR128:$Rd),
                      (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
                      [(set (f128 FPR128:$Rd),
                            (AArch64csel FPR128:$Rn, FPR128:$Rm,
                                       (i32 imm:$cond), NZCV))]> {
  let Uses = [NZCV];
  let usesCustomInserter = 1;
  let hasNoSchedulingInfo = 1;
}

//===----------------------------------------------------------------------===//
// Instructions used for emitting unwind opcodes on ARM64 Windows.
//===----------------------------------------------------------------------===//
let isPseudo = 1 in {
  def SEH_StackAlloc : Pseudo<(outs), (ins i32imm:$size), []>, Sched<[]>;
  def SEH_SaveFPLR : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFPLR_X : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SetFP : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_AddFP : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_Nop : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_PrologEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_EpilogStart : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_EpilogEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions for Windows EH
//===----------------------------------------------------------------------===//
let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1, isPseudo = 1 in {
   def CLEANUPRET : Pseudo<(outs), (ins), [(cleanupret)]>, Sched<[]>;
   let usesCustomInserter = 1 in
     def CATCHRET : Pseudo<(outs), (ins am_brcond:$dst, am_brcond:$src), [(catchret bb:$dst, bb:$src)]>,
                    Sched<[]>;
}

// Pseudo instructions for homogeneous prolog/epilog
let isPseudo = 1 in {
  // Save CSRs in order, {FPOffset}
  def HOM_Prolog : Pseudo<(outs), (ins variable_ops), []>, Sched<[]>;
  // Restore CSRs in order
  def HOM_Epilog : Pseudo<(outs), (ins variable_ops), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// Floating point immediate move.
//===----------------------------------------------------------------------===//

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
defm FMOV : FPMoveImmediate<"fmov">;
}

//===----------------------------------------------------------------------===//
// Advanced SIMD two vector instructions.
//===----------------------------------------------------------------------===//

defm UABDL   : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
                                          AArch64uabd>;
// Match UABDL in log2-shuffle patterns.
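// DAG combines may expand abs(x) into "(x + s) ^ s", where s = x >>s 15 is
// the broadcast sign bit; the xor patterns below recognise that expansion so
// a single UABDL is still selected for abs(zext(a) - zext(b)).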
def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
                           (zext (v8i8 V64:$opB))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (v8i8 V64:$opA)),
                                (zext (v8i8 V64:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 (v16i8 V128:$opA))),
                           (zext (extract_high_v16i8 (v16i8 V128:$opB)))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (extract_high_v16i8 (v16i8 V128:$opA))),
                                (zext (extract_high_v16i8 (v16i8 V128:$opB)))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
                           (zext (v4i16 V64:$opB))))),
          (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 (v8i16 V128:$opA))),
                           (zext (extract_high_v8i16 (v8i16 V128:$opB)))))),
          (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
                           (zext (v2i32 V64:$opB))))),
          (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 (v4i32 V128:$opA))),
                           (zext (extract_high_v4i32 (v4i32 V128:$opB)))))),
          (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;

defm ABS    : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
defm CLS    : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
defm CLZ    : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
defm CMEQ   : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
defm CNT    : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
defm FABS   : SIMDTwoVectorFPNoException<0, 1, 0b01111, "fabs", fabs>;

def : Pat<(v8i8 (AArch64vashr (v8i8 V64:$Rn), (i32 7))),
          (CMLTv8i8rz V64:$Rn)>;
def : Pat<(v4i16 (AArch64vashr (v4i16 V64:$Rn), (i32 15))),
          (CMLTv4i16rz V64:$Rn)>;
def : Pat<(v2i32 (AArch64vashr (v2i32 V64:$Rn), (i32 31))),
          (CMLTv2i32rz V64:$Rn)>;
def : Pat<(v16i8 (AArch64vashr (v16i8 V128:$Rn), (i32 7))),
          (CMLTv16i8rz V128:$Rn)>;
def : Pat<(v8i16 (AArch64vashr (v8i16 V128:$Rn), (i32 15))),
          (CMLTv8i16rz V128:$Rn)>;
def : Pat<(v4i32 (AArch64vashr (v4i32 V128:$Rn), (i32 31))),
          (CMLTv4i32rz V128:$Rn)>;
def : Pat<(v2i64 (AArch64vashr (v2i64 V128:$Rn), (i32 63))),
          (CMLTv2i64rz V128:$Rn)>;
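// An arithmetic shift right by (element width - 1) broadcasts the sign bit,
// yielding all-ones for negative lanes and zero otherwise; that is exactly
// the result of "cmlt #0", so these shifts are selected as CMLT above.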
4331
4332defm FCMEQ  : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
4333defm FCMGE  : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
4334defm FCMGT  : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
4335defm FCMLE  : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
4336defm FCMLT  : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
4337defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
4338defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
4339defm FCVTL  : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
4340def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
4341          (FCVTLv4i16 V64:$Rn)>;
4342def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
4343                                                              (i64 4)))),
4344          (FCVTLv8i16 V128:$Rn)>;
4345def : Pat<(v2f64 (any_fpextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;
4346
4347def : Pat<(v4f32 (any_fpextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;
4348
4349defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
4350defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
4351defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
4352defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
4353defm FCVTN  : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
4354def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
4355          (FCVTNv4i16 V128:$Rn)>;
4356def : Pat<(concat_vectors V64:$Rd,
4357                          (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
4358          (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
4359def : Pat<(v2f32 (any_fpround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
4360def : Pat<(v4f16 (any_fpround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
4361def : Pat<(concat_vectors V64:$Rd, (v2f32 (any_fpround (v2f64 V128:$Rn)))),
4362          (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
4363defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
4364defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
4365defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
4366                                        int_aarch64_neon_fcvtxn>;
4367defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", any_fp_to_sint>;
4368defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", any_fp_to_uint>;
4369
4370// AArch64's FCVT instructions saturate when out of range.
4371multiclass SIMDTwoVectorFPToIntSatPats<SDNode to_int_sat, string INST> {
4372  let Predicates = [HasFullFP16] in {
4373  def : Pat<(v4i16 (to_int_sat v4f16:$Rn, i16)),
4374            (!cast<Instruction>(INST # v4f16) v4f16:$Rn)>;
4375  def : Pat<(v8i16 (to_int_sat v8f16:$Rn, i16)),
4376            (!cast<Instruction>(INST # v8f16) v8f16:$Rn)>;
4377  }
4378  def : Pat<(v2i32 (to_int_sat v2f32:$Rn, i32)),
4379            (!cast<Instruction>(INST # v2f32) v2f32:$Rn)>;
4380  def : Pat<(v4i32 (to_int_sat v4f32:$Rn, i32)),
4381            (!cast<Instruction>(INST # v4f32) v4f32:$Rn)>;
4382  def : Pat<(v2i64 (to_int_sat v2f64:$Rn, i64)),
4383            (!cast<Instruction>(INST # v2f64) v2f64:$Rn)>;
4384}
4385defm : SIMDTwoVectorFPToIntSatPats<fp_to_sint_sat, "FCVTZS">;
4386defm : SIMDTwoVectorFPToIntSatPats<fp_to_uint_sat, "FCVTZU">;
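// For example (an illustrative sketch, not generated from this file): the IR
//   %r = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> %x)
// is covered by the FCVTZS pattern above and selects straight to
//   fcvtzs v0.4s, v0.4s
// with no extra clamping code, because the instruction itself saturates.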

def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;

def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;

defm FNEG   : SIMDTwoVectorFPNoException<1, 1, 0b01111, "fneg", fneg>;
defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", any_fround>;
defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", any_fnearbyint>;
defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", any_ffloor>;
defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", any_froundeven>;
defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", any_fceil>;
defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", any_frint>;
defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", any_ftrunc>;

let Predicates = [HasFRInt3264] in {
  defm FRINT32Z : FRIntNNTVector<0, 0, "frint32z", int_aarch64_neon_frint32z>;
  defm FRINT64Z : FRIntNNTVector<0, 1, "frint64z", int_aarch64_neon_frint64z>;
  defm FRINT32X : FRIntNNTVector<1, 0, "frint32x", int_aarch64_neon_frint32x>;
  defm FRINT64X : FRIntNNTVector<1, 1, "frint64x", int_aarch64_neon_frint64x>;
} // HasFRInt3264

defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
defm FSQRT  : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", any_fsqrt>;
defm NEG    : SIMDTwoVectorBHSD<1, 0b01011, "neg",
                               UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm NOT    : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
// Aliases for MVN -> NOT.
def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
                (NOTv8i8 V64:$Vd, V64:$Vn)>;
def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
                (NOTv16i8 V128:$Vd, V128:$Vn)>;

def : Pat<(vnot (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(vnot (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(vnot (v1i64 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
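// NOT is a purely bitwise operation, so the single byte-sized variant per
// register width above covers every integer vector type.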

defm RBIT   : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", bitreverse>;
defm REV16  : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
defm REV32  : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
defm REV64  : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
       BinOpFrag<(add node:$LHS, (AArch64saddlp node:$RHS))> >;
defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", AArch64saddlp>;
defm SCVTF  : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", any_sint_to_fp>;
defm SHLL   : SIMDVectorLShiftLongBySizeBHS;
defm SQABS  : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
       BinOpFrag<(add node:$LHS, (AArch64uaddlp node:$RHS))> >;
defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp", AArch64uaddlp>;
defm UCVTF  : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", any_uint_to_fp>;
defm UQXTN  : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
defm XTN    : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;

def : Pat<(v4f16  (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
def : Pat<(v4f16  (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
def : Pat<(v4bf16 (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
def : Pat<(v4bf16 (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
def : Pat<(v8f16  (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
def : Pat<(v8f16  (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
def : Pat<(v8bf16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
def : Pat<(v8bf16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
def : Pat<(v2f32  (AArch64rev64 V64:$Rn)),  (REV64v2i32 V64:$Rn)>;
def : Pat<(v4f32  (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;

// Patterns for vector long shift (by element width). These need to match all
// three of zext, sext and anyext, so it's easier to pull the patterns out of
// the instruction definition.
multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
  def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
            (SHLLv8i8 V64:$Rn)>;
  def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 (v16i8 V128:$Rn)))), (i32 8)),
            (SHLLv16i8 V128:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
            (SHLLv4i16 V64:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 (v8i16 V128:$Rn)))), (i32 16)),
            (SHLLv8i16 V128:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
            (SHLLv2i32 V64:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 (v4i32 V128:$Rn)))), (i32 32)),
            (SHLLv4i32 V128:$Rn)>;
}

defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
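// For example (illustrative only): the DAG
//   (v8i16 (shl (zext (v8i8 V64:$Rn)), (i32 8)))
// becomes a single "shll v0.8h, v1.8b, #8". Shifting by exactly the source
// element width leaves only zeros in the low bits, which is why one
// instruction can stand in for the zext, sext and anyext variants alike.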

// Constant vector values, used in the S/UQXTN patterns below.
def VImmFF:   PatLeaf<(AArch64NvCast (v2i64 (AArch64movi_edit (i32 85))))>;
def VImmFFFF: PatLeaf<(AArch64NvCast (v2i64 (AArch64movi_edit (i32 51))))>;
def VImm7F:   PatLeaf<(AArch64movi_shift (i32 127), (i32 0))>;
def VImm80:   PatLeaf<(AArch64mvni_shift (i32 127), (i32 0))>;
def VImm7FFF: PatLeaf<(AArch64movi_msl (i32 127), (i32 264))>;
def VImm8000: PatLeaf<(AArch64mvni_msl (i32 127), (i32 264))>;
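// Roughly (as used by the patterns below): VImmFF/VImmFFFF are the 64-bit
// MOVI byte masks 0x00FF..FF and 0x0000FFFF.. reinterpreted as v8i16/v4i32
// splats of 255 and 65535; VImm7F/VImm80 splat 0x007F and its complement
// 0xFF80 (127 / -128) per halfword; VImm7FFF/VImm8000 use the MSL
// (shift-ones) form to splat 0x7FFF and 0xFFFF8000 (32767 / -32768) per word.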

// trunc(umin(X, 255)) -> UQXTN v8i8
def : Pat<(v8i8 (trunc (umin (v8i16 V128:$Vn), (v8i16 VImmFF)))),
          (UQXTNv8i8 V128:$Vn)>;
// trunc(umin(X, 65535)) -> UQXTN v4i16
def : Pat<(v4i16 (trunc (umin (v4i32 V128:$Vn), (v4i32 VImmFFFF)))),
          (UQXTNv4i16 V128:$Vn)>;
// trunc(smin(smax(X, -128), 127)) -> SQXTN
//  with reversed min/max
def : Pat<(v8i8 (trunc (smin (smax (v8i16 V128:$Vn), (v8i16 VImm80)),
                             (v8i16 VImm7F)))),
          (SQXTNv8i8 V128:$Vn)>;
def : Pat<(v8i8 (trunc (smax (smin (v8i16 V128:$Vn), (v8i16 VImm7F)),
                             (v8i16 VImm80)))),
          (SQXTNv8i8 V128:$Vn)>;
// trunc(smin(smax(X, -32768), 32767)) -> SQXTN
//  with reversed min/max
def : Pat<(v4i16 (trunc (smin (smax (v4i32 V128:$Vn), (v4i32 VImm8000)),
                              (v4i32 VImm7FFF)))),
          (SQXTNv4i16 V128:$Vn)>;
def : Pat<(v4i16 (trunc (smax (smin (v4i32 V128:$Vn), (v4i32 VImm7FFF)),
                              (v4i32 VImm8000)))),
          (SQXTNv4i16 V128:$Vn)>;
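// For example (sketch, with splat(...) as shorthand for a constant vector):
//   %lo = smax <8 x i16> %x, splat(-128)
//   %hi = smin <8 x i16> %lo, splat(127)
//   %r  = trunc <8 x i16> %hi to <8 x i8>
// collapses to a single "sqxtn v0.8b, v1.8h".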

// concat_vectors(Vd, trunc(smin(smax(Vm, -128), 127))) ~> SQXTN2(Vd, Vn)
// with reversed min/max
def : Pat<(v16i8 (concat_vectors
                 (v8i8 V64:$Vd),
                 (v8i8 (trunc (smin (smax (v8i16 V128:$Vn), (v8i16 VImm80)),
                                          (v8i16 VImm7F)))))),
          (SQXTNv16i8 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;
def : Pat<(v16i8 (concat_vectors
                 (v8i8 V64:$Vd),
                 (v8i8 (trunc (smax (smin (v8i16 V128:$Vn), (v8i16 VImm7F)),
                                          (v8i16 VImm80)))))),
          (SQXTNv16i8 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;

// concat_vectors(Vd, trunc(smin(smax(Vm, -32768), 32767))) ~> SQXTN2(Vd, Vn)
// with reversed min/max
def : Pat<(v8i16 (concat_vectors
                 (v4i16 V64:$Vd),
                 (v4i16 (trunc (smin (smax (v4i32 V128:$Vn), (v4i32 VImm8000)),
                                           (v4i32 VImm7FFF)))))),
          (SQXTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;
def : Pat<(v8i16 (concat_vectors
                 (v4i16 V64:$Vd),
                 (v4i16 (trunc (smax (smin (v4i32 V128:$Vn), (v4i32 VImm7FFF)),
                                           (v4i32 VImm8000)))))),
          (SQXTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three vector instructions.
//===----------------------------------------------------------------------===//

defm ADD     : SIMDThreeSameVector<0, 0b10000, "add", add>;
defm ADDP    : SIMDThreeSameVector<0, 0b10111, "addp", AArch64addp>;
defm CMEQ    : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE    : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT    : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI    : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS    : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST   : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
foreach VT = [ v8i8, v16i8, v4i16, v8i16, v2i32, v4i32, v2i64 ] in {
def : Pat<(vnot (AArch64cmeqz VT:$Rn)), (!cast<Instruction>("CMTST"#VT) VT:$Rn, VT:$Rn)>;
}
defm FABD    : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>;
let Predicates = [HasNEON] in {
foreach VT = [ v2f32, v4f32, v2f64 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
let Predicates = [HasNEON, HasFullFP16] in {
foreach VT = [ v4f16, v8f16 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
defm FACGE   : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>;
defm FACGT   : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>;
defm FADDP   : SIMDThreeSameVectorFP<1,0,0b010,"faddp", AArch64faddp>;
defm FADD    : SIMDThreeSameVectorFP<0,0,0b010,"fadd", any_fadd>;
defm FCMEQ   : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE   : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT   : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FDIV    : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", any_fdiv>;
defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
defm FMAXNM  : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", any_fmaxnum>;
defm FMAXP   : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>;
defm FMAX    : SIMDThreeSameVectorFP<0,0,0b110,"fmax", any_fmaximum>;
defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>;
defm FMINNM  : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", any_fminnum>;
defm FMINP   : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
defm FMIN    : SIMDThreeSameVectorFP<0,1,0b110,"fmin", any_fminimum>;

// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
// instruction expects the addend first, while the fma intrinsic puts it last.
defm FMLA     : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
            TriOpFrag<(any_fma node:$RHS, node:$MHS, node:$LHS)> >;
defm FMLS     : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls",
            TriOpFrag<(any_fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
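// Concretely (sketch): (fma $Rn, $Rm, $Rd), with the addend last as in the
// intrinsic, selects to "fmla v0.2d, v1.2d, v2.2d", where the tied
// destination v0 carries the addend, matching the reordered TriOpFrag above.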

defm FMULX    : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
defm FMUL     : SIMDThreeSameVectorFP<1,0,0b011,"fmul", any_fmul>;
defm FRECPS   : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
defm FRSQRTS  : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
defm FSUB     : SIMDThreeSameVectorFP<0,1,0b010,"fsub", any_fsub>;

// MLA and MLS are generated in MachineCombine
defm MLA      : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla", null_frag>;
defm MLS      : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls", null_frag>;

defm MUL      : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
defm PMUL     : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
defm SABA     : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
      TriOpFrag<(add node:$LHS, (AArch64sabd node:$MHS, node:$RHS))> >;
defm SABD     : SIMDThreeSameVectorBHS<0,0b01110,"sabd", AArch64sabd>;
defm SHADD    : SIMDThreeSameVectorBHS<0,0b00000,"shadd", avgfloors>;
defm SHSUB    : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
defm SMAXP    : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
defm SMAX     : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
defm SMINP    : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
defm SMIN     : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
defm SQADD    : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
defm SRHADD   : SIMDThreeSameVectorBHS<0,0b00010,"srhadd", avgceils>;
defm SRSHL    : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeSameVector<1,0b10000,"sub", sub>;
defm UABA     : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
      TriOpFrag<(add node:$LHS, (AArch64uabd node:$MHS, node:$RHS))> >;
defm UABD     : SIMDThreeSameVectorBHS<1,0b01110,"uabd", AArch64uabd>;
defm UHADD    : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", avgflooru>;
defm UHSUB    : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
defm UMAXP    : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
defm UMAX     : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
defm UMINP    : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
defm UMIN     : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
defm UQADD    : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
defm URHADD   : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", avgceilu>;
defm URSHL    : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
                                                  int_aarch64_neon_sqrdmlah>;
defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
                                                  int_aarch64_neon_sqrdmlsh>;

// Extra saturating patterns, beyond the intrinsic matches above.
defm : SIMDThreeSameVectorExtraPatterns<"SQADD", saddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQADD", uaddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"SQSUB", ssubsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQSUB", usubsat>;

defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
                                  BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
                                  BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;

// Pseudo bitwise select pattern BSP.
// It is expanded into BSL/BIT/BIF after register allocation.
defm BSP : SIMDLogicalThreeVectorPseudo<TriOpFrag<(or (and node:$LHS, node:$MHS),
                                                      (and (vnot node:$LHS), node:$RHS))>>;
defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl">;
defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
defm BIF : SIMDLogicalThreeVectorTied<1, 0b11, "bif">;
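// All three concrete forms compute the same bitwise select,
//   (mask & Vn) | (~mask & Vm),
// but differ in which operand is tied to the destination: BSL ties the select
// mask, while BIT/BIF keep the mask in Vm and tie a data input. Keeping the
// BSP pseudo until after register allocation lets the allocator pick
// whichever form clobbers a dead register.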

def : Pat<(AArch64bsp (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;

def : Pat<(AArch64bsp (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
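// Since the select is bitwise, every vector type of a given width funnels
// into the single byte-sized BSP variant above.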

def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;

def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;

def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmls.8b\t$dst, $src1, $src2}",
                (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmls.16b\t$dst, $src1, $src2}",
                (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmls.4h\t$dst, $src1, $src2}",
                (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmls.8h\t$dst, $src1, $src2}",
                (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmls.2s\t$dst, $src1, $src2}",
                (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmls.4s\t$dst, $src1, $src2}",
                (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmls.2d\t$dst, $src1, $src2}",
                (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlo.8b\t$dst, $src1, $src2}",
                (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlo.16b\t$dst, $src1, $src2}",
                (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlo.4h\t$dst, $src1, $src2}",
                (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlo.8h\t$dst, $src1, $src2}",
                (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlo.2s\t$dst, $src1, $src2}",
                (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlo.4s\t$dst, $src1, $src2}",
                (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlo.2d\t$dst, $src1, $src2}",
                (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmle.8b\t$dst, $src1, $src2}",
                (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmle.16b\t$dst, $src1, $src2}",
                (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmle.4h\t$dst, $src1, $src2}",
                (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmle.8h\t$dst, $src1, $src2}",
                (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmle.2s\t$dst, $src1, $src2}",
                (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmle.4s\t$dst, $src1, $src2}",
                (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmle.2d\t$dst, $src1, $src2}",
                (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlt.8b\t$dst, $src1, $src2}",
                (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlt.16b\t$dst, $src1, $src2}",
                (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlt.4h\t$dst, $src1, $src2}",
                (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlt.8h\t$dst, $src1, $src2}",
                (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlt.2s\t$dst, $src1, $src2}",
                (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlt.4s\t$dst, $src1, $src2}",
                (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlt.2d\t$dst, $src1, $src2}",
                (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmle.4h\t$dst, $src1, $src2}",
                (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmle.8h\t$dst, $src1, $src2}",
                (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmle.2s\t$dst, $src1, $src2}",
                (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmle.4s\t$dst, $src1, $src2}",
                (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmle.2d\t$dst, $src1, $src2}",
                (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmlt.4h\t$dst, $src1, $src2}",
                (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmlt.8h\t$dst, $src1, $src2}",
                (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmlt.2s\t$dst, $src1, $src2}",
                (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmlt.4s\t$dst, $src1, $src2}",
                (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmlt.2d\t$dst, $src1, $src2}",
                (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
                "|facle.4h\t$dst, $src1, $src2}",
                (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
                "|facle.8h\t$dst, $src1, $src2}",
                (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
                "|facle.2s\t$dst, $src1, $src2}",
                (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
                "|facle.4s\t$dst, $src1, $src2}",
                (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
                "|facle.2d\t$dst, $src1, $src2}",
                (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
                "|faclt.4h\t$dst, $src1, $src2}",
                (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
                "|faclt.8h\t$dst, $src1, $src2}",
                (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
                "|faclt.2s\t$dst, $src1, $src2}",
                (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
                "|faclt.4s\t$dst, $src1, $src2}",
                (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
                "|faclt.2d\t$dst, $src1, $src2}",
                (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions.
//===----------------------------------------------------------------------===//

defm ADD      : SIMDThreeScalarD<0, 0b10000, "add", add>;
defm CMEQ     : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE     : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT     : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI     : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS     : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST    : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD     : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FABD64 FPR64:$Rn, FPR64:$Rm)>;
let Predicates = [HasNEON, HasFullFP16] in {
def : Pat<(fabs (fsub f16:$Rn, f16:$Rm)), (FABD16 f16:$Rn, f16:$Rm)>;
}
let Predicates = [HasNEON] in {
def : Pat<(fabs (fsub f32:$Rn, f32:$Rm)), (FABD32 f32:$Rn, f32:$Rm)>;
def : Pat<(fabs (fsub f64:$Rn, f64:$Rm)), (FABD64 f64:$Rn, f64:$Rm)>;
}
defm FACGE    : SIMDThreeScalarFPCmp<1, 0, 0b101, "facge",
                                     int_aarch64_neon_facge>;
defm FACGT    : SIMDThreeScalarFPCmp<1, 1, 0b101, "facgt",
                                     int_aarch64_neon_facgt>;
defm FCMEQ    : SIMDThreeScalarFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE    : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT    : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FMULX    : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx, HasNEONorSME>;
defm FRECPS   : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps, HasNEONorSME>;
defm FRSQRTS  : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts, HasNEONorSME>;
defm SQADD    : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeScalarHS<  0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeScalarHS<  1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
defm SRSHL    : SIMDThreeScalarD<   0, 0b01010, "srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeScalarD<   0, 0b01000, "sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeScalarD<   1, 0b10000, "sub", sub>;
defm UQADD    : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
defm URSHL    : SIMDThreeScalarD<   1, 0b01010, "urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeScalarD<   1, 0b01000, "ushl", int_aarch64_neon_ushl>;
let Predicates = [HasRDM] in {
  defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
  defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
  def : Pat<(i32 (int_aarch64_neon_sqrdmlah (i32 FPR32:$Rd), (i32 FPR32:$Rn),
                                            (i32 FPR32:$Rm))),
            (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
  def : Pat<(i32 (int_aarch64_neon_sqrdmlsh (i32 FPR32:$Rd), (i32 FPR32:$Rn),
                                            (i32 FPR32:$Rm))),
            (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
}

def : InstAlias<"cmls $dst, $src1, $src2",
                (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmle $dst, $src1, $src2",
                (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlo $dst, $src1, $src2",
                (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlt $dst, $src1, $src2",
                (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions (mixed operands).
//===----------------------------------------------------------------------===//
defm SQDMULL  : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
                                       int_aarch64_neon_sqdmulls_scalar>;
defm SQDMLAL  : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
defm SQDMLSL  : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;

def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
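// The two patterns above fold a saturating add/sub of an sqdmull result into
// the accumulating sqdmlal/sqdmlsl scalar forms.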

//===----------------------------------------------------------------------===//
// Advanced SIMD two scalar instructions.
//===----------------------------------------------------------------------===//

defm ABS    : SIMDTwoScalarD<    0, 0b01011, "abs", abs>;
defm CMEQ   : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
defm FCMEQ  : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDFPTwoScalar<   0, 0, 0b11100, "fcvtas">;
defm FCVTAU : SIMDFPTwoScalar<   1, 0, 0b11100, "fcvtau">;
defm FCVTMS : SIMDFPTwoScalar<   0, 0, 0b11011, "fcvtms">;
defm FCVTMU : SIMDFPTwoScalar<   1, 0, 0b11011, "fcvtmu">;
defm FCVTNS : SIMDFPTwoScalar<   0, 0, 0b11010, "fcvtns">;
defm FCVTNU : SIMDFPTwoScalar<   1, 0, 0b11010, "fcvtnu">;
defm FCVTPS : SIMDFPTwoScalar<   0, 1, 0b11010, "fcvtps">;
defm FCVTPU : SIMDFPTwoScalar<   1, 1, 0b11010, "fcvtpu">;
def  FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
defm FCVTZS : SIMDFPTwoScalar<   0, 1, 0b11011, "fcvtzs">;
defm FCVTZU : SIMDFPTwoScalar<   1, 1, 0b11011, "fcvtzu">;
defm FRECPE : SIMDFPTwoScalar<   0, 1, 0b11101, "frecpe", HasNEONorSME>;
defm FRECPX : SIMDFPTwoScalar<   0, 1, 0b11111, "frecpx", HasNEONorSME>;
defm FRSQRTE : SIMDFPTwoScalar<  1, 1, 0b11101, "frsqrte", HasNEONorSME>;
defm NEG    : SIMDTwoScalarD<    1, 0b01011, "neg",
                                 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm SCVTF  : SIMDFPTwoScalarCVT<   0, 0, 0b11101, "scvtf", AArch64sitof>;
defm SQABS  : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
                                     int_aarch64_neon_suqadd>;
defm UCVTF  : SIMDFPTwoScalarCVT<   1, 0, 0b11101, "ucvtf", AArch64uitof>;
defm UQXTN  : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
                                    int_aarch64_neon_usqadd>;

def : Pat<(v1i64 (AArch64vashr (v1i64 V64:$Rn), (i32 63))),
          (CMLTv1i64rz V64:$Rn)>;

def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
          (FCVTASv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
          (FCVTAUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
          (FCVTMSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
          (FCVTMUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
          (FCVTNSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
          (FCVTNUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
          (FCVTPSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
          (FCVTPUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtzs (v1f64 FPR64:$Rn))),
          (FCVTZSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtzu (v1f64 FPR64:$Rn))),
          (FCVTZUv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frecpe (f16 FPR16:$Rn))),
          (FRECPEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
          (FRECPEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
          (FRECPEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
          (FRECPEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frecps (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRECPS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frecps (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRECPSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frecps (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRECPSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frecps (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRECPS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frecps (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRECPSv2f64 FPR128:$Rn, FPR128:$Rm)>;
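// FRECPE/FRECPS implement the usual Newton-Raphson refinement of 1/x
// (FRECPS(a, b) computes 2.0 - a*b), so one refinement step is, roughly:
//   frecpe d0, d1        // x0 ~ 1/d
//   frecps d2, d1, d0    // 2 - d*x0
//   fmul   d0, d0, d2    // x1 = x0 * (2 - d*x0)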

def : Pat<(f16 (int_aarch64_neon_frecpx (f16 FPR16:$Rn))),
          (FRECPXv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
          (FRECPXv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
          (FRECPXv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frsqrte (f16 FPR16:$Rn))),
          (FRSQRTEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
          (FRSQRTEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
          (FRSQRTEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
          (FRSQRTEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frsqrts (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRSQRTS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frsqrts (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRSQRTSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frsqrts (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRSQRTSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frsqrts (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRSQRTS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frsqrts (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRSQRTSv2f64 FPR128:$Rn, FPR128:$Rm)>;
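// Likewise for 1/sqrt(x): FRSQRTS(a, b) computes (3.0 - a*b)/2.0, giving the
// matching Newton-Raphson step  x1 = x0 * frsqrts(d * x0, x0).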

// Some float -> int -> float conversion patterns where we want to keep the
// int values in FP registers, using the corresponding NEON instructions to
// avoid more costly int <-> fp register transfers.
let Predicates = [HasNEON] in {
def : Pat<(f64 (any_sint_to_fp (i64 (any_fp_to_sint f64:$Rn)))),
          (SCVTFv1i64 (i64 (FCVTZSv1i64 f64:$Rn)))>;
def : Pat<(f32 (any_sint_to_fp (i32 (any_fp_to_sint f32:$Rn)))),
          (SCVTFv1i32 (i32 (FCVTZSv1i32 f32:$Rn)))>;
def : Pat<(f64 (any_uint_to_fp (i64 (any_fp_to_uint f64:$Rn)))),
          (UCVTFv1i64 (i64 (FCVTZUv1i64 f64:$Rn)))>;
def : Pat<(f32 (any_uint_to_fp (i32 (any_fp_to_uint f32:$Rn)))),
          (UCVTFv1i32 (i32 (FCVTZUv1i32 f32:$Rn)))>;

let Predicates = [HasFullFP16] in {
def : Pat<(f16 (any_sint_to_fp (i32 (any_fp_to_sint f16:$Rn)))),
          (SCVTFv1i16 (f16 (FCVTZSv1f16 f16:$Rn)))>;
def : Pat<(f16 (any_uint_to_fp (i32 (any_fp_to_uint f16:$Rn)))),
          (UCVTFv1i16 (f16 (FCVTZUv1f16 f16:$Rn)))>;
}
// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// Here are the patterns for 8 and 16-bit integers to float.
// 8-bit -> float.
multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
                             SDPatternOperator loadop, Instruction UCVTF,
                             ROAddrMode ro, Instruction LDRW, Instruction LDRX,
                             SubRegIndex sub> {
  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                                 sub))>;

  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                                 sub))>;
}
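// For instance (roughly): for "float f = (float)u8_val;" the byte is loaded
// straight into a vector register and converted there,
//   ldr   b0, [x0]
//   ucvtf s0, s0
// instead of a GPR load followed by an fmov across register files.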

defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
                         UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f32 (uint_to_fp (i32
                     (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bit -> float.
defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
                         UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bit -> float is handled in the target-specific dag combine
// performIntToFpCombine.
// 64-bit integer to 32-bit floating point is not possible with a plain
// UCVTF on floating point registers (source and destination must have
// the same size).

// Here are the patterns for 8, 16, 32, and 64-bit integers to double.
// 8-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
                         UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f64 (uint_to_fp (i32
                    (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
                         UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, load,
                         UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
// 64-bit -> double is handled in the target-specific dag combine
// performIntToFpCombine.
} // let Predicates = [HasNEON]

//===----------------------------------------------------------------------===//
// Advanced SIMD three different-sized vector instructions.
//===----------------------------------------------------------------------===//

defm ADDHN  : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
defm SUBHN  : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
defm PMULL  : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
defm SABAL  : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
                                             AArch64sabd>;
defm SABDL   : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
                                          AArch64sabd>;
defm SADDL   : SIMDLongThreeVectorBHS<   0, 0b0000, "saddl",
            BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
defm SADDW   : SIMDWideThreeVectorBHS<   0, 0b0001, "saddw",
                 BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
defm SMLAL   : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
    TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>>;
defm SMLSL   : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
    TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>>;
defm SMULL   : SIMDLongThreeVectorBHS<0, 0b1100, "smull", AArch64smull>;
defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
                                               int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
                                               int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
                                     int_aarch64_neon_sqdmull>;
defm SSUBL   : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
                 BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
defm SSUBW   : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
                 BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
defm UABAL   : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
                                              AArch64uabd>;
defm UADDL   : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
                 BinOpFrag<(add (zanyext node:$LHS), (zanyext node:$RHS))>>;
defm UADDW   : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
                 BinOpFrag<(add node:$LHS, (zanyext node:$RHS))>>;
defm UMLAL   : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
    TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>>;
defm UMLSL   : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
    TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>>;
defm UMULL   : SIMDLongThreeVectorBHS<1, 0b1100, "umull", AArch64umull>;
defm USUBL   : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
                 BinOpFrag<(sub (zanyext node:$LHS), (zanyext node:$RHS))>>;
defm USUBW   : SIMDWideThreeVectorBHS<   1, 0b0011, "usubw",
                 BinOpFrag<(sub node:$LHS, (zanyext node:$RHS))>>;

// Additional patterns for [SU]ML[AS]L
multiclass Neon_mul_acc_widen_patterns<SDPatternOperator opnode, SDPatternOperator vecopnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v4i16 (opnode
                    V64:$Ra,
                    (v4i16 (extract_subvector
                            (vecopnode (v8i8 V64:$Rn),(v8i8 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v8i16 (INST8B
                                     (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
  def : Pat<(v2i32 (opnode
                    V64:$Ra,
                    (v2i32 (extract_subvector
                            (vecopnode (v4i16 V64:$Rn),(v4i16 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v4i32 (INST4H
                                     (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
  def : Pat<(v1i64 (opnode
                    V64:$Ra,
                    (v1i64 (extract_subvector
                            (vecopnode (v2i32 V64:$Rn),(v2i32 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v2i64 (INST2S
                                     (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
}

defm : Neon_mul_acc_widen_patterns<add, AArch64umull,
     UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<add, AArch64smull,
     SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<sub, AArch64umull,
     UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<sub, AArch64smull,
     SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;

// Patterns for 64-bit pmull
def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
          (PMULLv1i64 V64:$Rn, V64:$Rm)>;
def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
                                    (extractelt (v2i64 V128:$Rm), (i64 1))),
          (PMULLv2i64 V128:$Rn, V128:$Rm)>;
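// The second pattern recognizes a pmull64 of the top lanes of both operands
// and selects the "pmull2" form directly, avoiding explicit lane moves.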

// CodeGen patterns for addhn and subhn instructions, which can actually be
// written in LLVM IR without too much difficulty.
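// For example (sketch, with splat(...) as shorthand for a constant vector):
//   %sum = add <8 x i16> %a, %b
//   %hi  = lshr <8 x i16> %sum, splat(8)
//   %r   = trunc <8 x i16> %hi to <8 x i8>
// corresponds to the first ADDHN pattern below (once the shift has been
// lowered to AArch64vlshr) and becomes "addhn v0.8b, v1.8h, v2.8h".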

// Prioritize ADDHN and SUBHN over UZP2.
let AddedComplexity = 10 in {

// ADDHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
          (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                           (i32 16))))),
          (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                           (i32 32))))),
          (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 8))))),
          (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 16))))),
          (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 32))))),
          (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

// SUBHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
          (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                           (i32 16))))),
          (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                           (i32 32))))),
          (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 8))))),
          (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 16))))),
          (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 32))))),
          (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

} // AddedComplexity = 10

//----------------------------------------------------------------------------
// AdvSIMD bitwise extract from vector instruction.
//----------------------------------------------------------------------------

defm EXT : SIMDBitwiseExtract<"ext">;

def AdjustExtImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 + N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
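// AdjustExtImm rewrites a 64-bit EXT immediate into the equivalent immediate
// for a 128-bit EXT of the high half: e.g. (illustrative) byte index #3 into
// the high half is byte index #11 = 3 + 8 of the full register.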
multiclass ExtPat<ValueType VT64, ValueType VT128, int N> {
  def : Pat<(VT64 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
            (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
  def : Pat<(VT128 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
            (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
  // We use EXT to handle extract_subvector to copy the upper 64-bits of a
  // 128-bit vector.
  def : Pat<(VT64 (extract_subvector V128:$Rn, (i64 N))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
  // A 64-bit EXT of two halves of the same 128-bit register can be done as a
  // single 128-bit EXT.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 0)),
                              (extract_subvector V128:$Rn, (i64 N)),
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, imm:$imm), dsub)>;
  // A 64-bit EXT of the high half of a 128-bit register can be done using a
  // 128-bit EXT of the whole register with an adjustment to the immediate. The
  // top half of the other operand will be unset, but that doesn't matter as it
  // will not be used.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 N)),
                              V64:$Rm,
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn,
                                      (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                                      (AdjustExtImm imm:$imm)), dsub)>;
}

defm : ExtPat<v8i8, v16i8, 8>;
defm : ExtPat<v4i16, v8i16, 4>;
defm : ExtPat<v4f16, v8f16, 4>;
defm : ExtPat<v4bf16, v8bf16, 4>;
defm : ExtPat<v2i32, v4i32, 2>;
defm : ExtPat<v2f32, v4f32, 2>;
defm : ExtPat<v1i64, v2i64, 1>;
defm : ExtPat<v1f64, v2f64, 1>;

//----------------------------------------------------------------------------
// AdvSIMD zip vector
//----------------------------------------------------------------------------

defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;

def : Pat<(v16i8 (concat_vectors (v8i8 (trunc (v8i16 V128:$Vn))),
                                 (v8i8 (trunc (v8i16 V128:$Vm))))),
          (UZP1v16i8 V128:$Vn, V128:$Vm)>;
def : Pat<(v8i16 (concat_vectors (v4i16 (trunc (v4i32 V128:$Vn))),
                                 (v4i16 (trunc (v4i32 V128:$Vm))))),
          (UZP1v8i16 V128:$Vn, V128:$Vm)>;
def : Pat<(v4i32 (concat_vectors (v2i32 (trunc (v2i64 V128:$Vn))),
                                 (v2i32 (trunc (v2i64 V128:$Vm))))),
          (UZP1v4i32 V128:$Vn, V128:$Vm)>;
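// Truncating each half keeps the even-numbered bytes/halfwords/words of the
// two inputs, which is exactly what UZP1 computes, so e.g. (illustrative)
//   concat(trunc <8 x i16> %n to <8 x i8>, trunc <8 x i16> %m to <8 x i8>)
// becomes: uzp1 v0.16b, v1.16b, v2.16b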

def : Pat<(v16i8 (concat_vectors
                 (v8i8 (trunc (AArch64vlshr (v8i16 V128:$Vn), (i32 8)))),
                 (v8i8 (trunc (AArch64vlshr (v8i16 V128:$Vm), (i32 8)))))),
          (UZP2v16i8 V128:$Vn, V128:$Vm)>;
def : Pat<(v8i16 (concat_vectors
                 (v4i16 (trunc (AArch64vlshr (v4i32 V128:$Vn), (i32 16)))),
                 (v4i16 (trunc (AArch64vlshr (v4i32 V128:$Vm), (i32 16)))))),
          (UZP2v8i16 V128:$Vn, V128:$Vm)>;
def : Pat<(v4i32 (concat_vectors
                 (v2i32 (trunc (AArch64vlshr (v2i64 V128:$Vn), (i32 32)))),
                 (v2i32 (trunc (AArch64vlshr (v2i64 V128:$Vm), (i32 32)))))),
          (UZP2v4i32 V128:$Vn, V128:$Vm)>;

//----------------------------------------------------------------------------
// AdvSIMD TBL/TBX instructions
//----------------------------------------------------------------------------

defm TBL : SIMDTableLookup<    0, "tbl">;
defm TBX : SIMDTableLookupTied<1, "tbx">;

def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBLv16i8One V128:$Ri, V128:$Rn)>;

def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
                  (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
                   (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;


//----------------------------------------------------------------------------
// AdvSIMD scalar DUP instruction
//----------------------------------------------------------------------------

defm DUP : SIMDScalarDUP<"mov">;

//----------------------------------------------------------------------------
// AdvSIMD scalar pairwise instructions
//----------------------------------------------------------------------------

defm ADDP    : SIMDPairwiseScalarD<0, 0b11011, "addp">;
defm FADDP   : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
defm FMAXP   : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
defm FMINP   : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;

// Only the lower half of the result of the inner FADDP is used in the patterns
// below, so the second operand does not matter. Re-use the first input
// operand, so no additional dependencies need to be introduced.
let Predicates = [HasFullFP16] in {
def : Pat<(f16 (vecreduce_fadd (v8f16 V128:$Rn))),
            (FADDPv2i16p
              (EXTRACT_SUBREG
                 (FADDPv8f16 (FADDPv8f16 V128:$Rn, V128:$Rn), V128:$Rn),
               dsub))>;
def : Pat<(f16 (vecreduce_fadd (v4f16 V64:$Rn))),
          (FADDPv2i16p (FADDPv4f16 V64:$Rn, V64:$Rn))>;
}
def : Pat<(f32 (vecreduce_fadd (v4f32 V128:$Rn))),
          (FADDPv2i32p
            (EXTRACT_SUBREG
              (FADDPv4f32 V128:$Rn, V128:$Rn),
             dsub))>;
def : Pat<(f32 (vecreduce_fadd (v2f32 V64:$Rn))),
          (FADDPv2i32p V64:$Rn)>;
def : Pat<(f64 (vecreduce_fadd (v2f64 V128:$Rn))),
          (FADDPv2i64p V128:$Rn)>;
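// For example, the v4f32 reduction above expands to (illustrative):
//   faddp v0.4s, v0.4s, v0.4s   // pairwise add; only the low .2s is used
//   faddp s0, v0.2s             // add the remaining pair into s0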

def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
          (FADDPv2i32p V64:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
          (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
          (FADDPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
          (FMAXNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
          (FMAXNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
          (FMAXPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
          (FMAXPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
          (FMINNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
          (FMINNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
          (FMINPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
          (FMINPv2i64p V128:$Rn)>;

//----------------------------------------------------------------------------
// AdvSIMD INS/DUP instructions
//----------------------------------------------------------------------------

def DUPv8i8gpr  : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;

def DUPv2i64lane : SIMDDup64FromElement;
def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
def DUPv8i8lane  : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;

// DUP from a 64-bit register to a 64-bit register is just a copy
def : Pat<(v1i64 (AArch64dup (i64 GPR64:$Rn))),
          (COPY_TO_REGCLASS GPR64:$Rn, FPR64)>;
def : Pat<(v1f64 (AArch64dup (f64 FPR64:$Rn))),
          (COPY_TO_REGCLASS FPR64:$Rn, FPR64)>;
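// E.g. (v1i64 (AArch64dup (i64 GPR64:$Rn))) needs no lane broadcast at all;
// it is just a GPR-to-FPR copy, typically emitted as "fmov d0, x0"
// (illustrative).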

def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
          (v2f32 (DUPv2i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
          (v4f32 (DUPv4i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
          (v2f64 (DUPv2i64lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
            (i64 0)))>;
def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
          (v4f16 (DUPv4i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v4bf16 (AArch64dup (bf16 FPR16:$Rn))),
          (v4bf16 (DUPv4i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
          (v8f16 (DUPv8i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v8bf16 (AArch64dup (bf16 FPR16:$Rn))),
          (v8bf16 (DUPv8i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;

def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;

def : Pat<(v4bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
def : Pat<(v8bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;

def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
          (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;

// If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
// instruction even if the types don't match: we just have to remap the lane
// carefully. N.b. this trick only applies to truncations.
def VecIndex_x2 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x4 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x8 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
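// For example (illustrative, little-endian): duplicating the low byte of
// halfword lane 3 of a v8i16 is the same as duplicating byte lane 6 = 2 * 3,
// so the dup can stay in the byte domain:
//   dup v0.8b, v0.b[6]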

multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
                            ValueType Src128VT, ValueType ScalVT,
                            Instruction DUP, SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
                                                     imm:$idx)))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
                                                     imm:$idx)))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTruncPats<v8i8,   v4i16, v8i16, i32, DUPv8i8lane,  VecIndex_x2>;
defm : DUPWithTruncPats<v8i8,   v2i32, v4i32, i32, DUPv8i8lane,  VecIndex_x4>;
defm : DUPWithTruncPats<v4i16,  v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;

defm : DUPWithTruncPats<v16i8,  v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
defm : DUPWithTruncPats<v16i8,  v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
defm : DUPWithTruncPats<v8i16,  v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;

multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
                               SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
                                                         imm:$idx))))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
                                                       imm:$idx))))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTrunci64Pats<v8i8,  DUPv8i8lane,   VecIndex_x8>;
defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane,  VecIndex_x4>;
defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane,  VecIndex_x2>;

defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;

// SMOV and UMOV definitions, with some extra patterns for convenience
defm SMOV : SMov;
defm UMOV : UMov;

def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
          (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;

def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
            VectorIndexB:$idx)))), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
            VectorIndexH:$idx)))), i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;

// Extracting i8 or i16 elements will have the zero-extend transformed to
// an 'and' mask by type legalization since neither i8 nor i16 are legal types
// for AArch64. Match these patterns here since UMOV already zeroes out the high
// bits of the destination register.
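// E.g. (and (vector_extract (v16i8 V128:$Rn), 2), 0xff) is selected to just
//   umov w0, v0.b[2]
// since UMOV already clears bits [31:8] of the destination (illustrative).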
def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
               (i32 0xff)),
          (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
               (i32 0xffff)),
          (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;

def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
            VectorIndexB:$idx)))), (i64 0xff))),
          (SUBREG_TO_REG (i64 0), (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx)), sub_32)>;
def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
            VectorIndexH:$idx)))), (i64 0xffff))),
          (SUBREG_TO_REG (i64 0), (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx)), sub_32)>;

defm INS : SIMDIns;

def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v4bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
          (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
          (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
            (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                                  (i32 FPR32:$Rn), ssub))>;
def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
            (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                                  (i32 FPR32:$Rn), ssub))>;

def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
            (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                                  (i64 FPR64:$Rn), dsub))>;

def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;

def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;

def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(vector_insert (v8f16 v8f16:$Rn), (f16 fpimm0),
            (i64 VectorIndexH:$imm)),
          (INSvi16gpr V128:$Rn, VectorIndexH:$imm, WZR)>;
def : Pat<(vector_insert v4f32:$Rn, (f32 fpimm0),
            (i64 VectorIndexS:$imm)),
          (INSvi32gpr V128:$Rn, VectorIndexS:$imm, WZR)>;
def : Pat<(vector_insert v2f64:$Rn, (f64 fpimm0),
            (i64 VectorIndexD:$imm)),
          (INSvi64gpr V128:$Rn, VectorIndexD:$imm, XZR)>;

def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v4bf16 (vector_insert (v4bf16 V64:$Rn),
            (bf16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(v8bf16 (vector_insert (v8bf16 V128:$Rn),
            (bf16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi32lane
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
              (i64 0)),
            dsub)>;
def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (INSvi32lane
            V128:$Rn, VectorIndexS:$imm,
            (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
            (i64 0))>;
def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
            (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
          (INSvi64lane
            V128:$Rn, VectorIndexD:$imm,
            (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
            (i64 0))>;

// Copy an element at a constant index in one vector into a constant indexed
// element of another.
// FIXME: refactor to a shared class/def parameterized on vector type, vector
// index type and INS extension.
def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
                   (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
                   VectorIndexB:$idx2)),
          (v16i8 (INSvi8lane
                   V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
          )>;
def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
                   (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
                   VectorIndexH:$idx2)),
          (v8i16 (INSvi16lane
                   V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
          )>;
def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
                   (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
                   VectorIndexS:$idx2)),
          (v4i32 (INSvi32lane
                   V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
          )>;
def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
                   (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
                   VectorIndexD:$idx2)),
          (v2i64 (INSvi64lane
                   V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
          )>;

multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
                                ValueType VTScal, Instruction INS> {
  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;

  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (INS V128:$src, imm:$Immd,
                 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
                                 imm:$Immd, V128:$Rn, imm:$Immn),
                            dsub)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (EXTRACT_SUBREG
                (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
                     (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
                dsub)>;
}

defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v8bf16, v4bf16, bf16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;


// Floating point vector extractions are codegen'd as either a sequence of
// subregister extractions, or a MOV (aka DUP here) if the lane number is
// anything other than zero.
def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
          (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
          (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
          (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
def : Pat<(vector_extract (v8bf16 V128:$Rn), 0),
          (bf16 (EXTRACT_SUBREG V128:$Rn, hsub))>;


def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
          (f64 (DUPi64 V128:$Rn, VectorIndexD:$idx))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
          (f32 (DUPi32 V128:$Rn, VectorIndexS:$idx))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
          (f16 (DUPi16 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(vector_extract (v8bf16 V128:$Rn), VectorIndexH:$idx),
          (bf16 (DUPi16 V128:$Rn, VectorIndexH:$idx))>;

// All concat_vectors operations are canonicalised to act on i64 vectors for
// AArch64. In the general case we need an instruction, which may as well be
// INS.
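// For example, concatenating two 64-bit halves becomes (illustrative):
//   mov v0.d[1], v1.d[0]
// once both operands have been placed in the low halves of 128-bit registers.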
class ConcatPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
        (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
                     (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;

def : ConcatPat<v2i64, v1i64>;
def : ConcatPat<v2f64, v1f64>;
def : ConcatPat<v4i32, v2i32>;
def : ConcatPat<v4f32, v2f32>;
def : ConcatPat<v8i16, v4i16>;
def : ConcatPat<v8f16, v4f16>;
def : ConcatPat<v8bf16, v4bf16>;
def : ConcatPat<v16i8, v8i8>;

// If the high lanes are undef, though, we can just ignore them:
class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
        (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;

def : ConcatUndefPat<v2i64, v1i64>;
def : ConcatUndefPat<v2f64, v1f64>;
def : ConcatUndefPat<v4i32, v2i32>;
def : ConcatUndefPat<v4f32, v2f32>;
def : ConcatUndefPat<v8i16, v4i16>;
def : ConcatUndefPat<v16i8, v8i8>;

//----------------------------------------------------------------------------
// AdvSIMD across lanes instructions
//----------------------------------------------------------------------------

defm ADDV    : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
defm SMAXV   : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
defm SMINV   : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
defm UMAXV   : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
defm UMINV   : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
defm SADDLV  : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
defm UADDLV  : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
defm FMAXNMV : SIMDFPAcrossLanes<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
defm FMAXV   : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
defm FMINV   : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;

multiclass SIMDAcrossLaneLongPairIntrinsic<string Opc, SDPatternOperator addlp> {
  // Patterns for addv(addlp(x)) ==> addlv
  def : Pat<(i32 (vector_extract (v8i16 (insert_subvector undef,
              (v4i16 (AArch64uaddv (v4i16 (addlp (v8i8 V64:$op))))),
              (i64 0))), (i64 0))),
            (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
              (!cast<Instruction>(Opc#"v8i8v") V64:$op), hsub), ssub)>;
  def : Pat<(i32 (vector_extract (v8i16 (AArch64uaddv (v8i16 (addlp (v16i8 V128:$op))))), (i64 0))),
            (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
              (!cast<Instruction>(Opc#"v16i8v") V128:$op), hsub), ssub)>;
  def : Pat<(v4i32 (AArch64uaddv (v4i32 (addlp (v8i16 V128:$op))))),
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), (!cast<Instruction>(Opc#"v8i16v") V128:$op), ssub)>;

  // Patterns for addp(addlp(x)) ==> addlv
  def : Pat<(v2i32 (AArch64uaddv (v2i32 (addlp (v4i16 V64:$op))))),
            (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)), (!cast<Instruction>(Opc#"v4i16v") V64:$op), ssub)>;
  def : Pat<(v2i64 (AArch64uaddv (v2i64 (addlp (v4i32 V128:$op))))),
            (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (!cast<Instruction>(Opc#"v4i32v") V128:$op), dsub)>;
}

defm : SIMDAcrossLaneLongPairIntrinsic<"UADDLV", AArch64uaddlp>;
defm : SIMDAcrossLaneLongPairIntrinsic<"SADDLV", AArch64saddlp>;
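// E.g. addv(uaddlp(v16i8 %x)) folds to a single across-lanes instruction
// (illustrative):
//   uaddlv h0, v0.16b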

// Patterns for across-vector intrinsics that have a node equivalent returning
// a vector (with only the low lane defined) instead of a scalar.
// In effect, opNode is the same as (scalar_to_vector (IntNode)).
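// For example (illustrative), (v4i16 (AArch64smaxv V64:$Rn)) is selected to
//   smaxv h0, v0.4h
// with the scalar result living in lane 0 of an otherwise undefined vector.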
multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
                                    SDPatternOperator opNode> {
// If a lane instruction caught the vector_extract around opNode, we can
// directly match the latter to the instruction.
def : Pat<(v8i8 (opNode V64:$Rn)),
          (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
def : Pat<(v16i8 (opNode V128:$Rn)),
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
def : Pat<(v4i16 (opNode V64:$Rn)),
          (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
def : Pat<(v8i16 (opNode V128:$Rn)),
          (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
def : Pat<(v4i32 (opNode V128:$Rn)),
          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;


// If none did, fall back to the explicit patterns, consuming the vector_extract.
def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
            (i64 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (insert_subvector undef,
            (v4i16 (opNode V64:$Rn)), (i64 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
            ssub), ssub)>;

}

multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
                                          SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a sign extension after this intrinsic, consume it, as SMOV
// already performed it.
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i64 0)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i64 0)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
           (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
            (i64 0)))>;
}

multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
                                            SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a masking operation keeping only what has been actually
// generated, consume it.
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i64 0)), (i64 0))), maski8_or_more)),
      (i32 (EXTRACT_SUBREG
        (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
          (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
        ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
            maski8_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
          ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i64 0)), (i64 0))), maski16_or_more)),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
            ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
            maski16_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
          ssub))>;
}

defm : SIMDAcrossLanesSignedIntrinsic<"ADDV",  AArch64saddv>;
// vaddv_[su]32 is special: it becomes ADDP Vd.2s, Vn.2s, Vm.2s with Vn == Vm,
// and returns Vd.s[0].
def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
// vaddv_[su]32 is special: it becomes ADDP Vd.2s, Vn.2s, Vm.2s with Vn == Vm,
// and returns Vd.s[0].
def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
          (SMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
          (SMINPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
          (UMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
          (UMINPv2i32 V64:$Rn, V64:$Rn)>;

multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
        (i32 (SMOVvi16to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
          (i64 0)))>;
def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
        (i32 (SMOVvi16to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
          (i64 0)))>;

def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
          (i32 (EXTRACT_SUBREG
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
           ssub))>;
def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
          ssub))>;

def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
        (i64 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
          dsub))>;
}

multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
                                                Intrinsic intOp> {
  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
          ssub))>;
def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
          ssub))>;

def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
            ssub))>;
def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
          ssub))>;

def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
        (i64 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
          dsub))>;
}

defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;

// The vaddlv_s32 intrinsic gets mapped to SADDLP.
def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (SADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;
// The vaddlv_u32 intrinsic gets mapped to UADDLP.
def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (UADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;

//------------------------------------------------------------------------------
// AdvSIMD modified immediate instructions
//------------------------------------------------------------------------------

// AdvSIMD BIC
defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
// AdvSIMD ORR
defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;

def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

// AdvSIMD FMOV
def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
                                              "fmov", ".2d",
                       [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64,  fpimm8,
                                              "fmov", ".2s",
                       [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
                                              "fmov", ".4s",
                       [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
let Predicates = [HasNEON, HasFullFP16] in {
def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64,  fpimm8,
                                              "fmov", ".4h",
                       [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
                                              "fmov", ".8h",
                       [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
} // Predicates = [HasNEON, HasFullFP16]

// AdvSIMD MOVI

// EDIT byte mask: scalar
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVID      : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
                    [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 here.
def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
          (MOVID imm0_255:$shift)>;
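// E.g. (illustrative) an already-encoded imm8 of 0x55 expands each of its
// bits to a 0x00/0xff byte, materializing:
//   movi d0, #0x00ff00ff00ff00ff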

// EDIT byte mask: 2d

// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 in the pattern
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVIv2d_ns   : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
                                                simdimmtype10,
                                                "movi", ".2d",
                   [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;

def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;

def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;

// Set 64-bit vectors to all 0/1 by extracting from a 128-bit register as the
// extract is free and this gives better MachineCSE results.
def : Pat<(v1i64 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v2i32 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v4i16 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v8i8  immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;

def : Pat<(v1i64 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v2i32 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v4i16 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v8i8  immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MOVI      : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;

let Predicates = [HasNEON] in {
  // Using the MOVI to materialize fp constants.
  def : Pat<(f32 fpimm32SIMDModImmType4:$in),
            (EXTRACT_SUBREG (MOVIv2i32 (fpimm32SIMDModImmType4XForm f32:$in),
                                       (i32 24)),
                            ssub)>;
}

def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
// EDIT per word: 2s & 4s with MSL shifter
def MOVIv2s_msl  : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MOVIv4s_msl  : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;

// Per byte: 8b & 16b
def MOVIv8b_ns   : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64,  imm0_255,
                                                 "movi", ".8b",
                       [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;

def MOVIv16b_ns  : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
                                                 "movi", ".16b",
                       [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
}

// AdvSIMD MVNI

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MVNI      : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;

def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;

// EDIT per word: 2s & 4s with MSL shifter
let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MVNIv2s_msl   : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MVNIv4s_msl   : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
}

//----------------------------------------------------------------------------
// AdvSIMD indexed element
//----------------------------------------------------------------------------

let hasSideEffects = 0 in {
  defm FMLA  : SIMDFPIndexedTied<0, 0b0001, "fmla">;
  defm FMLS  : SIMDFPIndexedTied<0, 0b0101, "fmls">;
}

// NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
// instruction expects the addend first, while the intrinsic expects it last.

// On the other hand, there are quite a few valid combinatorial options due to
// the commutativity of multiplication and the fact that (-x) * y = x * (-y).
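// So, for example, each of fma(-x, y, a), fma(y, -x, a), fma(x, -y, a) and
// fma(-y, x, a) can be matched to a single indexed FMLS (an illustrative
// summary of the four FMLS TriOpFrags below).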
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(any_fma node:$RHS, node:$MHS, node:$LHS)>>;
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(any_fma node:$MHS, node:$RHS, node:$LHS)>>;

defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(any_fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(any_fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(any_fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(any_fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;

multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
  // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                           VectorIndexS:$idx))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (v2f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i64 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                           VectorIndexS:$idx))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (v4f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i64 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
  // (DUPLANE from 64-bit would be trivial).
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
                                           VectorIndexD:$idx))),
6436            (FMLSv2i64_indexed
6437                V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexD:$idx)>;
6438  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
6439                           (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
6440            (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
6441                (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;
6442
6443  // 2 variants for 32-bit scalar version: extract from .2s or from .4s
6444  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
6445                         (vector_extract (v4f32 (fneg V128:$Rm)),
6446                                         VectorIndexS:$idx))),
6447            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
6448                V128:$Rm, VectorIndexS:$idx)>;
6449  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
6450                         (vector_extract (v4f32 (insert_subvector undef,
6451                                                    (v2f32 (fneg V64:$Rm)),
6452                                                    (i64 0))),
6453                                         VectorIndexS:$idx))),
6454            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
6455                (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;
6456
6457  // 1 variant for 64-bit scalar version: extract from .1d or from .2d
6458  def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
6459                         (vector_extract (v2f64 (fneg V128:$Rm)),
6460                                         VectorIndexS:$idx))),
6461                                         VectorIndexD:$idx))),
6462                V128:$Rm, VectorIndexS:$idx)>;
6463                V128:$Rm, VectorIndexD:$idx)>;
6464
6465defm : FMLSIndexedAfterNegPatterns<
6466           TriOpFrag<(any_fma node:$RHS, node:$MHS, node:$LHS)> >;
6467defm : FMLSIndexedAfterNegPatterns<
6468           TriOpFrag<(any_fma node:$MHS, node:$RHS, node:$LHS)> >;
6469
6470defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
6471defm FMUL  : SIMDFPIndexed<0, 0b1001, "fmul", any_fmul>;
6472
6473def : Pat<(v2f32 (any_fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
6474          (FMULv2i32_indexed V64:$Rn,
6475            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
6476            (i64 0))>;
6477def : Pat<(v4f32 (any_fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
6478          (FMULv4i32_indexed V128:$Rn,
6479            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
6480            (i64 0))>;
6481def : Pat<(v2f64 (any_fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
6482          (FMULv2i64_indexed V128:$Rn,
6483            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
6484            (i64 0))>;
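// For example, these let a multiply by a splatted scalar such as
//   fmul <2 x float> %v, (splat %s)
// select to a single by-element multiply
//   fmul v0.2s, v1.2s, v2.s[0]
// with the scalar moved into lane 0 of a vector register instead of being
// broadcast with a separate dup.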
6485
6486defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
6487defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
6488
6489defm SQDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqdmulh_lane,
6490                                     int_aarch64_neon_sqdmulh_laneq>;
6491defm SQRDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqrdmulh_lane,
6492                                      int_aarch64_neon_sqrdmulh_laneq>;
6493
6494// Generated by MachineCombiner
6495defm MLA   : SIMDVectorIndexedHSTied<1, 0b0000, "mla", null_frag>;
6496defm MLS   : SIMDVectorIndexedHSTied<1, 0b0100, "mls", null_frag>;
6497
6498defm MUL   : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
6499defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
6500    TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>>;
6501defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
6502    TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>>;
6503defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull", AArch64smull>;
6504defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
6505                                           int_aarch64_neon_sqadd>;
6506defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
6507                                           int_aarch64_neon_sqsub>;
6508defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
6509                                          int_aarch64_neon_sqrdmlah>;
6510defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
6511                                          int_aarch64_neon_sqrdmlsh>;
6512defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
6513defm UMLAL   : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
6514    TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>>;
6515defm UMLSL   : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
6516    TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>>;
6517defm UMULL   : SIMDVectorIndexedLongSD<1, 0b1010, "umull", AArch64umull>;
6518
6519// A scalar sqdmull with the second operand being a vector lane can be
6520// handled directly with the indexed instruction encoding.
6521def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
6522                                          (vector_extract (v4i32 V128:$Vm),
6523                                                           VectorIndexS:$idx)),
6524          (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
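// E.g. a saturating doubling multiply-long of a scalar by lane 1 of a .4s
// register can select to "sqdmull d0, s0, v1.s[1]" rather than first moving
// the lane out with an extract.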
6525
6526//----------------------------------------------------------------------------
6527// AdvSIMD scalar shift instructions
6528//----------------------------------------------------------------------------
6529defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">;
6530defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">;
6531defm SCVTF  : SIMDFPScalarRShift<0, 0b11100, "scvtf">;
6532defm UCVTF  : SIMDFPScalarRShift<1, 0b11100, "ucvtf">;
6533// Codegen patterns for the above. We don't put these directly on the
6534// instructions because TableGen's type inference can't handle the truth.
6535// Having the same base pattern for fp <--> int totally freaks it out.
6536def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
6537          (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
6538def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
6539          (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
6540def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
6541          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
6542def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
6543          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
6544def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
6545                                            vecshiftR64:$imm)),
6546          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
6547def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
6548                                            vecshiftR64:$imm)),
6549          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
6550def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
6551          (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
6552def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
6553          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6554def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
6555                                            vecshiftR64:$imm)),
6556          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6557def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
6558          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6559def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
6560                                            vecshiftR64:$imm)),
6561          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6562def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
6563          (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
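// Usage sketch: the #fbits operand makes these fixed-point conversions, e.g.
// "fcvtzs s0, s0, #16" computes trunc(x * 2^16) (float to a value with 16
// fractional bits) and "scvtf s0, s0, #16" computes x * 2^-16 going back.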
6564
6565// Patterns for FP16 intrinsics: these require a register copy to/from the FPR16 view because i16 is not a legal type.
6566
6567def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
6568          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6569def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
6570          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6571def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
6572          (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
6573def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
6574            (and FPR32:$Rn, (i32 65535)),
6575            vecshiftR16:$imm)),
6576          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6577def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
6578          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6579def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
6580          (UCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
6581def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR32:$imm)),
6582          (i32 (INSERT_SUBREG
6583            (i32 (IMPLICIT_DEF)),
6584            (FCVTZSh FPR16:$Rn, vecshiftR32:$imm),
6585            hsub))>;
6586def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
6587          (i64 (INSERT_SUBREG
6588            (i64 (IMPLICIT_DEF)),
6589            (FCVTZSh FPR16:$Rn, vecshiftR64:$imm),
6590            hsub))>;
6591def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR32:$imm)),
6592          (i32 (INSERT_SUBREG
6593            (i32 (IMPLICIT_DEF)),
6594            (FCVTZUh FPR16:$Rn, vecshiftR32:$imm),
6595            hsub))>;
6596def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR64:$imm)),
6597          (i64 (INSERT_SUBREG
6598            (i64 (IMPLICIT_DEF)),
6599            (FCVTZUh FPR16:$Rn, vecshiftR64:$imm),
6600            hsub))>;
6601def : Pat<(i32 (int_aarch64_neon_facge (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
6602          (i32 (INSERT_SUBREG
6603            (i32 (IMPLICIT_DEF)),
6604            (FACGE16 FPR16:$Rn, FPR16:$Rm),
6605            hsub))>;
6606def : Pat<(i32 (int_aarch64_neon_facgt (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
6607          (i32 (INSERT_SUBREG
6608            (i32 (IMPLICIT_DEF)),
6609            (FACGT16 FPR16:$Rn, FPR16:$Rm),
6610            hsub))>;
6611
6612defm SHL      : SIMDScalarLShiftD<   0, 0b01010, "shl", AArch64vshl>;
6613defm SLI      : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
6614defm SQRSHRN  : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
6615                                     int_aarch64_neon_sqrshrn>;
6616defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
6617                                     int_aarch64_neon_sqrshrun>;
6618defm SQSHLU   : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
6619defm SQSHL    : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
6620defm SQSHRN   : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
6621                                     int_aarch64_neon_sqshrn>;
6622defm SQSHRUN  : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
6623                                     int_aarch64_neon_sqshrun>;
6624defm SRI      : SIMDScalarRShiftDTied<   1, 0b01000, "sri">;
6625defm SRSHR    : SIMDScalarRShiftD<   0, 0b00100, "srshr", AArch64srshri>;
6626defm SRSRA    : SIMDScalarRShiftDTied<   0, 0b00110, "srsra",
6627    TriOpFrag<(add node:$LHS,
6628                   (AArch64srshri node:$MHS, node:$RHS))>>;
6629defm SSHR     : SIMDScalarRShiftD<   0, 0b00000, "sshr", AArch64vashr>;
6630defm SSRA     : SIMDScalarRShiftDTied<   0, 0b00010, "ssra",
6631    TriOpFrag<(add_and_or_is_add node:$LHS,
6632                   (AArch64vashr node:$MHS, node:$RHS))>>;
6633defm UQRSHRN  : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
6634                                     int_aarch64_neon_uqrshrn>;
6635defm UQSHL    : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
6636defm UQSHRN   : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
6637                                     int_aarch64_neon_uqshrn>;
6638defm URSHR    : SIMDScalarRShiftD<   1, 0b00100, "urshr", AArch64urshri>;
6639defm URSRA    : SIMDScalarRShiftDTied<   1, 0b00110, "ursra",
6640    TriOpFrag<(add node:$LHS,
6641                   (AArch64urshri node:$MHS, node:$RHS))>>;
6642defm USHR     : SIMDScalarRShiftD<   1, 0b00000, "ushr", AArch64vlshr>;
6643defm USRA     : SIMDScalarRShiftDTied<   1, 0b00010, "usra",
6644    TriOpFrag<(add_and_or_is_add node:$LHS,
6645                   (AArch64vlshr node:$MHS, node:$RHS))>>;
6646
6647//----------------------------------------------------------------------------
6648// AdvSIMD vector shift instructions
6649//----------------------------------------------------------------------------
6650defm FCVTZS : SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
6651defm FCVTZU : SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
6652defm SCVTF  : SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
6653                                   int_aarch64_neon_vcvtfxs2fp>;
6654defm RSHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
6655                                         int_aarch64_neon_rshrn>;
6656defm SHL     : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
6657defm SHRN    : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
6658                          BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
6659defm SLI     : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", AArch64vsli>;
6660def : Pat<(v1i64 (AArch64vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
6661                                      (i32 vecshiftL64:$imm))),
6662          (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
6663defm SQRSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
6664                                         int_aarch64_neon_sqrshrn>;
6665defm SQRSHRUN: SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
6666                                         int_aarch64_neon_sqrshrun>;
6667defm SQSHLU : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
6668defm SQSHL  : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
6669defm SQSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
6670                                         int_aarch64_neon_sqshrn>;
6671defm SQSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
6672                                         int_aarch64_neon_sqshrun>;
6673defm SRI     : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", AArch64vsri>;
6674def : Pat<(v1i64 (AArch64vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
6675                                      (i32 vecshiftR64:$imm))),
6676          (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
6677defm SRSHR   : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
6678defm SRSRA   : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
6679                 TriOpFrag<(add node:$LHS,
6680                                (AArch64srshri node:$MHS, node:$RHS))> >;
6681defm SSHLL   : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
6682                BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;
6683
6684defm SSHR    : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
6685defm SSRA    : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
6686                TriOpFrag<(add_and_or_is_add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
6687defm UCVTF   : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
6688                        int_aarch64_neon_vcvtfxu2fp>;
6689defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
6690                                         int_aarch64_neon_uqrshrn>;
6691defm UQSHL   : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
6692defm UQSHRN  : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
6693                                         int_aarch64_neon_uqshrn>;
6694defm URSHR   : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
6695defm URSRA   : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
6696                TriOpFrag<(add node:$LHS,
6697                               (AArch64urshri node:$MHS, node:$RHS))> >;
6698defm USHLL   : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
6699                BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
6700defm USHR    : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
6701defm USRA    : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
6702                TriOpFrag<(add_and_or_is_add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;
6703
6704// RADDHN patterns for when RSHRN shifts by half the size of the vector element
6705def : Pat<(v8i8 (int_aarch64_neon_rshrn (v8i16 V128:$Vn), (i32 8))),
6706          (RADDHNv8i16_v8i8 V128:$Vn, (v8i16 (MOVIv2d_ns (i32 0))))>;
6707def : Pat<(v4i16 (int_aarch64_neon_rshrn (v4i32 V128:$Vn), (i32 16))),
6708          (RADDHNv4i32_v4i16 V128:$Vn, (v4i32 (MOVIv2d_ns (i32 0))))>;
6709def : Pat<(v2i32 (int_aarch64_neon_rshrn (v2i64 V128:$Vn), (i32 32))),
6710          (RADDHNv2i64_v2i32 V128:$Vn, (v2i64 (MOVIv2d_ns (i32 0))))>;
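// Why this is sound: for a 16-bit lane x, "rshrn #8" computes
// trunc8((x + 0x80) >> 8), which is exactly the rounded high half that
// raddhn produces for x + 0, so pairing with a zero vector yields the same
// bits (and raddhn can be cheaper than the shift on some cores).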
6711
6712// RADDHN2 patterns for when RSHRN shifts by half the size of the vector element
6713def : Pat<(v16i8 (concat_vectors
6714                 (v8i8 V64:$Vd),
6715                 (v8i8 (int_aarch64_neon_rshrn (v8i16 V128:$Vn), (i32 8))))),
6716          (RADDHNv8i16_v16i8
6717                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
6718                 (v8i16 (MOVIv2d_ns (i32 0))))>;
6719def : Pat<(v8i16 (concat_vectors
6720                 (v4i16 V64:$Vd),
6721                 (v4i16 (int_aarch64_neon_rshrn (v4i32 V128:$Vn), (i32 16))))),
6722          (RADDHNv4i32_v8i16
6723                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
6724                 (v4i32 (MOVIv2d_ns (i32 0))))>;
6725def : Pat<(v4i32 (concat_vectors
6726                 (v2i32 V64:$Vd),
6727                 (v2i32 (int_aarch64_neon_rshrn (v2i64 V128:$Vn), (i32 32))))),
6728          (RADDHNv2i64_v4i32
6729                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
6730                 (v2i64 (MOVIv2d_ns (i32 0))))>;
6731
6732// SHRN patterns for when a logical right shift was used instead of arithmetic
6733// (the immediate guarantees no sign bits actually end up in the result so it
6734// doesn't matter).
6735def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
6736          (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
6737def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
6738          (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
6739def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
6740          (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;
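// Worked example: for a 16-bit lane and imm in [1,8], the low 8 bits of the
// shifted value are original bits [imm+7:imm], so the sign/zero fill never
// reaches the truncated result. E.g. x = 0x8000, imm = 8: ashr gives 0xff80,
// lshr gives 0x0080, and both truncate to 0x80.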
6741
6742def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
6743                                 (trunc (AArch64vlshr (v8i16 V128:$Rn),
6744                                                    vecshiftR16Narrow:$imm)))),
6745          (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6746                           V128:$Rn, vecshiftR16Narrow:$imm)>;
6747def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
6748                                 (trunc (AArch64vlshr (v4i32 V128:$Rn),
6749                                                    vecshiftR32Narrow:$imm)))),
6750          (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6751                           V128:$Rn, vecshiftR32Narrow:$imm)>;
6752def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
6753                                 (trunc (AArch64vlshr (v2i64 V128:$Rn),
6754                                                    vecshiftR64Narrow:$imm)))),
6755          (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6756                           V128:$Rn, vecshiftR64Narrow:$imm)>;
6757
6758// Vector sign and zero extensions are implemented with SSHLL and USHLL.
6759// Anyexts are implemented as zexts.
6760def : Pat<(v8i16 (sext   (v8i8 V64:$Rn))),  (SSHLLv8i8_shift  V64:$Rn, (i32 0))>;
6761def : Pat<(v8i16 (zext   (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
6762def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
6763def : Pat<(v4i32 (sext   (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
6764def : Pat<(v4i32 (zext   (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
6765def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
6766def : Pat<(v2i64 (sext   (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
6767def : Pat<(v2i64 (zext   (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
6768def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
6769// Also match an extend from the upper half of a 128-bit source register.
6770def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6771          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
6772def : Pat<(v8i16 (zext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6773          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
6774def : Pat<(v8i16 (sext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6775          (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
6776def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6777          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
6778def : Pat<(v4i32 (zext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6779          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
6780def : Pat<(v4i32 (sext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6781          (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
6782def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6783          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
6784def : Pat<(v2i64 (zext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6785          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
6786def : Pat<(v2i64 (sext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6787          (SSHLLv4i32_shift V128:$Rn, (i32 0))>;
6788
6789// Vector shift sxtl aliases
6790def : InstAlias<"sxtl.8h $dst, $src1",
6791                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6792def : InstAlias<"sxtl $dst.8h, $src1.8b",
6793                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6794def : InstAlias<"sxtl.4s $dst, $src1",
6795                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6796def : InstAlias<"sxtl $dst.4s, $src1.4h",
6797                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6798def : InstAlias<"sxtl.2d $dst, $src1",
6799                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6800def : InstAlias<"sxtl $dst.2d, $src1.2s",
6801                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6802
6803// Vector shift sxtl2 aliases
6804def : InstAlias<"sxtl2.8h $dst, $src1",
6805                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6806def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
6807                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6808def : InstAlias<"sxtl2.4s $dst, $src1",
6809                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6810def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
6811                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6812def : InstAlias<"sxtl2.2d $dst, $src1",
6813                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6814def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
6815                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6816
6817// Vector shift uxtl aliases
6818def : InstAlias<"uxtl.8h $dst, $src1",
6819                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6820def : InstAlias<"uxtl $dst.8h, $src1.8b",
6821                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6822def : InstAlias<"uxtl.4s $dst, $src1",
6823                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6824def : InstAlias<"uxtl $dst.4s, $src1.4h",
6825                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6826def : InstAlias<"uxtl.2d $dst, $src1",
6827                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6828def : InstAlias<"uxtl $dst.2d, $src1.2s",
6829                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6830
6831// Vector shift uxtl2 aliases
6832def : InstAlias<"uxtl2.8h $dst, $src1",
6833                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6834def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
6835                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6836def : InstAlias<"uxtl2.4s $dst, $src1",
6837                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6838def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
6839                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6840def : InstAlias<"uxtl2.2d $dst, $src1",
6841                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6842def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
6843                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
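// All of the sxtl/uxtl aliases above are simply the corresponding shift-long
// instruction with a zero shift amount, e.g.
//   uxtl v0.8h, v1.8b  ==  ushll v0.8h, v1.8b, #0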
6844
6845// If an integer is about to be converted to a floating point value,
6846// just load it on the floating point unit.
6847// These patterns are more complex because floating point loads do not
6848// support sign extension.
6849// The sign extension has to be explicitly added and is only supported for
6850// one step: byte-to-half, half-to-word, word-to-doubleword.
6851// SCVTF GPR -> FPR is 9 cycles.
6852// SCVTF FPR -> FPR is 4 cycles.
6853// (sign extension with lengthen) SXTL FPR -> FPR is 2 cycles.
6854// Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
6855// and still be faster.
6856// However, this is not good for code size.
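// With the cycle counts above, the FPR path costs 2 + 2 (two SXTLs) + 4
// (SCVTF FPR -> FPR) = 8 cycles versus 9 for SCVTF GPR -> FPR, at the price
// of three instructions instead of one.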
6857// 8-bits -> float. 2 size step-ups.
6858class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
6859  : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
6860        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
6861                            (SSHLLv4i16_shift
6862                              (f64
6863                                (EXTRACT_SUBREG
6864                                  (SSHLLv8i8_shift
6865                                    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6866                                        INST,
6867                                        bsub),
6868                                    0),
6869                                  dsub)),
6870                               0),
6871                             ssub)))>,
6872    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32, HasNEON]>;
6873
6874def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
6875                          (LDRBroW  GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
6876def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
6877                          (LDRBroX  GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
6878def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
6879                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
6880def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
6881                          (LDURBi GPR64sp:$Rn, simm9:$offset)>;
6882
6883// 16-bits -> float. 1 size step-up.
6884class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
6885  : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
6886        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
6887                            (SSHLLv4i16_shift
6888                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6889                                  INST,
6890                                  hsub),
6891                                0),
6892                            ssub)))>,
6893    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32, HasNEON]>;
6894
6895def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
6896                           (LDRHroW   GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
6897def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
6898                           (LDRHroX   GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
6899def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
6900                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
6901def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
6902                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
6903
6904// 32-bits to 32-bits is handled in target specific dag combine:
6905// performIntToFpCombine.
6906// 64-bit integer to 32-bit floating point is not possible with
6907// SCVTF on floating point registers (both source and destination
6908// must have the same size).
6909
6910// Here are the patterns for 8, 16, 32, and 64-bits to double.
6911// 8-bits -> double. 3 size step-ups: give up.
6912// 16-bits -> double. 2 size step-ups.
6913class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
6914  : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
6915           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
6916                              (SSHLLv2i32_shift
6917                                 (f64
6918                                  (EXTRACT_SUBREG
6919                                    (SSHLLv4i16_shift
6920                                      (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6921                                        INST,
6922                                        hsub),
6923                                     0),
6924                                   dsub)),
6925                               0),
6926                             dsub)))>,
6927    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32, HasNEON]>;
6928
6929def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
6930                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
6931def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
6932                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
6933def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
6934                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
6935def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
6936                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
6937// 32-bits -> double. 1 size step-up.
6938class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
6939  : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
6940           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
6941                              (SSHLLv2i32_shift
6942                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6943                                  INST,
6944                                  ssub),
6945                               0),
6946                             dsub)))>,
6947    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32, HasNEON]>;
6948
6949def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
6950                           (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
6951def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
6952                           (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
6953def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
6954                           (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
6955def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
6956                           (LDURSi GPR64sp:$Rn, simm9:$offset)>;
6957
6958// 64-bits -> double is handled in target specific dag combine:
6959// performIntToFpCombine.
6960
6961
6962//----------------------------------------------------------------------------
6963// AdvSIMD Load-Store Structure
6964//----------------------------------------------------------------------------
6965defm LD1 : SIMDLd1Multiple<"ld1">;
6966defm LD2 : SIMDLd2Multiple<"ld2">;
6967defm LD3 : SIMDLd3Multiple<"ld3">;
6968defm LD4 : SIMDLd4Multiple<"ld4">;
6969
6970defm ST1 : SIMDSt1Multiple<"st1">;
6971defm ST2 : SIMDSt2Multiple<"st2">;
6972defm ST3 : SIMDSt3Multiple<"st3">;
6973defm ST4 : SIMDSt4Multiple<"st4">;
6974
6975class Ld1Pat<ValueType ty, Instruction INST>
6976  : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;
6977
6978def : Ld1Pat<v16i8, LD1Onev16b>;
6979def : Ld1Pat<v8i16, LD1Onev8h>;
6980def : Ld1Pat<v4i32, LD1Onev4s>;
6981def : Ld1Pat<v2i64, LD1Onev2d>;
6982def : Ld1Pat<v8i8,  LD1Onev8b>;
6983def : Ld1Pat<v4i16, LD1Onev4h>;
6984def : Ld1Pat<v2i32, LD1Onev2s>;
6985def : Ld1Pat<v1i64, LD1Onev1d>;
6986
6987class St1Pat<ValueType ty, Instruction INST>
6988  : Pat<(store ty:$Vt, GPR64sp:$Rn),
6989        (INST ty:$Vt, GPR64sp:$Rn)>;
6990
6991def : St1Pat<v16i8, ST1Onev16b>;
6992def : St1Pat<v8i16, ST1Onev8h>;
6993def : St1Pat<v4i32, ST1Onev4s>;
6994def : St1Pat<v2i64, ST1Onev2d>;
6995def : St1Pat<v8i8,  ST1Onev8b>;
6996def : St1Pat<v4i16, ST1Onev4h>;
6997def : St1Pat<v2i32, ST1Onev2s>;
6998def : St1Pat<v1i64, ST1Onev1d>;
6999
7000//---
7001// Single-element
7002//---
7003
7004defm LD1R          : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
7005defm LD2R          : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
7006defm LD3R          : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
7007defm LD4R          : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
7008let mayLoad = 1, hasSideEffects = 0 in {
7009defm LD1 : SIMDLdSingleBTied<0, 0b000,       "ld1", VecListOneb,   GPR64pi1>;
7010defm LD1 : SIMDLdSingleHTied<0, 0b010, 0,    "ld1", VecListOneh,   GPR64pi2>;
7011defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes,   GPR64pi4>;
7012defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned,   GPR64pi8>;
7013defm LD2 : SIMDLdSingleBTied<1, 0b000,       "ld2", VecListTwob,   GPR64pi2>;
7014defm LD2 : SIMDLdSingleHTied<1, 0b010, 0,    "ld2", VecListTwoh,   GPR64pi4>;
7015defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos,   GPR64pi8>;
7016defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod,   GPR64pi16>;
7017defm LD3 : SIMDLdSingleBTied<0, 0b001,       "ld3", VecListThreeb, GPR64pi3>;
7018defm LD3 : SIMDLdSingleHTied<0, 0b011, 0,    "ld3", VecListThreeh, GPR64pi6>;
7019defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
7020defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
7021defm LD4 : SIMDLdSingleBTied<1, 0b001,       "ld4", VecListFourb,  GPR64pi4>;
7022defm LD4 : SIMDLdSingleHTied<1, 0b011, 0,    "ld4", VecListFourh,  GPR64pi8>;
7023defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours,  GPR64pi16>;
7024defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd,  GPR64pi32>;
7025}
7026
7027def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
7028          (LD1Rv8b GPR64sp:$Rn)>;
7029def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
7030          (LD1Rv16b GPR64sp:$Rn)>;
7031def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
7032          (LD1Rv4h GPR64sp:$Rn)>;
7033def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
7034          (LD1Rv8h GPR64sp:$Rn)>;
7035def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
7036          (LD1Rv2s GPR64sp:$Rn)>;
7037def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
7038          (LD1Rv4s GPR64sp:$Rn)>;
7039def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
7040          (LD1Rv2d GPR64sp:$Rn)>;
7041def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
7042          (LD1Rv1d GPR64sp:$Rn)>;
7043// Grab the floating point version too
7044def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
7045          (LD1Rv2s GPR64sp:$Rn)>;
7046def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
7047          (LD1Rv4s GPR64sp:$Rn)>;
7048def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
7049          (LD1Rv2d GPR64sp:$Rn)>;
7050def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
7051          (LD1Rv1d GPR64sp:$Rn)>;
7052def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
7053          (LD1Rv4h GPR64sp:$Rn)>;
7054def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
7055          (LD1Rv8h GPR64sp:$Rn)>;
7056def : Pat<(v4bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
7057          (LD1Rv4h GPR64sp:$Rn)>;
7058def : Pat<(v8bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
7059          (LD1Rv8h GPR64sp:$Rn)>;
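// For example, a load feeding a splat, such as
//   %s = load i32 ; %v = splat %s
// selects to the single replicating load
//   ld1r { v0.4s }, [x0]
// rather than an ldr followed by a dup.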
7060
7061class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
7062                    ValueType VTy, ValueType STy, Instruction LD1>
7063  : Pat<(vector_insert (VTy VecListOne128:$Rd),
7064           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
7065        (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;
7066
7067def : Ld1Lane128Pat<extloadi8,  VectorIndexB, v16i8, i32, LD1i8>;
7068def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
7069def : Ld1Lane128Pat<load,       VectorIndexS, v4i32, i32, LD1i32>;
7070def : Ld1Lane128Pat<load,       VectorIndexS, v4f32, f32, LD1i32>;
7071def : Ld1Lane128Pat<load,       VectorIndexD, v2i64, i64, LD1i64>;
7072def : Ld1Lane128Pat<load,       VectorIndexD, v2f64, f64, LD1i64>;
7073def : Ld1Lane128Pat<load,       VectorIndexH, v8f16, f16, LD1i16>;
7074def : Ld1Lane128Pat<load,       VectorIndexH, v8bf16, bf16, LD1i16>;
7075
7076// Generate LD1 for extload if memory type does not match the
7077// destination type, for example:
7078//
7079//   (v4i32 (insert_vector_elt (load anyext from i8) idx))
7080//
7081// In this case, the index must be adjusted to match the LD1 lane type.
7082//
7083class Ld1Lane128IdxOpPat<SDPatternOperator scalar_load, Operand
7084                    VecIndex, ValueType VTy, ValueType STy,
7085                    Instruction LD1, SDNodeXForm IdxOp>
7086  : Pat<(vector_insert (VTy VecListOne128:$Rd),
7087                       (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
7088        (LD1 VecListOne128:$Rd, (IdxOp VecIndex:$idx), GPR64sp:$Rn)>;
7089
7090def VectorIndexStoH : SDNodeXForm<imm, [{
7091  return CurDAG->getTargetConstant(N->getZExtValue() * 2, SDLoc(N), MVT::i64);
7092}]>;
7093def VectorIndexStoB : SDNodeXForm<imm, [{
7094  return CurDAG->getTargetConstant(N->getZExtValue() * 4, SDLoc(N), MVT::i64);
7095}]>;
7096def VectorIndexHtoB : SDNodeXForm<imm, [{
7097  return CurDAG->getTargetConstant(N->getZExtValue() * 2, SDLoc(N), MVT::i64);
7098}]>;
7099
7100def : Ld1Lane128IdxOpPat<extloadi16, VectorIndexS, v4i32, i32, LD1i16, VectorIndexStoH>;
7101def : Ld1Lane128IdxOpPat<extloadi8, VectorIndexS, v4i32, i32, LD1i8, VectorIndexStoB>;
7102def : Ld1Lane128IdxOpPat<extloadi8, VectorIndexH, v8i16, i32, LD1i8, VectorIndexHtoB>;
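// Worked example (little-endian lane numbering): inserting an extloaded i8
// into lane 1 of a v4i32 uses LD1i8, whose index counts bytes, so
// VectorIndexStoB rescales the index to 1 * 4 = 4; an extloaded i16 into the
// same lane becomes .h lane 1 * 2 = 2 via VectorIndexStoH.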
7103
7104// Same as above, but the first element is populated using
7105// scalar_to_vector + insert_subvector instead of insert_vector_elt.
7106class Ld1Lane128FirstElm<ValueType ResultTy, ValueType VecTy,
7107                        SDPatternOperator ExtLoad, Instruction LD1>
7108  : Pat<(ResultTy (scalar_to_vector (i32 (ExtLoad GPR64sp:$Rn)))),
7109          (ResultTy (EXTRACT_SUBREG
7110            (LD1 (VecTy (IMPLICIT_DEF)), 0, GPR64sp:$Rn), dsub))>;
7111
7112def : Ld1Lane128FirstElm<v2i32, v8i16, extloadi16, LD1i16>;
7113def : Ld1Lane128FirstElm<v2i32, v16i8, extloadi8, LD1i8>;
7114def : Ld1Lane128FirstElm<v4i16, v16i8, extloadi8, LD1i8>;
7115
7116class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
7117                   ValueType VTy, ValueType STy, Instruction LD1>
7118  : Pat<(vector_insert (VTy VecListOne64:$Rd),
7119           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
7120        (EXTRACT_SUBREG
7121            (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
7122                          VecIndex:$idx, GPR64sp:$Rn),
7123            dsub)>;
7124
7125def : Ld1Lane64Pat<extloadi8,  VectorIndexB, v8i8,  i32, LD1i8>;
7126def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
7127def : Ld1Lane64Pat<load,       VectorIndexS, v2i32, i32, LD1i32>;
7128def : Ld1Lane64Pat<load,       VectorIndexS, v2f32, f32, LD1i32>;
7129def : Ld1Lane64Pat<load,       VectorIndexH, v4f16, f16, LD1i16>;
7130def : Ld1Lane64Pat<load,       VectorIndexH, v4bf16, bf16, LD1i16>;
7131
7132
7133defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
7134defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
7135defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
7136defm LD4 : SIMDLdSt4SingleAliases<"ld4">;
7137
7138// Stores
7139defm ST1 : SIMDStSingleB<0, 0b000,       "st1", VecListOneb, GPR64pi1>;
7140defm ST1 : SIMDStSingleH<0, 0b010, 0,    "st1", VecListOneh, GPR64pi2>;
7141defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
7142defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;
7143
7144let AddedComplexity = 19 in
7145class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
7146                    ValueType VTy, ValueType STy, Instruction ST1>
7147  : Pat<(scalar_store
7148             (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
7149             GPR64sp:$Rn),
7150        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;
7151
7152def : St1Lane128Pat<truncstorei8,  VectorIndexB, v16i8, i32, ST1i8>;
7153def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
7154def : St1Lane128Pat<store,         VectorIndexS, v4i32, i32, ST1i32>;
7155def : St1Lane128Pat<store,         VectorIndexS, v4f32, f32, ST1i32>;
7156def : St1Lane128Pat<store,         VectorIndexD, v2i64, i64, ST1i64>;
7157def : St1Lane128Pat<store,         VectorIndexD, v2f64, f64, ST1i64>;
7158def : St1Lane128Pat<store,         VectorIndexH, v8f16, f16, ST1i16>;
7159def : St1Lane128Pat<store,         VectorIndexH, v8bf16, bf16, ST1i16>;
7160
7161let AddedComplexity = 19 in
7162class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
7163                   ValueType VTy, ValueType STy, Instruction ST1>
7164  : Pat<(scalar_store
7165             (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
7166             GPR64sp:$Rn),
7167        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
7168             VecIndex:$idx, GPR64sp:$Rn)>;
7169
7170def : St1Lane64Pat<truncstorei8,  VectorIndexB, v8i8, i32, ST1i8>;
7171def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
7172def : St1Lane64Pat<store,         VectorIndexS, v2i32, i32, ST1i32>;
7173def : St1Lane64Pat<store,         VectorIndexS, v2f32, f32, ST1i32>;
7174def : St1Lane64Pat<store,         VectorIndexH, v4f16, f16, ST1i16>;
7175def : St1Lane64Pat<store,         VectorIndexH, v4bf16, bf16, ST1i16>;
7176
7177multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
7178                             ValueType VTy, ValueType STy, Instruction ST1,
7179                             int offset> {
7180  def : Pat<(scalar_store
7181              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
7182              GPR64sp:$Rn, offset),
7183        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
7184             VecIndex:$idx, GPR64sp:$Rn, XZR)>;
7185
7186  def : Pat<(scalar_store
7187              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
7188              GPR64sp:$Rn, GPR64:$Rm),
7189        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
7190             VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
7191}
7192
7193defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
7194defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
7195                        2>;
7196defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
7197defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
7198defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
7199defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
7200defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
7201defm : St1LanePost64Pat<post_store, VectorIndexH, v4bf16, bf16, ST1i16_POST, 2>;
7202
7203multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
7204                             ValueType VTy, ValueType STy, Instruction ST1,
7205                             int offset> {
7206  def : Pat<(scalar_store
7207              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
7208              GPR64sp:$Rn, offset),
7209        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;
7210
7211  def : Pat<(scalar_store
7212              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
7213              GPR64sp:$Rn, GPR64:$Rm),
7214        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
7215}
7216
7217defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
7218                         1>;
7219defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
7220                         2>;
7221defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
7222defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
7223defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
7224defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
7225defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
7226defm : St1LanePost128Pat<post_store, VectorIndexH, v8bf16, bf16, ST1i16_POST, 2>;
7227
7228let mayStore = 1, hasSideEffects = 0 in {
7229defm ST2 : SIMDStSingleB<1, 0b000,       "st2", VecListTwob,   GPR64pi2>;
7230defm ST2 : SIMDStSingleH<1, 0b010, 0,    "st2", VecListTwoh,   GPR64pi4>;
7231defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos,   GPR64pi8>;
7232defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod,   GPR64pi16>;
7233defm ST3 : SIMDStSingleB<0, 0b001,       "st3", VecListThreeb, GPR64pi3>;
7234defm ST3 : SIMDStSingleH<0, 0b011, 0,    "st3", VecListThreeh, GPR64pi6>;
7235defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
7236defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
7237defm ST4 : SIMDStSingleB<1, 0b001,       "st4", VecListFourb,  GPR64pi4>;
7238defm ST4 : SIMDStSingleH<1, 0b011, 0,    "st4", VecListFourh,  GPR64pi8>;
7239defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours,  GPR64pi16>;
7240defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd,  GPR64pi32>;
7241}
7242
7243defm ST1 : SIMDLdSt1SingleAliases<"st1">;
7244defm ST2 : SIMDLdSt2SingleAliases<"st2">;
7245defm ST3 : SIMDLdSt3SingleAliases<"st3">;
7246defm ST4 : SIMDLdSt4SingleAliases<"st4">;
7247
7248//----------------------------------------------------------------------------
7249// Crypto extensions
7250//----------------------------------------------------------------------------
7251
7252let Predicates = [HasAES] in {
7253def AESErr   : AESTiedInst<0b0100, "aese",   int_aarch64_crypto_aese>;
7254def AESDrr   : AESTiedInst<0b0101, "aesd",   int_aarch64_crypto_aesd>;
7255def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
7256def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
7257}
7258
7259// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
7260// for AES fusion on some CPUs.
7261let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
7262def AESMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
7263                        Sched<[WriteVq]>;
7264def AESIMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
7265                         Sched<[WriteVq]>;
7266}
7267
7268// Only use constrained versions of AES(I)MC instructions if they are paired with
7269// AESE/AESD.
7270def : Pat<(v16i8 (int_aarch64_crypto_aesmc
7271            (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
7272                                            (v16i8 V128:$src2))))),
7273          (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1),
7274                                             (v16i8 V128:$src2)))))>,
7275          Requires<[HasFuseAES]>;
7276
7277def : Pat<(v16i8 (int_aarch64_crypto_aesimc
7278            (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
7279                                            (v16i8 V128:$src2))))),
7280          (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1),
7281                                              (v16i8 V128:$src2)))))>,
7282          Requires<[HasFuseAES]>;
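// The tied pseudos keep the AES(I)MC source and destination in one register,
// giving the "aese v0.16b, v1.16b ; aesmc v0.16b, v0.16b" arrangement that
// cores implementing AES fusion can execute as a single fused pair.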
7283
7284let Predicates = [HasSHA2] in {
7285def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c",   int_aarch64_crypto_sha1c>;
7286def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p",   int_aarch64_crypto_sha1p>;
7287def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m",   int_aarch64_crypto_sha1m>;
7288def SHA1SU0rrr   : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
7289def SHA256Hrrr   : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
7290def SHA256H2rrr  : SHATiedInstQQV<0b101, "sha256h2",int_aarch64_crypto_sha256h2>;
7291def SHA256SU1rrr :SHATiedInstVVV<0b110, "sha256su1",int_aarch64_crypto_sha256su1>;
7292
7293def SHA1Hrr     : SHAInstSS<    0b0000, "sha1h",    int_aarch64_crypto_sha1h>;
7294def SHA1SU1rr   : SHATiedInstVV<0b0001, "sha1su1",  int_aarch64_crypto_sha1su1>;
7295def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",int_aarch64_crypto_sha256su0>;
7296}
7297
7298//----------------------------------------------------------------------------
7299// Compiler-pseudos
7300//----------------------------------------------------------------------------
7301// FIXME: Like for X86, these should go in their own separate .td file.
7302
7303// For an anyext, we don't care what the high bits are, so we can perform an
7304// INSERT_SUBREG into an IMPLICIT_DEF.
7305def : Pat<(i64 (anyext GPR32:$src)),
7306          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;
7307
7308// When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
7309// then assert the extension has happened.
7310def : Pat<(i64 (zext GPR32:$src)),
7311          (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
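// E.g. (i64 (zext w1)) becomes "mov w0, w1": writing a W register implicitly
// zeroes the upper 32 bits, and SUBREG_TO_REG records that assertion for the
// register allocator.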
7312
7313// To sign extend, we use a signed bitfield move instruction (SBFM) on the
7314// containing super-reg.
7315def : Pat<(i64 (sext GPR32:$src)),
7316   (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
7317def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
7318def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
7319def : Pat<(i64 (sext_inreg GPR64:$src, i8)),  (SBFMXri GPR64:$src, 0, 7)>;
7320def : Pat<(i64 (sext_inreg GPR64:$src, i1)),  (SBFMXri GPR64:$src, 0, 0)>;
7321def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
7322def : Pat<(i32 (sext_inreg GPR32:$src, i8)),  (SBFMWri GPR32:$src, 0, 7)>;
7323def : Pat<(i32 (sext_inreg GPR32:$src, i1)),  (SBFMWri GPR32:$src, 0, 0)>;
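// E.g. the i8 case is the canonical sxtb:
//   sxtb w0, w1  ==  sbfm w0, w1, #0, #7
// a signed bitfield move extracting bits 7:0 and sign-extending the result.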
7324
7325def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
7326          (SBFMWri GPR32:$Rn, (i64 (i32shift_a       imm0_31:$imm)),
7327                              (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
7328def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
7329          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
7330                              (i64 (i64shift_sext_i8 imm0_63:$imm)))>;
7331
7332def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
7333          (SBFMWri GPR32:$Rn, (i64 (i32shift_a        imm0_31:$imm)),
7334                              (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
7335def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
7336          (SBFMXri GPR64:$Rn, (i64 (i64shift_a        imm0_63:$imm)),
7337                              (i64 (i64shift_sext_i16 imm0_63:$imm)))>;
7338
7339def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
7340          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
7341                   (i64 (i64shift_a        imm0_63:$imm)),
7342                   (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
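// Illustrative example, assuming the i32shift_* transforms compute the usual
// SBFIZ field encoding: (shl (sext_inreg x, i8), 3) folds to a single
//   sbfiz w0, w1, #3, #8
// i.e. SBFMWri with immr = (32 - 3) mod 32 = 29 and imms = 8 - 1 = 7.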
7343
7344// sra patterns have an AddedComplexity of 10, so make sure we have a higher
7345// AddedComplexity for the following patterns since we want to match sext + sra
7346// patterns before we attempt to match a single sra node.
7347let AddedComplexity = 20 in {
7348// We support all sext + sra combinations that preserve at least one bit of
7349// the value being sign extended, i.e. we support shifts of up to
7350// bitwidth-1 bits.
7351def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
7352          (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
7353def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
7354          (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;
7355
7356def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
7357          (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
7358def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
7359          (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;
7360
7361def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
7362          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
7363                   (i64 imm0_31:$imm), 31)>;
7364} // AddedComplexity = 20
7365
7366// To truncate, we can simply extract from a subregister.
7367def : Pat<(i32 (trunc GPR64sp:$src)),
7368          (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;
7369
7370// __builtin_trap() uses the BRK instruction on AArch64.
7371def : Pat<(trap), (BRK 1)>;
7372def : Pat<(debugtrap), (BRK 0xF000)>;
7373
7374def ubsan_trap_xform : SDNodeXForm<timm, [{
7375  return CurDAG->getTargetConstant(N->getZExtValue() | ('U' << 8), SDLoc(N), MVT::i32);
7376}]>;
7377
7378def ubsan_trap_imm : TImmLeaf<i32, [{
7379  return isUInt<8>(Imm);
7380}], ubsan_trap_xform>;
7381
7382def : Pat<(ubsantrap ubsan_trap_imm:$kind), (BRK ubsan_trap_imm:$kind)>;
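// Worked example: 'U' << 8 is 0x5500, so a ubsantrap with kind 42 (0x2a)
// emits "brk #0x552a", keeping the 8-bit UBSan check kind in the low byte of
// the BRK immediate.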
7383
7384// Multiply high patterns which multiply the lower subvector using smull/umull
7385// and the upper subvector with smull2/umull2. Then shuffle the high
7386// part of both results together.
def : Pat<(v16i8 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (SMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (SMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (SMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;

def : Pat<(v16i8 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (UMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (UMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (UMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
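// As a sketch, for v4i32 mulhs the expansion above corresponds to:
//   smull  v2.2d, v0.2s, v1.2s      // products of the low halves
//   smull2 v3.2d, v0.4s, v1.4s      // products of the high halves
//   uzp2   v0.4s, v2.4s, v3.4s      // keep the high 32 bits of each product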

// Conversions within AdvSIMD types in the same register size are free.
// But because we need a consistent lane ordering, in big endian many
// conversions require one or more REV instructions.
//
// Consider a simple memory load followed by a bitconvert then a store.
//   v0 = load v2i32
//   v1 = BITCAST v2i32 v0 to v4i16
//        store v4i16 v1
//
// In big endian mode every memory access has an implicit byte swap. LDR and
// STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
// is, they treat the vector as a sequence of elements to be byte-swapped.
// The two pairs of instructions are fundamentally incompatible. We've decided
// to use LD1/ST1 only to simplify compiler implementation.
//
// LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV, so the
// original code sequence becomes:
//   v0 = load v2i32
//   v1 = REV v2i32                  (implicit)
//   v2 = BITCAST v2i32 v1 to v4i16
//   v3 = REV v4i16 v2               (implicit)
//        store v4i16 v3
//
// But this is now broken - the value stored is different from the value
// loaded due to lane reordering. To fix this, on every BITCAST we must
// perform two additional REVs:
//   v0 = load v2i32
//   v1 = REV v2i32                  (implicit)
//   v2 = REV v2i32
//   v3 = BITCAST v2i32 v2 to v4i16
//   v4 = REV v4i16
//   v5 = REV v4i16 v4               (implicit)
//        store v4i16 v5
//
// This means two extra instructions, but in most cases the two REV
// instructions can be combined into one. For example:
//   (REV64_2s (REV64_4h X)) === (REV32_4h X)
//
// There is also no 128-bit REV instruction. This must be synthesized with an
// EXT instruction.
//
// Most bitconverts require some sort of conversion. The only exceptions are:
//   a) Identity conversions - vNfX <-> vNiX
//   b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
//

// Natural vector casts (64 bit)
foreach VT = [ v8i8, v4i16, v4f16, v4bf16, v2i32, v2f32, v1i64, v1f64, f64 ] in
  foreach VT2 = [ v8i8, v4i16, v4f16, v4bf16, v2i32, v2f32, v1i64, v1f64, f64 ] in
    def : Pat<(VT (AArch64NvCast (VT2 FPR64:$src))),
              (VT FPR64:$src)>;

// Natural vector casts (128 bit)
foreach VT = [ v16i8, v8i16, v8f16, v8bf16, v4i32, v4f32, v2i64, v2f64 ] in
  foreach VT2 = [ v16i8, v8i16, v8f16, v8bf16, v4i32, v4f32, v2i64, v2f64 ] in
    def : Pat<(VT (AArch64NvCast (VT2 FPR128:$src))),
              (VT FPR128:$src)>;
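// Note: each foreach above expands to the full cross product (9 x 9 = 81
// patterns for the 64-bit types, 8 x 8 = 64 for the 128-bit types); every
// such NvCast is a register reuse and emits no instruction.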

let Predicates = [IsLE] in {
def : Pat<(v8i8  (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4bf16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;

def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8  (bitconvert GPR64:$Xn)),
                 (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4bf16 (bitconvert GPR64:$Xn)),
                  (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;

def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
          (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
}
def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;

def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
          (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
          (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
          (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;

def : Pat<(f16 (bitconvert (bf16 FPR16:$src))), (f16 FPR16:$src)>;
def : Pat<(bf16 (bitconvert (f16 FPR16:$src))), (bf16 FPR16:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
                             (v1i64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
                             (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))),
                             (v1i64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
                             (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))),
                             (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
                             (v1i64 (REV64v2i32 FPR64:$src))>;
}
def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))), (v2i32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
                             (v2i32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))),
                             (v2i32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
                             (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
                             (v2i32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))),
                             (v2i32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
                             (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))),
                             (v4i16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
                             (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
                             (v4i16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v4bf16 FPR64:$src))), (v4i16 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;

def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))), (v4bf16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
                             (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))),
                             (v4f16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
                             (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
                             (v4f16 (REV64v4i16 FPR64:$src))>;

def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))),
                             (v4bf16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))),
                             (v4bf16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))),
                             (v4bf16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))),
                             (v4bf16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))),
                             (v4bf16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))),
                             (v4bf16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4bf16 (bitconvert (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))), (v8i8  FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))),
                             (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))),
                             (v8i8 (REV16v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))),
                             (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))),
                             (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))),
                             (v8i8 (REV16v8i8 FPR64:$src))>;
def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))),
                             (v8i8 (REV16v8i8 FPR64:$src))>;
}

let Predicates = [IsLE] in {
def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))), (f64   FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))),
                             (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))),
                             (f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))),
                             (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))),
                             (f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))),
                             (f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))),
                             (f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(f64   (bitconvert (v1i64 FPR64:$src))), (f64   FPR64:$src)>;
def : Pat<(f64   (bitconvert (v1f64 FPR64:$src))), (f64   FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))), (v1f64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
                             (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
                             (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))),
                             (v1f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
                             (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
                             (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))),
                             (v1f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (f64   FPR64:$src))), (v1f64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))), (v2f32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
                             (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))),
                             (v2f32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))),
                             (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
                             (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))),
                             (v2f32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
                            (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
                                            (REV64v16i8 FPR128:$src), (i32 8)))>;
}

let Predicates = [IsLE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))),
                             (v2f64 (EXTv16i8 FPR128:$src,
                                              FPR128:$src, (i32 8)))>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
                             (v2f64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
                             (v2f64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
                             (v2f64 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))),
                             (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                    (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
                             (v4f32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
                             (v4f32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
                             (v4f32 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))), (v2i64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))),
                             (v2i64 (EXTv16i8 FPR128:$src,
                                              FPR128:$src, (i32 8)))>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
                             (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
                             (v2i64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
                             (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
}
def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))), (v4i32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))),
                             (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                              (REV64v4i32 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
                             (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
                             (v4i32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
                             (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))),
                             (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
                             (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
                             (v8i16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
                             (v8i16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
                             (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
                             (v8i16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v8bf16 FPR128:$src))), (v8i16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;

def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))),
                             (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
                             (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
                             (v8f16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
                             (v8f16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
                             (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
                             (v8f16 (REV32v8i16 FPR128:$src))>;

def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))),
                             (v8bf16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))),
                             (v8bf16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))),
                             (v8bf16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))),
                             (v8bf16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))),
                             (v8bf16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))),
                             (v8bf16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))), (v16i8 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))),
                             (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
                                              (REV64v16i8 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
                             (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
                             (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
                             (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
                             (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
}

def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4bf16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;

def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
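// E.g. taking the high v4i16 half of a v8i16 becomes, roughly:
//   dup v0.2d, v0.d[1]    // broadcast the high 64 bits into both lanes
// followed by a free dsub subregister read of the low half.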

// A 64-bit subvector insert to the first 128-bit vector position
// is a subregister copy that needs no instruction.
multiclass InsertSubvectorUndef<ValueType Ty> {
  def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4bf16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
}

defm : InsertSubvectorUndef<i32>;
defm : InsertSubvectorUndef<i64>;

// Use pair-wise add instructions when summing up the lanes for v2f64, v2i64,
// v2f32, or the low f16 pair of a v8f16.
def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
                    (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
           (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
def : Pat<(f64 (any_fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
                         (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
           (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
// vector_extract on 64-bit vectors gets promoted to a 128 bit vector,
// so we match on v4f32 here, not v2f32. This will also catch adding
// the low two lanes of a true v4f32 vector.
def : Pat<(any_fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
                    (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
          (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
def : Pat<(any_fadd (vector_extract (v8f16 FPR128:$Rn), (i64 0)),
                    (vector_extract (v8f16 FPR128:$Rn), (i64 1))),
          (f16 (FADDPv2i16p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
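// E.g. adding both d-lanes of a v2f64 becomes a single
//   faddp d0, v0.2d
// rather than two lane moves plus a scalar fadd.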

// Scalar 64-bit shifts in FPR64 registers.
def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
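// E.g. int_aarch64_neon_sshl on i64 values already living in d-registers
// selects directly to "sshl d0, d1, d2", avoiding a round trip through GPRs.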

// Patterns for nontemporal/no-allocate stores.
// We have to resort to tricks to turn a single-input store into a store pair,
// because there is no single-input nontemporal store, only STNP.
let Predicates = [IsLE] in {
let AddedComplexity = 15 in {
class NTStore128Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR128:$Rt),
        (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
      (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
              (DUPi64 FPR128:$Rt, (i64 1)),
              GPR64sp:$Rn, simm7s8:$offset)>;

def : NTStore128Pat<v2i64>;
def : NTStore128Pat<v4i32>;
def : NTStore128Pat<v8i16>;
def : NTStore128Pat<v16i8>;

class NTStore64Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR64:$Rt),
        (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
      (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
              (DUPi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
              GPR64sp:$Rn, simm7s4:$offset)>;

// FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
def : NTStore64Pat<v1f64>;
def : NTStore64Pat<v1i64>;
def : NTStore64Pat<v2i32>;
def : NTStore64Pat<v4i16>;
def : NTStore64Pat<v8i8>;

def : Pat<(nontemporalstore GPR64:$Rt,
            (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
          (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
                  GPR64sp:$Rn, simm7s4:$offset)>;
} // AddedComplexity = 15
} // Predicates = [IsLE]
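// E.g. a nontemporal v2i64 store ends up, roughly, as:
//   mov  d1, v0.d[1]       // DUPi64: extract the high half
//   stnp d0, d1, [x0]      // store both halves as a pair
// The 64-bit cases pair 32-bit halves instead (STNPSi / STNPWi).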

// Tail call return handling. These are all compiler pseudo-instructions,
// so they carry no encoding information or anything like that.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
  def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  // Indirect tail-call with any register allowed, used by MachineOutliner when
  // this is proven safe.
  // FIXME: If we have to add any more hacks like this, we should instead relax
  // some verifier checks for outlined functions.
  def TCRETURNriALL : Pseudo<(outs), (ins GPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
  // Indirect tail-call limited to the registers (x16 and x17) that are
  // allowed to tail-call a "BTI c" instruction.
  def TCRETURNriBTI : Pseudo<(outs), (ins rtcGPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
}

def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>,
      Requires<[NotUseBTI]>;
def : Pat<(AArch64tcret rtcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNriBTI rtcGPR64:$dst, imm:$FPDiff)>,
      Requires<[UseBTI]>;
def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;

def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;

// Extracting lane zero is a special case where we can just use a plain
// EXTRACT_SUBREG instruction, which will become FMOV. This is easier for the
// rest of the compiler, especially the register allocator and copy
// propagation, to reason about, so it is preferred where possible.
let AddedComplexity = 10 in {
  def : Pat<(i64 (extractelt (v2i64 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, dsub)>;
  def : Pat<(i32 (extractelt (v4i32 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, ssub)>;
  def : Pat<(i32 (extractelt (v2i32 V64:$V), (i64 0))), (EXTRACT_SUBREG V64:$V, ssub)>;
}
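// E.g. (i64 (extractelt (v2i64 V), 0)) becomes a dsub EXTRACT_SUBREG, which
// materializes as at most a single "fmov x0, d0".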

// dot_v4i8
class mul_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm, node:$offset),
          (mul (ldop (add node:$Rn, node:$offset)),
               (ldop (add node:$Rm, node:$offset)))>;
class mulz_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm),
          (mul (ldop node:$Rn), (ldop node:$Rm))>;

def load_v4i8 :
  OutPatFrag<(ops node:$R),
             (INSERT_SUBREG
              (v2i32 (IMPLICIT_DEF)),
               (i32 (COPY_TO_REGCLASS (LDRWui node:$R, (i64 0)), FPR32)),
              ssub)>;

class dot_v4i8<Instruction DOT, SDPatternOperator ldop> :
  Pat<(i32 (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 3)),
           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 2)),
           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 1)),
                (mulz_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm))))),
      (EXTRACT_SUBREG (i64 (DOT (DUPv2i32gpr WZR),
                                (load_v4i8 GPR64sp:$Rn),
                                (load_v4i8 GPR64sp:$Rm))),
                      sub_32)>, Requires<[HasDotProd]>;

// dot_v8i8
class ee_v8i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K),
          (v4i16 (extract_subvector (v8i16 (extend node:$V)), node:$K))>;

class mul_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K),
          (mulop (v4i16 (ee_v8i8<extend> node:$M, node:$K)),
                 (v4i16 (ee_v8i8<extend> node:$N, node:$K)))>;

class idot_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 0)),
                 (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 4))))),
           (i64 0)))>;

// vaddv_[su]32 is special: it lowers to ADDP Vd.2S, Vn.2S, Vm.2S with
// Vn == Vm, and the result is read back from Vd.s[0].
def VADDV_32 : OutPatFrag<(ops node:$R), (ADDPv2i32 node:$R, node:$R)>;

class odot_v8i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (EXTRACT_SUBREG
              (VADDV_32
               (i64 (DOT (DUPv2i32gpr WZR),
                         (v8i8 node:$Vm),
                         (v8i8 node:$Vn)))),
              sub_32)>;

class dot_v8i8<Instruction DOT, SDPatternOperator mulop,
                    SDPatternOperator extend> :
  Pat<(idot_v8i8<mulop, extend> V64:$Vm, V64:$Vn),
      (odot_v8i8<DOT> V64:$Vm, V64:$Vn)>,
  Requires<[HasDotProd]>;

// dot_v16i8
class ee_v16i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K1, node:$K2),
          (v4i16 (extract_subvector
           (v8i16 (extend
            (v8i8 (extract_subvector node:$V, node:$K1)))), node:$K2))>;

class mul_v16i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K1, node:$K2),
          (v4i32
           (mulop (v4i16 (ee_v16i8<extend> node:$M, node:$K1, node:$K2)),
                  (v4i16 (ee_v16i8<extend> node:$N, node:$K1, node:$K2))))>;

class idot_v16i8<SDPatternOperator m, SDPatternOperator x> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 0)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 0))),
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 4)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 4)))))),
           (i64 0)))>;

class odot_v16i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (i32 (ADDVv4i32v
              (DOT (DUPv4i32gpr WZR), node:$Vm, node:$Vn)))>;

class dot_v16i8<Instruction DOT, SDPatternOperator mulop,
                SDPatternOperator extend> :
  Pat<(idot_v16i8<mulop, extend> V128:$Vm, V128:$Vn),
      (odot_v16i8<DOT> V128:$Vm, V128:$Vn)>,
  Requires<[HasDotProd]>;

let AddedComplexity = 10 in {
  def : dot_v4i8<SDOTv8i8, sextloadi8>;
  def : dot_v4i8<UDOTv8i8, zextloadi8>;
  def : dot_v8i8<SDOTv8i8, AArch64smull, sext>;
  def : dot_v8i8<UDOTv8i8, AArch64umull, zext>;
  def : dot_v16i8<SDOTv16i8, AArch64smull, sext>;
  def : dot_v16i8<UDOTv16i8, AArch64umull, zext>;

  // FIXME: add patterns to generate vector by element dot product.
  // FIXME: add SVE dot-product patterns.
}
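// As a sketch, with +dotprod a sum of four sign-extended byte products,
//   acc = a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3]
// is recognized by dot_v4i8 and selected to, roughly:
//   dup  v0.2s, wzr            // zero accumulator
//   sdot v0.2s, v1.8b, v2.8b   // only the 4 loaded bytes are meaningful
//   fmov w0, s0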

// Custom DAG nodes and isel rules to make a 64-byte block out of eight GPRs,
// so that it can be used as input to inline asm, and vice versa.
def LS64_BUILD : SDNode<"AArch64ISD::LS64_BUILD", SDTypeProfile<1, 8, []>>;
def LS64_EXTRACT : SDNode<"AArch64ISD::LS64_EXTRACT", SDTypeProfile<1, 2, []>>;
def : Pat<(i64x8 (LS64_BUILD GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3,
                             GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7)),
          (REG_SEQUENCE GPR64x8Class,
              $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3,
              $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7)>;
foreach i = 0-7 in {
  def : Pat<(i64 (LS64_EXTRACT (i64x8 GPR64x8:$val), (i32 i))),
            (EXTRACT_SUBREG $val, !cast<SubRegIndex>("x8sub_"#i))>;
}
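// The foreach emits one pattern per lane: e.g. extracting element 3 of an
// i64x8 value is just a subregister read of x8sub_3, with no code generated.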

let Predicates = [HasLS64] in {
  def LD64B: LoadStore64B<0b101, "ld64b", (ins GPR64sp:$Rn),
                                          (outs GPR64x8:$Rt)>;
  def ST64B: LoadStore64B<0b001, "st64b", (ins GPR64x8:$Rt, GPR64sp:$Rn),
                                          (outs)>;
  def ST64BV:   Store64BV<0b011, "st64bv">;
  def ST64BV0:  Store64BV<0b010, "st64bv0">;

  class ST64BPattern<Intrinsic intrinsic, Instruction instruction>
    : Pat<(intrinsic GPR64sp:$addr, GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3, GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7),
          (instruction (REG_SEQUENCE GPR64x8Class, $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3, $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7), $addr)>;

  def : ST64BPattern<int_aarch64_st64b, ST64B>;
  def : ST64BPattern<int_aarch64_st64bv, ST64BV>;
  def : ST64BPattern<int_aarch64_st64bv0, ST64BV0>;
}

let Predicates = [HasMOPS] in {
  let Defs = [NZCV] in {
    defm CPYFP : MOPSMemoryCopyInsns<0b00, "cpyfp">;

    defm CPYP : MOPSMemoryMoveInsns<0b00, "cpyp">;

    defm SETP : MOPSMemorySetInsns<0b00, "setp">;
  }
  let Uses = [NZCV] in {
    defm CPYFM : MOPSMemoryCopyInsns<0b01, "cpyfm">;
    defm CPYFE : MOPSMemoryCopyInsns<0b10, "cpyfe">;

    defm CPYM : MOPSMemoryMoveInsns<0b01, "cpym">;
    defm CPYE : MOPSMemoryMoveInsns<0b10, "cpye">;

    defm SETM : MOPSMemorySetInsns<0b01, "setm">;
    defm SETE : MOPSMemorySetInsns<0b10, "sete">;
  }
}
let Predicates = [HasMOPS, HasMTE] in {
  let Defs = [NZCV] in {
    defm SETGP     : MOPSMemorySetTaggingInsns<0b00, "setgp">;
  }
  let Uses = [NZCV] in {
    defm SETGM     : MOPSMemorySetTaggingInsns<0b01, "setgm">;
    // Can't use SETGE because it's a reserved name in TargetSelectionDAG.td
    defm MOPSSETGE : MOPSMemorySetTaggingInsns<0b10, "setge">;
  }
}

// MOPS Node operands: 0: Dst, 1: Src or Value, 2: Size, 3: Chain
// MOPS Node results: 0: Dst writeback, 1: Size writeback, 2: Chain
def SDT_AArch64mops : SDTypeProfile<2, 3, [ SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2> ]>;
def AArch64mops_memset : SDNode<"AArch64ISD::MOPS_MEMSET", SDT_AArch64mops>;
def AArch64mops_memset_tagging : SDNode<"AArch64ISD::MOPS_MEMSET_TAGGING", SDT_AArch64mops>;
def AArch64mops_memcopy : SDNode<"AArch64ISD::MOPS_MEMCOPY", SDT_AArch64mops>;
def AArch64mops_memmove : SDNode<"AArch64ISD::MOPS_MEMMOVE", SDT_AArch64mops>;

// MOPS operations always consist of three 4-byte instructions.
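// E.g. the memcpy pseudo below is later expanded to the prologue/main/
// epilogue triple (hence Size = 12), roughly:
//   cpyfp [x0]!, [x1]!, x2!
//   cpyfm [x0]!, [x1]!, x2!
//   cpyfe [x0]!, [x1]!, x2!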
let Predicates = [HasMOPS], Defs = [NZCV], Size = 12, mayStore = 1 in {
  let mayLoad = 1 in {
    def MOPSMemoryCopyPseudo : Pseudo<(outs GPR64common:$Rd_wb, GPR64common:$Rs_wb, GPR64:$Rn_wb),
                                      (ins GPR64common:$Rd, GPR64common:$Rs, GPR64:$Rn),
                                      [], "$Rd = $Rd_wb,$Rs = $Rs_wb,$Rn = $Rn_wb">, Sched<[]>;
    def MOPSMemoryMovePseudo : Pseudo<(outs GPR64common:$Rd_wb, GPR64common:$Rs_wb, GPR64:$Rn_wb),
                                      (ins GPR64common:$Rd, GPR64common:$Rs, GPR64:$Rn),
                                      [], "$Rd = $Rd_wb,$Rs = $Rs_wb,$Rn = $Rn_wb">, Sched<[]>;
  }
  let mayLoad = 0 in {
    def MOPSMemorySetPseudo  : Pseudo<(outs GPR64common:$Rd_wb, GPR64:$Rn_wb),
                                      (ins GPR64common:$Rd, GPR64:$Rn, GPR64:$Rm),
                                      [], "$Rd = $Rd_wb,$Rn = $Rn_wb">, Sched<[]>;
  }
}
let Predicates = [HasMOPS, HasMTE], Defs = [NZCV], Size = 12, mayLoad = 0, mayStore = 1 in {
  def MOPSMemorySetTaggingPseudo : Pseudo<(outs GPR64common:$Rd_wb, GPR64:$Rn_wb),
                                          (ins GPR64common:$Rd, GPR64:$Rn, GPR64:$Rm),
                                          [], "$Rd = $Rd_wb,$Rn = $Rn_wb">, Sched<[]>;
}

// This gets lowered into an instruction sequence of 20 bytes.
let Defs = [X16, X17], mayStore = 1, isCodeGenOnly = 1, Size = 20 in
def StoreSwiftAsyncContext
      : Pseudo<(outs), (ins GPR64:$ctx, GPR64sp:$base, simm9:$offset),
               []>, Sched<[]>;

def AArch64AssertZExtBool : SDNode<"AArch64ISD::ASSERT_ZEXT_BOOL", SDT_assert>;
def : Pat<(AArch64AssertZExtBool GPR32:$op),
          (i32 GPR32:$op)>;

include "AArch64InstrAtomics.td"
include "AArch64SVEInstrInfo.td"
include "AArch64SMEInstrInfo.td"
include "AArch64InstrGISel.td"