//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// AArch64 Instruction definitions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
//
def HasV8_1a         : Predicate<"Subtarget->hasV8_1aOps()">,
                                 AssemblerPredicate<(all_of HasV8_1aOps), "armv8.1a">;
def HasV8_2a         : Predicate<"Subtarget->hasV8_2aOps()">,
                                 AssemblerPredicate<(all_of HasV8_2aOps), "armv8.2a">;
def HasV8_3a         : Predicate<"Subtarget->hasV8_3aOps()">,
                                 AssemblerPredicate<(all_of HasV8_3aOps), "armv8.3a">;
def HasV8_4a         : Predicate<"Subtarget->hasV8_4aOps()">,
                                 AssemblerPredicate<(all_of HasV8_4aOps), "armv8.4a">;
def HasV8_5a         : Predicate<"Subtarget->hasV8_5aOps()">,
                                 AssemblerPredicate<(all_of HasV8_5aOps), "armv8.5a">;
def HasV8_6a         : Predicate<"Subtarget->hasV8_6aOps()">,
                                 AssemblerPredicate<(all_of HasV8_6aOps), "armv8.6a">;
def HasV8_7a         : Predicate<"Subtarget->hasV8_7aOps()">,
                                 AssemblerPredicate<(all_of HasV8_7aOps), "armv8.7a">;
def HasV9_0a         : Predicate<"Subtarget->hasV9_0aOps()">,
                                 AssemblerPredicate<(all_of HasV9_0aOps), "armv9-a">;
def HasV9_1a         : Predicate<"Subtarget->hasV9_1aOps()">,
                                 AssemblerPredicate<(all_of HasV9_1aOps), "armv9.1a">;
def HasV9_2a         : Predicate<"Subtarget->hasV9_2aOps()">,
                                 AssemblerPredicate<(all_of HasV9_2aOps), "armv9.2a">;
def HasV9_3a         : Predicate<"Subtarget->hasV9_3aOps()">,
                                 AssemblerPredicate<(all_of HasV9_3aOps), "armv9.3a">;
def HasV8_0r         : Predicate<"Subtarget->hasV8_0rOps()">,
                                 AssemblerPredicate<(all_of HasV8_0rOps), "armv8-r">;
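
// Each entry pairs a compiler-side Predicate (a C++ expression tested against
// the Subtarget) with an AssemblerPredicate (a feature-bit check used for
// assembler/disassembler diagnostics). As an illustration only (FOO is a
// placeholder, not a real record), an instruction gated on one of these would
// be written:
//   let Predicates = [HasV8_1a] in
//   def FOO : ...; // selectable and assemblable only when armv8.1a is present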

def HasEL2VMSA       : Predicate<"Subtarget->hasEL2VMSA()">,
                       AssemblerPredicate<(all_of FeatureEL2VMSA), "el2vmsa">;

def HasEL3           : Predicate<"Subtarget->hasEL3()">,
                       AssemblerPredicate<(all_of FeatureEL3), "el3">;

def HasVH            : Predicate<"Subtarget->hasVH()">,
                       AssemblerPredicate<(all_of FeatureVH), "vh">;

def HasLOR           : Predicate<"Subtarget->hasLOR()">,
                       AssemblerPredicate<(all_of FeatureLOR), "lor">;

def HasPAuth         : Predicate<"Subtarget->hasPAuth()">,
                       AssemblerPredicate<(all_of FeaturePAuth), "pauth">;

def HasJS            : Predicate<"Subtarget->hasJS()">,
                       AssemblerPredicate<(all_of FeatureJS), "jsconv">;

def HasCCIDX         : Predicate<"Subtarget->hasCCIDX()">,
                       AssemblerPredicate<(all_of FeatureCCIDX), "ccidx">;

def HasComplxNum     : Predicate<"Subtarget->hasComplxNum()">,
                       AssemblerPredicate<(all_of FeatureComplxNum), "complxnum">;

def HasNV            : Predicate<"Subtarget->hasNV()">,
                       AssemblerPredicate<(all_of FeatureNV), "nv">;

def HasMPAM          : Predicate<"Subtarget->hasMPAM()">,
                       AssemblerPredicate<(all_of FeatureMPAM), "mpam">;

def HasDIT           : Predicate<"Subtarget->hasDIT()">,
                       AssemblerPredicate<(all_of FeatureDIT), "dit">;

def HasTRACEV8_4     : Predicate<"Subtarget->hasTRACEV8_4()">,
                       AssemblerPredicate<(all_of FeatureTRACEV8_4), "tracev8.4">;

def HasAM            : Predicate<"Subtarget->hasAM()">,
                       AssemblerPredicate<(all_of FeatureAM), "am">;

def HasSEL2          : Predicate<"Subtarget->hasSEL2()">,
                       AssemblerPredicate<(all_of FeatureSEL2), "sel2">;

def HasTLB_RMI       : Predicate<"Subtarget->hasTLB_RMI()">,
                       AssemblerPredicate<(all_of FeatureTLB_RMI), "tlb-rmi">;

def HasFlagM         : Predicate<"Subtarget->hasFlagM()">,
                       AssemblerPredicate<(all_of FeatureFlagM), "flagm">;

def HasRCPC_IMMO     : Predicate<"Subtarget->hasRCPCImm()">,
                       AssemblerPredicate<(all_of FeatureRCPC_IMMO), "rcpc-immo">;

def HasFPARMv8       : Predicate<"Subtarget->hasFPARMv8()">,
                                 AssemblerPredicate<(all_of FeatureFPARMv8), "fp-armv8">;
def HasNEON          : Predicate<"Subtarget->hasNEON()">,
                                 AssemblerPredicate<(all_of FeatureNEON), "neon">;
def HasCrypto        : Predicate<"Subtarget->hasCrypto()">,
                                 AssemblerPredicate<(all_of FeatureCrypto), "crypto">;
def HasSM4           : Predicate<"Subtarget->hasSM4()">,
                                 AssemblerPredicate<(all_of FeatureSM4), "sm4">;
def HasSHA3          : Predicate<"Subtarget->hasSHA3()">,
                                 AssemblerPredicate<(all_of FeatureSHA3), "sha3">;
def HasSHA2          : Predicate<"Subtarget->hasSHA2()">,
                                 AssemblerPredicate<(all_of FeatureSHA2), "sha2">;
def HasAES           : Predicate<"Subtarget->hasAES()">,
                                 AssemblerPredicate<(all_of FeatureAES), "aes">;
def HasDotProd       : Predicate<"Subtarget->hasDotProd()">,
                                 AssemblerPredicate<(all_of FeatureDotProd), "dotprod">;
def HasCRC           : Predicate<"Subtarget->hasCRC()">,
                                 AssemblerPredicate<(all_of FeatureCRC), "crc">;
def HasLSE           : Predicate<"Subtarget->hasLSE()">,
                                 AssemblerPredicate<(all_of FeatureLSE), "lse">;
def HasNoLSE         : Predicate<"!Subtarget->hasLSE()">;
def HasRAS           : Predicate<"Subtarget->hasRAS()">,
                                 AssemblerPredicate<(all_of FeatureRAS), "ras">;
def HasRDM           : Predicate<"Subtarget->hasRDM()">,
                                 AssemblerPredicate<(all_of FeatureRDM), "rdm">;
def HasPerfMon       : Predicate<"Subtarget->hasPerfMon()">;
def HasFullFP16      : Predicate<"Subtarget->hasFullFP16()">,
                                 AssemblerPredicate<(all_of FeatureFullFP16), "fullfp16">;
def HasFP16FML       : Predicate<"Subtarget->hasFP16FML()">,
                                 AssemblerPredicate<(all_of FeatureFP16FML), "fp16fml">;
def HasSPE           : Predicate<"Subtarget->hasSPE()">,
                                 AssemblerPredicate<(all_of FeatureSPE), "spe">;
def HasFuseAES       : Predicate<"Subtarget->hasFuseAES()">,
                                 AssemblerPredicate<(all_of FeatureFuseAES),
                                 "fuse-aes">;
def HasSVE           : Predicate<"Subtarget->hasSVE()">,
                                 AssemblerPredicate<(all_of FeatureSVE), "sve">;
def HasSVE2          : Predicate<"Subtarget->hasSVE2()">,
                                 AssemblerPredicate<(all_of FeatureSVE2), "sve2">;
def HasSVE2AES       : Predicate<"Subtarget->hasSVE2AES()">,
                                 AssemblerPredicate<(all_of FeatureSVE2AES), "sve2-aes">;
def HasSVE2SM4       : Predicate<"Subtarget->hasSVE2SM4()">,
                                 AssemblerPredicate<(all_of FeatureSVE2SM4), "sve2-sm4">;
def HasSVE2SHA3      : Predicate<"Subtarget->hasSVE2SHA3()">,
                                 AssemblerPredicate<(all_of FeatureSVE2SHA3), "sve2-sha3">;
def HasSVE2BitPerm   : Predicate<"Subtarget->hasSVE2BitPerm()">,
                                 AssemblerPredicate<(all_of FeatureSVE2BitPerm), "sve2-bitperm">;
def HasSME           : Predicate<"Subtarget->hasSME()">,
                                 AssemblerPredicate<(all_of FeatureSME), "sme">;
def HasSMEF64        : Predicate<"Subtarget->hasSMEF64()">,
                                 AssemblerPredicate<(all_of FeatureSMEF64), "sme-f64">;
def HasSMEI64        : Predicate<"Subtarget->hasSMEI64()">,
                                 AssemblerPredicate<(all_of FeatureSMEI64), "sme-i64">;
def HasStreamingSVE  : Predicate<"Subtarget->hasStreamingSVE()">,
                                 AssemblerPredicate<(all_of FeatureStreamingSVE), "sme">;
// A subset of SVE(2) instructions is legal in Streaming SVE execution mode;
// these should be enabled if either feature has been specified.
def HasSVEorStreamingSVE
    : Predicate<"Subtarget->hasSVE() || Subtarget->hasStreamingSVE()">,
                AssemblerPredicate<(any_of FeatureSVE, FeatureStreamingSVE),
                "sve or sme">;
def HasSVE2orStreamingSVE
    : Predicate<"Subtarget->hasSVE2() || Subtarget->hasStreamingSVE()">,
                AssemblerPredicate<(any_of FeatureSVE2, FeatureStreamingSVE),
                "sve2 or sme">;
// A subset of NEON instructions is legal in Streaming SVE execution mode;
// these should be enabled if either feature has been specified.
def HasNEONorStreamingSVE
    : Predicate<"Subtarget->hasNEON() || Subtarget->hasStreamingSVE()">,
                AssemblerPredicate<(any_of FeatureNEON, FeatureStreamingSVE),
                "neon or sme">;
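
// As an illustration only (EXAMPLE_ADD is a placeholder, not a real record),
// an instruction legal in both modes would be gated as:
//   let Predicates = [HasSVEorStreamingSVE] in
//   def EXAMPLE_ADD : ...;
// so that specifying either +sve or +sme makes it available.
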
def HasRCPC          : Predicate<"Subtarget->hasRCPC()">,
                                 AssemblerPredicate<(all_of FeatureRCPC), "rcpc">;
def HasAltNZCV       : Predicate<"Subtarget->hasAlternativeNZCV()">,
                       AssemblerPredicate<(all_of FeatureAltFPCmp), "altnzcv">;
def HasFRInt3264     : Predicate<"Subtarget->hasFRInt3264()">,
                       AssemblerPredicate<(all_of FeatureFRInt3264), "frint3264">;
def HasSB            : Predicate<"Subtarget->hasSB()">,
                       AssemblerPredicate<(all_of FeatureSB), "sb">;
def HasPredRes       : Predicate<"Subtarget->hasPredRes()">,
                       AssemblerPredicate<(all_of FeaturePredRes), "predres">;
def HasCCDP          : Predicate<"Subtarget->hasCCDP()">,
                       AssemblerPredicate<(all_of FeatureCacheDeepPersist), "ccdp">;
def HasBTI           : Predicate<"Subtarget->hasBTI()">,
                       AssemblerPredicate<(all_of FeatureBranchTargetId), "bti">;
def HasMTE           : Predicate<"Subtarget->hasMTE()">,
                       AssemblerPredicate<(all_of FeatureMTE), "mte">;
def HasTME           : Predicate<"Subtarget->hasTME()">,
                       AssemblerPredicate<(all_of FeatureTME), "tme">;
def HasETE           : Predicate<"Subtarget->hasETE()">,
                       AssemblerPredicate<(all_of FeatureETE), "ete">;
def HasTRBE          : Predicate<"Subtarget->hasTRBE()">,
                       AssemblerPredicate<(all_of FeatureTRBE), "trbe">;
def HasBF16          : Predicate<"Subtarget->hasBF16()">,
                       AssemblerPredicate<(all_of FeatureBF16), "bf16">;
def HasMatMulInt8    : Predicate<"Subtarget->hasMatMulInt8()">,
                       AssemblerPredicate<(all_of FeatureMatMulInt8), "i8mm">;
def HasMatMulFP32    : Predicate<"Subtarget->hasMatMulFP32()">,
                       AssemblerPredicate<(all_of FeatureMatMulFP32), "f32mm">;
def HasMatMulFP64    : Predicate<"Subtarget->hasMatMulFP64()">,
                       AssemblerPredicate<(all_of FeatureMatMulFP64), "f64mm">;
def HasXS            : Predicate<"Subtarget->hasXS()">,
                       AssemblerPredicate<(all_of FeatureXS), "xs">;
def HasWFxT          : Predicate<"Subtarget->hasWFxT()">,
                       AssemblerPredicate<(all_of FeatureWFxT), "wfxt">;
def HasLS64          : Predicate<"Subtarget->hasLS64()">,
                       AssemblerPredicate<(all_of FeatureLS64), "ls64">;
def HasBRBE          : Predicate<"Subtarget->hasBRBE()">,
                       AssemblerPredicate<(all_of FeatureBRBE), "brbe">;
def HasSPE_EEF       : Predicate<"Subtarget->hasSPE_EEF()">,
                       AssemblerPredicate<(all_of FeatureSPE_EEF), "spe-eef">;
def HasHBC           : Predicate<"Subtarget->hasHBC()">,
                       AssemblerPredicate<(all_of FeatureHBC), "hbc">;
def HasMOPS          : Predicate<"Subtarget->hasMOPS()">,
                       AssemblerPredicate<(all_of FeatureMOPS), "mops">;
def IsLE             : Predicate<"Subtarget->isLittleEndian()">;
def IsBE             : Predicate<"!Subtarget->isLittleEndian()">;
def IsWindows        : Predicate<"Subtarget->isTargetWindows()">;
def UseExperimentalZeroingPseudos
    : Predicate<"Subtarget->useExperimentalZeroingPseudos()">;
def UseAlternateSExtLoadCVTF32
    : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;

def UseNegativeImmediates
    : Predicate<"false">, AssemblerPredicate<(all_of (not FeatureNoNegativeImmediates)),
                                             "NegativeImmediates">;
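
// UseNegativeImmediates itself is never true for instruction selection; only
// the assembler-side check matters. When FeatureNoNegativeImmediates is absent,
// it enables the aliases that convert a negative immediate into the
// complementary instruction (e.g. an add with a negative immediate assembling
// as the corresponding sub).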

def UseScalarIncVL : Predicate<"Subtarget->useScalarIncVL()">;

def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER",
                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                       SDTCisInt<1>]>>;


//===----------------------------------------------------------------------===//
// AArch64-specific DAG Nodes.
//

// SDTBinaryArithWithFlagsOut - RES, FLAGS = op LHS, RHS
def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
                                              [SDTCisSameAs<0, 2>,
                                               SDTCisSameAs<0, 3>,
                                               SDTCisInt<0>, SDTCisVT<1, i32>]>;

// SDTBinaryArithWithFlagsIn - RES = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
                                            [SDTCisSameAs<0, 1>,
                                             SDTCisSameAs<0, 2>,
                                             SDTCisInt<0>,
                                             SDTCisVT<3, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                            [SDTCisSameAs<0, 2>,
                                             SDTCisSameAs<0, 3>,
                                             SDTCisInt<0>,
                                             SDTCisVT<1, i32>,
                                             SDTCisVT<4, i32>]>;
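
// These profiles correspond to the flag-setting node families defined below:
// ADDS/SUBS/ANDS use SDTBinaryArithWithFlagsOut, ADC/SBC use
// SDTBinaryArithWithFlagsIn, and ADCS/SBCS use SDTBinaryArithWithFlagsInOut.
// In each case the i32 FLAGS value models NZCV.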

def SDT_AArch64Brcond  : SDTypeProfile<0, 3,
                                     [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
                                      SDTCisVT<2, i32>]>;
def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
                                        SDTCisVT<2, OtherVT>]>;


def SDT_AArch64CSel  : SDTypeProfile<1, 4,
                                   [SDTCisSameAs<0, 1>,
                                    SDTCisSameAs<0, 2>,
                                    SDTCisInt<3>,
                                    SDTCisVT<4, i32>]>;
def SDT_AArch64CCMP : SDTypeProfile<1, 5,
                                    [SDTCisVT<0, i32>,
                                     SDTCisInt<1>,
                                     SDTCisSameAs<1, 2>,
                                     SDTCisInt<3>,
                                     SDTCisInt<4>,
                                     SDTCisVT<5, i32>]>;
def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
                                     [SDTCisVT<0, i32>,
                                      SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>,
                                      SDTCisInt<3>,
                                      SDTCisInt<4>,
                                      SDTCisVT<5, i32>]>;
def SDT_AArch64FCmp   : SDTypeProfile<0, 2,
                                   [SDTCisFP<0>,
                                    SDTCisSameAs<0, 1>]>;
def SDT_AArch64Dup   : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDT_AArch64DupLane   : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
def SDT_AArch64Insr  : SDTypeProfile<1, 2, [SDTCisVec<0>]>;
def SDT_AArch64Zip   : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                          SDTCisSameAs<0, 1>,
                                          SDTCisSameAs<0, 2>]>;
def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisInt<2>, SDTCisInt<3>]>;
def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                          SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDT_AArch64Dot: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                         SDTCisVec<2>, SDTCisSameAs<2,3>]>;

def SDT_AArch64vshiftinsert : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<3>,
                                                 SDTCisSameAs<0,1>,
                                                 SDTCisSameAs<0,2>]>;

def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
def SDT_AArch64fcmp  : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>]>;
def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>,
                                           SDTCisSameAs<0,3>]>;
def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;

def SDT_AArch64ITOF  : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;

def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
                                                 SDTCisPtrTy<1>]>;

def SDT_AArch64uaddlp : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>;

def SDT_AArch64ldp : SDTypeProfile<2, 1, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stp : SDTypeProfile<0, 3, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stnp : SDTypeProfile<0, 3, [SDTCisVT<0, v4i32>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;

// Generates the general dynamic sequences, i.e.
//  adrp  x0, :tlsdesc:var
//  ldr   x1, [x0, #:tlsdesc_lo12:var]
//  add   x0, x0, #:tlsdesc_lo12:var
//  .tlsdesccall var
//  blr   x1

// The TPIDR_EL0 offset is put directly in X0, hence the node has no "result";
// its single operand is the variable being accessed.
def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0,1,
                                          [SDTCisPtrTy<0>]>;

def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
                                        [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
                                         SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
                                         SDTCisSameAs<1, 4>]>;

def SDT_AArch64TBL : SDTypeProfile<1, 2, [
  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>
]>;

// non-extending masked load fragment.
def nonext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
         !cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
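// Fragments like these are consumed by the predicated load/store patterns in
// the included SVE .td files. Purely as a schematic illustration (instruction
// and operand names here are placeholders, not a verbatim pattern):
//   def : Pat<(nxv4i32 (nonext_masked_load GPR64:$base, PPR:$pred, undef)),
//             (SOME_LD1W PPR:$pred, GPR64:$base)>;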
// sign extending masked load fragments.
def asext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def),[{
  return (cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD ||
          cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD) &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def asext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def asext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def asext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;
// zero extending masked load fragments.
def zext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def zext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def zext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def zext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_load :
   PatFrag<(ops node:$ptr, node:$pred, node:$def),
           (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
   return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
          cast<MaskedLoadSDNode>(N)->isUnindexed() &&
          cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;

// non-truncating masked store fragment.
def nontrunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         !cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;
// truncating masked store fragments.
def trunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed();
}]>;
def trunc_masked_store_i8 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def trunc_masked_store_i16 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def trunc_masked_store_i32 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;
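
// The store fragments above mirror the load fragments: nontrunc/trunc play the
// role of nonext/ext, the _i8/_i16/_i32 variants refine the memory element
// type, and the non_temporal_* fragments select the isNonTemporal() forms.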

// top16Zero - returns true if the upper 16 bits of $src are known to be zero,
// false otherwise.
def top16Zero: PatLeaf<(i32 GPR32:$src), [{
  return SDValue(N,0)->getValueType(0) == MVT::i32 &&
         CurDAG->MaskedValueIsZero(SDValue(N,0), APInt::getHighBitsSet(32, 16));
  }]>;

// top32Zero - returns true if the upper 32 bits of $src are known to be zero,
// false otherwise.
def top32Zero: PatLeaf<(i64 GPR64:$src), [{
  return SDValue(N,0)->getValueType(0) == MVT::i64 &&
         CurDAG->MaskedValueIsZero(SDValue(N,0), APInt::getHighBitsSet(64, 32));
  }]>;
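
// A PatLeaf of this kind lets a pattern impose an extra known-bits requirement
// on an operand; schematically (HYPOTHETICAL_INST is a placeholder):
//   def : Pat<(i64 (mul top32Zero:$Rn, top32Zero:$Rm)),
//             (HYPOTHETICAL_INST GPR64:$Rn, GPR64:$Rm)>;
// would only fire when both multiplicands provably have zero upper halves.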

// Node definitions.
def AArch64adrp          : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
def AArch64adr           : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
def AArch64addlow        : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
def AArch64LOADgot       : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
                                SDCallSeqStart<[ SDTCisVT<0, i32>,
                                                 SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOutGlue]>;
def AArch64callseq_end   : SDNode<"ISD::CALLSEQ_END",
                                SDCallSeqEnd<[ SDTCisVT<0, i32>,
                                               SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def AArch64call          : SDNode<"AArch64ISD::CALL",
                                SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                 SDNPVariadic]>;

def AArch64call_rvmarker: SDNode<"AArch64ISD::CALL_RVMARKER",
                             SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                             [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                              SDNPVariadic]>;

def AArch64brcond        : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
                                [SDNPHasChain]>;
def AArch64cbz           : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64cbnz          : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64tbz           : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;
def AArch64tbnz          : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;


def AArch64csel          : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
def AArch64csinv         : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
def AArch64csneg         : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
def AArch64csinc         : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
def AArch64retflag       : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
                                [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def AArch64adc       : SDNode<"AArch64ISD::ADC",  SDTBinaryArithWithFlagsIn>;
def AArch64sbc       : SDNode<"AArch64ISD::SBC",  SDTBinaryArithWithFlagsIn>;
def AArch64add_flag  : SDNode<"AArch64ISD::ADDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64sub_flag  : SDNode<"AArch64ISD::SUBS",  SDTBinaryArithWithFlagsOut>;
def AArch64and_flag  : SDNode<"AArch64ISD::ANDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64adc_flag  : SDNode<"AArch64ISD::ADCS",  SDTBinaryArithWithFlagsInOut>;
def AArch64sbc_flag  : SDNode<"AArch64ISD::SBCS",  SDTBinaryArithWithFlagsInOut>;

def AArch64ccmp      : SDNode<"AArch64ISD::CCMP",  SDT_AArch64CCMP>;
def AArch64ccmn      : SDNode<"AArch64ISD::CCMN",  SDT_AArch64CCMP>;
def AArch64fccmp     : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;

def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;

def AArch64fcmp         : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
def AArch64strict_fcmp  : SDNode<"AArch64ISD::STRICT_FCMP", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64strict_fcmpe : SDNode<"AArch64ISD::STRICT_FCMPE", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64any_fcmp     : PatFrags<(ops node:$lhs, node:$rhs),
                                   [(AArch64strict_fcmp node:$lhs, node:$rhs),
                                    (AArch64fcmp node:$lhs, node:$rhs)]>;

def AArch64dup       : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
def AArch64duplane8  : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;

def AArch64insr      : SDNode<"AArch64ISD::INSR", SDT_AArch64Insr>;

def AArch64zip1      : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
def AArch64zip2      : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
def AArch64uzp1      : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
def AArch64uzp2      : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
def AArch64trn1      : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
def AArch64trn2      : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;

def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;

def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
def AArch64ext : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;

def AArch64vashr : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
def AArch64vlshr : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
def AArch64sqshli : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
def AArch64uqshli : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
def AArch64srshri : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;
def AArch64vsli : SDNode<"AArch64ISD::VSLI", SDT_AArch64vshiftinsert>;
def AArch64vsri : SDNode<"AArch64ISD::VSRI", SDT_AArch64vshiftinsert>;

def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
def AArch64bsp: SDNode<"AArch64ISD::BSP", SDT_AArch64trivec>;

def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;

def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;

def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
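// CMTST (test for any common set bits) needs no dedicated node; it is
// synthesized from the comparisons above as NOT (CMEQz (AND x, y)).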
def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
                        (vnot (AArch64cmeqz (and node:$LHS, node:$RHS)))>;

def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;

def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;

def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
                  [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

def AArch64Prefetch        : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
                               [SDNPHasChain, SDNPSideEffect]>;

def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;

def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
                                    SDT_AArch64TLSDescCallSeq,
                                    [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
                                     SDNPVariadic]>;


def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
                                 SDT_AArch64WrapperLarge>;

def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;

def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                    SDTCisSameAs<1, 2>]>;
def AArch64smull    : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
def AArch64umull    : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;

def AArch64frecpe   : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
def AArch64frecps   : SDNode<"AArch64ISD::FRECPS", SDTFPBinOp>;
def AArch64frsqrte  : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
def AArch64frsqrts  : SDNode<"AArch64ISD::FRSQRTS", SDTFPBinOp>;

def AArch64sdot     : SDNode<"AArch64ISD::SDOT", SDT_AArch64Dot>;
def AArch64udot     : SDNode<"AArch64ISD::UDOT", SDT_AArch64Dot>;

def AArch64saddv    : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
def AArch64uaddv    : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
def AArch64sminv    : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
def AArch64uminv    : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
def AArch64smaxv    : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
def AArch64umaxv    : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;

def AArch64srhadd   : SDNode<"AArch64ISD::SRHADD", SDT_AArch64binvec>;
def AArch64urhadd   : SDNode<"AArch64ISD::URHADD", SDT_AArch64binvec>;
def AArch64shadd    : SDNode<"AArch64ISD::SHADD", SDT_AArch64binvec>;
def AArch64uhadd    : SDNode<"AArch64ISD::UHADD", SDT_AArch64binvec>;

def AArch64uabd     : PatFrags<(ops node:$lhs, node:$rhs),
                               [(abdu node:$lhs, node:$rhs),
                                (int_aarch64_neon_uabd node:$lhs, node:$rhs)]>;
def AArch64sabd     : PatFrags<(ops node:$lhs, node:$rhs),
                               [(abds node:$lhs, node:$rhs),
                                (int_aarch64_neon_sabd node:$lhs, node:$rhs)]>;

def AArch64uaddlp_n : SDNode<"AArch64ISD::UADDLP", SDT_AArch64uaddlp>;
def AArch64uaddlp   : PatFrags<(ops node:$src),
                               [(AArch64uaddlp_n node:$src),
                                (int_aarch64_neon_uaddlp node:$src)]>;

def SDT_AArch64SETTAG : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
def AArch64stg : SDNode<"AArch64ISD::STG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stzg : SDNode<"AArch64ISD::STZG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64st2g : SDNode<"AArch64ISD::ST2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stz2g : SDNode<"AArch64ISD::STZ2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def SDT_AArch64unpk : SDTypeProfile<1, 1, [
    SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>
]>;
def AArch64sunpkhi : SDNode<"AArch64ISD::SUNPKHI", SDT_AArch64unpk>;
def AArch64sunpklo : SDNode<"AArch64ISD::SUNPKLO", SDT_AArch64unpk>;
def AArch64uunpkhi : SDNode<"AArch64ISD::UUNPKHI", SDT_AArch64unpk>;
def AArch64uunpklo : SDNode<"AArch64ISD::UUNPKLO", SDT_AArch64unpk>;

def AArch64ldp : SDNode<"AArch64ISD::LDP", SDT_AArch64ldp, [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def AArch64stp : SDNode<"AArch64ISD::STP", SDT_AArch64stp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stnp : SDNode<"AArch64ISD::STNP", SDT_AArch64stnp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def AArch64tbl : SDNode<"AArch64ISD::TBL", SDT_AArch64TBL>;
def AArch64mrs : SDNode<"AArch64ISD::MRS",
                        SDTypeProfile<1, 1, [SDTCisVT<0, i64>, SDTCisVT<1, i32>]>,
                        [SDNPHasChain, SDNPOutGlue]>;
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
// We could compute these on a per-module basis but doing so requires accessing
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
  def ForCodeSize   : Predicate<"shouldOptForSize(MF)">;
  def NotForCodeSize   : Predicate<"!shouldOptForSize(MF)">;
  // Avoid generating STRQro if it is slow, unless we're optimizing for code size.
  def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || shouldOptForSize(MF)">;

  def UseBTI : Predicate<[{ MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;
  def NotUseBTI : Predicate<[{ !MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;

  def SLSBLRMitigation : Predicate<[{ MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  def NoSLSBLRMitigation : Predicate<[{ !MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  // Toggles patterns which aren't beneficial in GlobalISel when we aren't
  // optimizing. This allows us to selectively use patterns without impacting
  // SelectionDAG's behaviour.
  // FIXME: One day there will probably be a nicer way to check for this, but
  // today is not that day.
  def OptimizedGISelOrOtherSelector : Predicate<"!MF->getFunction().hasOptNone() || MF->getProperties().hasProperty(MachineFunctionProperties::Property::FailedISel) || !MF->getProperties().hasProperty(MachineFunctionProperties::Property::Legalized)">;
}

include "AArch64InstrFormats.td"
include "SVEInstrFormats.td"
include "SMEInstrFormats.td"

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Miscellaneous instructions.
//===----------------------------------------------------------------------===//

let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
// We set Sched to an empty list because we expect these instructions to simply
// be removed in most cases.
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(AArch64callseq_start timm:$amt1, timm:$amt2)]>,
                              Sched<[]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
                            Sched<[]>;
} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1

let isReMaterializable = 1, isCodeGenOnly = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions.  When that changes, they can be
// removed, along with the AArch64Wrapper node.

let AddedComplexity = 10 in
def LOADgot : Pseudo<(outs GPR64common:$dst), (ins i64imm:$addr),
                     [(set GPR64common:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
              Sched<[WriteLDAdr]>;

// The MOVaddr instruction should match only when the add is not folded
// into a load or store address.
def MOVaddr
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
                                            tglobaladdr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrJT
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
                                             tjumptable:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrCP
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
                                             tconstpool:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrBA
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
                                             tblockaddress:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrTLS
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
                                            tglobaltlsaddr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrEXT
    : Pseudo<(outs GPR64common:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64common:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
                                            texternalsym:$low))]>,
      Sched<[WriteAdrAdr]>;
// Normally AArch64addlow either gets folded into a following ldr/str,
// or together with an adrp into MOVaddr above. For cases with TLS, it
// might appear without either of them, so allow lowering it into a plain
// add.
def ADDlowTLS
    : Pseudo<(outs GPR64sp:$dst), (ins GPR64sp:$src, i64imm:$low),
             [(set GPR64sp:$dst, (AArch64addlow GPR64sp:$src,
                                            tglobaltlsaddr:$low))]>,
      Sched<[WriteAdr]>;

} // isReMaterializable, isCodeGenOnly

def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
          (LOADgot tglobaltlsaddr:$addr)>;

def : Pat<(AArch64LOADgot texternalsym:$addr),
          (LOADgot texternalsym:$addr)>;

def : Pat<(AArch64LOADgot tconstpool:$addr),
          (LOADgot tconstpool:$addr)>;

// In general these get lowered into a sequence of three 4-byte instructions.
// A 32-bit jump-table destination actually needs only two instructions, since
// the table itself can serve as a PC-relative base, but that optimization
// occurs after branch relaxation, so be pessimistic here.
let Size = 12, Constraints = "@earlyclobber $dst,@earlyclobber $scratch",
    isNotDuplicable = 1 in {
def JumpTableDest32 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest16 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest8 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                            (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                     Sched<[]>;
}
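
// Schematically (operand details are filled in when the pseudo is expanded),
// the three-instruction form materializes a PC-relative base, loads the table
// entry, and adds the two; for JumpTableDest32, roughly:
//   adr   $dst, <anchor>
//   ldrsw $scratch, [$table, $entry, lsl #2]
//   add   $dst, $dst, $scratch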

// Space-consuming pseudo to aid testing of placement and reachability
// algorithms. Immediate operand is the number of bytes this "instruction"
// occupies; register operands can be used to enforce dependency and constrain
// the scheduler.
let hasSideEffects = 1, mayLoad = 1, mayStore = 1 in
def SPACE : Pseudo<(outs GPR64:$Rd), (ins i32imm:$size, GPR64:$Rn),
                   [(set GPR64:$Rd, (int_aarch64_space imm:$size, GPR64:$Rn))]>,
            Sched<[]>;

let hasSideEffects = 1, isCodeGenOnly = 1 in {
  def SpeculationSafeValueX
      : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>;
  def SpeculationSafeValueW
      : Pseudo<(outs GPR32:$dst), (ins GPR32:$src), []>, Sched<[]>;
}

// SpeculationBarrierEndBB must only be used after an unconditional control
// flow, i.e. after a terminator for which isBarrier is True.
let hasSideEffects = 1, isCodeGenOnly = 1, isTerminator = 1, isBarrier = 1 in {
  // This gets lowered to a pair of 4-byte instructions (a DSB and an ISB).
  let Size = 8 in
  def SpeculationBarrierISBDSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
  // This gets lowered to a single 4-byte instruction (an SB).
  let Size = 4 in
  def SpeculationBarrierSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// System instructions.
//===----------------------------------------------------------------------===//

def HINT : HintI<"hint">;
def : InstAlias<"nop",  (HINT 0b000)>;
def : InstAlias<"yield",(HINT 0b001)>;
def : InstAlias<"wfe",  (HINT 0b010)>;
def : InstAlias<"wfi",  (HINT 0b011)>;
def : InstAlias<"sev",  (HINT 0b100)>;
def : InstAlias<"sevl", (HINT 0b101)>;
def : InstAlias<"dgh",  (HINT 0b110)>;
def : InstAlias<"esb",  (HINT 0b10000)>, Requires<[HasRAS]>;
def : InstAlias<"csdb", (HINT 20)>;
// In order to be able to write readable assembly, LLVM should accept assembly
// inputs that use Branch Target Identification mnemonics, even with BTI disabled.
// However, in order to be compatible with other assemblers (e.g. GAS), LLVM
// should not emit these mnemonics unless BTI is enabled.
def : InstAlias<"bti",  (HINT 32), 0>;
def : InstAlias<"bti $op", (HINT btihint_op:$op), 0>;
def : InstAlias<"bti",  (HINT 32)>, Requires<[HasBTI]>;
def : InstAlias<"bti $op", (HINT btihint_op:$op)>, Requires<[HasBTI]>;

// v8.2a Statistical Profiling extension
def : InstAlias<"psb $op",  (HINT psbhint_op:$op)>, Requires<[HasSPE]>;

// As far as LLVM is concerned, this writes to the system's exclusive monitors.
let mayLoad = 1, mayStore = 1 in
def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;

// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
// model patterns with sufficiently fine granularity.
let mayLoad = ?, mayStore = ? in {
def DMB   : CRmSystemI<barrier_op, 0b101, "dmb",
                       [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;

def DSB   : CRmSystemI<barrier_op, 0b100, "dsb",
                       [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;

def ISB   : CRmSystemI<barrier_op, 0b110, "isb",
                       [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;

def TSB   : CRmSystemI<barrier_op, 0b010, "tsb", []> {
  let CRm        = 0b0010;
  let Inst{12}   = 0;
  let Predicates = [HasTRACEV8_4];
}

def DSBnXS  : CRmSystemI<barrier_nxs_op, 0b001, "dsb"> {
  let CRm{1-0}   = 0b11;
  let Inst{9-8}  = 0b10;
  let Predicates = [HasXS];
}

let Predicates = [HasWFxT] in {
def WFET : RegInputSystemI<0b0000, 0b000, "wfet">;
def WFIT : RegInputSystemI<0b0000, 0b001, "wfit">;
}

// Branch Record Buffer two-word mnemonic instructions
class BRBEI<bits<3> op2, string keyword>
    : SimpleSystemI<0, (ins), "brb", keyword>, Sched<[WriteSys]> {
  let Inst{31-8} = 0b110101010000100101110010;
  let Inst{7-5} = op2;
  let Predicates = [HasBRBE];
}
def BRB_IALL: BRBEI<0b100, "\tiall">;
def BRB_INJ:  BRBEI<0b101, "\tinj">;
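// The leading "\t" in the keyword strings above is what makes these print as
// two words ("brb iall" / "brb inj") rather than as a single mnemonic.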

} // let mayLoad = ?, mayStore = ?

// Allow uppercase and lowercase keyword arguments for BRB IALL and BRB INJ
def : TokenAlias<"INJ", "inj">;
def : TokenAlias<"IALL", "iall">;

// ARMv8.2-A Dot Product
let Predicates = [HasDotProd] in {
defm SDOT : SIMDThreeSameVectorDot<0, 0, "sdot", AArch64sdot>;
defm UDOT : SIMDThreeSameVectorDot<1, 0, "udot", AArch64udot>;
defm SDOTlane : SIMDThreeSameVectorDotIndex<0, 0, 0b10, "sdot", AArch64sdot>;
defm UDOTlane : SIMDThreeSameVectorDotIndex<1, 0, 0b10, "udot", AArch64udot>;
}

// ARMv8.6-A BFloat
let Predicates = [HasNEON, HasBF16] in {
defm BFDOT       : SIMDThreeSameVectorBFDot<1, "bfdot">;
defm BF16DOTlane : SIMDThreeSameVectorBF16DotI<0, "bfdot">;
def BFMMLA       : SIMDThreeSameVectorBF16MatrixMul<"bfmmla">;
def BFMLALB      : SIMDBF16MLAL<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALT      : SIMDBF16MLAL<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFMLALBIdx   : SIMDBF16MLALIndex<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALTIdx   : SIMDBF16MLALIndex<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFCVTN       : SIMD_BFCVTN;
def BFCVTN2      : SIMD_BFCVTN2;

// Vector-scalar BFDOT:
// The second source operand of the 64-bit variant of BF16DOTlane is a 128-bit
// register (the instruction uses a single 32-bit lane from it), so the pattern
// is a bit tricky.
def : Pat<(v2f32 (int_aarch64_neon_bfdot
                    (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
                    (v4bf16 (bitconvert
                      (v2i32 (AArch64duplane32
                        (v4i32 (bitconvert
                          (v8bf16 (insert_subvector undef,
                            (v4bf16 V64:$Rm),
                            (i64 0))))),
                        VectorIndexS:$idx)))))),
          (BF16DOTlanev4bf16 (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
                             (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                             VectorIndexS:$idx)>;
}

let Predicates = [HasNEONorStreamingSVE, HasBF16] in {
def BFCVT : BF16ToSinglePrecision<"bfcvt">;
}

// ARMv8.6A AArch64 matrix multiplication
let Predicates = [HasMatMulInt8] in {
def  SMMLA : SIMDThreeSameVectorMatMul<0, 0, "smmla", int_aarch64_neon_smmla>;
def  UMMLA : SIMDThreeSameVectorMatMul<0, 1, "ummla", int_aarch64_neon_ummla>;
def USMMLA : SIMDThreeSameVectorMatMul<1, 0, "usmmla", int_aarch64_neon_usmmla>;
defm USDOT : SIMDThreeSameVectorDot<0, 1, "usdot", int_aarch64_neon_usdot>;
defm USDOTlane : SIMDThreeSameVectorDotIndex<0, 1, 0b10, "usdot", int_aarch64_neon_usdot>;

// sudot (by element) is matched using the usdot intrinsic, since there is no
// separate sudot intrinsic. The second operand is used in the dup operation
// to repeat the indexed element.
957class BaseSIMDSUDOTIndex<bit Q, string dst_kind, string lhs_kind,
958                         string rhs_kind, RegisterOperand RegType,
959                         ValueType AccumType, ValueType InputType>
960      : BaseSIMDThreeSameVectorDotIndex<Q, 0, 1, 0b00, "sudot", dst_kind,
961                                        lhs_kind, rhs_kind, RegType, AccumType,
962                                        InputType, null_frag> {
963  let Pattern = [(set (AccumType RegType:$dst),
964                      (AccumType (int_aarch64_neon_usdot (AccumType RegType:$Rd),
965                                 (InputType (bitconvert (AccumType
966                                    (AArch64duplane32 (v4i32 V128:$Rm),
967                                        VectorIndexS:$idx)))),
968                                 (InputType RegType:$Rn))))];
969}
970
971multiclass SIMDSUDOTIndex {
972  def v8i8  : BaseSIMDSUDOTIndex<0, ".2s", ".8b", ".4b", V64, v2i32, v8i8>;
973  def v16i8 : BaseSIMDSUDOTIndex<1, ".4s", ".16b", ".4b", V128, v4i32, v16i8>;
974}
975
976defm SUDOTlane : SIMDSUDOTIndex;
977
978}
979
980// ARMv8.2-A FP16 Fused Multiply-Add Long
981let Predicates = [HasNEON, HasFP16FML] in {
982defm FMLAL      : SIMDThreeSameVectorFML<0, 1, 0b001, "fmlal", int_aarch64_neon_fmlal>;
983defm FMLSL      : SIMDThreeSameVectorFML<0, 1, 0b101, "fmlsl", int_aarch64_neon_fmlsl>;
984defm FMLAL2     : SIMDThreeSameVectorFML<1, 0, 0b001, "fmlal2", int_aarch64_neon_fmlal2>;
985defm FMLSL2     : SIMDThreeSameVectorFML<1, 0, 0b101, "fmlsl2", int_aarch64_neon_fmlsl2>;
986defm FMLALlane  : SIMDThreeSameVectorFMLIndex<0, 0b0000, "fmlal", int_aarch64_neon_fmlal>;
987defm FMLSLlane  : SIMDThreeSameVectorFMLIndex<0, 0b0100, "fmlsl", int_aarch64_neon_fmlsl>;
988defm FMLAL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1000, "fmlal2", int_aarch64_neon_fmlal2>;
989defm FMLSL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1100, "fmlsl2", int_aarch64_neon_fmlsl2>;
990}
991
992// Armv8.2-A Crypto extensions
993let Predicates = [HasSHA3] in {
994def SHA512H   : CryptoRRRTied<0b0, 0b00, "sha512h">;
995def SHA512H2  : CryptoRRRTied<0b0, 0b01, "sha512h2">;
996def SHA512SU0 : CryptoRRTied_2D<0b0, 0b00, "sha512su0">;
997def SHA512SU1 : CryptoRRRTied_2D<0b0, 0b10, "sha512su1">;
998def RAX1      : CryptoRRR_2D<0b0,0b11, "rax1">;
999def EOR3      : CryptoRRRR_16B<0b00, "eor3">;
1000def BCAX      : CryptoRRRR_16B<0b01, "bcax">;
1001def XAR       : CryptoRRRi6<"xar">;
1002
1003class SHA3_pattern<Instruction INST, Intrinsic OpNode, ValueType VecTy>
1004  : Pat<(VecTy (OpNode (VecTy V128:$Vd), (VecTy V128:$Vn), (VecTy V128:$Vm))),
1005        (INST (VecTy V128:$Vd), (VecTy V128:$Vn), (VecTy V128:$Vm))>;
1006
1007def : Pat<(v2i64 (int_aarch64_crypto_sha512su0 (v2i64 V128:$Vn), (v2i64 V128:$Vm))),
1008          (SHA512SU0 (v2i64 V128:$Vn), (v2i64 V128:$Vm))>;
1009
1010def : SHA3_pattern<SHA512H, int_aarch64_crypto_sha512h, v2i64>;
1011def : SHA3_pattern<SHA512H2, int_aarch64_crypto_sha512h2, v2i64>;
1012def : SHA3_pattern<SHA512SU1, int_aarch64_crypto_sha512su1, v2i64>;
1013
1014def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v16i8>;
1015def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v8i16>;
1016def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v4i32>;
1017def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3u, v2i64>;
1018
1019class EOR3_pattern<ValueType VecTy>
1020  : Pat<(xor (xor (VecTy V128:$Vn), (VecTy V128:$Vm)), (VecTy V128:$Va)),
1021        (EOR3 (VecTy V128:$Vn), (VecTy V128:$Vm), (VecTy V128:$Va))>;
1022
1023def : EOR3_pattern<v16i8>;
1024def : EOR3_pattern<v8i16>;
1025def : EOR3_pattern<v4i32>;
1026def : EOR3_pattern<v2i64>;
1027
1028def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v16i8>;
1029def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v8i16>;
1030def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v4i32>;
1031def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxu, v2i64>;
1032
1033def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v16i8>;
1034def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v8i16>;
1035def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v4i32>;
1036def : SHA3_pattern<EOR3, int_aarch64_crypto_eor3s, v2i64>;
1037
1038def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v16i8>;
1039def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v8i16>;
1040def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v4i32>;
1041def : SHA3_pattern<BCAX, int_aarch64_crypto_bcaxs, v2i64>;
1042
1043def : Pat<(v2i64 (int_aarch64_crypto_rax1 (v2i64 V128:$Vn), (v2i64 V128:$Vm))),
1044          (RAX1 (v2i64 V128:$Vn), (v2i64 V128:$Vm))>;
1045
1046def : Pat<(v2i64 (int_aarch64_crypto_xar (v2i64 V128:$Vn), (v2i64 V128:$Vm), (i64 timm0_63:$imm))),
1047          (XAR (v2i64 V128:$Vn), (v2i64 V128:$Vm), (timm0_63:$imm))>;
1048
1049
1050} // HasSHA3
1051
1052let Predicates = [HasSM4] in {
1053def SM3TT1A   : CryptoRRRi2Tied<0b0, 0b00, "sm3tt1a">;
1054def SM3TT1B   : CryptoRRRi2Tied<0b0, 0b01, "sm3tt1b">;
1055def SM3TT2A   : CryptoRRRi2Tied<0b0, 0b10, "sm3tt2a">;
1056def SM3TT2B   : CryptoRRRi2Tied<0b0, 0b11, "sm3tt2b">;
1057def SM3SS1    : CryptoRRRR_4S<0b10, "sm3ss1">;
1058def SM3PARTW1 : CryptoRRRTied_4S<0b1, 0b00, "sm3partw1">;
1059def SM3PARTW2 : CryptoRRRTied_4S<0b1, 0b01, "sm3partw2">;
1060def SM4ENCKEY : CryptoRRR_4S<0b1, 0b10, "sm4ekey">;
1061def SM4E      : CryptoRRTied_4S<0b0, 0b01, "sm4e">;
1062
1063def : Pat<(v4i32 (int_aarch64_crypto_sm3ss1 (v4i32 V128:$Vn), (v4i32 V128:$Vm), (v4i32 V128:$Va))),
1064          (SM3SS1 (v4i32 V128:$Vn), (v4i32 V128:$Vm), (v4i32 V128:$Va))>;
1065
1066class SM3PARTW_pattern<Instruction INST, Intrinsic OpNode>
1067  : Pat<(v4i32 (OpNode (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm))),
1068        (INST (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm))>;
1069
1070class SM3TT_pattern<Instruction INST, Intrinsic OpNode>
1071  : Pat<(v4i32 (OpNode (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm), (i64 VectorIndexS_timm:$imm) )),
1072        (INST (v4i32 V128:$Vd), (v4i32 V128:$Vn), (v4i32 V128:$Vm), (VectorIndexS_timm:$imm))>;
1073
1074class SM4_pattern<Instruction INST, Intrinsic OpNode>
1075  : Pat<(v4i32 (OpNode (v4i32 V128:$Vn), (v4i32 V128:$Vm))),
1076        (INST (v4i32 V128:$Vn), (v4i32 V128:$Vm))>;
1077
1078def : SM3PARTW_pattern<SM3PARTW1, int_aarch64_crypto_sm3partw1>;
1079def : SM3PARTW_pattern<SM3PARTW2, int_aarch64_crypto_sm3partw2>;
1080
1081def : SM3TT_pattern<SM3TT1A, int_aarch64_crypto_sm3tt1a>;
1082def : SM3TT_pattern<SM3TT1B, int_aarch64_crypto_sm3tt1b>;
1083def : SM3TT_pattern<SM3TT2A, int_aarch64_crypto_sm3tt2a>;
1084def : SM3TT_pattern<SM3TT2B, int_aarch64_crypto_sm3tt2b>;
1085
1086def : SM4_pattern<SM4ENCKEY, int_aarch64_crypto_sm4ekey>;
1087def : SM4_pattern<SM4E, int_aarch64_crypto_sm4e>;
1088} // HasSM4
1089
1090let Predicates = [HasRCPC] in {
  // v8.3 Release Consistent processor consistent (RCpc) support, optional in v8.2.
1092  def LDAPRB  : RCPCLoad<0b00, "ldaprb", GPR32>;
1093  def LDAPRH  : RCPCLoad<0b01, "ldaprh", GPR32>;
1094  def LDAPRW  : RCPCLoad<0b10, "ldapr", GPR32>;
1095  def LDAPRX  : RCPCLoad<0b11, "ldapr", GPR64>;
1096}
1097
// v8.3a complex add and multiply-accumulate. No predicate here; that is done
// inside the multiclass, as the FP16 versions need different predicates.
1100defm FCMLA : SIMDThreeSameVectorTiedComplexHSD<1, 0b110, complexrotateop,
1101                                               "fcmla", null_frag>;
1102defm FCADD : SIMDThreeSameVectorComplexHSD<1, 0b111, complexrotateopodd,
1103                                           "fcadd", null_frag>;
1104defm FCMLA : SIMDIndexedTiedComplexHSD<0, 1, complexrotateop, "fcmla">;
1105
1106let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
1107  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot90 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
1108            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 0))>;
1109  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot270 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
1110            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 1))>;
1111  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot90 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
1112            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 0))>;
1113  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot270 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
1114            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 1))>;
1115}
1116
1117let Predicates = [HasComplxNum, HasNEON] in {
1118  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot90 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
1119            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 0))>;
1120  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot270 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
1121            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 1))>;
1122  foreach Ty = [v4f32, v2f64] in {
1123    def : Pat<(Ty (int_aarch64_neon_vcadd_rot90 (Ty V128:$Rn), (Ty V128:$Rm))),
1124              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 0))>;
1125    def : Pat<(Ty (int_aarch64_neon_vcadd_rot270 (Ty V128:$Rn), (Ty V128:$Rm))),
1126              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 1))>;
1127  }
1128}
1129
1130multiclass FCMLA_PATS<ValueType ty, DAGOperand Reg> {
1131  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1132            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 0)>;
1133  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1134            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 1)>;
1135  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1136            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 2)>;
1137  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
1138            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 3)>;
1139}
1140
1141multiclass FCMLA_LANE_PATS<ValueType ty, DAGOperand Reg, dag RHSDup> {
1142  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1143            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 0)>;
1144  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1145            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 1)>;
1146  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1147            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 2)>;
1148  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
1149            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 3)>;
1150}
1151
1152
1153let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
1154  defm : FCMLA_PATS<v4f16, V64>;
1155  defm : FCMLA_PATS<v8f16, V128>;
1156
1157  defm : FCMLA_LANE_PATS<v4f16, V64,
1158                         (v4f16 (bitconvert (v2i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexD:$idx))))>;
1159  defm : FCMLA_LANE_PATS<v8f16, V128,
1160                         (v8f16 (bitconvert (v4i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))>;
1161}
1162let Predicates = [HasComplxNum, HasNEON] in {
1163  defm : FCMLA_PATS<v2f32, V64>;
1164  defm : FCMLA_PATS<v4f32, V128>;
1165  defm : FCMLA_PATS<v2f64, V128>;
1166
1167  defm : FCMLA_LANE_PATS<v4f32, V128,
1168                         (v4f32 (bitconvert (v2i64 (AArch64duplane64 (v2i64 V128:$Rm), VectorIndexD:$idx))))>;
1169}
1170
1171// v8.3a Pointer Authentication
1172// These instructions inhabit part of the hint space and so can be used for
1173// armv8 targets. Keeping the old HINT mnemonic when compiling without PA is
1174// important for compatibility with other assemblers (e.g. GAS) when building
// software that must run on CPUs both with and without PA.
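//
// Illustrative (not normative): PACIASP below shares its encoding with
// "hint #25", so on a pre-v8.3 core it executes as a NOP, while on a core
// with PAuth it signs LR using SP as the modifier.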
1176let Uses = [LR], Defs = [LR] in {
1177  def PACIAZ   : SystemNoOperands<0b000, "hint\t#24">;
1178  def PACIBZ   : SystemNoOperands<0b010, "hint\t#26">;
1179  let isAuthenticated = 1 in {
1180    def AUTIAZ   : SystemNoOperands<0b100, "hint\t#28">;
1181    def AUTIBZ   : SystemNoOperands<0b110, "hint\t#30">;
1182  }
1183}
1184let Uses = [LR, SP], Defs = [LR] in {
1185  def PACIASP  : SystemNoOperands<0b001, "hint\t#25">;
1186  def PACIBSP  : SystemNoOperands<0b011, "hint\t#27">;
1187  let isAuthenticated = 1 in {
1188    def AUTIASP  : SystemNoOperands<0b101, "hint\t#29">;
1189    def AUTIBSP  : SystemNoOperands<0b111, "hint\t#31">;
1190  }
1191}
1192let Uses = [X16, X17], Defs = [X17], CRm = 0b0001 in {
1193  def PACIA1716  : SystemNoOperands<0b000, "hint\t#8">;
1194  def PACIB1716  : SystemNoOperands<0b010, "hint\t#10">;
1195  let isAuthenticated = 1 in {
1196    def AUTIA1716  : SystemNoOperands<0b100, "hint\t#12">;
1197    def AUTIB1716  : SystemNoOperands<0b110, "hint\t#14">;
1198  }
1199}
1200
1201let Uses = [LR], Defs = [LR], CRm = 0b0000 in {
1202  def XPACLRI   : SystemNoOperands<0b111, "hint\t#7">;
1203}
1204
1205// In order to be able to write readable assembly, LLVM should accept assembly
1206// inputs that use pointer authentication mnemonics, even with PA disabled.
1207// However, in order to be compatible with other assemblers (e.g. GAS), LLVM
1208// should not emit these mnemonics unless PA is enabled.
1209def : InstAlias<"paciaz", (PACIAZ), 0>;
1210def : InstAlias<"pacibz", (PACIBZ), 0>;
1211def : InstAlias<"autiaz", (AUTIAZ), 0>;
1212def : InstAlias<"autibz", (AUTIBZ), 0>;
1213def : InstAlias<"paciasp", (PACIASP), 0>;
1214def : InstAlias<"pacibsp", (PACIBSP), 0>;
1215def : InstAlias<"autiasp", (AUTIASP), 0>;
1216def : InstAlias<"autibsp", (AUTIBSP), 0>;
1217def : InstAlias<"pacia1716", (PACIA1716), 0>;
1218def : InstAlias<"pacib1716", (PACIB1716), 0>;
1219def : InstAlias<"autia1716", (AUTIA1716), 0>;
1220def : InstAlias<"autib1716", (AUTIB1716), 0>;
1221def : InstAlias<"xpaclri", (XPACLRI), 0>;
1222
1223// These pointer authentication instructions require armv8.3a
1224let Predicates = [HasPAuth] in {
1225
1226  // When PA is enabled, a better mnemonic should be emitted.
1227  def : InstAlias<"paciaz", (PACIAZ), 1>;
1228  def : InstAlias<"pacibz", (PACIBZ), 1>;
1229  def : InstAlias<"autiaz", (AUTIAZ), 1>;
1230  def : InstAlias<"autibz", (AUTIBZ), 1>;
1231  def : InstAlias<"paciasp", (PACIASP), 1>;
1232  def : InstAlias<"pacibsp", (PACIBSP), 1>;
1233  def : InstAlias<"autiasp", (AUTIASP), 1>;
1234  def : InstAlias<"autibsp", (AUTIBSP), 1>;
1235  def : InstAlias<"pacia1716", (PACIA1716), 1>;
1236  def : InstAlias<"pacib1716", (PACIB1716), 1>;
1237  def : InstAlias<"autia1716", (AUTIA1716), 1>;
1238  def : InstAlias<"autib1716", (AUTIB1716), 1>;
1239  def : InstAlias<"xpaclri", (XPACLRI), 1>;
1240
1241  multiclass SignAuth<bits<3> prefix, bits<3> prefix_z, string asm,
1242                      SDPatternOperator op> {
1243    def IA   : SignAuthOneData<prefix, 0b00, !strconcat(asm,  "ia"), op>;
1244    def IB   : SignAuthOneData<prefix, 0b01, !strconcat(asm,  "ib"), op>;
1245    def DA   : SignAuthOneData<prefix, 0b10, !strconcat(asm,  "da"), op>;
1246    def DB   : SignAuthOneData<prefix, 0b11, !strconcat(asm,  "db"), op>;
1247    def IZA  : SignAuthZero<prefix_z,  0b00, !strconcat(asm, "iza"), op>;
1248    def DZA  : SignAuthZero<prefix_z,  0b10, !strconcat(asm, "dza"), op>;
1249    def IZB  : SignAuthZero<prefix_z,  0b01, !strconcat(asm, "izb"), op>;
1250    def DZB  : SignAuthZero<prefix_z,  0b11, !strconcat(asm, "dzb"), op>;
1251  }
1252
1253  defm PAC : SignAuth<0b000, 0b010, "pac", int_ptrauth_sign>;
1254  defm AUT : SignAuth<0b001, 0b011, "aut", null_frag>;
1255
1256  def XPACI : ClearAuth<0, "xpaci">;
1257  def XPACD : ClearAuth<1, "xpacd">;
1258
1259  def PACGA : SignAuthTwoOperand<0b1100, "pacga", int_ptrauth_sign_generic>;
1260
1261  // Combined Instructions
1262  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1  in {
1263    def BRAA    : AuthBranchTwoOperands<0, 0, "braa">;
1264    def BRAB    : AuthBranchTwoOperands<0, 1, "brab">;
1265  }
1266  let isCall = 1, Defs = [LR], Uses = [SP] in {
1267    def BLRAA   : AuthBranchTwoOperands<1, 0, "blraa">;
1268    def BLRAB   : AuthBranchTwoOperands<1, 1, "blrab">;
1269  }
1270
1271  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1  in {
1272    def BRAAZ   : AuthOneOperand<0b000, 0, "braaz">;
1273    def BRABZ   : AuthOneOperand<0b000, 1, "brabz">;
1274  }
1275  let isCall = 1, Defs = [LR], Uses = [SP] in {
1276    def BLRAAZ  : AuthOneOperand<0b001, 0, "blraaz">;
1277    def BLRABZ  : AuthOneOperand<0b001, 1, "blrabz">;
1278  }
1279
1280  let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
1281    def RETAA   : AuthReturn<0b010, 0, "retaa">;
1282    def RETAB   : AuthReturn<0b010, 1, "retab">;
1283    def ERETAA  : AuthReturn<0b100, 0, "eretaa">;
1284    def ERETAB  : AuthReturn<0b100, 1, "eretab">;
1285  }
1286
1287  defm LDRAA  : AuthLoad<0, "ldraa", simm10Scaled>;
1288  defm LDRAB  : AuthLoad<1, "ldrab", simm10Scaled>;
1289
1290}
1291
// v8.3a floating point conversion for JavaScript
1293let Predicates = [HasJS, HasFPARMv8], Defs = [NZCV] in
1294def FJCVTZS  : BaseFPToIntegerUnscaled<0b01, 0b11, 0b110, FPR64, GPR32,
1295                                      "fjcvtzs",
1296                                      [(set GPR32:$Rd,
1297                                         (int_aarch64_fjcvtzs FPR64:$Rn))]> {
1298  let Inst{31} = 0;
1299} // HasJS, HasFPARMv8
1300
1301// v8.4 Flag manipulation instructions
1302let Predicates = [HasFlagM], Defs = [NZCV], Uses = [NZCV] in {
1303def CFINV : SimpleSystemI<0, (ins), "cfinv", "">, Sched<[WriteSys]> {
1304  let Inst{20-5} = 0b0000001000000000;
1305}
1306def SETF8  : BaseFlagManipulation<0, 0, (ins GPR32:$Rn), "setf8", "{\t$Rn}">;
1307def SETF16 : BaseFlagManipulation<0, 1, (ins GPR32:$Rn), "setf16", "{\t$Rn}">;
1308def RMIF   : FlagRotate<(ins GPR64:$Rn, uimm6:$imm, imm0_15:$mask), "rmif",
1309                        "{\t$Rn, $imm, $mask}">;
1310} // HasFlagM
1311
1312// v8.5 flag manipulation instructions
1313let Predicates = [HasAltNZCV], Uses = [NZCV], Defs = [NZCV] in {
1314
1315def XAFLAG : PstateWriteSimple<(ins), "xaflag", "">, Sched<[WriteSys]> {
1316  let Inst{18-16} = 0b000;
1317  let Inst{11-8} = 0b0000;
1318  let Unpredictable{11-8} = 0b1111;
1319  let Inst{7-5} = 0b001;
1320}
1321
1322def AXFLAG : PstateWriteSimple<(ins), "axflag", "">, Sched<[WriteSys]> {
1323  let Inst{18-16} = 0b000;
1324  let Inst{11-8} = 0b0000;
1325  let Unpredictable{11-8} = 0b1111;
1326  let Inst{7-5} = 0b010;
1327}
1328} // HasAltNZCV
1329
1330
1331// Armv8.5-A speculation barrier
1332def SB : SimpleSystemI<0, (ins), "sb", "">, Sched<[]> {
1333  let Inst{20-5} = 0b0001100110000111;
1334  let Unpredictable{11-8} = 0b1111;
1335  let Predicates = [HasSB];
1336  let hasSideEffects = 1;
1337}
1338
1339def : InstAlias<"clrex", (CLREX 0xf)>;
1340def : InstAlias<"isb", (ISB 0xf)>;
1341def : InstAlias<"ssbb", (DSB 0)>;
1342def : InstAlias<"pssbb", (DSB 4)>;
1343def : InstAlias<"dfb", (DSB 0b1100)>, Requires<[HasV8_0r]>;
1344
1345def MRS    : MRSI;
1346def MSR    : MSRI;
1347def MSRpstateImm1 : MSRpstateImm0_1;
1348def MSRpstateImm4 : MSRpstateImm0_15;
1349
1350def : Pat<(AArch64mrs imm:$id),
1351          (MRS imm:$id)>;
1352
1353// The thread pointer (on Linux, at least, where this has been implemented) is
1354// TPIDR_EL0.
1355def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
1356                       [(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;
1357
1358let Uses = [ X9 ], Defs = [ X16, X17, LR, NZCV ] in {
1359def HWASAN_CHECK_MEMACCESS : Pseudo<
1360  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
1361  [(int_hwasan_check_memaccess X9, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
1362  Sched<[]>;
1363}
1364
1365let Uses = [ X20 ], Defs = [ X16, X17, LR, NZCV ] in {
1366def HWASAN_CHECK_MEMACCESS_SHORTGRANULES : Pseudo<
1367  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
1368  [(int_hwasan_check_memaccess_shortgranules X20, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
1369  Sched<[]>;
1370}
1371
1372// The cycle counter PMC register is PMCCNTR_EL0.
1373let Predicates = [HasPerfMon] in
1374def : Pat<(readcyclecounter), (MRS 0xdce8)>;
1375
1376// FPCR register
1377def : Pat<(i64 (int_aarch64_get_fpcr)), (MRS 0xda20)>;
1378def : Pat<(int_aarch64_set_fpcr i64:$val), (MSR 0xda20, GPR64:$val)>;
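
// A sketch of the immediate packing used above, assuming the usual
// op0:op1:CRn:CRm:op2 MRS/MSR operand layout: FPCR is S3_3_C4_C4_0, i.e.
// (3<<14)|(3<<11)|(4<<7)|(4<<3)|0 = 0xda20, and PMCCNTR_EL0 (S3_3_C9_C13_0)
// packs to 0xdce8.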
1379
1380// Generic system instructions
1381def SYSxt  : SystemXtI<0, "sys">;
1382def SYSLxt : SystemLXtI<1, "sysl">;
1383
1384def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
1385                (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
1386                 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;
1387
1388
1389let Predicates = [HasTME] in {
1390
1391def TSTART : TMSystemI<0b0000, "tstart",
1392                      [(set GPR64:$Rt, (int_aarch64_tstart))]>;
1393
1394def TCOMMIT : TMSystemINoOperand<0b0000, "tcommit", [(int_aarch64_tcommit)]>;
1395
1396def TCANCEL : TMSystemException<0b011, "tcancel",
1397                                [(int_aarch64_tcancel timm64_0_65535:$imm)]>;
1398
1399def TTEST : TMSystemI<0b0001, "ttest", [(set GPR64:$Rt, (int_aarch64_ttest))]> {
1400  let mayLoad = 0;
1401  let mayStore = 0;
1402}
1403} // HasTME
1404
1405//===----------------------------------------------------------------------===//
1406// Move immediate instructions.
1407//===----------------------------------------------------------------------===//
1408
1409defm MOVK : InsertImmediate<0b11, "movk">;
1410defm MOVN : MoveImmediate<0b00, "movn">;
1411
1412let PostEncoderMethod = "fixMOVZ" in
1413defm MOVZ : MoveImmediate<0b10, "movz">;
1414
1415// First group of aliases covers an implicit "lsl #0".
1416def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, timm32_0_65535:$imm, 0), 0>;
1417def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, timm32_0_65535:$imm, 0), 0>;
1418def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, timm32_0_65535:$imm, 0)>;
1419def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, timm32_0_65535:$imm, 0)>;
1420def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, timm32_0_65535:$imm, 0)>;
1421def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, timm32_0_65535:$imm, 0)>;
1422
1423// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
1424def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
1425def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
1426def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
1427def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;
1428
1429def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
1430def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
1431def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
1432def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;
1433
1434def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g3:$sym, 48), 0>;
1435def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g2:$sym, 32), 0>;
1436def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g1:$sym, 16), 0>;
1437def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g0:$sym, 0), 0>;
1438
1439def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
1440def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;
1441
1442def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
1443def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;
1444
1445def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g1:$sym, 16), 0>;
1446def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g0:$sym, 0), 0>;
1447
1448// Final group of aliases covers true "mov $Rd, $imm" cases.
1449multiclass movw_mov_alias<string basename,Instruction INST, RegisterClass GPR,
1450                          int width, int shift> {
1451  def _asmoperand : AsmOperandClass {
1452    let Name = basename # width # "_lsl" # shift # "MovAlias";
1453    let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
1454                               # shift # ">";
1455    let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
1456  }
1457
1458  def _movimm : Operand<i32> {
1459    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
1460  }
1461
1462  def : InstAlias<"mov $Rd, $imm",
1463                  (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
1464}
1465
1466defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
1467defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;
1468
1469defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
1470defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
1471defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
1472defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;
1473
1474defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
1475defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;
1476
1477defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
1478defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
1479defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
1480defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;
1481
1482let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
1483    isAsCheapAsAMove = 1 in {
1484// FIXME: The following pseudo instructions are only needed because remat
1485// cannot handle multiple instructions.  When that changes, we can select
1486// directly to the real instructions and get rid of these pseudos.
1487
1488def MOVi32imm
1489    : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
1490             [(set GPR32:$dst, imm:$src)]>,
1491      Sched<[WriteImm]>;
1492def MOVi64imm
1493    : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
1494             [(set GPR64:$dst, imm:$src)]>,
1495      Sched<[WriteImm]>;
1496} // isReMaterializable, isCodeGenOnly
1497
1498// If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
1499// eventual expansion code fewer bits to worry about getting right. Marshalling
1500// the types is a little tricky though:
1501def i64imm_32bit : ImmLeaf<i64, [{
1502  return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
1503}]>;
1504
1505def s64imm_32bit : ImmLeaf<i64, [{
1506  int64_t Imm64 = static_cast<int64_t>(Imm);
1507  return Imm64 >= std::numeric_limits<int32_t>::min() &&
1508         Imm64 <= std::numeric_limits<int32_t>::max();
1509}]>;
1510
1511def trunc_imm : SDNodeXForm<imm, [{
1512  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
1513}]>;
1514
1515def gi_trunc_imm : GICustomOperandRenderer<"renderTruncImm">,
1516  GISDNodeXFormEquiv<trunc_imm>;
1517
1518let Predicates = [OptimizedGISelOrOtherSelector] in {
1519// The SUBREG_TO_REG isn't eliminated at -O0, which can result in pointless
1520// copies.
1521def : Pat<(i64 i64imm_32bit:$src),
1522          (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
1523}
1524
1525// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
1526def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
1527return CurDAG->getTargetConstant(
1528  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
1529}]>;
1530
1531def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
1532return CurDAG->getTargetConstant(
1533  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
1534}]>;
1535
1536
1537def : Pat<(f32 fpimm:$in),
1538  (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
1539def : Pat<(f64 fpimm:$in),
1540  (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;
1541
1542
1543// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
1544// sequences.
1545def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
1546                             tglobaladdr:$g1, tglobaladdr:$g0),
1547          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g0, 0),
1548                                  tglobaladdr:$g1, 16),
1549                          tglobaladdr:$g2, 32),
1550                  tglobaladdr:$g3, 48)>;
1551
1552def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
1553                             tblockaddress:$g1, tblockaddress:$g0),
1554          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g0, 0),
1555                                  tblockaddress:$g1, 16),
1556                          tblockaddress:$g2, 32),
1557                  tblockaddress:$g3, 48)>;
1558
1559def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
1560                             tconstpool:$g1, tconstpool:$g0),
1561          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g0, 0),
1562                                  tconstpool:$g1, 16),
1563                          tconstpool:$g2, 32),
1564                  tconstpool:$g3, 48)>;
1565
1566def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
1567                             tjumptable:$g1, tjumptable:$g0),
1568          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g0, 0),
1569                                  tjumptable:$g1, 16),
1570                          tjumptable:$g2, 32),
1571                  tjumptable:$g3, 48)>;
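
// Each pattern above materializes the address in four instructions, roughly:
//   movz x0, #g0
//   movk x0, #g1, lsl #16
//   movk x0, #g2, lsl #32
//   movk x0, #g3, lsl #48
// where g0..g3 are the individually relocated 16-bit slices of the address.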
1572
1573
1574//===----------------------------------------------------------------------===//
1575// Arithmetic instructions.
1576//===----------------------------------------------------------------------===//
1577
1578// Add/subtract with carry.
1579defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
1580defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;
1581
1582def : InstAlias<"ngc $dst, $src",  (SBCWr  GPR32:$dst, WZR, GPR32:$src)>;
1583def : InstAlias<"ngc $dst, $src",  (SBCXr  GPR64:$dst, XZR, GPR64:$src)>;
1584def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
1585def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;
1586
1587// Add/subtract
1588defm ADD : AddSub<0, "add", "sub", add>;
1589defm SUB : AddSub<1, "sub", "add">;
1590
1591def : InstAlias<"mov $dst, $src",
1592                (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
1593def : InstAlias<"mov $dst, $src",
1594                (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
1595def : InstAlias<"mov $dst, $src",
1596                (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
1597def : InstAlias<"mov $dst, $src",
1598                (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;
1599
1600defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
1601defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;
1602
1603// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
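// For example, if code both compares and subtracts the same operands,
// "cmp w0, w1" and "sub w2, w0, w1" can share one "subs w2, w0, w1"; the
// NZCV result is simply ignored where it is not needed.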
1604def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
1605          (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
1606def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
1607          (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
1608def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
1609          (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
1610def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
1611          (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
1612def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
1613          (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
1614def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
1615          (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
1616let AddedComplexity = 1 in {
1617def : Pat<(sub GPR32sp:$R2, arith_extended_reg32_i32:$R3),
1618          (SUBSWrx GPR32sp:$R2, arith_extended_reg32_i32:$R3)>;
1619def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64_i64:$R3),
1620          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64_i64:$R3)>;
1621}
1622
1623// Because of the immediate format for add/sub-imm instructions, the
1624// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
1625//  These patterns capture that transformation.
1626let AddedComplexity = 1 in {
1627def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1628          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1629def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1630          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1631def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1632          (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1633def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1634          (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1635}
1636
// The same applies to the flag-setting variants: (add_flag x, -1) must become
// (SUBS{W,X}ri x, 1) and (sub_flag x, -1) must become (ADDS{W,X}ri x, 1).
// These patterns capture that transformation.
1640let AddedComplexity = 1 in {
1641def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1642          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1643def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1644          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1645def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1646          (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1647def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1648          (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1649}
1650
1651def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1652def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1653def : InstAlias<"neg $dst, $src$shift",
1654                (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1655def : InstAlias<"neg $dst, $src$shift",
1656                (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1657
1658def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1659def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1660def : InstAlias<"negs $dst, $src$shift",
1661                (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1662def : InstAlias<"negs $dst, $src$shift",
1663                (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1664
1665
1666// Unsigned/Signed divide
1667defm UDIV : Div<0, "udiv", udiv>;
1668defm SDIV : Div<1, "sdiv", sdiv>;
1669
1670def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr GPR32:$Rn, GPR32:$Rm)>;
1671def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr GPR64:$Rn, GPR64:$Rm)>;
1672def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr GPR32:$Rn, GPR32:$Rm)>;
1673def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr GPR64:$Rn, GPR64:$Rm)>;
1674
1675// Variable shift
1676defm ASRV : Shift<0b10, "asr", sra>;
1677defm LSLV : Shift<0b00, "lsl", shl>;
1678defm LSRV : Shift<0b01, "lsr", srl>;
1679defm RORV : Shift<0b11, "ror", rotr>;
1680
1681def : ShiftAlias<"asrv", ASRVWr, GPR32>;
1682def : ShiftAlias<"asrv", ASRVXr, GPR64>;
1683def : ShiftAlias<"lslv", LSLVWr, GPR32>;
1684def : ShiftAlias<"lslv", LSLVXr, GPR64>;
1685def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
1686def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
1687def : ShiftAlias<"rorv", RORVWr, GPR32>;
1688def : ShiftAlias<"rorv", RORVXr, GPR64>;
1689
1690// Multiply-add
1691let AddedComplexity = 5 in {
1692defm MADD : MulAccum<0, "madd">;
1693defm MSUB : MulAccum<1, "msub">;
1694
1695def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
1696          (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1697def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
1698          (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1699
1700def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
1701          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1702def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
1703          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1704def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
1705          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1706def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
1707          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1708} // AddedComplexity = 5
1709
1710let AddedComplexity = 5 in {
1711def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
1712def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
1713def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
1714def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;
1715
1716def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext_inreg GPR64:$Rm, i32))),
1717          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
1718def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext GPR32:$Rm))),
1719          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
1720def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
1721          (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1722def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (and GPR64:$Rm, 0xFFFFFFFF))),
1723          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
1724def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (zext GPR32:$Rm))),
1725          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
1726def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
1727          (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1728
1729def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
1730          (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1731def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
1732          (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1733
1734def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
1735          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1736def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
1737          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1738def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
1739          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1740                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1741
1742def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1743          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1744def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1745          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1746def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
1747          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1748                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1749
1750def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
1751          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1752def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
1753          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1754def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
1755                    GPR64:$Ra)),
1756          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1757                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1758
1759def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1760          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1761def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1762          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1763def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
1764                                    (s64imm_32bit:$C)))),
1765          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1766                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1767} // AddedComplexity = 5
1768
1769def : MulAccumWAlias<"mul", MADDWrrr>;
1770def : MulAccumXAlias<"mul", MADDXrrr>;
1771def : MulAccumWAlias<"mneg", MSUBWrrr>;
1772def : MulAccumXAlias<"mneg", MSUBXrrr>;
1773def : WideMulAccumAlias<"smull", SMADDLrrr>;
1774def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
1775def : WideMulAccumAlias<"umull", UMADDLrrr>;
1776def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;
1777
1778// Multiply-high
1779def SMULHrr : MulHi<0b010, "smulh", mulhs>;
1780def UMULHrr : MulHi<0b110, "umulh", mulhu>;
1781
1782// CRC32
1783def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
1784def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
1785def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
1786def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;
1787
1788def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
1789def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
1790def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
1791def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;
1792
1793// v8.1 atomic CAS
1794defm CAS   : CompareAndSwap<0, 0, "">;
1795defm CASA  : CompareAndSwap<1, 0, "a">;
1796defm CASL  : CompareAndSwap<0, 1, "l">;
1797defm CASAL : CompareAndSwap<1, 1, "al">;
1798
1799// v8.1 atomic CASP
1800defm CASP   : CompareAndSwapPair<0, 0, "">;
1801defm CASPA  : CompareAndSwapPair<1, 0, "a">;
1802defm CASPL  : CompareAndSwapPair<0, 1, "l">;
1803defm CASPAL : CompareAndSwapPair<1, 1, "al">;
1804
1805// v8.1 atomic SWP
1806defm SWP   : Swap<0, 0, "">;
1807defm SWPA  : Swap<1, 0, "a">;
1808defm SWPL  : Swap<0, 1, "l">;
1809defm SWPAL : Swap<1, 1, "al">;
1810
// v8.1 atomic LD<OP>(register): loads the old memory value into Rt, then
// atomically stores back (old <OP> Rs), i.e. a load plus ST<OP>(register).
1812defm LDADD   : LDOPregister<0b000, "add", 0, 0, "">;
1813defm LDADDA  : LDOPregister<0b000, "add", 1, 0, "a">;
1814defm LDADDL  : LDOPregister<0b000, "add", 0, 1, "l">;
1815defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;
1816
1817defm LDCLR   : LDOPregister<0b001, "clr", 0, 0, "">;
1818defm LDCLRA  : LDOPregister<0b001, "clr", 1, 0, "a">;
1819defm LDCLRL  : LDOPregister<0b001, "clr", 0, 1, "l">;
1820defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;
1821
1822defm LDEOR   : LDOPregister<0b010, "eor", 0, 0, "">;
1823defm LDEORA  : LDOPregister<0b010, "eor", 1, 0, "a">;
1824defm LDEORL  : LDOPregister<0b010, "eor", 0, 1, "l">;
1825defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;
1826
1827defm LDSET   : LDOPregister<0b011, "set", 0, 0, "">;
1828defm LDSETA  : LDOPregister<0b011, "set", 1, 0, "a">;
1829defm LDSETL  : LDOPregister<0b011, "set", 0, 1, "l">;
1830defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;
1831
1832defm LDSMAX   : LDOPregister<0b100, "smax", 0, 0, "">;
1833defm LDSMAXA  : LDOPregister<0b100, "smax", 1, 0, "a">;
1834defm LDSMAXL  : LDOPregister<0b100, "smax", 0, 1, "l">;
1835defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;
1836
1837defm LDSMIN   : LDOPregister<0b101, "smin", 0, 0, "">;
1838defm LDSMINA  : LDOPregister<0b101, "smin", 1, 0, "a">;
1839defm LDSMINL  : LDOPregister<0b101, "smin", 0, 1, "l">;
1840defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;
1841
1842defm LDUMAX   : LDOPregister<0b110, "umax", 0, 0, "">;
1843defm LDUMAXA  : LDOPregister<0b110, "umax", 1, 0, "a">;
1844defm LDUMAXL  : LDOPregister<0b110, "umax", 0, 1, "l">;
1845defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;
1846
1847defm LDUMIN   : LDOPregister<0b111, "umin", 0, 0, "">;
1848defm LDUMINA  : LDOPregister<0b111, "umin", 1, 0, "a">;
1849defm LDUMINL  : LDOPregister<0b111, "umin", 0, 1, "l">;
1850defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;
1851
// v8.1 atomic ST<OP>(register), aliases for LD<OP>(register) with Rt=XZR.
1853defm : STOPregister<"stadd","LDADD">; // STADDx
1854defm : STOPregister<"stclr","LDCLR">; // STCLRx
1855defm : STOPregister<"steor","LDEOR">; // STEORx
1856defm : STOPregister<"stset","LDSET">; // STSETx
1857defm : STOPregister<"stsmax","LDSMAX">;// STSMAXx
1858defm : STOPregister<"stsmin","LDSMIN">;// STSMINx
1859defm : STOPregister<"stumax","LDUMAX">;// STUMAXx
1860defm : STOPregister<"stumin","LDUMIN">;// STUMINx
1861
1862// v8.5 Memory Tagging Extension
1863let Predicates = [HasMTE] in {
1864
1865def IRG   : BaseTwoOperand<0b0100, GPR64sp, "irg", int_aarch64_irg, GPR64sp, GPR64>,
1866            Sched<[]>{
1867  let Inst{31} = 1;
1868}
1869def GMI   : BaseTwoOperand<0b0101, GPR64, "gmi", int_aarch64_gmi, GPR64sp>, Sched<[]>{
1870  let Inst{31} = 1;
1871  let isNotDuplicable = 1;
1872}
1873def ADDG  : AddSubG<0, "addg", null_frag>;
1874def SUBG  : AddSubG<1, "subg", null_frag>;
1875
1876def : InstAlias<"irg $dst, $src", (IRG GPR64sp:$dst, GPR64sp:$src, XZR), 1>;
1877
1878def SUBP : SUBP<0, "subp", int_aarch64_subp>, Sched<[]>;
1879def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]>{
1880  let Defs = [NZCV];
1881}
1882
1883def : InstAlias<"cmpp $lhs, $rhs", (SUBPS XZR, GPR64sp:$lhs, GPR64sp:$rhs), 0>;
1884
1885def LDG : MemTagLoad<"ldg", "\t$Rt, [$Rn, $offset]">;
1886
1887def : Pat<(int_aarch64_addg (am_indexedu6s128 GPR64sp:$Rn, uimm6s16:$imm6), imm0_15:$imm4),
1888          (ADDG GPR64sp:$Rn, imm0_63:$imm6, imm0_15:$imm4)>;
1889def : Pat<(int_aarch64_ldg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn,  simm9s16:$offset)),
1890          (LDG GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;
1891
1892def : InstAlias<"ldg $Rt, [$Rn]", (LDG GPR64:$Rt, GPR64sp:$Rn, 0), 1>;
1893
1894def LDGM : MemTagVector<1, "ldgm", "\t$Rt, [$Rn]",
1895                   (outs GPR64:$Rt), (ins GPR64sp:$Rn)>;
1896def STGM : MemTagVector<0, "stgm", "\t$Rt, [$Rn]",
1897                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)>;
1898def STZGM : MemTagVector<0, "stzgm", "\t$Rt, [$Rn]",
1899                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)> {
1900  let Inst{23} = 0;
1901}
1902
1903defm STG   : MemTagStore<0b00, "stg">;
1904defm STZG  : MemTagStore<0b01, "stzg">;
1905defm ST2G  : MemTagStore<0b10, "st2g">;
1906defm STZ2G : MemTagStore<0b11, "stz2g">;
1907
1908def : Pat<(AArch64stg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1909          (STGOffset $Rn, $Rm, $imm)>;
1910def : Pat<(AArch64stzg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1911          (STZGOffset $Rn, $Rm, $imm)>;
1912def : Pat<(AArch64st2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1913          (ST2GOffset $Rn, $Rm, $imm)>;
1914def : Pat<(AArch64stz2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1915          (STZ2GOffset $Rn, $Rm, $imm)>;
1916
1917defm STGP     : StorePairOffset <0b01, 0, GPR64z, simm7s16, "stgp">;
1918def  STGPpre  : StorePairPreIdx <0b01, 0, GPR64z, simm7s16, "stgp">;
1919def  STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">;
1920
1921def : Pat<(int_aarch64_stg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)),
1922          (STGOffset GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;
1923
1924def : Pat<(int_aarch64_stgp (am_indexed7s128 GPR64sp:$Rn, simm7s16:$imm), GPR64:$Rt, GPR64:$Rt2),
1925          (STGPi $Rt, $Rt2, $Rn, $imm)>;
1926
1927def IRGstack
1928    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rsp, GPR64:$Rm), []>,
1929      Sched<[]>;
1930def TAGPstack
1931    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rn, uimm6s16:$imm6, GPR64sp:$Rm, imm0_15:$imm4), []>,
1932      Sched<[]>;
1933
1934// Explicit SP in the first operand prevents ShrinkWrap optimization
1935// from leaving this instruction out of the stack frame. When IRGstack
1936// is transformed into IRG, this operand is replaced with the actual
1937// register / expression for the tagged base pointer of the current function.
1938def : Pat<(int_aarch64_irg_sp i64:$Rm), (IRGstack SP, i64:$Rm)>;
1939
// Large STG to be expanded into a loop. $sz is the size, $Rn is the start
// address, $Rn_wback is one past the end of the range, and $Rm is the loop
// counter.
1942let isCodeGenOnly=1, mayStore=1 in {
1943def STGloop_wback
1944    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
1945             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
1946      Sched<[WriteAdr, WriteST]>;
1947
1948def STZGloop_wback
1949    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
1950             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
1951      Sched<[WriteAdr, WriteST]>;
1952
// Variants of the above where $Rn2 is an independent register, not tied to the
// input register $Rn. Their purpose is to allow a FrameIndex operand as $Rn
// (which of course cannot be written back).
1955def STGloop
1956    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
1957             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
1958      Sched<[WriteAdr, WriteST]>;
1959
1960def STZGloop
1961    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
1962             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
1963      Sched<[WriteAdr, WriteST]>;
1964}
1965
1966} // Predicates = [HasMTE]
1967
1968//===----------------------------------------------------------------------===//
1969// Logical instructions.
1970//===----------------------------------------------------------------------===//
1971
1972// (immediate)
1973defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
1974defm AND  : LogicalImm<0b00, "and", and, "bic">;
1975defm EOR  : LogicalImm<0b10, "eor", xor, "eon">;
1976defm ORR  : LogicalImm<0b01, "orr", or, "orn">;
1977
1978// FIXME: these aliases *are* canonical sometimes (when movz can't be
1979// used). Actually, it seems to be working right now, but putting logical_immXX
1980// here is a bit dodgy on the AsmParser side too.
1981def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
1982                                          logical_imm32:$imm), 0>;
1983def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
1984                                          logical_imm64:$imm), 0>;
1985
1986
1987// (register)
1988defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
1989defm BICS : LogicalRegS<0b11, 1, "bics",
1990                        BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
1991defm AND  : LogicalReg<0b00, 0, "and", and>;
1992defm BIC  : LogicalReg<0b00, 1, "bic",
1993                       BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
1994defm EON  : LogicalReg<0b10, 1, "eon",
1995                       BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
1996defm EOR  : LogicalReg<0b10, 0, "eor", xor>;
1997defm ORN  : LogicalReg<0b01, 1, "orn",
1998                       BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
1999defm ORR  : LogicalReg<0b01, 0, "orr", or>;
2000
2001def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
2002def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;
2003
2004def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
2005def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;
2006
2007def : InstAlias<"mvn $Wd, $Wm$sh",
2008                (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
2009def : InstAlias<"mvn $Xd, $Xm$sh",
2010                (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;
2011
2012def : InstAlias<"tst $src1, $src2",
2013                (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
2014def : InstAlias<"tst $src1, $src2",
2015                (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;
2016
2017def : InstAlias<"tst $src1, $src2",
2018                        (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
2019def : InstAlias<"tst $src1, $src2",
2020                        (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;
2021
2022def : InstAlias<"tst $src1, $src2$sh",
2023               (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
2024def : InstAlias<"tst $src1, $src2$sh",
2025               (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;
2026
2027
2028def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
2029def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
2030
2031
2032//===----------------------------------------------------------------------===//
2033// One operand data processing instructions.
2034//===----------------------------------------------------------------------===//
2035
2036defm CLS    : OneOperandData<0b101, "cls">;
2037defm CLZ    : OneOperandData<0b100, "clz", ctlz>;
2038defm RBIT   : OneOperandData<0b000, "rbit", bitreverse>;
2039
2040def  REV16Wr : OneWRegData<0b001, "rev16",
2041                                  UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
2042def  REV16Xr : OneXRegData<0b001, "rev16", null_frag>;
2043
2044def : Pat<(cttz GPR32:$Rn),
2045          (CLZWr (RBITWr GPR32:$Rn))>;
2046def : Pat<(cttz GPR64:$Rn),
2047          (CLZXr (RBITXr GPR64:$Rn))>;
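// e.g. a 32-bit cttz becomes the two-instruction sequence
//   rbit w8, w0
//   clz  w0, w8
// since bit-reversal turns trailing zeros into leading zeros.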
2048def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
2049                (i32 1))),
2050          (CLSWr GPR32:$Rn)>;
2051def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
2052                (i64 1))),
2053          (CLSXr GPR64:$Rn)>;
2054def : Pat<(int_aarch64_cls GPR32:$Rn), (CLSWr GPR32:$Rn)>;
2055def : Pat<(int_aarch64_cls64 GPR64:$Rm), (EXTRACT_SUBREG (CLSXr GPR64:$Rm), sub_32)>;
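// Note on the ctlz patterns above: they recognize cls(x) computed as
// clz(((x ^ (x >>s 31)) << 1) | 1). The xor clears the bits that match the
// sign, and the "<< 1 | 1" both drops the sign bit from the count and keeps
// the clz argument non-zero (covering x == 0 and x == -1).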
2056
// Unlike the other one operand instructions, the instructions with the "rev"
// mnemonic do *not* just differ in the size bit, but actually use different
// opcode bits for the different sizes.
2060def REVWr   : OneWRegData<0b010, "rev", bswap>;
2061def REVXr   : OneXRegData<0b011, "rev", bswap>;
2062def REV32Xr : OneXRegData<0b010, "rev32",
2063                                 UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;
2064
2065def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;
2066
// The bswap commutes with the rotr, so we want a pattern for both possible
// orders.
2069def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
2070def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
2071
2072// Match (srl (bswap x), C) -> revC if the upper bswap bits are known zero.
2073def : Pat<(srl (bswap top16Zero:$Rn), (i64 16)), (REV16Wr GPR32:$Rn)>;
2074def : Pat<(srl (bswap top32Zero:$Rn), (i64 32)), (REV32Xr GPR64:$Rn)>;
2075
2076//===----------------------------------------------------------------------===//
2077// Bitfield immediate extraction instruction.
2078//===----------------------------------------------------------------------===//
2079let hasSideEffects = 0 in
2080defm EXTR : ExtractImm<"extr">;
2081def : InstAlias<"ror $dst, $src, $shift",
2082            (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
2083def : InstAlias<"ror $dst, $src, $shift",
2084            (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;
2085
2086def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
2087          (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
2088def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
2089          (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;
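
// Illustrative: "ror w0, w1, #3" is EXTRWrri w0, w1, w1, #3 -- extracting 32
// bits starting at bit 3 of the doubled value w1:w1 is a rotate right by 3.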
2090
2091//===----------------------------------------------------------------------===//
2092// Other bitfield immediate instructions.
2093//===----------------------------------------------------------------------===//
2094let hasSideEffects = 0 in {
2095defm BFM  : BitfieldImmWith2RegArgs<0b01, "bfm">;
2096defm SBFM : BitfieldImm<0b00, "sbfm">;
2097defm UBFM : BitfieldImm<0b10, "ubfm">;
2098}
2099
// (32 - shift_amt) & 0x1f: 'immr' when folding a left shift into a bitfield op.
def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
2101  uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
2102  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2103}]>;
2104
// 31 - shift_amt: 'imms' when folding a left shift into a bitfield op.
def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
2106  uint64_t enc = 31 - N->getZExtValue();
2107  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2108}]>;
2109
2110// min(7, 31 - shift_amt)
2111def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
2112  uint64_t enc = 31 - N->getZExtValue();
2113  enc = enc > 7 ? 7 : enc;
2114  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2115}]>;
2116
2117// min(15, 31 - shift_amt)
2118def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
2119  uint64_t enc = 31 - N->getZExtValue();
2120  enc = enc > 15 ? 15 : enc;
2121  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2122}]>;
2123
// (64 - shift_amt) & 0x3f: 'immr' when folding a left shift into a bitfield op.
def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
2125  uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
2126  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2127}]>;
2128
// 63 - shift_amt: 'imms' when folding a left shift into a bitfield op.
def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
2130  uint64_t enc = 63 - N->getZExtValue();
2131  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2132}]>;
2133
2134// min(7, 63 - shift_amt)
2135def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
2136  uint64_t enc = 63 - N->getZExtValue();
2137  enc = enc > 7 ? 7 : enc;
2138  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2139}]>;
2140
2141// min(15, 63 - shift_amt)
2142def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
2143  uint64_t enc = 63 - N->getZExtValue();
2144  enc = enc > 15 ? 15 : enc;
2145  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2146}]>;
2147
2148// min(31, 63 - shift_amt)
2149def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
2150  uint64_t enc = 63 - N->getZExtValue();
2151  enc = enc > 31 ? 31 : enc;
2152  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
2153}]>;
2154
2155def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
2156          (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
2157                              (i64 (i32shift_b imm0_31:$imm)))>;
2158def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
2159          (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
2160                              (i64 (i64shift_b imm0_63:$imm)))>;
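
// Worked example for the patterns above: for "w0 = w1 << 5" we get
// immr = (32 - 5) & 0x1f = 27 and imms = 31 - 5 = 26, producing
// "ubfm w0, w1, #27, #26", which is the encoding of "lsl w0, w1, #5".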
2161
2162let AddedComplexity = 10 in {
2163def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
2164          (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
2165def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
2166          (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
2167}
2168
2169def : InstAlias<"asr $dst, $src, $shift",
2170                (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
2171def : InstAlias<"asr $dst, $src, $shift",
2172                (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
2173def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
2174def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
2175def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
2176def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
2177def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
2178
2179def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
2180          (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
2181def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
2182          (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
2183
2184def : InstAlias<"lsr $dst, $src, $shift",
2185                (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
2186def : InstAlias<"lsr $dst, $src, $shift",
2187                (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
2188def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
2189def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
2190def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
2191def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
2192def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

//===----------------------------------------------------------------------===//
// Conditional comparison instructions.
//===----------------------------------------------------------------------===//
defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;

//===----------------------------------------------------------------------===//
// Conditional select instructions.
//===----------------------------------------------------------------------===//
defm CSEL  : CondSelect<0, 0b00, "csel">;

def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;

def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;

def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
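// For example, (i32 (AArch64csel 0, 1, cc)) becomes CSINCWr WZR, WZR, cc
// (cc ? 0 : 0 + 1) and (i32 (AArch64csel 0, -1, cc)) becomes
// CSINVWr WZR, WZR, cc (cc ? 0 : ~0), so boolean and mask materialization
// needs no separate constant.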

def : Pat<(add GPR32:$val, (AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV)),
          (CSINCWr GPR32:$val, GPR32:$val, (i32 imm:$cc))>;
def : Pat<(add GPR64:$val, (zext (AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV))),
          (CSINCXr GPR64:$val, GPR64:$val, (i32 imm:$cc))>;

// The aliased instruction uses the inverse of the condition code from the
// alias instruction. The parser already inverts the condition code for
// these aliases.
2256def : InstAlias<"cset $dst, $cc",
2257                (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
2258def : InstAlias<"cset $dst, $cc",
2259                (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
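// For example, "cset w0, eq" is accepted as CSINCWr w0, wzr, wzr, ne:
// w0 = (ne ? 0 : 0 + 1), which is 1 exactly when the EQ condition holds.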

def : InstAlias<"csetm $dst, $cc",
                (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
def : InstAlias<"csetm $dst, $cc",
                (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;

def : InstAlias<"cinc $dst, $src, $cc",
                (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cinc $dst, $src, $cc",
                (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

def : InstAlias<"cinv $dst, $src, $cc",
                (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cinv $dst, $src, $cc",
                (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

def : InstAlias<"cneg $dst, $src, $cc",
                (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cneg $dst, $src, $cc",
                (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

//===----------------------------------------------------------------------===//
// PC-relative instructions.
//===----------------------------------------------------------------------===//
let isReMaterializable = 1 in {
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def ADR  : ADRI<0, "adr", adrlabel,
                [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>;
} // hasSideEffects = 0

def ADRP : ADRI<1, "adrp", adrplabel,
                [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
} // isReMaterializable = 1

// page address of a constant pool entry, block address
def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>;
def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>;
def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>;
def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>;
def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>;
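// ADRP yields the 4KiB page address of the symbol; the low 12 bits are
// supplied by a follow-up instruction, e.g.:
//   adrp x0, sym            // x0 = page containing sym
//   add  x0, x0, :lo12:sym  // x0 = full address of sym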

//===----------------------------------------------------------------------===//
// Unconditional branch (register) instructions.
//===----------------------------------------------------------------------===//

let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
def RET  : BranchReg<0b0010, "ret", []>;
def DRPS : SpecialReturn<0b0101, "drps">;
def ERET : SpecialReturn<0b0100, "eret">;
} // isReturn = 1, isTerminator = 1, isBarrier = 1

// Default to the LR register.
def : InstAlias<"ret", (RET LR)>;

let isCall = 1, Defs = [LR], Uses = [SP] in {
  def BLR : BranchReg<0b0001, "blr", []>;
  def BLRNoIP : Pseudo<(outs), (ins GPR64noip:$Rn), []>,
                Sched<[WriteBrReg]>,
                PseudoInstExpansion<(BLR GPR64:$Rn)>;
  def BLR_RVMARKER : Pseudo<(outs), (ins variable_ops), []>,
                     Sched<[WriteBrReg]>;
} // isCall

def : Pat<(AArch64call GPR64:$Rn),
          (BLR GPR64:$Rn)>,
      Requires<[NoSLSBLRMitigation]>;
def : Pat<(AArch64call GPR64noip:$Rn),
          (BLRNoIP GPR64noip:$Rn)>,
      Requires<[SLSBLRMitigation]>;
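// Under the straight-line-speculation (SLS) BLR mitigation, indirect calls
// are selected as BLRNoIP: the GPR64noip class keeps the call target out of
// X16, X17 and LR, so the call can later be rewritten to go through a
// per-register SLS hardening thunk.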

def : Pat<(AArch64call_rvmarker (i64 tglobaladdr:$rvfunc), GPR64:$Rn),
          (BLR_RVMARKER tglobaladdr:$rvfunc, GPR64:$Rn)>,
      Requires<[NoSLSBLRMitigation]>;

let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def BR  : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
} // isBranch, isTerminator, isBarrier, isIndirectBranch

// Create a separate pseudo-instruction for codegen to use so that we don't
// flag LR as used in every function. The epilogue will restore it before
// the RET if it is legitimately used.
def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
                   Sched<[WriteBrReg]> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
}

// This is a directive-like pseudo-instruction. The purpose is to insert an
// R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
// (which in the usual case is a BLR).
let hasSideEffects = 1 in
def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
  let AsmString = ".tlsdesccall $sym";
}

// Pseudo instruction to tell the streamer to emit a 'B' character into the
// augmentation string.
def EMITBKEY : Pseudo<(outs), (ins), []>, Sched<[]> {}

// FIXME: maybe the scratch register used shouldn't be fixed to X1?
// FIXME: can "hasSideEffects" be dropped?
// This gets lowered to an instruction sequence which takes 16 bytes.
let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1, Size = 16,
    isCodeGenOnly = 1 in
def TLSDESC_CALLSEQ
    : Pseudo<(outs), (ins i64imm:$sym),
             [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
      Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
          (TLSDESC_CALLSEQ texternalsym:$sym)>;
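// The 16-byte expansion is the standard TLS descriptor call sequence,
// roughly (four instructions, matching the WriteI/WriteLD/WriteI/WriteBrReg
// scheduling above; the .tlsdesccall directive itself emits no bytes):
//   adrp x0, :tlsdesc:var
//   ldr  x1, [x0, :tlsdesc_lo12:var]
//   add  x0, x0, :tlsdesc_lo12:var
//   .tlsdesccall var
//   blr  x1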

//===----------------------------------------------------------------------===//
// Conditional branch (immediate) instruction.
//===----------------------------------------------------------------------===//
def Bcc : BranchCond<0, "b">;

// Armv8.8-A variant form which hints to the branch predictor that
// this branch is very likely to go the same way nearly all the time
// (even though it is not known at compile time _which_ way that is).
def BCcc : BranchCond<1, "bc">, Requires<[HasHBC]>;

//===----------------------------------------------------------------------===//
// Compare-and-branch instructions.
//===----------------------------------------------------------------------===//
defm CBZ  : CmpBranch<0, "cbz", AArch64cbz>;
defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;

//===----------------------------------------------------------------------===//
// Test-bit-and-branch instructions.
//===----------------------------------------------------------------------===//
defm TBZ  : TestBranch<0, "tbz", AArch64tbz>;
defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;

//===----------------------------------------------------------------------===//
// Unconditional branch (immediate) instructions.
//===----------------------------------------------------------------------===//
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
def B  : BranchImm<0, "b", [(br bb:$addr)]>;
} // isBranch, isTerminator, isBarrier

let isCall = 1, Defs = [LR], Uses = [SP] in {
def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
} // isCall
def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;

//===----------------------------------------------------------------------===//
// Exception generation instructions.
//===----------------------------------------------------------------------===//
let isTrap = 1 in {
def BRK   : ExceptionGeneration<0b001, 0b00, "brk">;
}
def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">, Requires<[HasEL3]>;
def HLT   : ExceptionGeneration<0b010, 0b00, "hlt">;
def HVC   : ExceptionGeneration<0b000, 0b10, "hvc">;
def SMC   : ExceptionGeneration<0b000, 0b11, "smc">, Requires<[HasEL3]>;
def SVC   : ExceptionGeneration<0b000, 0b01, "svc">;

// DCPSn defaults to an immediate operand of zero if unspecified.
def : InstAlias<"dcps1", (DCPS1 0)>;
def : InstAlias<"dcps2", (DCPS2 0)>;
def : InstAlias<"dcps3", (DCPS3 0)>, Requires<[HasEL3]>;

def UDF : UDFType<0, "udf">;

//===----------------------------------------------------------------------===//
// Load instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
defm LDPW : LoadPairOffset<0b00, 0, GPR32z, simm7s4, "ldp">;
defm LDPX : LoadPairOffset<0b10, 0, GPR64z, simm7s8, "ldp">;
defm LDPS : LoadPairOffset<0b00, 1, FPR32Op, simm7s4, "ldp">;
defm LDPD : LoadPairOffset<0b01, 1, FPR64Op, simm7s8, "ldp">;
defm LDPQ : LoadPairOffset<0b10, 1, FPR128Op, simm7s16, "ldp">;

defm LDPSW : LoadPairOffset<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (pre-indexed)
def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (post-indexed)
def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;


// Pair (no allocate)
defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32z, simm7s4, "ldnp">;
defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64z, simm7s8, "ldnp">;
defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32Op, simm7s4, "ldnp">;
defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64Op, simm7s8, "ldnp">;
defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128Op, simm7s16, "ldnp">;

def : Pat<(AArch64ldp (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (LDPXi GPR64sp:$Rn, simm7s8:$offset)>;

//---
// (register offset)
//---

// Integer
defm LDRBB : Load8RO<0b00,  0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
defm LDRW  : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
defm LDRX  : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;

// Floating-point
defm LDRB : Load8RO<0b00,   1, 0b01, FPR8Op,   "ldr", untyped, load>;
defm LDRH : Load16RO<0b01,  1, 0b01, FPR16Op,  "ldr", f16, load>;
defm LDRS : Load32RO<0b10,  1, 0b01, FPR32Op,  "ldr", f32, load>;
defm LDRD : Load64RO<0b11,  1, 0b01, FPR64Op,  "ldr", f64, load>;
defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128Op, "ldr", f128, load>;

// Load sign-extended half-word
defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;

// Load sign-extended byte
defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;

// Load sign-extended word
defm LDRSW  : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;

// Pre-fetch.
defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;

// Regular loads have no alignment requirement, so it is safe to map the
// vector loads directly onto the interesting addressing modes.
// FIXME: We could do the same for bitconvert to floating point vectors.
multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
                              ValueType ScalTy, ValueType VecTy,
                              Instruction LOADW, Instruction LOADX,
                              SubRegIndex sub> {
  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
                           sub)>;

  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
                           sub)>;
}
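// For instance, the ro8/v8i8 instantiation below matches
// (v8i8 (scalar_to_vector (i32 (extloadi8 addr)))) and selects a plain
// "ldr b0, [xN, wM, extend]", with the loaded byte placed in lane 0 of an
// otherwise undefined vector via the bsub subregister index.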

let AddedComplexity = 10 in {
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v8i8,  LDRBroW, LDRBroX, bsub>;
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v16i8, LDRBroW, LDRBroX, bsub>;

defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro16, load,       i32, v4f16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, load,       i32, v8f16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro32, load,       i32, v2i32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       i32, v4i32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro32, load,       f32, v2f32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       f32, v4f32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro64, load,       i64, v2i64, LDRDroW, LDRDroX, dsub>;

defm : ScalToVecROLoadPat<ro64, load,       f64, v2f64, LDRDroW, LDRDroX, dsub>;


def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend64:$extend))))),
           (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;

def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend64:$extend))))),
           (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
}

// Match all 64-bit-wide loads whose type is compatible with FPR64
multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
                        Instruction LOADW, Instruction LOADX> {

  def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
let Predicates = [IsLE] in {
  // We must do vector loads with LD1 in big-endian.
  defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v8i8,  LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4bf16, LDRDroW, LDRDroX>;
}

defm : VecROLoadPat<ro64, v1i64,  LDRDroW, LDRDroX>;
defm : VecROLoadPat<ro64, v1f64,  LDRDroW, LDRDroX>;

// Match all 128-bit-wide loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  // We must do vector loads with LD1 in big-endian.
  defm : VecROLoadPat<ro128, v2i64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v2f64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4i32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4f32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8i16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8f16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8bf16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v16i8,  LDRQroW, LDRQroX>;
}
} // AddedComplexity = 10

// zextload -> i64
multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                           sub_32)>;

  def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                           sub_32)>;
}
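// A load into a W register implicitly zeroes bits [63:32] of the X register,
// so a zero-extending load to i64 is just the 32-bit (or narrower) load
// wrapped in SUBREG_TO_REG to assert that the upper half is already zero.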

let AddedComplexity = 10 in {
  defm : ExtLoadTo64ROPat<ro8,  zextloadi8,  LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  zextloadi1,  LDRBBroW, LDRBBroX>;

  // extload -> zextload
  defm : ExtLoadTo64ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // extloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  extloadi1,   LDRBBroW, LDRBBroX>;
}


// extload and zextload -> i32
multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;

}

let AddedComplexity = 10 in {
  // extload -> zextload
  defm : ExtLoadTo32ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo32ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo32ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
}

//---
// (unsigned immediate)
//---
defm LDRX : LoadUI<0b11, 0, 0b01, GPR64z, uimm12s8, "ldr",
                   [(set GPR64z:$Rt,
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRW : LoadUI<0b10, 0, 0b01, GPR32z, uimm12s4, "ldr",
                   [(set GPR32z:$Rt,
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRB : LoadUI<0b00, 1, 0b01, FPR8Op, uimm12s1, "ldr",
                   [(set FPR8Op:$Rt,
                         (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
defm LDRH : LoadUI<0b01, 1, 0b01, FPR16Op, uimm12s2, "ldr",
                   [(set (f16 FPR16Op:$Rt),
                         (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
defm LDRS : LoadUI<0b10, 1, 0b01, FPR32Op, uimm12s4, "ldr",
                   [(set (f32 FPR32Op:$Rt),
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRD : LoadUI<0b11, 1, 0b01, FPR64Op, uimm12s8, "ldr",
                   [(set (f64 FPR64Op:$Rt),
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128Op, uimm12s16, "ldr",
                 [(set (f128 FPR128Op:$Rt),
                       (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;

// bf16 load pattern
def : Pat <(bf16 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;

// Regular loads have no alignment requirement, so it is safe to map the
// vector loads directly onto the interesting addressing modes.
// FIXME: We could do the same for bitconvert to floating point vectors.
def : Pat <(v8i8 (scalar_to_vector (i32
               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v16i8 (scalar_to_vector (i32
               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v4i16 (scalar_to_vector (i32
               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v8i16 (scalar_to_vector (i32
               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v2i32 (scalar_to_vector (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v4i32 (scalar_to_vector (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v1i64 (scalar_to_vector (i64
               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat <(v2i64 (scalar_to_vector (i64
               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                          (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;

// Match all 64-bit-wide loads whose type is compatible with FPR64
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian.
  def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4bf16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
}
def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;

// Match all 128-bit-wide loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian.
  def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8bf16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
}
def : Pat<(f128  (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
          (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;

defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
                    [(set GPR32:$Rt,
                          (zextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                     uimm12s2:$offset)))]>;
defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
                    [(set GPR32:$Rt,
                          (zextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                   uimm12s1:$offset)))]>;
// zextload -> i64
def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;

// zextloadi1 -> zextloadi8
def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;

// extload -> zextload
def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;

// load sign-extended half-word
defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
                     [(set GPR32:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;
defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
                     [(set GPR64:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;

// load sign-extended byte
defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
                     [(set GPR32:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;
defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
                     [(set GPR64:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;

// load sign-extended word
defm LDRSW  : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
                     [(set GPR64:$Rt,
                           (sextloadi32 (am_indexed32 GPR64sp:$Rn,
                                                      uimm12s4:$offset)))]>;

// load zero-extended word
def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
      (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;

// Pre-fetch.
def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
                        [(AArch64Prefetch imm:$Rt,
                                        (am_indexed64 GPR64sp:$Rn,
                                                      uimm12s8:$offset))]>;

def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;

//---
// (literal)

def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
  if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
    const DataLayout &DL = MF->getDataLayout();
    Align Align = G->getGlobal()->getPointerAlignment(DL);
    return Align >= 4 && G->getOffset() % 4 == 0;
  }
  if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
    return C->getAlign() >= 4 && C->getOffset() % 4 == 0;
  return false;
}]>;
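// The LDR (literal) forms address a 4-byte-aligned, PC-relative window of
// +/-1MiB, which is why alignedglobal requires alignment of at least 4 and
// an offset that is a multiple of 4 before a load of a global is folded
// into them.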

def LDRWl : LoadLiteral<0b00, 0, GPR32z, "ldr",
  [(set GPR32z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRXl : LoadLiteral<0b01, 0, GPR64z, "ldr",
  [(set GPR64z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRSl : LoadLiteral<0b00, 1, FPR32Op, "ldr",
  [(set (f32 FPR32Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRDl : LoadLiteral<0b01, 1, FPR64Op, "ldr",
  [(set (f64 FPR64Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRQl : LoadLiteral<0b10, 1, FPR128Op, "ldr",
  [(set (f128 FPR128Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;

// load sign-extended word
def LDRSWl : LoadLiteral<0b10, 0, GPR64z, "ldrsw",
  [(set GPR64z:$Rt, (sextloadi32 (AArch64adr alignedglobal:$label)))]>;

let AddedComplexity = 20 in {
def : Pat<(i64 (zextloadi32 (AArch64adr alignedglobal:$label))),
        (SUBREG_TO_REG (i64 0), (LDRWl $label), sub_32)>;
}

// prefetch
def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
//                   [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;

//---
// (unscaled immediate)
defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64z, "ldur",
                    [(set GPR64z:$Rt,
                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32z, "ldur",
                    [(set GPR32z:$Rt,
                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8Op, "ldur",
                    [(set FPR8Op:$Rt,
                          (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16Op, "ldur",
                    [(set (f16 FPR16Op:$Rt),
                          (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32Op, "ldur",
                    [(set (f32 FPR32Op:$Rt),
                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64Op, "ldur",
                    [(set (f64 FPR64Op:$Rt),
                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128Op, "ldur",
                    [(set (f128 FPR128Op:$Rt),
                          (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;

defm LDURHH
    : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
             [(set GPR32:$Rt,
                    (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURBB
    : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
             [(set GPR32:$Rt,
                    (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// Match all 64-bit-wide loads whose type is compatible with FPR64
let Predicates = [IsLE] in {
  def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
}
def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;

// Match all 128-bit-wide loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
}

// anyext -> zext
def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
// unscaled zext
def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;


//---
// LDR mnemonics fall back to LDUR for negative or unaligned offsets.

// Define new assembler match classes, as we want to match these only when
// they don't otherwise match the scaled addressing mode for LDR/STR. Don't
// associate a DiagnosticType either, as we want the diagnostic for the
// canonical form (the scaled operand) to take precedence.
class SImm9OffsetOperand<int Width> : AsmOperandClass {
  let Name = "SImm9OffsetFB" # Width;
  let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
  let RenderMethod = "addImmOperands";
}

def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;

def simm9_offset_fb8 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB8Operand;
}
def simm9_offset_fb16 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB16Operand;
}
def simm9_offset_fb32 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB32Operand;
}
def simm9_offset_fb64 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB64Operand;
}
def simm9_offset_fb128 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB128Operand;
}

def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
               (LDURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
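// For example, "ldr x0, [x1, #-8]" cannot be encoded as the scaled,
// unsigned-offset LDRXui, so with these aliases it matches LDURXi instead;
// the same fallback applies to positive offsets that are not a multiple of
// the access size.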

// zextload -> i64
def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
  (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
  (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;

// load sign-extended half-word
defm LDURSHW
    : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
               [(set GPR32:$Rt,
                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSHX
    : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
              [(set GPR64:$Rt,
                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended byte
defm LDURSBW
    : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
                [(set GPR32:$Rt,
                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSBX
    : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
                [(set GPR64:$Rt,
                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended word
defm LDURSW
    : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
              [(set GPR64:$Rt,
                    (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;

// zero and sign extending aliases from generic LDR* mnemonics to LDUR*.
def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
                (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
                (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
                (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
                (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
                (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
                (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
                (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;

// Pre-fetch.
defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
                  [(AArch64Prefetch imm:$Rt,
                                  (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;

//---
// (unscaled immediate, unprivileged)
defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;

defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;

// load sign-extended half-word
defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;

// load sign-extended byte
defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;

// load sign-extended word
defm LDTRSW  : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;

//---
// (immediate pre-indexed)
def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte and half-word
def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;

//---
// (immediate post-indexed)
def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte and half-word
def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;

//===----------------------------------------------------------------------===//
// Store instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
// FIXME: Use dedicated range-checked addressing mode operand here.
defm STPW : StorePairOffset<0b00, 0, GPR32z, simm7s4, "stp">;
defm STPX : StorePairOffset<0b10, 0, GPR64z, simm7s8, "stp">;
defm STPS : StorePairOffset<0b00, 1, FPR32Op, simm7s4, "stp">;
defm STPD : StorePairOffset<0b01, 1, FPR64Op, simm7s8, "stp">;
defm STPQ : StorePairOffset<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (pre-indexed)
def STPWpre : StorePairPreIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpre : StorePairPreIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpre : StorePairPreIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpre : StorePairPreIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpre : StorePairPreIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (post-indexed)
def STPWpost : StorePairPostIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpost : StorePairPostIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpost : StorePairPostIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpost : StorePairPostIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpost : StorePairPostIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (no allocate)
defm STNPW : StorePairNoAlloc<0b00, 0, GPR32z, simm7s4, "stnp">;
defm STNPX : StorePairNoAlloc<0b10, 0, GPR64z, simm7s8, "stnp">;
defm STNPS : StorePairNoAlloc<0b00, 1, FPR32Op, simm7s4, "stnp">;
defm STNPD : StorePairNoAlloc<0b01, 1, FPR64Op, simm7s8, "stnp">;
defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128Op, simm7s16, "stnp">;

def : Pat<(AArch64stp GPR64z:$Rt, GPR64z:$Rt2, (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (STPXi GPR64z:$Rt, GPR64z:$Rt2, GPR64sp:$Rn, simm7s8:$offset)>;

def : Pat<(AArch64stnp FPR128:$Rt, FPR128:$Rt2, (am_indexed7s128 GPR64sp:$Rn, simm7s16:$offset)),
          (STNPQi FPR128:$Rt, FPR128:$Rt2, GPR64sp:$Rn, simm7s16:$offset)>;


//---
// (Register offset)

// Integer
defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
defm STRW  : Store32RO<0b10, 0, 0b00, GPR32, "str",  i32, store>;
defm STRX  : Store64RO<0b11, 0, 0b00, GPR64, "str",  i64, store>;


// Floating-point
defm STRB : Store8RO< 0b00,  1, 0b00, FPR8Op,   "str", untyped, store>;
defm STRH : Store16RO<0b01,  1, 0b00, FPR16Op,  "str", f16,     store>;
defm STRS : Store32RO<0b10,  1, 0b00, FPR32Op,  "str", f32,     store>;
defm STRD : Store64RO<0b11,  1, 0b00, FPR64Op,  "str", f64,     store>;
defm STRQ : Store128RO<0b00, 1, 0b10, FPR128Op, "str">;

let Predicates = [UseSTRQro], AddedComplexity = 10 in {
  def : Pat<(store (f128 FPR128:$Rt),
                        (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
                                        ro_Wextend128:$extend)),
            (STRQroW FPR128:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend)>;
  def : Pat<(store (f128 FPR128:$Rt),
                        (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
                                        ro_Xextend128:$extend)),
            (STRQroX FPR128:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend128:$extend)>;
}

multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
                                 Instruction STRW, Instruction STRX> {

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
  // truncstore i64
  defm : TruncStoreFrom64ROPat<ro8,  truncstorei8,  STRBBroW, STRBBroX>;
  defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
  defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW,  STRWroX>;
}
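// A truncating store of an i64 needs no separate truncate: the patterns
// above extract the W subregister (sub_32) and use the corresponding
// 32-bit or narrower STR form, which stores only the low bits anyway.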

multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
                         Instruction STRW, Instruction STRX> {
  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
// Match all 64-bit-wide stores whose type is compatible with FPR64
let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4bf16, FPR64, STRDroW, STRDroX>;
}

defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;

// Match all 128-bit-wide stores whose type is compatible with FPR128
let Predicates = [IsLE, UseSTRQro] in {
  // We must use ST1 to store vectors in big-endian.
  defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8bf16, FPR128, STRQroW, STRQroX>;
}
} // AddedComplexity = 10

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
                              ValueType VecTy, ValueType STy,
                              SubRegIndex SubRegIdx,
                              Instruction STRW, Instruction STRX> {

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 19 in {
  defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro16,         store, v8f16, f16, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro32,         store, v4i32, i32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro32,         store, v4f32, f32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro64,         store, v2i64, i64, dsub, STRDroW, STRDroX>;
  defm : VecROStoreLane0Pat<ro64,         store, v2f64, f64, dsub, STRDroW, STRDroX>;
}
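// Storing lane 0 this way avoids an ST1 lane store: lane 0 is simply the
// low part of the Q register, so extracting the h/s/d subregister and using
// a scalar STR writes exactly the lane-0 bytes.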

//---
// (unsigned immediate)
defm STRX : StoreUIz<0b11, 0, 0b00, GPR64z, uimm12s8, "str",
                   [(store GPR64z:$Rt,
                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRW : StoreUIz<0b10, 0, 0b00, GPR32z, uimm12s4, "str",
                    [(store GPR32z:$Rt,
                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRB : StoreUI<0b00, 1, 0b00, FPR8Op, uimm12s1, "str",
                    [(store FPR8Op:$Rt,
                            (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
defm STRH : StoreUI<0b01, 1, 0b00, FPR16Op, uimm12s2, "str",
                    [(store (f16 FPR16Op:$Rt),
                            (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
defm STRS : StoreUI<0b10, 1, 0b00, FPR32Op, uimm12s4, "str",
                    [(store (f32 FPR32Op:$Rt),
                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRD : StoreUI<0b11, 1, 0b00, FPR64Op, uimm12s8, "str",
                    [(store (f64 FPR64Op:$Rt),
                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRQ : StoreUI<0b00, 1, 0b10, FPR128Op, uimm12s16, "str", []>;

defm STRHH : StoreUIz<0b01, 0, 0b00, GPR32z, uimm12s2, "strh",
                     [(truncstorei16 GPR32z:$Rt,
                                     (am_indexed16 GPR64sp:$Rn,
                                                   uimm12s2:$offset))]>;
defm STRBB : StoreUIz<0b00, 0, 0b00, GPR32z, uimm12s1,  "strb",
                     [(truncstorei8 GPR32z:$Rt,
                                    (am_indexed8 GPR64sp:$Rn,
                                                 uimm12s1:$offset))]>;

// bf16 store pattern
def : Pat<(store (bf16 FPR16Op:$Rt),
                 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
          (STRHui FPR16:$Rt, GPR64sp:$Rn, uimm12s2:$offset)>;

let AddedComplexity = 10 in {

// Match all 64-bit-wide stores whose type is compatible with FPR64
3335def : Pat<(store (v1i64 FPR64:$Rt),
3336                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3337          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3338def : Pat<(store (v1f64 FPR64:$Rt),
3339                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3340          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3341
3342let Predicates = [IsLE] in {
3343  // Big-endian must use ST1 to store vectors, so these patterns are LE-only.
3344  def : Pat<(store (v2f32 FPR64:$Rt),
3345                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3346            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3347  def : Pat<(store (v8i8 FPR64:$Rt),
3348                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3349            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3350  def : Pat<(store (v4i16 FPR64:$Rt),
3351                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3352            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3353  def : Pat<(store (v2i32 FPR64:$Rt),
3354                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3355            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3356  def : Pat<(store (v4f16 FPR64:$Rt),
3357                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3358            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3359  def : Pat<(store (v4bf16 FPR64:$Rt),
3360                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
3361            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
3362}
3363
3364// Match all 128-bit-wide stores whose type is compatible with FPR128
3365def : Pat<(store (f128  FPR128:$Rt),
3366                 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3367          (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3368
3369let Predicates = [IsLE] in {
3370  // Big-endian must use ST1 to store vectors, so these patterns are LE-only.
3371  def : Pat<(store (v4f32 FPR128:$Rt),
3372                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3373            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3374  def : Pat<(store (v2f64 FPR128:$Rt),
3375                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3376            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3377  def : Pat<(store (v16i8 FPR128:$Rt),
3378                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3379            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3380  def : Pat<(store (v8i16 FPR128:$Rt),
3381                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3382            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3383  def : Pat<(store (v4i32 FPR128:$Rt),
3384                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3385            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3386  def : Pat<(store (v2i64 FPR128:$Rt),
3387                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3388            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3389  def : Pat<(store (v8f16 FPR128:$Rt),
3390                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3391            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3392  def : Pat<(store (v8bf16 FPR128:$Rt),
3393                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
3394            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
3395}
3396
3397// truncstore i64
3398def : Pat<(truncstorei32 GPR64:$Rt,
3399                         (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
3400  (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
3401def : Pat<(truncstorei16 GPR64:$Rt,
3402                         (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
3403  (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
3404def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
3405  (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;
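// Illustrative example (assumed selection): for a 32-bit truncating store of
// an i64 value, the patterns above store the sub_32 subregister directly,
// e.g. "str w1, [x0]" with w1 being the low half of x1, so no separate
// truncation instruction is needed.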
3406
3407} // AddedComplexity = 10
3408
3409// Map stores of lane 0 onto scalar stores of the appropriate subregister.
3410multiclass VecStoreLane0Pat<ComplexPattern UIAddrMode, SDPatternOperator storeop,
3411                            ValueType VTy, ValueType STy,
3412                            SubRegIndex SubRegIdx, Operand IndexType,
3413                            Instruction STR> {
3414  def : Pat<(storeop (STy (vector_extract (VTy VecListOne128:$Vt), 0)),
3415                     (UIAddrMode GPR64sp:$Rn, IndexType:$offset)),
3416            (STR (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
3417                 GPR64sp:$Rn, IndexType:$offset)>;
3418}
3419
3420let AddedComplexity = 19 in {
3421  defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>;
3422  defm : VecStoreLane0Pat<am_indexed16,         store, v8f16, f16, hsub, uimm12s2, STRHui>;
3423  defm : VecStoreLane0Pat<am_indexed32,         store, v4i32, i32, ssub, uimm12s4, STRSui>;
3424  defm : VecStoreLane0Pat<am_indexed32,         store, v4f32, f32, ssub, uimm12s4, STRSui>;
3425  defm : VecStoreLane0Pat<am_indexed64,         store, v2i64, i64, dsub, uimm12s8, STRDui>;
3426  defm : VecStoreLane0Pat<am_indexed64,         store, v2f64, f64, dsub, uimm12s8, STRDui>;
3427}
3428
3429//---
3430// (unscaled immediate)
3431defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64z, "stur",
3432                         [(store GPR64z:$Rt,
3433                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
3434defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32z, "stur",
3435                         [(store GPR32z:$Rt,
3436                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
3437defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8Op, "stur",
3438                         [(store FPR8Op:$Rt,
3439                                 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
3440defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16Op, "stur",
3441                         [(store (f16 FPR16Op:$Rt),
3442                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
3443defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32Op, "stur",
3444                         [(store (f32 FPR32Op:$Rt),
3445                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
3446defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64Op, "stur",
3447                         [(store (f64 FPR64Op:$Rt),
3448                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
3449defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128Op, "stur",
3450                         [(store (f128 FPR128Op:$Rt),
3451                                 (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
3452defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32z, "sturh",
3453                         [(truncstorei16 GPR32z:$Rt,
3454                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
3455defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32z, "sturb",
3456                         [(truncstorei8 GPR32z:$Rt,
3457                                  (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
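// For reference (our summary): the scaled STR forms encode only non-negative,
// size-aligned immediates, while these STUR forms take a signed 9-bit byte
// offset, e.g. "stur x1, [x0, #-8]" for an offset STR cannot encode.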
3458
3459// Armv8.4 Weaker Release Consistency enhancements
3460//         LDAPR & STLR with Immediate Offset instructions
3461let Predicates = [HasRCPC_IMMO] in {
3462defm STLURB     : BaseStoreUnscaleV84<"stlurb",  0b00, 0b00, GPR32>;
3463defm STLURH     : BaseStoreUnscaleV84<"stlurh",  0b01, 0b00, GPR32>;
3464defm STLURW     : BaseStoreUnscaleV84<"stlur",   0b10, 0b00, GPR32>;
3465defm STLURX     : BaseStoreUnscaleV84<"stlur",   0b11, 0b00, GPR64>;
3466defm LDAPURB    : BaseLoadUnscaleV84<"ldapurb",  0b00, 0b01, GPR32>;
3467defm LDAPURSBW  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b11, GPR32>;
3468defm LDAPURSBX  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b10, GPR64>;
3469defm LDAPURH    : BaseLoadUnscaleV84<"ldapurh",  0b01, 0b01, GPR32>;
3470defm LDAPURSHW  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b11, GPR32>;
3471defm LDAPURSHX  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b10, GPR64>;
3472defm LDAPUR     : BaseLoadUnscaleV84<"ldapur",   0b10, 0b01, GPR32>;
3473defm LDAPURSW   : BaseLoadUnscaleV84<"ldapursw", 0b10, 0b10, GPR64>;
3474defm LDAPURX    : BaseLoadUnscaleV84<"ldapur",   0b11, 0b01, GPR64>;
3475}
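// Illustrative use (not from the original source): these forms combine
// acquire/release semantics with a signed 9-bit offset, e.g.
//   ldapur w0, [x0, #-4]   // load-acquire from [x0 - 4]
//   stlur  w1, [x0, #4]    // store-release to  [x0 + 4]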
3476
3477// Match all 64-bit-wide stores whose type is compatible with FPR64
3478def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3479          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3480def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3481          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3482
3483let AddedComplexity = 10 in {
3484
3485let Predicates = [IsLE] in {
3486  // Big-endian must use ST1 to store vectors, so these patterns are LE-only.
3487  def : Pat<(store (v2f32 FPR64:$Rt),
3488                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3489            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3490  def : Pat<(store (v8i8 FPR64:$Rt),
3491                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3492            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3493  def : Pat<(store (v4i16 FPR64:$Rt),
3494                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3495            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3496  def : Pat<(store (v2i32 FPR64:$Rt),
3497                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3498            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3499  def : Pat<(store (v4f16 FPR64:$Rt),
3500                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3501            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3502  def : Pat<(store (v4bf16 FPR64:$Rt),
3503                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
3504            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3505}
3506
3507// Match all 128-bit-wide stores whose type is compatible with FPR128
3508def : Pat<(store (f128 FPR128:$Rt), (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3509          (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3510
3511let Predicates = [IsLE] in {
3512  // Big-endian must use ST1 to store vectors, so these patterns are LE-only.
3513  def : Pat<(store (v4f32 FPR128:$Rt),
3514                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3515            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3516  def : Pat<(store (v2f64 FPR128:$Rt),
3517                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3518            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3519  def : Pat<(store (v16i8 FPR128:$Rt),
3520                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3521            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3522  def : Pat<(store (v8i16 FPR128:$Rt),
3523                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3524            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3525  def : Pat<(store (v4i32 FPR128:$Rt),
3526                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3527            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3528  def : Pat<(store (v2i64 FPR128:$Rt),
3529                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3530            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3534  def : Pat<(store (v8f16 FPR128:$Rt),
3535                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3536            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3537  def : Pat<(store (v8bf16 FPR128:$Rt),
3538                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
3539            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
3540}
3541
3542} // AddedComplexity = 10
3543
3544// unscaled i64 truncating stores
3545def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
3546  (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
3547def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
3548  (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
3549def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
3550  (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
3551
3552// Map stores of lane 0 onto scalar stores of the appropriate subregister.
3553multiclass VecStoreULane0Pat<SDPatternOperator StoreOp,
3554                             ValueType VTy, ValueType STy,
3555                             SubRegIndex SubRegIdx, Instruction STR> {
3556  defm : VecStoreLane0Pat<am_unscaled128, StoreOp, VTy, STy, SubRegIdx, simm9, STR>;
3557}
3558
3559let AddedComplexity = 19 in {
3560  defm : VecStoreULane0Pat<truncstorei16, v8i16, i32, hsub, STURHi>;
3561  defm : VecStoreULane0Pat<store,         v8f16, f16, hsub, STURHi>;
3562  defm : VecStoreULane0Pat<store,         v4i32, i32, ssub, STURSi>;
3563  defm : VecStoreULane0Pat<store,         v4f32, f32, ssub, STURSi>;
3564  defm : VecStoreULane0Pat<store,         v2i64, i64, dsub, STURDi>;
3565  defm : VecStoreULane0Pat<store,         v2f64, f64, dsub, STURDi>;
3566}
3567
3568//---
3569// STR mnemonics fall back to STUR for negative or unaligned offsets.
3570def : InstAlias<"str $Rt, [$Rn, $offset]",
3571                (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
3572def : InstAlias<"str $Rt, [$Rn, $offset]",
3573                (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
3574def : InstAlias<"str $Rt, [$Rn, $offset]",
3575                (STURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3576def : InstAlias<"str $Rt, [$Rn, $offset]",
3577                (STURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
3578def : InstAlias<"str $Rt, [$Rn, $offset]",
3579                (STURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
3580def : InstAlias<"str $Rt, [$Rn, $offset]",
3581                (STURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
3582def : InstAlias<"str $Rt, [$Rn, $offset]",
3583                (STURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
3584
3585def : InstAlias<"strb $Rt, [$Rn, $offset]",
3586                (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
3587def : InstAlias<"strh $Rt, [$Rn, $offset]",
3588                (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
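// Illustrative example (assumed assembly behavior): with the aliases above,
// "str x0, [x1, #1]" cannot use the scaled STR encoding (the offset is not a
// multiple of 8), so the assembler falls back to the STUR encoding instead.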
3589
3590//---
3591// (unscaled immediate, unprivileged)
3592defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
3593defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;
3594
3595defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
3596defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;
3597
3598//---
3599// (immediate pre-indexed)
3600def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32z, "str",  pre_store, i32>;
3601def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64z, "str",  pre_store, i64>;
3602def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8Op,  "str",  pre_store, untyped>;
3603def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16Op, "str",  pre_store, f16>;
3604def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32Op, "str",  pre_store, f32>;
3605def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64Op, "str",  pre_store, f64>;
3606def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128Op, "str", pre_store, f128>;
3607
3608def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32z, "strb", pre_truncsti8,  i32>;
3609def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32z, "strh", pre_truncsti16, i32>;
3610
3611// truncstore i64
3612def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3613  (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3614           simm9:$off)>;
3615def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3616  (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3617            simm9:$off)>;
3618def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3619  (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3620            simm9:$off)>;
3621
3622def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3623          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3624def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3625          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3626def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3627          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3628def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3629          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3630def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3631          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3632def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3633          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3634def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3635          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3636
3637def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3638          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3639def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3640          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3641def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3642          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3643def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3644          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3645def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3646          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3647def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3648          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3649def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3650          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3651
3652//---
3653// (immediate post-indexed)
3654def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32z,  "str", post_store, i32>;
3655def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64z,  "str", post_store, i64>;
3656def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8Op,   "str", post_store, untyped>;
3657def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16Op,  "str", post_store, f16>;
3658def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32Op,  "str", post_store, f32>;
3659def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64Op,  "str", post_store, f64>;
3660def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128Op, "str", post_store, f128>;
3661
3662def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32z, "strb", post_truncsti8, i32>;
3663def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32z, "strh", post_truncsti16, i32>;
3664
3665// truncstore i64
3666def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3667  (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3668            simm9:$off)>;
3669def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3670  (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3671             simm9:$off)>;
3672def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
3673  (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
3674             simm9:$off)>;
3675
3676def : Pat<(post_store (bf16 FPR16:$Rt), GPR64sp:$addr, simm9:$off),
3677          (STRHpost FPR16:$Rt, GPR64sp:$addr, simm9:$off)>;
3678
3679def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3680          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3681def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3682          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3683def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3684          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3685def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3686          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3687def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3688          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3689def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3690          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3691def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3692          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3693def : Pat<(post_store (v4bf16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
3694          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
3695
3696def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3697          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3698def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3699          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3700def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3701          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3702def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3703          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3704def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3705          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3706def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3707          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3708def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3709          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
3710def : Pat<(post_store (v8bf16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
3711          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
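// Illustrative example (assumed selection): a store followed by a pointer
// increment, e.g. "*p++ = v" over f64 data, can fold into a single
// post-indexed store: "str d0, [x0], #8".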
3712
3713//===----------------------------------------------------------------------===//
3714// Load/store exclusive instructions.
3715//===----------------------------------------------------------------------===//
3716
3717def LDARW  : LoadAcquire   <0b10, 1, 1, 0, 1, GPR32, "ldar">;
3718def LDARX  : LoadAcquire   <0b11, 1, 1, 0, 1, GPR64, "ldar">;
3719def LDARB  : LoadAcquire   <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
3720def LDARH  : LoadAcquire   <0b01, 1, 1, 0, 1, GPR32, "ldarh">;
3721
3722def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
3723def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
3724def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
3725def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;
3726
3727def LDXRW  : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
3728def LDXRX  : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
3729def LDXRB  : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
3730def LDXRH  : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;
3731
3732def STLRW  : StoreRelease  <0b10, 1, 0, 0, 1, GPR32, "stlr">;
3733def STLRX  : StoreRelease  <0b11, 1, 0, 0, 1, GPR64, "stlr">;
3734def STLRB  : StoreRelease  <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
3735def STLRH  : StoreRelease  <0b01, 1, 0, 0, 1, GPR32, "stlrh">;
3736
3737def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
3738def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
3739def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
3740def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;
3741
3742def STXRW  : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
3743def STXRX  : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
3744def STXRB  : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
3745def STXRH  : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;
3746
3747def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
3748def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;
3749
3750def LDXPW  : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
3751def LDXPX  : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;
3752
3753def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
3754def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;
3755
3756def STXPW  : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
3757def STXPX  : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;
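// Illustrative use (a sketch, not from the original source): an atomic
// read-modify-write is typically built as a load-exclusive/store-exclusive
// retry loop, e.g. an atomic add with acquire/release semantics:
//   1: ldaxr x8, [x0]
//      add   x8, x8, x1
//      stlxr w9, x8, [x0]
//      cbnz  w9, 1b        // nonzero status: exclusivity was lost, retry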
3758
3759let Predicates = [HasLOR] in {
3760  // v8.1a "Limited Order Region" extension load-acquire instructions
3761  def LDLARW  : LoadAcquire   <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
3762  def LDLARX  : LoadAcquire   <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
3763  def LDLARB  : LoadAcquire   <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
3764  def LDLARH  : LoadAcquire   <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;
3765
3766  // v8.1a "Limited Order Region" extension store-release instructions
3767  def STLLRW  : StoreRelease   <0b10, 1, 0, 0, 0, GPR32, "stllr">;
3768  def STLLRX  : StoreRelease   <0b11, 1, 0, 0, 0, GPR64, "stllr">;
3769  def STLLRB  : StoreRelease   <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
3770  def STLLRH  : StoreRelease   <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
3771}
3772
3773//===----------------------------------------------------------------------===//
3774// Scaled floating point to integer conversion instructions.
3775//===----------------------------------------------------------------------===//
3776
3777defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
3778defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
3779defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
3780defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
3781defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
3782defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
3783defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
3784defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
3785defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
3786defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
3787defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
3788defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
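// Illustrative example (assumed selection): the scaled forms convert from a
// fixed-point value by folding a power-of-two multiply, e.g.
//   (i32 (fp_to_sint (fmul f32:$x, 65536.0)))  ==>  fcvtzs w0, s0, #16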
3789
3790// AArch64's FP-to-integer FCVT instructions saturate when out of range.
3791multiclass FPToIntegerSatPats<SDNode to_int_sat, string INST> {
3792  let Predicates = [HasFullFP16] in {
3793  def : Pat<(i32 (to_int_sat f16:$Rn, i32)),
3794            (!cast<Instruction>(INST # UWHr) f16:$Rn)>;
3795  def : Pat<(i64 (to_int_sat f16:$Rn, i64)),
3796            (!cast<Instruction>(INST # UXHr) f16:$Rn)>;
3797  }
3798  def : Pat<(i32 (to_int_sat f32:$Rn, i32)),
3799            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
3800  def : Pat<(i64 (to_int_sat f32:$Rn, i64)),
3801            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
3802  def : Pat<(i32 (to_int_sat f64:$Rn, i32)),
3803            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
3804  def : Pat<(i64 (to_int_sat f64:$Rn, i64)),
3805            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
3806
3807  let Predicates = [HasFullFP16] in {
3808  def : Pat<(i32 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i32:$scale), i32)),
3809            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
3810  def : Pat<(i64 (to_int_sat (fmul f16:$Rn, fixedpoint_f16_i64:$scale), i64)),
3811            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
3812  }
3813  def : Pat<(i32 (to_int_sat (fmul f32:$Rn, fixedpoint_f32_i32:$scale), i32)),
3814            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
3815  def : Pat<(i64 (to_int_sat (fmul f32:$Rn, fixedpoint_f32_i64:$scale), i64)),
3816            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
3817  def : Pat<(i32 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i32:$scale), i32)),
3818            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
3819  def : Pat<(i64 (to_int_sat (fmul f64:$Rn, fixedpoint_f64_i64:$scale), i64)),
3820            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
3821}
3822
3823defm : FPToIntegerSatPats<fp_to_sint_sat, "FCVTZS">;
3824defm : FPToIntegerSatPats<fp_to_uint_sat, "FCVTZU">;
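// Illustrative example (assumed lowering): because the instructions already
// saturate, "llvm.fptosi.sat.i32.f32(%x)" lowers to a single "fcvtzs w0, s0"
// with no extra clamping code.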
3825
3826multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
3827  let Predicates = [HasFullFP16] in {
3828  def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
3829  def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
3830  }
3831  def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
3832  def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
3833  def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
3834  def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;
3835
3836  let Predicates = [HasFullFP16] in {
3837  def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
3838            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
3839  def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
3840            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
3841  }
3842  def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
3843            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
3844  def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
3845            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
3846  def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
3847            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
3848  def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
3849            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
3850}
3851
3852defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
3853defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;
3854
3855multiclass FPToIntegerPats<SDNode to_int, SDNode to_int_sat, SDNode round, string INST> {
3856  def : Pat<(i32 (to_int (round f32:$Rn))),
3857            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
3858  def : Pat<(i64 (to_int (round f32:$Rn))),
3859            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
3860  def : Pat<(i32 (to_int (round f64:$Rn))),
3861            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
3862  def : Pat<(i64 (to_int (round f64:$Rn))),
3863            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
3864
3865  // These instructions saturate like fp_to_[su]int_sat.
3866  let Predicates = [HasFullFP16] in {
3867  def : Pat<(i32 (to_int_sat (round f16:$Rn), i32)),
3868            (!cast<Instruction>(INST # UWHr) f16:$Rn)>;
3869  def : Pat<(i64 (to_int_sat (round f16:$Rn), i64)),
3870            (!cast<Instruction>(INST # UXHr) f16:$Rn)>;
3871  }
3872  def : Pat<(i32 (to_int_sat (round f32:$Rn), i32)),
3873            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
3874  def : Pat<(i64 (to_int_sat (round f32:$Rn), i64)),
3875            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
3876  def : Pat<(i32 (to_int_sat (round f64:$Rn), i32)),
3877            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
3878  def : Pat<(i64 (to_int_sat (round f64:$Rn), i64)),
3879            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
3880}
3881
3882defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fceil,  "FCVTPS">;
3883defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fceil,  "FCVTPU">;
3884defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, ffloor, "FCVTMS">;
3885defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, ffloor, "FCVTMU">;
3886defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, ftrunc, "FCVTZS">;
3887defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, ftrunc, "FCVTZU">;
3888defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fround, "FCVTAS">;
3889defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fround, "FCVTAU">;
3890
3893let Predicates = [HasFullFP16] in {
3894  def : Pat<(i32 (lround f16:$Rn)),
3895            (!cast<Instruction>(FCVTASUWHr) f16:$Rn)>;
3896  def : Pat<(i64 (lround f16:$Rn)),
3897            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
3898  def : Pat<(i64 (llround f16:$Rn)),
3899            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
3900}
3901def : Pat<(i32 (lround f32:$Rn)),
3902          (!cast<Instruction>(FCVTASUWSr) f32:$Rn)>;
3903def : Pat<(i32 (lround f64:$Rn)),
3904          (!cast<Instruction>(FCVTASUWDr) f64:$Rn)>;
3905def : Pat<(i64 (lround f32:$Rn)),
3906          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
3907def : Pat<(i64 (lround f64:$Rn)),
3908          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
3909def : Pat<(i64 (llround f32:$Rn)),
3910          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
3911def : Pat<(i64 (llround f64:$Rn)),
3912          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
3913
3914//===----------------------------------------------------------------------===//
3915// Scaled integer to floating point conversion instructions.
3916//===----------------------------------------------------------------------===//
3917
3918defm SCVTF : IntegerToFP<0, "scvtf", any_sint_to_fp>;
3919defm UCVTF : IntegerToFP<1, "ucvtf", any_uint_to_fp>;
3920
3921//===----------------------------------------------------------------------===//
3922// Unscaled integer to floating point conversion instruction.
3923//===----------------------------------------------------------------------===//
3924
3925defm FMOV : UnscaledConversion<"fmov">;
3926
3927// Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
3928let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
3929def FMOVH0 : Pseudo<(outs FPR16:$Rd), (ins), [(set f16:$Rd, (fpimm0))]>,
3930    Sched<[WriteF]>, Requires<[HasFullFP16]>;
3931def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
3932    Sched<[WriteF]>;
3933def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
3934    Sched<[WriteF]>;
3935}
3936// Similarly, add "fmov $Rd, #0.0" aliases that use the zero register.
3937def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
3938    Requires<[HasFullFP16]>;
3939def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
3940def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;
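// Illustrative example (assumed assembly behavior): with these aliases,
// "fmov d0, #0.0" assembles as "fmov d0, xzr", since +0.0 is not encodable
// as an FMOV (immediate) operand.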
3941
3942//===----------------------------------------------------------------------===//
3943// Floating point conversion instruction.
3944//===----------------------------------------------------------------------===//
3945
3946defm FCVT : FPConversion<"fcvt">;
3947
3948//===----------------------------------------------------------------------===//
3949// Floating point single operand instructions.
3950//===----------------------------------------------------------------------===//
3951
3952defm FABS   : SingleOperandFPData<0b0001, "fabs", fabs>;
3953defm FMOV   : SingleOperandFPData<0b0000, "fmov">;
3954defm FNEG   : SingleOperandFPData<0b0010, "fneg", fneg>;
3955defm FRINTA : SingleOperandFPData<0b1100, "frinta", fround>;
3956defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
3957defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
3958defm FRINTN : SingleOperandFPData<0b1000, "frintn", froundeven>;
3959defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;
3960
3961defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
3962defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;
3963
3964let SchedRW = [WriteFDiv] in {
3965defm FSQRT  : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
3966}
3967
3968let Predicates = [HasFRInt3264] in {
3969  defm FRINT32Z : FRIntNNT<0b00, "frint32z", int_aarch64_frint32z>;
3970  defm FRINT64Z : FRIntNNT<0b10, "frint64z", int_aarch64_frint64z>;
3971  defm FRINT32X : FRIntNNT<0b01, "frint32x", int_aarch64_frint32x>;
3972  defm FRINT64X : FRIntNNT<0b11, "frint64x", int_aarch64_frint64x>;
3973} // HasFRInt3264
3974
3975let Predicates = [HasFullFP16] in {
3976  def : Pat<(i32 (lrint f16:$Rn)),
3977            (FCVTZSUWHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
3978  def : Pat<(i64 (lrint f16:$Rn)),
3979            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
3980  def : Pat<(i64 (llrint f16:$Rn)),
3981            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
3982}
3983def : Pat<(i32 (lrint f32:$Rn)),
3984          (FCVTZSUWSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
3985def : Pat<(i32 (lrint f64:$Rn)),
3986          (FCVTZSUWDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
3987def : Pat<(i64 (lrint f32:$Rn)),
3988          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
3989def : Pat<(i64 (lrint f64:$Rn)),
3990          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
3991def : Pat<(i64 (llrint f32:$Rn)),
3992          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
3993def : Pat<(i64 (llrint f64:$Rn)),
3994          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
3995
3996//===----------------------------------------------------------------------===//
3997// Floating point two operand instructions.
3998//===----------------------------------------------------------------------===//
3999
4000defm FADD   : TwoOperandFPData<0b0010, "fadd", fadd>;
4001let SchedRW = [WriteFDiv] in {
4002defm FDIV   : TwoOperandFPData<0b0001, "fdiv", fdiv>;
4003}
4004defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", fmaxnum>;
4005defm FMAX   : TwoOperandFPData<0b0100, "fmax", fmaximum>;
4006defm FMINNM : TwoOperandFPData<0b0111, "fminnm", fminnum>;
4007defm FMIN   : TwoOperandFPData<0b0101, "fmin", fminimum>;
4008let SchedRW = [WriteFMul] in {
4009defm FMUL   : TwoOperandFPData<0b0000, "fmul", fmul>;
4010defm FNMUL  : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
4011}
4012defm FSUB   : TwoOperandFPData<0b0011, "fsub", fsub>;
4013
4014def : Pat<(v1f64 (fmaximum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
4015          (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
4016def : Pat<(v1f64 (fminimum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
4017          (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
4018def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
4019          (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
4020def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
4021          (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;
4022
4023//===----------------------------------------------------------------------===//
4024// Floating point three operand instructions.
4025//===----------------------------------------------------------------------===//
4026
4027defm FMADD  : ThreeOperandFPData<0, 0, "fmadd", fma>;
4028defm FMSUB  : ThreeOperandFPData<0, 1, "fmsub",
4029     TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
4030defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
4031     TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
4032defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
4033     TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;
4034
4035// The following def pats catch the case where the LHS of an FMA is negated.
4036// The TriOpFrag above catches the case where the middle operand is negated.
4037
4038// N.b. FMSUB etc. have the accumulator at the *end* of the operand list,
4039// unlike the NEON variant.
4040
4041// Here we first handle a + (-b)*c, which maps to FMSUB:
4042
4043let Predicates = [HasNEON, HasFullFP16] in
4044def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, FPR16:$Ra)),
4045          (FMSUBHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;
4046
4047def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
4048          (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
4049
4050def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
4051          (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
4052
4053// Now handle (-a) + (-b)*c, which maps to FNMADD:
4054
4055let Predicates = [HasNEON, HasFullFP16] in
4056def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, (fneg FPR16:$Ra))),
4057          (FNMADDHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;
4058
4059def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
4060          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
4061
4062def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
4063          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
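// Worked check (our note): FNMADD computes -(Rn * Rm) - Ra, so
//   fma(-a, b, -c) = (-a)*b + (-c) = -(a*b) - c = FNMADD(a, b, c),
// which is exactly what the patterns above select.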
4064
4065//===----------------------------------------------------------------------===//
4066// Floating point comparison instructions.
4067//===----------------------------------------------------------------------===//
4068
4069defm FCMPE : FPComparison<1, "fcmpe", AArch64strict_fcmpe>;
4070defm FCMP  : FPComparison<0, "fcmp", AArch64any_fcmp>;
4071
4072//===----------------------------------------------------------------------===//
4073// Floating point conditional comparison instructions.
4074//===----------------------------------------------------------------------===//
4075
4076defm FCCMPE : FPCondComparison<1, "fccmpe">;
4077defm FCCMP  : FPCondComparison<0, "fccmp", AArch64fccmp>;
4078
4079//===----------------------------------------------------------------------===//
4080// Floating point conditional select instruction.
4081//===----------------------------------------------------------------------===//
4082
4083defm FCSEL : FPCondSelect<"fcsel">;
4084
4085// CSEL instructions providing f128 types need to be handled by a
4086// pseudo-instruction since the eventual code will need to introduce basic
4087// blocks and control flow.
4088def F128CSEL : Pseudo<(outs FPR128:$Rd),
4089                      (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
4090                      [(set (f128 FPR128:$Rd),
4091                            (AArch64csel FPR128:$Rn, FPR128:$Rm,
4092                                       (i32 imm:$cond), NZCV))]> {
4093  let Uses = [NZCV];
4094  let usesCustomInserter = 1;
4095  let hasNoSchedulingInfo = 1;
4096}
4097
4098//===----------------------------------------------------------------------===//
4099// Instructions used for emitting unwind opcodes on ARM64 Windows.
4100//===----------------------------------------------------------------------===//
4101let isPseudo = 1 in {
4102  def SEH_StackAlloc : Pseudo<(outs), (ins i32imm:$size), []>, Sched<[]>;
4103  def SEH_SaveFPLR : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
4104  def SEH_SaveFPLR_X : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
4105  def SEH_SaveReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
4106  def SEH_SaveReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
4107  def SEH_SaveRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
4108  def SEH_SaveRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
4109  def SEH_SaveFReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
4110  def SEH_SaveFReg_X :  Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
4111  def SEH_SaveFRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
4112  def SEH_SaveFRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
4113  def SEH_SetFP : Pseudo<(outs), (ins), []>, Sched<[]>;
4114  def SEH_AddFP : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
4115  def SEH_Nop : Pseudo<(outs), (ins), []>, Sched<[]>;
4116  def SEH_PrologEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
4117  def SEH_EpilogStart : Pseudo<(outs), (ins), []>, Sched<[]>;
4118  def SEH_EpilogEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
4119}
4120
4121//===----------------------------------------------------------------------===//
4122// Pseudo instructions for Windows EH
4123let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
4124    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1, isPseudo = 1 in {
4125   def CLEANUPRET : Pseudo<(outs), (ins), [(cleanupret)]>, Sched<[]>;
4126   let usesCustomInserter = 1 in
4127     def CATCHRET : Pseudo<(outs), (ins am_brcond:$dst, am_brcond:$src), [(catchret bb:$dst, bb:$src)]>,
4128                    Sched<[]>;
4129}
4130
4131// Pseudo instructions for homogeneous prolog/epilog
4132let isPseudo = 1 in {
4133  // Save CSRs in order, {FPOffset}
4134  def HOM_Prolog : Pseudo<(outs), (ins variable_ops), []>, Sched<[]>;
4135  // Restore CSRs in order
4136  def HOM_Epilog : Pseudo<(outs), (ins variable_ops), []>, Sched<[]>;
4137}
4138
4139//===----------------------------------------------------------------------===//
4140// Floating point immediate move.
4141//===----------------------------------------------------------------------===//
4142
4143let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
4144defm FMOV : FPMoveImmediate<"fmov">;
4145}
4146
4147//===----------------------------------------------------------------------===//
4148// Advanced SIMD two vector instructions.
4149//===----------------------------------------------------------------------===//
4150
4151defm UABDL   : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
4152                                          AArch64uabd>;
4153// Match UABDL in log2-shuffle patterns.
4154def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
4155                           (zext (v8i8 V64:$opB))))),
4156          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
4157def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
4158               (v8i16 (add (sub (zext (v8i8 V64:$opA)),
4159                                (zext (v8i8 V64:$opB))),
4160                           (AArch64vashr v8i16:$src, (i32 15))))),
4161          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
4162def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 V128:$opA)),
4163                           (zext (extract_high_v16i8 V128:$opB))))),
4164          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
4165def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
4166               (v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
4167                                (zext (extract_high_v16i8 V128:$opB))),
4168                           (AArch64vashr v8i16:$src, (i32 15))))),
4169          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
4170def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
4171                           (zext (v4i16 V64:$opB))))),
4172          (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
4173def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 V128:$opA)),
4174                           (zext (extract_high_v8i16 V128:$opB))))),
4175          (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
4176def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
4177                           (zext (v2i32 V64:$opB))))),
4178          (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
4179def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 V128:$opA)),
4180                           (zext (extract_high_v4i32 V128:$opB))))),
4181          (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;
4182
4183defm ABS    : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
4184defm CLS    : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
4185defm CLZ    : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
4186defm CMEQ   : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
4187defm CMGE   : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
4188defm CMGT   : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
4189defm CMLE   : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
4190defm CMLT   : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
4191defm CNT    : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
4192defm FABS   : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;
4193
4194def : Pat<(v8i8 (AArch64vashr (v8i8 V64:$Rn), (i32 7))),
4195          (CMLTv8i8rz V64:$Rn)>;
4196def : Pat<(v4i16 (AArch64vashr (v4i16 V64:$Rn), (i32 15))),
4197          (CMLTv4i16rz V64:$Rn)>;
4198def : Pat<(v2i32 (AArch64vashr (v2i32 V64:$Rn), (i32 31))),
4199          (CMLTv2i32rz V64:$Rn)>;
4200def : Pat<(v16i8 (AArch64vashr (v16i8 V128:$Rn), (i32 7))),
4201          (CMLTv16i8rz V128:$Rn)>;
4202def : Pat<(v8i16 (AArch64vashr (v8i16 V128:$Rn), (i32 15))),
4203          (CMLTv8i16rz V128:$Rn)>;
4204def : Pat<(v4i32 (AArch64vashr (v4i32 V128:$Rn), (i32 31))),
4205          (CMLTv4i32rz V128:$Rn)>;
4206def : Pat<(v2i64 (AArch64vashr (v2i64 V128:$Rn), (i32 63))),
4207          (CMLTv2i64rz V128:$Rn)>;
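// Illustrative example (assumed selection): an arithmetic shift right by
// (element width - 1) broadcasts each element's sign bit, the same result as
// a less-than-zero compare, e.g. for v4i32:
//   sshr v0.4s, v1.4s, #31   ==>   cmlt v0.4s, v1.4s, #0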
4208
4209defm FCMEQ  : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
4210defm FCMGE  : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
4211defm FCMGT  : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
4212defm FCMLE  : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
4213defm FCMLT  : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
4214defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
4215defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
4216defm FCVTL  : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
4217def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
4218          (FCVTLv4i16 V64:$Rn)>;
4219def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
4220                                                              (i64 4)))),
4221          (FCVTLv8i16 V128:$Rn)>;
4222def : Pat<(v2f64 (fpextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;
4223
4224def : Pat<(v4f32 (fpextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;
4225
4226defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
4227defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
4228defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
4229defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
4230defm FCVTN  : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
4231def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
4232          (FCVTNv4i16 V128:$Rn)>;
4233def : Pat<(concat_vectors V64:$Rd,
4234                          (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
4235          (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
4236def : Pat<(v2f32 (fpround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
4237def : Pat<(v4f16 (fpround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
4238def : Pat<(concat_vectors V64:$Rd, (v2f32 (fpround (v2f64 V128:$Rn)))),
4239          (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
4240defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
4241defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
4242defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
4243                                        int_aarch64_neon_fcvtxn>;
4244defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
4245defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;
4246
4247// AArch64's FP-to-integer FCVT instructions saturate when out of range.
4248multiclass SIMDTwoVectorFPToIntSatPats<SDNode to_int_sat, string INST> {
4249  def : Pat<(v4i16 (to_int_sat v4f16:$Rn, i16)),
4250            (!cast<Instruction>(INST # v4f16) v4f16:$Rn)>;
4251  def : Pat<(v8i16 (to_int_sat v8f16:$Rn, i16)),
4252            (!cast<Instruction>(INST # v8f16) v8f16:$Rn)>;
4253  def : Pat<(v2i32 (to_int_sat v2f32:$Rn, i32)),
4254            (!cast<Instruction>(INST # v2f32) v2f32:$Rn)>;
4255  def : Pat<(v4i32 (to_int_sat v4f32:$Rn, i32)),
4256            (!cast<Instruction>(INST # v4f32) v4f32:$Rn)>;
4257  def : Pat<(v2i64 (to_int_sat v2f64:$Rn, i64)),
4258            (!cast<Instruction>(INST # v2f64) v2f64:$Rn)>;
4259}
4260defm : SIMDTwoVectorFPToIntSatPats<fp_to_sint_sat, "FCVTZS">;
4261defm : SIMDTwoVectorFPToIntSatPats<fp_to_uint_sat, "FCVTZU">;
4262
4263def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
4264def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
4265def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
4266def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
4267def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;
4268
4269def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
4270def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
4271def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
4272def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
4273def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;
4274
4275defm FNEG   : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
4276defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
4277defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", fround>;
4278defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
4279defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
4280defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", froundeven>;
4281defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
4282defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
4283defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;
4284
4285let Predicates = [HasFRInt3264] in {
4286  defm FRINT32Z : FRIntNNTVector<0, 0, "frint32z", int_aarch64_neon_frint32z>;
4287  defm FRINT64Z : FRIntNNTVector<0, 1, "frint64z", int_aarch64_neon_frint64z>;
4288  defm FRINT32X : FRIntNNTVector<1, 0, "frint32x", int_aarch64_neon_frint32x>;
4289  defm FRINT64X : FRIntNNTVector<1, 1, "frint64x", int_aarch64_neon_frint64x>;
4290} // HasFRInt3264
4291
4292defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
4293defm FSQRT  : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
4294defm NEG    : SIMDTwoVectorBHSD<1, 0b01011, "neg",
4295                               UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
4296defm NOT    : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
4297// Aliases for MVN -> NOT.
4298def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
4299                (NOTv8i8 V64:$Vd, V64:$Vn)>;
4300def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
4301                (NOTv16i8 V128:$Vd, V128:$Vn)>;
4302
4303def : Pat<(vnot (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
4304def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
4305def : Pat<(vnot (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
4306def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
4307def : Pat<(vnot (v1i64 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
4308def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
4309
4310defm RBIT   : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", bitreverse>;
4311defm REV16  : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
4312defm REV32  : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
4313defm REV64  : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
4314defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
4315       BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >;
4316defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>;
4317defm SCVTF  : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
4318defm SHLL   : SIMDVectorLShiftLongBySizeBHS;
4319defm SQABS  : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
4320defm SQNEG  : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
4321defm SQXTN  : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
4322defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
4323defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
4324defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
4325       BinOpFrag<(add node:$LHS, (AArch64uaddlp node:$RHS))> >;
4326defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp", AArch64uaddlp>;
4327defm UCVTF  : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
4328defm UQXTN  : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
4329defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
4330defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
4331defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
4332defm XTN    : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;
4333
4334def : Pat<(v4f16  (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
4335def : Pat<(v4f16  (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
4336def : Pat<(v4bf16 (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
4337def : Pat<(v4bf16 (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
4338def : Pat<(v8f16  (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
4339def : Pat<(v8f16  (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
4340def : Pat<(v8bf16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
4341def : Pat<(v8bf16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
4342def : Pat<(v2f32  (AArch64rev64 V64:$Rn)),  (REV64v2i32 V64:$Rn)>;
4343def : Pat<(v4f32  (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;
4344
4345// Patterns for vector long shift (by element width). These need to match all
4346// three of zext, sext and anyext, so it is easier to pull the patterns out
4347// into a multiclass than to write them inline in the definition.
multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
  def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
            (SHLLv8i8 V64:$Rn)>;
  def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
            (SHLLv16i8 V128:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
            (SHLLv4i16 V64:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
            (SHLLv8i16 V128:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
            (SHLLv2i32 V64:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
            (SHLLv4i32 V128:$Rn)>;
}

defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
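// For example (a sketch, not an exhaustive list): "shll v0.8h, v1.8b, #8" is
// selected for the DAG formed from IR like
// "shl (zext <8 x i8> %v to <8 x i16>), 8"; the same holds with sext/anyext.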

// Constant vector values, used in the S/UQXTN patterns below.
def VImmFF:   PatLeaf<(AArch64NvCast (v2i64 (AArch64movi_edit (i32 85))))>;
def VImmFFFF: PatLeaf<(AArch64NvCast (v2i64 (AArch64movi_edit (i32 51))))>;
def VImm7F:   PatLeaf<(AArch64movi_shift (i32 127), (i32 0))>;
def VImm80:   PatLeaf<(AArch64mvni_shift (i32 127), (i32 0))>;
def VImm7FFF: PatLeaf<(AArch64movi_msl (i32 127), (i32 264))>;
def VImm8000: PatLeaf<(AArch64mvni_msl (i32 127), (i32 264))>;
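// As used below: VImmFF is 255 in each i16 lane, VImmFFFF is 65535 in each i32
// lane, VImm7F/VImm80 are 127/-128 in each i16 lane, and VImm7FFF/VImm8000 are
// 32767/-32768 in each i32 lane.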

// trunc(umin(X, 255)) -> UQXTN v8i8
def : Pat<(v8i8 (trunc (umin (v8i16 V128:$Vn), (v8i16 VImmFF)))),
          (UQXTNv8i8 V128:$Vn)>;
// trunc(umin(X, 65535)) -> UQXTN v4i16
def : Pat<(v4i16 (trunc (umin (v4i32 V128:$Vn), (v4i32 VImmFFFF)))),
          (UQXTNv4i16 V128:$Vn)>;
// trunc(smin(smax(X, -128), 127)) -> SQXTN
//  with reversed min/max
def : Pat<(v8i8 (trunc (smin (smax (v8i16 V128:$Vn), (v8i16 VImm80)),
                             (v8i16 VImm7F)))),
          (SQXTNv8i8 V128:$Vn)>;
def : Pat<(v8i8 (trunc (smax (smin (v8i16 V128:$Vn), (v8i16 VImm7F)),
                             (v8i16 VImm80)))),
          (SQXTNv8i8 V128:$Vn)>;
// trunc(smin(smax(X, -32768), 32767)) -> SQXTN
//  with reversed min/max
def : Pat<(v4i16 (trunc (smin (smax (v4i32 V128:$Vn), (v4i32 VImm8000)),
                              (v4i32 VImm7FFF)))),
          (SQXTNv4i16 V128:$Vn)>;
def : Pat<(v4i16 (trunc (smax (smin (v4i32 V128:$Vn), (v4i32 VImm7FFF)),
                              (v4i32 VImm8000)))),
          (SQXTNv4i16 V128:$Vn)>;

// concat_vectors(Vd, trunc(smin(smax(Vn, -128), 127))) ~> SQXTN2(Vd, Vn)
// with reversed min/max
def : Pat<(v16i8 (concat_vectors
                 (v8i8 V64:$Vd),
                 (v8i8 (trunc (smin (smax (v8i16 V128:$Vn), (v8i16 VImm80)),
                                          (v8i16 VImm7F)))))),
          (SQXTNv16i8 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;
def : Pat<(v16i8 (concat_vectors
                 (v8i8 V64:$Vd),
                 (v8i8 (trunc (smax (smin (v8i16 V128:$Vn), (v8i16 VImm7F)),
                                          (v8i16 VImm80)))))),
          (SQXTNv16i8 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;

// concat_vectors(Vd, trunc(smin(smax(Vn, -32768), 32767))) ~> SQXTN2(Vd, Vn)
// with reversed min/max
def : Pat<(v8i16 (concat_vectors
                 (v4i16 V64:$Vd),
                 (v4i16 (trunc (smin (smax (v4i32 V128:$Vn), (v4i32 VImm8000)),
                                           (v4i32 VImm7FFF)))))),
          (SQXTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;
def : Pat<(v8i16 (concat_vectors
                 (v4i16 V64:$Vd),
                 (v4i16 (trunc (smax (smin (v4i32 V128:$Vn), (v4i32 VImm7FFF)),
                                           (v4i32 VImm8000)))))),
          (SQXTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn)>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three vector instructions.
//===----------------------------------------------------------------------===//

defm ADD     : SIMDThreeSameVector<0, 0b10000, "add", add>;
defm ADDP    : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>;
defm CMEQ    : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE    : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT    : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI    : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS    : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST   : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
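// CMTST with both operands equal sets a lane to all-ones exactly when the lane
// is non-zero, so not(cmeq x, #0) can be selected to "cmtst x, x".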
foreach VT = [ v8i8, v16i8, v4i16, v8i16, v2i32, v4i32, v2i64 ] in {
def : Pat<(vnot (AArch64cmeqz VT:$Rn)), (!cast<Instruction>("CMTST"#VT) VT:$Rn, VT:$Rn)>;
}
defm FABD    : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>;
let Predicates = [HasNEON] in {
foreach VT = [ v2f32, v4f32, v2f64 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
let Predicates = [HasNEON, HasFullFP16] in {
foreach VT = [ v4f16, v8f16 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
defm FACGE   : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>;
defm FACGT   : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>;
defm FADDP   : SIMDThreeSameVectorFP<1,0,0b010,"faddp",int_aarch64_neon_faddp>;
defm FADD    : SIMDThreeSameVectorFP<0,0,0b010,"fadd", fadd>;
defm FCMEQ   : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE   : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT   : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FDIV    : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", fdiv>;
defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
defm FMAXNM  : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", fmaxnum>;
defm FMAXP   : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>;
defm FMAX    : SIMDThreeSameVectorFP<0,0,0b110,"fmax", fmaximum>;
defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>;
defm FMINNM  : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", fminnum>;
defm FMINP   : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
defm FMIN    : SIMDThreeSameVectorFP<0,1,0b110,"fmin", fminimum>;

// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
// instruction expects the addend first, while the fma intrinsic puts it last.
defm FMLA     : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
            TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm FMLS     : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls",
            TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
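// That is, (fma $Rn, $Rm, $Rd), which computes Rd + Rn*Rm, selects to
// "fmla Vd, Vn, Vm", where the tied destination Vd carries the addend.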

defm FMULX    : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
defm FMUL     : SIMDThreeSameVectorFP<1,0,0b011,"fmul", fmul>;
defm FRECPS   : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
defm FRSQRTS  : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
defm FSUB     : SIMDThreeSameVectorFP<0,1,0b010,"fsub", fsub>;

// MLA and MLS are generated in MachineCombine
defm MLA      : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla", null_frag>;
defm MLS      : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls", null_frag>;

defm MUL      : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
defm PMUL     : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
defm SABA     : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
      TriOpFrag<(add node:$LHS, (AArch64sabd node:$MHS, node:$RHS))> >;
defm SABD     : SIMDThreeSameVectorBHS<0,0b01110,"sabd", AArch64sabd>;
defm SHADD    : SIMDThreeSameVectorBHS<0,0b00000,"shadd", AArch64shadd>;
defm SHSUB    : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
defm SMAXP    : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
defm SMAX     : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
defm SMINP    : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
defm SMIN     : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
defm SQADD    : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
defm SRHADD   : SIMDThreeSameVectorBHS<0,0b00010,"srhadd", AArch64srhadd>;
defm SRSHL    : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeSameVector<1,0b10000,"sub", sub>;
defm UABA     : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
      TriOpFrag<(add node:$LHS, (AArch64uabd node:$MHS, node:$RHS))> >;
defm UABD     : SIMDThreeSameVectorBHS<1,0b01110,"uabd", AArch64uabd>;
defm UHADD    : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", AArch64uhadd>;
defm UHSUB    : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
defm UMAXP    : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
defm UMAX     : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
defm UMINP    : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
defm UMIN     : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
defm UQADD    : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
defm URHADD   : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", AArch64urhadd>;
defm URSHL    : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
                                                  int_aarch64_neon_sqrdmlah>;
defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
                                                  int_aarch64_neon_sqrdmlsh>;

// Extra saturation patterns, beyond the intrinsic matches above
defm : SIMDThreeSameVectorExtraPatterns<"SQADD", saddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQADD", uaddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"SQSUB", ssubsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQSUB", usubsat>;

defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
                                  BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
                                  BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;

// Pseudo bitwise select pattern BSP.
// It is expanded into BSL/BIT/BIF after register allocation.
defm BSP : SIMDLogicalThreeVectorPseudo<TriOpFrag<(or (and node:$LHS, node:$MHS),
                                                      (and (vnot node:$LHS), node:$RHS))>>;
defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl">;
defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
defm BIF : SIMDLogicalThreeVectorTied<1, 0b11, "bif">;
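// Which of the three is emitted depends on which source operand the register
// allocator ties to the destination: BSL overwrites the mask, while BIT and
// BIF overwrite one of the data operands.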

def : Pat<(AArch64bsp (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;

def : Pat<(AArch64bsp (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;

def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;

def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;

def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmls.8b\t$dst, $src1, $src2}",
                (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmls.16b\t$dst, $src1, $src2}",
                (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmls.4h\t$dst, $src1, $src2}",
                (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmls.8h\t$dst, $src1, $src2}",
                (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmls.2s\t$dst, $src1, $src2}",
                (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmls.4s\t$dst, $src1, $src2}",
                (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmls.2d\t$dst, $src1, $src2}",
                (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlo.8b\t$dst, $src1, $src2}",
                (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlo.16b\t$dst, $src1, $src2}",
                (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlo.4h\t$dst, $src1, $src2}",
                (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlo.8h\t$dst, $src1, $src2}",
                (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlo.2s\t$dst, $src1, $src2}",
                (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlo.4s\t$dst, $src1, $src2}",
                (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlo.2d\t$dst, $src1, $src2}",
                (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmle.8b\t$dst, $src1, $src2}",
                (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmle.16b\t$dst, $src1, $src2}",
                (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmle.4h\t$dst, $src1, $src2}",
                (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmle.8h\t$dst, $src1, $src2}",
                (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmle.2s\t$dst, $src1, $src2}",
                (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmle.4s\t$dst, $src1, $src2}",
                (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmle.2d\t$dst, $src1, $src2}",
                (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlt.8b\t$dst, $src1, $src2}",
                (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlt.16b\t$dst, $src1, $src2}",
                (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlt.4h\t$dst, $src1, $src2}",
                (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlt.8h\t$dst, $src1, $src2}",
                (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlt.2s\t$dst, $src1, $src2}",
                (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlt.4s\t$dst, $src1, $src2}",
                (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlt.2d\t$dst, $src1, $src2}",
                (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmle.4h\t$dst, $src1, $src2}",
                (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmle.8h\t$dst, $src1, $src2}",
                (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmle.2s\t$dst, $src1, $src2}",
                (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmle.4s\t$dst, $src1, $src2}",
                (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmle.2d\t$dst, $src1, $src2}",
                (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmlt.4h\t$dst, $src1, $src2}",
                (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmlt.8h\t$dst, $src1, $src2}",
                (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmlt.2s\t$dst, $src1, $src2}",
                (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmlt.4s\t$dst, $src1, $src2}",
                (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmlt.2d\t$dst, $src1, $src2}",
                (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
                "|facle.4h\t$dst, $src1, $src2}",
                (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
                "|facle.8h\t$dst, $src1, $src2}",
                (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
                "|facle.2s\t$dst, $src1, $src2}",
                (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
                "|facle.4s\t$dst, $src1, $src2}",
                (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
                "|facle.2d\t$dst, $src1, $src2}",
                (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
                "|faclt.4h\t$dst, $src1, $src2}",
                (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
                "|faclt.8h\t$dst, $src1, $src2}",
                (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
                "|faclt.2s\t$dst, $src1, $src2}",
                (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
                "|faclt.4s\t$dst, $src1, $src2}",
                (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
                "|faclt.2d\t$dst, $src1, $src2}",
                (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions.
//===----------------------------------------------------------------------===//

defm ADD      : SIMDThreeScalarD<0, 0b10000, "add", add>;
defm CMEQ     : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE     : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT     : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI     : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS     : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST    : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD     : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FABD64 FPR64:$Rn, FPR64:$Rm)>;
let Predicates = [HasFullFP16] in {
def : Pat<(fabs (fsub f16:$Rn, f16:$Rm)), (FABD16 f16:$Rn, f16:$Rm)>;
}
def : Pat<(fabs (fsub f32:$Rn, f32:$Rm)), (FABD32 f32:$Rn, f32:$Rm)>;
def : Pat<(fabs (fsub f64:$Rn, f64:$Rm)), (FABD64 f64:$Rn, f64:$Rm)>;
defm FACGE    : SIMDThreeScalarFPCmp<1, 0, 0b101, "facge",
                                     int_aarch64_neon_facge>;
defm FACGT    : SIMDThreeScalarFPCmp<1, 1, 0b101, "facgt",
                                     int_aarch64_neon_facgt>;
defm FCMEQ    : SIMDThreeScalarFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE    : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT    : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FMULX    : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx, HasNEONorStreamingSVE>;
defm FRECPS   : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps, HasNEONorStreamingSVE>;
defm FRSQRTS  : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts, HasNEONorStreamingSVE>;
defm SQADD    : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeScalarHS<  0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeScalarHS<  1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
defm SRSHL    : SIMDThreeScalarD<   0, 0b01010, "srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeScalarD<   0, 0b01000, "sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeScalarD<   1, 0b10000, "sub", sub>;
defm UQADD    : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
defm URSHL    : SIMDThreeScalarD<   1, 0b01010, "urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeScalarD<   1, 0b01000, "ushl", int_aarch64_neon_ushl>;
let Predicates = [HasRDM] in {
  defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
  defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
  def : Pat<(i32 (int_aarch64_neon_sqrdmlah (i32 FPR32:$Rd), (i32 FPR32:$Rn),
                                            (i32 FPR32:$Rm))),
            (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
  def : Pat<(i32 (int_aarch64_neon_sqrdmlsh (i32 FPR32:$Rd), (i32 FPR32:$Rn),
                                            (i32 FPR32:$Rm))),
            (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
}

def : InstAlias<"cmls $dst, $src1, $src2",
                (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmle $dst, $src1, $src2",
                (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlo $dst, $src1, $src2",
                (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlt $dst, $src1, $src2",
                (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions (mixed operands).
//===----------------------------------------------------------------------===//
defm SQDMULL  : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
                                       int_aarch64_neon_sqdmulls_scalar>;
defm SQDMLAL  : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
defm SQDMLSL  : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;

def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;

//===----------------------------------------------------------------------===//
// Advanced SIMD two scalar instructions.
//===----------------------------------------------------------------------===//

defm ABS    : SIMDTwoScalarD<    0, 0b01011, "abs", abs>;
defm CMEQ   : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
defm FCMEQ  : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDFPTwoScalar<   0, 0, 0b11100, "fcvtas">;
defm FCVTAU : SIMDFPTwoScalar<   1, 0, 0b11100, "fcvtau">;
defm FCVTMS : SIMDFPTwoScalar<   0, 0, 0b11011, "fcvtms">;
defm FCVTMU : SIMDFPTwoScalar<   1, 0, 0b11011, "fcvtmu">;
defm FCVTNS : SIMDFPTwoScalar<   0, 0, 0b11010, "fcvtns">;
defm FCVTNU : SIMDFPTwoScalar<   1, 0, 0b11010, "fcvtnu">;
defm FCVTPS : SIMDFPTwoScalar<   0, 1, 0b11010, "fcvtps">;
defm FCVTPU : SIMDFPTwoScalar<   1, 1, 0b11010, "fcvtpu">;
def  FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
defm FCVTZS : SIMDFPTwoScalar<   0, 1, 0b11011, "fcvtzs">;
defm FCVTZU : SIMDFPTwoScalar<   1, 1, 0b11011, "fcvtzu">;
defm FRECPE : SIMDFPTwoScalar<   0, 1, 0b11101, "frecpe", HasNEONorStreamingSVE>;
defm FRECPX : SIMDFPTwoScalar<   0, 1, 0b11111, "frecpx", HasNEONorStreamingSVE>;
defm FRSQRTE : SIMDFPTwoScalar<  1, 1, 0b11101, "frsqrte", HasNEONorStreamingSVE>;
defm NEG    : SIMDTwoScalarD<    1, 0b01011, "neg",
                                 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm SCVTF  : SIMDFPTwoScalarCVT<   0, 0, 0b11101, "scvtf", AArch64sitof>;
defm SQABS  : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
                                     int_aarch64_neon_suqadd>;
defm UCVTF  : SIMDFPTwoScalarCVT<   1, 0, 0b11101, "ucvtf", AArch64uitof>;
defm UQXTN  : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
                                    int_aarch64_neon_usqadd>;

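// An arithmetic shift right by 63 broadcasts the sign bit across the lane,
// which is exactly what "cmlt d0, d1, #0" computes.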
def : Pat<(v1i64 (AArch64vashr (v1i64 V64:$Rn), (i32 63))),
          (CMLTv1i64rz V64:$Rn)>;

def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
          (FCVTASv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
          (FCVTAUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
          (FCVTMSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
          (FCVTMUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
          (FCVTNSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
          (FCVTNUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
          (FCVTPSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
          (FCVTPUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtzs (v1f64 FPR64:$Rn))),
          (FCVTZSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtzu (v1f64 FPR64:$Rn))),
          (FCVTZUv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frecpe (f16 FPR16:$Rn))),
          (FRECPEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
          (FRECPEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
          (FRECPEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
          (FRECPEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frecps (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRECPS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frecps (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRECPSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frecps (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRECPSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frecps (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRECPS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frecps (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRECPSv2f64 FPR128:$Rn, FPR128:$Rm)>;

def : Pat<(f16 (int_aarch64_neon_frecpx (f16 FPR16:$Rn))),
          (FRECPXv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
          (FRECPXv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
          (FRECPXv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frsqrte (f16 FPR16:$Rn))),
          (FRSQRTEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
          (FRSQRTEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
          (FRSQRTEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
          (FRSQRTEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frsqrts (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRSQRTS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frsqrts (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRSQRTSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frsqrts (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRSQRTSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frsqrts (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRSQRTS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frsqrts (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRSQRTSv2f64 FPR128:$Rn, FPR128:$Rm)>;

// Some float -> int -> float conversion patterns where we want to keep the int
// values in FP registers, using the corresponding NEON instructions, to avoid
// more costly int <-> fp register transfers.
let Predicates = [HasNEON] in {
def : Pat<(f64 (sint_to_fp (i64 (fp_to_sint f64:$Rn)))),
          (SCVTFv1i64 (i64 (FCVTZSv1i64 f64:$Rn)))>;
def : Pat<(f32 (sint_to_fp (i32 (fp_to_sint f32:$Rn)))),
          (SCVTFv1i32 (i32 (FCVTZSv1i32 f32:$Rn)))>;
def : Pat<(f64 (uint_to_fp (i64 (fp_to_uint f64:$Rn)))),
          (UCVTFv1i64 (i64 (FCVTZUv1i64 f64:$Rn)))>;
def : Pat<(f32 (uint_to_fp (i32 (fp_to_uint f32:$Rn)))),
          (UCVTFv1i32 (i32 (FCVTZUv1i32 f32:$Rn)))>;

let Predicates = [HasFullFP16] in {
def : Pat<(f16 (sint_to_fp (i32 (fp_to_sint f16:$Rn)))),
          (SCVTFv1i16 (f16 (FCVTZSv1f16 f16:$Rn)))>;
def : Pat<(f16 (uint_to_fp (i32 (fp_to_uint f16:$Rn)))),
          (UCVTFv1i16 (f16 (FCVTZUv1f16 f16:$Rn)))>;
}
}
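// For example (a sketch), "(double)(int64_t)x" with x in a D register selects
// to "fcvtzs d0, d0" followed by "scvtf d0, d0" instead of round-tripping the
// intermediate integer through a GPR.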

// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// Here are the patterns for 8- and 16-bit integers to float.
// 8-bit -> float.
multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
                             SDPatternOperator loadop, Instruction UCVTF,
                             ROAddrMode ro, Instruction LDRW, Instruction LDRX,
                             SubRegIndex sub> {
  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                                 sub))>;

  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                                 sub))>;
}
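// For example, converting a zero-extended byte load to float becomes
// "ldr b0, [x0]" followed by "ucvtf s0, s0"; the value never visits a GPR.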

defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
                         UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f32 (uint_to_fp (i32
                     (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bit -> float.
defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
                         UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f32 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bit integers are handled in the target-specific dag combine
// performIntToFpCombine.
// Converting a 64-bit integer to a 32-bit float is not possible with UCVTF on
// floating point registers (source and destination must have the same size).

// Here are the patterns for 8-, 16-, 32-, and 64-bit integers to double.
// 8-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
                         UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f64 (uint_to_fp (i32
                    (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
                         UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, load,
                         UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
def : Pat <(f64 (uint_to_fp (i32
                  (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
// 64-bit -> double is handled in the target-specific dag combine
// performIntToFpCombine.

//===----------------------------------------------------------------------===//
// Advanced SIMD three different-sized vector instructions.
//===----------------------------------------------------------------------===//

defm ADDHN  : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
defm SUBHN  : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
defm PMULL  : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
defm SABAL  : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
                                             AArch64sabd>;
defm SABDL   : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
                                          AArch64sabd>;
defm SADDL   : SIMDLongThreeVectorBHS<   0, 0b0000, "saddl",
            BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
defm SADDW   : SIMDWideThreeVectorBHS<   0, 0b0001, "saddw",
                 BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
defm SMLAL   : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL   : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL   : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
                                               int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
                                               int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
                                     int_aarch64_neon_sqdmull>;
defm SSUBL   : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
                 BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
defm SSUBW   : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
                 BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
defm UABAL   : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
                                              AArch64uabd>;
defm UADDL   : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
                 BinOpFrag<(add (zanyext node:$LHS), (zanyext node:$RHS))>>;
defm UADDW   : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
                 BinOpFrag<(add node:$LHS, (zanyext node:$RHS))>>;
defm UMLAL   : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL   : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL   : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
defm USUBL   : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
                 BinOpFrag<(sub (zanyext node:$LHS), (zanyext node:$RHS))>>;
defm USUBW   : SIMDWideThreeVectorBHS<   1, 0b0011, "usubw",
                 BinOpFrag<(sub node:$LHS, (zanyext node:$RHS))>>;

// Additional patterns for [SU]ML[AS]L
multiclass Neon_mul_acc_widen_patterns<SDPatternOperator opnode, SDPatternOperator vecopnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v4i16 (opnode
                    V64:$Ra,
                    (v4i16 (extract_subvector
                            (vecopnode (v8i8 V64:$Rn),(v8i8 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v8i16 (INST8B
                                     (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
  def : Pat<(v2i32 (opnode
                    V64:$Ra,
                    (v2i32 (extract_subvector
                            (vecopnode (v4i16 V64:$Rn),(v4i16 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v4i32 (INST4H
                                     (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
  def : Pat<(v1i64 (opnode
                    V64:$Ra,
                    (v1i64 (extract_subvector
                            (vecopnode (v2i32 V64:$Rn),(v2i32 V64:$Rm)),
                            (i64 0))))),
             (EXTRACT_SUBREG (v2i64 (INST2S
                                     (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), V64:$Ra, dsub),
                                     V64:$Rn, V64:$Rm)), dsub)>;
}

defm : Neon_mul_acc_widen_patterns<add, int_aarch64_neon_umull,
     UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<add, int_aarch64_neon_smull,
     SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<sub, int_aarch64_neon_umull,
     UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
defm : Neon_mul_acc_widen_patterns<sub, int_aarch64_neon_smull,
     SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;

// Additional patterns for SMULL and UMULL
multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V64:$Rn, V64:$Rm)>;
}

defm : Neon_mul_widen_patterns<AArch64smull, SMULLv8i8_v8i16,
  SMULLv4i16_v4i32, SMULLv2i32_v2i64>;
defm : Neon_mul_widen_patterns<AArch64umull, UMULLv8i8_v8i16,
  UMULLv4i16_v4i32, UMULLv2i32_v2i64>;

// Patterns for smull2/umull2.
multiclass Neon_mul_high_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (extract_high_v16i8 V128:$Rn),
                           (extract_high_v16i8 V128:$Rm))),
             (INST8B V128:$Rn, V128:$Rm)>;
  def : Pat<(v4i32 (opnode (extract_high_v8i16 V128:$Rn),
                           (extract_high_v8i16 V128:$Rm))),
             (INST4H V128:$Rn, V128:$Rm)>;
  def : Pat<(v2i64 (opnode (extract_high_v4i32 V128:$Rn),
                           (extract_high_v4i32 V128:$Rm))),
             (INST2S V128:$Rn, V128:$Rm)>;
}

defm : Neon_mul_high_patterns<AArch64smull, SMULLv16i8_v8i16,
  SMULLv8i16_v4i32, SMULLv4i32_v2i64>;
defm : Neon_mul_high_patterns<AArch64umull, UMULLv16i8_v8i16,
  UMULLv8i16_v4i32, UMULLv4i32_v2i64>;

// Additional patterns for SMLAL/SMLSL and UMLAL/UMLSL
multiclass Neon_mulacc_widen_patterns<SDPatternOperator opnode,
  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S  V128:$Rd, V64:$Rn, V64:$Rm)>;
}

defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;

// Patterns for 64-bit pmull. When both scalar operands are the high halves of
// 128-bit registers, PMULL2 is used on the full registers directly.
def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
          (PMULLv1i64 V64:$Rn, V64:$Rm)>;
def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
                                    (extractelt (v2i64 V128:$Rm), (i64 1))),
          (PMULLv2i64 V128:$Rn, V128:$Rm)>;

// CodeGen patterns for addhn and subhn instructions, which can actually be
// written in LLVM IR without too much difficulty.
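// For example, "trunc (lshr (add %x, %y), 8)" on <8 x i16> operands computes
// the high halves of the sums and selects to "addhn v0.8b, v1.8h, v2.8h".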

// ADDHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
          (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                           (i32 16))))),
          (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                           (i32 32))))),
          (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 8))))),
          (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 16))))),
          (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                    (i32 32))))),
          (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

// SUBHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
          (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                           (i32 16))))),
          (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                           (i32 32))))),
          (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 8))))),
          (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 16))))),
          (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                    (i32 32))))),
          (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

//----------------------------------------------------------------------------
// AdvSIMD bitwise extract from vector instruction.
//----------------------------------------------------------------------------

defm EXT : SIMDBitwiseExtract<"ext">;

def AdjustExtImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 + N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
5301multiclass ExtPat<ValueType VT64, ValueType VT128, int N> {
5302  def : Pat<(VT64 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
5303            (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
5304  def : Pat<(VT128 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
5305            (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
5306  // We use EXT to handle extract_subvector to copy the upper 64-bits of a
5307  // 128-bit vector.
5308  def : Pat<(VT64 (extract_subvector V128:$Rn, (i64 N))),
5309            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
5310  // A 64-bit EXT of two halves of the same 128-bit register can be done as a
5311  // single 128-bit EXT.
5312  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 0)),
5313                              (extract_subvector V128:$Rn, (i64 N)),
5314                              (i32 imm:$imm))),
5315            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, imm:$imm), dsub)>;
5316  // A 64-bit EXT of the high half of a 128-bit register can be done using a
5317  // 128-bit EXT of the whole register with an adjustment to the immediate. The
5318  // top half of the other operand will be unset, but that doesn't matter as it
5319  // will not be used.
5320  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 N)),
5321                              V64:$Rm,
5322                              (i32 imm:$imm))),
5323            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn,
5324                                      (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
5325                                      (AdjustExtImm imm:$imm)), dsub)>;
5326}
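
// Worked example for the last pattern above (a sketch, byte elements): a
// 64-bit EXT of the high half of Vn.16b with Vm.8b at immediate 3 reads
// bytes 3..10 of hi(Vn) ++ Vm, i.e. Vn bytes 11..15 followed by Vm bytes
// 0..2. A 128-bit EXT of Vn with Vm placed in the low half of a 128-bit
// register reads the same bytes at immediate 8 + 3 = 11; AdjustExtImm
// performs exactly that 8 + imm rebase, and only the dsub half is kept.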

defm : ExtPat<v8i8, v16i8, 8>;
defm : ExtPat<v4i16, v8i16, 4>;
defm : ExtPat<v4f16, v8f16, 4>;
defm : ExtPat<v4bf16, v8bf16, 4>;
defm : ExtPat<v2i32, v4i32, 2>;
defm : ExtPat<v2f32, v4f32, 2>;
defm : ExtPat<v1i64, v2i64, 1>;
defm : ExtPat<v1f64, v2f64, 1>;

//----------------------------------------------------------------------------
// AdvSIMD zip vector
//----------------------------------------------------------------------------

defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;

def : Pat<(v16i8 (concat_vectors (v8i8 (trunc (v8i16 V128:$Vn))),
                                 (v8i8 (trunc (v8i16 V128:$Vm))))),
          (UZP1v16i8 V128:$Vn, V128:$Vm)>;
def : Pat<(v8i16 (concat_vectors (v4i16 (trunc (v4i32 V128:$Vn))),
                                 (v4i16 (trunc (v4i32 V128:$Vm))))),
          (UZP1v8i16 V128:$Vn, V128:$Vm)>;
def : Pat<(v4i32 (concat_vectors (v2i32 (trunc (v2i64 V128:$Vn))),
                                 (v2i32 (trunc (v2i64 V128:$Vm))))),
          (UZP1v4i32 V128:$Vn, V128:$Vm)>;
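
// These rely on truncation discarding the high half of each element: viewed
// as vectors of narrow elements, the truncated lanes are exactly the
// even-numbered lanes, so e.g. for the v8i16 case (assuming little-endian
// lane numbering) "uzp1 Vd.8h, Vn.8h, Vm.8h" gives
// Vd.h[i] = Vn.s[i] & 0xffff for i < 4 and Vm.s[i-4] & 0xffff for i >= 4.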

//----------------------------------------------------------------------------
// AdvSIMD TBL/TBX instructions
//----------------------------------------------------------------------------

defm TBL : SIMDTableLookup<    0, "tbl">;
defm TBX : SIMDTableLookupTied<1, "tbx">;

def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBLv16i8One V128:$Ri, V128:$Rn)>;

def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
                  (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
                   (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;


//----------------------------------------------------------------------------
// AdvSIMD scalar DUP instruction
//----------------------------------------------------------------------------

defm DUP : SIMDScalarDUP<"mov">;

//----------------------------------------------------------------------------
// AdvSIMD scalar pairwise instructions
//----------------------------------------------------------------------------

defm ADDP    : SIMDPairwiseScalarD<0, 0b11011, "addp">;
defm FADDP   : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
defm FMAXP   : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
defm FMINP   : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;

let Predicates = [HasFullFP16] in {
def : Pat<(f16 (vecreduce_fadd (v8f16 V128:$Rn))),
          (FADDPv2i16p
            (EXTRACT_SUBREG
               (FADDPv8f16 (FADDPv8f16 V128:$Rn, (v8f16 (IMPLICIT_DEF))), (v8f16 (IMPLICIT_DEF))),
             dsub))>;
def : Pat<(f16 (vecreduce_fadd (v4f16 V64:$Rn))),
          (FADDPv2i16p (FADDPv4f16 V64:$Rn, (v4f16 (IMPLICIT_DEF))))>;
}
def : Pat<(f32 (vecreduce_fadd (v4f32 V128:$Rn))),
          (FADDPv2i32p
            (EXTRACT_SUBREG
              (FADDPv4f32 V128:$Rn, (v4f32 (IMPLICIT_DEF))),
             dsub))>;
def : Pat<(f32 (vecreduce_fadd (v2f32 V64:$Rn))),
          (FADDPv2i32p V64:$Rn)>;
def : Pat<(f64 (vecreduce_fadd (v2f64 V128:$Rn))),
          (FADDPv2i64p V128:$Rn)>;
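
// A lowering sketch of the reductions above: they work pairwise, so for
// v4f32 the vector FADDP adds adjacent lane pairs (the IMPLICIT_DEF operand
// only feeds lanes that the dsub extract discards), and the scalar
// FADDPv2i32p then adds the two surviving lanes into the final f32.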

def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
          (FADDPv2i32p V64:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
          (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
          (FADDPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
          (FMAXNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
          (FMAXNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
          (FMAXPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
          (FMAXPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
          (FMINNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
          (FMINNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
          (FMINPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
          (FMINPv2i64p V128:$Rn)>;

//----------------------------------------------------------------------------
// AdvSIMD INS/DUP instructions
//----------------------------------------------------------------------------

def DUPv8i8gpr  : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;

def DUPv2i64lane : SIMDDup64FromElement;
def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
def DUPv8i8lane  : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;

// DUP from a 64-bit register to a 64-bit register is just a copy.
def : Pat<(v1i64 (AArch64dup (i64 GPR64:$Rn))),
          (COPY_TO_REGCLASS GPR64:$Rn, FPR64)>;
def : Pat<(v1f64 (AArch64dup (f64 FPR64:$Rn))),
          (COPY_TO_REGCLASS FPR64:$Rn, FPR64)>;

def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
          (v2f32 (DUPv2i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
          (v4f32 (DUPv4i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
          (v2f64 (DUPv2i64lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
            (i64 0)))>;
def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
          (v4f16 (DUPv4i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v4bf16 (AArch64dup (bf16 FPR16:$Rn))),
          (v4bf16 (DUPv4i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
          (v8f16 (DUPv8i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v8bf16 (AArch64dup (bf16 FPR16:$Rn))),
          (v8bf16 (DUPv8i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
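
// The patterns above all share one recipe: the lane form of DUP can
// broadcast any element, so a scalar FP dup is handled by placing the
// scalar in lane 0 of an undefined 128-bit register (the INSERT_SUBREG) and
// duplicating lane 0, e.g. "dup Vd.4s, Vn.s[0]" once FPR32:$Rn lives in the
// bottom of Vn. This is a lowering sketch; the INSERT_SUBREG is normally a
// register-allocation constraint rather than a real copy.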

def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;

def : Pat<(v4bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
def : Pat<(v8bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;

def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
          (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;

// If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
// instruction even if the types don't match: we just have to remap the lane
// carefully. N.b. this trick only applies to truncations.
def VecIndex_x2 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x4 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x8 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;

multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
                            ValueType Src128VT, ValueType ScalVT,
                            Instruction DUP, SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
                                                       imm:$idx)))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
                                                       imm:$idx)))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTruncPats<v8i8,   v4i16, v8i16, i32, DUPv8i8lane,  VecIndex_x2>;
defm : DUPWithTruncPats<v8i8,   v2i32, v4i32, i32, DUPv8i8lane,  VecIndex_x4>;
defm : DUPWithTruncPats<v4i16,  v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;

defm : DUPWithTruncPats<v16i8,  v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
defm : DUPWithTruncPats<v16i8,  v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
defm : DUPWithTruncPats<v8i16,  v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;
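
// Worked example (assumes little-endian lane numbering): for
// (v8i8 (AArch64dup (i32 (vector_extract (v8i16 V128:$Rn), 3)))), only the
// low byte of h-lane 3 survives the implicit truncation, and that byte is
// b-lane 2 * 3 = 6 of the very same register, hence DUPv8i8lane with
// VecIndex_x2 rather than any cross-register shuffle.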

multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
                               SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
                                                       imm:$idx))))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
                                                       imm:$idx))))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTrunci64Pats<v8i8,  DUPv8i8lane,  VecIndex_x8>;
defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane, VecIndex_x2>;

defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;

// SMOV and UMOV definitions, with some extra patterns for convenience.
defm SMOV : SMov;
defm UMOV : UMov;

def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx), i16),
          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx), i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
          (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;

def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
            VectorIndexB:$idx)))), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
            VectorIndexH:$idx)))), i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;

// Extracting i8 or i16 elements will have the zero-extend transformed to
// an 'and' mask by type legalization, since neither i8 nor i16 is a legal
// type for AArch64. Match these patterns here since UMOV already zeroes out
// the high bits of the destination register.
def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
               (i32 0xff)),
          (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
               (i32 0xffff)),
          (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;

def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
            VectorIndexB:$idx)))), (i64 0xff))),
          (SUBREG_TO_REG (i64 0), (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx)), sub_32)>;
def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
            VectorIndexH:$idx)))), (i64 0xffff))),
          (SUBREG_TO_REG (i64 0), (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx)), sub_32)>;
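
// For instance, "umov Wd, Vn.b[idx]" writes zeroes into bits [31:8] of Wd,
// so IR like (and (i32 (vector_extract ...)), 255) needs no extra AND; the
// i64 forms additionally rely on SUBREG_TO_REG asserting that the upper
// 32 bits of the 64-bit result are already zero.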

defm INS : SIMDIns;

def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v4bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
          (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
          (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
            (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                                  (i32 FPR32:$Rn), ssub))>;
def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
            (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                                  (i32 FPR32:$Rn), ssub))>;

def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
            (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                                  (i64 FPR64:$Rn), dsub))>;

def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;

def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;

def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(vector_insert (v8f16 v8f16:$Rn), (f16 fpimm0),
            (i64 VectorIndexH:$imm)),
          (INSvi16gpr V128:$Rn, VectorIndexH:$imm, WZR)>;
def : Pat<(vector_insert v4f32:$Rn, (f32 fpimm0),
            (i64 VectorIndexS:$imm)),
          (INSvi32gpr V128:$Rn, VectorIndexS:$imm, WZR)>;
def : Pat<(vector_insert v2f64:$Rn, (f64 fpimm0),
            (i64 VectorIndexD:$imm)),
          (INSvi64gpr V128:$Rn, VectorIndexD:$imm, XZR)>;

def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v4bf16 (vector_insert (v4bf16 V64:$Rn),
            (bf16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(v8bf16 (vector_insert (v8bf16 V128:$Rn),
            (bf16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi32lane
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
              (i64 0)),
            dsub)>;
def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (INSvi32lane
            V128:$Rn, VectorIndexS:$imm,
            (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
            (i64 0))>;
def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
            (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
          (INSvi64lane
            V128:$Rn, VectorIndexD:$imm,
            (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
            (i64 0))>;
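
// The v4f16/v4bf16/v2f32 forms above follow a common recipe: there is no
// 64-bit INS, so the 64-bit vector is widened into an undefined 128-bit
// register, the lane is inserted there, and the D subregister is extracted
// again. The high half is dead throughout, so the widening normally
// coalesces away rather than costing real copies.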

// Copy an element at a constant index in one vector into a constant indexed
// element of another.
// FIXME: refactor to a shared class/def parameterized on vector type, vector
// index type and INS extension.
def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
                   (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
                   VectorIndexB:$idx2)),
          (v16i8 (INSvi8lane
                   V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
          )>;
def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
                   (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
                   VectorIndexH:$idx2)),
          (v8i16 (INSvi16lane
                   V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
          )>;
def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
                   (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
                   VectorIndexS:$idx2)),
          (v4i32 (INSvi32lane
                   V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
          )>;
def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
                   (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
                   VectorIndexD:$idx2)),
          (v2i64 (INSvi64lane
                   V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
          )>;

multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
                                ValueType VTScal, Instruction INS> {
  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;

  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (INS V128:$src, imm:$Immd,
                 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
                                 imm:$Immd, V128:$Rn, imm:$Immn),
                            dsub)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (EXTRACT_SUBREG
                (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
                     (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
                dsub)>;
}

defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v8bf16, v4bf16, bf16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;
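
// For example (an illustrative lowering, assuming the first pattern above
// matches): a v4f32 lane-to-lane copy such as
//   %e = extractelement <4 x float> %n, i64 1
//   %r = insertelement <4 x float> %s, float %e, i64 3
// becomes a single element move, "mov Vd.s[3], Vn.s[1]", via INSvi32lane.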


// Floating point vector extractions are codegen'd as either a sequence of
// subregister extractions, or a MOV (aka DUP here) if the lane number is
// anything other than zero.
def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
          (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
          (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
          (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
def : Pat<(vector_extract (v8bf16 V128:$Rn), 0),
          (bf16 (EXTRACT_SUBREG V128:$Rn, hsub))>;


def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
          (f64 (DUPi64 V128:$Rn, VectorIndexD:$idx))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
          (f32 (DUPi32 V128:$Rn, VectorIndexS:$idx))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
          (f16 (DUPi16 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(vector_extract (v8bf16 V128:$Rn), VectorIndexH:$idx),
          (bf16 (DUPi16 V128:$Rn, VectorIndexH:$idx))>;

// All concat_vectors operations are canonicalised to act on i64 vectors for
// AArch64. In the general case we need an instruction, which might as well
// be INS.
class ConcatPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
        (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
                     (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;

def : ConcatPat<v2i64, v1i64>;
def : ConcatPat<v2f64, v1f64>;
def : ConcatPat<v4i32, v2i32>;
def : ConcatPat<v4f32, v2f32>;
def : ConcatPat<v8i16, v4i16>;
def : ConcatPat<v8f16, v4f16>;
def : ConcatPat<v8bf16, v4bf16>;
def : ConcatPat<v16i8, v8i8>;
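
// As a sketch: v4f32 = concat(v2f32 %a, v2f32 %b) therefore becomes one real
// instruction. The INSERT_SUBREGs place each 64-bit half in the bottom of a
// 128-bit register (usually coalesced to nothing), and a single INSvi64lane,
// i.e. "mov Vd.d[1], Vn.d[0]", writes %b into the high half above %a.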

// If the high lanes are undef, though, we can just ignore them:
class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
        (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;

def : ConcatUndefPat<v2i64, v1i64>;
def : ConcatUndefPat<v2f64, v1f64>;
def : ConcatUndefPat<v4i32, v2i32>;
def : ConcatUndefPat<v4f32, v2f32>;
def : ConcatUndefPat<v8i16, v4i16>;
def : ConcatUndefPat<v16i8, v8i8>;

//----------------------------------------------------------------------------
// AdvSIMD across lanes instructions
//----------------------------------------------------------------------------

defm ADDV    : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
defm SMAXV   : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
defm SMINV   : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
defm UMAXV   : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
defm UMINV   : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
defm SADDLV  : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
defm UADDLV  : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
defm FMAXNMV : SIMDFPAcrossLanes<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
defm FMAXV   : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
defm FMINV   : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;

// Patterns for uaddv(uaddlp(x)) ==> uaddlv
def : Pat<(i32 (vector_extract (v8i16 (insert_subvector undef,
            (v4i16 (AArch64uaddv (v4i16 (AArch64uaddlp (v8i8 V64:$op))))),
            (i64 0))), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
           (UADDLVv8i8v V64:$op), hsub), ssub)>;
def : Pat<(i32 (vector_extract (v8i16 (AArch64uaddv (v8i16 (AArch64uaddlp
           (v16i8 V128:$op))))), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
           (UADDLVv16i8v V128:$op), hsub), ssub)>;
def : Pat<(v4i32 (AArch64uaddv (v4i32 (AArch64uaddlp (v8i16 V128:$op))))),
          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), (UADDLVv8i16v V128:$op), ssub)>;

// Patterns for addp(uaddlp(x)) ==> uaddlv
def : Pat<(v2i32 (AArch64uaddv (v2i32 (AArch64uaddlp (v4i16 V64:$op))))),
          (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)), (UADDLVv4i16v V64:$op), ssub)>;
def : Pat<(v2i64 (AArch64uaddv (v2i64 (AArch64uaddlp (v4i32 V128:$op))))),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (UADDLVv4i32v V128:$op), dsub)>;

// Patterns for across-vector intrinsics that have a node equivalent
// returning a vector (with only the low lane defined) instead of a scalar.
// In effect, opNode is the same as (scalar_to_vector (IntNode)).
multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
                                    SDPatternOperator opNode> {
// If a lane instruction caught the vector_extract around opNode, we can
// directly match the latter to the instruction.
def : Pat<(v8i8 (opNode V64:$Rn)),
          (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
def : Pat<(v16i8 (opNode V128:$Rn)),
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
def : Pat<(v4i16 (opNode V64:$Rn)),
          (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
def : Pat<(v8i16 (opNode V128:$Rn)),
          (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
def : Pat<(v4i32 (opNode V128:$Rn)),
          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;


// If none did, fall back to the explicit patterns, consuming the
// vector_extract.
def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
            (i64 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (insert_subvector undef,
            (v4i16 (opNode V64:$Rn)), (i64 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
            ssub), ssub)>;

}

multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
                                          SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a sign extension after this intrinsic, consume it, as SMOV
// already performs it.
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i64 0)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i64 0)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
           (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
            (i64 0)))>;
}

multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
                                            SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a masking operation keeping only what has actually been
// generated, consume it.
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i64 0)), (i64 0))), maski8_or_more)),
      (i32 (EXTRACT_SUBREG
        (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
          (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
        ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
            maski8_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
          ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i64 0)), (i64 0))), maski16_or_more)),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
            ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
            maski16_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
          ssub))>;
}

defm : SIMDAcrossLanesSignedIntrinsic<"ADDV",  AArch64saddv>;
// vaddv_[su]32 is special: it lowers to ADDP Vd.2s, Vn.2s, Vm.2s with
// Vn == Vm, and the result is read back from Vd.s[0].
def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
// vaddv_[su]32 is special: it lowers to ADDP Vd.2s, Vn.2s, Vm.2s with
// Vn == Vm, and the result is read back from Vd.s[0].
def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
          (SMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
          (SMINPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
          (UMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
          (UMINPv2i32 V64:$Rn, V64:$Rn)>;

multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
        (i32 (SMOVvi16to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
          (i64 0)))>;
  def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
        (i32 (SMOVvi16to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
          (i64 0)))>;

  def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
          (i32 (EXTRACT_SUBREG
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
           ssub))>;
  def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
          ssub))>;

  def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
        (i64 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
          dsub))>;
}

multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
                                                Intrinsic intOp> {
  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
          ssub))>;
  def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
          ssub))>;

  def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
            ssub))>;
  def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
          ssub))>;

  def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
        (i64 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
          dsub))>;
}

defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;

// The vaddlv_s32 intrinsic gets mapped to SADDLP.
def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (SADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;
// The vaddlv_u32 intrinsic gets mapped to UADDLP.
def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (UADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;

//----------------------------------------------------------------------------
// AdvSIMD modified immediate instructions
//----------------------------------------------------------------------------

// AdvSIMD BIC
defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
// AdvSIMD ORR
defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;

def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

// AdvSIMD FMOV
def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
                                              "fmov", ".2d",
                       [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64,  fpimm8,
                                              "fmov", ".2s",
                       [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
                                              "fmov", ".4s",
                       [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
let Predicates = [HasNEON, HasFullFP16] in {
def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64,  fpimm8,
                                              "fmov", ".4h",
                       [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
                                              "fmov", ".8h",
                       [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
} // Predicates = [HasNEON, HasFullFP16]

// AdvSIMD MOVI

// EDIT byte mask: scalar
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVID      : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
                    [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 here.
def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
          (MOVID imm0_255:$shift)>;

// EDIT byte mask: 2d

// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 in the pattern.
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVIv2d_ns   : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
                                                simdimmtype10,
                                                "movi", ".2d",
                   [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;

def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;

def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;
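
// A note on the type-10 ("byte mask") encoding used above: each bit of the
// 8-bit immediate expands to a full 0x00/0xff byte of the 64-bit pattern
// replicated into both lanes, so #0 produces all-zeroes and #255 all-ones
// in every lane. That is why immAllZerosV/immAllOnesV of any element type
// can all map onto the single MOVIv2d_ns definition.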

// Set 64-bit vectors to all 0/1 by extracting from a 128-bit register as the
// extract is free and this gives better MachineCSE results.
def : Pat<(v1i64 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v2i32 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v4i16 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v8i8  immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;

def : Pat<(v1i64 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v2i32 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v4i16 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v8i8  immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MOVI      : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;

def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
// EDIT per word: 2s & 4s with MSL shifter
def MOVIv2s_msl  : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MOVIv4s_msl  : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;

// Per byte: 8b & 16b
def MOVIv8b_ns   : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64,  imm0_255,
                                                 "movi", ".8b",
                       [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;

def MOVIv16b_ns  : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
                                                 "movi", ".16b",
                       [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
}

// AdvSIMD MVNI

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MVNI      : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;

def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;

// EDIT per word: 2s & 4s with MSL shifter
let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MVNIv2s_msl   : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MVNIv4s_msl   : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
}

//----------------------------------------------------------------------------
// AdvSIMD indexed element
//----------------------------------------------------------------------------

let hasSideEffects = 0 in {
  defm FMLA  : SIMDFPIndexedTied<0, 0b0001, "fmla">;
  defm FMLS  : SIMDFPIndexedTied<0, 0b0101, "fmls">;
}

// NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
// instruction expects the addend first, while the intrinsic expects it last.

// On the other hand, there are quite a few valid combinatorial options due to
// the commutativity of multiplication and the fact that (-x) * y = x * (-y).
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;

defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;
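
// Four FMLS fragments are needed because fmul is commutative and
// (-x) * y == x * (-y): the fneg may legally sit on either multiplicand, in
// either operand order, and DAG canonicalisation does not guarantee which
// placement survives, so each placement gets its own TriOpFrag above.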

multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
  // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                             VectorIndexS:$idx))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (v2f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i64 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                             VectorIndexS:$idx))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (v4f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i64 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
  // (DUPLANE from 64-bit would be trivial).
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
                                             VectorIndexD:$idx))),
            (FMLSv2i64_indexed
                V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexD:$idx)>;
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
            (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
                (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;

  // 2 variants for the 32-bit scalar version: extract from .2s or from .4s
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (insert_subvector undef,
                                                    (v2f32 (fneg V64:$Rm)),
                                                    (i64 0))),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;

  // 1 variant for the 64-bit scalar version: extract from .1d or from .2d
  def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
                         (vector_extract (v2f64 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
                V128:$Rm, VectorIndexS:$idx)>;
}

defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;

defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
defm FMUL  : SIMDFPIndexed<0, 0b1001, "fmul", fmul>;

def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv2i32_indexed V64:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv4i32_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
          (FMULv2i64_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
            (i64 0))>;
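
// E.g. (fmul <2 x float> %n, (dup float %m)) becomes
// "fmul Vd.2s, Vn.2s, Vm.s[0]" once the scalar has been placed in lane 0 of
// an undefined 128-bit register (a lowering sketch; the INSERT_SUBREG is
// normally just a register-allocation constraint, not a real copy).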
6385
6386defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
6387defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
6388
6389defm SQDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqdmulh_lane,
6390                                     int_aarch64_neon_sqdmulh_laneq>;
6391defm SQRDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqrdmulh_lane,
6392                                      int_aarch64_neon_sqrdmulh_laneq>;
6393
6394// Generated by MachineCombine
6395defm MLA   : SIMDVectorIndexedHSTied<1, 0b0000, "mla", null_frag>;
6396defm MLS   : SIMDVectorIndexedHSTied<1, 0b0100, "mls", null_frag>;
6397
6398defm MUL   : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
6399defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
6400    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
6401defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
6402    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
6403defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
6404                int_aarch64_neon_smull>;
6405defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
6406                                           int_aarch64_neon_sqadd>;
6407defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
6408                                           int_aarch64_neon_sqsub>;
6409defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
6410                                          int_aarch64_neon_sqrdmlah>;
6411defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
6412                                          int_aarch64_neon_sqrdmlsh>;
6413defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
6414defm UMLAL   : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
6415    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
6416defm UMLSL   : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
6417    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
6418defm UMULL   : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
6419                int_aarch64_neon_umull>;
6420
6421// A scalar sqdmull with the second operand being a vector lane can be
6422// handled directly with the indexed instruction encoding.
6423def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
6424                                          (vector_extract (v4i32 V128:$Vm),
6425                                                           VectorIndexS:$idx)),
6426          (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
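// e.g. extracting lane 3 of a v4i32 should give "sqdmull d0, s1, v2.s[3]"
// (mnemonic shown for illustration only).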
6427
6428  // Match an add node, and also treat an 'or' node as an 'add' if the or'ed
6429  // operands have no common bits.
6430def add_and_or_is_add : PatFrags<(ops node:$lhs, node:$rhs),
6431                         [(add node:$lhs, node:$rhs), (or node:$lhs, node:$rhs)],[{
6432   if (N->getOpcode() == ISD::ADD)
6433     return true;
6434   return CurDAG->haveNoCommonBitsSet(N->getOperand(0), N->getOperand(1));
6435}]> {
6436  let GISelPredicateCode = [{
6437     // Only handle G_ADD for now. FIXME: build the capability to compute
6438     // whether the operands of G_OR have common bits set or not.
6439     return MI.getOpcode() == TargetOpcode::G_ADD;
6440  }];
6441}
6442
6443
6444//----------------------------------------------------------------------------
6445// AdvSIMD scalar shift instructions
6446//----------------------------------------------------------------------------
6447defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">;
6448defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">;
6449defm SCVTF  : SIMDFPScalarRShift<0, 0b11100, "scvtf">;
6450defm UCVTF  : SIMDFPScalarRShift<1, 0b11100, "ucvtf">;
6451// Codegen patterns for the above. We don't put these directly on the
6452// instructions because TableGen's type inference can't handle the truth.
6453// Having the same base pattern for fp <--> int totally freaks it out.
6454def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
6455          (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
6456def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
6457          (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
6458def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
6459          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
6460def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
6461          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
6462def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
6463                                            vecshiftR64:$imm)),
6464          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
6465def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
6466                                            vecshiftR64:$imm)),
6467          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
6468def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
6469          (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
6470def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
6471          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6472def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
6473                                            vecshiftR64:$imm)),
6474          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6475def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
6476          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6477def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
6478                                            vecshiftR64:$imm)),
6479          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
6480def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
6481          (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
6482
6483  // Patterns for FP16 intrinsics - a reg copy to/from is required since i16 is not supported.
6484
6485def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
6486          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6487def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
6488          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6489def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
6490          (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
6491def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
6492            (and FPR32:$Rn, (i32 65535)),
6493            vecshiftR16:$imm)),
6494          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6495def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
6496          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
6497def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
6498          (UCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
6499def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR32:$imm)),
6500          (i32 (INSERT_SUBREG
6501            (i32 (IMPLICIT_DEF)),
6502            (FCVTZSh FPR16:$Rn, vecshiftR32:$imm),
6503            hsub))>;
6504def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
6505          (i64 (INSERT_SUBREG
6506            (i64 (IMPLICIT_DEF)),
6507            (FCVTZSh FPR16:$Rn, vecshiftR64:$imm),
6508            hsub))>;
6509def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR32:$imm)),
6510          (i32 (INSERT_SUBREG
6511            (i32 (IMPLICIT_DEF)),
6512            (FCVTZUh FPR16:$Rn, vecshiftR32:$imm),
6513            hsub))>;
6514def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR64:$imm)),
6515          (i64 (INSERT_SUBREG
6516            (i64 (IMPLICIT_DEF)),
6517            (FCVTZUh FPR16:$Rn, vecshiftR64:$imm),
6518            hsub))>;
6519def : Pat<(i32 (int_aarch64_neon_facge (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
6520          (i32 (INSERT_SUBREG
6521            (i32 (IMPLICIT_DEF)),
6522            (FACGE16 FPR16:$Rn, FPR16:$Rm),
6523            hsub))>;
6524def : Pat<(i32 (int_aarch64_neon_facgt (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
6525          (i32 (INSERT_SUBREG
6526            (i32 (IMPLICIT_DEF)),
6527            (FACGT16 FPR16:$Rn, FPR16:$Rm),
6528            hsub))>;
6529
6530defm SHL      : SIMDScalarLShiftD<   0, 0b01010, "shl", AArch64vshl>;
6531defm SLI      : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
6532defm SQRSHRN  : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
6533                                     int_aarch64_neon_sqrshrn>;
6534defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
6535                                     int_aarch64_neon_sqrshrun>;
6536defm SQSHLU   : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
6537defm SQSHL    : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
6538defm SQSHRN   : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
6539                                     int_aarch64_neon_sqshrn>;
6540defm SQSHRUN  : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
6541                                     int_aarch64_neon_sqshrun>;
6542defm SRI      : SIMDScalarRShiftDTied<   1, 0b01000, "sri">;
6543defm SRSHR    : SIMDScalarRShiftD<   0, 0b00100, "srshr", AArch64srshri>;
6544defm SRSRA    : SIMDScalarRShiftDTied<   0, 0b00110, "srsra",
6545    TriOpFrag<(add node:$LHS,
6546                   (AArch64srshri node:$MHS, node:$RHS))>>;
6547defm SSHR     : SIMDScalarRShiftD<   0, 0b00000, "sshr", AArch64vashr>;
6548defm SSRA     : SIMDScalarRShiftDTied<   0, 0b00010, "ssra",
6549    TriOpFrag<(add_and_or_is_add node:$LHS,
6550                   (AArch64vashr node:$MHS, node:$RHS))>>;
6551defm UQRSHRN  : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
6552                                     int_aarch64_neon_uqrshrn>;
6553defm UQSHL    : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
6554defm UQSHRN   : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
6555                                     int_aarch64_neon_uqshrn>;
6556defm URSHR    : SIMDScalarRShiftD<   1, 0b00100, "urshr", AArch64urshri>;
6557defm URSRA    : SIMDScalarRShiftDTied<   1, 0b00110, "ursra",
6558    TriOpFrag<(add node:$LHS,
6559                   (AArch64urshri node:$MHS, node:$RHS))>>;
6560defm USHR     : SIMDScalarRShiftD<   1, 0b00000, "ushr", AArch64vlshr>;
6561defm USRA     : SIMDScalarRShiftDTied<   1, 0b00010, "usra",
6562    TriOpFrag<(add_and_or_is_add node:$LHS,
6563                   (AArch64vlshr node:$MHS, node:$RHS))>>;
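// e.g. (add_and_or_is_add V, (AArch64vashr X, (i32 3))) on 64-bit scalars
// should select the SSRA form "ssra d0, d1, #3" (mnemonic for illustration).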
6564
6565//----------------------------------------------------------------------------
6566// AdvSIMD vector shift instructions
6567//----------------------------------------------------------------------------
6568defm FCVTZS:SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
6569defm FCVTZU:SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
6570defm SCVTF: SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
6571                                   int_aarch64_neon_vcvtfxs2fp>;
6572defm RSHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
6573                                         int_aarch64_neon_rshrn>;
6574defm SHL     : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
6575defm SHRN    : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
6576                          BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
6577defm SLI     : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", AArch64vsli>;
6578def : Pat<(v1i64 (AArch64vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
6579                                      (i32 vecshiftL64:$imm))),
6580          (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
6581defm SQRSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
6582                                         int_aarch64_neon_sqrshrn>;
6583defm SQRSHRUN: SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
6584                                         int_aarch64_neon_sqrshrun>;
6585defm SQSHLU : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
6586defm SQSHL  : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
6587defm SQSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
6588                                         int_aarch64_neon_sqshrn>;
6589defm SQSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
6590                                         int_aarch64_neon_sqshrun>;
6591defm SRI     : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", AArch64vsri>;
6592def : Pat<(v1i64 (AArch64vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
6593                                      (i32 vecshiftR64:$imm))),
6594          (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
6595defm SRSHR   : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
6596defm SRSRA   : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
6597                 TriOpFrag<(add node:$LHS,
6598                                (AArch64srshri node:$MHS, node:$RHS))> >;
6599defm SSHLL   : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
6600                BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;
6601
6602defm SSHR    : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
6603defm SSRA    : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
6604                TriOpFrag<(add_and_or_is_add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
6605defm UCVTF   : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
6606                        int_aarch64_neon_vcvtfxu2fp>;
6607defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
6608                                         int_aarch64_neon_uqrshrn>;
6609defm UQSHL   : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
6610defm UQSHRN  : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
6611                                         int_aarch64_neon_uqshrn>;
6612defm URSHR   : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
6613defm URSRA   : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
6614                TriOpFrag<(add node:$LHS,
6615                               (AArch64urshri node:$MHS, node:$RHS))> >;
6616defm USHLL   : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
6617                BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
6618defm USHR    : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
6619defm USRA    : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
6620                TriOpFrag<(add_and_or_is_add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;
6621
6622// RADDHN patterns for when RSHRN shifts by half the size of the vector element
6623def : Pat<(v8i8 (int_aarch64_neon_rshrn (v8i16 V128:$Vn), (i32 8))),
6624          (RADDHNv8i16_v8i8 V128:$Vn, (v8i16 (MOVIv2d_ns (i32 0))))>;
6625def : Pat<(v4i16 (int_aarch64_neon_rshrn (v4i32 V128:$Vn), (i32 16))),
6626          (RADDHNv4i32_v4i16 V128:$Vn, (v4i32 (MOVIv2d_ns (i32 0))))>;
6627def : Pat<(v2i32 (int_aarch64_neon_rshrn (v2i64 V128:$Vn), (i32 32))),
6628          (RADDHNv2i64_v2i32 V128:$Vn, (v2i64 (MOVIv2d_ns (i32 0))))>;
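// Why this is sound (sketch): with 16-bit elements, "rshrn #8" computes
// (x + 0x80) >> 8, and "raddhn" with a zero second operand computes the same
// rounded high half, so the two forms should be interchangeable here.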
6629
6630// RADDHN2 patterns for when RSHRN shifts by half the size of the vector element
6631def : Pat<(v16i8 (concat_vectors
6632                 (v8i8 V64:$Vd),
6633                 (v8i8 (int_aarch64_neon_rshrn (v8i16 V128:$Vn), (i32 8))))),
6634          (RADDHNv8i16_v16i8
6635                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
6636                 (v8i16 (MOVIv2d_ns (i32 0))))>;
6637def : Pat<(v8i16 (concat_vectors
6638                 (v4i16 V64:$Vd),
6639                 (v4i16 (int_aarch64_neon_rshrn (v4i32 V128:$Vn), (i32 16))))),
6640          (RADDHNv4i32_v8i16
6641                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
6642                 (v4i32 (MOVIv2d_ns (i32 0))))>;
6643def : Pat<(v4i32 (concat_vectors
6644                 (v2i32 V64:$Vd),
6645                 (v2i32 (int_aarch64_neon_rshrn (v2i64 V128:$Vn), (i32 32))))),
6646          (RADDHNv2i64_v4i32
6647                 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Vd, dsub), V128:$Vn,
6648                 (v2i64 (MOVIv2d_ns (i32 0))))>;
6649
6650// SHRN patterns for when a logical right shift was used instead of arithmetic
6651// (the immediate guarantees no sign bits actually end up in the result so it
6652// doesn't matter).
6653def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
6654          (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
6655def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
6656          (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
6657def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
6658          (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;
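// e.g. for (trunc (AArch64vlshr (v8i16 X), #5)), bits [12:5] of each element
// land in the low byte either way, so the arithmetic-shift-based SHRN is safe.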
6659
6660def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
6661                                 (trunc (AArch64vlshr (v8i16 V128:$Rn),
6662                                                    vecshiftR16Narrow:$imm)))),
6663          (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6664                           V128:$Rn, vecshiftR16Narrow:$imm)>;
6665def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
6666                                 (trunc (AArch64vlshr (v4i32 V128:$Rn),
6667                                                    vecshiftR32Narrow:$imm)))),
6668          (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6669                           V128:$Rn, vecshiftR32Narrow:$imm)>;
6670def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
6671                                 (trunc (AArch64vlshr (v2i64 V128:$Rn),
6672                                                    vecshiftR64Narrow:$imm)))),
6673          (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
6674                           V128:$Rn, vecshiftR64Narrow:$imm)>;
6675
6676  // Vector sign and zero extensions are implemented with SSHLL and USHLL.
6677// Anyexts are implemented as zexts.
6678def : Pat<(v8i16 (sext   (v8i8 V64:$Rn))),  (SSHLLv8i8_shift  V64:$Rn, (i32 0))>;
6679def : Pat<(v8i16 (zext   (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
6680def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
6681def : Pat<(v4i32 (sext   (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
6682def : Pat<(v4i32 (zext   (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
6683def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
6684def : Pat<(v2i64 (sext   (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
6685def : Pat<(v2i64 (zext   (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
6686def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
6687  // Also match an extend from the upper half of a 128-bit source register.
6688def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6689          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
6690def : Pat<(v8i16 (zext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6691          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
6692def : Pat<(v8i16 (sext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
6693          (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
6694def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6695          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
6696def : Pat<(v4i32 (zext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6697          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
6698def : Pat<(v4i32 (sext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
6699          (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
6700def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6701          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
6702def : Pat<(v2i64 (zext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6703          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
6704def : Pat<(v2i64 (sext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
6705          (SSHLLv4i32_shift V128:$Rn, (i32 0))>;
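// e.g. (zext (v8i8 (extract_subvector V128:$Rn, (i64 8)))) should select
// "ushll2 v0.8h, v1.16b, #0" (the second-half form), avoiding an explicit
// lane extraction.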
6706
6707// Vector shift sxtl aliases
6708def : InstAlias<"sxtl.8h $dst, $src1",
6709                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6710def : InstAlias<"sxtl $dst.8h, $src1.8b",
6711                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6712def : InstAlias<"sxtl.4s $dst, $src1",
6713                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6714def : InstAlias<"sxtl $dst.4s, $src1.4h",
6715                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6716def : InstAlias<"sxtl.2d $dst, $src1",
6717                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6718def : InstAlias<"sxtl $dst.2d, $src1.2s",
6719                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6720
6721// Vector shift sxtl2 aliases
6722def : InstAlias<"sxtl2.8h $dst, $src1",
6723                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6724def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
6725                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6726def : InstAlias<"sxtl2.4s $dst, $src1",
6727                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6728def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
6729                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6730def : InstAlias<"sxtl2.2d $dst, $src1",
6731                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6732def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
6733                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6734
6735// Vector shift uxtl aliases
6736def : InstAlias<"uxtl.8h $dst, $src1",
6737                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6738def : InstAlias<"uxtl $dst.8h, $src1.8b",
6739                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
6740def : InstAlias<"uxtl.4s $dst, $src1",
6741                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6742def : InstAlias<"uxtl $dst.4s, $src1.4h",
6743                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
6744def : InstAlias<"uxtl.2d $dst, $src1",
6745                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6746def : InstAlias<"uxtl $dst.2d, $src1.2s",
6747                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
6748
6749// Vector shift uxtl2 aliases
6750def : InstAlias<"uxtl2.8h $dst, $src1",
6751                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6752def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
6753                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
6754def : InstAlias<"uxtl2.4s $dst, $src1",
6755                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6756def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
6757                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
6758def : InstAlias<"uxtl2.2d $dst, $src1",
6759                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6760def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
6761                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
6762
6763// If an integer is about to be converted to a floating point value,
6764// just load it on the floating point unit.
6765// These patterns are more complex because floating point loads do not
6766// support sign extension.
6767// The sign extension has to be explicitly added and is only supported for
6768// one step: byte-to-half, half-to-word, word-to-doubleword.
6769// SCVTF GPR -> FPR is 9 cycles.
6770  // SCVTF FPR -> FPR is 4 cycles.
6771  // (sign extension with lengthen) SXTL FPR -> FPR is 2 cycles.
6772  // Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
6773  // and still be faster.
6774// However, this is not good for code size.
6775// 8-bits -> float. 2 sizes step-up.
6776class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
6777  : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
6778        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
6779                            (SSHLLv4i16_shift
6780                              (f64
6781                                (EXTRACT_SUBREG
6782                                  (SSHLLv8i8_shift
6783                                    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6784                                        INST,
6785                                        bsub),
6786                                    0),
6787                                  dsub)),
6788                               0),
6789                             ssub)))>,
6790    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
6791
6792def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
6793                          (LDRBroW  GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
6794def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
6795                          (LDRBroX  GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
6796def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
6797                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
6798def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
6799                          (LDURBi GPR64sp:$Rn, simm9:$offset)>;
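// As a sketch, the 8-bit-to-float case above should expand roughly to:
//   ldr   b0, [x0]            // byte load straight into the FP unit
//   sshll v0.8h, v0.8b, #0    // first sign-extension step
//   sshll v0.4s, v0.4h, #0    // second sign-extension step
//   scvtf s0, s0              // FPR -> FPR convert
// (register assignment shown for illustration only).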
6800
6801// 16-bits -> float. 1 size step-up.
6802class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
6803  : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
6804        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
6805                            (SSHLLv4i16_shift
6806                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6807                                  INST,
6808                                  hsub),
6809                                0),
6810                            ssub)))>, Requires<[NotForCodeSize]>;
6811
6812def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
6813                           (LDRHroW   GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
6814def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
6815                           (LDRHroX   GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
6816def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
6817                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
6818def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
6819                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
6820
6821// 32-bits to 32-bits are handled in target specific dag combine:
6822// performIntToFpCombine.
6823  // 64-bit integer to 32-bit floating point is not possible with
6824  // SCVTF on floating point registers (both source and destination
6825  // must have the same size).
6826
6827// Here are the patterns for 8, 16, 32, and 64-bits to double.
6828// 8-bits -> double. 3 size step-up: give up.
6829  // 16-bits -> double. 2 sizes step-up.
6830class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
6831  : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
6832           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
6833                              (SSHLLv2i32_shift
6834                                 (f64
6835                                  (EXTRACT_SUBREG
6836                                    (SSHLLv4i16_shift
6837                                      (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6838                                        INST,
6839                                        hsub),
6840                                     0),
6841                                   dsub)),
6842                               0),
6843                             dsub)))>,
6844    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
6845
6846def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
6847                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
6848def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
6849                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
6850def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
6851                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
6852def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
6853                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
6854// 32-bits -> double. 1 size step-up.
6855class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
6856  : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
6857           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
6858                              (SSHLLv2i32_shift
6859                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6860                                  INST,
6861                                  ssub),
6862                               0),
6863                             dsub)))>, Requires<[NotForCodeSize]>;
6864
6865def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
6866                           (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
6867def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
6868                           (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
6869def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
6870                           (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
6871def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
6872                           (LDURSi GPR64sp:$Rn, simm9:$offset)>;
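// e.g. a 32-bit integer load feeding (f64 (sint_to_fp ...)) should expand
// roughly to "ldr s0, [x0]; sshll v0.2d, v0.2s, #0; scvtf d0, d0".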
6873
6874// 64-bits -> double are handled in target specific dag combine:
6875// performIntToFpCombine.
6876
6877
6878//----------------------------------------------------------------------------
6879// AdvSIMD Load-Store Structure
6880//----------------------------------------------------------------------------
6881defm LD1 : SIMDLd1Multiple<"ld1">;
6882defm LD2 : SIMDLd2Multiple<"ld2">;
6883defm LD3 : SIMDLd3Multiple<"ld3">;
6884defm LD4 : SIMDLd4Multiple<"ld4">;
6885
6886defm ST1 : SIMDSt1Multiple<"st1">;
6887defm ST2 : SIMDSt2Multiple<"st2">;
6888defm ST3 : SIMDSt3Multiple<"st3">;
6889defm ST4 : SIMDSt4Multiple<"st4">;
6890
6891class Ld1Pat<ValueType ty, Instruction INST>
6892  : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;
6893
6894def : Ld1Pat<v16i8, LD1Onev16b>;
6895def : Ld1Pat<v8i16, LD1Onev8h>;
6896def : Ld1Pat<v4i32, LD1Onev4s>;
6897def : Ld1Pat<v2i64, LD1Onev2d>;
6898def : Ld1Pat<v8i8,  LD1Onev8b>;
6899def : Ld1Pat<v4i16, LD1Onev4h>;
6900def : Ld1Pat<v2i32, LD1Onev2s>;
6901def : Ld1Pat<v1i64, LD1Onev1d>;
6902
6903class St1Pat<ValueType ty, Instruction INST>
6904  : Pat<(store ty:$Vt, GPR64sp:$Rn),
6905        (INST ty:$Vt, GPR64sp:$Rn)>;
6906
6907def : St1Pat<v16i8, ST1Onev16b>;
6908def : St1Pat<v8i16, ST1Onev8h>;
6909def : St1Pat<v4i32, ST1Onev4s>;
6910def : St1Pat<v2i64, ST1Onev2d>;
6911def : St1Pat<v8i8,  ST1Onev8b>;
6912def : St1Pat<v4i16, ST1Onev4h>;
6913def : St1Pat<v2i32, ST1Onev2s>;
6914def : St1Pat<v1i64, ST1Onev1d>;
6915
6916//---
6917// Single-element
6918//---
6919
6920defm LD1R          : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
6921defm LD2R          : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
6922defm LD3R          : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
6923defm LD4R          : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
6924let mayLoad = 1, hasSideEffects = 0 in {
6925defm LD1 : SIMDLdSingleBTied<0, 0b000,       "ld1", VecListOneb,   GPR64pi1>;
6926defm LD1 : SIMDLdSingleHTied<0, 0b010, 0,    "ld1", VecListOneh,   GPR64pi2>;
6927defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes,   GPR64pi4>;
6928defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned,   GPR64pi8>;
6929defm LD2 : SIMDLdSingleBTied<1, 0b000,       "ld2", VecListTwob,   GPR64pi2>;
6930defm LD2 : SIMDLdSingleHTied<1, 0b010, 0,    "ld2", VecListTwoh,   GPR64pi4>;
6931defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos,   GPR64pi8>;
6932defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod,   GPR64pi16>;
6933defm LD3 : SIMDLdSingleBTied<0, 0b001,       "ld3", VecListThreeb, GPR64pi3>;
6934defm LD3 : SIMDLdSingleHTied<0, 0b011, 0,    "ld3", VecListThreeh, GPR64pi6>;
6935defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
6936defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
6937defm LD4 : SIMDLdSingleBTied<1, 0b001,       "ld4", VecListFourb,  GPR64pi4>;
6938defm LD4 : SIMDLdSingleHTied<1, 0b011, 0,    "ld4", VecListFourh,  GPR64pi8>;
6939defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours,  GPR64pi16>;
6940defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd,  GPR64pi32>;
6941}
6942
6943def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
6944          (LD1Rv8b GPR64sp:$Rn)>;
6945def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
6946          (LD1Rv16b GPR64sp:$Rn)>;
6947def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
6948          (LD1Rv4h GPR64sp:$Rn)>;
6949def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
6950          (LD1Rv8h GPR64sp:$Rn)>;
6951def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
6952          (LD1Rv2s GPR64sp:$Rn)>;
6953def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
6954          (LD1Rv4s GPR64sp:$Rn)>;
6955def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
6956          (LD1Rv2d GPR64sp:$Rn)>;
6957def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
6958          (LD1Rv1d GPR64sp:$Rn)>;
6959// Grab the floating point version too
6960def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
6961          (LD1Rv2s GPR64sp:$Rn)>;
6962def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
6963          (LD1Rv4s GPR64sp:$Rn)>;
6964def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
6965          (LD1Rv2d GPR64sp:$Rn)>;
6966def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
6967          (LD1Rv1d GPR64sp:$Rn)>;
6968def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
6969          (LD1Rv4h GPR64sp:$Rn)>;
6970def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
6971          (LD1Rv8h GPR64sp:$Rn)>;
6972def : Pat<(v4bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
6973          (LD1Rv4h GPR64sp:$Rn)>;
6974def : Pat<(v8bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
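// e.g. (v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))) should select
// "ld1r { v0.4s }, [x0]" rather than a scalar load followed by a DUP.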
6975          (LD1Rv8h GPR64sp:$Rn)>;
6976
6977class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
6978                    ValueType VTy, ValueType STy, Instruction LD1>
6979  : Pat<(vector_insert (VTy VecListOne128:$Rd),
6980           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
6981        (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;
6982
6983def : Ld1Lane128Pat<extloadi8,  VectorIndexB, v16i8, i32, LD1i8>;
6984def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
6985def : Ld1Lane128Pat<load,       VectorIndexS, v4i32, i32, LD1i32>;
6986def : Ld1Lane128Pat<load,       VectorIndexS, v4f32, f32, LD1i32>;
6987def : Ld1Lane128Pat<load,       VectorIndexD, v2i64, i64, LD1i64>;
6988def : Ld1Lane128Pat<load,       VectorIndexD, v2f64, f64, LD1i64>;
6989def : Ld1Lane128Pat<load,       VectorIndexH, v8f16, f16, LD1i16>;
6990def : Ld1Lane128Pat<load,       VectorIndexH, v8bf16, bf16, LD1i16>;
6991
6992// Generate LD1 for extload if memory type does not match the
6993// destination type, for example:
6994//
6995//   (v4i32 (insert_vector_elt (load anyext from i8) idx))
6996//
6997  // In this case, the index must be adjusted to match the LD1 type.
6998//
6999class Ld1Lane128IdxOpPat<SDPatternOperator scalar_load, Operand
7000                    VecIndex, ValueType VTy, ValueType STy,
7001                    Instruction LD1, SDNodeXForm IdxOp>
7002  : Pat<(vector_insert (VTy VecListOne128:$Rd),
7003                       (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
7004        (LD1 VecListOne128:$Rd, (IdxOp VecIndex:$idx), GPR64sp:$Rn)>;
7005
7006def VectorIndexStoH : SDNodeXForm<imm, [{
7007  return CurDAG->getTargetConstant(N->getZExtValue() * 2, SDLoc(N), MVT::i64);
7008}]>;
7009def VectorIndexStoB : SDNodeXForm<imm, [{
7010  return CurDAG->getTargetConstant(N->getZExtValue() * 4, SDLoc(N), MVT::i64);
7011}]>;
7012def VectorIndexHtoB : SDNodeXForm<imm, [{
7013  return CurDAG->getTargetConstant(N->getZExtValue() * 2, SDLoc(N), MVT::i64);
7014}]>;
7015
7016def : Ld1Lane128IdxOpPat<extloadi16, VectorIndexS, v4i32, i32, LD1i16, VectorIndexStoH>;
7017def : Ld1Lane128IdxOpPat<extloadi8, VectorIndexS, v4i32, i32, LD1i8, VectorIndexStoB>;
7018def : Ld1Lane128IdxOpPat<extloadi8, VectorIndexH, v8i16, i32, LD1i8, VectorIndexHtoB>;
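// e.g. an extloadi8 inserted into lane 1 of a v4i32 uses LD1i8 with byte
// lane 1 * 4 == 4, the little-endian byte offset of that 32-bit lane.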
7019
7020// Same as above, but the first element is populated using
7021// scalar_to_vector + insert_subvector instead of insert_vector_elt.
7022class Ld1Lane128FirstElm<ValueType ResultTy, ValueType VecTy,
7023                        SDPatternOperator ExtLoad, Instruction LD1>
7024  : Pat<(ResultTy (scalar_to_vector (i32 (ExtLoad GPR64sp:$Rn)))),
7025          (ResultTy (EXTRACT_SUBREG
7026            (LD1 (VecTy (IMPLICIT_DEF)), 0, GPR64sp:$Rn), dsub))>;
7027
7028def : Ld1Lane128FirstElm<v2i32, v8i16, extloadi16, LD1i16>;
7029def : Ld1Lane128FirstElm<v2i32, v16i8, extloadi8, LD1i8>;
7030def : Ld1Lane128FirstElm<v4i16, v16i8, extloadi8, LD1i8>;
7031
7032class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
7033                   ValueType VTy, ValueType STy, Instruction LD1>
7034  : Pat<(vector_insert (VTy VecListOne64:$Rd),
7035           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
7036        (EXTRACT_SUBREG
7037            (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
7038                          VecIndex:$idx, GPR64sp:$Rn),
7039            dsub)>;
7040
7041def : Ld1Lane64Pat<extloadi8,  VectorIndexB, v8i8,  i32, LD1i8>;
7042def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
7043def : Ld1Lane64Pat<load,       VectorIndexS, v2i32, i32, LD1i32>;
7044def : Ld1Lane64Pat<load,       VectorIndexS, v2f32, f32, LD1i32>;
7045def : Ld1Lane64Pat<load,       VectorIndexH, v4f16, f16, LD1i16>;
7046def : Ld1Lane64Pat<load,       VectorIndexH, v4bf16, bf16, LD1i16>;
7047
7048
7049defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
7050defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
7051defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
7052defm LD4 : SIMDLdSt4SingleAliases<"ld4">;
7053
7054// Stores
7055defm ST1 : SIMDStSingleB<0, 0b000,       "st1", VecListOneb, GPR64pi1>;
7056defm ST1 : SIMDStSingleH<0, 0b010, 0,    "st1", VecListOneh, GPR64pi2>;
7057defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
7058defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;
7059
7060let AddedComplexity = 19 in
7061class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
7062                    ValueType VTy, ValueType STy, Instruction ST1>
7063  : Pat<(scalar_store
7064             (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
7065             GPR64sp:$Rn),
7066        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;
7067
7068def : St1Lane128Pat<truncstorei8,  VectorIndexB, v16i8, i32, ST1i8>;
7069def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
7070def : St1Lane128Pat<store,         VectorIndexS, v4i32, i32, ST1i32>;
7071def : St1Lane128Pat<store,         VectorIndexS, v4f32, f32, ST1i32>;
7072def : St1Lane128Pat<store,         VectorIndexD, v2i64, i64, ST1i64>;
7073def : St1Lane128Pat<store,         VectorIndexD, v2f64, f64, ST1i64>;
7074def : St1Lane128Pat<store,         VectorIndexH, v8f16, f16, ST1i16>;
7075def : St1Lane128Pat<store,         VectorIndexH, v8bf16, bf16, ST1i16>;
7076
7077let AddedComplexity = 19 in
7078class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
7079                   ValueType VTy, ValueType STy, Instruction ST1>
7080  : Pat<(scalar_store
7081             (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
7082             GPR64sp:$Rn),
7083        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
7084             VecIndex:$idx, GPR64sp:$Rn)>;
7085
7086def : St1Lane64Pat<truncstorei8,  VectorIndexB, v8i8, i32, ST1i8>;
7087def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
7088def : St1Lane64Pat<store,         VectorIndexS, v2i32, i32, ST1i32>;
7089def : St1Lane64Pat<store,         VectorIndexS, v2f32, f32, ST1i32>;
7090def : St1Lane64Pat<store,         VectorIndexH, v4f16, f16, ST1i16>;
7091def : St1Lane64Pat<store,         VectorIndexH, v4bf16, bf16, ST1i16>;
7092
7093multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
7094                             ValueType VTy, ValueType STy, Instruction ST1,
7095                             int offset> {
7096  def : Pat<(scalar_store
7097              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
7098              GPR64sp:$Rn, offset),
7099        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
7100             VecIndex:$idx, GPR64sp:$Rn, XZR)>;
7101
7102  def : Pat<(scalar_store
7103              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
7104              GPR64sp:$Rn, GPR64:$Rm),
7105        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
7106             VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
7107}
7108
7109defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
7110defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
7111                        2>;
7112defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
7113defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
7114defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
7115defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
7116defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
7117defm : St1LanePost64Pat<post_store, VectorIndexH, v4bf16, bf16, ST1i16_POST, 2>;
7118
7119multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
7120                             ValueType VTy, ValueType STy, Instruction ST1,
7121                             int offset> {
7122  def : Pat<(scalar_store
7123              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
7124              GPR64sp:$Rn, offset),
7125        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;
7126
7127  def : Pat<(scalar_store
7128              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
7129              GPR64sp:$Rn, GPR64:$Rm),
7130        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
7131}
7132
7133defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
7134                         1>;
7135defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
7136                         2>;
7137defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
7138defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
7139defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
7140defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
7141defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
7142defm : St1LanePost128Pat<post_store, VectorIndexH, v8bf16, bf16, ST1i16_POST, 2>;
7143
7144let mayStore = 1, hasSideEffects = 0 in {
7145defm ST2 : SIMDStSingleB<1, 0b000,       "st2", VecListTwob,   GPR64pi2>;
7146defm ST2 : SIMDStSingleH<1, 0b010, 0,    "st2", VecListTwoh,   GPR64pi4>;
7147defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos,   GPR64pi8>;
7148defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod,   GPR64pi16>;
7149defm ST3 : SIMDStSingleB<0, 0b001,       "st3", VecListThreeb, GPR64pi3>;
7150defm ST3 : SIMDStSingleH<0, 0b011, 0,    "st3", VecListThreeh, GPR64pi6>;
7151defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
7152defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
7153defm ST4 : SIMDStSingleB<1, 0b001,       "st4", VecListFourb,  GPR64pi4>;
7154defm ST4 : SIMDStSingleH<1, 0b011, 0,    "st4", VecListFourh,  GPR64pi8>;
7155defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours,  GPR64pi16>;
7156defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd,  GPR64pi32>;
7157}
7158
7159defm ST1 : SIMDLdSt1SingleAliases<"st1">;
7160defm ST2 : SIMDLdSt2SingleAliases<"st2">;
7161defm ST3 : SIMDLdSt3SingleAliases<"st3">;
7162defm ST4 : SIMDLdSt4SingleAliases<"st4">;
7163
7164//----------------------------------------------------------------------------
7165// Crypto extensions
7166//----------------------------------------------------------------------------
7167
7168let Predicates = [HasAES] in {
7169def AESErr   : AESTiedInst<0b0100, "aese",   int_aarch64_crypto_aese>;
7170def AESDrr   : AESTiedInst<0b0101, "aesd",   int_aarch64_crypto_aesd>;
7171def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
7172def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
7173}
7174
7175// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
7176// for AES fusion on some CPUs.
7177let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
7178def AESMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
7179                        Sched<[WriteVq]>;
7180def AESIMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
7181                         Sched<[WriteVq]>;
7182}
7183
7184// Only use constrained versions of AES(I)MC instructions if they are paired with
7185// AESE/AESD.
7186def : Pat<(v16i8 (int_aarch64_crypto_aesmc
7187            (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
7188                                            (v16i8 V128:$src2))))),
7189          (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1),
7190                                             (v16i8 V128:$src2)))))>,
7191          Requires<[HasFuseAES]>;
7192
7193def : Pat<(v16i8 (int_aarch64_crypto_aesimc
7194            (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
7195                                            (v16i8 V128:$src2))))),
7196          (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1),
7197                                              (v16i8 V128:$src2)))))>,
7198          Requires<[HasFuseAES]>;
7199
7200let Predicates = [HasSHA2] in {
7201def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c",   int_aarch64_crypto_sha1c>;
7202def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p",   int_aarch64_crypto_sha1p>;
7203def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m",   int_aarch64_crypto_sha1m>;
7204def SHA1SU0rrr   : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
7205def SHA256Hrrr   : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
7206def SHA256H2rrr  : SHATiedInstQQV<0b101, "sha256h2",int_aarch64_crypto_sha256h2>;
7207def SHA256SU1rrr :SHATiedInstVVV<0b110, "sha256su1",int_aarch64_crypto_sha256su1>;
7208
7209def SHA1Hrr     : SHAInstSS<    0b0000, "sha1h",    int_aarch64_crypto_sha1h>;
7210def SHA1SU1rr   : SHATiedInstVV<0b0001, "sha1su1",  int_aarch64_crypto_sha1su1>;
7211def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",int_aarch64_crypto_sha256su0>;
7212}
7213
7214//----------------------------------------------------------------------------
7215// Compiler-pseudos
7216//----------------------------------------------------------------------------
7217// FIXME: Like for X86, these should go in their own separate .td file.
7218
7219def def32 : PatLeaf<(i32 GPR32:$src), [{
7220  return isDef32(*N);
7221}]>;
7222
7223// In the case of a 32-bit def that is known to implicitly zero-extend,
7224// we can use a SUBREG_TO_REG.
7225def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;
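// e.g. if $src is produced by a 32-bit ADDWrr, the W-register write already
// zeroed bits [63:32], so this zext costs no instruction at all.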
7226
7227// For an anyext, we don't care what the high bits are, so we can perform an
7228  // INSERT_SUBREG into an IMPLICIT_DEF.
7229def : Pat<(i64 (anyext GPR32:$src)),
7230          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;
7231
7232// When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
7233// then assert the extension has happened.
7234def : Pat<(i64 (zext GPR32:$src)),
7235          (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
7236
7237// To sign extend, we use a signed bitfield move instruction (SBFM) on the
7238// containing super-reg.
7239def : Pat<(i64 (sext GPR32:$src)),
7240   (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
7241def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
7242def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
7243def : Pat<(i64 (sext_inreg GPR64:$src, i8)),  (SBFMXri GPR64:$src, 0, 7)>;
7244def : Pat<(i64 (sext_inreg GPR64:$src, i1)),  (SBFMXri GPR64:$src, 0, 0)>;
7245def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
7246def : Pat<(i32 (sext_inreg GPR32:$src, i8)),  (SBFMWri GPR32:$src, 0, 7)>;
7247def : Pat<(i32 (sext_inreg GPR32:$src, i1)),  (SBFMWri GPR32:$src, 0, 0)>;
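// e.g. (i32 (sext_inreg GPR32:$src, i8)) becomes "sbfm w0, w0, #0, #7",
// better known by its "sxtb w0, w0" alias.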
7248
7249def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
7250          (SBFMWri GPR32:$Rn, (i64 (i32shift_a       imm0_31:$imm)),
7251                              (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
7252def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
7253          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
7254                              (i64 (i64shift_sext_i8 imm0_63:$imm)))>;
7255
7256def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
7257          (SBFMWri GPR32:$Rn, (i64 (i32shift_a        imm0_31:$imm)),
7258                              (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
7259def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
7260          (SBFMXri GPR64:$Rn, (i64 (i64shift_a        imm0_63:$imm)),
7261                              (i64 (i64shift_sext_i16 imm0_63:$imm)))>;
7262
7263def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
7264          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
7265                   (i64 (i64shift_a        imm0_63:$imm)),
7266                   (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
7267
7268// sra patterns have an AddedComplexity of 10, so make sure we have a higher
7269// AddedComplexity for the following patterns since we want to match sext + sra
7270// patterns before we attempt to match a single sra node.
7271let AddedComplexity = 20 in {
7272  // We support all sext + sra combinations that preserve at least one bit of
7273  // the original value being sign extended, i.e. we support shifts of up to
7274  // bitwidth-1 bits.
7275def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
7276          (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
7277def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
7278          (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;
7279
7280def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
7281          (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
7282def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
7283          (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;
7284
7285def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
7286          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
7287                   (i64 imm0_31:$imm), 31)>;
7288} // AddedComplexity = 20
7289
7290// To truncate, we can simply extract from a subregister.
7291def : Pat<(i32 (trunc GPR64sp:$src)),
7292          (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;
7293
7294// __builtin_trap() uses the BRK instruction on AArch64.
7295def : Pat<(trap), (BRK 1)>;
7296def : Pat<(debugtrap), (BRK 0xF000)>;
7297
7298def ubsan_trap_xform : SDNodeXForm<timm, [{
7299  return CurDAG->getTargetConstant(N->getZExtValue() | ('U' << 8), SDLoc(N), MVT::i32);
7300}]>;
7301
7302def ubsan_trap_imm : TImmLeaf<i32, [{
7303  return isUInt<8>(Imm);
7304}], ubsan_trap_xform>;
7305
7306def : Pat<(ubsantrap ubsan_trap_imm:$kind), (BRK ubsan_trap_imm:$kind)>;
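// e.g. (ubsantrap 42) should encode as "brk #0x552a", since
// ('U' << 8) == 0x5500 is or'd with the 8-bit kind.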
7307
7308// Multiply high patterns which multiply the lower subvector using smull/umull
7309  // and the upper subvector with smull2/umull2. Then shuffle the high
7310// part of both results together.
7311def : Pat<(v16i8 (mulhs V128:$Rn, V128:$Rm)),
7312          (UZP2v16i8
7313           (SMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
7314                            (EXTRACT_SUBREG V128:$Rm, dsub)),
7315           (SMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
7316def : Pat<(v8i16 (mulhs V128:$Rn, V128:$Rm)),
7317          (UZP2v8i16
7318           (SMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
7319                             (EXTRACT_SUBREG V128:$Rm, dsub)),
7320           (SMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
7321def : Pat<(v4i32 (mulhs V128:$Rn, V128:$Rm)),
7322          (UZP2v4i32
7323           (SMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
7324                             (EXTRACT_SUBREG V128:$Rm, dsub)),
7325           (SMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
7326
7327def : Pat<(v16i8 (mulhu V128:$Rn, V128:$Rm)),
7328          (UZP2v16i8
7329           (UMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
7330                            (EXTRACT_SUBREG V128:$Rm, dsub)),
7331           (UMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
7332def : Pat<(v8i16 (mulhu V128:$Rn, V128:$Rm)),
7333          (UZP2v8i16
7334           (UMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
7335                             (EXTRACT_SUBREG V128:$Rm, dsub)),
7336           (UMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
7337def : Pat<(v4i32 (mulhu V128:$Rn, V128:$Rm)),
7338          (UZP2v4i32
7339           (UMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
7340                             (EXTRACT_SUBREG V128:$Rm, dsub)),
7341           (UMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
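// e.g. for (v8i16 (mulhs ...)): smull/smull2 produce eight 32-bit products,
// and "uzp2" then collects the odd-numbered 16-bit halves, i.e. the high
// half of every product on a little-endian layout.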
7342
7343// Conversions within AdvSIMD types in the same register size are free.
7344// But because we need a consistent lane ordering, in big endian many
7345// conversions require one or more REV instructions.
7346//
7347// Consider a simple memory load followed by a bitconvert then a store.
7348//   v0 = load v2i32
7349//   v1 = BITCAST v2i32 v0 to v4i16
7350//        store v4i16 v2
7351//
7352// In big endian mode every memory access has an implicit byte swap. LDR and
7353// STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
7354// is, they treat the vector as a sequence of elements to be byte-swapped.
7355// The two pairs of instructions are fundamentally incompatible. We've decided
7356// to use LD1/ST1 only to simplify compiler implementation.
7357//
7358// LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
7359// the original code sequence:
7360//   v0 = load v2i32
7361//   v1 = REV v2i32                  (implicit)
7362//   v2 = BITCAST v2i32 v1 to v4i16
7363//   v3 = REV v4i16 v2               (implicit)
7364//        store v4i16 v3
7365//
7366  // But this is now broken - the value stored is different from the value loaded
7367// due to lane reordering. To fix this, on every BITCAST we must perform two
7368// other REVs:
7369//   v0 = load v2i32
7370//   v1 = REV v2i32                  (implicit)
7371//   v2 = REV v2i32
7372//   v3 = BITCAST v2i32 v2 to v4i16
7373//   v4 = REV v4i16
7374//   v5 = REV v4i16 v4               (implicit)
7375//        store v4i16 v5
7376//
7377// This means an extra two instructions, but actually in most cases the two REV
7378// instructions can be combined into one. For example:
7379//   (REV64_2s (REV64_4h X)) === (REV32_4h X)
7380//
7381// There is also no 128-bit REV instruction. This must be synthesized with an
7382// EXT instruction.
7383//
7384// Most bitconverts require some sort of conversion. The only exceptions are:
7385//   a) Identity conversions -  vNfX <-> vNiX
7386//   b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
7387//
7388
7389// Natural vector casts (64 bit)
7390def : Pat<(v8i8 (AArch64NvCast (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
7391def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
7392def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
7393def : Pat<(v4bf16 (AArch64NvCast (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
7394def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
7395def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
7396def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
7397
7398def : Pat<(v8i8 (AArch64NvCast (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
7399def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
7400def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
7401def : Pat<(v4bf16 (AArch64NvCast (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;
7402def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
7403def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
7404
7405def : Pat<(v8i8 (AArch64NvCast (v8i8 FPR64:$src))), (v8i8 FPR64:$src)>;
7406def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
7407def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
7408def : Pat<(v4bf16 (AArch64NvCast (v8i8 FPR64:$src))), (v4bf16 FPR64:$src)>;
7409def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
7410def : Pat<(v2f32 (AArch64NvCast (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
7411def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
7412
7413def : Pat<(v8i8 (AArch64NvCast (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
7414def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
7415def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
7416def : Pat<(v4bf16 (AArch64NvCast (f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
7417def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
7418def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
7419def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
7420def : Pat<(v1f64 (AArch64NvCast (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
7421
7422def : Pat<(v8i8 (AArch64NvCast (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
7423def : Pat<(v4i16 (AArch64NvCast (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
7424def : Pat<(v2i32 (AArch64NvCast (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
7425def : Pat<(v2f32 (AArch64NvCast (v2f32 FPR64:$src))), (v2f32 FPR64:$src)>;
7426def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
7427def : Pat<(v1f64 (AArch64NvCast (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
7428
7429// Natural vector casts (128-bit)
7430def : Pat<(v16i8 (AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
7431def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
7432def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
7433def : Pat<(v8bf16 (AArch64NvCast (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7434def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>;
7435def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
7436def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
7437def : Pat<(v2f64 (AArch64NvCast (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
7438
7439def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
7440def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>;
7441def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
7442def : Pat<(v8bf16 (AArch64NvCast (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;
7443def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
7444def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
7445def : Pat<(v4f32 (AArch64NvCast (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
7446def : Pat<(v2f64 (AArch64NvCast (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
7447
7448def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>;
7449def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
7450def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
7451def : Pat<(v8bf16 (AArch64NvCast (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
7452def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
7453def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
7454def : Pat<(v4f32 (AArch64NvCast (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
7455def : Pat<(v2f64 (AArch64NvCast (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
7456
7457def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
7458def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
7459def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
7460def : Pat<(v8bf16 (AArch64NvCast (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7461def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
7462def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>;
7463def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
7464def : Pat<(v2f64 (AArch64NvCast (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
7465
7466def : Pat<(v16i8 (AArch64NvCast (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
7467def : Pat<(v8i16 (AArch64NvCast (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
7468def : Pat<(v4i32 (AArch64NvCast (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
7469def : Pat<(v4f32 (AArch64NvCast (v4f32 FPR128:$src))), (v4f32 FPR128:$src)>;
7470def : Pat<(v2i64 (AArch64NvCast (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
7471def : Pat<(v8f16 (AArch64NvCast (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
7472def : Pat<(v8bf16 (AArch64NvCast (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7473def : Pat<(v2f64 (AArch64NvCast (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
7474
7475def : Pat<(v16i8 (AArch64NvCast (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
7476def : Pat<(v8i16 (AArch64NvCast (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
7477def : Pat<(v4i32 (AArch64NvCast (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
7478def : Pat<(v2i64 (AArch64NvCast (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
7479def : Pat<(v2f64 (AArch64NvCast (v2f64 FPR128:$src))), (v2f64 FPR128:$src)>;
7480def : Pat<(v8f16 (AArch64NvCast (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
7481def : Pat<(v8bf16 (AArch64NvCast (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7482def : Pat<(v4f32 (AArch64NvCast (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
7483
7484let Predicates = [IsLE] in {
7485def : Pat<(v8i8  (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7486def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7487def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7488def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7489def : Pat<(v4bf16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7490def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7491
7492def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
7493          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7494def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
7495          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7496def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
7497          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7498def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
7499          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7500def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
7501          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7502def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
7503          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7504def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
7505          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7506}
7507let Predicates = [IsBE] in {
7508def : Pat<(v8i8  (bitconvert GPR64:$Xn)),
7509                 (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7510def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
7511                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7512def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
7513                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7514def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
7515                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7516def : Pat<(v4bf16 (bitconvert GPR64:$Xn)),
7517                  (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7518def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
7519                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7520
7521def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
7522          (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7523def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
7524          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7525def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
7526          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7527def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
7528          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7529def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
7530          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7531def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
7532          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7533}
7534def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7535def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7536def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
7537          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7538def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
7539          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7540def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
7541          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7542def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;
7543
7544def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
7545          (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
7546def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
7547          (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
7548def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
7549          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7550def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
7551          (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
7552def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
7553          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7554
7555def : Pat<(f16 (bitconvert (bf16 FPR16:$src))), (f16 FPR16:$src)>;
7556def : Pat<(bf16 (bitconvert (f16 FPR16:$src))), (bf16 FPR16:$src)>;
7557
7558let Predicates = [IsLE] in {
7559def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
7560def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
7561def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))), (v1i64 FPR64:$src)>;
7562def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
7563def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))), (v1i64 FPR64:$src)>;
7564def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
7565}
7566let Predicates = [IsBE] in {
7567def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
7568                             (v1i64 (REV64v2i32 FPR64:$src))>;
7569def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
7570                             (v1i64 (REV64v4i16 FPR64:$src))>;
7571def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))),
7572                             (v1i64 (REV64v8i8 FPR64:$src))>;
7573def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
7574                             (v1i64 (REV64v4i16 FPR64:$src))>;
7575def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))),
7576                             (v1i64 (REV64v4i16 FPR64:$src))>;
7577def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
7578                             (v1i64 (REV64v2i32 FPR64:$src))>;
7579}
7580def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
7581def : Pat<(v1i64 (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;
7582
7583let Predicates = [IsLE] in {
7584def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
7585def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
7586def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))), (v2i32 FPR64:$src)>;
7587def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
7588def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
7589def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
7590def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))), (v2i32 FPR64:$src)>;
7591}
7592let Predicates = [IsBE] in {
7593def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
7594                             (v2i32 (REV64v2i32 FPR64:$src))>;
7595def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
7596                             (v2i32 (REV32v4i16 FPR64:$src))>;
7597def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))),
7598                             (v2i32 (REV32v8i8 FPR64:$src))>;
7599def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))),
7600                             (v2i32 (REV64v2i32 FPR64:$src))>;
7601def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
7602                             (v2i32 (REV64v2i32 FPR64:$src))>;
7603def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
7604                             (v2i32 (REV32v4i16 FPR64:$src))>;
7605def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))),
7606                             (v2i32 (REV32v4i16 FPR64:$src))>;
7607}
7608def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
7609
7610let Predicates = [IsLE] in {
7611def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
7612def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
7613def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))), (v4i16 FPR64:$src)>;
7614def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
7615def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
7616def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
7617}
7618let Predicates = [IsBE] in {
7619def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
7620                             (v4i16 (REV64v4i16 FPR64:$src))>;
7621def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
7622                             (v4i16 (REV32v4i16 FPR64:$src))>;
7623def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))),
7624                             (v4i16 (REV16v8i8 FPR64:$src))>;
7625def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))),
7626                             (v4i16 (REV64v4i16 FPR64:$src))>;
7627def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
7628                             (v4i16 (REV32v4i16 FPR64:$src))>;
7629def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
7630                             (v4i16 (REV64v4i16 FPR64:$src))>;
7631}
7632def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
7633def : Pat<(v4i16 (bitconvert (v4bf16 FPR64:$src))), (v4i16 FPR64:$src)>;
7634
7635let Predicates = [IsLE] in {
7636def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
7637def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
7638def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))), (v4f16 FPR64:$src)>;
7639def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))), (v4f16 FPR64:$src)>;
7640def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
7641def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;
7642
7643def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))), (v4bf16 FPR64:$src)>;
7644def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
7645def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))), (v4bf16 FPR64:$src)>;
7646def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))), (v4bf16 FPR64:$src)>;
7647def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))), (v4bf16 FPR64:$src)>;
7648def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
7649}
7650let Predicates = [IsBE] in {
7651def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
7652                             (v4f16 (REV64v4i16 FPR64:$src))>;
7653def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
7654                             (v4f16 (REV32v4i16 FPR64:$src))>;
7655def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))),
7656                             (v4f16 (REV16v8i8 FPR64:$src))>;
7657def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))),
7658                             (v4f16 (REV64v4i16 FPR64:$src))>;
7659def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
7660                             (v4f16 (REV32v4i16 FPR64:$src))>;
7661def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
7662                             (v4f16 (REV64v4i16 FPR64:$src))>;
7663
7664def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))),
7665                             (v4bf16 (REV64v4i16 FPR64:$src))>;
7666def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))),
7667                             (v4bf16 (REV32v4i16 FPR64:$src))>;
7668def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))),
7669                             (v4bf16 (REV16v8i8 FPR64:$src))>;
7670def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))),
7671                             (v4bf16 (REV64v4i16 FPR64:$src))>;
7672def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))),
7673                             (v4bf16 (REV32v4i16 FPR64:$src))>;
7674def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))),
7675                             (v4bf16 (REV64v4i16 FPR64:$src))>;
7676}
7677def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
7678def : Pat<(v4bf16 (bitconvert (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;
7679
7680let Predicates = [IsLE] in {
7681def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))), (v8i8  FPR64:$src)>;
7682def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))), (v8i8  FPR64:$src)>;
7683def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))), (v8i8  FPR64:$src)>;
7684def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))), (v8i8  FPR64:$src)>;
7685def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))), (v8i8  FPR64:$src)>;
7686def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))), (v8i8  FPR64:$src)>;
7687def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))), (v8i8  FPR64:$src)>;
7688def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))), (v8i8  FPR64:$src)>;
7689}
7690let Predicates = [IsBE] in {
7691def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))),
7692                             (v8i8 (REV64v8i8 FPR64:$src))>;
7693def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))),
7694                             (v8i8 (REV32v8i8 FPR64:$src))>;
7695def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))),
7696                             (v8i8 (REV16v8i8 FPR64:$src))>;
7697def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))),
7698                             (v8i8 (REV64v8i8 FPR64:$src))>;
7699def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))),
7700                             (v8i8 (REV32v8i8 FPR64:$src))>;
7701def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))),
7702                             (v8i8 (REV64v8i8 FPR64:$src))>;
7703def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))),
7704                             (v8i8 (REV16v8i8 FPR64:$src))>;
7705def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))),
7706                             (v8i8 (REV16v8i8 FPR64:$src))>;
7707}
7708
7709let Predicates = [IsLE] in {
7710def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))), (f64   FPR64:$src)>;
7711def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))), (f64   FPR64:$src)>;
7712def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))), (f64   FPR64:$src)>;
7713def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))), (f64   FPR64:$src)>;
7714def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))), (f64   FPR64:$src)>;
7715def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))), (f64   FPR64:$src)>;
7716}
7717let Predicates = [IsBE] in {
7718def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))),
7719                             (f64 (REV64v2i32 FPR64:$src))>;
7720def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))),
7721                             (f64 (REV64v4i16 FPR64:$src))>;
7722def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))),
7723                             (f64 (REV64v2i32 FPR64:$src))>;
7724def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))),
7725                             (f64 (REV64v8i8 FPR64:$src))>;
7726def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))),
7727                             (f64 (REV64v4i16 FPR64:$src))>;
7728def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))),
7729                             (f64 (REV64v4i16 FPR64:$src))>;
7730}
7731def : Pat<(f64   (bitconvert (v1i64 FPR64:$src))), (f64   FPR64:$src)>;
7732def : Pat<(f64   (bitconvert (v1f64 FPR64:$src))), (f64   FPR64:$src)>;
7733
7734let Predicates = [IsLE] in {
7735def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
7736def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
7737def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))), (v1f64 FPR64:$src)>;
7738def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
7739def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
7740def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))), (v1f64 FPR64:$src)>;
7741}
7742let Predicates = [IsBE] in {
7743def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
7744                             (v1f64 (REV64v2i32 FPR64:$src))>;
7745def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
7746                             (v1f64 (REV64v4i16 FPR64:$src))>;
7747def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))),
7748                             (v1f64 (REV64v8i8 FPR64:$src))>;
7749def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
7750                             (v1f64 (REV64v2i32 FPR64:$src))>;
7751def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
7752                             (v1f64 (REV64v4i16 FPR64:$src))>;
7753def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))),
7754                             (v1f64 (REV64v4i16 FPR64:$src))>;
7755}
7756def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
7757def : Pat<(v1f64 (bitconvert (f64   FPR64:$src))), (v1f64 FPR64:$src)>;
7758
7759let Predicates = [IsLE] in {
7760def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
7761def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
7762def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))), (v2f32 FPR64:$src)>;
7763def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
7764def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
7765def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
7766def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))), (v2f32 FPR64:$src)>;
7767}
7768let Predicates = [IsBE] in {
7769def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
7770                             (v2f32 (REV64v2i32 FPR64:$src))>;
7771def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
7772                             (v2f32 (REV32v4i16 FPR64:$src))>;
7773def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))),
7774                             (v2f32 (REV32v8i8 FPR64:$src))>;
7775def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
7776                             (v2f32 (REV64v2i32 FPR64:$src))>;
7777def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))),
7778                             (v2f32 (REV64v2i32 FPR64:$src))>;
7779def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
7780                             (v2f32 (REV32v4i16 FPR64:$src))>;
7781def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))),
7782                             (v2f32 (REV32v4i16 FPR64:$src))>;
7783}
7784def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
7785
7786let Predicates = [IsLE] in {
7787def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
7788def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
7789def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
7790def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
7791def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
7792def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
7793def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))), (f128 FPR128:$src)>;
7794def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
7795}
7796let Predicates = [IsBE] in {
7797def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
7798                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
7799def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
7800                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
7801                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
7802def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
7803                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
7804                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
7805def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
7806                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
7807                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
7808def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))),
7809                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
7810                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
7811def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
7812                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
7813def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
7814                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
7815                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
7816def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
7817                            (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
7818                                            (REV64v16i8 FPR128:$src), (i32 8)))>;
7819}
7820
7821let Predicates = [IsLE] in {
7822def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))), (v2f64 FPR128:$src)>;
7823def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
7824def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
7825def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
7826def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))), (v2f64 FPR128:$src)>;
7827def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
7828def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
7829}
7830let Predicates = [IsBE] in {
7831def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))),
7832                             (v2f64 (EXTv16i8 FPR128:$src,
7833                                              FPR128:$src, (i32 8)))>;
7834def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
7835                             (v2f64 (REV64v4i32 FPR128:$src))>;
7836def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
7837                             (v2f64 (REV64v8i16 FPR128:$src))>;
7838def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
7839                             (v2f64 (REV64v8i16 FPR128:$src))>;
7840def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))),
7841                             (v2f64 (REV64v8i16 FPR128:$src))>;
7842def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
7843                             (v2f64 (REV64v16i8 FPR128:$src))>;
7844def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
7845                             (v2f64 (REV64v4i32 FPR128:$src))>;
7846}
7847def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
7848
7849let Predicates = [IsLE] in {
7850def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))), (v4f32 FPR128:$src)>;
7851def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
7852def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
7853def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))), (v4f32 FPR128:$src)>;
7854def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
7855def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
7856def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
7857}
7858let Predicates = [IsBE] in {
7859def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))),
7860                             (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
7861                                    (REV64v4i32 FPR128:$src), (i32 8)))>;
7862def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
7863                             (v4f32 (REV32v8i16 FPR128:$src))>;
7864def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
7865                             (v4f32 (REV32v8i16 FPR128:$src))>;
7866def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))),
7867                             (v4f32 (REV32v8i16 FPR128:$src))>;
7868def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
7869                             (v4f32 (REV32v16i8 FPR128:$src))>;
7870def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
7871                             (v4f32 (REV64v4i32 FPR128:$src))>;
7872def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
7873                             (v4f32 (REV64v4i32 FPR128:$src))>;
7874}
7875def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
7876
7877let Predicates = [IsLE] in {
7878def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))), (v2i64 FPR128:$src)>;
7879def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
7880def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
7881def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
7882def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
7883def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
7884def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))), (v2i64 FPR128:$src)>;
7885}
7886let Predicates = [IsBE] in {
7887def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))),
7888                             (v2i64 (EXTv16i8 FPR128:$src,
7889                                              FPR128:$src, (i32 8)))>;
7890def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
7891                             (v2i64 (REV64v4i32 FPR128:$src))>;
7892def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
7893                             (v2i64 (REV64v8i16 FPR128:$src))>;
7894def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
7895                             (v2i64 (REV64v16i8 FPR128:$src))>;
7896def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
7897                             (v2i64 (REV64v4i32 FPR128:$src))>;
7898def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
7899                             (v2i64 (REV64v8i16 FPR128:$src))>;
7900def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))),
7901                             (v2i64 (REV64v8i16 FPR128:$src))>;
7902}
7903def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
7904
7905let Predicates = [IsLE] in {
7906def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))), (v4i32 FPR128:$src)>;
7907def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
7908def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
7909def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
7910def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
7911def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
7912def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))), (v4i32 FPR128:$src)>;
7913}
7914let Predicates = [IsBE] in {
7915def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))),
7916                             (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
7917                                              (REV64v4i32 FPR128:$src),
7918                                              (i32 8)))>;
7919def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
7920                             (v4i32 (REV64v4i32 FPR128:$src))>;
7921def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
7922                             (v4i32 (REV32v8i16 FPR128:$src))>;
7923def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
7924                             (v4i32 (REV32v16i8 FPR128:$src))>;
7925def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
7926                             (v4i32 (REV64v4i32 FPR128:$src))>;
7927def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
7928                             (v4i32 (REV32v8i16 FPR128:$src))>;
7929def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))),
7930                             (v4i32 (REV32v8i16 FPR128:$src))>;
7931}
7932def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
7933
7934let Predicates = [IsLE] in {
7935def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))), (v8i16 FPR128:$src)>;
7936def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
7937def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
7938def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
7939def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
7940def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
7941}
7942let Predicates = [IsBE] in {
7943def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))),
7944                             (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
7945                                              (REV64v8i16 FPR128:$src),
7946                                              (i32 8)))>;
7947def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
7948                             (v8i16 (REV64v8i16 FPR128:$src))>;
7949def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
7950                             (v8i16 (REV32v8i16 FPR128:$src))>;
7951def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
7952                             (v8i16 (REV16v16i8 FPR128:$src))>;
7953def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
7954                             (v8i16 (REV64v8i16 FPR128:$src))>;
7955def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
7956                             (v8i16 (REV32v8i16 FPR128:$src))>;
7957}
7958def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
7959def : Pat<(v8i16 (bitconvert (v8bf16 FPR128:$src))), (v8i16 FPR128:$src)>;
7960
7961let Predicates = [IsLE] in {
7962def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))), (v8f16 FPR128:$src)>;
7963def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
7964def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
7965def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
7966def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
7967def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
7968
7969def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))), (v8bf16 FPR128:$src)>;
7970def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7971def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7972def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
7973def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7974def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7975}
7976let Predicates = [IsBE] in {
7977def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))),
7978                             (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
7979                                              (REV64v8i16 FPR128:$src),
7980                                              (i32 8)))>;
7981def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
7982                             (v8f16 (REV64v8i16 FPR128:$src))>;
7983def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
7984                             (v8f16 (REV32v8i16 FPR128:$src))>;
7985def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
7986                             (v8f16 (REV16v16i8 FPR128:$src))>;
7987def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
7988                             (v8f16 (REV64v8i16 FPR128:$src))>;
7989def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
7990                             (v8f16 (REV32v8i16 FPR128:$src))>;
7991
7992def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))),
7993                             (v8bf16 (EXTv16i8 (REV64v8i16 FPR128:$src),
7994                                              (REV64v8i16 FPR128:$src),
7995                                              (i32 8)))>;
7996def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))),
7997                             (v8bf16 (REV64v8i16 FPR128:$src))>;
7998def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))),
7999                             (v8bf16 (REV32v8i16 FPR128:$src))>;
8000def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))),
8001                             (v8bf16 (REV16v16i8 FPR128:$src))>;
8002def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))),
8003                             (v8bf16 (REV64v8i16 FPR128:$src))>;
8004def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))),
8005                             (v8bf16 (REV32v8i16 FPR128:$src))>;
8006}
8007def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
8008def : Pat<(v8bf16 (bitconvert (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;
8009
8010let Predicates = [IsLE] in {
8011def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))), (v16i8 FPR128:$src)>;
8012def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
8013def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
8014def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
8015def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
8016def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
8017def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
8018def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))), (v16i8 FPR128:$src)>;
8019}
8020let Predicates = [IsBE] in {
8021def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))),
8022                             (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
8023                                              (REV64v16i8 FPR128:$src),
8024                                              (i32 8)))>;
8025def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
8026                             (v16i8 (REV64v16i8 FPR128:$src))>;
8027def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
8028                             (v16i8 (REV32v16i8 FPR128:$src))>;
8029def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
8030                             (v16i8 (REV16v16i8 FPR128:$src))>;
8031def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
8032                             (v16i8 (REV64v16i8 FPR128:$src))>;
8033def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
8034                             (v16i8 (REV32v16i8 FPR128:$src))>;
8035def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
8036                             (v16i8 (REV16v16i8 FPR128:$src))>;
8037def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))),
8038                             (v16i8 (REV16v16i8 FPR128:$src))>;
8039}
8040
8041def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
8042           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8043def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
8044           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8045def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
8046           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8047def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
8048           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8049def : Pat<(v4bf16 (extract_subvector V128:$Rn, (i64 0))),
8050           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8051def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
8052           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8053def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
8054           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8055def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
8056           (EXTRACT_SUBREG V128:$Rn, dsub)>;
8057
8058def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
8059          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
8060def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
8061          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
8062def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
8063          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
8064def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
8065          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
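// In the high-half extracts above, DUPv2i64lane broadcasts 64-bit lane 1
// across a fresh register (roughly "dup v0.2d, v1.d[1]"; register numbers
// are illustrative), and the dsub EXTRACT_SUBREG then reads its low half
// for free.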
8066
8067// A 64-bit subvector insert to the first 128-bit vector position
8068// is a subregister copy that needs no instruction.
8069multiclass InsertSubvectorUndef<ValueType Ty> {
8070  def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (Ty 0)),
8071            (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8072  def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (Ty 0)),
8073            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8074  def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (Ty 0)),
8075            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8076  def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (Ty 0)),
8077            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8078  def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (Ty 0)),
8079            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8080  def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (Ty 0)),
8081            (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8082  def : Pat<(insert_subvector undef, (v4bf16 FPR64:$src), (Ty 0)),
8083            (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8084  def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (Ty 0)),
8085            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
8086}
8087
8088defm : InsertSubvectorUndef<i32>;
8089defm : InsertSubvectorUndef<i64>;
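// The two instantiations above cover the subvector index constant showing
// up as either an i32 or an i64 in the DAG; the emitted patterns are
// otherwise identical.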
8090
8091// Use pairwise add instructions when summing up the lanes for v2f64, v2i64,
8092// v2f32 and the low two f16 lanes of v8f16.
8093def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
8094                    (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
8095           (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
8096def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
8097                     (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
8098           (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
8099// vector_extract on 64-bit vectors gets promoted to a 128-bit vector,
8100// so we match on v4f32 here, not v2f32. This will also catch adding
8101// the low two lanes of a true v4f32 vector.
8102def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
8103                (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
8104          (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
8105def : Pat<(fadd (vector_extract (v8f16 FPR128:$Rn), (i64 0)),
8106                (vector_extract (v8f16 FPR128:$Rn), (i64 1))),
8107          (f16 (FADDPv2i16p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
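// For example, the first pattern above turns the sum of both v2i64 lanes
// into a single scalar pairwise add, roughly "addp d0, v0.2d", and the
// v4f32/v8f16 patterns likewise become "faddp s0, v0.2s" and
// "faddp h0, v0.2h" on the low half (a sketch; register numbers are
// illustrative).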
8108
8109// Scalar 64-bit shifts in FPR64 registers.
8110def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
8111          (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
8112def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
8113          (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
8114def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
8115          (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
8116def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
8117          (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
8118
8119// Patterns for nontemporal/no-allocate stores.
8120// We have to resort to tricks to turn a single-input store into a store pair,
8121// because there is no single-input nontemporal store, only STNP.
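// For the 128-bit patterns below, this means splitting $Rt into its two
// 64-bit halves (EXTRACT_SUBREG dsub for the low half, DUPi64 lane 1 for
// the high half) and storing them as a pair, roughly:
//   mov  d1, v0.d[1]
//   stnp d0, d1, [x0, #offset]
// (a sketch; register names are illustrative).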
8122let Predicates = [IsLE] in {
8123let AddedComplexity = 15 in {
8124class NTStore128Pat<ValueType VT> :
8125  Pat<(nontemporalstore (VT FPR128:$Rt),
8126        (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
8127      (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
8128              (DUPi64 FPR128:$Rt, (i64 1)),
8129              GPR64sp:$Rn, simm7s8:$offset)>;
8130
8131def : NTStore128Pat<v2i64>;
8132def : NTStore128Pat<v4i32>;
8133def : NTStore128Pat<v8i16>;
8134def : NTStore128Pat<v16i8>;
8135
8136class NTStore64Pat<ValueType VT> :
8137  Pat<(nontemporalstore (VT FPR64:$Rt),
8138        (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
8139      (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
8140              (DUPi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
8141              GPR64sp:$Rn, simm7s4:$offset)>;
8142
8143// FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
8144def : NTStore64Pat<v1f64>;
8145def : NTStore64Pat<v1i64>;
8146def : NTStore64Pat<v2i32>;
8147def : NTStore64Pat<v4i16>;
8148def : NTStore64Pat<v8i8>;
8149
8150def : Pat<(nontemporalstore GPR64:$Rt,
8151            (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
8152          (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
8153                  (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
8154                  GPR64sp:$Rn, simm7s4:$offset)>;
8155} // AddedComplexity=15
8156} // Predicates = [IsLE]
8157
8158// Tail call return handling. These are all compiler pseudo-instructions,
8159// so they carry no encoding information.
8160let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
8161  def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
8162                   Sched<[WriteBrReg]>;
8163  def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
8164                   Sched<[WriteBrReg]>;
8165  // Indirect tail-call with any register allowed; used by the MachineOutliner
8166  // when this is proven safe.
8167  // FIXME: If we have to add any more hacks like this, we should instead relax
8168  // some verifier checks for outlined functions.
8169  def TCRETURNriALL : Pseudo<(outs), (ins GPR64:$dst, i32imm:$FPDiff), []>,
8170                      Sched<[WriteBrReg]>;
8171  // Indirect tail-call restricted to registers (x16 and x17) that are
8172  // allowed to tail-call to a "BTI c" instruction.
8173  def TCRETURNriBTI : Pseudo<(outs), (ins rtcGPR64:$dst, i32imm:$FPDiff), []>,
8174                      Sched<[WriteBrReg]>;
8175}
8176
8177def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
8178          (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>,
8179      Requires<[NotUseBTI]>;
8180def : Pat<(AArch64tcret rtcGPR64:$dst, (i32 timm:$FPDiff)),
8181          (TCRETURNriBTI rtcGPR64:$dst, imm:$FPDiff)>,
8182      Requires<[UseBTI]>;
8183def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
8184          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
8185def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
8186          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
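// A hedged sketch of the end result: after frame lowering these pseudos
// become a plain "b <target>" (TCRETURNdi) or "br <reg>" (TCRETURNri*),
// with $FPDiff recording the stack adjustment the epilogue makes first.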
8187
8188def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
8189def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;
8190
8191// Extracting lane zero is a special case where we can just use a plain
8192// EXTRACT_SUBREG instruction, which will become FMOV. This is easier for the
8193// rest of the compiler, especially the register allocator and copy propagation,
8194// to reason about, so it is preferred whenever possible.
8195let AddedComplexity = 10 in {
8196  def : Pat<(i64 (extractelt (v2i64 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, dsub)>;
8197  def : Pat<(i32 (extractelt (v4i32 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, ssub)>;
8198  def : Pat<(i32 (extractelt (v2i32 V64:$V), (i64 0))), (EXTRACT_SUBREG V64:$V, ssub)>;
8199}
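// For example, (i64 (extractelt (v2i64 V), (i64 0))) becomes a dsub
// subregister read, which a later cross-class copy turns into
// "fmov x0, d0" (register numbers are illustrative).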
8200
8201// dot_v4i8
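// A hedged sketch of the idiom the classes below recognize: four i8 loads
// from consecutive offsets of two pointers, widened, multiplied and summed,
// roughly
//   sum += (int)p[i] * (int)q[i]   for i = 0..3
// which is selected to a single [SU]DOT of two 32-bit loads against a
// zeroed accumulator (DUPv2i32gpr WZR).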
8202class mul_v4i8<SDPatternOperator ldop> :
8203  PatFrag<(ops node:$Rn, node:$Rm, node:$offset),
8204          (mul (ldop (add node:$Rn, node:$offset)),
8205               (ldop (add node:$Rm, node:$offset)))>;
8206class mulz_v4i8<SDPatternOperator ldop> :
8207  PatFrag<(ops node:$Rn, node:$Rm),
8208          (mul (ldop node:$Rn), (ldop node:$Rm))>;
8209
8210def load_v4i8 :
8211  OutPatFrag<(ops node:$R),
8212             (INSERT_SUBREG
8213              (v2i32 (IMPLICIT_DEF)),
8214               (i32 (COPY_TO_REGCLASS (LDRWui node:$R, (i64 0)), FPR32)),
8215              ssub)>;
8216
8217class dot_v4i8<Instruction DOT, SDPatternOperator ldop> :
8218  Pat<(i32 (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 3)),
8219           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 2)),
8220           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 1)),
8221                (mulz_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm))))),
8222      (EXTRACT_SUBREG (i64 (DOT (DUPv2i32gpr WZR),
8223                                (load_v4i8 GPR64sp:$Rn),
8224                                (load_v4i8 GPR64sp:$Rm))),
8225                      sub_32)>, Requires<[HasDotProd]>;
8226
8227// dot_v8i8
8228class ee_v8i8<SDPatternOperator extend> :
8229  PatFrag<(ops node:$V, node:$K),
8230          (v4i16 (extract_subvector (v8i16 (extend node:$V)), node:$K))>;
8231
8232class mul_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
8233  PatFrag<(ops node:$M, node:$N, node:$K),
8234          (mulop (v4i16 (ee_v8i8<extend> node:$M, node:$K)),
8235                 (v4i16 (ee_v8i8<extend> node:$N, node:$K)))>;
8236
8237class idot_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
8238  PatFrag<(ops node:$M, node:$N),
8239          (i32 (extractelt
8240           (v4i32 (AArch64uaddv
8241            (add (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 0)),
8242                 (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 4))))),
8243           (i64 0)))>;
8244
8245// vaddv_[su]32 is special: lower to ADDP Vd.2S, Vn.2S, Vm.2S with Vn == Vm; the result is Vd.s[0].
8246def VADDV_32 : OutPatFrag<(ops node:$R), (ADDPv2i32 node:$R, node:$R)>;
8247
8248class odot_v8i8<Instruction DOT> :
8249  OutPatFrag<(ops node:$Vm, node:$Vn),
8250             (EXTRACT_SUBREG
8251              (VADDV_32
8252               (i64 (DOT (DUPv2i32gpr WZR),
8253                         (v8i8 node:$Vm),
8254                         (v8i8 node:$Vn)))),
8255              sub_32)>;
8256
8257class dot_v8i8<Instruction DOT, SDPatternOperator mulop,
8258                    SDPatternOperator extend> :
8259  Pat<(idot_v8i8<mulop, extend> V64:$Vm, V64:$Vn),
8260      (odot_v8i8<DOT> V64:$Vm, V64:$Vn)>,
8261  Requires<[HasDotProd]>;
8262
8263// dot_v16i8
8264class ee_v16i8<SDPatternOperator extend> :
8265  PatFrag<(ops node:$V, node:$K1, node:$K2),
8266          (v4i16 (extract_subvector
8267           (v8i16 (extend
8268            (v8i8 (extract_subvector node:$V, node:$K1)))), node:$K2))>;
8269
8270class mul_v16i8<SDPatternOperator mulop, SDPatternOperator extend> :
8271  PatFrag<(ops node:$M, node:$N, node:$K1, node:$K2),
8272          (v4i32
8273           (mulop (v4i16 (ee_v16i8<extend> node:$M, node:$K1, node:$K2)),
8274                  (v4i16 (ee_v16i8<extend> node:$N, node:$K1, node:$K2))))>;
8275
8276class idot_v16i8<SDPatternOperator m, SDPatternOperator x> :
8277  PatFrag<(ops node:$M, node:$N),
8278          (i32 (extractelt
8279           (v4i32 (AArch64uaddv
8280            (add
8281             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 0)),
8282                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 0))),
8283             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 4)),
8284                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 4)))))),
8285           (i64 0)))>;
8286
8287class odot_v16i8<Instruction DOT> :
8288  OutPatFrag<(ops node:$Vm, node:$Vn),
8289             (i32 (ADDVv4i32v
8290              (DOT (DUPv4i32gpr WZR), node:$Vm, node:$Vn)))>;
8291
8292class dot_v16i8<Instruction DOT, SDPatternOperator mulop,
8293                SDPatternOperator extend> :
8294  Pat<(idot_v16i8<mulop, extend> V128:$Vm, V128:$Vn),
8295      (odot_v16i8<DOT> V128:$Vm, V128:$Vn)>,
8296  Requires<[HasDotProd]>;
8297
8298let AddedComplexity = 10 in {
8299  def : dot_v4i8<SDOTv8i8, sextloadi8>;
8300  def : dot_v4i8<UDOTv8i8, zextloadi8>;
8301  def : dot_v8i8<SDOTv8i8, AArch64smull, sext>;
8302  def : dot_v8i8<UDOTv8i8, AArch64umull, zext>;
8303  def : dot_v16i8<SDOTv16i8, AArch64smull, sext>;
8304  def : dot_v16i8<UDOTv16i8, AArch64umull, zext>;
8305
8306  // FIXME: add patterns to generate vector by element dot product.
8307  // FIXME: add SVE dot-product patterns.
8308}
8309
8310// Custom DAG nodes and isel rules to make a 64-byte block out of eight GPRs,
8311// so that it can be used as an inline-asm input, and back out again for asm outputs.
8312def LS64_BUILD : SDNode<"AArch64ISD::LS64_BUILD", SDTypeProfile<1, 8, []>>;
8313def LS64_EXTRACT : SDNode<"AArch64ISD::LS64_EXTRACT", SDTypeProfile<1, 2, []>>;
8314def : Pat<(i64x8 (LS64_BUILD GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3,
8315                             GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7)),
8316          (REG_SEQUENCE GPR64x8Class,
8317              $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3,
8318              $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7)>;
8319foreach i = 0-7 in {
8320  def : Pat<(i64 (LS64_EXTRACT (i64x8 GPR64x8:$val), (i32 i))),
8321            (EXTRACT_SUBREG $val, !cast<SubRegIndex>("x8sub_"#i))>;
8322}
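// Usage note: the REG_SEQUENCE above glues eight x-registers into a single
// GPR64x8Class tuple, and LS64_EXTRACT of element i is just an
// EXTRACT_SUBREG of x8sub_<i>, i.e. a subregister copy with no instruction
// of its own.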
8323
8324let Predicates = [HasLS64] in {
8325  def LD64B: LoadStore64B<0b101, "ld64b", (ins GPR64sp:$Rn),
8326                                          (outs GPR64x8:$Rt)>;
8327  def ST64B: LoadStore64B<0b001, "st64b", (ins GPR64x8:$Rt, GPR64sp:$Rn),
8328                                          (outs)>;
8329  def ST64BV:   Store64BV<0b011, "st64bv">;
8330  def ST64BV0:  Store64BV<0b010, "st64bv0">;
8331
8332  class ST64BPattern<Intrinsic intrinsic, Instruction instruction>
8333    : Pat<(intrinsic GPR64sp:$addr, GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3, GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7),
8334          (instruction (REG_SEQUENCE GPR64x8Class, $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3, $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7), $addr)>;
8335
8336  def : ST64BPattern<int_aarch64_st64b, ST64B>;
8337  def : ST64BPattern<int_aarch64_st64bv, ST64BV>;
8338  def : ST64BPattern<int_aarch64_st64bv0, ST64BV0>;
8339}
8340
8341let Predicates = [HasMOPS] in {
8342  let Defs = [NZCV] in {
8343    defm CPYFP : MOPSMemoryCopyInsns<0b00, "cpyfp">;
8344
8345    defm CPYP : MOPSMemoryMoveInsns<0b00, "cpyp">;
8346
8347    defm SETP : MOPSMemorySetInsns<0b00, "setp">;
8348  }
8349  let Uses = [NZCV] in {
8350    defm CPYFM : MOPSMemoryCopyInsns<0b01, "cpyfm">;
8351    defm CPYFE : MOPSMemoryCopyInsns<0b10, "cpyfe">;
8352
8353    defm CPYM : MOPSMemoryMoveInsns<0b01, "cpym">;
8354    defm CPYE : MOPSMemoryMoveInsns<0b10, "cpye">;
8355
8356    defm SETM : MOPSMemorySetInsns<0b01, "setm">;
8357    defm SETE : MOPSMemorySetInsns<0b10, "sete">;
8358  }
8359}
8360let Predicates = [HasMOPS, HasMTE] in {
8361  let Defs = [NZCV] in {
8362    defm SETGP     : MOPSMemorySetTaggingInsns<0b00, "setgp">;
8363  }
8364  let Uses = [NZCV] in {
8365    defm SETGM     : MOPSMemorySetTaggingInsns<0b01, "setgm">;
8366    // Can't use SETGE because it's a reserved name in TargetSelectionDAG.td
8367    defm MOPSSETGE : MOPSMemorySetTaggingInsns<0b10, "setge">;
8368  }
8369}
8370
8371// MOPS Node operands: 0: Dst, 1: Src or Value, 2: Size, 3: Chain
8372// MOPS Node results: 0: Dst writeback, 1: Size writeback, 2: Chain
8373def SDT_AArch64mops : SDTypeProfile<2, 3, [ SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2> ]>;
8374def AArch64mops_memset : SDNode<"AArch64ISD::MOPS_MEMSET", SDT_AArch64mops>;
8375def AArch64mops_memset_tagging : SDNode<"AArch64ISD::MOPS_MEMSET_TAGGING", SDT_AArch64mops>;
8376def AArch64mops_memcopy : SDNode<"AArch64ISD::MOPS_MEMCOPY", SDT_AArch64mops>;
8377def AArch64mops_memmove : SDNode<"AArch64ISD::MOPS_MEMMOVE", SDT_AArch64mops>;
8378
8379// MOPS operations always contain three 4-byte instructions
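// (For example, a MOPS memset is the prologue/main/epilogue triple
// setp/setm/sete defined above, which is where Size = 12 comes from.)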
8380let Predicates = [HasMOPS], Defs = [NZCV], Size = 12, mayStore = 1 in {
8381  let mayLoad = 1 in {
8382    def MOPSMemoryCopyPseudo : Pseudo<(outs GPR64common:$Rd_wb, GPR64common:$Rs_wb, GPR64:$Rn_wb),
8383                                      (ins GPR64common:$Rd, GPR64common:$Rs, GPR64:$Rn),
8384                                      [], "$Rd = $Rd_wb,$Rs = $Rs_wb,$Rn = $Rn_wb">, Sched<[]>;
8385    def MOPSMemoryMovePseudo : Pseudo<(outs GPR64common:$Rd_wb, GPR64common:$Rs_wb, GPR64:$Rn_wb),
8386                                      (ins GPR64common:$Rd, GPR64common:$Rs, GPR64:$Rn),
8387                                      [], "$Rd = $Rd_wb,$Rs = $Rs_wb,$Rn = $Rn_wb">, Sched<[]>;
8388  }
8389  let mayLoad = 0 in {
8390    def MOPSMemorySetPseudo  : Pseudo<(outs GPR64common:$Rd_wb, GPR64:$Rn_wb),
8391                                      (ins GPR64common:$Rd, GPR64:$Rn, GPR64:$Rm),
8392                                      [], "$Rd = $Rd_wb,$Rn = $Rn_wb">, Sched<[]>;
8393  }
8394}
8395let Predicates = [HasMOPS, HasMTE], Defs = [NZCV], Size = 12, mayLoad = 0, mayStore = 1 in {
8396  def MOPSMemorySetTaggingPseudo : Pseudo<(outs GPR64common:$Rd_wb, GPR64:$Rn_wb),
8397                                          (ins GPR64common:$Rd, GPR64:$Rn, GPR64:$Rm),
8398                                          [], "$Rd = $Rd_wb,$Rn = $Rn_wb">, Sched<[]>;
8399}
8400
8401// This gets lowered into an instruction sequence of 20 bytes (five instructions)
8402let Defs = [X16, X17], mayStore = 1, isCodeGenOnly = 1, Size = 20 in
8403def StoreSwiftAsyncContext
8404      : Pseudo<(outs), (ins GPR64:$ctx, GPR64sp:$base, simm9:$offset),
8405               []>, Sched<[]>;
8406
8407def AArch64AssertZExtBool : SDNode<"AArch64ISD::ASSERT_ZEXT_BOOL", SDT_assert>;
8408def : Pat<(AArch64AssertZExtBool GPR32:$op),
8409          (i32 GPR32:$op)>;
8410
8411include "AArch64InstrAtomics.td"
8412include "AArch64SVEInstrInfo.td"
8413include "AArch64SMEInstrInfo.td"
8414include "AArch64InstrGISel.td"
8415