//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// AArch64 Instruction definitions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
//
def HasV8_1a         : Predicate<"Subtarget->hasV8_1aOps()">,
                                 AssemblerPredicate<(all_of HasV8_1aOps), "armv8.1a">;
def HasV8_2a         : Predicate<"Subtarget->hasV8_2aOps()">,
                                 AssemblerPredicate<(all_of HasV8_2aOps), "armv8.2a">;
def HasV8_3a         : Predicate<"Subtarget->hasV8_3aOps()">,
                                 AssemblerPredicate<(all_of HasV8_3aOps), "armv8.3a">;
def HasV8_4a         : Predicate<"Subtarget->hasV8_4aOps()">,
                                 AssemblerPredicate<(all_of HasV8_4aOps), "armv8.4a">;
def HasV8_5a         : Predicate<"Subtarget->hasV8_5aOps()">,
                                 AssemblerPredicate<(all_of HasV8_5aOps), "armv8.5a">;
def HasV8_6a         : Predicate<"Subtarget->hasV8_6aOps()">,
                                 AssemblerPredicate<(all_of HasV8_6aOps), "armv8.6a">;
def HasV8_7a         : Predicate<"Subtarget->hasV8_7aOps()">,
                                 AssemblerPredicate<(all_of HasV8_7aOps), "armv8.7a">;
def HasVH            : Predicate<"Subtarget->hasVH()">,
                       AssemblerPredicate<(all_of FeatureVH), "vh">;

def HasLOR           : Predicate<"Subtarget->hasLOR()">,
                       AssemblerPredicate<(all_of FeatureLOR), "lor">;

def HasPAuth         : Predicate<"Subtarget->hasPAuth()">,
                       AssemblerPredicate<(all_of FeaturePAuth), "pauth">;

def HasJS            : Predicate<"Subtarget->hasJS()">,
                       AssemblerPredicate<(all_of FeatureJS), "jsconv">;

def HasCCIDX         : Predicate<"Subtarget->hasCCIDX()">,
                       AssemblerPredicate<(all_of FeatureCCIDX), "ccidx">;

def HasComplxNum     : Predicate<"Subtarget->hasComplxNum()">,
                       AssemblerPredicate<(all_of FeatureComplxNum), "complxnum">;

def HasNV            : Predicate<"Subtarget->hasNV()">,
                       AssemblerPredicate<(all_of FeatureNV), "nv">;

def HasMPAM          : Predicate<"Subtarget->hasMPAM()">,
                       AssemblerPredicate<(all_of FeatureMPAM), "mpam">;

def HasDIT           : Predicate<"Subtarget->hasDIT()">,
                       AssemblerPredicate<(all_of FeatureDIT), "dit">;

def HasTRACEV8_4     : Predicate<"Subtarget->hasTRACEV8_4()">,
                       AssemblerPredicate<(all_of FeatureTRACEV8_4), "tracev8.4">;

def HasAM            : Predicate<"Subtarget->hasAM()">,
                       AssemblerPredicate<(all_of FeatureAM), "am">;

def HasSEL2          : Predicate<"Subtarget->hasSEL2()">,
                       AssemblerPredicate<(all_of FeatureSEL2), "sel2">;

def HasPMU           : Predicate<"Subtarget->hasPMU()">,
                       AssemblerPredicate<(all_of FeaturePMU), "pmu">;

def HasTLB_RMI       : Predicate<"Subtarget->hasTLB_RMI()">,
                       AssemblerPredicate<(all_of FeatureTLB_RMI), "tlb-rmi">;

def HasFlagM         : Predicate<"Subtarget->hasFlagM()">,
                       AssemblerPredicate<(all_of FeatureFlagM), "flagm">;

def HasRCPC_IMMO     : Predicate<"Subtarget->hasRCPCImm()">,
                       AssemblerPredicate<(all_of FeatureRCPC_IMMO), "rcpc-immo">;

def HasFPARMv8       : Predicate<"Subtarget->hasFPARMv8()">,
                                 AssemblerPredicate<(all_of FeatureFPARMv8), "fp-armv8">;
def HasNEON          : Predicate<"Subtarget->hasNEON()">,
                                 AssemblerPredicate<(all_of FeatureNEON), "neon">;
def HasCrypto        : Predicate<"Subtarget->hasCrypto()">,
                                 AssemblerPredicate<(all_of FeatureCrypto), "crypto">;
def HasSM4           : Predicate<"Subtarget->hasSM4()">,
                                 AssemblerPredicate<(all_of FeatureSM4), "sm4">;
def HasSHA3          : Predicate<"Subtarget->hasSHA3()">,
                                 AssemblerPredicate<(all_of FeatureSHA3), "sha3">;
def HasSHA2          : Predicate<"Subtarget->hasSHA2()">,
                                 AssemblerPredicate<(all_of FeatureSHA2), "sha2">;
def HasAES           : Predicate<"Subtarget->hasAES()">,
                                 AssemblerPredicate<(all_of FeatureAES), "aes">;
def HasDotProd       : Predicate<"Subtarget->hasDotProd()">,
                                 AssemblerPredicate<(all_of FeatureDotProd), "dotprod">;
def HasCRC           : Predicate<"Subtarget->hasCRC()">,
                                 AssemblerPredicate<(all_of FeatureCRC), "crc">;
def HasLSE           : Predicate<"Subtarget->hasLSE()">,
                                 AssemblerPredicate<(all_of FeatureLSE), "lse">;
def HasRAS           : Predicate<"Subtarget->hasRAS()">,
                                 AssemblerPredicate<(all_of FeatureRAS), "ras">;
def HasRDM           : Predicate<"Subtarget->hasRDM()">,
                                 AssemblerPredicate<(all_of FeatureRDM), "rdm">;
def HasPerfMon       : Predicate<"Subtarget->hasPerfMon()">;
def HasFullFP16      : Predicate<"Subtarget->hasFullFP16()">,
                                 AssemblerPredicate<(all_of FeatureFullFP16), "fullfp16">;
def HasFP16FML       : Predicate<"Subtarget->hasFP16FML()">,
                                 AssemblerPredicate<(all_of FeatureFP16FML), "fp16fml">;
def HasSPE           : Predicate<"Subtarget->hasSPE()">,
                                 AssemblerPredicate<(all_of FeatureSPE), "spe">;
def HasFuseAES       : Predicate<"Subtarget->hasFuseAES()">,
                                 AssemblerPredicate<(all_of FeatureFuseAES),
                                 "fuse-aes">;
def HasSVE           : Predicate<"Subtarget->hasSVE()">,
                                 AssemblerPredicate<(all_of FeatureSVE), "sve">;
def HasSVE2          : Predicate<"Subtarget->hasSVE2()">,
                                 AssemblerPredicate<(all_of FeatureSVE2), "sve2">;
def HasSVE2AES       : Predicate<"Subtarget->hasSVE2AES()">,
                                 AssemblerPredicate<(all_of FeatureSVE2AES), "sve2-aes">;
def HasSVE2SM4       : Predicate<"Subtarget->hasSVE2SM4()">,
                                 AssemblerPredicate<(all_of FeatureSVE2SM4), "sve2-sm4">;
def HasSVE2SHA3      : Predicate<"Subtarget->hasSVE2SHA3()">,
                                 AssemblerPredicate<(all_of FeatureSVE2SHA3), "sve2-sha3">;
def HasSVE2BitPerm   : Predicate<"Subtarget->hasSVE2BitPerm()">,
                                 AssemblerPredicate<(all_of FeatureSVE2BitPerm), "sve2-bitperm">;
def HasRCPC          : Predicate<"Subtarget->hasRCPC()">,
                                 AssemblerPredicate<(all_of FeatureRCPC), "rcpc">;
def HasAltNZCV       : Predicate<"Subtarget->hasAlternativeNZCV()">,
                       AssemblerPredicate<(all_of FeatureAltFPCmp), "altnzcv">;
def HasFRInt3264     : Predicate<"Subtarget->hasFRInt3264()">,
                       AssemblerPredicate<(all_of FeatureFRInt3264), "frint3264">;
def HasSB            : Predicate<"Subtarget->hasSB()">,
                       AssemblerPredicate<(all_of FeatureSB), "sb">;
def HasPredRes       : Predicate<"Subtarget->hasPredRes()">,
                       AssemblerPredicate<(all_of FeaturePredRes), "predres">;
def HasCCDP          : Predicate<"Subtarget->hasCCDP()">,
                       AssemblerPredicate<(all_of FeatureCacheDeepPersist), "ccdp">;
def HasBTI           : Predicate<"Subtarget->hasBTI()">,
                       AssemblerPredicate<(all_of FeatureBranchTargetId), "bti">;
def HasMTE           : Predicate<"Subtarget->hasMTE()">,
                       AssemblerPredicate<(all_of FeatureMTE), "mte">;
def HasTME           : Predicate<"Subtarget->hasTME()">,
                       AssemblerPredicate<(all_of FeatureTME), "tme">;
def HasETE           : Predicate<"Subtarget->hasETE()">,
                       AssemblerPredicate<(all_of FeatureETE), "ete">;
def HasTRBE          : Predicate<"Subtarget->hasTRBE()">,
                       AssemblerPredicate<(all_of FeatureTRBE), "trbe">;
def HasBF16          : Predicate<"Subtarget->hasBF16()">,
                       AssemblerPredicate<(all_of FeatureBF16), "bf16">;
def HasMatMulInt8    : Predicate<"Subtarget->hasMatMulInt8()">,
                       AssemblerPredicate<(all_of FeatureMatMulInt8), "i8mm">;
def HasMatMulFP32    : Predicate<"Subtarget->hasMatMulFP32()">,
                       AssemblerPredicate<(all_of FeatureMatMulFP32), "f32mm">;
def HasMatMulFP64    : Predicate<"Subtarget->hasMatMulFP64()">,
                       AssemblerPredicate<(all_of FeatureMatMulFP64), "f64mm">;
def HasXS            : Predicate<"Subtarget->hasXS()">,
                       AssemblerPredicate<(all_of FeatureXS), "xs">;
def HasWFxT          : Predicate<"Subtarget->hasWFxT()">,
                       AssemblerPredicate<(all_of FeatureWFxT), "wfxt">;
def HasLS64          : Predicate<"Subtarget->hasLS64()">,
                       AssemblerPredicate<(all_of FeatureLS64), "ls64">;
def HasBRBE          : Predicate<"Subtarget->hasBRBE()">,
                       AssemblerPredicate<(all_of FeatureBRBE), "brbe">;
def HasSPE_EEF       : Predicate<"Subtarget->hasSPE_EEF()">,
                       AssemblerPredicate<(all_of FeatureSPE_EEF), "spe-eef">;
def IsLE             : Predicate<"Subtarget->isLittleEndian()">;
def IsBE             : Predicate<"!Subtarget->isLittleEndian()">;
def IsWindows        : Predicate<"Subtarget->isTargetWindows()">;
def UseExperimentalZeroingPseudos
    : Predicate<"Subtarget->useExperimentalZeroingPseudos()">;
def UseAlternateSExtLoadCVTF32
    : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;

def UseNegativeImmediates
    : Predicate<"false">, AssemblerPredicate<(all_of (not FeatureNoNegativeImmediates)),
                                             "NegativeImmediates">;
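
// Illustrative note (editorial sketch, not upstream content): a Predicate
// gates instruction selection via the Subtarget query in its string, while
// the paired AssemblerPredicate gates assembly parsing and produces the
// "requires <feature>" diagnostics. A hypothetical gated definition:
//   let Predicates = [HasLSE] in
//   def MyAtomicOp : ...;  // matched and parsed only when hasLSE() is true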

def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER",
                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                       SDTCisInt<1>]>>;


//===----------------------------------------------------------------------===//
// AArch64-specific DAG Nodes.
//

// SDTBinaryArithWithFlagsOut - RES1, FLAGS = op LHS, RHS
def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
                                              [SDTCisSameAs<0, 2>,
                                               SDTCisSameAs<0, 3>,
                                               SDTCisInt<0>, SDTCisVT<1, i32>]>;
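// (Reading the profile above: SDTypeProfile<2, 2, ...> declares two results
// followed by two operands, and constraint indices count results first, then
// operands; so <0,2>/<0,3> tie the value result to LHS/RHS, and result 1 is
// the i32 NZCV flags value. The same numbering applies to the profiles below.)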

// SDTBinaryArithWithFlagsIn - RES = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
                                            [SDTCisSameAs<0, 1>,
                                             SDTCisSameAs<0, 2>,
                                             SDTCisInt<0>,
                                             SDTCisVT<3, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES1, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                            [SDTCisSameAs<0, 2>,
                                             SDTCisSameAs<0, 3>,
                                             SDTCisInt<0>,
                                             SDTCisVT<1, i32>,
                                             SDTCisVT<4, i32>]>;

def SDT_AArch64Brcond  : SDTypeProfile<0, 3,
                                     [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
                                      SDTCisVT<2, i32>]>;
def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
                                        SDTCisVT<2, OtherVT>]>;


def SDT_AArch64CSel  : SDTypeProfile<1, 4,
                                   [SDTCisSameAs<0, 1>,
                                    SDTCisSameAs<0, 2>,
                                    SDTCisInt<3>,
                                    SDTCisVT<4, i32>]>;
def SDT_AArch64CCMP : SDTypeProfile<1, 5,
                                    [SDTCisVT<0, i32>,
                                     SDTCisInt<1>,
                                     SDTCisSameAs<1, 2>,
                                     SDTCisInt<3>,
                                     SDTCisInt<4>,
                                     SDTCisVT<5, i32>]>;
def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
                                     [SDTCisVT<0, i32>,
                                      SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>,
                                      SDTCisInt<3>,
                                      SDTCisInt<4>,
                                      SDTCisVT<5, i32>]>;
def SDT_AArch64FCmp   : SDTypeProfile<0, 2,
                                   [SDTCisFP<0>,
                                    SDTCisSameAs<0, 1>]>;
def SDT_AArch64Dup   : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDT_AArch64DupLane   : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
def SDT_AArch64Insr  : SDTypeProfile<1, 2, [SDTCisVec<0>]>;
def SDT_AArch64Zip   : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                          SDTCisSameAs<0, 1>,
                                          SDTCisSameAs<0, 2>]>;
def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisInt<2>, SDTCisInt<3>]>;
def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                          SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;

def SDT_AArch64vshiftinsert : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<3>,
                                                 SDTCisSameAs<0,1>,
                                                 SDTCisSameAs<0,2>]>;

def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
def SDT_AArch64fcmp  : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>]>;
def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisSameAs<0,2>,
                                           SDTCisSameAs<0,3>]>;
def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;

def SDT_AArch64ITOF  : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;

def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
                                                 SDTCisPtrTy<1>]>;

def SDT_AArch64ldp : SDTypeProfile<2, 1, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stp : SDTypeProfile<0, 3, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stnp : SDTypeProfile<0, 3, [SDTCisVT<0, v4i32>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;

// Generates the general dynamic sequences, i.e.
//  adrp  x0, :tlsdesc:var
//  ldr   x1, [x0, #:tlsdesc_lo12:var]
//  add   x0, x0, #:tlsdesc_lo12:var
//  .tlsdesccall var
//  blr   x1

// (The TPIDR_EL0 offset is put directly in X0, hence no "result" here.)
// The profile takes a single operand: the variable.
def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0,1,
                                          [SDTCisPtrTy<0>]>;

def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
                                        [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
                                         SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
                                         SDTCisSameAs<1, 4>]>;

def SDT_AArch64TBL : SDTypeProfile<1, 2, [
  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>
]>;

// non-extending masked load fragment.
def nonext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
         !cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
// sign extending masked load fragments.
def asext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def),[{
  return (cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD ||
          cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD) &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def asext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def asext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def asext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;
// zero extending masked load fragments.
def zext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def zext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def zext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def zext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_load :
   PatFrag<(ops node:$ptr, node:$pred, node:$def),
           (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
   return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
          cast<MaskedLoadSDNode>(N)->isUnindexed() &&
          cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
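
// Editorial sketch of how these fragments are consumed (names below are
// hypothetical; the real predicated-load patterns live in the SVE .td files):
// a pattern matches the fragment rather than a raw masked_ld, e.g.
//   def : Pat<(nxv16i8 (nonext_masked_load GPR64sp:$base, nxv16i1:$gp, undef)),
//             (LD1B_IMM $gp, $base, (i64 0))>;
// so the C++ predicate (non-extending, unindexed, not non-temporal) decides
// which load opcode is selected.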

// non-truncating masked store fragment.
def nontrunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         !cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;
// truncating masked store fragments.
def trunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed();
}]>;
def trunc_masked_store_i8 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def trunc_masked_store_i16 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def trunc_masked_store_i32 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;

// Node definitions.
def AArch64adrp          : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
def AArch64adr           : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
def AArch64addlow        : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
def AArch64LOADgot       : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
                                SDCallSeqStart<[ SDTCisVT<0, i32>,
                                                 SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOutGlue]>;
def AArch64callseq_end   : SDNode<"ISD::CALLSEQ_END",
                                SDCallSeqEnd<[ SDTCisVT<0, i32>,
                                               SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def AArch64call          : SDNode<"AArch64ISD::CALL",
                                SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                 SDNPVariadic]>;

def AArch64call_rvmarker: SDNode<"AArch64ISD::CALL_RVMARKER",
                             SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                             [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                              SDNPVariadic]>;

def AArch64brcond        : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
                                [SDNPHasChain]>;
def AArch64cbz           : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64cbnz          : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64tbz           : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;
def AArch64tbnz          : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;


def AArch64csel          : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
def AArch64csinv         : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
def AArch64csneg         : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
def AArch64csinc         : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
def AArch64retflag       : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
                                [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def AArch64adc       : SDNode<"AArch64ISD::ADC",  SDTBinaryArithWithFlagsIn>;
def AArch64sbc       : SDNode<"AArch64ISD::SBC",  SDTBinaryArithWithFlagsIn>;
def AArch64add_flag  : SDNode<"AArch64ISD::ADDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64sub_flag  : SDNode<"AArch64ISD::SUBS",  SDTBinaryArithWithFlagsOut>;
def AArch64and_flag  : SDNode<"AArch64ISD::ANDS",  SDTBinaryArithWithFlagsOut,
                            [SDNPCommutative]>;
def AArch64adc_flag  : SDNode<"AArch64ISD::ADCS",  SDTBinaryArithWithFlagsInOut>;
def AArch64sbc_flag  : SDNode<"AArch64ISD::SBCS",  SDTBinaryArithWithFlagsInOut>;

def AArch64ccmp      : SDNode<"AArch64ISD::CCMP",  SDT_AArch64CCMP>;
def AArch64ccmn      : SDNode<"AArch64ISD::CCMN",  SDT_AArch64CCMP>;
def AArch64fccmp     : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;

def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;

def AArch64fcmp         : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
def AArch64strict_fcmp  : SDNode<"AArch64ISD::STRICT_FCMP", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64strict_fcmpe : SDNode<"AArch64ISD::STRICT_FCMPE", SDT_AArch64FCmp,
                                 [SDNPHasChain]>;
def AArch64any_fcmp     : PatFrags<(ops node:$lhs, node:$rhs),
                                   [(AArch64strict_fcmp node:$lhs, node:$rhs),
                                    (AArch64fcmp node:$lhs, node:$rhs)]>;

def AArch64dup       : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
def AArch64duplane8  : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;

def AArch64insr      : SDNode<"AArch64ISD::INSR", SDT_AArch64Insr>;

def AArch64zip1      : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
def AArch64zip2      : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
def AArch64uzp1      : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
def AArch64uzp2      : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
def AArch64trn1      : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
def AArch64trn2      : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;

def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;

def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
def AArch64ext : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;

def AArch64vashr : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
def AArch64vlshr : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
def AArch64sqshli : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
def AArch64uqshli : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
def AArch64srshri : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;
def AArch64vsli : SDNode<"AArch64ISD::VSLI", SDT_AArch64vshiftinsert>;
def AArch64vsri : SDNode<"AArch64ISD::VSRI", SDT_AArch64vshiftinsert>;

def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
def AArch64bsp: SDNode<"AArch64ISD::BSP", SDT_AArch64trivec>;

def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;

def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;

def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
                        (vnot (AArch64cmeqz (and node:$LHS, node:$RHS)))>;
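// (AArch64cmtst implements CMTST as NOT(CMEQz(AND lhs, rhs)): a lane becomes
// all ones exactly when (lhs & rhs) is nonzero, which is the "test bits"
// semantics of the instruction.)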

def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;

def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;

def AArch64neg : SDNode<"AArch64ISD::NEG", SDT_AArch64unvec>;

def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
                  [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

def AArch64Prefetch        : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
                               [SDNPHasChain, SDNPSideEffect]>;

def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;

def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
                                    SDT_AArch64TLSDescCallSeq,
                                    [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
                                     SDNPVariadic]>;


def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
                                 SDT_AArch64WrapperLarge>;

def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;

def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                    SDTCisSameAs<1, 2>]>;
def AArch64smull    : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
def AArch64umull    : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;

def AArch64frecpe   : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
def AArch64frecps   : SDNode<"AArch64ISD::FRECPS", SDTFPBinOp>;
def AArch64frsqrte  : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
def AArch64frsqrts  : SDNode<"AArch64ISD::FRSQRTS", SDTFPBinOp>;

def AArch64saddv    : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
def AArch64uaddv    : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
def AArch64sminv    : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
def AArch64uminv    : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
def AArch64smaxv    : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
def AArch64umaxv    : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;

def AArch64srhadd   : SDNode<"AArch64ISD::SRHADD", SDT_AArch64binvec>;
def AArch64urhadd   : SDNode<"AArch64ISD::URHADD", SDT_AArch64binvec>;
def AArch64shadd    : SDNode<"AArch64ISD::SHADD", SDT_AArch64binvec>;
def AArch64uhadd    : SDNode<"AArch64ISD::UHADD", SDT_AArch64binvec>;

def AArch64uabd_n   : SDNode<"AArch64ISD::UABD", SDT_AArch64binvec>;
def AArch64sabd_n   : SDNode<"AArch64ISD::SABD", SDT_AArch64binvec>;

def AArch64uabd     : PatFrags<(ops node:$lhs, node:$rhs),
                               [(AArch64uabd_n node:$lhs, node:$rhs),
                                (int_aarch64_neon_uabd node:$lhs, node:$rhs)]>;
def AArch64sabd     : PatFrags<(ops node:$lhs, node:$rhs),
                               [(AArch64sabd_n node:$lhs, node:$rhs),
                                (int_aarch64_neon_sabd node:$lhs, node:$rhs)]>;

def SDT_AArch64SETTAG : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
def AArch64stg : SDNode<"AArch64ISD::STG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stzg : SDNode<"AArch64ISD::STZG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64st2g : SDNode<"AArch64ISD::ST2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stz2g : SDNode<"AArch64ISD::STZ2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def SDT_AArch64unpk : SDTypeProfile<1, 1, [
    SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>
]>;
def AArch64sunpkhi : SDNode<"AArch64ISD::SUNPKHI", SDT_AArch64unpk>;
def AArch64sunpklo : SDNode<"AArch64ISD::SUNPKLO", SDT_AArch64unpk>;
def AArch64uunpkhi : SDNode<"AArch64ISD::UUNPKHI", SDT_AArch64unpk>;
def AArch64uunpklo : SDNode<"AArch64ISD::UUNPKLO", SDT_AArch64unpk>;

def AArch64ldp : SDNode<"AArch64ISD::LDP", SDT_AArch64ldp, [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def AArch64stp : SDNode<"AArch64ISD::STP", SDT_AArch64stp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stnp : SDNode<"AArch64ISD::STNP", SDT_AArch64stnp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def AArch64tbl : SDNode<"AArch64ISD::TBL", SDT_AArch64TBL>;

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//

// AArch64 Instruction Predicate Definitions.
// We could compute these on a per-module basis, but doing so requires
// accessing the Function object through the <Target>Subtarget, and objections
// were raised to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
  def ForCodeSize   : Predicate<"shouldOptForSize(MF)">;
  def NotForCodeSize   : Predicate<"!shouldOptForSize(MF)">;
  // Avoid generating STRQro if it is slow, unless we're optimizing for code size.
  def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || shouldOptForSize(MF)">;

  def UseBTI : Predicate<[{ MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;
  def NotUseBTI : Predicate<[{ !MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement() }]>;

  def SLSBLRMitigation : Predicate<[{ MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  def NoSLSBLRMitigation : Predicate<[{ !MF->getSubtarget<AArch64Subtarget>().hardenSlsBlr() }]>;
  // Toggles patterns which aren't beneficial in GlobalISel when we aren't
  // optimizing. This allows us to selectively use patterns without impacting
  // SelectionDAG's behaviour.
  // FIXME: One day there will probably be a nicer way to check for this, but
  // today is not that day.
  def OptimizedGISelOrOtherSelector : Predicate<"!MF->getFunction().hasOptNone() || MF->getProperties().hasProperty(MachineFunctionProperties::Property::FailedISel) || !MF->getProperties().hasProperty(MachineFunctionProperties::Property::Legalized)">;
}

include "AArch64InstrFormats.td"
include "SVEInstrFormats.td"

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Miscellaneous instructions.
//===----------------------------------------------------------------------===//

let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
// We set Sched to an empty list because we expect these instructions to
// simply be removed in most cases.
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(AArch64callseq_start timm:$amt1, timm:$amt2)]>,
                              Sched<[]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
                            Sched<[]>;
} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1

let isReMaterializable = 1, isCodeGenOnly = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions.  When that changes, they can be
// removed, along with the AArch64Wrapper node.

let AddedComplexity = 10 in
def LOADgot : Pseudo<(outs GPR64:$dst), (ins i64imm:$addr),
                     [(set GPR64:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
              Sched<[WriteLDAdr]>;

// The MOVaddr instruction should match only when the add is not folded
// into a load or store address.
def MOVaddr
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
                                            tglobaladdr:$low))]>,
      Sched<[WriteAdrAdr]>;
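// (Editorial note: after selection this pseudo, and the MOVaddr* variants
// below, are expanded in AArch64ExpandPseudoInsts.cpp into the two-instruction
// sequence
//   adrp $dst, sym
//   add  $dst, $dst, :lo12:sym
// which is cheap enough to rematerialize.)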
def MOVaddrJT
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
                                             tjumptable:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrCP
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
                                             tconstpool:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrBA
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
                                             tblockaddress:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrTLS
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
                                            tglobaltlsaddr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrEXT
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
                                            texternalsym:$low))]>,
      Sched<[WriteAdrAdr]>;
// Normally AArch64addlow either gets folded into a following ldr/str,
// or together with an adrp into MOVaddr above. For cases with TLS, it
// might appear without either of them, so allow lowering it into a plain
// add.
def ADDlowTLS
    : Pseudo<(outs GPR64:$dst), (ins GPR64:$src, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow GPR64:$src,
                                            tglobaltlsaddr:$low))]>,
      Sched<[WriteAdr]>;

} // isReMaterializable, isCodeGenOnly

def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
          (LOADgot tglobaltlsaddr:$addr)>;

def : Pat<(AArch64LOADgot texternalsym:$addr),
          (LOADgot texternalsym:$addr)>;

def : Pat<(AArch64LOADgot tconstpool:$addr),
          (LOADgot tconstpool:$addr)>;

// A 32-bit jump table destination is actually only 2 instructions since we
// can use the table itself as a PC-relative base. But optimization occurs
// after branch relaxation, so be pessimistic.
let Size = 12, Constraints = "@earlyclobber $dst,@earlyclobber $scratch",
    isNotDuplicable = 1 in {
def JumpTableDest32 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest16 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest8 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                            (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                     Sched<[]>;
}
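// (Editorial sketch: each pseudo above is later expanded to roughly a scaled
// load of the table entry followed by an add onto the table base, e.g. for
// 32-bit entries something like
//   ldrsw $scratch, [$table, $entry, lsl #2]
//   add   $dst, $table, $scratch
// hence the pessimistic Size = 12.)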

// Space-consuming pseudo to aid testing of placement and reachability
// algorithms. Immediate operand is the number of bytes this "instruction"
// occupies; register operands can be used to enforce dependency and constrain
// the scheduler.
let hasSideEffects = 1, mayLoad = 1, mayStore = 1 in
def SPACE : Pseudo<(outs GPR64:$Rd), (ins i32imm:$size, GPR64:$Rn),
                   [(set GPR64:$Rd, (int_aarch64_space imm:$size, GPR64:$Rn))]>,
            Sched<[]>;

let hasSideEffects = 1, isCodeGenOnly = 1 in {
  def SpeculationSafeValueX
      : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>;
  def SpeculationSafeValueW
      : Pseudo<(outs GPR32:$dst), (ins GPR32:$src), []>, Sched<[]>;
}

// SpeculationBarrierEndBB must only be used after an unconditional control
// flow, i.e. after a terminator for which isBarrier is True.
let hasSideEffects = 1, isCodeGenOnly = 1, isTerminator = 1, isBarrier = 1 in {
  def SpeculationBarrierISBDSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SpeculationBarrierSBEndBB
      : Pseudo<(outs), (ins), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// System instructions.
//===----------------------------------------------------------------------===//

def HINT : HintI<"hint">;
def : InstAlias<"nop",  (HINT 0b000)>;
def : InstAlias<"yield",(HINT 0b001)>;
def : InstAlias<"wfe",  (HINT 0b010)>;
def : InstAlias<"wfi",  (HINT 0b011)>;
def : InstAlias<"sev",  (HINT 0b100)>;
def : InstAlias<"sevl", (HINT 0b101)>;
def : InstAlias<"dgh",  (HINT 0b110)>;
def : InstAlias<"esb",  (HINT 0b10000)>, Requires<[HasRAS]>;
def : InstAlias<"csdb", (HINT 20)>;
// In order to be able to write readable assembly, LLVM should accept assembly
// inputs that use Branch Target Identification mnemonics, even with BTI
// disabled. However, in order to be compatible with other assemblers (e.g.
// GAS), LLVM should not emit these mnemonics unless BTI is enabled.
775def : InstAlias<"bti",  (HINT 32), 0>;
776def : InstAlias<"bti $op", (HINT btihint_op:$op), 0>;
777def : InstAlias<"bti",  (HINT 32)>, Requires<[HasBTI]>;
778def : InstAlias<"bti $op", (HINT btihint_op:$op)>, Requires<[HasBTI]>;

// v8.2a Statistical Profiling extension
def : InstAlias<"psb $op",  (HINT psbhint_op:$op)>, Requires<[HasSPE]>;

// As far as LLVM is concerned, this writes to the system's exclusive monitors.
let mayLoad = 1, mayStore = 1 in
def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;

// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
// model patterns with sufficiently fine granularity.
let mayLoad = ?, mayStore = ? in {
def DMB   : CRmSystemI<barrier_op, 0b101, "dmb",
                       [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;

def DSB   : CRmSystemI<barrier_op, 0b100, "dsb",
                       [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;

def ISB   : CRmSystemI<barrier_op, 0b110, "isb",
                       [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;
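// (For example, int_aarch64_dmb with CRm = 11 selects the inner-shareable
// barrier and prints as "dmb ish"; CRm = 15 is the full-system "dmb sy".)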

def TSB   : CRmSystemI<barrier_op, 0b010, "tsb", []> {
  let CRm        = 0b0010;
  let Inst{12}   = 0;
  let Predicates = [HasTRACEV8_4];
}

def DSBnXS  : CRmSystemI<barrier_nxs_op, 0b001, "dsb"> {
  let CRm{1-0}   = 0b11;
  let Inst{9-8}  = 0b10;
  let Predicates = [HasXS];
}

let Predicates = [HasWFxT] in {
def WFET : RegInputSystemI<0b0000, 0b000, "wfet">;
def WFIT : RegInputSystemI<0b0000, 0b001, "wfit">;
}

// Branch Record Buffer two-word mnemonic instructions
class BRBEI<bits<3> op2, string keyword>
    : SimpleSystemI<0, (ins), "brb", keyword>, Sched<[WriteSys]> {
  let Inst{31-8} = 0b110101010000100101110010;
  let Inst{7-5} = op2;
  let Predicates = [HasBRBE];
}
def BRB_IALL: BRBEI<0b100, "\tiall">;
def BRB_INJ:  BRBEI<0b101, "\tinj">;

} // let mayLoad = ?, mayStore = ?

// Allow uppercase and lowercase keyword arguments for BRB IALL and BRB INJ
def : TokenAlias<"INJ", "inj">;
def : TokenAlias<"IALL", "iall">;

// ARMv8.2-A Dot Product
let Predicates = [HasDotProd] in {
defm SDOT : SIMDThreeSameVectorDot<0, 0, "sdot", int_aarch64_neon_sdot>;
defm UDOT : SIMDThreeSameVectorDot<1, 0, "udot", int_aarch64_neon_udot>;
defm SDOTlane : SIMDThreeSameVectorDotIndex<0, 0, 0b10, "sdot", int_aarch64_neon_sdot>;
defm UDOTlane : SIMDThreeSameVectorDotIndex<1, 0, 0b10, "udot", int_aarch64_neon_udot>;
}

// ARMv8.6-A BFloat
let Predicates = [HasBF16] in {
defm BFDOT       : SIMDThreeSameVectorBFDot<1, "bfdot">;
defm BF16DOTlane : SIMDThreeSameVectorBF16DotI<0, "bfdot">;
def BFMMLA       : SIMDThreeSameVectorBF16MatrixMul<"bfmmla">;
def BFMLALB      : SIMDBF16MLAL<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALT      : SIMDBF16MLAL<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFMLALBIdx   : SIMDBF16MLALIndex<0, "bfmlalb", int_aarch64_neon_bfmlalb>;
def BFMLALTIdx   : SIMDBF16MLALIndex<1, "bfmlalt", int_aarch64_neon_bfmlalt>;
def BFCVTN       : SIMD_BFCVTN;
def BFCVTN2      : SIMD_BFCVTN2;
def BFCVT        : BF16ToSinglePrecision<"bfcvt">;

// Vector-scalar BFDOT:
// The second source operand of the 64-bit variant of BF16DOTlane is a 128-bit
// register (the instruction uses a single 32-bit lane from it), so the pattern
// is a bit tricky.
def : Pat<(v2f32 (int_aarch64_neon_bfdot
                    (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
                    (v4bf16 (bitconvert
                      (v2i32 (AArch64duplane32
                        (v4i32 (bitconvert
                          (v8bf16 (insert_subvector undef,
                            (v4bf16 V64:$Rm),
                            (i64 0))))),
                        VectorIndexS:$idx)))))),
          (BF16DOTlanev4bf16 (v2f32 V64:$Rd), (v4bf16 V64:$Rn),
                             (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                             VectorIndexS:$idx)>;
}

// ARMv8.6A AArch64 matrix multiplication
let Predicates = [HasMatMulInt8] in {
def  SMMLA : SIMDThreeSameVectorMatMul<0, 0, "smmla", int_aarch64_neon_smmla>;
def  UMMLA : SIMDThreeSameVectorMatMul<0, 1, "ummla", int_aarch64_neon_ummla>;
def USMMLA : SIMDThreeSameVectorMatMul<1, 0, "usmmla", int_aarch64_neon_usmmla>;
defm USDOT : SIMDThreeSameVectorDot<0, 1, "usdot", int_aarch64_neon_usdot>;
defm USDOTlane : SIMDThreeSameVectorDotIndex<0, 1, 0b10, "usdot", int_aarch64_neon_usdot>;

// sudot lane has a pattern where usdot is expected (there is no separate
// sudot intrinsic); the operand order is swapped relative to usdot. The
// second operand is used in the dup operation to repeat the indexed element.
class BaseSIMDSUDOTIndex<bit Q, string dst_kind, string lhs_kind,
                         string rhs_kind, RegisterOperand RegType,
                         ValueType AccumType, ValueType InputType>
      : BaseSIMDThreeSameVectorDotIndex<Q, 0, 1, 0b00, "sudot", dst_kind,
                                        lhs_kind, rhs_kind, RegType, AccumType,
                                        InputType, null_frag> {
  let Pattern = [(set (AccumType RegType:$dst),
                      (AccumType (int_aarch64_neon_usdot (AccumType RegType:$Rd),
                                 (InputType (bitconvert (AccumType
                                    (AArch64duplane32 (v4i32 V128:$Rm),
                                        VectorIndexS:$idx)))),
                                 (InputType RegType:$Rn))))];
}

multiclass SIMDSUDOTIndex {
  def v8i8  : BaseSIMDSUDOTIndex<0, ".2s", ".8b", ".4b", V64, v2i32, v8i8>;
  def v16i8 : BaseSIMDSUDOTIndex<1, ".4s", ".16b", ".4b", V128, v4i32, v16i8>;
}

defm SUDOTlane : SIMDSUDOTIndex;

}

// ARMv8.2-A FP16 Fused Multiply-Add Long
let Predicates = [HasNEON, HasFP16FML] in {
defm FMLAL      : SIMDThreeSameVectorFML<0, 1, 0b001, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSL      : SIMDThreeSameVectorFML<0, 1, 0b101, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2     : SIMDThreeSameVectorFML<1, 0, 0b001, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2     : SIMDThreeSameVectorFML<1, 0, 0b101, "fmlsl2", int_aarch64_neon_fmlsl2>;
defm FMLALlane  : SIMDThreeSameVectorFMLIndex<0, 0b0000, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSLlane  : SIMDThreeSameVectorFMLIndex<0, 0b0100, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1000, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1100, "fmlsl2", int_aarch64_neon_fmlsl2>;
}

// Armv8.2-A Crypto extensions
let Predicates = [HasSHA3] in {
def SHA512H   : CryptoRRRTied<0b0, 0b00, "sha512h">;
def SHA512H2  : CryptoRRRTied<0b0, 0b01, "sha512h2">;
def SHA512SU0 : CryptoRRTied_2D<0b0, 0b00, "sha512su0">;
def SHA512SU1 : CryptoRRRTied_2D<0b0, 0b10, "sha512su1">;
def RAX1      : CryptoRRR_2D<0b0,0b11, "rax1">;
def EOR3      : CryptoRRRR_16B<0b00, "eor3">;
def BCAX      : CryptoRRRR_16B<0b01, "bcax">;
def XAR       : CryptoRRRi6<"xar">;
} // HasSHA3

let Predicates = [HasSM4] in {
def SM3TT1A   : CryptoRRRi2Tied<0b0, 0b00, "sm3tt1a">;
def SM3TT1B   : CryptoRRRi2Tied<0b0, 0b01, "sm3tt1b">;
def SM3TT2A   : CryptoRRRi2Tied<0b0, 0b10, "sm3tt2a">;
def SM3TT2B   : CryptoRRRi2Tied<0b0, 0b11, "sm3tt2b">;
def SM3SS1    : CryptoRRRR_4S<0b10, "sm3ss1">;
def SM3PARTW1 : CryptoRRRTied_4S<0b1, 0b00, "sm3partw1">;
def SM3PARTW2 : CryptoRRRTied_4S<0b1, 0b01, "sm3partw2">;
def SM4ENCKEY : CryptoRRR_4S<0b1, 0b10, "sm4ekey">;
def SM4E      : CryptoRRTied_4S<0b0, 0b01, "sm4e">;
} // HasSM4

let Predicates = [HasRCPC] in {
  // v8.3 Release Consistent processor consistent (RCpc) support, optional in v8.2.
  def LDAPRB  : RCPCLoad<0b00, "ldaprb", GPR32>;
  def LDAPRH  : RCPCLoad<0b01, "ldaprh", GPR32>;
  def LDAPRW  : RCPCLoad<0b10, "ldapr", GPR32>;
  def LDAPRX  : RCPCLoad<0b11, "ldapr", GPR64>;
}
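
// (Editorial note: the LDAPR* loads are load-acquire under the weaker RCpc
// ordering model, which is sufficient for C/C++ memory_order_acquire loads
// and can be cheaper than the RCsc LDAR family.)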

// v8.3a complex add and multiply-accumulate. No predicate here; that is done
// inside the multiclass, as the FP16 versions need different predicates.
defm FCMLA : SIMDThreeSameVectorTiedComplexHSD<1, 0b110, complexrotateop,
                                               "fcmla", null_frag>;
defm FCADD : SIMDThreeSameVectorComplexHSD<1, 0b111, complexrotateopodd,
                                           "fcadd", null_frag>;
defm FCMLA : SIMDIndexedTiedComplexHSD<1, 0, 1, complexrotateop, "fcmla",
                                       null_frag>;
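// (Editorial note: complexrotateop encodes the rotation as imm = rot/90, so
// immediates 0..3 mean 0, 90, 180 and 270 degrees; complexrotateopodd allows
// only the odd rotations, encoding 90 as 0 and 270 as 1, which is why the
// FCADD patterns below pass (i32 0) for #90 and (i32 1) for #270.)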

let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot90 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 0))>;
  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot270 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 1))>;
  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot90 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 0))>;
  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot270 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 1))>;
}

let Predicates = [HasComplxNum, HasNEON] in {
  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot90 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 0))>;
  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot270 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 1))>;
  foreach Ty = [v4f32, v2f64] in {
    def : Pat<(Ty (int_aarch64_neon_vcadd_rot90 (Ty V128:$Rn), (Ty V128:$Rm))),
              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 0))>;
    def : Pat<(Ty (int_aarch64_neon_vcadd_rot270 (Ty V128:$Rn), (Ty V128:$Rm))),
              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 1))>;
  }
}

multiclass FCMLA_PATS<ValueType ty, RegisterClass Reg> {
  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 0)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 1)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 2)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), (ty Reg:$Rm))),
            (!cast<Instruction>("FCMLA" # ty) $Rd, $Rn, $Rm, 3)>;
}

multiclass FCMLA_LANE_PATS<ValueType ty, RegisterClass Reg, dag RHSDup> {
  def : Pat<(ty (int_aarch64_neon_vcmla_rot0 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 0)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot90 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 1)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot180 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 2)>;
  def : Pat<(ty (int_aarch64_neon_vcmla_rot270 (ty Reg:$Rd), (ty Reg:$Rn), RHSDup)),
            (!cast<Instruction>("FCMLA" # ty # "_indexed") $Rd, $Rn, $Rm, VectorIndexS:$idx, 3)>;
}


let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
  defm : FCMLA_PATS<v4f16, V64>;
  defm : FCMLA_PATS<v8f16, V128>;

  defm : FCMLA_LANE_PATS<v4f16, V64,
                         (v4f16 (bitconvert (v2i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexD:$idx))))>;
  defm : FCMLA_LANE_PATS<v8f16, V128,
                         (v8f16 (bitconvert (v4i32 (AArch64duplane32 (v4i32 V128:$Rm), VectorIndexS:$idx))))>;
}
let Predicates = [HasComplxNum, HasNEON] in {
  defm : FCMLA_PATS<v2f32, V64>;
  defm : FCMLA_PATS<v4f32, V128>;
  defm : FCMLA_PATS<v2f64, V128>;

  defm : FCMLA_LANE_PATS<v4f32, V128,
                         (v4f32 (bitconvert (v2i64 (AArch64duplane64 (v2i64 V128:$Rm), VectorIndexD:$idx))))>;
}

// v8.3a Pointer Authentication
// These instructions inhabit part of the hint space and so can be used for
// armv8 targets. Keeping the old HINT mnemonic when compiling without PA is
// important for compatibility with other assemblers (e.g. GAS) when building
// software that must run on CPUs both with and without PA.
let Uses = [LR], Defs = [LR] in {
  def PACIAZ   : SystemNoOperands<0b000, "hint\t#24">;
  def PACIBZ   : SystemNoOperands<0b010, "hint\t#26">;
  let isAuthenticated = 1 in {
    def AUTIAZ   : SystemNoOperands<0b100, "hint\t#28">;
    def AUTIBZ   : SystemNoOperands<0b110, "hint\t#30">;
  }
}
let Uses = [LR, SP], Defs = [LR] in {
  def PACIASP  : SystemNoOperands<0b001, "hint\t#25">;
  def PACIBSP  : SystemNoOperands<0b011, "hint\t#27">;
  let isAuthenticated = 1 in {
    def AUTIASP  : SystemNoOperands<0b101, "hint\t#29">;
    def AUTIBSP  : SystemNoOperands<0b111, "hint\t#31">;
  }
}
let Uses = [X16, X17], Defs = [X17], CRm = 0b0001 in {
  def PACIA1716  : SystemNoOperands<0b000, "hint\t#8">;
  def PACIB1716  : SystemNoOperands<0b010, "hint\t#10">;
  let isAuthenticated = 1 in {
    def AUTIA1716  : SystemNoOperands<0b100, "hint\t#12">;
    def AUTIB1716  : SystemNoOperands<0b110, "hint\t#14">;
  }
}

let Uses = [LR], Defs = [LR], CRm = 0b0000 in {
  def XPACLRI   : SystemNoOperands<0b111, "hint\t#7">;
}

// So that readable assembly can be written, LLVM should accept assembly inputs
// that use pointer authentication mnemonics even with PA disabled. However, to
// remain compatible with other assemblers (e.g. GAS), LLVM should not emit
// these mnemonics unless PA is enabled.
def : InstAlias<"paciaz", (PACIAZ), 0>;
def : InstAlias<"pacibz", (PACIBZ), 0>;
def : InstAlias<"autiaz", (AUTIAZ), 0>;
def : InstAlias<"autibz", (AUTIBZ), 0>;
def : InstAlias<"paciasp", (PACIASP), 0>;
def : InstAlias<"pacibsp", (PACIBSP), 0>;
def : InstAlias<"autiasp", (AUTIASP), 0>;
def : InstAlias<"autibsp", (AUTIBSP), 0>;
def : InstAlias<"pacia1716", (PACIA1716), 0>;
def : InstAlias<"pacib1716", (PACIB1716), 0>;
def : InstAlias<"autia1716", (AUTIA1716), 0>;
def : InstAlias<"autib1716", (AUTIB1716), 0>;
def : InstAlias<"xpaclri", (XPACLRI), 0>;

// These pointer authentication instructions require armv8.3a
let Predicates = [HasPAuth] in {

  // When PA is enabled, a better mnemonic should be emitted.
  def : InstAlias<"paciaz", (PACIAZ), 1>;
  def : InstAlias<"pacibz", (PACIBZ), 1>;
  def : InstAlias<"autiaz", (AUTIAZ), 1>;
  def : InstAlias<"autibz", (AUTIBZ), 1>;
  def : InstAlias<"paciasp", (PACIASP), 1>;
  def : InstAlias<"pacibsp", (PACIBSP), 1>;
  def : InstAlias<"autiasp", (AUTIASP), 1>;
  def : InstAlias<"autibsp", (AUTIBSP), 1>;
  def : InstAlias<"pacia1716", (PACIA1716), 1>;
  def : InstAlias<"pacib1716", (PACIB1716), 1>;
  def : InstAlias<"autia1716", (AUTIA1716), 1>;
  def : InstAlias<"autib1716", (AUTIB1716), 1>;
  def : InstAlias<"xpaclri", (XPACLRI), 1>;

  multiclass SignAuth<bits<3> prefix, bits<3> prefix_z, string asm> {
    def IA   : SignAuthOneData<prefix, 0b00, !strconcat(asm, "ia")>;
    def IB   : SignAuthOneData<prefix, 0b01, !strconcat(asm, "ib")>;
    def DA   : SignAuthOneData<prefix, 0b10, !strconcat(asm, "da")>;
    def DB   : SignAuthOneData<prefix, 0b11, !strconcat(asm, "db")>;
    def IZA  : SignAuthZero<prefix_z, 0b00, !strconcat(asm, "iza")>;
    def DZA  : SignAuthZero<prefix_z, 0b10, !strconcat(asm, "dza")>;
    def IZB  : SignAuthZero<prefix_z, 0b01, !strconcat(asm, "izb")>;
    def DZB  : SignAuthZero<prefix_z, 0b11, !strconcat(asm, "dzb")>;
  }

  defm PAC : SignAuth<0b000, 0b010, "pac">;
  defm AUT : SignAuth<0b001, 0b011, "aut">;

  def XPACI : ClearAuth<0, "xpaci">;
  def XPACD : ClearAuth<1, "xpacd">;
  def PACGA : SignAuthTwoOperand<0b1100, "pacga", null_frag>;

  // Combined Instructions
  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
    def BRAA    : AuthBranchTwoOperands<0, 0, "braa">;
    def BRAB    : AuthBranchTwoOperands<0, 1, "brab">;
  }
  let isCall = 1, Defs = [LR], Uses = [SP] in {
    def BLRAA   : AuthBranchTwoOperands<1, 0, "blraa">;
    def BLRAB   : AuthBranchTwoOperands<1, 1, "blrab">;
  }

  let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
    def BRAAZ   : AuthOneOperand<0b000, 0, "braaz">;
    def BRABZ   : AuthOneOperand<0b000, 1, "brabz">;
  }
  let isCall = 1, Defs = [LR], Uses = [SP] in {
    def BLRAAZ  : AuthOneOperand<0b001, 0, "blraaz">;
    def BLRABZ  : AuthOneOperand<0b001, 1, "blrabz">;
  }

  let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
    def RETAA   : AuthReturn<0b010, 0, "retaa">;
    def RETAB   : AuthReturn<0b010, 1, "retab">;
    def ERETAA  : AuthReturn<0b100, 0, "eretaa">;
    def ERETAB  : AuthReturn<0b100, 1, "eretab">;
  }

  defm LDRAA  : AuthLoad<0, "ldraa", simm10Scaled>;
  defm LDRAB  : AuthLoad<1, "ldrab", simm10Scaled>;

}

// v8.3a floating-point conversion for JavaScript
let Predicates = [HasJS, HasFPARMv8], Defs = [NZCV] in
def FJCVTZS  : BaseFPToIntegerUnscaled<0b01, 0b11, 0b110, FPR64, GPR32,
                                      "fjcvtzs",
                                      [(set GPR32:$Rd,
                                         (int_aarch64_fjcvtzs FPR64:$Rn))]> {
  let Inst{31} = 0;
} // HasJS, HasFPARMv8

// v8.4 Flag manipulation instructions
let Predicates = [HasFlagM], Defs = [NZCV], Uses = [NZCV] in {
def CFINV : SimpleSystemI<0, (ins), "cfinv", "">, Sched<[WriteSys]> {
  let Inst{20-5} = 0b0000001000000000;
}
def SETF8  : BaseFlagManipulation<0, 0, (ins GPR32:$Rn), "setf8", "{\t$Rn}">;
def SETF16 : BaseFlagManipulation<0, 1, (ins GPR32:$Rn), "setf16", "{\t$Rn}">;
def RMIF   : FlagRotate<(ins GPR64:$Rn, uimm6:$imm, imm0_15:$mask), "rmif",
                        "{\t$Rn, $imm, $mask}">;
} // HasFlagM

// v8.5 flag manipulation instructions
let Predicates = [HasAltNZCV], Uses = [NZCV], Defs = [NZCV] in {

def XAFLAG : PstateWriteSimple<(ins), "xaflag", "">, Sched<[WriteSys]> {
  let Inst{18-16} = 0b000;
  let Inst{11-8} = 0b0000;
  let Unpredictable{11-8} = 0b1111;
  let Inst{7-5} = 0b001;
}

def AXFLAG : PstateWriteSimple<(ins), "axflag", "">, Sched<[WriteSys]> {
  let Inst{18-16} = 0b000;
  let Inst{11-8} = 0b0000;
  let Unpredictable{11-8} = 0b1111;
  let Inst{7-5} = 0b010;
}
} // HasAltNZCV


// Armv8.5-A speculation barrier
def SB : SimpleSystemI<0, (ins), "sb", "">, Sched<[]> {
  let Inst{20-5} = 0b0001100110000111;
  let Unpredictable{11-8} = 0b1111;
  let Predicates = [HasSB];
  let hasSideEffects = 1;
}

def : InstAlias<"clrex", (CLREX 0xf)>;
def : InstAlias<"isb", (ISB 0xf)>;
def : InstAlias<"ssbb", (DSB 0)>;
def : InstAlias<"pssbb", (DSB 4)>;
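// SSBB and PSSBB are architecturally defined as aliases of "dsb #0" and
// "dsb #4" respectively, which is why they map onto DSB here.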

def MRS    : MRSI;
def MSR    : MSRI;
def MSRpstateImm1 : MSRpstateImm0_1;
def MSRpstateImm4 : MSRpstateImm0_15;

// The thread pointer (on Linux, at least, where this has been implemented) is
// TPIDR_EL0.
def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
                       [(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;
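// This pseudo is normally expanded to "mrs $dst, TPIDR_EL0"; the exact
// TPIDR_ELx register used can depend on the target configuration.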

let Uses = [ X9 ], Defs = [ X16, X17, LR, NZCV ] in {
def HWASAN_CHECK_MEMACCESS : Pseudo<
  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
  [(int_hwasan_check_memaccess X9, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
  Sched<[]>;
}

let Uses = [ X20 ], Defs = [ X16, X17, LR, NZCV ] in {
def HWASAN_CHECK_MEMACCESS_SHORTGRANULES : Pseudo<
  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
  [(int_hwasan_check_memaccess_shortgranules X20, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
  Sched<[]>;
}

// The cycle counter PMC register is PMCCNTR_EL0.
let Predicates = [HasPerfMon] in
def : Pat<(readcyclecounter), (MRS 0xdce8)>;
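// 0xdce8 is the MRS system-register immediate for PMCCNTR_EL0: concatenating
// op0=0b11, op1=0b011, CRn=0b1001, CRm=0b1101, op2=0b000 gives
// 0b1101110011101000 = 0xdce8.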

// FPCR register
def : Pat<(i64 (int_aarch64_get_fpcr)), (MRS 0xda20)>;
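// Likewise, 0xda20 encodes FPCR (op0=0b11, op1=0b011, CRn=0b0100, CRm=0b0100,
// op2=0b000).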

// Generic system instructions
def SYSxt  : SystemXtI<0, "sys">;
def SYSLxt : SystemLXtI<1, "sysl">;

def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
                (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
                 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;


let Predicates = [HasTME] in {

def TSTART : TMSystemI<0b0000, "tstart",
                      [(set GPR64:$Rt, (int_aarch64_tstart))]>;

def TCOMMIT : TMSystemINoOperand<0b0000, "tcommit", [(int_aarch64_tcommit)]>;

def TCANCEL : TMSystemException<0b011, "tcancel",
                                [(int_aarch64_tcancel i64_imm0_65535:$imm)]>;

def TTEST : TMSystemI<0b0001, "ttest", [(set GPR64:$Rt, (int_aarch64_ttest))]> {
  let mayLoad = 0;
  let mayStore = 0;
}
} // HasTME

//===----------------------------------------------------------------------===//
// Move immediate instructions.
//===----------------------------------------------------------------------===//

defm MOVK : InsertImmediate<0b11, "movk">;
defm MOVN : MoveImmediate<0b00, "movn">;

let PostEncoderMethod = "fixMOVZ" in
defm MOVZ : MoveImmediate<0b10, "movz">;

// First group of aliases covers an implicit "lsl #0".
def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, i32_imm0_65535:$imm, 0), 0>;
def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, i32_imm0_65535:$imm, 0), 0>;
def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, i32_imm0_65535:$imm, 0)>;
def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, i32_imm0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, i32_imm0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, i32_imm0_65535:$imm, 0)>;

// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g3:$sym, 48), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g2:$sym, 32), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g1:$sym, 16), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g0:$sym, 0), 0>;

def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g1:$sym, 16), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g0:$sym, 0), 0>;

// Final group of aliases covers true "mov $Rd, $imm" cases.
multiclass movw_mov_alias<string basename, Instruction INST, RegisterClass GPR,
                          int width, int shift> {
  def _asmoperand : AsmOperandClass {
    let Name = basename # width # "_lsl" # shift # "MovAlias";
    let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
                               # shift # ">";
    let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
  }

  def _movimm : Operand<i32> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
  }

  def : InstAlias<"mov $Rd, $imm",
                  (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
}

defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;

defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;

defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;

defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;

let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
    isAsCheapAsAMove = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions.  When that changes, we can select
// directly to the real instructions and get rid of these pseudos.

def MOVi32imm
    : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
             [(set GPR32:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
def MOVi64imm
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
             [(set GPR64:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
} // isReMaterializable, isCodeGenOnly

// If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
// eventual expansion code fewer bits to worry about getting right. Marshalling
// the types is a little tricky though:
def i64imm_32bit : ImmLeaf<i64, [{
  return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
}]>;

def s64imm_32bit : ImmLeaf<i64, [{
  int64_t Imm64 = static_cast<int64_t>(Imm);
  return Imm64 >= std::numeric_limits<int32_t>::min() &&
         Imm64 <= std::numeric_limits<int32_t>::max();
}]>;

def trunc_imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def gi_trunc_imm : GICustomOperandRenderer<"renderTruncImm">,
  GISDNodeXFormEquiv<trunc_imm>;

let Predicates = [OptimizedGISelOrOtherSelector] in {
// The SUBREG_TO_REG isn't eliminated at -O0, which can result in pointless
// copies.
def : Pat<(i64 i64imm_32bit:$src),
          (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
}
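// e.g. an i64 constant such as 0x12345678 has all upper 32 bits clear, so a
// single 32-bit move suffices: writing a W register implicitly zeroes the top
// half of the X register, and the SUBREG_TO_REG wrapper models exactly that.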

// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
  return CurDAG->getTargetConstant(
    N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
  return CurDAG->getTargetConstant(
    N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
}]>;


def : Pat<(f32 fpimm:$in),
  (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
def : Pat<(f64 fpimm:$in),
  (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;


// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
// sequences.
def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
                             tglobaladdr:$g1, tglobaladdr:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g0, 0),
                                  tglobaladdr:$g1, 16),
                          tglobaladdr:$g2, 32),
                  tglobaladdr:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
                             tblockaddress:$g1, tblockaddress:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g0, 0),
                                  tblockaddress:$g1, 16),
                          tblockaddress:$g2, 32),
                  tblockaddress:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
                             tconstpool:$g1, tconstpool:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g0, 0),
                                  tconstpool:$g1, 16),
                          tconstpool:$g2, 32),
                  tconstpool:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
                             tjumptable:$g1, tjumptable:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g0, 0),
                                  tjumptable:$g1, 16),
                          tjumptable:$g2, 32),
                  tjumptable:$g3, 48)>;


//===----------------------------------------------------------------------===//
// Arithmetic instructions.
//===----------------------------------------------------------------------===//

// Add/subtract with carry.
defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;

def : InstAlias<"ngc $dst, $src",  (SBCWr  GPR32:$dst, WZR, GPR32:$src)>;
def : InstAlias<"ngc $dst, $src",  (SBCXr  GPR64:$dst, XZR, GPR64:$src)>;
def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;

// Add/subtract
defm ADD : AddSub<0, "add", "sub", add>;
defm SUB : AddSub<1, "sub", "add">;

def : InstAlias<"mov $dst, $src",
                (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;

defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;

// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
          (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
          (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
          (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
          (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
          (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
          (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
let AddedComplexity = 1 in {
def : Pat<(sub GPR32sp:$R2, arith_extended_reg32_i32:$R3),
          (SUBSWrx GPR32sp:$R2, arith_extended_reg32_i32:$R3)>;
def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64_i64:$R3),
          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64_i64:$R3)>;
}
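// For example, if a function computes both "x0 - 4" and "cmp x0, #4" (i.e.
// "subs xzr, x0, #4"), selecting the subtraction as SUBS lets CSE fold the two
// into a single flag-setting subtract.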

// Because of the immediate format for add/sub-imm instructions, the
// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
// These patterns capture that transformation.
let AddedComplexity = 1 in {
def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}
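// e.g. (add w0, #-5) has no direct encoding because the add/sub immediate
// field is unsigned, so it is selected as a subtract of #5 instead (and vice
// versa for sub with a negative immediate).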

// Because of the immediate format for add/sub-imm instructions, the
// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
// These patterns capture that transformation.
let AddedComplexity = 1 in {
def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}

def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
def : InstAlias<"neg $dst, $src$shift",
                (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
def : InstAlias<"neg $dst, $src$shift",
                (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;

def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
def : InstAlias<"negs $dst, $src$shift",
                (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
def : InstAlias<"negs $dst, $src$shift",
                (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;


// Unsigned/Signed divide
defm UDIV : Div<0, "udiv", udiv>;
defm SDIV : Div<1, "sdiv", sdiv>;

def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr GPR64:$Rn, GPR64:$Rm)>;
def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr GPR64:$Rn, GPR64:$Rm)>;

// Variable shift
defm ASRV : Shift<0b10, "asr", sra>;
defm LSLV : Shift<0b00, "lsl", shl>;
defm LSRV : Shift<0b01, "lsr", srl>;
defm RORV : Shift<0b11, "ror", rotr>;

def : ShiftAlias<"asrv", ASRVWr, GPR32>;
def : ShiftAlias<"asrv", ASRVXr, GPR64>;
def : ShiftAlias<"lslv", LSLVWr, GPR32>;
def : ShiftAlias<"lslv", LSLVXr, GPR64>;
def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
def : ShiftAlias<"rorv", RORVWr, GPR32>;
def : ShiftAlias<"rorv", RORVXr, GPR64>;

// Multiply-add
let AddedComplexity = 5 in {
defm MADD : MulAccum<0, "madd", add>;
defm MSUB : MulAccum<1, "msub", sub>;

def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
          (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
          (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;

def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
} // AddedComplexity = 5

let AddedComplexity = 5 in {
def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;

def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext_inreg GPR64:$Rm, i32))),
          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext GPR32:$Rm))),
          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
          (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (and GPR64:$Rm, 0xFFFFFFFF))),
          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (zext GPR32:$Rm))),
          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
          (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;

def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
          (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
          (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;

def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;

def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;

def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
                    GPR64:$Ra)),
          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;

def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
                                    (s64imm_32bit:$C)))),
          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
} // AddedComplexity = 5
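// e.g. for (mul (sext w0), 1000) the constant is first materialized with a
// MOVi32imm and the widening multiply then needs only one SMADDL, i.e. roughly
// "mov w8, #1000; smull x0, w0, w8".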

def : MulAccumWAlias<"mul", MADDWrrr>;
def : MulAccumXAlias<"mul", MADDXrrr>;
def : MulAccumWAlias<"mneg", MSUBWrrr>;
def : MulAccumXAlias<"mneg", MSUBXrrr>;
def : WideMulAccumAlias<"smull", SMADDLrrr>;
def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
def : WideMulAccumAlias<"umull", UMADDLrrr>;
def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;

// Multiply-high
def SMULHrr : MulHi<0b010, "smulh", mulhs>;
def UMULHrr : MulHi<0b110, "umulh", mulhu>;

// CRC32
def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;

def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;

// v8.1 atomic CAS
defm CAS   : CompareAndSwap<0, 0, "">;
defm CASA  : CompareAndSwap<1, 0, "a">;
defm CASL  : CompareAndSwap<0, 1, "l">;
defm CASAL : CompareAndSwap<1, 1, "al">;

// v8.1 atomic CASP
defm CASP   : CompareAndSwapPair<0, 0, "">;
defm CASPA  : CompareAndSwapPair<1, 0, "a">;
defm CASPL  : CompareAndSwapPair<0, 1, "l">;
defm CASPAL : CompareAndSwapPair<1, 1, "al">;

// v8.1 atomic SWP
defm SWP   : Swap<0, 0, "">;
defm SWPA  : Swap<1, 0, "a">;
defm SWPL  : Swap<0, 1, "l">;
defm SWPAL : Swap<1, 1, "al">;

// v8.1 atomic LD<OP>(register): atomically loads a value, stores the result of
// applying <OP> to it and the source register, and returns the original value
// in Rt.
defm LDADD   : LDOPregister<0b000, "add", 0, 0, "">;
defm LDADDA  : LDOPregister<0b000, "add", 1, 0, "a">;
defm LDADDL  : LDOPregister<0b000, "add", 0, 1, "l">;
defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;

defm LDCLR   : LDOPregister<0b001, "clr", 0, 0, "">;
defm LDCLRA  : LDOPregister<0b001, "clr", 1, 0, "a">;
defm LDCLRL  : LDOPregister<0b001, "clr", 0, 1, "l">;
defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;

defm LDEOR   : LDOPregister<0b010, "eor", 0, 0, "">;
defm LDEORA  : LDOPregister<0b010, "eor", 1, 0, "a">;
defm LDEORL  : LDOPregister<0b010, "eor", 0, 1, "l">;
defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;

defm LDSET   : LDOPregister<0b011, "set", 0, 0, "">;
defm LDSETA  : LDOPregister<0b011, "set", 1, 0, "a">;
defm LDSETL  : LDOPregister<0b011, "set", 0, 1, "l">;
defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;

defm LDSMAX   : LDOPregister<0b100, "smax", 0, 0, "">;
defm LDSMAXA  : LDOPregister<0b100, "smax", 1, 0, "a">;
defm LDSMAXL  : LDOPregister<0b100, "smax", 0, 1, "l">;
defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;

defm LDSMIN   : LDOPregister<0b101, "smin", 0, 0, "">;
defm LDSMINA  : LDOPregister<0b101, "smin", 1, 0, "a">;
defm LDSMINL  : LDOPregister<0b101, "smin", 0, 1, "l">;
defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;

defm LDUMAX   : LDOPregister<0b110, "umax", 0, 0, "">;
defm LDUMAXA  : LDOPregister<0b110, "umax", 1, 0, "a">;
defm LDUMAXL  : LDOPregister<0b110, "umax", 0, 1, "l">;
defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;

defm LDUMIN   : LDOPregister<0b111, "umin", 0, 0, "">;
defm LDUMINA  : LDOPregister<0b111, "umin", 1, 0, "a">;
defm LDUMINL  : LDOPregister<0b111, "umin", 0, 1, "l">;
defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;

// v8.1 atomic ST<OP>(register), defined as aliases of LD<OP>(register) with
// Rt = XZR (i.e. the loaded value is discarded)
defm : STOPregister<"stadd","LDADD">; // STADDx
defm : STOPregister<"stclr","LDCLR">; // STCLRx
defm : STOPregister<"steor","LDEOR">; // STEORx
defm : STOPregister<"stset","LDSET">; // STSETx
defm : STOPregister<"stsmax","LDSMAX">;// STSMAXx
defm : STOPregister<"stsmin","LDSMIN">;// STSMINx
defm : STOPregister<"stumax","LDUMAX">;// STUMAXx
defm : STOPregister<"stumin","LDUMIN">;// STUMINx

// v8.5 Memory Tagging Extension
let Predicates = [HasMTE] in {

def IRG   : BaseTwoOperand<0b0100, GPR64sp, "irg", int_aarch64_irg, GPR64sp, GPR64>,
            Sched<[]> {
  let Inst{31} = 1;
}
def GMI   : BaseTwoOperand<0b0101, GPR64, "gmi", int_aarch64_gmi, GPR64sp>, Sched<[]> {
  let Inst{31} = 1;
  let isNotDuplicable = 1;
}
def ADDG  : AddSubG<0, "addg", null_frag>;
def SUBG  : AddSubG<1, "subg", null_frag>;

def : InstAlias<"irg $dst, $src", (IRG GPR64sp:$dst, GPR64sp:$src, XZR), 1>;

def SUBP : SUBP<0, "subp", int_aarch64_subp>, Sched<[]>;
def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]> {
  let Defs = [NZCV];
}

def : InstAlias<"cmpp $lhs, $rhs", (SUBPS XZR, GPR64sp:$lhs, GPR64sp:$rhs), 0>;

def LDG : MemTagLoad<"ldg", "\t$Rt, [$Rn, $offset]">;

def : Pat<(int_aarch64_addg (am_indexedu6s128 GPR64sp:$Rn, uimm6s16:$imm6), imm0_15:$imm4),
          (ADDG GPR64sp:$Rn, imm0_63:$imm6, imm0_15:$imm4)>;
def : Pat<(int_aarch64_ldg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn,  simm9s16:$offset)),
          (LDG GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;

def : InstAlias<"ldg $Rt, [$Rn]", (LDG GPR64:$Rt, GPR64sp:$Rn, 0), 1>;

def LDGM : MemTagVector<1, "ldgm", "\t$Rt, [$Rn]",
                   (outs GPR64:$Rt), (ins GPR64sp:$Rn)>;
def STGM : MemTagVector<0, "stgm", "\t$Rt, [$Rn]",
                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)>;
def STZGM : MemTagVector<0, "stzgm", "\t$Rt, [$Rn]",
                   (outs), (ins GPR64:$Rt, GPR64sp:$Rn)> {
  let Inst{23} = 0;
}

defm STG   : MemTagStore<0b00, "stg">;
defm STZG  : MemTagStore<0b01, "stzg">;
defm ST2G  : MemTagStore<0b10, "st2g">;
defm STZ2G : MemTagStore<0b11, "stz2g">;

def : Pat<(AArch64stg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (STGOffset $Rn, $Rm, $imm)>;
def : Pat<(AArch64stzg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (STZGOffset $Rn, $Rm, $imm)>;
def : Pat<(AArch64st2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (ST2GOffset $Rn, $Rm, $imm)>;
def : Pat<(AArch64stz2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (STZ2GOffset $Rn, $Rm, $imm)>;

defm STGP     : StorePairOffset <0b01, 0, GPR64z, simm7s16, "stgp">;
def  STGPpre  : StorePairPreIdx <0b01, 0, GPR64z, simm7s16, "stgp">;
def  STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">;

def : Pat<(int_aarch64_stg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)),
          (STGOffset GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>;

def : Pat<(int_aarch64_stgp (am_indexed7s128 GPR64sp:$Rn, simm7s16:$imm), GPR64:$Rt, GPR64:$Rt2),
          (STGPi $Rt, $Rt2, $Rn, $imm)>;

def IRGstack
    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rsp, GPR64:$Rm), []>,
      Sched<[]>;
def TAGPstack
    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rn, uimm6s16:$imm6, GPR64sp:$Rm, imm0_15:$imm4), []>,
      Sched<[]>;

// Explicit SP in the first operand prevents ShrinkWrap optimization
// from leaving this instruction out of the stack frame. When IRGstack
// is transformed into IRG, this operand is replaced with the actual
// register / expression for the tagged base pointer of the current function.
def : Pat<(int_aarch64_irg_sp i64:$Rm), (IRGstack SP, i64:$Rm)>;

// Large STG to be expanded into a loop. $sz is the size, $Rn is the start
// address. $Rn_wback is one past the end of the range. $Rm is the loop counter.
let isCodeGenOnly=1, mayStore=1 in {
def STGloop_wback
    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
      Sched<[WriteAdr, WriteST]>;

def STZGloop_wback
    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn_wback), (ins i64imm:$sz, GPR64sp:$Rn),
             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,@earlyclobber $Rm" >,
      Sched<[WriteAdr, WriteST]>;

// Variants of the above where $Rn2 is an independent register not tied to the
// input register $Rn. These exist so that a FrameIndex operand can be used as
// $Rn (which of course cannot be written back).
def STGloop
    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
      Sched<[WriteAdr, WriteST]>;

def STZGloop
    : Pseudo<(outs GPR64common:$Rm, GPR64sp:$Rn2), (ins i64imm:$sz, GPR64sp:$Rn),
             [], "@earlyclobber $Rn2,@earlyclobber $Rm" >,
      Sched<[WriteAdr, WriteST]>;
}

} // Predicates = [HasMTE]

//===----------------------------------------------------------------------===//
// Logical instructions.
//===----------------------------------------------------------------------===//

// (immediate)
defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
defm AND  : LogicalImm<0b00, "and", and, "bic">;
defm EOR  : LogicalImm<0b10, "eor", xor, "eon">;
defm ORR  : LogicalImm<0b01, "orr", or, "orn">;

// FIXME: these aliases *are* canonical sometimes (when movz can't be
// used). Actually, it seems to be working right now, but putting logical_immXX
// here is a bit dodgy on the AsmParser side too.
def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
                                          logical_imm32:$imm), 0>;
def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
                                          logical_imm64:$imm), 0>;


// (register)
defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
defm BICS : LogicalRegS<0b11, 1, "bics",
                        BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
defm AND  : LogicalReg<0b00, 0, "and", and>;
defm BIC  : LogicalReg<0b00, 1, "bic",
                       BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
defm EON  : LogicalReg<0b10, 1, "eon",
                       BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
defm EOR  : LogicalReg<0b10, 0, "eor", xor>;
defm ORN  : LogicalReg<0b01, 1, "orn",
                       BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
defm ORR  : LogicalReg<0b01, 0, "orr", or>;

def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;

def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;

def : InstAlias<"mvn $Wd, $Wm$sh",
                (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
def : InstAlias<"mvn $Xd, $Xm$sh",
                (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;

def : InstAlias<"tst $src1, $src2",
                (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
def : InstAlias<"tst $src1, $src2",
                (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;

def : InstAlias<"tst $src1, $src2",
                        (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
def : InstAlias<"tst $src1, $src2",
                        (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;

def : InstAlias<"tst $src1, $src2$sh",
               (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
def : InstAlias<"tst $src1, $src2$sh",
               (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;


def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;


//===----------------------------------------------------------------------===//
// One operand data processing instructions.
//===----------------------------------------------------------------------===//

defm CLS    : OneOperandData<0b101, "cls">;
defm CLZ    : OneOperandData<0b100, "clz", ctlz>;
defm RBIT   : OneOperandData<0b000, "rbit", bitreverse>;

def  REV16Wr : OneWRegData<0b001, "rev16",
                                  UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
def  REV16Xr : OneXRegData<0b001, "rev16", null_frag>;

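// There is no dedicated count-trailing-zeros instruction, so cttz is selected
// as a bit reverse (rbit) followed by clz.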
def : Pat<(cttz GPR32:$Rn),
          (CLZWr (RBITWr GPR32:$Rn))>;
def : Pat<(cttz GPR64:$Rn),
          (CLZXr (RBITXr GPR64:$Rn))>;
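// The CLS patterns below use the identity cls(x) == ctlz(((x ^ sra(x, bits-1))
// << 1) | 1): xor-ing with the sign-extension turns the leading copies of the
// sign bit into zeros, and the "<< 1 | 1" both drops one from the count (cls
// does not count the sign bit itself) and keeps the ctlz operand nonzero.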
def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
                (i32 1))),
          (CLSWr GPR32:$Rn)>;
def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
                (i64 1))),
          (CLSXr GPR64:$Rn)>;
def : Pat<(int_aarch64_cls GPR32:$Rn), (CLSWr GPR32:$Rn)>;
def : Pat<(int_aarch64_cls64 GPR64:$Rm), (EXTRACT_SUBREG (CLSXr GPR64:$Rm), sub_32)>;

// Unlike the other one-operand instructions, the instructions with the "rev"
// mnemonic do *not* just differ in the size bit; they actually use different
// opcode bits for the different sizes.
def REVWr   : OneWRegData<0b010, "rev", bswap>;
def REVXr   : OneXRegData<0b011, "rev", bswap>;
def REV32Xr : OneXRegData<0b010, "rev32",
                                 UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;

def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;

// The bswap commutes with the rotr so we want a pattern for both possible
// orders.
def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;

//===----------------------------------------------------------------------===//
// Bitfield immediate extraction instruction.
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in
defm EXTR : ExtractImm<"extr">;
def : InstAlias<"ror $dst, $src, $shift",
            (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
def : InstAlias<"ror $dst, $src, $shift",
            (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;

def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
          (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
          (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;
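// e.g. "ror w0, w1, #8" is just "extr w0, w1, w1, #8": a rotate right by an
// immediate is an extract with both source registers the same, which is what
// the aliases and patterns above express.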

//===----------------------------------------------------------------------===//
// Other bitfield immediate instructions.
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in {
defm BFM  : BitfieldImmWith2RegArgs<0b01, "bfm">;
defm SBFM : BitfieldImm<0b00, "sbfm">;
defm UBFM : BitfieldImm<0b10, "ubfm">;
}

def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(7, 31 - shift_amt)
def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  enc = enc > 7 ? 7 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(15, 31 - shift_amt)
def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  enc = enc > 15 ? 15 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(7, 63 - shift_amt)
def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 7 ? 7 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(15, 63 - shift_amt)
def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 15 ? 15 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(31, 63 - shift_amt)
def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 31 ? 31 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
          (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                              (i64 (i32shift_b imm0_31:$imm)))>;
def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
          (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                              (i64 (i64shift_b imm0_63:$imm)))>;
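// e.g. (shl w0, 3) becomes UBFMWri w0, #29, #28: a left shift by n is encoded
// as immr = (32 - n) mod 32 and imms = 31 - n, which is exactly what the
// i32shift_a/i32shift_b transforms above compute.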

let AddedComplexity = 10 in {
def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
}

def : InstAlias<"asr $dst, $src, $shift",
                (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
def : InstAlias<"asr $dst, $src, $shift",
                (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
          (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
          (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;

def : InstAlias<"lsr $dst, $src, $shift",
                (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
def : InstAlias<"lsr $dst, $src, $shift",
                (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

//===----------------------------------------------------------------------===//
// Conditional comparison instructions.
//===----------------------------------------------------------------------===//
defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;

//===----------------------------------------------------------------------===//
// Conditional select instructions.
//===----------------------------------------------------------------------===//
defm CSEL  : CondSelect<0, 0b00, "csel">;

def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;

def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;

def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;

// The inverse of the condition code from the alias instruction is what is used
// in the aliased instruction. The parser already inverts the condition code
// for these aliases.
def : InstAlias<"cset $dst, $cc",
                (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
def : InstAlias<"cset $dst, $cc",
                (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;

def : InstAlias<"csetm $dst, $cc",
                (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
def : InstAlias<"csetm $dst, $cc",
                (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;

def : InstAlias<"cinc $dst, $src, $cc",
                (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cinc $dst, $src, $cc",
                (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

def : InstAlias<"cinv $dst, $src, $cc",
                (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cinv $dst, $src, $cc",
                (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

def : InstAlias<"cneg $dst, $src, $cc",
                (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cneg $dst, $src, $cc",
                (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

//===----------------------------------------------------------------------===//
// PC-relative instructions.
//===----------------------------------------------------------------------===//
let isReMaterializable = 1 in {
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def ADR  : ADRI<0, "adr", adrlabel,
                [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>;
} // hasSideEffects = 0

def ADRP : ADRI<1, "adrp", adrplabel,
                [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
} // isReMaterializable = 1

// Address (ADR) and page address (ADRP) of a constant-pool entry, block
// address, external symbol or jump table.
def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>;
def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>;
def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>;
def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>;
def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>;

//===----------------------------------------------------------------------===//
// Unconditional branch (register) instructions.
//===----------------------------------------------------------------------===//

let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
def RET  : BranchReg<0b0010, "ret", []>;
def DRPS : SpecialReturn<0b0101, "drps">;
def ERET : SpecialReturn<0b0100, "eret">;
} // isReturn = 1, isTerminator = 1, isBarrier = 1

// Default to the LR register.
def : InstAlias<"ret", (RET LR)>;

let isCall = 1, Defs = [LR], Uses = [SP] in {
  def BLR : BranchReg<0b0001, "blr", []>;
  def BLRNoIP : Pseudo<(outs), (ins GPR64noip:$Rn), []>,
                Sched<[WriteBrReg]>,
                PseudoInstExpansion<(BLR GPR64:$Rn)>;
  def BLR_RVMARKER : Pseudo<(outs), (ins variable_ops), []>,
                     Sched<[WriteBrReg]>;
} // isCall

def : Pat<(AArch64call GPR64:$Rn),
          (BLR GPR64:$Rn)>,
      Requires<[NoSLSBLRMitigation]>;
def : Pat<(AArch64call GPR64noip:$Rn),
          (BLRNoIP GPR64noip:$Rn)>,
      Requires<[SLSBLRMitigation]>;

def : Pat<(AArch64call_rvmarker GPR64:$Rn),
          (BLR_RVMARKER GPR64:$Rn)>,
      Requires<[NoSLSBLRMitigation]>;

let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def BR  : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
} // isBranch, isTerminator, isBarrier, isIndirectBranch

// Create a separate pseudo-instruction for codegen to use so that we don't
// flag LR as used in every function. It'll be restored before the RET by the
// epilogue if it's legitimately used.
def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
                   Sched<[WriteBrReg]> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
}

// This is a directive-like pseudo-instruction. The purpose is to insert an
// R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
// (which in the usual case is a BLR).
let hasSideEffects = 1 in
def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
  let AsmString = ".tlsdesccall $sym";
}

// Pseudo instruction to tell the streamer to emit a 'B' character into the
// augmentation string.
def EMITBKEY : Pseudo<(outs), (ins), []>, Sched<[]> {}

// FIXME: maybe the scratch register used shouldn't be fixed to X1?
// FIXME: can "hasSideEffects" be dropped?
let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1,
    isCodeGenOnly = 1 in
def TLSDESC_CALLSEQ
    : Pseudo<(outs), (ins i64imm:$sym),
             [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
      Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
          (TLSDESC_CALLSEQ texternalsym:$sym)>;

//===----------------------------------------------------------------------===//
// Conditional branch (immediate) instruction.
//===----------------------------------------------------------------------===//
def Bcc : BranchCond;

//===----------------------------------------------------------------------===//
// Compare-and-branch instructions.
//===----------------------------------------------------------------------===//
defm CBZ  : CmpBranch<0, "cbz", AArch64cbz>;
defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;

//===----------------------------------------------------------------------===//
// Test-bit-and-branch instructions.
//===----------------------------------------------------------------------===//
defm TBZ  : TestBranch<0, "tbz", AArch64tbz>;
defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;

//===----------------------------------------------------------------------===//
// Unconditional branch (immediate) instructions.
//===----------------------------------------------------------------------===//
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
def B  : BranchImm<0, "b", [(br bb:$addr)]>;
} // isBranch, isTerminator, isBarrier

let isCall = 1, Defs = [LR], Uses = [SP] in {
def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
} // isCall
def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;

//===----------------------------------------------------------------------===//
// Exception generation instructions.
//===----------------------------------------------------------------------===//
let isTrap = 1 in {
def BRK   : ExceptionGeneration<0b001, 0b00, "brk">;
}
def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">;
def HLT   : ExceptionGeneration<0b010, 0b00, "hlt">;
def HVC   : ExceptionGeneration<0b000, 0b10, "hvc">;
def SMC   : ExceptionGeneration<0b000, 0b11, "smc">;
def SVC   : ExceptionGeneration<0b000, 0b01, "svc">;

// DCPSn defaults to an immediate operand of zero if unspecified.
def : InstAlias<"dcps1", (DCPS1 0)>;
def : InstAlias<"dcps2", (DCPS2 0)>;
def : InstAlias<"dcps3", (DCPS3 0)>;

def UDF : UDFType<0, "udf">;

//===----------------------------------------------------------------------===//
// Load instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
defm LDPW : LoadPairOffset<0b00, 0, GPR32z, simm7s4, "ldp">;
defm LDPX : LoadPairOffset<0b10, 0, GPR64z, simm7s8, "ldp">;
defm LDPS : LoadPairOffset<0b00, 1, FPR32Op, simm7s4, "ldp">;
defm LDPD : LoadPairOffset<0b01, 1, FPR64Op, simm7s8, "ldp">;
defm LDPQ : LoadPairOffset<0b10, 1, FPR128Op, simm7s16, "ldp">;

defm LDPSW : LoadPairOffset<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (pre-indexed)
def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (post-indexed)
def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;


// Pair (no allocate)
defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32z, simm7s4, "ldnp">;
defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64z, simm7s8, "ldnp">;
defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32Op, simm7s4, "ldnp">;
defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64Op, simm7s8, "ldnp">;
defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128Op, simm7s16, "ldnp">;

def : Pat<(AArch64ldp (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (LDPXi GPR64sp:$Rn, simm7s8:$offset)>;

//---
// (register offset)
//---

// Integer
defm LDRBB : Load8RO<0b00,  0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
defm LDRW  : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
defm LDRX  : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;

// Floating-point
defm LDRB : Load8RO<0b00,   1, 0b01, FPR8Op,   "ldr", untyped, load>;
defm LDRH : Load16RO<0b01,  1, 0b01, FPR16Op,  "ldr", f16, load>;
defm LDRS : Load32RO<0b10,  1, 0b01, FPR32Op,  "ldr", f32, load>;
defm LDRD : Load64RO<0b11,  1, 0b01, FPR64Op,  "ldr", f64, load>;
defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128Op, "ldr", f128, load>;

// Load sign-extended half-word
defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;

// Load sign-extended byte
defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;

// Load sign-extended word
defm LDRSW  : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;

// Pre-fetch.
defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;

// Regular loads have no alignment requirement, so it is safe to map the
// vector loads directly onto these "interesting" addressing modes.
// FIXME: We could do the same for bitconvert to floating point vectors.
multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
                              ValueType ScalTy, ValueType VecTy,
                              Instruction LOADW, Instruction LOADX,
                              SubRegIndex sub> {
  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
                           sub)>;

  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
                           sub)>;
}
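
// For example, the <ro8, extloadi8, i32, v8i8> instantiation below selects
//   (v8i8 (scalar_to_vector (i32 (extloadi8 addr))))
// as, roughly,
//   (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)), (LDRBroW base, idx, ext), bsub)
// i.e. the scalar is loaded straight into the vector register's b-subregister
// instead of taking a detour through a GPR and an INS.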

let AddedComplexity = 10 in {
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v8i8,  LDRBroW, LDRBroX, bsub>;
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v16i8, LDRBroW, LDRBroX, bsub>;

defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro16, load,       i32, v4f16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, load,       i32, v8f16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro32, load,       i32, v2i32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       i32, v4i32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro32, load,       f32, v2f32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       f32, v4f32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro64, load,       i64, v2i64, LDRDroW, LDRDroX, dsub>;

defm : ScalToVecROLoadPat<ro64, load,       f64, v2f64, LDRDroW, LDRDroX, dsub>;


def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend64:$extend))))),
           (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;

def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend64:$extend))))),
           (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
}

// Match all 64-bit loads whose type is compatible with FPR64
multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
                        Instruction LOADW, Instruction LOADX> {

  def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
let Predicates = [IsLE] in {
  // We must do vector loads with LD1 in big-endian.
  defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v8i8,  LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4bf16, LDRDroW, LDRDroX>;
}

defm : VecROLoadPat<ro64, v1i64,  LDRDroW, LDRDroX>;
defm : VecROLoadPat<ro64, v1f64,  LDRDroW, LDRDroX>;

// Match all 128-bit loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  // We must do vector loads with LD1 in big-endian.
  defm : VecROLoadPat<ro128, v2i64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v2f64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4i32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4f32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8i16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8f16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8bf16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v16i8,  LDRQroW, LDRQroX>;
}
} // AddedComplexity = 10

// zextload -> i64
multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                           sub_32)>;

  def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                           sub_32)>;
}
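
// These patterns rely on a load into a W register implicitly zeroing bits
// [63:32] of the corresponding X register, so an i64 zero-extending load
// needs no explicit extension instruction, e.g.
//   (i64 (zextloadi8 addr))
//     ==> (SUBREG_TO_REG (i64 0), (LDRBBroW base, idx, ext), sub_32)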

let AddedComplexity = 10 in {
  defm : ExtLoadTo64ROPat<ro8,  zextloadi8,  LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  zextloadi1,  LDRBBroW, LDRBBroX>;

  // extload -> zextload
  defm : ExtLoadTo64ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // extloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  extloadi1,   LDRBBroW, LDRBBroX>;
}


// extload/zextload -> i32
multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;

}

let AddedComplexity = 10 in {
  // extload -> zextload
  defm : ExtLoadTo32ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo32ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo32ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
}

//---
// (unsigned immediate)
//---
defm LDRX : LoadUI<0b11, 0, 0b01, GPR64z, uimm12s8, "ldr",
                   [(set GPR64z:$Rt,
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRW : LoadUI<0b10, 0, 0b01, GPR32z, uimm12s4, "ldr",
                   [(set GPR32z:$Rt,
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRB : LoadUI<0b00, 1, 0b01, FPR8Op, uimm12s1, "ldr",
                   [(set FPR8Op:$Rt,
                         (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
defm LDRH : LoadUI<0b01, 1, 0b01, FPR16Op, uimm12s2, "ldr",
                   [(set (f16 FPR16Op:$Rt),
                         (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
defm LDRS : LoadUI<0b10, 1, 0b01, FPR32Op, uimm12s4, "ldr",
                   [(set (f32 FPR32Op:$Rt),
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRD : LoadUI<0b11, 1, 0b01, FPR64Op, uimm12s8, "ldr",
                   [(set (f64 FPR64Op:$Rt),
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128Op, uimm12s16, "ldr",
                 [(set (f128 FPR128Op:$Rt),
                       (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;

// bf16 load pattern
def : Pat <(bf16 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;

// Regular loads have no alignment requirement, so it is safe to map the
// vector loads directly onto these "interesting" addressing modes.
// FIXME: We could do the same for bitconvert to floating point vectors.
def : Pat <(v8i8 (scalar_to_vector (i32
               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v16i8 (scalar_to_vector (i32
               (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v4i16 (scalar_to_vector (i32
               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v8i16 (scalar_to_vector (i32
               (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v2i32 (scalar_to_vector (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v4i32 (scalar_to_vector (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v1i64 (scalar_to_vector (i64
               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat <(v2i64 (scalar_to_vector (i64
               (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                          (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;

// Match all 64-bit loads whose type is compatible with FPR64
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian.
  def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4bf16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
}
def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;

// Match all 128-bit loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian.
  def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8bf16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
}
def : Pat<(f128  (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
          (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;

defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
                    [(set GPR32:$Rt,
                          (zextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                     uimm12s2:$offset)))]>;
defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
                    [(set GPR32:$Rt,
                          (zextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                   uimm12s1:$offset)))]>;
// zextload -> i64
def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;

// zextloadi1 -> zextloadi8
def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;

// extload -> zextload
def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
    (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;

// load sign-extended half-word
defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
                     [(set GPR32:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;
defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
                     [(set GPR64:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;

// load sign-extended byte
defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
                     [(set GPR32:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;
defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
                     [(set GPR64:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;

// load sign-extended word
defm LDRSW  : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
                     [(set GPR64:$Rt,
                           (sextloadi32 (am_indexed32 GPR64sp:$Rn,
                                                      uimm12s4:$offset)))]>;

// load zero-extended word
def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
      (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;

// Pre-fetch.
def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
                        [(AArch64Prefetch imm:$Rt,
                                        (am_indexed64 GPR64sp:$Rn,
                                                      uimm12s8:$offset))]>;

def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;

//---
// (literal)

def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
  if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
    const DataLayout &DL = MF->getDataLayout();
    Align Alignment = G->getGlobal()->getPointerAlignment(DL);
    return Alignment >= 4 && G->getOffset() % 4 == 0;
  }
  if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
    return C->getAlign() >= 4 && C->getOffset() % 4 == 0;
  return false;
}]>;
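
// LDR (literal) addresses a PC-relative location through a signed 19-bit
// offset that is scaled by 4, so only globals and constant-pool entries whose
// alignment and offset are both multiples of 4 can be reached this way.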

def LDRWl : LoadLiteral<0b00, 0, GPR32z, "ldr",
  [(set GPR32z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRXl : LoadLiteral<0b01, 0, GPR64z, "ldr",
  [(set GPR64z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRSl : LoadLiteral<0b00, 1, FPR32Op, "ldr",
  [(set (f32 FPR32Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRDl : LoadLiteral<0b01, 1, FPR64Op, "ldr",
  [(set (f64 FPR64Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRQl : LoadLiteral<0b10, 1, FPR128Op, "ldr",
  [(set (f128 FPR128Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;

// load sign-extended word
def LDRSWl : LoadLiteral<0b10, 0, GPR64z, "ldrsw",
  [(set GPR64z:$Rt, (sextloadi32 (AArch64adr alignedglobal:$label)))]>;

let AddedComplexity = 20 in {
def : Pat<(i64 (zextloadi32 (AArch64adr alignedglobal:$label))),
        (SUBREG_TO_REG (i64 0), (LDRWl $label), sub_32)>;
}

// prefetch
def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
//                   [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;

//---
// (unscaled immediate)
defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64z, "ldur",
                    [(set GPR64z:$Rt,
                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32z, "ldur",
                    [(set GPR32z:$Rt,
                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8Op, "ldur",
                    [(set FPR8Op:$Rt,
                          (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16Op, "ldur",
                    [(set (f16 FPR16Op:$Rt),
                          (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32Op, "ldur",
                    [(set (f32 FPR32Op:$Rt),
                          (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64Op, "ldur",
                    [(set (f64 FPR64Op:$Rt),
                          (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128Op, "ldur",
                    [(set (f128 FPR128Op:$Rt),
                          (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;

defm LDURHH
    : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
             [(set GPR32:$Rt,
                    (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURBB
    : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
             [(set GPR32:$Rt,
                    (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// Match all 64-bit loads whose type is compatible with FPR64
let Predicates = [IsLE] in {
  def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
}
def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;

// Match all 128-bit loads whose type is compatible with FPR128
let Predicates = [IsLE] in {
  def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
}

// anyext -> zext
def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
// unscaled zext
def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;


//---
// LDR mnemonics fall back to LDUR for negative or unaligned offsets.

// Define new assembler match classes because we only want to match these
// when they don't otherwise match the scaled addressing mode for LDR/STR.
// Don't associate a DiagnosticType either, as we want the diagnostic for
// the canonical form (the scaled operand) to take precedence.
class SImm9OffsetOperand<int Width> : AsmOperandClass {
  let Name = "SImm9OffsetFB" # Width;
  let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
  let RenderMethod = "addImmOperands";
}
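
// The isSImm9OffsetFB<Width> predicate (implemented by the assembly parser)
// is expected to accept an offset only if it fits in a signed 9-bit
// immediate *and* is not representable as a scaled unsigned 12-bit offset
// for the given access width; e.g. for Width == 64, #-8 and #4 would be
// accepted here, while #8 is left to the canonical scaled LDR/STR form.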

def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;

def simm9_offset_fb8 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB8Operand;
}
def simm9_offset_fb16 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB16Operand;
}
def simm9_offset_fb32 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB32Operand;
}
def simm9_offset_fb64 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB64Operand;
}
def simm9_offset_fb128 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB128Operand;
}

def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
               (LDURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
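
// For example, "ldr x0, [x8, #-8]" cannot use the scaled LDRXui encoding
// (whose offset must be a non-negative multiple of 8), so the assembler
// matches the LDURXi alias above instead; the 0 at the end of each alias
// makes it parse-only, so the instruction still prints as "ldur".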

// load sign-extended half-word
defm LDURSHW
    : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
               [(set GPR32:$Rt,
                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSHX
    : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
              [(set GPR64:$Rt,
                    (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended byte
defm LDURSBW
    : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
                [(set GPR32:$Rt,
                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSBX
    : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
                [(set GPR64:$Rt,
                      (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended word
defm LDURSW
    : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
              [(set GPR64:$Rt,
                    (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;

// Zero- and sign-extending aliases from the generic LDR* mnemonics to LDUR*.
def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
                (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
                (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
                (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
                (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
                (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
                (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
                (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;

// Pre-fetch.
defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
                  [(AArch64Prefetch imm:$Rt,
                                  (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;

//---
// (unscaled immediate, unprivileged)
defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;

defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;

// load sign-extended half-word
defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;

// load sign-extended byte
defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;

// load sign-extended word
defm LDTRSW  : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;

//---
// (immediate pre-indexed)
def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte and half-word
def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;

//---
// (immediate post-indexed)
def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8Op,  "ldr">;
def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte and half-word
def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;

//===----------------------------------------------------------------------===//
// Store instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
// FIXME: Use dedicated range-checked addressing mode operand here.
defm STPW : StorePairOffset<0b00, 0, GPR32z, simm7s4, "stp">;
defm STPX : StorePairOffset<0b10, 0, GPR64z, simm7s8, "stp">;
defm STPS : StorePairOffset<0b00, 1, FPR32Op, simm7s4, "stp">;
defm STPD : StorePairOffset<0b01, 1, FPR64Op, simm7s8, "stp">;
defm STPQ : StorePairOffset<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (pre-indexed)
def STPWpre : StorePairPreIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpre : StorePairPreIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpre : StorePairPreIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpre : StorePairPreIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpre : StorePairPreIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (post-indexed)
def STPWpost : StorePairPostIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpost : StorePairPostIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpost : StorePairPostIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpost : StorePairPostIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpost : StorePairPostIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (no allocate)
defm STNPW : StorePairNoAlloc<0b00, 0, GPR32z, simm7s4, "stnp">;
defm STNPX : StorePairNoAlloc<0b10, 0, GPR64z, simm7s8, "stnp">;
defm STNPS : StorePairNoAlloc<0b00, 1, FPR32Op, simm7s4, "stnp">;
defm STNPD : StorePairNoAlloc<0b01, 1, FPR64Op, simm7s8, "stnp">;
defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128Op, simm7s16, "stnp">;

def : Pat<(AArch64stp GPR64z:$Rt, GPR64z:$Rt2, (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (STPXi GPR64z:$Rt, GPR64z:$Rt2, GPR64sp:$Rn, simm7s8:$offset)>;

def : Pat<(AArch64stnp FPR128:$Rt, FPR128:$Rt2, (am_indexed7s128 GPR64sp:$Rn, simm7s16:$offset)),
          (STNPQi FPR128:$Rt, FPR128:$Rt2, GPR64sp:$Rn, simm7s16:$offset)>;


//---
// (Register offset)

// Integer
defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
defm STRW  : Store32RO<0b10, 0, 0b00, GPR32, "str",  i32, store>;
defm STRX  : Store64RO<0b11, 0, 0b00, GPR64, "str",  i64, store>;


// Floating-point
defm STRB : Store8RO< 0b00,  1, 0b00, FPR8Op,   "str", untyped, store>;
defm STRH : Store16RO<0b01,  1, 0b00, FPR16Op,  "str", f16,     store>;
defm STRS : Store32RO<0b10,  1, 0b00, FPR32Op,  "str", f32,     store>;
defm STRD : Store64RO<0b11,  1, 0b00, FPR64Op,  "str", f64,     store>;
defm STRQ : Store128RO<0b00, 1, 0b10, FPR128Op, "str", f128,    store>;

let Predicates = [UseSTRQro], AddedComplexity = 10 in {
  def : Pat<(store (f128 FPR128:$Rt),
                        (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
                                        ro_Wextend128:$extend)),
            (STRQroW FPR128:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend)>;
  def : Pat<(store (f128 FPR128:$Rt),
                        (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
                                        ro_Xextend128:$extend)),
            (STRQroX FPR128:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend128:$extend)>;
}

multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
                                 Instruction STRW, Instruction STRX> {

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}
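
// A truncating store of an i64 value only writes the low bits, which on
// AArch64 is just a store of the W sub-register (e.g. "strb wN, [...]"),
// hence the EXTRACT_SUBREG to sub_32 in the output patterns above:
//   (truncstorei8 GPR64:$Rt, addr)
//     ==> (STRBBroW (EXTRACT_SUBREG GPR64:$Rt, sub_32), base, idx, ext)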

let AddedComplexity = 10 in {
  // truncstore i64
  defm : TruncStoreFrom64ROPat<ro8,  truncstorei8,  STRBBroW, STRBBroX>;
  defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
  defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW,  STRWroX>;
}

multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
                         Instruction STRW, Instruction STRX> {
  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
// Match all 64-bit stores whose type is compatible with FPR64
let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4bf16, FPR64, STRDroW, STRDroX>;
}

defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;

// Match all 128-bit stores whose type is compatible with FPR128
let Predicates = [IsLE, UseSTRQro] in {
  // We must use ST1 to store vectors in big-endian.
  defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8bf16, FPR128, STRQroW, STRQroX>;
}
} // AddedComplexity = 10

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
                              ValueType VecTy, ValueType STy,
                              SubRegIndex SubRegIdx,
                              Instruction STRW, Instruction STRX> {

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 19 in {
  defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro16,         store, v8f16, f16, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro32,         store, v4i32, i32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro32,         store, v4f32, f32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro64,         store, v2i64, i64, dsub, STRDroW, STRDroX>;
  defm : VecROStoreLane0Pat<ro64,         store, v2f64, f64, dsub, STRDroW, STRDroX>;
}
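
// For example, storing lane 0 of a v4f32 becomes a plain "str sN, [...]":
// element 0 of a 128-bit vector register is simply its s-subregister, so no
// ST1 lane store is needed and the richer STR addressing modes remain usable.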

//---
// (unsigned immediate)
defm STRX : StoreUIz<0b11, 0, 0b00, GPR64z, uimm12s8, "str",
                   [(store GPR64z:$Rt,
                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRW : StoreUIz<0b10, 0, 0b00, GPR32z, uimm12s4, "str",
                    [(store GPR32z:$Rt,
                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRB : StoreUI<0b00, 1, 0b00, FPR8Op, uimm12s1, "str",
                    [(store FPR8Op:$Rt,
                            (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
defm STRH : StoreUI<0b01, 1, 0b00, FPR16Op, uimm12s2, "str",
                    [(store (f16 FPR16Op:$Rt),
                            (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
defm STRS : StoreUI<0b10, 1, 0b00, FPR32Op, uimm12s4, "str",
                    [(store (f32 FPR32Op:$Rt),
                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRD : StoreUI<0b11, 1, 0b00, FPR64Op, uimm12s8, "str",
                    [(store (f64 FPR64Op:$Rt),
                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRQ : StoreUI<0b00, 1, 0b10, FPR128Op, uimm12s16, "str", []>;

defm STRHH : StoreUIz<0b01, 0, 0b00, GPR32z, uimm12s2, "strh",
                     [(truncstorei16 GPR32z:$Rt,
                                     (am_indexed16 GPR64sp:$Rn,
                                                   uimm12s2:$offset))]>;
defm STRBB : StoreUIz<0b00, 0, 0b00, GPR32z, uimm12s1,  "strb",
                     [(truncstorei8 GPR32z:$Rt,
                                    (am_indexed8 GPR64sp:$Rn,
                                                 uimm12s1:$offset))]>;

// bf16 store pattern
def : Pat<(store (bf16 FPR16Op:$Rt),
                 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
          (STRHui FPR16:$Rt, GPR64sp:$Rn, uimm12s2:$offset)>;

let AddedComplexity = 10 in {

// Match all 64-bit stores whose type is compatible with FPR64
def : Pat<(store (v1i64 FPR64:$Rt),
                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(store (v1f64 FPR64:$Rt),
                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4bf16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
}

// Match all 128-bit stores whose type is compatible with FPR128
def : Pat<(store (f128  FPR128:$Rt),
                 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
          (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8bf16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
}

// truncstore i64
def : Pat<(truncstorei32 GPR64:$Rt,
                         (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
  (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt,
                         (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
  (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
  (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;

} // AddedComplexity = 10

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecStoreLane0Pat<Operand UIAddrMode, SDPatternOperator storeop,
                            ValueType VTy, ValueType STy,
                            SubRegIndex SubRegIdx, Operand IndexType,
                            Instruction STR> {
  def : Pat<(storeop (STy (vector_extract (VTy VecListOne128:$Vt), 0)),
                     (UIAddrMode GPR64sp:$Rn, IndexType:$offset)),
            (STR (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                 GPR64sp:$Rn, IndexType:$offset)>;
}

let AddedComplexity = 19 in {
  defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>;
  defm : VecStoreLane0Pat<am_indexed16,         store, v8f16, f16, hsub, uimm12s2, STRHui>;
  defm : VecStoreLane0Pat<am_indexed32,         store, v4i32, i32, ssub, uimm12s4, STRSui>;
  defm : VecStoreLane0Pat<am_indexed32,         store, v4f32, f32, ssub, uimm12s4, STRSui>;
  defm : VecStoreLane0Pat<am_indexed64,         store, v2i64, i64, dsub, uimm12s8, STRDui>;
  defm : VecStoreLane0Pat<am_indexed64,         store, v2f64, f64, dsub, uimm12s8, STRDui>;
}

//---
// (unscaled immediate)
defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64z, "stur",
                         [(store GPR64z:$Rt,
                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32z, "stur",
                         [(store GPR32z:$Rt,
                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8Op, "stur",
                         [(store FPR8Op:$Rt,
                                 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16Op, "stur",
                         [(store (f16 FPR16Op:$Rt),
                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32Op, "stur",
                         [(store (f32 FPR32Op:$Rt),
                                 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64Op, "stur",
                         [(store (f64 FPR64Op:$Rt),
                                 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128Op, "stur",
                         [(store (f128 FPR128Op:$Rt),
                                 (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32z, "sturh",
                         [(truncstorei16 GPR32z:$Rt,
                                 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32z, "sturb",
                         [(truncstorei8 GPR32z:$Rt,
                                  (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;

// Armv8.4 Weaker Release Consistency enhancements
//         LDAPR & STLR with Immediate Offset instructions
let Predicates = [HasRCPC_IMMO] in {
defm STLURB     : BaseStoreUnscaleV84<"stlurb",  0b00, 0b00, GPR32>;
defm STLURH     : BaseStoreUnscaleV84<"stlurh",  0b01, 0b00, GPR32>;
defm STLURW     : BaseStoreUnscaleV84<"stlur",   0b10, 0b00, GPR32>;
defm STLURX     : BaseStoreUnscaleV84<"stlur",   0b11, 0b00, GPR64>;
defm LDAPURB    : BaseLoadUnscaleV84<"ldapurb",  0b00, 0b01, GPR32>;
defm LDAPURSBW  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b11, GPR32>;
defm LDAPURSBX  : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b10, GPR64>;
defm LDAPURH    : BaseLoadUnscaleV84<"ldapurh",  0b01, 0b01, GPR32>;
defm LDAPURSHW  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b11, GPR32>;
defm LDAPURSHX  : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b10, GPR64>;
defm LDAPUR     : BaseLoadUnscaleV84<"ldapur",   0b10, 0b01, GPR32>;
defm LDAPURSW   : BaseLoadUnscaleV84<"ldapursw", 0b10, 0b10, GPR64>;
defm LDAPURX    : BaseLoadUnscaleV84<"ldapur",   0b11, 0b01, GPR64>;
}

// Match all 64-bit stores whose type is compatible with FPR64
def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let AddedComplexity = 10 in {

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4bf16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

// Match all 128-bit stores whose type is compatible with FPR128
def : Pat<(store (f128 FPR128:$Rt), (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
          (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8bf16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

} // AddedComplexity = 10

// unscaled i64 truncating stores
def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
  (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
  (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
  (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;

// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecStoreULane0Pat<SDPatternOperator StoreOp,
                             ValueType VTy, ValueType STy,
                             SubRegIndex SubRegIdx, Instruction STR> {
  defm : VecStoreLane0Pat<am_unscaled128, StoreOp, VTy, STy, SubRegIdx, simm9, STR>;
}

let AddedComplexity = 19 in {
  defm : VecStoreULane0Pat<truncstorei16, v8i16, i32, hsub, STURHi>;
  defm : VecStoreULane0Pat<store,         v8f16, f16, hsub, STURHi>;
  defm : VecStoreULane0Pat<store,         v4i32, i32, ssub, STURSi>;
  defm : VecStoreULane0Pat<store,         v4f32, f32, ssub, STURSi>;
  defm : VecStoreULane0Pat<store,         v2i64, i64, dsub, STURDi>;
  defm : VecStoreULane0Pat<store,         v2f64, f64, dsub, STURDi>;
}
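// Editorial example (not in the original source): these patterns let a
// store of vector lane 0 go straight through an FP store of the matching
// subregister, e.g.
//   (store (f32 (vector_extract (v4f32 V128:$v), 0)), addr)
// selects to "stur s0, [xN, #imm]" instead of extracting the element into
// another register and storing that.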

//---
// STR mnemonics fall back to STUR for negative or unaligned offsets.
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;

def : InstAlias<"strb $Rt, [$Rn, $offset]",
                (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"strh $Rt, [$Rn, $offset]",
                (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
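// Editorial example (not in the original source): "str x0, [x1, #-8]"
// cannot be encoded as STRXui, whose immediate must be a non-negative
// multiple of 8, so the assembler accepts it through these aliases and
// emits the STURXi encoding instead. The trailing "0" marks each alias as
// assembly-only, so the disassembler still prints the canonical stur form.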

//---
// (unscaled immediate, unprivileged)
defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;

defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;

//---
// (immediate pre-indexed)
def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32z, "str",  pre_store, i32>;
def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64z, "str",  pre_store, i64>;
def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8Op,  "str",  pre_store, untyped>;
def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16Op, "str",  pre_store, f16>;
def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32Op, "str",  pre_store, f32>;
def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64Op, "str",  pre_store, f64>;
def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128Op, "str", pre_store, f128>;

def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32z, "strb", pre_truncsti8,  i32>;
def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32z, "strh", pre_truncsti16, i32>;

// truncstore i64
def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
           simm9:$off)>;
def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
            simm9:$off)>;
def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
            simm9:$off)>;
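// Editorial note (not in the original source): no store narrower than 64
// bits takes an X register, so a truncating store of an i64 goes through
// the W view of the same register via EXTRACT_SUBREG sub_32; e.g. a
// pre-indexed i64->i32 truncating store becomes
//   str w0, [x1, #8]!    // w0 is the low half of x0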

def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;

//---
// (immediate post-indexed)
def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32z,  "str", post_store, i32>;
def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64z,  "str", post_store, i64>;
def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8Op,   "str", post_store, untyped>;
def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16Op,  "str", post_store, f16>;
def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32Op,  "str", post_store, f32>;
def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64Op,  "str", post_store, f64>;
def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128Op, "str", post_store, f128>;

def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32z, "strb", post_truncsti8, i32>;
def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32z, "strh", post_truncsti16, i32>;

// truncstore i64
def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
            simm9:$off)>;
def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
             simm9:$off)>;
def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
  (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
             simm9:$off)>;

def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;

//===----------------------------------------------------------------------===//
// Load/store exclusive instructions.
//===----------------------------------------------------------------------===//

def LDARW  : LoadAcquire   <0b10, 1, 1, 0, 1, GPR32, "ldar">;
def LDARX  : LoadAcquire   <0b11, 1, 1, 0, 1, GPR64, "ldar">;
def LDARB  : LoadAcquire   <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
def LDARH  : LoadAcquire   <0b01, 1, 1, 0, 1, GPR32, "ldarh">;

def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;

def LDXRW  : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
def LDXRX  : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
def LDXRB  : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
def LDXRH  : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;

def STLRW  : StoreRelease  <0b10, 1, 0, 0, 1, GPR32, "stlr">;
def STLRX  : StoreRelease  <0b11, 1, 0, 0, 1, GPR64, "stlr">;
def STLRB  : StoreRelease  <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
def STLRH  : StoreRelease  <0b01, 1, 0, 0, 1, GPR32, "stlrh">;

def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;

def STXRW  : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
def STXRX  : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
def STXRB  : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
def STXRH  : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;

def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;

def LDXPW  : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
def LDXPX  : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;

def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;

def STXPW  : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
def STXPX  : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;

let Predicates = [HasLOR] in {
  // v8.1a "Limited Ordering Regions" extension load-acquire instructions
  def LDLARW  : LoadAcquire   <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
  def LDLARX  : LoadAcquire   <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
  def LDLARB  : LoadAcquire   <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
  def LDLARH  : LoadAcquire   <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;

  // v8.1a "Limited Ordering Regions" extension store-release instructions
  def STLLRW  : StoreRelease   <0b10, 1, 0, 0, 0, GPR32, "stllr">;
  def STLLRX  : StoreRelease   <0b11, 1, 0, 0, 0, GPR64, "stllr">;
  def STLLRB  : StoreRelease   <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
  def STLLRH  : StoreRelease   <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
}

//===----------------------------------------------------------------------===//
// Scaled floating point to integer conversion instructions.
//===----------------------------------------------------------------------===//

defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;

multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
  def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
  def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
  def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
  def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
  def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
  def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;

  def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
  def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
  def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
}

defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;
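// Editorial worked example (not in the original source): the fmul patterns
// above fold a multiply by 2^fbits into the fixed-point conversion.
// fixedpoint_f32_i32:$scale matches the constant 2^fbits and encodes fbits
// in the instruction, so for
//   fcvtzs w0, s0, #4
// an input of 1.5 produces trunc(1.5 * 16) = 24.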

multiclass FPToIntegerPats<SDNode to_int, SDNode round, string INST> {
  def : Pat<(i32 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
}

defm : FPToIntegerPats<fp_to_sint, fceil,  "FCVTPS">;
defm : FPToIntegerPats<fp_to_uint, fceil,  "FCVTPU">;
defm : FPToIntegerPats<fp_to_sint, ffloor, "FCVTMS">;
defm : FPToIntegerPats<fp_to_uint, ffloor, "FCVTMU">;
defm : FPToIntegerPats<fp_to_sint, ftrunc, "FCVTZS">;
defm : FPToIntegerPats<fp_to_uint, ftrunc, "FCVTZU">;
defm : FPToIntegerPats<fp_to_sint, fround, "FCVTAS">;
defm : FPToIntegerPats<fp_to_uint, fround, "FCVTAU">;
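// Editorial note (not in the original source): these folds merge an
// explicit rounding step into the conversion, e.g.
//   frintp s0, s0          // fceil
//   fcvtzs w0, s0
// collapses to "fcvtps w0, s0" (convert with round toward +infinity).
// Likewise ffloor pairs with FCVTM* (toward -infinity), ftrunc with FCVTZ*
// (toward zero), and fround with FCVTA* (nearest, ties away from zero).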

let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (lround f16:$Rn)),
            (!cast<Instruction>(FCVTASUWHr) f16:$Rn)>;
  def : Pat<(i64 (lround f16:$Rn)),
            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
  def : Pat<(i64 (llround f16:$Rn)),
            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
}
def : Pat<(i32 (lround f32:$Rn)),
          (!cast<Instruction>(FCVTASUWSr) f32:$Rn)>;
def : Pat<(i32 (lround f64:$Rn)),
          (!cast<Instruction>(FCVTASUWDr) f64:$Rn)>;
def : Pat<(i64 (lround f32:$Rn)),
          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
def : Pat<(i64 (lround f64:$Rn)),
          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
def : Pat<(i64 (llround f32:$Rn)),
          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
def : Pat<(i64 (llround f64:$Rn)),
          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;

//===----------------------------------------------------------------------===//
// Scaled integer to floating point conversion instructions.
//===----------------------------------------------------------------------===//

defm SCVTF : IntegerToFP<0, "scvtf", any_sint_to_fp>;
defm UCVTF : IntegerToFP<1, "ucvtf", any_uint_to_fp>;

//===----------------------------------------------------------------------===//
// Unscaled integer to floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FMOV : UnscaledConversion<"fmov">;

// Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
def FMOVH0 : Pseudo<(outs FPR16:$Rd), (ins), [(set f16:$Rd, (fpimm0))]>,
    Sched<[WriteF]>, Requires<[HasFullFP16]>;
def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
    Sched<[WriteF]>;
def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
    Sched<[WriteF]>;
}
// Similarly, add assembler aliases for FMOV of zero.
def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
    Requires<[HasFullFP16]>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;
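// Editorial note (not in the original source): these pseudos expand to
// integer-to-FP moves of the zero register, e.g. FMOVS0 becomes
// "fmov s0, wzr". Marking them isReMaterializable lets the register
// allocator recreate an FP zero on demand instead of spilling it.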

//===----------------------------------------------------------------------===//
// Floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FCVT : FPConversion<"fcvt">;

//===----------------------------------------------------------------------===//
// Floating point single operand instructions.
//===----------------------------------------------------------------------===//

defm FABS   : SingleOperandFPData<0b0001, "fabs", fabs>;
defm FMOV   : SingleOperandFPData<0b0000, "fmov">;
defm FNEG   : SingleOperandFPData<0b0010, "fneg", fneg>;
defm FRINTA : SingleOperandFPData<0b1100, "frinta", fround>;
defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
defm FRINTN : SingleOperandFPData<0b1000, "frintn", int_aarch64_neon_frintn>;
defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;

def : Pat<(v1f64 (int_aarch64_neon_frintn (v1f64 FPR64:$Rn))),
          (FRINTNDr FPR64:$Rn)>;

defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;

let SchedRW = [WriteFDiv] in {
defm FSQRT  : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
}

let Predicates = [HasFRInt3264] in {
  defm FRINT32Z : FRIntNNT<0b00, "frint32z">;
  defm FRINT64Z : FRIntNNT<0b10, "frint64z">;
  defm FRINT32X : FRIntNNT<0b01, "frint32x">;
  defm FRINT64X : FRIntNNT<0b11, "frint64x">;
} // HasFRInt3264

let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (lrint f16:$Rn)),
            (FCVTZSUWHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
  def : Pat<(i64 (lrint f16:$Rn)),
            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
  def : Pat<(i64 (llrint f16:$Rn)),
            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
}
def : Pat<(i32 (lrint f32:$Rn)),
          (FCVTZSUWSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i32 (lrint f64:$Rn)),
          (FCVTZSUWDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
def : Pat<(i64 (lrint f32:$Rn)),
          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i64 (lrint f64:$Rn)),
          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
def : Pat<(i64 (llrint f32:$Rn)),
          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i64 (llrint f64:$Rn)),
          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;

//===----------------------------------------------------------------------===//
// Floating point two operand instructions.
//===----------------------------------------------------------------------===//

defm FADD   : TwoOperandFPData<0b0010, "fadd", fadd>;
let SchedRW = [WriteFDiv] in {
defm FDIV   : TwoOperandFPData<0b0001, "fdiv", fdiv>;
}
defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", fmaxnum>;
defm FMAX   : TwoOperandFPData<0b0100, "fmax", fmaximum>;
defm FMINNM : TwoOperandFPData<0b0111, "fminnm", fminnum>;
defm FMIN   : TwoOperandFPData<0b0101, "fmin", fminimum>;
let SchedRW = [WriteFMul] in {
defm FMUL   : TwoOperandFPData<0b0000, "fmul", fmul>;
defm FNMUL  : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
}
defm FSUB   : TwoOperandFPData<0b0011, "fsub", fsub>;

def : Pat<(v1f64 (fmaximum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminimum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;

//===----------------------------------------------------------------------===//
// Floating point three operand instructions.
//===----------------------------------------------------------------------===//

defm FMADD  : ThreeOperandFPData<0, 0, "fmadd", fma>;
defm FMSUB  : ThreeOperandFPData<0, 1, "fmsub",
     TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
     TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
     TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;

// The following def pats catch the case where the LHS of an FMA is negated.
// The TriOpFrag above catches the case where the middle operand is negated.

// N.b. FMSUB etc have the accumulator at the *end* of (ins), unlike
// the NEON variant.

// Here we handle first "a + (-b)*c", which maps onto FMSUB:

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, FPR16:$Ra)),
          (FMSUBHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
          (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
          (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

// Now it's time for "(-a) + (-b)*c", i.e. -(a + b*c), which maps onto FNMADD:

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, (fneg FPR16:$Ra))),
          (FNMADDHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
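// Editorial worked example (not in the original source), with a = $Ra,
// b = $Rn, c = $Rm:
//   fma(-b, c, a)  = a - b*c     -> FMSUB  ($Ra - $Rn*$Rm)
//   fma(-b, c, -a) = -(a + b*c)  -> FNMADD (-($Rn*$Rm) - $Ra)
// The negated-middle-operand case fma(b, -c, a) is already covered by the
// FMSUB TriOpFrag in the instruction definition above.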

//===----------------------------------------------------------------------===//
// Floating point comparison instructions.
//===----------------------------------------------------------------------===//

defm FCMPE : FPComparison<1, "fcmpe", AArch64strict_fcmpe>;
defm FCMP  : FPComparison<0, "fcmp", AArch64any_fcmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional comparison instructions.
//===----------------------------------------------------------------------===//

defm FCCMPE : FPCondComparison<1, "fccmpe">;
defm FCCMP  : FPCondComparison<0, "fccmp", AArch64fccmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional select instruction.
//===----------------------------------------------------------------------===//

defm FCSEL : FPCondSelect<"fcsel">;

// CSEL instructions providing f128 types need to be handled by a
// pseudo-instruction since the eventual code will need to introduce basic
// blocks and control flow.
def F128CSEL : Pseudo<(outs FPR128:$Rd),
                      (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
                      [(set (f128 FPR128:$Rd),
                            (AArch64csel FPR128:$Rn, FPR128:$Rm,
                                       (i32 imm:$cond), NZCV))]> {
  let Uses = [NZCV];
  let usesCustomInserter = 1;
  let hasNoSchedulingInfo = 1;
}
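// Editorial sketch (not in the original source): the custom inserter
// (EmitF128CSEL in AArch64ISelLowering.cpp) lowers this pseudo into a
// diamond: a conditional branch on the inherited NZCV flags, one block
// forwarding $Rn, one forwarding $Rm, and a PHI merging the two, since
// there is no 128-bit CSEL that can select between Q registers directly.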

//===----------------------------------------------------------------------===//
// Instructions used for emitting unwind opcodes on ARM64 Windows.
//===----------------------------------------------------------------------===//
let isPseudo = 1 in {
  def SEH_StackAlloc : Pseudo<(outs), (ins i32imm:$size), []>, Sched<[]>;
  def SEH_SaveFPLR : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFPLR_X : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SetFP : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_AddFP : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_Nop : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_PrologEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_EpilogStart : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_EpilogEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions for Windows EH
//===----------------------------------------------------------------------===//
let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1, isPseudo = 1 in {
   def CLEANUPRET : Pseudo<(outs), (ins), [(cleanupret)]>, Sched<[]>;
   let usesCustomInserter = 1 in
     def CATCHRET : Pseudo<(outs), (ins am_brcond:$dst, am_brcond:$src), [(catchret bb:$dst, bb:$src)]>,
                    Sched<[]>;
}

//===----------------------------------------------------------------------===//
// Floating point immediate move.
//===----------------------------------------------------------------------===//

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
defm FMOV : FPMoveImmediate<"fmov">;
}

//===----------------------------------------------------------------------===//
// Advanced SIMD two vector instructions.
//===----------------------------------------------------------------------===//

defm UABDL   : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
                                          AArch64uabd>;
// Match UABDL in log2-shuffle patterns.
def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
                           (zext (v8i8 V64:$opB))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (v8i8 V64:$opA)),
                                (zext (v8i8 V64:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 V128:$opA)),
                           (zext (extract_high_v16i8 V128:$opB))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
                                (zext (extract_high_v16i8 V128:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
                           (zext (v4i16 V64:$opB))))),
          (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 V128:$opA)),
                           (zext (extract_high_v8i16 V128:$opB))))),
          (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
                           (zext (v2i32 V64:$opB))))),
          (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 V128:$opA)),
                           (zext (extract_high_v4i32 V128:$opB))))),
          (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;
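// Editorial note (not in the original source): the xor/ashr patterns above
// re-recognize the generic expansion of abs, i.e.
//   abs(x) == (x + (x >>s 15)) ^ (x >>s 15)   // for i16 elements
// applied to a widened difference, so the whole sequence can still be
// selected as a single UABDL.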

defm ABS    : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
defm CLS    : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
defm CLZ    : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
defm CMEQ   : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
defm CNT    : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
defm FABS   : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;

defm FCMEQ  : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
defm FCVTL  : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
          (FCVTLv4i16 V64:$Rn)>;
def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
                                                              (i64 4)))),
          (FCVTLv8i16 V128:$Rn)>;
def : Pat<(v2f64 (fpextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;

def : Pat<(v4f32 (fpextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;

defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
defm FCVTN  : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
          (FCVTNv4i16 V128:$Rn)>;
def : Pat<(concat_vectors V64:$Rd,
                          (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
          (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
def : Pat<(v2f32 (fpround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
def : Pat<(v4f16 (fpround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
def : Pat<(concat_vectors V64:$Rd, (v2f32 (fpround (v2f64 V128:$Rn)))),
          (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
                                        int_aarch64_neon_fcvtxn>;
defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;

def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;

def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;

defm FNEG   : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", fround>;
defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", int_aarch64_neon_frintn>;
defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;

let Predicates = [HasFRInt3264] in {
  defm FRINT32Z : FRIntNNTVector<0, 0, "frint32z">;
  defm FRINT64Z : FRIntNNTVector<0, 1, "frint64z">;
  defm FRINT32X : FRIntNNTVector<1, 0, "frint32x">;
  defm FRINT64X : FRIntNNTVector<1, 1, "frint64x">;
} // HasFRInt3264

defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
defm FSQRT  : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
defm NEG    : SIMDTwoVectorBHSD<1, 0b01011, "neg",
                               UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm NOT    : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
// Aliases for MVN -> NOT.
def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
                (NOTv8i8 V64:$Vd, V64:$Vn)>;
def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
                (NOTv16i8 V128:$Vd, V128:$Vn)>;

def : Pat<(AArch64neg (v8i8  V64:$Rn)),  (NEGv8i8  V64:$Rn)>;
def : Pat<(AArch64neg (v16i8 V128:$Rn)), (NEGv16i8 V128:$Rn)>;
def : Pat<(AArch64neg (v4i16 V64:$Rn)),  (NEGv4i16 V64:$Rn)>;
def : Pat<(AArch64neg (v8i16 V128:$Rn)), (NEGv8i16 V128:$Rn)>;
def : Pat<(AArch64neg (v2i32 V64:$Rn)),  (NEGv2i32 V64:$Rn)>;
def : Pat<(AArch64neg (v4i32 V128:$Rn)), (NEGv4i32 V128:$Rn)>;
def : Pat<(AArch64neg (v2i64 V128:$Rn)), (NEGv2i64 V128:$Rn)>;

def : Pat<(vnot (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(vnot (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(vnot (v1i64 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;

defm RBIT   : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", int_aarch64_neon_rbit>;
defm REV16  : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
defm REV32  : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
defm REV64  : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
       BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >;
defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>;
defm SCVTF  : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
defm SHLL   : SIMDVectorLShiftLongBySizeBHS;
defm SQABS  : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
       BinOpFrag<(add node:$LHS, (int_aarch64_neon_uaddlp node:$RHS))> >;
defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp",
                    int_aarch64_neon_uaddlp>;
defm UCVTF  : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
defm UQXTN  : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
defm XTN    : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;

def : Pat<(v4f16  (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
def : Pat<(v4f16  (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
def : Pat<(v4bf16 (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
def : Pat<(v4bf16 (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
def : Pat<(v8f16  (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
def : Pat<(v8f16  (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
def : Pat<(v8bf16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
def : Pat<(v8bf16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
def : Pat<(v2f32  (AArch64rev64 V64:$Rn)),  (REV64v2i32 V64:$Rn)>;
def : Pat<(v4f32  (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;

// Patterns for vector long shift (by element width). These need to match all
// three of zext, sext and anyext, so it is easier to pull the patterns out
// into a multiclass than to repeat them in each instruction definition.
multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
  def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
            (SHLLv8i8 V64:$Rn)>;
  def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
            (SHLLv16i8 V128:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
            (SHLLv4i16 V64:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
            (SHLLv8i16 V128:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
            (SHLLv2i32 V64:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
            (SHLLv4i32 V128:$Rn)>;
}

defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
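// Editorial example (not in the original source): "shll v0.8h, v1.8b, #8"
// widens each byte and shifts it left by the full element width. The shift
// moves the original bits entirely into the upper half of each new lane,
// so the result is the same whether the widening was sext, zext or anyext,
// which is why all three instantiations above are valid.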

//===----------------------------------------------------------------------===//
// Advanced SIMD three vector instructions.
//===----------------------------------------------------------------------===//

defm ADD     : SIMDThreeSameVector<0, 0b10000, "add", add>;
defm ADDP    : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>;
defm CMEQ    : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE    : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT    : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI    : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS    : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST   : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD    : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>;
let Predicates = [HasNEON] in {
foreach VT = [ v2f32, v4f32, v2f64 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
let Predicates = [HasNEON, HasFullFP16] in {
foreach VT = [ v4f16, v8f16 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
defm FACGE   : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>;
defm FACGT   : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>;
defm FADDP   : SIMDThreeSameVectorFP<1,0,0b010,"faddp",int_aarch64_neon_faddp>;
defm FADD    : SIMDThreeSameVectorFP<0,0,0b010,"fadd", fadd>;
defm FCMEQ   : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE   : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT   : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FDIV    : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", fdiv>;
defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
defm FMAXNM  : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", fmaxnum>;
defm FMAXP   : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>;
defm FMAX    : SIMDThreeSameVectorFP<0,0,0b110,"fmax", fmaximum>;
defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>;
defm FMINNM  : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", fminnum>;
defm FMINP   : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
defm FMIN    : SIMDThreeSameVectorFP<0,1,0b110,"fmin", fminimum>;

// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
// instruction expects the addend first, while the fma intrinsic puts it last.
defm FMLA     : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
            TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm FMLS     : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls",
            TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
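// Editorial example (not in the original source): for
// "fmla v0.4s, v1.4s, v2.4s" the tied accumulator v0 is the destination
// operand, while in (fma a, b, c) the addend c comes last. The reordered
// TriOpFrag maps the fma addend onto the tied $LHS so the same fma node
// selects correctly.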

defm FMULX    : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
defm FMUL     : SIMDThreeSameVectorFP<1,0,0b011,"fmul", fmul>;
defm FRECPS   : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
defm FRSQRTS  : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
defm FSUB     : SIMDThreeSameVectorFP<0,1,0b010,"fsub", fsub>;

// MLA and MLS are generated by the MachineCombiner pass
defm MLA      : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla", null_frag>;
defm MLS      : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls", null_frag>;

defm MUL      : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
defm PMUL     : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
defm SABA     : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
      TriOpFrag<(add node:$LHS, (AArch64sabd node:$MHS, node:$RHS))> >;
defm SABD     : SIMDThreeSameVectorBHS<0,0b01110,"sabd", AArch64sabd>;
defm SHADD    : SIMDThreeSameVectorBHS<0,0b00000,"shadd", AArch64shadd>;
defm SHSUB    : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
defm SMAXP    : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
defm SMAX     : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
defm SMINP    : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
defm SMIN     : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
defm SQADD    : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
defm SRHADD   : SIMDThreeSameVectorBHS<0,0b00010,"srhadd", AArch64srhadd>;
defm SRSHL    : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeSameVector<1,0b10000,"sub", sub>;
defm UABA     : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
      TriOpFrag<(add node:$LHS, (AArch64uabd node:$MHS, node:$RHS))> >;
defm UABD     : SIMDThreeSameVectorBHS<1,0b01110,"uabd", AArch64uabd>;
defm UHADD    : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", AArch64uhadd>;
defm UHSUB    : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
defm UMAXP    : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
defm UMAX     : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
defm UMINP    : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
defm UMIN     : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
defm UQADD    : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
defm URHADD   : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", AArch64urhadd>;
defm URSHL    : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
                                                  int_aarch64_neon_sqadd>;
defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
                                                  int_aarch64_neon_sqsub>;

// Extra saturating patterns, beyond the intrinsic matches above
defm : SIMDThreeSameVectorExtraPatterns<"SQADD", saddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQADD", uaddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"SQSUB", ssubsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQSUB", usubsat>;

defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
                                  BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
                                  BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;

// Pseudo bitwise select pattern BSP.
// It is expanded into BSL/BIT/BIF after register allocation.
defm BSP : SIMDLogicalThreeVectorPseudo<TriOpFrag<(or (and node:$LHS, node:$MHS),
                                                      (and (vnot node:$LHS), node:$RHS))>>;
defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl">;
defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
defm BIF : SIMDLogicalThreeVectorTied<1, 0b11, "bif">;

def : Pat<(AArch64bsp (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsp (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSPv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;

def : Pat<(AArch64bsp (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsp (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSPv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
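// Editorial note (not in the original source): the post-RA expansion picks
// the concrete instruction by which source the register allocator assigned
// to the destination: BSL if the destination holds the mask, BIT if it
// holds the "false" operand, BIF if it holds the "true" operand, and
// otherwise a copy of the mask followed by BSL.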

def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;

def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;

def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmls.8b\t$dst, $src1, $src2}",
                (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmls.16b\t$dst, $src1, $src2}",
                (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmls.4h\t$dst, $src1, $src2}",
                (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmls.8h\t$dst, $src1, $src2}",
                (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmls.2s\t$dst, $src1, $src2}",
                (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmls.4s\t$dst, $src1, $src2}",
                (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmls.2d\t$dst, $src1, $src2}",
                (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlo.8b\t$dst, $src1, $src2}",
                (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlo.16b\t$dst, $src1, $src2}",
                (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlo.4h\t$dst, $src1, $src2}",
                (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlo.8h\t$dst, $src1, $src2}",
                (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlo.2s\t$dst, $src1, $src2}",
                (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlo.4s\t$dst, $src1, $src2}",
                (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlo.2d\t$dst, $src1, $src2}",
                (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmle.8b\t$dst, $src1, $src2}",
                (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmle.16b\t$dst, $src1, $src2}",
                (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmle.4h\t$dst, $src1, $src2}",
                (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmle.8h\t$dst, $src1, $src2}",
                (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmle.2s\t$dst, $src1, $src2}",
                (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmle.4s\t$dst, $src1, $src2}",
                (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmle.2d\t$dst, $src1, $src2}",
                (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlt.8b\t$dst, $src1, $src2}",
                (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlt.16b\t$dst, $src1, $src2}",
                (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlt.4h\t$dst, $src1, $src2}",
                (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlt.8h\t$dst, $src1, $src2}",
                (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlt.2s\t$dst, $src1, $src2}",
                (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlt.4s\t$dst, $src1, $src2}",
                (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlt.2d\t$dst, $src1, $src2}",
                (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmle.4h\t$dst, $src1, $src2}",
                (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmle.8h\t$dst, $src1, $src2}",
                (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmle.2s\t$dst, $src1, $src2}",
                (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmle.4s\t$dst, $src1, $src2}",
                (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmle.2d\t$dst, $src1, $src2}",
                (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmlt.4h\t$dst, $src1, $src2}",
                (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmlt.8h\t$dst, $src1, $src2}",
                (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmlt.2s\t$dst, $src1, $src2}",
                (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmlt.4s\t$dst, $src1, $src2}",
                (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmlt.2d\t$dst, $src1, $src2}",
                (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
                "|facle.4h\t$dst, $src1, $src2}",
                (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
                "|facle.8h\t$dst, $src1, $src2}",
4390                (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
4391}
4392def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
4393                "|facle.2s\t$dst, $src1, $src2}",
4394                (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
4395def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
4396                "|facle.4s\t$dst, $src1, $src2}",
4397                (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
4398def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
4399                "|facle.2d\t$dst, $src1, $src2}",
4400                (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
4401
4402let Predicates = [HasNEON, HasFullFP16] in {
4403def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
4404                "|faclt.4h\t$dst, $src1, $src2}",
4405                (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
4406def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
4407                "|faclt.8h\t$dst, $src1, $src2}",
4408                (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
4409}
4410def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
4411                "|faclt.2s\t$dst, $src1, $src2}",
4412                (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
4413def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
4414                "|faclt.4s\t$dst, $src1, $src2}",
4415                (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
4416def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
4417                "|faclt.2d\t$dst, $src1, $src2}",
4418                (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
4419
4420//===----------------------------------------------------------------------===//
4421// Advanced SIMD three scalar instructions.
4422//===----------------------------------------------------------------------===//
4423
4424defm ADD      : SIMDThreeScalarD<0, 0b10000, "add", add>;
4425defm CMEQ     : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
4426defm CMGE     : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
4427defm CMGT     : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
4428defm CMHI     : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
4429defm CMHS     : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
4430defm CMTST    : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
4431defm FABD     : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
4432def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
4433          (FABD64 FPR64:$Rn, FPR64:$Rm)>;
4434let Predicates = [HasFullFP16] in {
4435def : Pat<(fabs (fsub f16:$Rn, f16:$Rm)), (FABD16 f16:$Rn, f16:$Rm)>;
4436}
4437def : Pat<(fabs (fsub f32:$Rn, f32:$Rm)), (FABD32 f32:$Rn, f32:$Rm)>;
4438def : Pat<(fabs (fsub f64:$Rn, f64:$Rm)), (FABD64 f64:$Rn, f64:$Rm)>;
4439defm FACGE    : SIMDThreeScalarFPCmp<1, 0, 0b101, "facge",
4440                                     int_aarch64_neon_facge>;
4441defm FACGT    : SIMDThreeScalarFPCmp<1, 1, 0b101, "facgt",
4442                                     int_aarch64_neon_facgt>;
4443defm FCMEQ    : SIMDThreeScalarFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
4444defm FCMGE    : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
4445defm FCMGT    : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
4446defm FMULX    : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx>;
4447defm FRECPS   : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps>;
4448defm FRSQRTS  : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts>;
4449defm SQADD    : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
4450defm SQDMULH  : SIMDThreeScalarHS<  0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
4451defm SQRDMULH : SIMDThreeScalarHS<  1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
4452defm SQRSHL   : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_aarch64_neon_sqrshl>;
4453defm SQSHL    : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
4454defm SQSUB    : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
4455defm SRSHL    : SIMDThreeScalarD<   0, 0b01010, "srshl", int_aarch64_neon_srshl>;
4456defm SSHL     : SIMDThreeScalarD<   0, 0b01000, "sshl", int_aarch64_neon_sshl>;
4457defm SUB      : SIMDThreeScalarD<   1, 0b10000, "sub", sub>;
4458defm UQADD    : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
4459defm UQRSHL   : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>;
4460defm UQSHL    : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
4461defm UQSUB    : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
4462defm URSHL    : SIMDThreeScalarD<   1, 0b01010, "urshl", int_aarch64_neon_urshl>;
4463defm USHL     : SIMDThreeScalarD<   1, 0b01000, "ushl", int_aarch64_neon_ushl>;
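// RDM provides fused saturating-rounding-doubling multiply-accumulate; the
// patterns below fold a saturating add/sub of an SQRDMULH result into a
// single sqrdmlah/sqrdmlsh for the scalar i32 case.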
4464let Predicates = [HasRDM] in {
4465  defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
4466  defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
4467  def : Pat<(i32 (int_aarch64_neon_sqadd
4468                   (i32 FPR32:$Rd),
4469                   (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
4470                                                   (i32 FPR32:$Rm))))),
4471            (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
4472  def : Pat<(i32 (int_aarch64_neon_sqsub
4473                   (i32 FPR32:$Rd),
4474                   (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
4475                                                   (i32 FPR32:$Rm))))),
4476            (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
4477}
4478
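// The scalar "less-than" comparison aliases below are operand-swapped forms
// of the "greater-than" instructions; e.g. "cmls d0, d1, d2" assembles as
// "cmhs d0, d2, d1".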
4479def : InstAlias<"cmls $dst, $src1, $src2",
4480                (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4481def : InstAlias<"cmle $dst, $src1, $src2",
4482                (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4483def : InstAlias<"cmlo $dst, $src1, $src2",
4484                (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4485def : InstAlias<"cmlt $dst, $src1, $src2",
4486                (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4487def : InstAlias<"fcmle $dst, $src1, $src2",
4488                (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
4489def : InstAlias<"fcmle $dst, $src1, $src2",
4490                (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4491def : InstAlias<"fcmlt $dst, $src1, $src2",
4492                (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
4493def : InstAlias<"fcmlt $dst, $src1, $src2",
4494                (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4495def : InstAlias<"facle $dst, $src1, $src2",
4496                (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
4497def : InstAlias<"facle $dst, $src1, $src2",
4498                (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4499def : InstAlias<"faclt $dst, $src1, $src2",
4500                (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
4501def : InstAlias<"faclt $dst, $src1, $src2",
4502                (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
4503
4504//===----------------------------------------------------------------------===//
4505// Advanced SIMD three scalar instructions (mixed operands).
4506//===----------------------------------------------------------------------===//
4507defm SQDMULL  : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
4508                                       int_aarch64_neon_sqdmulls_scalar>;
4509defm SQDMLAL  : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
4510defm SQDMLSL  : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;
4511
4512def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
4513                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
4514                                                        (i32 FPR32:$Rm))))),
4515          (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
4516def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
4517                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
4518                                                        (i32 FPR32:$Rm))))),
4519          (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
4520
4521//===----------------------------------------------------------------------===//
4522// Advanced SIMD two scalar instructions.
4523//===----------------------------------------------------------------------===//
4524
4525defm ABS    : SIMDTwoScalarD<    0, 0b01011, "abs", abs>;
4526defm CMEQ   : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
4527defm CMGE   : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
4528defm CMGT   : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
4529defm CMLE   : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
4530defm CMLT   : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
4531defm FCMEQ  : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
4532defm FCMGE  : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
4533defm FCMGT  : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
4534defm FCMLE  : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
4535defm FCMLT  : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
4536defm FCVTAS : SIMDFPTwoScalar<   0, 0, 0b11100, "fcvtas">;
4537defm FCVTAU : SIMDFPTwoScalar<   1, 0, 0b11100, "fcvtau">;
4538defm FCVTMS : SIMDFPTwoScalar<   0, 0, 0b11011, "fcvtms">;
4539defm FCVTMU : SIMDFPTwoScalar<   1, 0, 0b11011, "fcvtmu">;
4540defm FCVTNS : SIMDFPTwoScalar<   0, 0, 0b11010, "fcvtns">;
4541defm FCVTNU : SIMDFPTwoScalar<   1, 0, 0b11010, "fcvtnu">;
4542defm FCVTPS : SIMDFPTwoScalar<   0, 1, 0b11010, "fcvtps">;
4543defm FCVTPU : SIMDFPTwoScalar<   1, 1, 0b11010, "fcvtpu">;
4544def  FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
4545defm FCVTZS : SIMDFPTwoScalar<   0, 1, 0b11011, "fcvtzs">;
4546defm FCVTZU : SIMDFPTwoScalar<   1, 1, 0b11011, "fcvtzu">;
4547defm FRECPE : SIMDFPTwoScalar<   0, 1, 0b11101, "frecpe">;
4548defm FRECPX : SIMDFPTwoScalar<   0, 1, 0b11111, "frecpx">;
4549defm FRSQRTE : SIMDFPTwoScalar<  1, 1, 0b11101, "frsqrte">;
4550defm NEG    : SIMDTwoScalarD<    1, 0b01011, "neg",
4551                                 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
4552defm SCVTF  : SIMDFPTwoScalarCVT<   0, 0, 0b11101, "scvtf", AArch64sitof>;
4553defm SQABS  : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
4554defm SQNEG  : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
4555defm SQXTN  : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
4556defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
4557defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
4558                                     int_aarch64_neon_suqadd>;
4559defm UCVTF  : SIMDFPTwoScalarCVT<   1, 0, 0b11101, "ucvtf", AArch64uitof>;
4560defm UQXTN  : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
4561defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
4562                                    int_aarch64_neon_usqadd>;
4563
4564def : Pat<(AArch64neg (v1i64 V64:$Rn)), (NEGv1i64 V64:$Rn)>;
4565
4566def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
4567          (FCVTASv1i64 FPR64:$Rn)>;
4568def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
4569          (FCVTAUv1i64 FPR64:$Rn)>;
4570def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
4571          (FCVTMSv1i64 FPR64:$Rn)>;
4572def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
4573          (FCVTMUv1i64 FPR64:$Rn)>;
4574def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
4575          (FCVTNSv1i64 FPR64:$Rn)>;
4576def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
4577          (FCVTNUv1i64 FPR64:$Rn)>;
4578def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
4579          (FCVTPSv1i64 FPR64:$Rn)>;
4580def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
4581          (FCVTPUv1i64 FPR64:$Rn)>;
4582def : Pat<(v1i64 (int_aarch64_neon_fcvtzs (v1f64 FPR64:$Rn))),
4583          (FCVTZSv1i64 FPR64:$Rn)>;
4584def : Pat<(v1i64 (int_aarch64_neon_fcvtzu (v1f64 FPR64:$Rn))),
4585          (FCVTZUv1i64 FPR64:$Rn)>;
4586
4587def : Pat<(f16 (int_aarch64_neon_frecpe (f16 FPR16:$Rn))),
4588          (FRECPEv1f16 FPR16:$Rn)>;
4589def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
4590          (FRECPEv1i32 FPR32:$Rn)>;
4591def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
4592          (FRECPEv1i64 FPR64:$Rn)>;
4593def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
4594          (FRECPEv1i64 FPR64:$Rn)>;
4595
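// Estimate/step patterns used for iterative refinement under fast math:
// FRECPS computes (2.0 - Rn*Rm) and FRSQRTS computes (3.0 - Rn*Rm)/2.0, the
// Newton-Raphson correction steps for reciprocal and reciprocal square root.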
4596def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
4597          (FRECPEv1i32 FPR32:$Rn)>;
4598def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
4599          (FRECPEv2f32 V64:$Rn)>;
4600def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
4601          (FRECPEv4f32 FPR128:$Rn)>;
4602def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
4603          (FRECPEv1i64 FPR64:$Rn)>;
4604def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
4605          (FRECPEv1i64 FPR64:$Rn)>;
4606def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
4607          (FRECPEv2f64 FPR128:$Rn)>;
4608
4609def : Pat<(f32 (AArch64frecps (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
4610          (FRECPS32 FPR32:$Rn, FPR32:$Rm)>;
4611def : Pat<(v2f32 (AArch64frecps (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
4612          (FRECPSv2f32 V64:$Rn, V64:$Rm)>;
4613def : Pat<(v4f32 (AArch64frecps (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
4614          (FRECPSv4f32 FPR128:$Rn, FPR128:$Rm)>;
4615def : Pat<(f64 (AArch64frecps (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
4616          (FRECPS64 FPR64:$Rn, FPR64:$Rm)>;
4617def : Pat<(v2f64 (AArch64frecps (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
4618          (FRECPSv2f64 FPR128:$Rn, FPR128:$Rm)>;
4619
4620def : Pat<(f16 (int_aarch64_neon_frecpx (f16 FPR16:$Rn))),
4621          (FRECPXv1f16 FPR16:$Rn)>;
4622def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
4623          (FRECPXv1i32 FPR32:$Rn)>;
4624def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
4625          (FRECPXv1i64 FPR64:$Rn)>;
4626
4627def : Pat<(f16 (int_aarch64_neon_frsqrte (f16 FPR16:$Rn))),
4628          (FRSQRTEv1f16 FPR16:$Rn)>;
4629def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
4630          (FRSQRTEv1i32 FPR32:$Rn)>;
4631def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
4632          (FRSQRTEv1i64 FPR64:$Rn)>;
4633def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
4634          (FRSQRTEv1i64 FPR64:$Rn)>;
4635
4636def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
4637          (FRSQRTEv1i32 FPR32:$Rn)>;
4638def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
4639          (FRSQRTEv2f32 V64:$Rn)>;
4640def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
4641          (FRSQRTEv4f32 FPR128:$Rn)>;
4642def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
4643          (FRSQRTEv1i64 FPR64:$Rn)>;
4644def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
4645          (FRSQRTEv1i64 FPR64:$Rn)>;
4646def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
4647          (FRSQRTEv2f64 FPR128:$Rn)>;
4648
4649def : Pat<(f32 (AArch64frsqrts (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
4650          (FRSQRTS32 FPR32:$Rn, FPR32:$Rm)>;
4651def : Pat<(v2f32 (AArch64frsqrts (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
4652          (FRSQRTSv2f32 V64:$Rn, V64:$Rm)>;
4653def : Pat<(v4f32 (AArch64frsqrts (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
4654          (FRSQRTSv4f32 FPR128:$Rn, FPR128:$Rm)>;
4655def : Pat<(f64 (AArch64frsqrts (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
4656          (FRSQRTS64 FPR64:$Rn, FPR64:$Rm)>;
4657def : Pat<(v2f64 (AArch64frsqrts (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
4658          (FRSQRTSv2f64 FPR128:$Rn, FPR128:$Rm)>;
4659
4660// If an integer is about to be converted to a floating point value,
4661// just load it on the floating point unit.
4662// Here are the patterns for 8- and 16-bit conversions to float.
4663// 8-bit -> float.
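// For example (an illustrative sketch, not taken from this file), C code like
//   float f(unsigned char *p) { return *p; }
// selects "ldr b0, [x0]" followed by "ucvtf s0, s0", avoiding a GPR load
// plus a GPR->FPR transfer.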
4664multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
4665                             SDPatternOperator loadop, Instruction UCVTF,
4666                             ROAddrMode ro, Instruction LDRW, Instruction LDRX,
4667                             SubRegIndex sub> {
4668  def : Pat<(DstTy (uint_to_fp (SrcTy
4669                     (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
4670                                      ro.Wext:$extend))))),
4671           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
4672                                 (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
4673                                 sub))>;
4674
4675  def : Pat<(DstTy (uint_to_fp (SrcTy
4676                     (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
4677                                      ro.Wext:$extend))))),
4678           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
4679                                 (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
4680                                 sub))>;
4681}
4682
4683defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
4684                         UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
4685def : Pat <(f32 (uint_to_fp (i32
4686               (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
4687           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
4688                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
4689def : Pat <(f32 (uint_to_fp (i32
4690                     (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
4691           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
4692                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
4693// 16-bit -> float.
4694defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
4695                         UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
4696def : Pat <(f32 (uint_to_fp (i32
4697                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
4698           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
4699                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
4700def : Pat <(f32 (uint_to_fp (i32
4701                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
4702           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
4703                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
4704// 32-bit -> float is handled in the target-specific dag combine
4705// performIntToFpCombine.
4706// 64-bit integer to 32-bit floating point is not possible with
4707// UCVTF on floating-point registers (both source and destination
4708// must have the same size).
4709
4710// Here are the patterns for 8-, 16-, 32-, and 64-bit conversions to double.
4711// 8-bit -> double.
4712defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
4713                         UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
4714def : Pat <(f64 (uint_to_fp (i32
4715                    (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
4716           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4717                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
4718def : Pat <(f64 (uint_to_fp (i32
4719                  (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
4720           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4721                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
4722// 16-bit -> double.
4723defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
4724                         UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
4725def : Pat <(f64 (uint_to_fp (i32
4726                  (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
4727           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4728                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
4729def : Pat <(f64 (uint_to_fp (i32
4730                  (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
4731           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4732                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
4733// 32-bit -> double.
4734defm : UIntToFPROLoadPat<f64, i32, load,
4735                         UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
4736def : Pat <(f64 (uint_to_fp (i32
4737                  (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
4738           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4739                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
4740def : Pat <(f64 (uint_to_fp (i32
4741                  (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
4742           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4743                          (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
4744// 64-bit -> double is handled in the target-specific dag combine
4745// performIntToFpCombine.
4746
4747//===----------------------------------------------------------------------===//
4748// Advanced SIMD three different-sized vector instructions.
4749//===----------------------------------------------------------------------===//
4750
4751defm ADDHN  : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
4752defm SUBHN  : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
4753defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
4754defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
4755defm PMULL  : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
4756defm SABAL  : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
4757                                             AArch64sabd>;
4758defm SABDL   : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
4759                                          AArch64sabd>;
4760defm SADDL   : SIMDLongThreeVectorBHS<   0, 0b0000, "saddl",
4761            BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
4762defm SADDW   : SIMDWideThreeVectorBHS<   0, 0b0001, "saddw",
4763                 BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
4764defm SMLAL   : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
4765    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
4766defm SMLSL   : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
4767    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
4768defm SMULL   : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
4769defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
4770                                               int_aarch64_neon_sqadd>;
4771defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
4772                                               int_aarch64_neon_sqsub>;
4773defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
4774                                     int_aarch64_neon_sqdmull>;
4775defm SSUBL   : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
4776                 BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
4777defm SSUBW   : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
4778                 BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
4779defm UABAL   : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
4780                                              AArch64uabd>;
4781defm UADDL   : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
4782                 BinOpFrag<(add (zanyext node:$LHS), (zanyext node:$RHS))>>;
4783defm UADDW   : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
4784                 BinOpFrag<(add node:$LHS, (zanyext node:$RHS))>>;
4785defm UMLAL   : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
4786    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
4787defm UMLSL   : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
4788    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
4789defm UMULL   : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
4790defm USUBL   : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
4791                 BinOpFrag<(sub (zanyext node:$LHS), (zanyext node:$RHS))>>;
4792defm USUBW   : SIMDWideThreeVectorBHS<   1, 0b0011, "usubw",
4793                 BinOpFrag<(sub node:$LHS, (zanyext node:$RHS))>>;
4794
4795// Additional patterns for [SU]ML[AS]L
4796multiclass Neon_mul_acc_widen_patterns<SDPatternOperator opnode, SDPatternOperator vecopnode,
4797  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
4798  def : Pat<(v4i16 (opnode
4799                    V64:$Ra,
4800                    (v4i16 (extract_subvector
4801                            (vecopnode (v8i8 V64:$Rn),(v8i8 V64:$Rm)),
4802                            (i64 0))))),
4803             (EXTRACT_SUBREG (v8i16 (INST8B
4804                                     (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), V64:$Ra, dsub),
4805                                     V64:$Rn, V64:$Rm)), dsub)>;
4806  def : Pat<(v2i32 (opnode
4807                    V64:$Ra,
4808                    (v2i32 (extract_subvector
4809                            (vecopnode (v4i16 V64:$Rn),(v4i16 V64:$Rm)),
4810                            (i64 0))))),
4811             (EXTRACT_SUBREG (v4i32 (INST4H
4812                                     (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), V64:$Ra, dsub),
4813                                     V64:$Rn, V64:$Rm)), dsub)>;
4814  def : Pat<(v1i64 (opnode
4815                    V64:$Ra,
4816                    (v1i64 (extract_subvector
4817                            (vecopnode (v2i32 V64:$Rn),(v2i32 V64:$Rm)),
4818                            (i64 0))))),
4819             (EXTRACT_SUBREG (v2i64 (INST2S
4820                                     (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), V64:$Ra, dsub),
4821                                     V64:$Rn, V64:$Rm)), dsub)>;
4822}
4823
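// For example (illustrative): a v4i16 add of the low half of a v8i8 umull is
// selected as a full "umlal v0.8h, v1.8b, v2.8b", with the accumulator placed
// in the low 64 bits and the unused high-half result dropped.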
4824defm : Neon_mul_acc_widen_patterns<add, int_aarch64_neon_umull,
4825     UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
4826defm : Neon_mul_acc_widen_patterns<add, int_aarch64_neon_smull,
4827     SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
4828defm : Neon_mul_acc_widen_patterns<sub, int_aarch64_neon_umull,
4829     UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
4830defm : Neon_mul_acc_widen_patterns<sub, int_aarch64_neon_smull,
4831     SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
4832
4833// Additional patterns for SMULL and UMULL
4834multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
4835  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
4836  def : Pat<(v8i16 (opnode (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
4837            (INST8B V64:$Rn, V64:$Rm)>;
4838  def : Pat<(v4i32 (opnode (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
4839            (INST4H V64:$Rn, V64:$Rm)>;
4840  def : Pat<(v2i64 (opnode (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
4841            (INST2S V64:$Rn, V64:$Rm)>;
4842}
4843
4844defm : Neon_mul_widen_patterns<AArch64smull, SMULLv8i8_v8i16,
4845  SMULLv4i16_v4i32, SMULLv2i32_v2i64>;
4846defm : Neon_mul_widen_patterns<AArch64umull, UMULLv8i8_v8i16,
4847  UMULLv4i16_v4i32, UMULLv2i32_v2i64>;
4848
4849// Patterns for smull2/umull2.
4850multiclass Neon_mul_high_patterns<SDPatternOperator opnode,
4851  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
4852  def : Pat<(v8i16 (opnode (extract_high_v16i8 V128:$Rn),
4853                           (extract_high_v16i8 V128:$Rm))),
4854             (INST8B V128:$Rn, V128:$Rm)>;
4855  def : Pat<(v4i32 (opnode (extract_high_v8i16 V128:$Rn),
4856                           (extract_high_v8i16 V128:$Rm))),
4857             (INST4H V128:$Rn, V128:$Rm)>;
4858  def : Pat<(v2i64 (opnode (extract_high_v4i32 V128:$Rn),
4859                           (extract_high_v4i32 V128:$Rm))),
4860             (INST2S V128:$Rn, V128:$Rm)>;
4861}
4862
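// For example (illustrative): multiplying the high halves of two v16i8
// vectors selects "umull2 v0.8h, v1.16b, v2.16b" directly on the 128-bit
// registers, with no separate lane extraction.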
4863defm : Neon_mul_high_patterns<AArch64smull, SMULLv16i8_v8i16,
4864  SMULLv8i16_v4i32, SMULLv4i32_v2i64>;
4865defm : Neon_mul_high_patterns<AArch64umull, UMULLv16i8_v8i16,
4866  UMULLv8i16_v4i32, UMULLv4i32_v2i64>;
4867
4868// Additional patterns for SMLAL/SMLSL and UMLAL/UMLSL
4869multiclass Neon_mulacc_widen_patterns<SDPatternOperator opnode,
4870  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
4871  def : Pat<(v8i16 (opnode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
4872            (INST8B V128:$Rd, V64:$Rn, V64:$Rm)>;
4873  def : Pat<(v4i32 (opnode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
4874            (INST4H V128:$Rd, V64:$Rn, V64:$Rm)>;
4875  def : Pat<(v2i64 (opnode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
4876            (INST2S  V128:$Rd, V64:$Rn, V64:$Rm)>;
4877}
4878
4879defm : Neon_mulacc_widen_patterns<
4880  TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
4881  SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
4882defm : Neon_mulacc_widen_patterns<
4883  TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
4884  UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
4885defm : Neon_mulacc_widen_patterns<
4886  TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
4887  SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
4888defm : Neon_mulacc_widen_patterns<
4889  TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
4890  UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
4891
4892// Patterns for 64-bit pmull
4893def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
4894          (PMULLv1i64 V64:$Rn, V64:$Rm)>;
4895def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
4896                                    (extractelt (v2i64 V128:$Rm), (i64 1))),
4897          (PMULLv2i64 V128:$Rn, V128:$Rm)>;
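// The second pattern above lets a pmull64 of the top lanes of both operands
// select "pmull2" directly, without first extracting the lanes to FPR64.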
4898
4899// CodeGen patterns for addhn and subhn instructions, which can actually be
4900// written in LLVM IR without too much difficulty.
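// For example (a sketch in LLVM IR):
//   %sum = add <8 x i16> %a, %b
//   %hi  = lshr <8 x i16> %sum, <i16 8, i16 8, i16 8, i16 8,
//                                i16 8, i16 8, i16 8, i16 8>
//   %res = trunc <8 x i16> %hi to <8 x i8>
// matches the first ADDHN pattern below and selects
// "addhn v0.8b, v1.8h, v2.8h".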
4901
4902// ADDHN
4903def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
4904          (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
4905def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
4906                                           (i32 16))))),
4907          (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
4908def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
4909                                           (i32 32))))),
4910          (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
4911def : Pat<(concat_vectors (v8i8 V64:$Rd),
4912                          (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
4913                                                    (i32 8))))),
4914          (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
4915                            V128:$Rn, V128:$Rm)>;
4916def : Pat<(concat_vectors (v4i16 V64:$Rd),
4917                          (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
4918                                                    (i32 16))))),
4919          (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
4920                            V128:$Rn, V128:$Rm)>;
4921def : Pat<(concat_vectors (v2i32 V64:$Rd),
4922                          (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
4923                                                    (i32 32))))),
4924          (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
4925                            V128:$Rn, V128:$Rm)>;
4926
4927// SUBHN
4928def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
4929          (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
4930def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
4931                                           (i32 16))))),
4932          (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
4933def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
4934                                           (i32 32))))),
4935          (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
4936def : Pat<(concat_vectors (v8i8 V64:$Rd),
4937                          (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
4938                                                    (i32 8))))),
4939          (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
4940                            V128:$Rn, V128:$Rm)>;
4941def : Pat<(concat_vectors (v4i16 V64:$Rd),
4942                          (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
4943                                                    (i32 16))))),
4944          (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
4945                            V128:$Rn, V128:$Rm)>;
4946def : Pat<(concat_vectors (v2i32 V64:$Rd),
4947                          (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
4948                                                    (i32 32))))),
4949          (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
4950                            V128:$Rn, V128:$Rm)>;
4951
4952//----------------------------------------------------------------------------
4953// AdvSIMD bitwise extract from vector instruction.
4954//----------------------------------------------------------------------------
4955
4956defm EXT : SIMDBitwiseExtract<"ext">;
4957
4958def AdjustExtImm : SDNodeXForm<imm, [{
4959  return CurDAG->getTargetConstant(8 + N->getZExtValue(), SDLoc(N), MVT::i32);
4960}]>;
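// AdjustExtImm rebases a 64-bit EXT immediate onto a 128-bit EXT; e.g. an
// immediate of 3 becomes 8 + 3 = 11 in the high-half pattern below.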
4961multiclass ExtPat<ValueType VT64, ValueType VT128, int N> {
4962  def : Pat<(VT64 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
4963            (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
4964  def : Pat<(VT128 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
4965            (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
4966  // We use EXT to handle extract_subvector, copying the upper 64 bits of a
4967  // 128-bit vector.
4968  def : Pat<(VT64 (extract_subvector V128:$Rn, (i64 N))),
4969            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
4970  // A 64-bit EXT of two halves of the same 128-bit register can be done as a
4971  // single 128-bit EXT.
4972  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 0)),
4973                              (extract_subvector V128:$Rn, (i64 N)),
4974                              (i32 imm:$imm))),
4975            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, imm:$imm), dsub)>;
4976  // A 64-bit EXT of the high half of a 128-bit register can be done using a
4977  // 128-bit EXT of the whole register with an adjustment to the immediate. The
4978  // top half of the other operand will be unset, but that doesn't matter as it
4979  // will not be used.
4980  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 N)),
4981                              V64:$Rm,
4982                              (i32 imm:$imm))),
4983            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn,
4984                                      (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
4985                                      (AdjustExtImm imm:$imm)), dsub)>;
4986}
4987
4988defm : ExtPat<v8i8, v16i8, 8>;
4989defm : ExtPat<v4i16, v8i16, 4>;
4990defm : ExtPat<v4f16, v8f16, 4>;
4991defm : ExtPat<v4bf16, v8bf16, 4>;
4992defm : ExtPat<v2i32, v4i32, 2>;
4993defm : ExtPat<v2f32, v4f32, 2>;
4994defm : ExtPat<v1i64, v2i64, 1>;
4995defm : ExtPat<v1f64, v2f64, 1>;
4996
4997//----------------------------------------------------------------------------
4998// AdvSIMD zip vector
4999//----------------------------------------------------------------------------
5000
5001defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
5002defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
5003defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
5004defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
5005defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
5006defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;
5007
5008//----------------------------------------------------------------------------
5009// AdvSIMD TBL/TBX instructions
5010//----------------------------------------------------------------------------
5011
5012defm TBL : SIMDTableLookup<    0, "tbl">;
5013defm TBX : SIMDTableLookupTied<1, "tbx">;
5014
5015def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
5016          (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
5017def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
5018          (TBLv16i8One V128:$Ri, V128:$Rn)>;
5019
5020def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
5021                  (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
5022          (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
5023def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
5024                   (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
5025          (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;
5026
5027
5028//----------------------------------------------------------------------------
5029// AdvSIMD scalar CPY instruction
5030//----------------------------------------------------------------------------
5031
5032defm CPY : SIMDScalarCPY<"cpy">;
5033
5034//----------------------------------------------------------------------------
5035// AdvSIMD scalar pairwise instructions
5036//----------------------------------------------------------------------------
5037
5038defm ADDP    : SIMDPairwiseScalarD<0, 0b11011, "addp">;
5039defm FADDP   : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
5040defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
5041defm FMAXP   : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
5042defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
5043defm FMINP   : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;
5044
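// vecreduce_fadd is lowered with pairwise adds; e.g. a v4f32 reduction is a
// vector "faddp" whose low two result lanes hold the pairwise sums of the
// input, followed by the scalar "faddp s0, v0.2s".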
5045let Predicates = [HasFullFP16] in {
5046def : Pat<(f16 (vecreduce_fadd (v8f16 V128:$Rn))),
5047            (FADDPv2i16p
5048              (EXTRACT_SUBREG
5049                 (FADDPv8f16 (FADDPv8f16 V128:$Rn, (v8f16 (IMPLICIT_DEF))), (v8f16 (IMPLICIT_DEF))),
5050               dsub))>;
5051def : Pat<(f16 (vecreduce_fadd (v4f16 V64:$Rn))),
5052          (FADDPv2i16p (FADDPv4f16 V64:$Rn, (v4f16 (IMPLICIT_DEF))))>;
5053}
5054def : Pat<(f32 (vecreduce_fadd (v4f32 V128:$Rn))),
5055          (FADDPv2i32p
5056            (EXTRACT_SUBREG
5057              (FADDPv4f32 V128:$Rn, (v4f32 (IMPLICIT_DEF))),
5058             dsub))>;
5059def : Pat<(f32 (vecreduce_fadd (v2f32 V64:$Rn))),
5060          (FADDPv2i32p V64:$Rn)>;
5061def : Pat<(f64 (vecreduce_fadd (v2f64 V128:$Rn))),
5062          (FADDPv2i64p V128:$Rn)>;
5063
5064def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
5065          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
5066def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
5067          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
5068def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
5069          (FADDPv2i32p V64:$Rn)>;
5070def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
5071          (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
5072def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
5073          (FADDPv2i64p V128:$Rn)>;
5074def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
5075          (FMAXNMPv2i32p V64:$Rn)>;
5076def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
5077          (FMAXNMPv2i64p V128:$Rn)>;
5078def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
5079          (FMAXPv2i32p V64:$Rn)>;
5080def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
5081          (FMAXPv2i64p V128:$Rn)>;
5082def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
5083          (FMINNMPv2i32p V64:$Rn)>;
5084def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
5085          (FMINNMPv2i64p V128:$Rn)>;
5086def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
5087          (FMINPv2i32p V64:$Rn)>;
5088def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
5089          (FMINPv2i64p V128:$Rn)>;
5090
5091//----------------------------------------------------------------------------
5092// AdvSIMD INS/DUP instructions
5093//----------------------------------------------------------------------------
5094
5095def DUPv8i8gpr  : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
5096def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
5097def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
5098def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
5099def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
5100def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
5101def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;
5102
5103def DUPv2i64lane : SIMDDup64FromElement;
5104def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
5105def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
5106def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
5107def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
5108def DUPv8i8lane  : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
5109def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;
5110
5111// DUP from a 64-bit register to a 64-bit register is just a copy
5112def : Pat<(v1i64 (AArch64dup (i64 GPR64:$Rn))),
5113          (COPY_TO_REGCLASS GPR64:$Rn, FPR64)>;
5114def : Pat<(v1f64 (AArch64dup (f64 FPR64:$Rn))),
5115          (COPY_TO_REGCLASS FPR64:$Rn, FPR64)>;
5116
5117def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
5118          (v2f32 (DUPv2i32lane
5119            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
5120            (i64 0)))>;
5121def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
5122          (v4f32 (DUPv4i32lane
5123            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
5124            (i64 0)))>;
5125def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
5126          (v2f64 (DUPv2i64lane
5127            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
5128            (i64 0)))>;
5129def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
5130          (v4f16 (DUPv4i16lane
5131            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5132            (i64 0)))>;
5133def : Pat<(v4bf16 (AArch64dup (bf16 FPR16:$Rn))),
5134          (v4bf16 (DUPv4i16lane
5135            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5136            (i64 0)))>;
5137def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
5138          (v8f16 (DUPv8i16lane
5139            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5140            (i64 0)))>;
5141def : Pat<(v8bf16 (AArch64dup (bf16 FPR16:$Rn))),
5142          (v8bf16 (DUPv8i16lane
5143            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
5144            (i64 0)))>;
5145
5146def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
5147          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
5148def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
5149          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;
5150
5151def : Pat<(v4bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
5152          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
5153def : Pat<(v8bf16 (AArch64duplane16 (v8bf16 V128:$Rn), VectorIndexH:$imm)),
5154          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;
5155
5156def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
5157          (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
5158def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
5159         (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
5160def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
5161          (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;
5162
5163// If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
5164// instruction even if the types don't match: we just have to remap the lane
5165// carefully. N.b. this trick only applies to truncations.
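// For example (illustrative): duplicating the truncated i8 value of lane 2 of
// a v8i16 into a v8i8 uses DUPv8i8lane with the index remapped to 2 * 2 = 4,
// i.e. "dup v0.8b, v1.b[4]".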
5166def VecIndex_x2 : SDNodeXForm<imm, [{
5167  return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
5168}]>;
5169def VecIndex_x4 : SDNodeXForm<imm, [{
5170  return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
5171}]>;
5172def VecIndex_x8 : SDNodeXForm<imm, [{
5173  return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
5174}]>;
5175
5176multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
5177                            ValueType Src128VT, ValueType ScalVT,
5178                            Instruction DUP, SDNodeXForm IdxXFORM> {
5179  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
5180                                                     imm:$idx)))),
5181            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
5182
5183  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
5184                                                     imm:$idx)))),
5185            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
5186}
5187
5188defm : DUPWithTruncPats<v8i8,   v4i16, v8i16, i32, DUPv8i8lane,  VecIndex_x2>;
5189defm : DUPWithTruncPats<v8i8,   v2i32, v4i32, i32, DUPv8i8lane,  VecIndex_x4>;
5190defm : DUPWithTruncPats<v4i16,  v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;
5191
5192defm : DUPWithTruncPats<v16i8,  v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
5193defm : DUPWithTruncPats<v16i8,  v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
5194defm : DUPWithTruncPats<v8i16,  v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;
5195
5196multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
5197                               SDNodeXForm IdxXFORM> {
5198  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
5199                                                         imm:$idx))))),
5200            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
5201
5202  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
5203                                                       imm:$idx))))),
5204            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
5205}
5206
5207defm : DUPWithTrunci64Pats<v8i8,  DUPv8i8lane,   VecIndex_x8>;
5208defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane,  VecIndex_x4>;
5209defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane,  VecIndex_x2>;
5210
5211defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
5212defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
5213defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;
5214
5215// SMOV and UMOV definitions, with some extra patterns for convenience
5216defm SMOV : SMov;
5217defm UMOV : UMov;
5218
5219def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
5220          (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
5221def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
5222          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
5223def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
5224          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
5225def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
5226          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
5229def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
5230          (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;
5231
5232def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
5233            VectorIndexB:$idx)))), i8),
5234          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
5235def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
5236            VectorIndexH:$idx)))), i16),
5237          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
5238
5239// Extracting i8 or i16 elements will have the zero-extend transformed to
5240// an 'and' mask by type legalization since neither i8 nor i16 is a legal type
5241// for AArch64. Match these patterns here since UMOV already zeroes out the high
5242// bits of the destination register.
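// For example (a sketch in LLVM IR):
//   %e = extractelement <16 x i8> %v, i64 3
//   %z = zext i8 %e to i32
// legalizes to an 'and' with 255 and selects "umov w0, v0.b[3]", whose result
// already has bits 8-31 clear.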
5243def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
5244               (i32 0xff)),
5245          (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
5246def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
5247               (i32 0xffff)),
5248          (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;
5249
5250defm INS : SIMDIns;
5251
5252def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
5253          (SUBREG_TO_REG (i32 0),
5254                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5255def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
5256          (SUBREG_TO_REG (i32 0),
5257                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5258
5259def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
5260          (SUBREG_TO_REG (i32 0),
5261                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5262def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
5263          (SUBREG_TO_REG (i32 0),
5264                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
5265
5266def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
5267          (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5268def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
5269          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5270
5271def : Pat<(v4bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
5272          (INSERT_SUBREG (v4bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5273def : Pat<(v8bf16 (scalar_to_vector (bf16 FPR16:$Rn))),
5274          (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
5275
5276def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
5277            (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
5278                                  (i32 FPR32:$Rn), ssub))>;
5279def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
5280            (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
5281                                  (i32 FPR32:$Rn), ssub))>;
5282
5283def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
5284            (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
5285                                  (i64 FPR64:$Rn), dsub))>;
5286
5297def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
5298          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
5299def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
5300          (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
5301
5302def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
5303          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;
5304
5305def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
5306            (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
5307          (EXTRACT_SUBREG
5308            (INSvi16lane
5309              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
5310              VectorIndexS:$imm,
5311              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
5312              (i64 0)),
5313            dsub)>;
5314
5315def : Pat<(vector_insert (v8f16 v8f16:$Rn), (f16 fpimm0),
5316            (i64 VectorIndexH:$imm)),
5317          (INSvi16gpr V128:$Rn, VectorIndexH:$imm, WZR)>;
5318def : Pat<(vector_insert v4f32:$Rn, (f32 fpimm0),
5319            (i64 VectorIndexS:$imm)),
5320          (INSvi32gpr V128:$Rn, VectorIndexS:$imm, WZR)>;
5321def : Pat<(vector_insert v2f64:$Rn, (f64 fpimm0),
5322            (i64 VectorIndexD:$imm)),
5323          (INSvi64gpr V128:$Rn, VectorIndexD:$imm, XZR)>;

def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v4bf16 (vector_insert (v4bf16 V64:$Rn),
            (bf16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(v8bf16 (vector_insert (v8bf16 V128:$Rn),
            (bf16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8bf16 (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi32lane
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
              (i64 0)),
            dsub)>;
def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (INSvi32lane
            V128:$Rn, VectorIndexS:$imm,
            (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
            (i64 0))>;
def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
            (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
          (INSvi64lane
            V128:$Rn, VectorIndexD:$imm,
            (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
            (i64 0))>;

// Copy an element at a constant index in one vector into a constant-indexed
// element of another.
// FIXME: refactor to a shared class/def parameterized on vector type, vector
// index type and INS extension.
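// For example, with $idx = 5 and $idx2 = 3 the v16i8 variant below selects to
// a single element copy, "mov v0.b[5], v1.b[3]" (register numbers
// illustrative).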
def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
                   (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
                   VectorIndexB:$idx2)),
          (v16i8 (INSvi8lane
                   V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
          )>;
def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
                   (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
                   VectorIndexH:$idx2)),
          (v8i16 (INSvi16lane
                   V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
          )>;
def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
                   (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
                   VectorIndexS:$idx2)),
          (v4i32 (INSvi32lane
                   V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
          )>;
def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
                   (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
                   VectorIndexD:$idx2)),
          (v2i64 (INSvi64lane
                   V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
          )>;

multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
                                ValueType VTScal, Instruction INS> {
  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;

  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (INS V128:$src, imm:$Immd,
                 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
                                 imm:$Immd, V128:$Rn, imm:$Immn),
                            dsub)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (EXTRACT_SUBREG
                (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
                     (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
                dsub)>;
}

defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v8bf16, v4bf16, bf16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;


// Floating point vector extractions are codegen'd as either a sequence of
// subregister extractions, or a MOV (aka CPY here, alias for DUP) if
// the lane number is anything other than zero.
def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
          (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
          (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
          (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
def : Pat<(vector_extract (v8bf16 V128:$Rn), 0),
          (bf16 (EXTRACT_SUBREG V128:$Rn, hsub))>;


def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
          (f64 (CPYi64 V128:$Rn, VectorIndexD:$idx))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
          (f32 (CPYi32 V128:$Rn, VectorIndexS:$idx))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
          (f16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(vector_extract (v8bf16 V128:$Rn), VectorIndexH:$idx),
          (bf16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;

// All concat_vectors operations are canonicalised to act on i64 vectors for
// AArch64. In the general case we need an instruction, which might as well be
// INS.
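// As an illustration (register numbers hypothetical): for a v2i32 -> v4i32
// concat with $Rd in d0 and $Rn in d1, this emits "mov v0.d[1], v1.d[0]",
// with the full result occupying q0.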
class ConcatPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
        (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
                     (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;

def : ConcatPat<v2i64, v1i64>;
def : ConcatPat<v2f64, v1f64>;
def : ConcatPat<v4i32, v2i32>;
def : ConcatPat<v4f32, v2f32>;
def : ConcatPat<v8i16, v4i16>;
def : ConcatPat<v8f16, v4f16>;
def : ConcatPat<v8bf16, v4bf16>;
def : ConcatPat<v16i8, v8i8>;

// If the high lanes are undef, though, we can just ignore them:
class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
        (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;

def : ConcatUndefPat<v2i64, v1i64>;
def : ConcatUndefPat<v2f64, v1f64>;
def : ConcatUndefPat<v4i32, v2i32>;
def : ConcatUndefPat<v4f32, v2f32>;
def : ConcatUndefPat<v8i16, v4i16>;
def : ConcatUndefPat<v16i8, v8i8>;

//----------------------------------------------------------------------------
// AdvSIMD across lanes instructions
//----------------------------------------------------------------------------

defm ADDV    : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
defm SMAXV   : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
defm SMINV   : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
defm UMAXV   : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
defm UMINV   : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
defm SADDLV  : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
defm UADDLV  : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
defm FMAXNMV : SIMDFPAcrossLanes<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
defm FMAXV   : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
defm FMINV   : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;

// Patterns for across-vector intrinsics that have a node equivalent returning
// a vector (with only the low lane defined) instead of a scalar.
// In effect, opNode is the same as (scalar_to_vector (IntNode)).
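// For instance, (v8i8 (AArch64uaddv V64:$Rn)) becomes a single
// "addv b0, v0.8b" with the scalar landing in lane 0 of the result
// (register numbers illustrative).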
multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
                                    SDPatternOperator opNode> {
// If a lane instruction caught the vector_extract around opNode, we can
// directly match the latter to the instruction.
def : Pat<(v8i8 (opNode V64:$Rn)),
          (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
def : Pat<(v16i8 (opNode V128:$Rn)),
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
def : Pat<(v4i16 (opNode V64:$Rn)),
          (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
def : Pat<(v8i16 (opNode V128:$Rn)),
          (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
def : Pat<(v4i32 (opNode V128:$Rn)),
          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;


// If none did, fall back to the explicit patterns, consuming the vector_extract.
def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
            (i32 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (insert_subvector undef,
            (v4i16 (opNode V64:$Rn)), (i32 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
            ssub), ssub)>;

}

multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
                                          SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a sign extension after this intrinsic, consume it, as SMOV has
// already performed it.
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
           (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
            (i64 0)))>;
}

multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
                                            SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a masking operation keeping only what has been actually
// generated, consume it.
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), maski8_or_more)),
      (i32 (EXTRACT_SUBREG
        (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
          (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
        ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
            maski8_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
          ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), maski16_or_more)),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
            ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
            maski16_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
          ssub))>;
}

defm : SIMDAcrossLanesSignedIntrinsic<"ADDV",  AArch64saddv>;
// vaddv_[su]32 is special: it becomes ADDP Vd.2S, Vn.2S, Vm.2S with Vn == Vm,
// and the result is read from Vd.s[0].
def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
// vaddv_[su]32 is special: it becomes ADDP Vd.2S, Vn.2S, Vm.2S with Vn == Vm,
// and the result is read from Vd.s[0].
def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;
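// In assembly terms (registers illustrative), vaddv_u32 therefore becomes
//   addp v0.2s, v0.2s, v0.2s
// followed by a read of lane 0, since ADDV has no .2s form.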

defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
          (SMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
          (SMINPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
          (UMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
          (UMINPv2i32 V64:$Rn, V64:$Rn)>;

multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
        (i32 (SMOVvi16to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
          (i64 0)))>;
def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
        (i32 (SMOVvi16to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
          (i64 0)))>;

def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
          (i32 (EXTRACT_SUBREG
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
           ssub))>;
def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
          ssub))>;

def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
        (i64 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
          dsub))>;
}

multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
                                                Intrinsic intOp> {
  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
          ssub))>;
def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
          ssub))>;

def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
            ssub))>;
def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
          ssub))>;

def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
        (i64 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
          dsub))>;
}

defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;

// The vaddlv_s32 intrinsic gets mapped to SADDLP.
def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (SADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;
// The vaddlv_u32 intrinsic gets mapped to UADDLP.
def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (UADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;
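// In other words (registers illustrative), vaddlv_u32 becomes
//   uaddlp v0.1d, v0.2s
// with the i64 result read from the D register, as UADDLV also lacks a .2s
// form.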

//------------------------------------------------------------------------------
// AdvSIMD modified immediate instructions
//------------------------------------------------------------------------------

// AdvSIMD BIC
defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
// AdvSIMD ORR
defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;

def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

// AdvSIMD FMOV
def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
                                              "fmov", ".2d",
                       [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64,  fpimm8,
                                              "fmov", ".2s",
                       [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
                                              "fmov", ".4s",
                       [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
let Predicates = [HasNEON, HasFullFP16] in {
def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64,  fpimm8,
                                              "fmov", ".4h",
                       [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
                                              "fmov", ".8h",
                       [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
} // Predicates = [HasNEON, HasFullFP16]

// AdvSIMD MOVI

// EDIT byte mask: scalar
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVID      : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
                    [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 here.
def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
          (MOVID imm0_255:$shift)>;
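// As an encoding example: imm8 = 0b01010101 expands each immediate bit into a
// full byte, i.e. "movi d0, #0x00ff00ff00ff00ff" (register illustrative).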

// EDIT byte mask: 2d

// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 in the pattern
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVIv2d_ns   : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
                                                simdimmtype10,
                                                "movi", ".2d",
                   [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;

def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;

def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;

// Set 64-bit vectors to all 0/1 by extracting from a 128-bit register as the
// extract is free and this gives better MachineCSE results.
def : Pat<(v1i64 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v2i32 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v4i16 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v8i8  immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;

def : Pat<(v1i64 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v2i32 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v4i16 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v8i8  immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
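// E.g. a v2i32 all-zeros value reuses "movi v0.2d, #0000000000000000" and is
// then read through the D subregister at no extra cost (register
// illustrative).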

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MOVI      : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;

def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
// EDIT per word: 2s & 4s with MSL shifter
def MOVIv2s_msl  : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MOVIv4s_msl  : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;

// Per byte: 8b & 16b
def MOVIv8b_ns   : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64,  imm0_255,
                                                 "movi", ".8b",
                       [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;

def MOVIv16b_ns  : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
                                                 "movi", ".16b",
                       [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
}

// AdvSIMD MVNI

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MVNI      : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;

def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;

// EDIT per word: 2s & 4s with MSL shifter
let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MVNIv2s_msl   : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MVNIv4s_msl   : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
}

//----------------------------------------------------------------------------
// AdvSIMD indexed element
//----------------------------------------------------------------------------

let hasSideEffects = 0 in {
  defm FMLA  : SIMDFPIndexedTied<0, 0b0001, "fmla">;
  defm FMLS  : SIMDFPIndexedTied<0, 0b0101, "fmls">;
}

// NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
// instruction expects the addend first, while the intrinsic expects it last.

// On the other hand, there are quite a few valid combinatorial options due to
// the commutativity of multiplication and the fact that (-x) * y = x * (-y).
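// E.g. both (fma $Rn, $dup_lane, $Rd) and (fma $dup_lane, $Rn, $Rd) should
// end up as the same "fmla v0.4s, v1.4s, v2.s[1]"-style instruction, hence
// the paired FMLA PatFrags below.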
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;

defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;

multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
  // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                           VectorIndexS:$idx))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (v2f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i32 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                           VectorIndexS:$idx))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (v4f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i32 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
  // (DUPLANE from 64-bit would be trivial).
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
                                           VectorIndexD:$idx))),
            (FMLSv2i64_indexed
                V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexD:$idx)>;
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
            (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
                (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;

  // 2 variants for 32-bit scalar version: extract from .2s or from .4s
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (insert_subvector undef,
                                                    (v2f32 (fneg V64:$Rm)),
                                                    (i32 0))),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;

  // 1 variant for 64-bit scalar version: extract from .1d or from .2d
  def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
                         (vector_extract (v2f64 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
                V128:$Rm, VectorIndexS:$idx)>;
}

defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;

defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
defm FMUL  : SIMDFPIndexed<0, 0b1001, "fmul", fmul>;

def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv2i32_indexed V64:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv4i32_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
          (FMULv2i64_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
            (i64 0))>;

defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;

defm SQDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqdmulh_lane,
                                     int_aarch64_neon_sqdmulh_laneq>;
defm SQRDMULH : SIMDIndexedHSPatterns<int_aarch64_neon_sqrdmulh_lane,
                                      int_aarch64_neon_sqrdmulh_laneq>;

// Generated by MachineCombine
defm MLA   : SIMDVectorIndexedHSTied<1, 0b0000, "mla", null_frag>;
defm MLS   : SIMDVectorIndexedHSTied<1, 0b0100, "mls", null_frag>;

defm MUL   : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
                int_aarch64_neon_smull>;
defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
                                           int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
                                           int_aarch64_neon_sqsub>;
defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
                                          int_aarch64_neon_sqadd>;
defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
                                          int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
defm UMLAL   : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL   : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL   : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
                int_aarch64_neon_umull>;

// A scalar sqdmull with the second operand being a vector lane can be
// handled directly with the indexed instruction encoding.
def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                          (vector_extract (v4i32 V128:$Vm),
                                                           VectorIndexS:$idx)),
          (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
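// E.g. vqdmulls_lane_s32(a, v, 1) can be selected as a single
// "sqdmull d0, s1, v2.s[1]" (registers illustrative) rather than extracting
// the lane first.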

//----------------------------------------------------------------------------
// AdvSIMD scalar shift instructions
//----------------------------------------------------------------------------
defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">;
defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">;
defm SCVTF  : SIMDFPScalarRShift<0, 0b11100, "scvtf">;
defm UCVTF  : SIMDFPScalarRShift<1, 0b11100, "ucvtf">;
// Codegen patterns for the above. We don't put these directly on the
// instructions because TableGen's type inference can't handle the truth.
// Having the same base pattern for fp <--> int totally freaks it out.
def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
          (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
          (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
                                            vecshiftR64:$imm)),
          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
                                            vecshiftR64:$imm)),
          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
          (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
                                            vecshiftR64:$imm)),
          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
                                            vecshiftR64:$imm)),
          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
          (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;

// Patterns for FP16 intrinsics - these require a register copy to/from FPR16,
// as i16 is not a supported register type.

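// A sketch of the idea (registers illustrative): the i16 value sits in the
// low half of an FPR, so an EXTRACT_SUBREG to hsub produces the FPR16 the
// half-precision fixed-point conversions expect, e.g. "scvtf h0, h0, #4".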
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
            (and FPR32:$Rn, (i32 65535)),
            vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR32:$imm)),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FCVTZSh FPR16:$Rn, vecshiftR32:$imm),
            hsub))>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
          (i64 (INSERT_SUBREG
            (i64 (IMPLICIT_DEF)),
            (FCVTZSh FPR16:$Rn, vecshiftR64:$imm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR32:$imm)),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FCVTZUh FPR16:$Rn, vecshiftR32:$imm),
            hsub))>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR64:$imm)),
          (i64 (INSERT_SUBREG
            (i64 (IMPLICIT_DEF)),
            (FCVTZUh FPR16:$Rn, vecshiftR64:$imm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_facge (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FACGE16 FPR16:$Rn, FPR16:$Rm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_facgt (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FACGT16 FPR16:$Rn, FPR16:$Rm),
            hsub))>;

defm SHL      : SIMDScalarLShiftD<   0, 0b01010, "shl", AArch64vshl>;
defm SLI      : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
defm SQRSHRN  : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
                                     int_aarch64_neon_sqrshrn>;
defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
                                     int_aarch64_neon_sqrshrun>;
defm SQSHLU   : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL    : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN   : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
                                     int_aarch64_neon_sqshrn>;
defm SQSHRUN  : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
                                     int_aarch64_neon_sqshrun>;
defm SRI      : SIMDScalarRShiftDTied<   1, 0b01000, "sri">;
defm SRSHR    : SIMDScalarRShiftD<   0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA    : SIMDScalarRShiftDTied<   0, 0b00110, "srsra",
    TriOpFrag<(add node:$LHS,
                   (AArch64srshri node:$MHS, node:$RHS))>>;
defm SSHR     : SIMDScalarRShiftD<   0, 0b00000, "sshr", AArch64vashr>;
defm SSRA     : SIMDScalarRShiftDTied<   0, 0b00010, "ssra",
    TriOpFrag<(add node:$LHS,
                   (AArch64vashr node:$MHS, node:$RHS))>>;
defm UQRSHRN  : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
                                     int_aarch64_neon_uqrshrn>;
defm UQSHL    : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN   : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
                                     int_aarch64_neon_uqshrn>;
defm URSHR    : SIMDScalarRShiftD<   1, 0b00100, "urshr", AArch64urshri>;
defm URSRA    : SIMDScalarRShiftDTied<   1, 0b00110, "ursra",
    TriOpFrag<(add node:$LHS,
                   (AArch64urshri node:$MHS, node:$RHS))>>;
defm USHR     : SIMDScalarRShiftD<   1, 0b00000, "ushr", AArch64vlshr>;
defm USRA     : SIMDScalarRShiftDTied<   1, 0b00010, "usra",
    TriOpFrag<(add node:$LHS,
                   (AArch64vlshr node:$MHS, node:$RHS))>>;

//----------------------------------------------------------------------------
// AdvSIMD vector shift instructions
//----------------------------------------------------------------------------
defm FCVTZS:SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
defm FCVTZU:SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
defm SCVTF: SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
                                   int_aarch64_neon_vcvtfxs2fp>;
defm RSHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
                                         int_aarch64_neon_rshrn>;
defm SHL     : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
defm SHRN    : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
                          BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
defm SLI     : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", AArch64vsli>;
def : Pat<(v1i64 (AArch64vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                                      (i32 vecshiftL64:$imm))),
          (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
defm SQRSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
                                         int_aarch64_neon_sqrshrn>;
defm SQRSHRUN: SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
                                         int_aarch64_neon_sqrshrun>;
defm SQSHLU : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL  : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
                                         int_aarch64_neon_sqshrn>;
defm SQSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
                                         int_aarch64_neon_sqshrun>;
defm SRI     : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", AArch64vsri>;
def : Pat<(v1i64 (AArch64vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                                      (i32 vecshiftR64:$imm))),
          (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
defm SRSHR   : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA   : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
                 TriOpFrag<(add node:$LHS,
                                (AArch64srshri node:$MHS, node:$RHS))> >;
defm SSHLL   : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
                BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;

defm SSHR    : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
defm SSRA    : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
                TriOpFrag<(add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
defm UCVTF   : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
                        int_aarch64_neon_vcvtfxu2fp>;
defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
                                         int_aarch64_neon_uqrshrn>;
defm UQSHL   : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN  : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
                                         int_aarch64_neon_uqshrn>;
defm URSHR   : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
defm URSRA   : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
                TriOpFrag<(add node:$LHS,
                               (AArch64urshri node:$MHS, node:$RHS))> >;
defm USHLL   : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
                BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
defm USHR    : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
defm USRA    : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
                TriOpFrag<(add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;

// SHRN patterns for when a logical right shift was used instead of arithmetic
// (the immediate guarantees no sign bits actually end up in the result so it
// doesn't matter).
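// E.g. for (v8i8 (trunc (v8i16 X lshr #5))), result bit 7 comes from source
// bit 12, an original payload bit, so "shrn v0.8b, v1.8h, #5" is equally
// correct (registers illustrative).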
def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
          (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
          (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
          (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;

def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
                                 (trunc (AArch64vlshr (v8i16 V128:$Rn),
                                                    vecshiftR16Narrow:$imm)))),
          (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
                                 (trunc (AArch64vlshr (v4i32 V128:$Rn),
                                                    vecshiftR32Narrow:$imm)))),
          (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
                                 (trunc (AArch64vlshr (v2i64 V128:$Rn),
                                                    vecshiftR64Narrow:$imm)))),
          (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR64Narrow:$imm)>;

// Vector sign and zero extensions are implemented with SSHLL and USHLL.
// Anyexts are implemented as zexts.
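// E.g. a v8i8 -> v8i16 zero extension is just "ushll v0.8h, v1.8b, #0", a
// widening shift by zero (registers illustrative).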
def : Pat<(v8i16 (sext   (v8i8 V64:$Rn))),  (SSHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext   (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext   (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext   (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext   (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext   (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
// Also match an extend from the upper half of a 128-bit source register.
def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (sext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (SSHLLv4i32_shift V128:$Rn, (i32 0))>;

// Vector shift sxtl aliases
def : InstAlias<"sxtl.8h $dst, $src1",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.8h, $src1.8b",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.4s $dst, $src1",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.4s, $src1.4h",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.2d $dst, $src1",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.2d, $src1.2s",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift sxtl2 aliases
def : InstAlias<"sxtl2.8h $dst, $src1",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.4s $dst, $src1",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.2d $dst, $src1",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;

// Vector shift uxtl aliases
def : InstAlias<"uxtl.8h $dst, $src1",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.8h, $src1.8b",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.4s $dst, $src1",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.4s, $src1.4h",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.2d $dst, $src1",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.2d, $src1.2s",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift uxtl2 aliases
def : InstAlias<"uxtl2.8h $dst, $src1",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.4s $dst, $src1",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.2d $dst, $src1",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;

// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// These patterns are more complex because floating point loads do not
// support sign extension.
// The sign extension has to be explicitly added and is only supported for
// one step: byte-to-half, half-to-word, word-to-doubleword.
// SCVTF GPR -> FPR is 9 cycles.
// SCVTF FPR -> FPR is 4 cycles.
// (sign extension with lengthening) SXTL FPR -> FPR is 2 cycles.
// Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
// and still be faster.
// However, this is not good for code size.
// 8-bits -> float. 2 sizes step-up.
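// A sketch of the sequence this selects for (float)(int8_t)mem, registers
// illustrative:
//   ldr   b0, [x0]           // FP load, no sign extension available
//   sshll v0.8h, v0.8b, #0   // sign extend 8 -> 16 bits
//   sshll v0.4s, v0.4h, #0   // sign extend 16 -> 32 bits
//   scvtf s0, s0             // FPR -> FPR convert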
class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                              (f64
                                (EXTRACT_SUBREG
                                  (SSHLLv8i8_shift
                                    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                        INST,
                                        bsub),
                                    0),
                                  dsub)),
                               0),
                             ssub)))>,
    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;

def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
                          (LDRBroW  GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
                          (LDRBroX  GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
                          (LDURBi GPR64sp:$Rn, simm9:$offset)>;

// 16-bits -> float. 1 size step-up.
class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                  INST,
                                  hsub),
                                0),
                            ssub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW   GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX   GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;

6401// 32-bit to 32-bit is handled in the target-specific dag combine:
6402// performIntToFpCombine.
6403// 64-bit integer to 32-bit floating point is not possible with
6404// SCVTF on floating point registers (both source and destination
6405// must have the same size).
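// (Converting from a 64-bit GPR is still possible, e.g. "scvtf s0, x0"; only
// the FPR -> FPR form requires matching sizes.)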
6406
6407// Here are the patterns for 8, 16, 32, and 64-bits to double.
6408// 8-bits -> double. 3 sizes step-up: give up.
6409// 16-bits -> double. 2 sizes step-up.
6410class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
6411  : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
6412           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
6413                              (SSHLLv2i32_shift
6414                                 (f64
6415                                  (EXTRACT_SUBREG
6416                                    (SSHLLv4i16_shift
6417                                      (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6418                                        INST,
6419                                        hsub),
6420                                     0),
6421                                   dsub)),
6422                               0),
6423                             dsub)))>,
6424    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
6425
6426def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
6427                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
6428def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
6429                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
6430def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
6431                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
6432def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
6433                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
6434// 32-bits -> double. 1 size step-up.
6435class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
6436  : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
6437           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
6438                              (SSHLLv2i32_shift
6439                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
6440                                  INST,
6441                                  ssub),
6442                               0),
6443                             dsub)))>, Requires<[NotForCodeSize]>;
6444
6445def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
6446                           (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
6447def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
6448                           (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
6449def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
6450                           (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
6451def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
6452                           (LDURSi GPR64sp:$Rn, simm9:$offset)>;
6453
6454// 64-bits -> double is handled in the target-specific dag combine:
6455// performIntToFpCombine.
6456
6457
6458//----------------------------------------------------------------------------
6459// AdvSIMD Load-Store Structure
6460//----------------------------------------------------------------------------
6461defm LD1 : SIMDLd1Multiple<"ld1">;
6462defm LD2 : SIMDLd2Multiple<"ld2">;
6463defm LD3 : SIMDLd3Multiple<"ld3">;
6464defm LD4 : SIMDLd4Multiple<"ld4">;
6465
6466defm ST1 : SIMDSt1Multiple<"st1">;
6467defm ST2 : SIMDSt2Multiple<"st2">;
6468defm ST3 : SIMDSt3Multiple<"st3">;
6469defm ST4 : SIMDSt4Multiple<"st4">;
6470
6471class Ld1Pat<ValueType ty, Instruction INST>
6472  : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;
6473
6474def : Ld1Pat<v16i8, LD1Onev16b>;
6475def : Ld1Pat<v8i16, LD1Onev8h>;
6476def : Ld1Pat<v4i32, LD1Onev4s>;
6477def : Ld1Pat<v2i64, LD1Onev2d>;
6478def : Ld1Pat<v8i8,  LD1Onev8b>;
6479def : Ld1Pat<v4i16, LD1Onev4h>;
6480def : Ld1Pat<v2i32, LD1Onev2s>;
6481def : Ld1Pat<v1i64, LD1Onev1d>;
6482
6483class St1Pat<ValueType ty, Instruction INST>
6484  : Pat<(store ty:$Vt, GPR64sp:$Rn),
6485        (INST ty:$Vt, GPR64sp:$Rn)>;
6486
6487def : St1Pat<v16i8, ST1Onev16b>;
6488def : St1Pat<v8i16, ST1Onev8h>;
6489def : St1Pat<v4i32, ST1Onev4s>;
6490def : St1Pat<v2i64, ST1Onev2d>;
6491def : St1Pat<v8i8,  ST1Onev8b>;
6492def : St1Pat<v4i16, ST1Onev4h>;
6493def : St1Pat<v2i32, ST1Onev2s>;
6494def : St1Pat<v1i64, ST1Onev1d>;
6495
6496//---
6497// Single-element
6498//---
6499
6500defm LD1R          : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
6501defm LD2R          : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
6502defm LD3R          : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
6503defm LD4R          : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
6504let mayLoad = 1, hasSideEffects = 0 in {
6505defm LD1 : SIMDLdSingleBTied<0, 0b000,       "ld1", VecListOneb,   GPR64pi1>;
6506defm LD1 : SIMDLdSingleHTied<0, 0b010, 0,    "ld1", VecListOneh,   GPR64pi2>;
6507defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes,   GPR64pi4>;
6508defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned,   GPR64pi8>;
6509defm LD2 : SIMDLdSingleBTied<1, 0b000,       "ld2", VecListTwob,   GPR64pi2>;
6510defm LD2 : SIMDLdSingleHTied<1, 0b010, 0,    "ld2", VecListTwoh,   GPR64pi4>;
6511defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos,   GPR64pi8>;
6512defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod,   GPR64pi16>;
6513defm LD3 : SIMDLdSingleBTied<0, 0b001,       "ld3", VecListThreeb, GPR64pi3>;
6514defm LD3 : SIMDLdSingleHTied<0, 0b011, 0,    "ld3", VecListThreeh, GPR64pi6>;
6515defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
6516defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
6517defm LD4 : SIMDLdSingleBTied<1, 0b001,       "ld4", VecListFourb,  GPR64pi4>;
6518defm LD4 : SIMDLdSingleHTied<1, 0b011, 0,    "ld4", VecListFourh,  GPR64pi8>;
6519defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours,  GPR64pi16>;
6520defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd,  GPR64pi32>;
6521}
6522
6523def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
6524          (LD1Rv8b GPR64sp:$Rn)>;
6525def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
6526          (LD1Rv16b GPR64sp:$Rn)>;
6527def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
6528          (LD1Rv4h GPR64sp:$Rn)>;
6529def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
6530          (LD1Rv8h GPR64sp:$Rn)>;
6531def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
6532          (LD1Rv2s GPR64sp:$Rn)>;
6533def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
6534          (LD1Rv4s GPR64sp:$Rn)>;
6535def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
6536          (LD1Rv2d GPR64sp:$Rn)>;
6537def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
6538          (LD1Rv1d GPR64sp:$Rn)>;
6539// Grab the floating point versions too
6540def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
6541          (LD1Rv2s GPR64sp:$Rn)>;
6542def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
6543          (LD1Rv4s GPR64sp:$Rn)>;
6544def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
6545          (LD1Rv2d GPR64sp:$Rn)>;
6546def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
6547          (LD1Rv1d GPR64sp:$Rn)>;
6548def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
6549          (LD1Rv4h GPR64sp:$Rn)>;
6550def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
6551          (LD1Rv8h GPR64sp:$Rn)>;
6552def : Pat<(v4bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
6553          (LD1Rv4h GPR64sp:$Rn)>;
6554def : Pat<(v8bf16 (AArch64dup (bf16 (load GPR64sp:$Rn)))),
6555          (LD1Rv8h GPR64sp:$Rn)>;
6556
6557class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
6558                    ValueType VTy, ValueType STy, Instruction LD1>
6559  : Pat<(vector_insert (VTy VecListOne128:$Rd),
6560           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
6561        (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;
6562
6563def : Ld1Lane128Pat<extloadi8,  VectorIndexB, v16i8, i32, LD1i8>;
6564def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
6565def : Ld1Lane128Pat<load,       VectorIndexS, v4i32, i32, LD1i32>;
6566def : Ld1Lane128Pat<load,       VectorIndexS, v4f32, f32, LD1i32>;
6567def : Ld1Lane128Pat<load,       VectorIndexD, v2i64, i64, LD1i64>;
6568def : Ld1Lane128Pat<load,       VectorIndexD, v2f64, f64, LD1i64>;
6569def : Ld1Lane128Pat<load,       VectorIndexH, v8f16, f16, LD1i16>;
6570def : Ld1Lane128Pat<load,       VectorIndexH, v8bf16, bf16, LD1i16>;
6571
6572class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
6573                   ValueType VTy, ValueType STy, Instruction LD1>
6574  : Pat<(vector_insert (VTy VecListOne64:$Rd),
6575           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
6576        (EXTRACT_SUBREG
6577            (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
6578                          VecIndex:$idx, GPR64sp:$Rn),
6579            dsub)>;
6580
6581def : Ld1Lane64Pat<extloadi8,  VectorIndexB, v8i8,  i32, LD1i8>;
6582def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
6583def : Ld1Lane64Pat<load,       VectorIndexS, v2i32, i32, LD1i32>;
6584def : Ld1Lane64Pat<load,       VectorIndexS, v2f32, f32, LD1i32>;
6585def : Ld1Lane64Pat<load,       VectorIndexH, v4f16, f16, LD1i16>;
6586def : Ld1Lane64Pat<load,       VectorIndexH, v4bf16, bf16, LD1i16>;
6587
6588
6589defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
6590defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
6591defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
6592defm LD4 : SIMDLdSt4SingleAliases<"ld4">;
6593
6594// Stores
6595defm ST1 : SIMDStSingleB<0, 0b000,       "st1", VecListOneb, GPR64pi1>;
6596defm ST1 : SIMDStSingleH<0, 0b010, 0,    "st1", VecListOneh, GPR64pi2>;
6597defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
6598defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;
6599
6600let AddedComplexity = 19 in
6601class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
6602                    ValueType VTy, ValueType STy, Instruction ST1>
6603  : Pat<(scalar_store
6604             (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
6605             GPR64sp:$Rn),
6606        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;
6607
6608def : St1Lane128Pat<truncstorei8,  VectorIndexB, v16i8, i32, ST1i8>;
6609def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
6610def : St1Lane128Pat<store,         VectorIndexS, v4i32, i32, ST1i32>;
6611def : St1Lane128Pat<store,         VectorIndexS, v4f32, f32, ST1i32>;
6612def : St1Lane128Pat<store,         VectorIndexD, v2i64, i64, ST1i64>;
6613def : St1Lane128Pat<store,         VectorIndexD, v2f64, f64, ST1i64>;
6614def : St1Lane128Pat<store,         VectorIndexH, v8f16, f16, ST1i16>;
6615def : St1Lane128Pat<store,         VectorIndexH, v8bf16, bf16, ST1i16>;
6616
6617let AddedComplexity = 19 in
6618class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
6619                   ValueType VTy, ValueType STy, Instruction ST1>
6620  : Pat<(scalar_store
6621             (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
6622             GPR64sp:$Rn),
6623        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
6624             VecIndex:$idx, GPR64sp:$Rn)>;
6625
6626def : St1Lane64Pat<truncstorei8,  VectorIndexB, v8i8, i32, ST1i8>;
6627def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
6628def : St1Lane64Pat<store,         VectorIndexS, v2i32, i32, ST1i32>;
6629def : St1Lane64Pat<store,         VectorIndexS, v2f32, f32, ST1i32>;
6630def : St1Lane64Pat<store,         VectorIndexH, v4f16, f16, ST1i16>;
6631def : St1Lane64Pat<store,         VectorIndexH, v4bf16, bf16, ST1i16>;
6632
6633multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
6634                             ValueType VTy, ValueType STy, Instruction ST1,
6635                             int offset> {
6636  def : Pat<(scalar_store
6637              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
6638              GPR64sp:$Rn, offset),
6639        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
6640             VecIndex:$idx, GPR64sp:$Rn, XZR)>;
6641
6642  def : Pat<(scalar_store
6643              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
6644              GPR64sp:$Rn, GPR64:$Rm),
6645        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
6646             VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
6647}
6648
6649defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
6650defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
6651                        2>;
6652defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
6653defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
6654defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
6655defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
6656defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
6657defm : St1LanePost64Pat<post_store, VectorIndexH, v4bf16, bf16, ST1i16_POST, 2>;
6658
6659multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
6660                             ValueType VTy, ValueType STy, Instruction ST1,
6661                             int offset> {
6662  def : Pat<(scalar_store
6663              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
6664              GPR64sp:$Rn, offset),
6665        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;
6666
6667  def : Pat<(scalar_store
6668              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
6669              GPR64sp:$Rn, GPR64:$Rm),
6670        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
6671}
6672
6673defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
6674                         1>;
6675defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
6676                         2>;
6677defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
6678defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
6679defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
6680defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
6681defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
6682defm : St1LanePost128Pat<post_store, VectorIndexH, v8bf16, bf16, ST1i16_POST, 2>;
6683
6684let mayStore = 1, hasSideEffects = 0 in {
6685defm ST2 : SIMDStSingleB<1, 0b000,       "st2", VecListTwob,   GPR64pi2>;
6686defm ST2 : SIMDStSingleH<1, 0b010, 0,    "st2", VecListTwoh,   GPR64pi4>;
6687defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos,   GPR64pi8>;
6688defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod,   GPR64pi16>;
6689defm ST3 : SIMDStSingleB<0, 0b001,       "st3", VecListThreeb, GPR64pi3>;
6690defm ST3 : SIMDStSingleH<0, 0b011, 0,    "st3", VecListThreeh, GPR64pi6>;
6691defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
6692defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
6693defm ST4 : SIMDStSingleB<1, 0b001,       "st4", VecListFourb,  GPR64pi4>;
6694defm ST4 : SIMDStSingleH<1, 0b011, 0,    "st4", VecListFourh,  GPR64pi8>;
6695defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours,  GPR64pi16>;
6696defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd,  GPR64pi32>;
6697}
6698
6699defm ST1 : SIMDLdSt1SingleAliases<"st1">;
6700defm ST2 : SIMDLdSt2SingleAliases<"st2">;
6701defm ST3 : SIMDLdSt3SingleAliases<"st3">;
6702defm ST4 : SIMDLdSt4SingleAliases<"st4">;
6703
6704//----------------------------------------------------------------------------
6705// Crypto extensions
6706//----------------------------------------------------------------------------
6707
6708let Predicates = [HasAES] in {
6709def AESErr   : AESTiedInst<0b0100, "aese",   int_aarch64_crypto_aese>;
6710def AESDrr   : AESTiedInst<0b0101, "aesd",   int_aarch64_crypto_aesd>;
6711def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
6712def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
6713}
6714
6715// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
6716// for AES fusion on some CPUs.
6717let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
6718def AESMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
6719                        Sched<[WriteV]>;
6720def AESIMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
6721                         Sched<[WriteV]>;
6722}
6723
6724// Only use constrained versions of AES(I)MC instructions if they are paired with
6725// AESE/AESD.
6726def : Pat<(v16i8 (int_aarch64_crypto_aesmc
6727            (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
6728                                            (v16i8 V128:$src2))))),
6729          (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1),
6730                                             (v16i8 V128:$src2)))))>,
6731          Requires<[HasFuseAES]>;
6732
6733def : Pat<(v16i8 (int_aarch64_crypto_aesimc
6734            (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
6735                                            (v16i8 V128:$src2))))),
6736          (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1),
6737                                              (v16i8 V128:$src2)))))>,
6738          Requires<[HasFuseAES]>;
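// For example (a sketch; registers are arbitrary), tying the pair keeps them
// back to back so the fusion can kick in:
//   aese  v0.16b, v1.16b
//   aesmc v0.16b, v0.16b    // reads the aese result in the same register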
6739
6740let Predicates = [HasSHA2] in {
6741def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c",   int_aarch64_crypto_sha1c>;
6742def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p",   int_aarch64_crypto_sha1p>;
6743def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m",   int_aarch64_crypto_sha1m>;
6744def SHA1SU0rrr   : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
6745def SHA256Hrrr   : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
6746def SHA256H2rrr  : SHATiedInstQQV<0b101, "sha256h2", int_aarch64_crypto_sha256h2>;
6747def SHA256SU1rrr : SHATiedInstVVV<0b110, "sha256su1", int_aarch64_crypto_sha256su1>;
6748
6749def SHA1Hrr     : SHAInstSS<    0b0000, "sha1h",    int_aarch64_crypto_sha1h>;
6750def SHA1SU1rr   : SHATiedInstVV<0b0001, "sha1su1",  int_aarch64_crypto_sha1su1>;
6751def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0", int_aarch64_crypto_sha256su0>;
6752}
6753
6754//----------------------------------------------------------------------------
6755// Compiler-pseudos
6756//----------------------------------------------------------------------------
6757// FIXME: Like for X86, these should go in their own separate .td file.
6758
6759def def32 : PatLeaf<(i32 GPR32:$src), [{
6760  return isDef32(*N);
6761}]>;
6762
6763// In the case of a 32-bit def that is known to implicitly zero-extend,
6764// we can use a SUBREG_TO_REG.
6765def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;
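// E.g. after "add w8, w0, w1" the upper 32 bits of x8 are already zero, so
// the i32 -> i64 zext needs no instruction at all.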
6766
6767// For an anyext, we don't care what the high bits are, so we can perform an
6768// INSERT_SUBREG into an IMPLICIT_DEF.
6769def : Pat<(i64 (anyext GPR32:$src)),
6770          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;
6771
6772// When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
6773// then assert the extension has happened.
6774def : Pat<(i64 (zext GPR32:$src)),
6775          (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
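// E.g. zero-extending w0 into x0 becomes "mov w0, w0" (an ORR with WZR):
// writing a W register implicitly zeroes the top 32 bits of the X register.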
6776
6777// To sign extend, we use a signed bitfield move instruction (SBFM) on the
6778// containing super-reg.
6779def : Pat<(i64 (sext GPR32:$src)),
6780   (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
6781def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
6782def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
6783def : Pat<(i64 (sext_inreg GPR64:$src, i8)),  (SBFMXri GPR64:$src, 0, 7)>;
6784def : Pat<(i64 (sext_inreg GPR64:$src, i1)),  (SBFMXri GPR64:$src, 0, 0)>;
6785def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
6786def : Pat<(i32 (sext_inreg GPR32:$src, i8)),  (SBFMWri GPR32:$src, 0, 7)>;
6787def : Pat<(i32 (sext_inreg GPR32:$src, i1)),  (SBFMWri GPR32:$src, 0, 0)>;
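// E.g. sign-extending the low byte of w0 in place is "sbfm w0, w0, #0, #7",
// which disassembles as the alias "sxtb w0, w0".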
6788
6789def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
6790          (SBFMWri GPR32:$Rn, (i64 (i32shift_a       imm0_31:$imm)),
6791                              (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
6792def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
6793          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
6794                              (i64 (i64shift_sext_i8 imm0_63:$imm)))>;
6795
6796def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
6797          (SBFMWri GPR32:$Rn, (i64 (i32shift_a        imm0_31:$imm)),
6798                              (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
6799def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
6800          (SBFMXri GPR64:$Rn, (i64 (i64shift_a        imm0_63:$imm)),
6801                              (i64 (i64shift_sext_i16 imm0_63:$imm)))>;
6802
6803def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
6804          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
6805                   (i64 (i64shift_a        imm0_63:$imm)),
6806                   (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
6807
6808// sra patterns have an AddedComplexity of 10, so make sure we have a higher
6809// AddedComplexity for the following patterns since we want to match sext + sra
6810// patterns before we attempt to match a single sra node.
6811let AddedComplexity = 20 in {
6812// We support all sext + sra combinations that preserve at least one bit of the
6813// original value being sign extended; i.e. we support shifts of up to
6814// bitwidth-1 bits.
6815def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
6816          (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
6817def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
6818          (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;
6819
6820def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
6821          (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
6822def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
6823          (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;
6824
6825def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
6826          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
6827                   (i64 imm0_31:$imm), 31)>;
6828} // AddedComplexity = 20
6829
6830// To truncate, we can simply extract from a subregister.
6831def : Pat<(i32 (trunc GPR64sp:$src)),
6832          (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;
6833
6834// __builtin_trap() uses the BRK instruction on AArch64.
6835def : Pat<(trap), (BRK 1)>;
6836def : Pat<(debugtrap), (BRK 0xF000)>;
6837
6838def ubsan_trap_xform : SDNodeXForm<timm, [{
6839  return CurDAG->getTargetConstant(N->getZExtValue() | ('U' << 8), SDLoc(N), MVT::i32);
6840}]>;
6841
6842def ubsan_trap_imm : TImmLeaf<i32, [{
6843  return isUInt<8>(Imm);
6844}], ubsan_trap_xform>;
6845
6846def : Pat<(ubsantrap ubsan_trap_imm:$kind), (BRK ubsan_trap_imm:$kind)>;
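// For example, __builtin_trap() becomes "brk #0x1", __builtin_debugtrap()
// becomes "brk #0xf000", and a ubsan trap of kind K becomes
// "brk #(0x5500 | K)" ('U' << 8 tags the immediate as a UBSan trap).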
6847
6848// Multiply high patterns which multiply the lower subvector using smull/umull
6849// and the upper subvector with smull2/umull2. Then shuffle the high
6850// halves of both results together.
6851def : Pat<(v16i8 (mulhs V128:$Rn, V128:$Rm)),
6852          (UZP2v16i8
6853           (SMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
6854                            (EXTRACT_SUBREG V128:$Rm, dsub)),
6855           (SMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
6856def : Pat<(v8i16 (mulhs V128:$Rn, V128:$Rm)),
6857          (UZP2v8i16
6858           (SMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
6859                             (EXTRACT_SUBREG V128:$Rm, dsub)),
6860           (SMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
6861def : Pat<(v4i32 (mulhs V128:$Rn, V128:$Rm)),
6862          (UZP2v4i32
6863           (SMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
6864                             (EXTRACT_SUBREG V128:$Rm, dsub)),
6865           (SMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
6866
6867def : Pat<(v16i8 (mulhu V128:$Rn, V128:$Rm)),
6868          (UZP2v16i8
6869           (UMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
6870                            (EXTRACT_SUBREG V128:$Rm, dsub)),
6871           (UMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
6872def : Pat<(v8i16 (mulhu V128:$Rn, V128:$Rm)),
6873          (UZP2v8i16
6874           (UMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
6875                             (EXTRACT_SUBREG V128:$Rm, dsub)),
6876           (UMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
6877def : Pat<(v4i32 (mulhu V128:$Rn, V128:$Rm)),
6878          (UZP2v4i32
6879           (UMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
6880                             (EXTRACT_SUBREG V128:$Rm, dsub)),
6881           (UMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
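// E.g. for a v8i16 mulhs this selects, roughly (registers illustrative):
//   smull  v2.4s, v0.4h, v1.4h    // low-half products
//   smull2 v3.4s, v0.8h, v1.8h    // high-half products
//   uzp2   v0.8h, v2.8h, v3.8h    // keep the top 16 bits of each product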
6882
6883// Conversions within AdvSIMD types in the same register size are free.
6884// But because we need a consistent lane ordering, in big-endian mode many
6885// conversions require one or more REV instructions.
6886//
6887// Consider a simple memory load followed by a bitconvert then a store.
6888//   v0 = load v2i32
6889//   v1 = BITCAST v2i32 v0 to v4i16
6890//        store v4i16 v1
6891//
6892// In big endian mode every memory access has an implicit byte swap. LDR and
6893// STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
6894// is, they treat the vector as a sequence of elements to be byte-swapped.
6895// The two pairs of instructions are fundamentally incompatible. We've decided
6896// to use only LD1/ST1, to simplify the compiler implementation.
6897//
6898// LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
6899// the original code sequence:
6900//   v0 = load v2i32
6901//   v1 = REV v2i32 v0               (implicit)
6902//   v2 = BITCAST v2i32 v1 to v4i16
6903//   v3 = REV v4i16 v2               (implicit)
6904//        store v4i16 v3
6905//
6906// But this is now broken - the value stored is different from the value loaded
6907// due to lane reordering. To fix this, on every BITCAST we must perform two
6908// other REVs:
6909//   v0 = load v2i32
6910//   v1 = REV v2i32 v0               (implicit)
6911//   v2 = REV v2i32 v1
6912//   v3 = BITCAST v2i32 v2 to v4i16
6913//   v4 = REV v4i16 v3
6914//   v5 = REV v4i16 v4               (implicit)
6915//        store v4i16 v5
6916//
6917// This means an extra two instructions, but actually in most cases the two REV
6918// instructions can be combined into one. For example:
6919//   (REV64_2s (REV64_4h X)) === (REV32_4h X)
6920//
6921// There is also no 128-bit REV instruction. This must be synthesized with an
6922// EXT instruction.
6923//
6924// Most bitconverts require some sort of conversion. The only exceptions are:
6925//   a) Identity conversions - vNfX <-> vNiX
6926//   b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
6927//
6928
6929// Natural vector casts (64 bit)
6930def : Pat<(v8i8 (AArch64NvCast (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
6931def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
6932def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
6933def : Pat<(v4bf16 (AArch64NvCast (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
6934def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
6935def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
6936def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
6937
6938def : Pat<(v8i8 (AArch64NvCast (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
6939def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
6940def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
6941def : Pat<(v4bf16 (AArch64NvCast (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;
6942def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
6943def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
6944
6945def : Pat<(v8i8 (AArch64NvCast (v8i8 FPR64:$src))), (v8i8 FPR64:$src)>;
6946def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
6947def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
6948def : Pat<(v4bf16 (AArch64NvCast (v8i8 FPR64:$src))), (v4bf16 FPR64:$src)>;
6949def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
6950def : Pat<(v2f32 (AArch64NvCast (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
6951def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
6952
6953def : Pat<(v8i8 (AArch64NvCast (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
6954def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
6955def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
6956def : Pat<(v4bf16 (AArch64NvCast (f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
6957def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
6958def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
6959def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
6960def : Pat<(v1f64 (AArch64NvCast (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
6961
6962def : Pat<(v8i8 (AArch64NvCast (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
6963def : Pat<(v4i16 (AArch64NvCast (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
6964def : Pat<(v2i32 (AArch64NvCast (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
6965def : Pat<(v2f32 (AArch64NvCast (v2f32 FPR64:$src))), (v2f32 FPR64:$src)>;
6966def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
6967def : Pat<(v1f64 (AArch64NvCast (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
6968
6969// Natural vector casts (128 bit)
6970def : Pat<(v16i8 (AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
6971def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
6972def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
6973def : Pat<(v8bf16 (AArch64NvCast (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
6974def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>;
6975def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
6976def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
6977def : Pat<(v2f64 (AArch64NvCast (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
6978
6979def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
6980def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>;
6981def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
6982def : Pat<(v8bf16 (AArch64NvCast (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;
6983def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
6984def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
6985def : Pat<(v4f32 (AArch64NvCast (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
6986def : Pat<(v2f64 (AArch64NvCast (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
6987
6988def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>;
6989def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
6990def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
6991def : Pat<(v8bf16 (AArch64NvCast (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
6992def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
6993def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
6994def : Pat<(v4f32 (AArch64NvCast (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
6995def : Pat<(v2f64 (AArch64NvCast (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
6996
6997def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
6998def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
6999def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
7000def : Pat<(v8bf16 (AArch64NvCast (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7001def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
7002def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>;
7003def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
7004def : Pat<(v2f64 (AArch64NvCast (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
7005
7006def : Pat<(v16i8 (AArch64NvCast (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
7007def : Pat<(v8i16 (AArch64NvCast (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
7008def : Pat<(v4i32 (AArch64NvCast (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
7009def : Pat<(v4f32 (AArch64NvCast (v4f32 FPR128:$src))), (v4f32 FPR128:$src)>;
7010def : Pat<(v2i64 (AArch64NvCast (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
7011def : Pat<(v8f16 (AArch64NvCast (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
7012def : Pat<(v8bf16 (AArch64NvCast (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
7013def : Pat<(v2f64 (AArch64NvCast (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
7014
7015def : Pat<(v16i8 (AArch64NvCast (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
7016def : Pat<(v8i16 (AArch64NvCast (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
7017def : Pat<(v4i32 (AArch64NvCast (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
7018def : Pat<(v2i64 (AArch64NvCast (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
7019def : Pat<(v2f64 (AArch64NvCast (v2f64 FPR128:$src))), (v2f64 FPR128:$src)>;
7020def : Pat<(v8f16 (AArch64NvCast (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
7021def : Pat<(v8bf16 (AArch64NvCast (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
7022def : Pat<(v4f32 (AArch64NvCast (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
7023
7024let Predicates = [IsLE] in {
7025def : Pat<(v8i8  (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7026def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7027def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7028def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7029def : Pat<(v4bf16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7030def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7031
7032def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
7033          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7034def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
7035          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7036def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
7037          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7038def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
7039          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7040def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
7041          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7042def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
7043          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7044def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
7045          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7046}
7047let Predicates = [IsBE] in {
7048def : Pat<(v8i8  (bitconvert GPR64:$Xn)),
7049                 (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7050def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
7051                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7052def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
7053                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7054def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
7055                 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7056def : Pat<(v4bf16 (bitconvert GPR64:$Xn)),
7057                  (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7058def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
7059                 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
7060
7061def : Pat<(i64 (bitconvert (v8i8  V64:$Vn))),
7062          (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7063def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
7064          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7065def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
7066          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7067def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
7068          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7069def : Pat<(i64 (bitconvert (v4bf16 V64:$Vn))),
7070          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7071def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
7072          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
7073}
7074def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7075def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7076def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
7077          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7078def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
7079          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7080def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
7081          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7082def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;
7083
7084def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
7085          (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
7086def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
7087          (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
7088def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
7089          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
7090def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
7091          (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
7092def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
7093          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
7094
7095let Predicates = [IsLE] in {
7096def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
7097def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
7098def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))), (v1i64 FPR64:$src)>;
7099def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
7100def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))), (v1i64 FPR64:$src)>;
7101def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
7102}
7103let Predicates = [IsBE] in {
7104def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
7105                             (v1i64 (REV64v2i32 FPR64:$src))>;
7106def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
7107                             (v1i64 (REV64v4i16 FPR64:$src))>;
7108def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))),
7109                             (v1i64 (REV64v8i8 FPR64:$src))>;
7110def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
7111                             (v1i64 (REV64v4i16 FPR64:$src))>;
7112def : Pat<(v1i64 (bitconvert (v4bf16 FPR64:$src))),
7113                             (v1i64 (REV64v4i16 FPR64:$src))>;
7114def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
7115                             (v1i64 (REV64v2i32 FPR64:$src))>;
7116}
7117def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
7118def : Pat<(v1i64 (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;
7119
7120let Predicates = [IsLE] in {
7121def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
7122def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
7123def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))), (v2i32 FPR64:$src)>;
7124def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
7125def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
7126def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
7127def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))), (v2i32 FPR64:$src)>;
7128}
7129let Predicates = [IsBE] in {
7130def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
7131                             (v2i32 (REV64v2i32 FPR64:$src))>;
7132def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
7133                             (v2i32 (REV32v4i16 FPR64:$src))>;
7134def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))),
7135                             (v2i32 (REV32v8i8 FPR64:$src))>;
7136def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))),
7137                             (v2i32 (REV64v2i32 FPR64:$src))>;
7138def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
7139                             (v2i32 (REV64v2i32 FPR64:$src))>;
7140def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
7141                             (v2i32 (REV32v4i16 FPR64:$src))>;
7142def : Pat<(v2i32 (bitconvert (v4bf16 FPR64:$src))),
7143                             (v2i32 (REV32v4i16 FPR64:$src))>;
7144}
7145def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
7146
7147let Predicates = [IsLE] in {
7148def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
7149def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
7150def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))), (v4i16 FPR64:$src)>;
7151def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
7152def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
7153def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
7154}
7155let Predicates = [IsBE] in {
7156def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
7157                             (v4i16 (REV64v4i16 FPR64:$src))>;
7158def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
7159                             (v4i16 (REV32v4i16 FPR64:$src))>;
7160def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))),
7161                             (v4i16 (REV16v8i8 FPR64:$src))>;
7162def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))),
7163                             (v4i16 (REV64v4i16 FPR64:$src))>;
7164def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
7165                             (v4i16 (REV32v4i16 FPR64:$src))>;
7166def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
7167                             (v4i16 (REV64v4i16 FPR64:$src))>;
7168}
7169def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
7170def : Pat<(v4i16 (bitconvert (v4bf16 FPR64:$src))), (v4i16 FPR64:$src)>;
7171
7172let Predicates = [IsLE] in {
7173def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
7174def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
7175def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))), (v4f16 FPR64:$src)>;
7176def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))), (v4f16 FPR64:$src)>;
7177def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
7178def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;
7179
7180def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))), (v4bf16 FPR64:$src)>;
7181def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))), (v4bf16 FPR64:$src)>;
7182def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))), (v4bf16 FPR64:$src)>;
7183def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))), (v4bf16 FPR64:$src)>;
7184def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))), (v4bf16 FPR64:$src)>;
7185def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))), (v4bf16 FPR64:$src)>;
7186}
7187let Predicates = [IsBE] in {
7188def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
7189                             (v4f16 (REV64v4i16 FPR64:$src))>;
7190def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
7191                             (v4f16 (REV32v4i16 FPR64:$src))>;
7192def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))),
7193                             (v4f16 (REV16v8i8 FPR64:$src))>;
7194def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))),
7195                             (v4f16 (REV64v4i16 FPR64:$src))>;
7196def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
7197                             (v4f16 (REV32v4i16 FPR64:$src))>;
7198def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
7199                             (v4f16 (REV64v4i16 FPR64:$src))>;
7200
7201def : Pat<(v4bf16 (bitconvert (v1i64 FPR64:$src))),
7202                             (v4bf16 (REV64v4i16 FPR64:$src))>;
7203def : Pat<(v4bf16 (bitconvert (v2i32 FPR64:$src))),
7204                             (v4bf16 (REV32v4i16 FPR64:$src))>;
7205def : Pat<(v4bf16 (bitconvert (v8i8  FPR64:$src))),
7206                             (v4bf16 (REV16v8i8 FPR64:$src))>;
7207def : Pat<(v4bf16 (bitconvert (f64   FPR64:$src))),
7208                             (v4bf16 (REV64v4i16 FPR64:$src))>;
7209def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))),
7210                             (v4bf16 (REV32v4i16 FPR64:$src))>;
7211def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))),
7212                             (v4bf16 (REV64v4i16 FPR64:$src))>;
7213}
7214def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
7215def : Pat<(v4bf16 (bitconvert (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>;
7216
7217let Predicates = [IsLE] in {
7218def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))), (v8i8  FPR64:$src)>;
7219def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))), (v8i8  FPR64:$src)>;
7220def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))), (v8i8  FPR64:$src)>;
7221def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))), (v8i8  FPR64:$src)>;
7222def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))), (v8i8  FPR64:$src)>;
7223def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))), (v8i8  FPR64:$src)>;
7224def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))), (v8i8  FPR64:$src)>;
7225def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))), (v8i8  FPR64:$src)>;
7226}
7227let Predicates = [IsBE] in {
7228def : Pat<(v8i8  (bitconvert (v1i64 FPR64:$src))),
7229                             (v8i8 (REV64v8i8 FPR64:$src))>;
7230def : Pat<(v8i8  (bitconvert (v2i32 FPR64:$src))),
7231                             (v8i8 (REV32v8i8 FPR64:$src))>;
7232def : Pat<(v8i8  (bitconvert (v4i16 FPR64:$src))),
7233                             (v8i8 (REV16v8i8 FPR64:$src))>;
7234def : Pat<(v8i8  (bitconvert (f64   FPR64:$src))),
7235                             (v8i8 (REV64v8i8 FPR64:$src))>;
7236def : Pat<(v8i8  (bitconvert (v2f32 FPR64:$src))),
7237                             (v8i8 (REV32v8i8 FPR64:$src))>;
7238def : Pat<(v8i8  (bitconvert (v1f64 FPR64:$src))),
7239                             (v8i8 (REV64v8i8 FPR64:$src))>;
7240def : Pat<(v8i8  (bitconvert (v4f16 FPR64:$src))),
7241                             (v8i8 (REV16v8i8 FPR64:$src))>;
7242def : Pat<(v8i8  (bitconvert (v4bf16 FPR64:$src))),
7243                             (v8i8 (REV16v8i8 FPR64:$src))>;
7244}
7245
7246let Predicates = [IsLE] in {
7247def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))), (f64   FPR64:$src)>;
7248def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))), (f64   FPR64:$src)>;
7249def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))), (f64   FPR64:$src)>;
7250def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))), (f64   FPR64:$src)>;
7251def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))), (f64   FPR64:$src)>;
7252def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))), (f64   FPR64:$src)>;
7253}
7254let Predicates = [IsBE] in {
7255def : Pat<(f64   (bitconvert (v2i32 FPR64:$src))),
7256                             (f64 (REV64v2i32 FPR64:$src))>;
7257def : Pat<(f64   (bitconvert (v4i16 FPR64:$src))),
7258                             (f64 (REV64v4i16 FPR64:$src))>;
7259def : Pat<(f64   (bitconvert (v2f32 FPR64:$src))),
7260                             (f64 (REV64v2i32 FPR64:$src))>;
7261def : Pat<(f64   (bitconvert (v8i8  FPR64:$src))),
7262                             (f64 (REV64v8i8 FPR64:$src))>;
7263def : Pat<(f64   (bitconvert (v4f16 FPR64:$src))),
7264                             (f64 (REV64v4i16 FPR64:$src))>;
7265def : Pat<(f64   (bitconvert (v4bf16 FPR64:$src))),
7266                             (f64 (REV64v4i16 FPR64:$src))>;
7267}
7268def : Pat<(f64   (bitconvert (v1i64 FPR64:$src))), (f64   FPR64:$src)>;
7269def : Pat<(f64   (bitconvert (v1f64 FPR64:$src))), (f64   FPR64:$src)>;
7270
7271let Predicates = [IsLE] in {
7272def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
7273def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
7274def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))), (v1f64 FPR64:$src)>;
7275def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
7276def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
7277def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))), (v1f64 FPR64:$src)>;
7278}
7279let Predicates = [IsBE] in {
7280def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
7281                             (v1f64 (REV64v2i32 FPR64:$src))>;
7282def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
7283                             (v1f64 (REV64v4i16 FPR64:$src))>;
7284def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))),
7285                             (v1f64 (REV64v8i8 FPR64:$src))>;
7286def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
7287                             (v1f64 (REV64v2i32 FPR64:$src))>;
7288def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
7289                             (v1f64 (REV64v4i16 FPR64:$src))>;
7290def : Pat<(v1f64 (bitconvert (v4bf16 FPR64:$src))),
7291                             (v1f64 (REV64v4i16 FPR64:$src))>;
7292}
7293def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
7294def : Pat<(v1f64 (bitconvert (f64   FPR64:$src))), (v1f64 FPR64:$src)>;
7295
7296let Predicates = [IsLE] in {
7297def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
7298def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
7299def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))), (v2f32 FPR64:$src)>;
7300def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
7301def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
7302def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
7303def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))), (v2f32 FPR64:$src)>;
7304}
7305let Predicates = [IsBE] in {
7306def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
7307                             (v2f32 (REV64v2i32 FPR64:$src))>;
7308def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
7309                             (v2f32 (REV32v4i16 FPR64:$src))>;
7310def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))),
7311                             (v2f32 (REV32v8i8 FPR64:$src))>;
7312def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
7313                             (v2f32 (REV64v2i32 FPR64:$src))>;
7314def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))),
7315                             (v2f32 (REV64v2i32 FPR64:$src))>;
7316def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
7317                             (v2f32 (REV32v4i16 FPR64:$src))>;
7318def : Pat<(v2f32 (bitconvert (v4bf16 FPR64:$src))),
7319                             (v2f32 (REV32v4i16 FPR64:$src))>;
7320}
7321def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
7322
7323let Predicates = [IsLE] in {
7324def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
7325def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
7326def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
7327def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
7328def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
7329def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
7330def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))), (f128 FPR128:$src)>;
7331def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
7332}
7333let Predicates = [IsBE] in {
7334def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
7335                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
7336def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
7337                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
7338                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
7339def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
7340                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
7341                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
7342def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
7343                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
7344                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
7345def : Pat<(f128 (bitconvert (v8bf16 FPR128:$src))),
7346                            (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
7347                                            (REV64v8i16 FPR128:$src), (i32 8)))>;
7348def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
7349                            (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
7350def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
7351                            (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
7352                                            (REV64v4i32 FPR128:$src), (i32 8)))>;
7353def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
7354                            (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
7355                                            (REV64v16i8 FPR128:$src), (i32 8)))>;
7356}
7357
let Predicates = [IsLE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))),
                             (v2f64 (EXTv16i8 FPR128:$src,
                                              FPR128:$src, (i32 8)))>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
                             (v2f64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8bf16 FPR128:$src))),
                             (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
                             (v2f64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
                             (v2f64 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))),
                             (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                    (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8bf16 FPR128:$src))),
                             (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
                             (v4f32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
                             (v4f32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
                             (v4f32 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))), (v2i64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))),
                             (v2i64 (EXTv16i8 FPR128:$src,
                                              FPR128:$src, (i32 8)))>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
                             (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
                             (v2i64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
                             (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8bf16 FPR128:$src))),
                             (v2i64 (REV64v8i16 FPR128:$src))>;
}
def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))), (v4i32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))),
                             (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                                              (REV64v4i32 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
                             (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
                             (v4i32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
                             (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8bf16 FPR128:$src))),
                             (v4i32 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))),
                             (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
                             (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
                             (v8i16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
                             (v8i16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
                             (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
                             (v8i16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v8bf16 FPR128:$src))), (v8i16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;

def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))), (v8bf16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))), (v8bf16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))),
                             (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
                             (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
                             (v8f16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
                             (v8f16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
                             (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
                             (v8f16 (REV32v8i16 FPR128:$src))>;

def : Pat<(v8bf16 (bitconvert (f128  FPR128:$src))),
                             (v8bf16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                                              (REV64v8i16 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v8bf16 (bitconvert (v2i64 FPR128:$src))),
                             (v8bf16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v4i32 FPR128:$src))),
                             (v8bf16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v16i8 FPR128:$src))),
                             (v8bf16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))),
                             (v8bf16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))),
                             (v8bf16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8bf16 (bitconvert (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))), (v16i8 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))),
                             (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
                                              (REV64v16i8 FPR128:$src),
                                              (i32 8)))>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
                             (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
                             (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
                             (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
                             (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8bf16 FPR128:$src))),
                             (v16i8 (REV16v16i8 FPR128:$src))>;
}

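// Extracting the low 64 bits of a 128-bit vector is just a dsub subregister
// copy, so no instruction is needed.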
def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4bf16 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
           (EXTRACT_SUBREG V128:$Rn, dsub)>;

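// Extracting the high 64 bits instead broadcasts lane 1 with DUP and then
// takes the D subregister of the result.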
def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;

// A 64-bit subvector insert to the first 128-bit vector position
// is a subregister copy that needs no instruction.
multiclass InsertSubvectorUndef<ValueType Ty> {
  def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4bf16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8bf16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
}

defm : InsertSubvectorUndef<i32>;
defm : InsertSubvectorUndef<i64>;
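// insert_subvector may carry either an i32 or an i64 index operand, so the
// multiclass is instantiated once for each index type.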

// Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
// or v2f32.
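// For example (illustrative, not part of this file), summing the two lanes
// of a v2f64 held in q0:
//   %e0 = extractelement <2 x double> %v, i64 0
//   %e1 = extractelement <2 x double> %v, i64 1
//   %s  = fadd double %e0, %e1
// selects to a single "faddp d0, v0.2d".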
def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
                    (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
           (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
                     (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
           (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
    // vector_extract on 64-bit vectors gets promoted to a 128-bit vector,
    // so we match on v4f32 here, not v2f32. This will also catch adding
    // the low two lanes of a true v4f32 vector.
def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
                (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
          (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
def : Pat<(fadd (vector_extract (v8f16 FPR128:$Rn), (i64 0)),
                (vector_extract (v8f16 FPR128:$Rn), (i64 1))),
          (f16 (FADDPv2i16p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;

// Scalar 64-bit shifts in FPR64 registers.
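// For example (illustrative), a call to the llvm.aarch64.neon.sshl intrinsic
// on two i64 values already held in FPR64 registers selects directly to
// "sshl d0, d1, d2".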
def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;

// Patterns for nontemporal/no-allocate stores.
// We have to resort to tricks to turn a single-input store into a store pair,
// because there is no single-input nontemporal store, only STNP.
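// For example (illustrative), a little-endian nontemporal store of a v2i64
// held in q0 to [x0] becomes:
//   mov  d1, v0.d[1]
//   stnp d0, d1, [x0]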
let Predicates = [IsLE] in {
let AddedComplexity = 15 in {
class NTStore128Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR128:$Rt),
        (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
      (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
              (CPYi64 FPR128:$Rt, (i64 1)),
              GPR64sp:$Rn, simm7s8:$offset)>;

def : NTStore128Pat<v2i64>;
def : NTStore128Pat<v4i32>;
def : NTStore128Pat<v8i16>;
def : NTStore128Pat<v16i8>;

class NTStore64Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR64:$Rt),
        (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
      (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
              (CPYi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
              GPR64sp:$Rn, simm7s4:$offset)>;

// FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
def : NTStore64Pat<v1f64>;
def : NTStore64Pat<v1i64>;
def : NTStore64Pat<v2i32>;
def : NTStore64Pat<v4i16>;
def : NTStore64Pat<v8i8>;

def : Pat<(nontemporalstore GPR64:$Rt,
            (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
          (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
                  GPR64sp:$Rn, simm7s4:$offset)>;
} // AddedComplexity=15
} // Predicates = [IsLE]

// Tail call return handling. These are all compiler pseudo-instructions,
// so no encoding information or anything like that.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
  def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  // Indirect tail-call with any register allowed, used by MachineOutliner when
  // this is proven safe.
  // FIXME: If we have to add any more hacks like this, we should instead relax
  // some verifier checks for outlined functions.
  def TCRETURNriALL : Pseudo<(outs), (ins GPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
  // Indirect tail-call limited to registers x16 and x17, the only registers
  // an indirect branch may use when it targets a "BTI c" instruction.
  def TCRETURNriBTI : Pseudo<(outs), (ins rtcGPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
}

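// Illustratively, a sibling call "return f();" is matched below as
// (AArch64tcret tglobaladdr:f, 0) and emitted as a TCRETURNdi pseudo, which
// ends up as a plain "b f" once the epilogue has been laid out.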
def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>,
      Requires<[NotUseBTI]>;
def : Pat<(AArch64tcret rtcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNriBTI rtcGPR64:$dst, imm:$FPDiff)>,
      Requires<[UseBTI]>;
def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;

def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;

// Extracting lane zero is a special case where we can just use a plain
// EXTRACT_SUBREG instruction, which will become FMOV. This is easier for the
// rest of the compiler, especially the register allocator and copy propagation,
// to reason about, so is preferred when it's possible to use it.
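// For instance (illustrative), reading lane 0 of a v2i64 into a GPR becomes
// a single "fmov x0, d0".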
let AddedComplexity = 10 in {
  def : Pat<(i64 (extractelt (v2i64 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, dsub)>;
  def : Pat<(i32 (extractelt (v4i32 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, ssub)>;
  def : Pat<(i32 (extractelt (v2i32 V64:$V), (i64 0))), (EXTRACT_SUBREG V64:$V, ssub)>;
}

// dot_v4i8
class mul_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm, node:$offset),
          (mul (ldop (add node:$Rn, node:$offset)),
               (ldop (add node:$Rm, node:$offset)))>;
class mulz_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm),
          (mul (ldop node:$Rn), (ldop node:$Rm))>;

def load_v4i8 :
  OutPatFrag<(ops node:$R),
             (INSERT_SUBREG
              (v2i32 (IMPLICIT_DEF)),
               (i32 (COPY_TO_REGCLASS (LDRWui node:$R, (i64 0)), FPR32)),
              ssub)>;

class dot_v4i8<Instruction DOT, SDPatternOperator ldop> :
  Pat<(i32 (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 3)),
           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 2)),
           (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 1)),
                (mulz_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm))))),
      (EXTRACT_SUBREG (i64 (DOT (DUPv2i32gpr WZR),
                                (load_v4i8 GPR64sp:$Rn),
                                (load_v4i8 GPR64sp:$Rm))),
                      sub_32)>, Requires<[HasDotProd]>;
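// Illustratively, dot_v4i8 recognizes a fully unrolled four-byte dot product
// such as
//   sum = a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3]
// where every operand is a sign- or zero-extending byte load, fetches each
// group of four bytes with one 32-bit LDR, and selects a single [SU]DOT
// against a zeroed accumulator.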

// dot_v8i8
class ee_v8i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K),
          (v4i16 (extract_subvector (v8i16 (extend node:$V)), node:$K))>;

class mul_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K),
          (mulop (v4i16 (ee_v8i8<extend> node:$M, node:$K)),
                 (v4i16 (ee_v8i8<extend> node:$N, node:$K)))>;

class idot_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 0)),
                 (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 4))))),
           (i64 0)))>;

// vaddv_[su]32 is special: it is lowered to ADDP Vd.2S, Vn.2S, Vm.2S with
// Vn == Vm, and the result is then read from Vd.s[0].
def VADDV_32 : OutPatFrag<(ops node:$R), (ADDPv2i32 node:$R, node:$R)>;

class odot_v8i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (EXTRACT_SUBREG
              (VADDV_32
               (i64 (DOT (DUPv2i32gpr WZR),
                         (v8i8 node:$Vm),
                         (v8i8 node:$Vn)))),
              sub_32)>;

class dot_v8i8<Instruction DOT, SDPatternOperator mulop,
                    SDPatternOperator extend> :
  Pat<(idot_v8i8<mulop, extend> V64:$Vm, V64:$Vn),
      (odot_v8i8<DOT> V64:$Vm, V64:$Vn)>,
  Requires<[HasDotProd]>;
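// Taken together, these fragments match an eight-way i8 dot product written
// as two four-lane widening multiplies summed by AArch64uaddv, and rewrite
// it as one [SU]DOT into a zeroed accumulator followed by an ADDP that
// combines the two 32-bit partial sums.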

// dot_v16i8
class ee_v16i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K1, node:$K2),
          (v4i16 (extract_subvector
           (v8i16 (extend
            (v8i8 (extract_subvector node:$V, node:$K1)))), node:$K2))>;

class mul_v16i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K1, node:$K2),
          (v4i32
           (mulop (v4i16 (ee_v16i8<extend> node:$M, node:$K1, node:$K2)),
                  (v4i16 (ee_v16i8<extend> node:$N, node:$K1, node:$K2))))>;

class idot_v16i8<SDPatternOperator m, SDPatternOperator x> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 0)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 0))),
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 4)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 4)))))),
           (i64 0)))>;

class odot_v16i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (i32 (ADDVv4i32v
              (DOT (DUPv4i32gpr WZR), node:$Vm, node:$Vn)))>;

class dot_v16i8<Instruction DOT, SDPatternOperator mulop,
                SDPatternOperator extend> :
  Pat<(idot_v16i8<mulop, extend> V128:$Vm, V128:$Vn),
      (odot_v16i8<DOT> V128:$Vm, V128:$Vn)>,
  Requires<[HasDotProd]>;

let AddedComplexity = 10 in {
  def : dot_v4i8<SDOTv8i8, sextloadi8>;
  def : dot_v4i8<UDOTv8i8, zextloadi8>;
  def : dot_v8i8<SDOTv8i8, AArch64smull, sext>;
  def : dot_v8i8<UDOTv8i8, AArch64umull, zext>;
  def : dot_v16i8<SDOTv16i8, AArch64smull, sext>;
  def : dot_v16i8<UDOTv16i8, AArch64umull, zext>;

  // FIXME: add patterns to generate vector by element dot product.
  // FIXME: add SVE dot-product patterns.
}

let Predicates = [HasLS64] in {
  def LD64B: LoadStore64B<0b101, "ld64b", (ins GPR64sp:$Rn),
                                          (outs GPR64x8:$Rt)>;
  def ST64B: LoadStore64B<0b001, "st64b", (ins GPR64x8:$Rt, GPR64sp:$Rn),
                                          (outs)>;
  def ST64BV:   Store64BV<0b011, "st64bv">;
  def ST64BV0:  Store64BV<0b010, "st64bv0">;

  class ST64BPattern<Intrinsic intrinsic, Instruction instruction>
    : Pat<(intrinsic GPR64sp:$addr, GPR64:$x0, GPR64:$x1, GPR64:$x2, GPR64:$x3, GPR64:$x4, GPR64:$x5, GPR64:$x6, GPR64:$x7),
          (instruction (REG_SEQUENCE GPR64x8Class, $x0, x8sub_0, $x1, x8sub_1, $x2, x8sub_2, $x3, x8sub_3, $x4, x8sub_4, $x5, x8sub_5, $x6, x8sub_6, $x7, x8sub_7), $addr)>;

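  // The eight 64-bit data operands are glued into a single GPR64x8 tuple with
  // REG_SEQUENCE so the store consumes them as one 64-byte unit in
  // consecutive registers.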
  def : ST64BPattern<int_aarch64_st64b, ST64B>;
  def : ST64BPattern<int_aarch64_st64bv, ST64BV>;
  def : ST64BPattern<int_aarch64_st64bv0, ST64BV0>;
}

include "AArch64InstrAtomics.td"
include "AArch64SVEInstrInfo.td"

include "AArch64InstrGISel.td"
