//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// AArch64 Instruction definitions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// ARM Instruction Predicate Definitions.
//
def HasV8_1a : Predicate<"Subtarget->hasV8_1aOps()">,
               AssemblerPredicate<"HasV8_1aOps", "armv8.1a">;
def HasV8_2a : Predicate<"Subtarget->hasV8_2aOps()">,
               AssemblerPredicate<"HasV8_2aOps", "armv8.2a">;
def HasV8_3a : Predicate<"Subtarget->hasV8_3aOps()">,
               AssemblerPredicate<"HasV8_3aOps", "armv8.3a">;
def HasV8_4a : Predicate<"Subtarget->hasV8_4aOps()">,
               AssemblerPredicate<"HasV8_4aOps", "armv8.4a">;
def HasV8_5a : Predicate<"Subtarget->hasV8_5aOps()">,
               AssemblerPredicate<"HasV8_5aOps", "armv8.5a">;
def HasVH    : Predicate<"Subtarget->hasVH()">,
               AssemblerPredicate<"FeatureVH", "vh">;

def HasLOR   : Predicate<"Subtarget->hasLOR()">,
               AssemblerPredicate<"FeatureLOR", "lor">;

def HasPA    : Predicate<"Subtarget->hasPA()">,
               AssemblerPredicate<"FeaturePA", "pa">;

def HasJS    : Predicate<"Subtarget->hasJS()">,
               AssemblerPredicate<"FeatureJS", "jsconv">;

def HasCCIDX : Predicate<"Subtarget->hasCCIDX()">,
               AssemblerPredicate<"FeatureCCIDX", "ccidx">;

def HasComplxNum : Predicate<"Subtarget->hasComplxNum()">,
                   AssemblerPredicate<"FeatureComplxNum", "complxnum">;

def HasNV    : Predicate<"Subtarget->hasNV()">,
               AssemblerPredicate<"FeatureNV", "nv">;

def HasRASv8_4 : Predicate<"Subtarget->hasRASv8_4()">,
                 AssemblerPredicate<"FeatureRASv8_4", "rasv8_4">;

def HasMPAM  : Predicate<"Subtarget->hasMPAM()">,
               AssemblerPredicate<"FeatureMPAM", "mpam">;

def HasDIT   : Predicate<"Subtarget->hasDIT()">,
               AssemblerPredicate<"FeatureDIT", "dit">;

def HasTRACEV8_4 : Predicate<"Subtarget->hasTRACEV8_4()">,
                   AssemblerPredicate<"FeatureTRACEV8_4", "tracev8.4">;

def HasAM    : Predicate<"Subtarget->hasAM()">,
               AssemblerPredicate<"FeatureAM", "am">;

def HasSEL2  : Predicate<"Subtarget->hasSEL2()">,
               AssemblerPredicate<"FeatureSEL2", "sel2">;

def HasTLB_RMI : Predicate<"Subtarget->hasTLB_RMI()">,
                 AssemblerPredicate<"FeatureTLB_RMI", "tlb-rmi">;

def HasFMI   : Predicate<"Subtarget->hasFMI()">,
               AssemblerPredicate<"FeatureFMI", "fmi">;

def HasRCPC_IMMO : Predicate<"Subtarget->hasRCPCImm()">,
                   AssemblerPredicate<"FeatureRCPC_IMMO", "rcpc-immo">;

def HasFPARMv8 : Predicate<"Subtarget->hasFPARMv8()">,
                 AssemblerPredicate<"FeatureFPARMv8", "fp-armv8">;
def HasNEON  : Predicate<"Subtarget->hasNEON()">,
               AssemblerPredicate<"FeatureNEON", "neon">;
def HasCrypto : Predicate<"Subtarget->hasCrypto()">,
                AssemblerPredicate<"FeatureCrypto", "crypto">;
def HasSM4   : Predicate<"Subtarget->hasSM4()">,
               AssemblerPredicate<"FeatureSM4", "sm4">;
def HasSHA3  : Predicate<"Subtarget->hasSHA3()">,
               AssemblerPredicate<"FeatureSHA3", "sha3">;
def HasSHA2  : Predicate<"Subtarget->hasSHA2()">,
               AssemblerPredicate<"FeatureSHA2", "sha2">;
def HasAES   : Predicate<"Subtarget->hasAES()">,
               AssemblerPredicate<"FeatureAES", "aes">;
def HasDotProd :
Predicate<"Subtarget->hasDotProd()">, 89 AssemblerPredicate<"FeatureDotProd", "dotprod">; 90def HasCRC : Predicate<"Subtarget->hasCRC()">, 91 AssemblerPredicate<"FeatureCRC", "crc">; 92def HasLSE : Predicate<"Subtarget->hasLSE()">, 93 AssemblerPredicate<"FeatureLSE", "lse">; 94def HasRAS : Predicate<"Subtarget->hasRAS()">, 95 AssemblerPredicate<"FeatureRAS", "ras">; 96def HasRDM : Predicate<"Subtarget->hasRDM()">, 97 AssemblerPredicate<"FeatureRDM", "rdm">; 98def HasPerfMon : Predicate<"Subtarget->hasPerfMon()">; 99def HasFullFP16 : Predicate<"Subtarget->hasFullFP16()">, 100 AssemblerPredicate<"FeatureFullFP16", "fullfp16">; 101def HasFP16FML : Predicate<"Subtarget->hasFP16FML()">, 102 AssemblerPredicate<"FeatureFP16FML", "fp16fml">; 103def HasSPE : Predicate<"Subtarget->hasSPE()">, 104 AssemblerPredicate<"FeatureSPE", "spe">; 105def HasFuseAES : Predicate<"Subtarget->hasFuseAES()">, 106 AssemblerPredicate<"FeatureFuseAES", 107 "fuse-aes">; 108def HasSVE : Predicate<"Subtarget->hasSVE()">, 109 AssemblerPredicate<"FeatureSVE", "sve">; 110def HasSVE2 : Predicate<"Subtarget->hasSVE2()">, 111 AssemblerPredicate<"FeatureSVE2", "sve2">; 112def HasSVE2AES : Predicate<"Subtarget->hasSVE2AES()">, 113 AssemblerPredicate<"FeatureSVE2AES", "sve2-aes">; 114def HasSVE2SM4 : Predicate<"Subtarget->hasSVE2SM4()">, 115 AssemblerPredicate<"FeatureSVE2SM4", "sve2-sm4">; 116def HasSVE2SHA3 : Predicate<"Subtarget->hasSVE2SHA3()">, 117 AssemblerPredicate<"FeatureSVE2SHA3", "sve2-sha3">; 118def HasSVE2BitPerm : Predicate<"Subtarget->hasSVE2BitPerm()">, 119 AssemblerPredicate<"FeatureSVE2BitPerm", "sve2-bitperm">; 120def HasRCPC : Predicate<"Subtarget->hasRCPC()">, 121 AssemblerPredicate<"FeatureRCPC", "rcpc">; 122def HasAltNZCV : Predicate<"Subtarget->hasAlternativeNZCV()">, 123 AssemblerPredicate<"FeatureAltFPCmp", "altnzcv">; 124def HasFRInt3264 : Predicate<"Subtarget->hasFRInt3264()">, 125 AssemblerPredicate<"FeatureFRInt3264", "frint3264">; 126def HasSB : Predicate<"Subtarget->hasSB()">, 127 AssemblerPredicate<"FeatureSB", "sb">; 128def HasPredRes : Predicate<"Subtarget->hasPredRes()">, 129 AssemblerPredicate<"FeaturePredRes", "predres">; 130def HasCCDP : Predicate<"Subtarget->hasCCDP()">, 131 AssemblerPredicate<"FeatureCacheDeepPersist", "ccdp">; 132def HasBTI : Predicate<"Subtarget->hasBTI()">, 133 AssemblerPredicate<"FeatureBranchTargetId", "bti">; 134def HasMTE : Predicate<"Subtarget->hasMTE()">, 135 AssemblerPredicate<"FeatureMTE", "mte">; 136def IsLE : Predicate<"Subtarget->isLittleEndian()">; 137def IsBE : Predicate<"!Subtarget->isLittleEndian()">; 138def IsWindows : Predicate<"Subtarget->isTargetWindows()">; 139def UseAlternateSExtLoadCVTF32 140 : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">; 141 142def UseNegativeImmediates 143 : Predicate<"false">, AssemblerPredicate<"!FeatureNoNegativeImmediates", 144 "NegativeImmediates">; 145 146def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER", 147 SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, 148 SDTCisInt<1>]>>; 149 150 151//===----------------------------------------------------------------------===// 152// AArch64-specific DAG Nodes. 
//

// SDTBinaryArithWithFlagsOut - RES1, FLAGS = op LHS, RHS
def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
                                               [SDTCisSameAs<0, 2>,
                                                SDTCisSameAs<0, 3>,
                                                SDTCisInt<0>, SDTCisVT<1, i32>]>;

// SDTBinaryArithWithFlagsIn - RES1, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
                                              [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
                                               SDTCisInt<0>,
                                               SDTCisVT<3, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES1, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                                 [SDTCisSameAs<0, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisInt<0>,
                                                  SDTCisVT<1, i32>,
                                                  SDTCisVT<4, i32>]>;

def SDT_AArch64Brcond : SDTypeProfile<0, 3,
                                      [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
                                       SDTCisVT<2, i32>]>;
def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
                                          SDTCisVT<2, OtherVT>]>;


def SDT_AArch64CSel : SDTypeProfile<1, 4,
                                    [SDTCisSameAs<0, 1>,
                                     SDTCisSameAs<0, 2>,
                                     SDTCisInt<3>,
                                     SDTCisVT<4, i32>]>;
def SDT_AArch64CCMP : SDTypeProfile<1, 5,
                                    [SDTCisVT<0, i32>,
                                     SDTCisInt<1>,
                                     SDTCisSameAs<1, 2>,
                                     SDTCisInt<3>,
                                     SDTCisInt<4>,
                                     SDTCisVT<5, i32>]>;
def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
                                     [SDTCisVT<0, i32>,
                                      SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>,
                                      SDTCisInt<3>,
                                      SDTCisInt<4>,
                                      SDTCisVT<5, i32>]>;
def SDT_AArch64FCmp : SDTypeProfile<0, 2,
                                    [SDTCisFP<0>,
                                     SDTCisSameAs<0, 1>]>;
def SDT_AArch64Dup : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDT_AArch64DupLane : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
def SDT_AArch64Zip : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                          SDTCisSameAs<0, 1>,
                                          SDTCisSameAs<0, 2>]>;
def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisInt<2>, SDTCisInt<3>]>;
def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                            SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;

def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
def SDT_AArch64fcmp : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>]>;
def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>,
                                             SDTCisSameAs<0,3>]>;
def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;

def SDT_AArch64ITOF : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;

def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
                                                   SDTCisPtrTy<1>]>;

// Generates the general dynamic sequences, i.e.
//  adrp  x0, :tlsdesc:var
//  ldr   x1, [x0, #:tlsdesc_lo12:var]
//  add   x0, x0, #:tlsdesc_lo12:var
//  .tlsdesccall var
//  blr   x1

// (the TPIDR_EL0 offset is put directly in X0, hence no "result" here)
// number of operands (the variable)
def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0, 1,
                                              [SDTCisPtrTy<0>]>;

def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
                                            [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
                                             SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
                                             SDTCisSameAs<1, 4>]>;


// Node definitions.
def AArch64adrp : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
def AArch64adr : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
def AArch64addlow : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
def AArch64LOADgot : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
                                  SDCallSeqStart<[ SDTCisVT<0, i32>,
                                                   SDTCisVT<1, i32> ]>,
                                  [SDNPHasChain, SDNPOutGlue]>;
def AArch64callseq_end : SDNode<"ISD::CALLSEQ_END",
                                SDCallSeqEnd<[ SDTCisVT<0, i32>,
                                               SDTCisVT<1, i32> ]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def AArch64call : SDNode<"AArch64ISD::CALL",
                         SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                         [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                          SDNPVariadic]>;
def AArch64brcond : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
                           [SDNPHasChain]>;
def AArch64cbz : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
                        [SDNPHasChain]>;
def AArch64cbnz : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
                         [SDNPHasChain]>;
def AArch64tbz : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
                        [SDNPHasChain]>;
def AArch64tbnz : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
                         [SDNPHasChain]>;


def AArch64csel : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
def AArch64csinv : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
def AArch64csneg : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
def AArch64csinc : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
def AArch64retflag : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
                            [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def AArch64adc : SDNode<"AArch64ISD::ADC", SDTBinaryArithWithFlagsIn>;
def AArch64sbc : SDNode<"AArch64ISD::SBC", SDTBinaryArithWithFlagsIn>;
def AArch64add_flag : SDNode<"AArch64ISD::ADDS", SDTBinaryArithWithFlagsOut,
                             [SDNPCommutative]>;
def AArch64sub_flag : SDNode<"AArch64ISD::SUBS", SDTBinaryArithWithFlagsOut>;
def AArch64and_flag : SDNode<"AArch64ISD::ANDS", SDTBinaryArithWithFlagsOut,
                             [SDNPCommutative]>;
def AArch64adc_flag : SDNode<"AArch64ISD::ADCS", SDTBinaryArithWithFlagsInOut>;
def AArch64sbc_flag : SDNode<"AArch64ISD::SBCS", SDTBinaryArithWithFlagsInOut>;

def AArch64ccmp : SDNode<"AArch64ISD::CCMP", SDT_AArch64CCMP>;
def AArch64ccmn : SDNode<"AArch64ISD::CCMN", SDT_AArch64CCMP>;
def AArch64fccmp : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;

def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;

def AArch64fcmp : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;

def AArch64dup : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
def AArch64duplane8 : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;

def AArch64zip1 : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
def AArch64zip2 :
SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>; 315def AArch64uzp1 : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>; 316def AArch64uzp2 : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>; 317def AArch64trn1 : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>; 318def AArch64trn2 : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>; 319 320def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>; 321def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>; 322def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>; 323def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>; 324def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>; 325def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>; 326def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>; 327 328def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>; 329def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>; 330def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>; 331def AArch64ext : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>; 332 333def AArch64vashr : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>; 334def AArch64vlshr : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>; 335def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>; 336def AArch64sqshli : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>; 337def AArch64uqshli : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>; 338def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>; 339def AArch64srshri : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>; 340def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>; 341 342def AArch64not: SDNode<"AArch64ISD::NOT", SDT_AArch64unvec>; 343def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>; 344def AArch64bsl: SDNode<"AArch64ISD::BSL", SDT_AArch64trivec>; 345 346def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>; 347def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>; 348def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>; 349def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>; 350def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>; 351 352def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>; 353def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>; 354def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>; 355 356def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>; 357def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>; 358def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>; 359def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>; 360def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>; 361def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS), 362 (AArch64not (AArch64cmeqz (and node:$LHS, node:$RHS)))>; 363 364def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>; 365def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>; 366def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>; 367def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>; 368def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>; 369 370def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>; 371def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>; 372 373def AArch64neg : SDNode<"AArch64ISD::NEG", SDT_AArch64unvec>; 374 375def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET, 
                         [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

def AArch64Prefetch : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
                             [SDNPHasChain, SDNPSideEffect]>;

def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;

def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
                                    SDT_AArch64TLSDescCallSeq,
                                    [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
                                     SDNPVariadic]>;


def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
                                 SDT_AArch64WrapperLarge>;

def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;

def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                           SDTCisSameAs<1, 2>]>;
def AArch64smull : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
def AArch64umull : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;

def AArch64frecpe : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
def AArch64frecps : SDNode<"AArch64ISD::FRECPS", SDTFPBinOp>;
def AArch64frsqrte : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
def AArch64frsqrts : SDNode<"AArch64ISD::FRSQRTS", SDTFPBinOp>;

def AArch64saddv : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
def AArch64uaddv : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
def AArch64sminv : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
def AArch64uminv : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
def AArch64smaxv : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
def AArch64umaxv : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;

def SDT_AArch64SETTAG : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
def AArch64stg : SDNode<"AArch64ISD::STG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stzg : SDNode<"AArch64ISD::STZG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64st2g : SDNode<"AArch64ISD::ST2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stz2g : SDNode<"AArch64ISD::STZ2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//

// AArch64 Instruction Predicate Definitions.
// We could compute these on a per-module basis but doing so requires accessing
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
  def ForCodeSize : Predicate<"MF->getFunction().hasOptSize()">;
  def NotForCodeSize : Predicate<"!MF->getFunction().hasOptSize()">;
  // Avoid generating STRQro if it is slow, unless we're optimizing for code size.
  def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction().hasOptSize()">;

  def UseBTI : Predicate<[{ MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>;
  def NotUseBTI : Predicate<[{ !MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>;
}

include "AArch64InstrFormats.td"
include "SVEInstrFormats.td"

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Miscellaneous instructions.
//===----------------------------------------------------------------------===//

let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
// We set Sched to the empty list because we expect these instructions to
// simply get removed in most cases.
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(AArch64callseq_start timm:$amt1, timm:$amt2)]>,
                       Sched<[]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
                     Sched<[]>;
} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1

let isReMaterializable = 1, isCodeGenOnly = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions. When that changes, they can be
// removed, along with the AArch64Wrapper node.

let AddedComplexity = 10 in
def LOADgot : Pseudo<(outs GPR64:$dst), (ins i64imm:$addr),
                     [(set GPR64:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
              Sched<[WriteLDAdr]>;

// The MOVaddr instruction should match only when the add is not folded
// into a load or store address.
def MOVaddr
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
                                              tglobaladdr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrJT
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
                                              tjumptable:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrCP
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
                                              tconstpool:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrBA
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
                                              tblockaddress:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrTLS
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
                                              tglobaltlsaddr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrEXT
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
                                              texternalsym:$low))]>,
      Sched<[WriteAdrAdr]>;
// Normally AArch64addlow either gets folded into a following ldr/str,
// or together with an adrp into MOVaddr above. For cases with TLS, it
// might appear without either of them, so allow lowering it into a plain
// add.
def ADDlowTLS
    : Pseudo<(outs GPR64:$dst), (ins GPR64:$src, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow GPR64:$src,
                                              tglobaltlsaddr:$low))]>,
      Sched<[WriteAdr]>;

} // isReMaterializable, isCodeGenOnly

def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
          (LOADgot tglobaltlsaddr:$addr)>;

def : Pat<(AArch64LOADgot texternalsym:$addr),
          (LOADgot texternalsym:$addr)>;

def : Pat<(AArch64LOADgot tconstpool:$addr),
          (LOADgot tconstpool:$addr)>;

// 32-bit jump table destination is actually only 2 instructions since we can
// use the table itself as a PC-relative base. But optimization occurs after
// branch relaxation so be pessimistic.
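// A sketch of the usual JumpTableDest32 expansion (an assumption inferred
// from the comment above; the actual code is emitted by the pseudo expander):
// the compressed entries hold offsets from the table base, so
//   ldrsw $dst, [$table, $entry, lsl #2]
//   add   $dst, $table, $dst
// computes the destination in two instructions, while Size = 12 below stays
// pessimistic because branch relaxation has already run.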
let Size = 12, Constraints = "@earlyclobber $dst,@earlyclobber $scratch" in {
def JumpTableDest32 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest16 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest8 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                            (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                     Sched<[]>;
}

// Space-consuming pseudo to aid testing of placement and reachability
// algorithms. Immediate operand is the number of bytes this "instruction"
// occupies; register operands can be used to enforce dependency and constrain
// the scheduler.
let hasSideEffects = 1, mayLoad = 1, mayStore = 1 in
def SPACE : Pseudo<(outs GPR64:$Rd), (ins i32imm:$size, GPR64:$Rn),
                   [(set GPR64:$Rd, (int_aarch64_space imm:$size, GPR64:$Rn))]>,
            Sched<[]>;

let hasSideEffects = 1, isCodeGenOnly = 1 in {
  def SpeculationSafeValueX
      : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>;
  def SpeculationSafeValueW
      : Pseudo<(outs GPR32:$dst), (ins GPR32:$src), []>, Sched<[]>;
}


//===----------------------------------------------------------------------===//
// System instructions.
//===----------------------------------------------------------------------===//

def HINT : HintI<"hint">;
def : InstAlias<"nop", (HINT 0b000)>;
def : InstAlias<"yield", (HINT 0b001)>;
def : InstAlias<"wfe", (HINT 0b010)>;
def : InstAlias<"wfi", (HINT 0b011)>;
def : InstAlias<"sev", (HINT 0b100)>;
def : InstAlias<"sevl", (HINT 0b101)>;
def : InstAlias<"esb", (HINT 0b10000)>, Requires<[HasRAS]>;
def : InstAlias<"csdb", (HINT 20)>;
def : InstAlias<"bti", (HINT 32)>, Requires<[HasBTI]>;
def : InstAlias<"bti $op", (HINT btihint_op:$op)>, Requires<[HasBTI]>;

// v8.2a Statistical Profiling extension
def : InstAlias<"psb $op", (HINT psbhint_op:$op)>, Requires<[HasSPE]>;

// As far as LLVM is concerned this writes to the system's exclusive monitors.
let mayLoad = 1, mayStore = 1 in
def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;

// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
// model patterns with sufficiently fine granularity.
let mayLoad = ?, mayStore = ?
in {
def DMB : CRmSystemI<barrier_op, 0b101, "dmb",
                     [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;

def DSB : CRmSystemI<barrier_op, 0b100, "dsb",
                     [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;

def ISB : CRmSystemI<barrier_op, 0b110, "isb",
                     [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;

def TSB : CRmSystemI<barrier_op, 0b010, "tsb", []> {
  let CRm = 0b0010;
  let Inst{12} = 0;
  let Predicates = [HasTRACEV8_4];
}
}

// ARMv8.2-A Dot Product
let Predicates = [HasDotProd] in {
defm SDOT : SIMDThreeSameVectorDot<0, "sdot", int_aarch64_neon_sdot>;
defm UDOT : SIMDThreeSameVectorDot<1, "udot", int_aarch64_neon_udot>;
defm SDOTlane : SIMDThreeSameVectorDotIndex<0, "sdot", int_aarch64_neon_sdot>;
defm UDOTlane : SIMDThreeSameVectorDotIndex<1, "udot", int_aarch64_neon_udot>;
}

// ARMv8.2-A FP16 Fused Multiply-Add Long
let Predicates = [HasNEON, HasFP16FML] in {
defm FMLAL : SIMDThreeSameVectorFML<0, 1, 0b001, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSL : SIMDThreeSameVectorFML<0, 1, 0b101, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2 : SIMDThreeSameVectorFML<1, 0, 0b001, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2 : SIMDThreeSameVectorFML<1, 0, 0b101, "fmlsl2", int_aarch64_neon_fmlsl2>;
defm FMLALlane : SIMDThreeSameVectorFMLIndex<0, 0b0000, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSLlane : SIMDThreeSameVectorFMLIndex<0, 0b0100, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1000, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1100, "fmlsl2", int_aarch64_neon_fmlsl2>;
}

// Armv8.2-A Crypto extensions
let Predicates = [HasSHA3] in {
def SHA512H : CryptoRRRTied<0b0, 0b00, "sha512h">;
def SHA512H2 : CryptoRRRTied<0b0, 0b01, "sha512h2">;
def SHA512SU0 : CryptoRRTied_2D<0b0, 0b00, "sha512su0">;
def SHA512SU1 : CryptoRRRTied_2D<0b0, 0b10, "sha512su1">;
def RAX1 : CryptoRRR_2D<0b0, 0b11, "rax1">;
def EOR3 : CryptoRRRR_16B<0b00, "eor3">;
def BCAX : CryptoRRRR_16B<0b01, "bcax">;
def XAR : CryptoRRRi6<"xar">;
} // HasSHA3

let Predicates = [HasSM4] in {
def SM3TT1A : CryptoRRRi2Tied<0b0, 0b00, "sm3tt1a">;
def SM3TT1B : CryptoRRRi2Tied<0b0, 0b01, "sm3tt1b">;
def SM3TT2A : CryptoRRRi2Tied<0b0, 0b10, "sm3tt2a">;
def SM3TT2B : CryptoRRRi2Tied<0b0, 0b11, "sm3tt2b">;
def SM3SS1 : CryptoRRRR_4S<0b10, "sm3ss1">;
def SM3PARTW1 : CryptoRRRTied_4S<0b1, 0b00, "sm3partw1">;
def SM3PARTW2 : CryptoRRRTied_4S<0b1, 0b01, "sm3partw2">;
def SM4ENCKEY : CryptoRRR_4S<0b1, 0b10, "sm4ekey">;
def SM4E : CryptoRRTied_4S<0b0, 0b01, "sm4e">;
} // HasSM4

let Predicates = [HasRCPC] in {
  // v8.3 Release Consistent Processor Consistent support, optional in v8.2.
  def LDAPRB : RCPCLoad<0b00, "ldaprb", GPR32>;
  def LDAPRH : RCPCLoad<0b01, "ldaprh", GPR32>;
  def LDAPRW : RCPCLoad<0b10, "ldapr", GPR32>;
  def LDAPRX : RCPCLoad<0b11, "ldapr", GPR64>;
}

// v8.3a complex add and multiply-accumulate. No predicate here, that is done
// inside the multiclass as the FP16 versions need different predicates.
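// The rotation operand distinguishes the two: FCADD only accepts the odd
// rotations #90 and #270 (complexrotateopodd), while FCMLA accepts #0, #90,
// #180 and #270 (complexrotateop), e.g. "fcadd v0.4s, v1.4s, v2.4s, #90".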
defm FCMLA : SIMDThreeSameVectorTiedComplexHSD<1, 0b110, complexrotateop,
                                               "fcmla", null_frag>;
defm FCADD : SIMDThreeSameVectorComplexHSD<1, 0b111, complexrotateopodd,
                                           "fcadd", null_frag>;
defm FCMLA : SIMDIndexedTiedComplexHSD<1, 0, 1, complexrotateop, "fcmla",
                                       null_frag>;

// v8.3a Pointer Authentication
// These instructions inhabit part of the hint space and so can be used for
// armv8 targets.
let Uses = [LR], Defs = [LR] in {
  def PACIAZ : SystemNoOperands<0b000, "paciaz">;
  def PACIBZ : SystemNoOperands<0b010, "pacibz">;
  def AUTIAZ : SystemNoOperands<0b100, "autiaz">;
  def AUTIBZ : SystemNoOperands<0b110, "autibz">;
}
let Uses = [LR, SP], Defs = [LR] in {
  def PACIASP : SystemNoOperands<0b001, "paciasp">;
  def PACIBSP : SystemNoOperands<0b011, "pacibsp">;
  def AUTIASP : SystemNoOperands<0b101, "autiasp">;
  def AUTIBSP : SystemNoOperands<0b111, "autibsp">;
}
let Uses = [X16, X17], Defs = [X17], CRm = 0b0001 in {
  def PACIA1716 : SystemNoOperands<0b000, "pacia1716">;
  def PACIB1716 : SystemNoOperands<0b010, "pacib1716">;
  def AUTIA1716 : SystemNoOperands<0b100, "autia1716">;
  def AUTIB1716 : SystemNoOperands<0b110, "autib1716">;
}

let Uses = [LR], Defs = [LR], CRm = 0b0000 in {
  def XPACLRI : SystemNoOperands<0b111, "xpaclri">;
}

// These pointer authentication instructions require armv8.3a.
let Predicates = [HasPA] in {
  multiclass SignAuth<bits<3> prefix, bits<3> prefix_z, string asm> {
    def IA : SignAuthOneData<prefix, 0b00, !strconcat(asm, "ia")>;
    def IB : SignAuthOneData<prefix, 0b01, !strconcat(asm, "ib")>;
    def DA : SignAuthOneData<prefix, 0b10, !strconcat(asm, "da")>;
    def DB : SignAuthOneData<prefix, 0b11, !strconcat(asm, "db")>;
    def IZA : SignAuthZero<prefix_z, 0b00, !strconcat(asm, "iza")>;
    def DZA : SignAuthZero<prefix_z, 0b10, !strconcat(asm, "dza")>;
    def IZB : SignAuthZero<prefix_z, 0b01, !strconcat(asm, "izb")>;
    def DZB : SignAuthZero<prefix_z, 0b11, !strconcat(asm, "dzb")>;
  }

  defm PAC : SignAuth<0b000, 0b010, "pac">;
  defm AUT : SignAuth<0b001, 0b011, "aut">;

  def XPACI : SignAuthZero<0b100, 0b00, "xpaci">;
  def XPACD : SignAuthZero<0b100, 0b01, "xpacd">;
  def PACGA : SignAuthTwoOperand<0b1100, "pacga", null_frag>;

  // Combined Instructions
  def BRAA : AuthBranchTwoOperands<0, 0, "braa">;
  def BRAB : AuthBranchTwoOperands<0, 1, "brab">;
  def BLRAA : AuthBranchTwoOperands<1, 0, "blraa">;
  def BLRAB : AuthBranchTwoOperands<1, 1, "blrab">;

  def BRAAZ : AuthOneOperand<0b000, 0, "braaz">;
  def BRABZ : AuthOneOperand<0b000, 1, "brabz">;
  def BLRAAZ : AuthOneOperand<0b001, 0, "blraaz">;
  def BLRABZ : AuthOneOperand<0b001, 1, "blrabz">;

  let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
    def RETAA : AuthReturn<0b010, 0, "retaa">;
    def RETAB : AuthReturn<0b010, 1, "retab">;
    def ERETAA : AuthReturn<0b100, 0, "eretaa">;
    def ERETAB : AuthReturn<0b100, 1, "eretab">;
  }

  defm LDRAA : AuthLoad<0, "ldraa", simm10Scaled>;
  defm LDRAB : AuthLoad<1, "ldrab", simm10Scaled>;

}

// v8.3a floating point conversion for JavaScript
let Predicates = [HasJS, HasFPARMv8] in
def FJCVTZS : BaseFPToIntegerUnscaled<0b01, 0b11, 0b110, FPR64, GPR32,
                                      "fjcvtzs",
                                      [(set GPR32:$Rd,
                                            (int_aarch64_fjcvtzs FPR64:$Rn))]> {
  let Inst{31} = 0;
} // HasJS, HasFPARMv8

// v8.4 Flag manipulation instructions
let Predicates = [HasFMI]
in {
def CFINV : SimpleSystemI<0, (ins), "cfinv", "">, Sched<[WriteSys]> {
  let Inst{20-5} = 0b0000001000000000;
}
def SETF8 : BaseFlagManipulation<0, 0, (ins GPR32:$Rn), "setf8", "{\t$Rn}">;
def SETF16 : BaseFlagManipulation<0, 1, (ins GPR32:$Rn), "setf16", "{\t$Rn}">;
def RMIF : FlagRotate<(ins GPR64:$Rn, uimm6:$imm, imm0_15:$mask), "rmif",
                      "{\t$Rn, $imm, $mask}">;
} // HasFMI

// v8.5 flag manipulation instructions
let Predicates = [HasAltNZCV], Uses = [NZCV], Defs = [NZCV] in {

def XAFLAG : PstateWriteSimple<(ins), "xaflag", "">, Sched<[WriteSys]> {
  let Inst{18-16} = 0b000;
  let Inst{11-8} = 0b0000;
  let Unpredictable{11-8} = 0b1111;
  let Inst{7-5} = 0b001;
}

def AXFLAG : PstateWriteSimple<(ins), "axflag", "">, Sched<[WriteSys]> {
  let Inst{18-16} = 0b000;
  let Inst{11-8} = 0b0000;
  let Unpredictable{11-8} = 0b1111;
  let Inst{7-5} = 0b010;
}
} // HasAltNZCV


// Armv8.5-A speculation barrier
def SB : SimpleSystemI<0, (ins), "sb", "">, Sched<[]> {
  let Inst{20-5} = 0b0001100110000111;
  let Unpredictable{11-8} = 0b1111;
  let Predicates = [HasSB];
  let hasSideEffects = 1;
}

def : InstAlias<"clrex", (CLREX 0xf)>;
def : InstAlias<"isb", (ISB 0xf)>;
def : InstAlias<"ssbb", (DSB 0)>;
def : InstAlias<"pssbb", (DSB 4)>;

def MRS : MRSI;
def MSR : MSRI;
def MSRpstateImm1 : MSRpstateImm0_1;
def MSRpstateImm4 : MSRpstateImm0_15;

// The thread pointer (on Linux, at least, where this has been implemented) is
// TPIDR_EL0.
def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
                        [(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;

let Uses = [ X9 ], Defs = [ X16, X17, LR, NZCV ] in {
def HWASAN_CHECK_MEMACCESS : Pseudo<
    (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
    [(int_hwasan_check_memaccess X9, GPR64noip:$ptr, (i32 imm:$accessinfo))]>,
    Sched<[]>;
}

// The cycle counter PMC register is PMCCNTR_EL0.
let Predicates = [HasPerfMon] in
def : Pat<(readcyclecounter), (MRS 0xdce8)>;

// FPCR register
def : Pat<(i64 (int_aarch64_get_fpcr)), (MRS 0xda20)>;

// Generic system instructions
def SYSxt : SystemXtI<0, "sys">;
def SYSLxt : SystemLXtI<1, "sysl">;

def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
                (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
                       sys_cr_op:$Cm, imm0_7:$op2, XZR)>;

//===----------------------------------------------------------------------===//
// Move immediate instructions.
//===----------------------------------------------------------------------===//

defm MOVK : InsertImmediate<0b11, "movk">;
defm MOVN : MoveImmediate<0b00, "movn">;

let PostEncoderMethod = "fixMOVZ" in
defm MOVZ : MoveImmediate<0b10, "movz">;

// First group of aliases covers an implicit "lsl #0".
def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, imm0_65535:$imm, 0), 0>;
def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, imm0_65535:$imm, 0), 0>;
def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, imm0_65535:$imm, 0)>;
def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, imm0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, imm0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, imm0_65535:$imm, 0)>;

// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
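// For example, "movz x0, #:abs_g3:sym" requests bits [63:48] of sym,
// matching the "lsl #48" form below; the lower _gN suffixes select the
// correspondingly lower 16-bit chunks of the symbol's value.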
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;

def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;

def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g3:$sym, 48), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g2:$sym, 32), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g1:$sym, 16), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g0:$sym, 0), 0>;

def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;

def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;

def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g1:$sym, 16), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g0:$sym, 0), 0>;

// Final group of aliases covers true "mov $Rd, $imm" cases.
multiclass movw_mov_alias<string basename, Instruction INST, RegisterClass GPR,
                          int width, int shift> {
  def _asmoperand : AsmOperandClass {
    let Name = basename # width # "_lsl" # shift # "MovAlias";
    let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
                               # shift # ">";
    let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
  }

  def _movimm : Operand<i32> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
  }

  def : InstAlias<"mov $Rd, $imm",
                  (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
}

defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;

defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;

defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;

defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;

let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
    isAsCheapAsAMove = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions. When that changes, we can select
// directly to the real instructions and get rid of these pseudos.
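// These pseudos are typically expanded after register allocation into the
// shortest equivalent MOVZ/MOVN + MOVK sequence (or a single ORR with a
// logical immediate); e.g. mov x0, #0x12340000ffff becomes
// movz x0, #0xffff; movk x0, #0x1234, lsl #32.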
def MOVi32imm
    : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
             [(set GPR32:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
def MOVi64imm
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
             [(set GPR64:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
} // isReMaterializable, isCodeGenOnly

// If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
// eventual expansion code fewer bits to worry about getting right. Marshalling
// the types is a little tricky though:
def i64imm_32bit : ImmLeaf<i64, [{
  return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
}]>;

def s64imm_32bit : ImmLeaf<i64, [{
  int64_t Imm64 = static_cast<int64_t>(Imm);
  return Imm64 >= std::numeric_limits<int32_t>::min() &&
         Imm64 <= std::numeric_limits<int32_t>::max();
}]>;

def trunc_imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def gi_trunc_imm : GICustomOperandRenderer<"renderTruncImm">,
                   GISDNodeXFormEquiv<trunc_imm>;

def : Pat<(i64 i64imm_32bit:$src),
          (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;

// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
  return CurDAG->getTargetConstant(
      N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
  return CurDAG->getTargetConstant(
      N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
}]>;


def : Pat<(f32 fpimm:$in),
          (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
def : Pat<(f64 fpimm:$in),
          (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;


// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
// sequences.
def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
                               tglobaladdr:$g1, tglobaladdr:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g0, 0),
                                  tglobaladdr:$g1, 16),
                          tglobaladdr:$g2, 32),
                  tglobaladdr:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
                               tblockaddress:$g1, tblockaddress:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g0, 0),
                                  tblockaddress:$g1, 16),
                          tblockaddress:$g2, 32),
                  tblockaddress:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
                               tconstpool:$g1, tconstpool:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g0, 0),
                                  tconstpool:$g1, 16),
                          tconstpool:$g2, 32),
                  tconstpool:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
                               tjumptable:$g1, tjumptable:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g0, 0),
                                  tjumptable:$g1, 16),
                          tjumptable:$g2, 32),
                  tjumptable:$g3, 48)>;


//===----------------------------------------------------------------------===//
// Arithmetic instructions.
//===----------------------------------------------------------------------===//

// Add/subtract with carry.
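// For reference: "adc Wd, Wn, Wm" computes Wn + Wm + C using the carry bit
// from NZCV (the AArch64adc node above); the flag-setting ADCS/SBCS forms
// additionally write NZCV (AArch64adc_flag/AArch64sbc_flag).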
defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;

def : InstAlias<"ngc $dst, $src", (SBCWr GPR32:$dst, WZR, GPR32:$src)>;
def : InstAlias<"ngc $dst, $src", (SBCXr GPR64:$dst, XZR, GPR64:$src)>;
def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;

// Add/subtract
defm ADD : AddSub<0, "add", "sub", add>;
defm SUB : AddSub<1, "sub", "add">;

def : InstAlias<"mov $dst, $src",
                (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;

defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;

// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
          (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
          (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
          (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
          (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
          (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
          (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
let AddedComplexity = 1 in {
def : Pat<(sub GPR32sp:$R2, arith_extended_reg32<i32>:$R3),
          (SUBSWrx GPR32sp:$R2, arith_extended_reg32<i32>:$R3)>;
def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64<i64>:$R3),
          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64<i64>:$R3)>;
}

// Because of the immediate format for add/sub-imm instructions, the
// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
// These patterns capture that transformation.
let AddedComplexity = 1 in {
def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}

// Because of the immediate format for add/sub-imm instructions, the
// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
// These patterns capture that transformation for the flag-setting
// ADDS/SUBS forms as well.
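// The reason: the add/sub immediate field is an unsigned 12-bit value
// (optionally shifted left by 12), so a negative constant can never be
// encoded directly; e.g. (add x, -1) only fits once rewritten as (sub x, 1).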
let AddedComplexity = 1 in {
def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}

def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
def : InstAlias<"neg $dst, $src$shift",
                (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
def : InstAlias<"neg $dst, $src$shift",
                (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;

def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
def : InstAlias<"negs $dst, $src$shift",
                (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
def : InstAlias<"negs $dst, $src$shift",
                (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;


// Unsigned/Signed divide
defm UDIV : Div<0, "udiv", udiv>;
defm SDIV : Div<1, "sdiv", sdiv>;

def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr GPR64:$Rn, GPR64:$Rm)>;
def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr GPR64:$Rn, GPR64:$Rm)>;

// Variable shift
defm ASRV : Shift<0b10, "asr", sra>;
defm LSLV : Shift<0b00, "lsl", shl>;
defm LSRV : Shift<0b01, "lsr", srl>;
defm RORV : Shift<0b11, "ror", rotr>;

def : ShiftAlias<"asrv", ASRVWr, GPR32>;
def : ShiftAlias<"asrv", ASRVXr, GPR64>;
def : ShiftAlias<"lslv", LSLVWr, GPR32>;
def : ShiftAlias<"lslv", LSLVXr, GPR64>;
def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
def : ShiftAlias<"rorv", RORVWr, GPR32>;
def : ShiftAlias<"rorv", RORVXr, GPR64>;

// Multiply-add
let AddedComplexity = 5 in {
defm MADD : MulAccum<0, "madd", add>;
defm MSUB : MulAccum<1, "msub", sub>;

def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
          (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
          (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;

def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
} // AddedComplexity = 5

let AddedComplexity = 5 in {
def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;

def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
          (SMADDLrrr GPR32:$Rn,
                     GPR32:$Rm, XZR)>;
def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
          (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;

def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
          (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
          (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;

def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;

def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), XZR)>;

def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
          (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
          (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
                    GPR64:$Ra)),
          (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;

def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
          (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
          (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
                                    (s64imm_32bit:$C)))),
          (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
                     (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
} // AddedComplexity = 5

def : MulAccumWAlias<"mul", MADDWrrr>;
def : MulAccumXAlias<"mul", MADDXrrr>;
def : MulAccumWAlias<"mneg", MSUBWrrr>;
def : MulAccumXAlias<"mneg", MSUBXrrr>;
def : WideMulAccumAlias<"smull", SMADDLrrr>;
def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
def : WideMulAccumAlias<"umull", UMADDLrrr>;
def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;

// Multiply-high
def SMULHrr : MulHi<0b010, "smulh", mulhs>;
def UMULHrr : MulHi<0b110, "umulh", mulhu>;

// CRC32
def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;

def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;

// v8.1 atomic CAS
defm CAS : CompareAndSwap<0, 0, "">;
defm CASA : CompareAndSwap<1, 0, "a">;
defm CASL : CompareAndSwap<0, 1, "l">;
defm CASAL : CompareAndSwap<1, 1, "al">;

// v8.1 atomic CASP
defm CASP : CompareAndSwapPair<0, 0, "">;
defm CASPA : CompareAndSwapPair<1, 0, "a">;
defm CASPL : CompareAndSwapPair<0, 1, "l">;
defm CASPAL : CompareAndSwapPair<1, 1, "al">;

// v8.1 atomic SWP
defm SWP : Swap<0, 0, "">;
defm SWPA : Swap<1, 0, "a">;
defm SWPL : Swap<0, 1, "l">;
defm SWPAL : Swap<1, 1, "al">;

// v8.1 atomic LD<OP>(register). Performs load and then ST<OP>(register)
defm LDADD : LDOPregister<0b000, "add", 0, 0, "">;
defm LDADDA : LDOPregister<0b000, "add", 1, 0, "a">;
defm LDADDL : LDOPregister<0b000, "add", 0, 1, "l">;
defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;

defm LDCLR : LDOPregister<0b001, "clr", 0, 0, "">;
defm LDCLRA : LDOPregister<0b001, "clr", 1, 0, "a">;
defm LDCLRL : LDOPregister<0b001, "clr", 0, 1, "l">;
defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;

defm LDEOR : LDOPregister<0b010, "eor", 0, 0, "">;
defm LDEORA : LDOPregister<0b010, "eor", 1, 0, "a">;
defm LDEORL : LDOPregister<0b010, "eor", 0, 1, "l">;
defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;

defm LDSET : LDOPregister<0b011, "set", 0, 0, "">;
defm LDSETA : LDOPregister<0b011, "set", 1, 0, "a">;
defm LDSETL : LDOPregister<0b011, "set", 0, 1, "l">;
defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;

defm LDSMAX : LDOPregister<0b100, "smax", 0, 0, "">;
defm LDSMAXA : LDOPregister<0b100, "smax", 1, 0, "a">;
defm LDSMAXL : LDOPregister<0b100, "smax", 0, 1, "l">;
defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;

defm LDSMIN : LDOPregister<0b101, "smin", 0, 0, "">;
defm LDSMINA : LDOPregister<0b101, "smin", 1, 0, "a">;
defm LDSMINL : LDOPregister<0b101, "smin", 0, 1, "l">;
defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;

defm LDUMAX : LDOPregister<0b110, "umax", 0, 0, "">;
defm LDUMAXA : LDOPregister<0b110, "umax", 1, 0, "a">;
defm LDUMAXL : LDOPregister<0b110, "umax", 0, 1, "l">;
defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;

defm LDUMIN : LDOPregister<0b111, "umin", 0, 0, "">;
defm LDUMINA : LDOPregister<0b111, "umin", 1, 0, "a">;
defm LDUMINL : LDOPregister<0b111, "umin", 0, 1, "l">;
defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;

// v8.1 atomic ST<OP>(register) as aliases to "LD<OP>(register)" when Rt=xZR
defm : STOPregister<"stadd", "LDADD">;   // STADDx
defm : STOPregister<"stclr", "LDCLR">;   // STCLRx
defm : STOPregister<"steor", "LDEOR">;   // STEORx
defm : STOPregister<"stset", "LDSET">;   // STSETx
defm : STOPregister<"stsmax", "LDSMAX">; // STSMAXx
defm : STOPregister<"stsmin", "LDSMIN">; // STSMINx
defm : STOPregister<"stumax", "LDUMAX">; // STUMAXx
defm : STOPregister<"stumin", "LDUMIN">; // STUMINx

// v8.5 Memory Tagging Extension
let Predicates = [HasMTE] in {

def IRG : BaseTwoOperand<0b0100, GPR64sp, "irg", int_aarch64_irg, GPR64sp, GPR64>,
          Sched<[]> {
  let Inst{31} = 1;
}
def GMI : BaseTwoOperand<0b0101, GPR64, "gmi", int_aarch64_gmi, GPR64sp>, Sched<[]> {
  let Inst{31} = 1;
  let isNotDuplicable = 1;
}
def ADDG : AddSubG<0, "addg", null_frag>;
def SUBG : AddSubG<1, "subg", null_frag>;

def : InstAlias<"irg $dst, $src", (IRG GPR64sp:$dst, GPR64sp:$src, XZR), 1>;

def SUBP :
    SUBP<0, "subp", int_aarch64_subp>, Sched<[]>;
def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]> {
  let Defs = [NZCV];
}

def : InstAlias<"cmpp $lhs, $rhs", (SUBPS XZR, GPR64sp:$lhs, GPR64sp:$rhs), 0>;

def LDG : MemTagLoad<"ldg", "\t$Rt, [$Rn, $offset]">;

def : Pat<(int_aarch64_addg (am_indexedu6s128 GPR64sp:$Rn, uimm6s16:$imm6), imm0_15:$imm4),
          (ADDG GPR64sp:$Rn, imm0_63:$imm6, imm0_15:$imm4)>;
def : Pat<(int_aarch64_ldg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)),
          (LDG GPR64:$Rt, GPR64sp:$Rn, simm9s16:$offset)>;

def : InstAlias<"ldg $Rt, [$Rn]", (LDG GPR64:$Rt, GPR64sp:$Rn, 0), 1>;

def LDGM : MemTagVector<1, "ldgm", "\t$Rt, [$Rn]",
                        (outs GPR64:$Rt), (ins GPR64sp:$Rn)>;
def STGM : MemTagVector<0, "stgm", "\t$Rt, [$Rn]",
                        (outs), (ins GPR64:$Rt, GPR64sp:$Rn)>;
def STZGM : MemTagVector<0, "stzgm", "\t$Rt, [$Rn]",
                         (outs), (ins GPR64:$Rt, GPR64sp:$Rn)> {
  let Inst{23} = 0;
}

defm STG : MemTagStore<0b00, "stg">;
defm STZG : MemTagStore<0b01, "stzg">;
defm ST2G : MemTagStore<0b10, "st2g">;
defm STZ2G : MemTagStore<0b11, "stz2g">;

def : Pat<(AArch64stg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (STGOffset $Rn, $Rm, $imm)>;
def : Pat<(AArch64stzg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (STZGOffset $Rn, $Rm, $imm)>;
def : Pat<(AArch64st2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (ST2GOffset $Rn, $Rm, $imm)>;
def : Pat<(AArch64stz2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
          (STZ2GOffset $Rn, $Rm, $imm)>;

defm STGP : StorePairOffset<0b01, 0, GPR64z, simm7s16, "stgp">;
def STGPpre : StorePairPreIdx<0b01, 0, GPR64z, simm7s16, "stgp">;
def STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">;

def : Pat<(int_aarch64_stg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)),
          (STGOffset GPR64:$Rt, GPR64sp:$Rn, simm9s16:$offset)>;

def : Pat<(int_aarch64_stgp (am_indexed7s128 GPR64sp:$Rn, simm7s16:$imm), GPR64:$Rt, GPR64:$Rt2),
          (STGPi $Rt, $Rt2, $Rn, $imm)>;

def IRGstack
    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rsp, GPR64:$Rm), []>,
      Sched<[]>;
def TAGPstack
    : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rn, uimm6s16:$imm6, GPR64sp:$Rm, imm0_15:$imm4), []>,
      Sched<[]>;

// Explicit SP in the first operand prevents ShrinkWrap optimization
// from leaving this instruction out of the stack frame. When IRGstack
// is transformed into IRG, this operand is replaced with the actual
// register / expression for the tagged base pointer of the current function.
def : Pat<(int_aarch64_irg_sp i64:$Rm), (IRGstack SP, i64:$Rm)>;

// Large STG to be expanded into a loop. $Rm is the size, $Rn is start address.
// $Rn_wback is one past the end of the range.
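// A rough sketch of the expected expansion (an assumption; the exact loop is
// produced later by the pseudo expander), tagging 16-byte granules until the
// size in $Rm is exhausted:
//   loop: stg  $Rn, [$Rn], #16
//         subs $Rm, $Rm, #16
//         b.ne loop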
let isCodeGenOnly=1, mayStore=1 in {
def STGloop
    : Pseudo<(outs GPR64common:$Rm_wback, GPR64sp:$Rn_wback), (ins GPR64common:$Rm, GPR64sp:$Rn),
             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,$Rm = $Rm_wback,@earlyclobber $Rm_wback" >,
      Sched<[WriteAdr, WriteST]>;

def STZGloop
    : Pseudo<(outs GPR64common:$Rm_wback, GPR64sp:$Rn_wback), (ins GPR64common:$Rm, GPR64sp:$Rn),
             [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,$Rm = $Rm_wback,@earlyclobber $Rm_wback" >,
      Sched<[WriteAdr, WriteST]>;
}

} // Predicates = [HasMTE]

//===----------------------------------------------------------------------===//
// Logical instructions.
//===----------------------------------------------------------------------===//

// (immediate)
defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
defm AND  : LogicalImm<0b00, "and", and, "bic">;
defm EOR  : LogicalImm<0b10, "eor", xor, "eon">;
defm ORR  : LogicalImm<0b01, "orr", or, "orn">;

// FIXME: these aliases *are* canonical sometimes (when movz can't be
// used). Actually, it seems to be working right now, but putting logical_immXX
// here is a bit dodgy on the AsmParser side too.
def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
                                          logical_imm32:$imm), 0>;
def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
                                          logical_imm64:$imm), 0>;


// (register)
defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
defm BICS : LogicalRegS<0b11, 1, "bics",
                        BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
defm AND  : LogicalReg<0b00, 0, "and", and>;
defm BIC  : LogicalReg<0b00, 1, "bic",
                       BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
defm EON  : LogicalReg<0b10, 1, "eon",
                       BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
defm EOR  : LogicalReg<0b10, 0, "eor", xor>;
defm ORN  : LogicalReg<0b01, 1, "orn",
                       BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
defm ORR  : LogicalReg<0b01, 0, "orr", or>;

def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;

def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;

def : InstAlias<"mvn $Wd, $Wm$sh",
                (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
def : InstAlias<"mvn $Xd, $Xm$sh",
                (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;

def : InstAlias<"tst $src1, $src2",
                (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
def : InstAlias<"tst $src1, $src2",
                (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;

def : InstAlias<"tst $src1, $src2",
                (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
def : InstAlias<"tst $src1, $src2",
                (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;

def : InstAlias<"tst $src1, $src2$sh",
                (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
def : InstAlias<"tst $src1, $src2$sh",
                (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;


def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;


//===----------------------------------------------------------------------===//
// One operand data processing instructions.
//===----------------------------------------------------------------------===//

defm CLS  : OneOperandData<0b101, "cls">;
defm CLZ  : OneOperandData<0b100, "clz", ctlz>;
defm RBIT : OneOperandData<0b000, "rbit", bitreverse>;

def REV16Wr : OneWRegData<0b001, "rev16",
                          UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
def REV16Xr : OneXRegData<0b001, "rev16", null_frag>;

def : Pat<(cttz GPR32:$Rn),
          (CLZWr (RBITWr GPR32:$Rn))>;
def : Pat<(cttz GPR64:$Rn),
          (CLZXr (RBITXr GPR64:$Rn))>;
def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
                (i32 1))),
          (CLSWr GPR32:$Rn)>;
def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
                (i64 1))),
          (CLSXr GPR64:$Rn)>;

// Unlike the other one operand instructions, the instructions with the "rev"
// mnemonic do *not* just differ in the size bit, but actually use different
// opcode bits for the different sizes.
def REVWr   : OneWRegData<0b010, "rev", bswap>;
def REVXr   : OneXRegData<0b011, "rev", bswap>;
def REV32Xr : OneXRegData<0b010, "rev32",
                          UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;

def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;

// The bswap commutes with the rotr so we want a pattern for both possible
// orders.
def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;

//===----------------------------------------------------------------------===//
// Bitfield immediate extraction instruction.
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in
defm EXTR : ExtractImm<"extr">;
def : InstAlias<"ror $dst, $src, $shift",
                (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
def : InstAlias<"ror $dst, $src, $shift",
                (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;

def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
          (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
          (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;

//===----------------------------------------------------------------------===//
// Other bitfield immediate instructions.
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in {
defm BFM  : BitfieldImmWith2RegArgs<0b01, "bfm">;
defm SBFM : BitfieldImm<0b00, "sbfm">;
defm UBFM : BitfieldImm<0b10, "ubfm">;
}

// For "lsl #shift", immr is encoded as (32 - shift) & 31...
def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// ...and imms as 31 - shift, so e.g. "lsl w0, w0, #3" becomes
// "ubfm w0, w0, #29, #28".
def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(7, 31 - shift_amt)
def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  enc = enc > 7 ? 7 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(15, 31 - shift_amt)
def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  enc = enc > 15 ? 15 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// As above for 64 bits: immr = (64 - shift) & 63...
def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// ...and imms = 63 - shift.
def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(7, 63 - shift_amt)
def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 7 ? 7 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(15, 63 - shift_amt)
def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 15 ? 15 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(31, 63 - shift_amt)
def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 31 ? 31 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
          (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                   (i64 (i32shift_b imm0_31:$imm)))>;
def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
          (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                   (i64 (i64shift_b imm0_63:$imm)))>;

let AddedComplexity = 10 in {
def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
}

def : InstAlias<"asr $dst, $src, $shift",
                (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
def : InstAlias<"asr $dst, $src, $shift",
                (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
          (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
          (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;

def : InstAlias<"lsr $dst, $src, $shift",
                (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
def : InstAlias<"lsr $dst, $src, $shift",
                (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

//===----------------------------------------------------------------------===//
// Conditional comparison instructions.
//===----------------------------------------------------------------------===//
defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;

//===----------------------------------------------------------------------===//
// Conditional select instructions.
//===----------------------------------------------------------------------===//
defm CSEL : CondSelect<0, 0b00, "csel">;

def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;

def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;

def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;

// The inverse of the condition code from the alias instruction is what is used
// in the aliased instruction. The parser already inverts the condition code
// for these aliases.
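// For example, "cset $dst, eq" is encoded as "csinc $dst, wzr, wzr, ne":
// CSINC yields wzr + 1 = 1 when ne does not hold (i.e. eq holds), and
// wzr = 0 otherwise.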
def : InstAlias<"cset $dst, $cc",
                (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
def : InstAlias<"cset $dst, $cc",
                (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;

def : InstAlias<"csetm $dst, $cc",
                (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
def : InstAlias<"csetm $dst, $cc",
                (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;

def : InstAlias<"cinc $dst, $src, $cc",
                (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cinc $dst, $src, $cc",
                (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

def : InstAlias<"cinv $dst, $src, $cc",
                (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cinv $dst, $src, $cc",
                (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

def : InstAlias<"cneg $dst, $src, $cc",
                (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cneg $dst, $src, $cc",
                (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

//===----------------------------------------------------------------------===//
// PC-relative instructions.
//===----------------------------------------------------------------------===//
let isReMaterializable = 1 in {
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def ADR  : ADRI<0, "adr", adrlabel,
                [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>;
} // hasSideEffects = 0

def ADRP : ADRI<1, "adrp", adrplabel,
                [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
} // isReMaterializable = 1

// page address of a constant pool entry, block address
def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>;
def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>;
def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>;
def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>;
def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>;

//===----------------------------------------------------------------------===//
// Unconditional branch (register) instructions.
//===----------------------------------------------------------------------===//

let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
def RET  : BranchReg<0b0010, "ret", []>;
def DRPS : SpecialReturn<0b0101, "drps">;
def ERET : SpecialReturn<0b0100, "eret">;
} // isReturn = 1, isTerminator = 1, isBarrier = 1

// Default to the LR register.
def : InstAlias<"ret", (RET LR)>;

let isCall = 1, Defs = [LR], Uses = [SP] in {
def BLR : BranchReg<0b0001, "blr", [(AArch64call GPR64:$Rn)]>;
} // isCall

let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def BR : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
} // isBranch, isTerminator, isBarrier, isIndirectBranch

// Create a separate pseudo-instruction for codegen to use so that we don't
// flag LR as used in every function. It'll be restored before the RET by the
// epilogue if it's legitimately used.
def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
                   Sched<[WriteBrReg]> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
}

// This is a directive-like pseudo-instruction. The purpose is to insert an
// R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
// (which in the usual case is a BLR).
let hasSideEffects = 1 in
def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
  let AsmString = ".tlsdesccall $sym";
}

// Pseudo instruction to tell the streamer to emit a 'B' character into the
// augmentation string.
def EMITBKEY : Pseudo<(outs), (ins), []>, Sched<[]> {}

// FIXME: maybe the scratch register used shouldn't be fixed to X1?
// FIXME: can "hasSideEffects" be dropped?
let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1,
    isCodeGenOnly = 1 in
def TLSDESC_CALLSEQ
    : Pseudo<(outs), (ins i64imm:$sym),
             [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
      Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
          (TLSDESC_CALLSEQ texternalsym:$sym)>;

//===----------------------------------------------------------------------===//
// Conditional branch (immediate) instruction.
//===----------------------------------------------------------------------===//
def Bcc : BranchCond;

//===----------------------------------------------------------------------===//
// Compare-and-branch instructions.
//===----------------------------------------------------------------------===//
defm CBZ  : CmpBranch<0, "cbz", AArch64cbz>;
defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;

//===----------------------------------------------------------------------===//
// Test-bit-and-branch instructions.
//===----------------------------------------------------------------------===//
defm TBZ  : TestBranch<0, "tbz", AArch64tbz>;
defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;

//===----------------------------------------------------------------------===//
// Unconditional branch (immediate) instructions.
//===----------------------------------------------------------------------===//
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
def B : BranchImm<0, "b", [(br bb:$addr)]>;
} // isBranch, isTerminator, isBarrier

let isCall = 1, Defs = [LR], Uses = [SP] in {
def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
} // isCall
def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;

//===----------------------------------------------------------------------===//
// Exception generation instructions.
//===----------------------------------------------------------------------===//
let isTrap = 1 in {
def BRK : ExceptionGeneration<0b001, 0b00, "brk">;
}
def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">;
def HLT   : ExceptionGeneration<0b010, 0b00, "hlt">;
def HVC   : ExceptionGeneration<0b000, 0b10, "hvc">;
def SMC   : ExceptionGeneration<0b000, 0b11, "smc">;
def SVC   : ExceptionGeneration<0b000, 0b01, "svc">;

// DCPSn defaults to an immediate operand of zero if unspecified.
def : InstAlias<"dcps1", (DCPS1 0)>;
def : InstAlias<"dcps2", (DCPS2 0)>;
def : InstAlias<"dcps3", (DCPS3 0)>;

def UDF : UDFType<0, "udf">;

//===----------------------------------------------------------------------===//
// Load instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
defm LDPW : LoadPairOffset<0b00, 0, GPR32z, simm7s4, "ldp">;
defm LDPX : LoadPairOffset<0b10, 0, GPR64z, simm7s8, "ldp">;
defm LDPS : LoadPairOffset<0b00, 1, FPR32Op, simm7s4, "ldp">;
defm LDPD : LoadPairOffset<0b01, 1, FPR64Op, simm7s8, "ldp">;
defm LDPQ : LoadPairOffset<0b10, 1, FPR128Op, simm7s16, "ldp">;

defm LDPSW : LoadPairOffset<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (pre-indexed)
def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;

// Pair (post-indexed)
def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;

def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;


// Pair (no allocate)
defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32z, simm7s4, "ldnp">;
defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64z, simm7s8, "ldnp">;
defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32Op, simm7s4, "ldnp">;
defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64Op, simm7s8, "ldnp">;
defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128Op, simm7s16, "ldnp">;

//---
// (register offset)
//---

// Integer
defm LDRBB : Load8RO<0b00,  0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
defm LDRW  : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
defm LDRX  : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;

// Floating-point
defm LDRB : Load8RO<0b00,  1, 0b01, FPR8Op, "ldr", untyped, load>;
defm LDRH : Load16RO<0b01, 1, 0b01, FPR16Op, "ldr", f16, load>;
defm LDRS : Load32RO<0b10, 1, 0b01, FPR32Op, "ldr", f32, load>;
defm LDRD : Load64RO<0b11, 1, 0b01, FPR64Op, "ldr", f64, load>;
defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128Op, "ldr", f128, load>;

// Load sign-extended half-word
defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;

// Load sign-extended byte
defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;

// Load sign-extended word
defm LDRSW : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;

// Pre-fetch.
defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;

// For regular loads, we do not have any alignment requirement.
// Thus, it is safe to directly map the vector loads with interesting
// addressing modes.
// FIXME: We could do the same for bitconvert to floating point vectors.
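// The multiclass below instantiates, for each addressing mode, a pair of
// patterns (W- and X-register offset) that select a scalar load feeding
// scalar_to_vector directly to the corresponding FP/SIMD load: the loaded
// scalar lands in the low lane of an IMPLICIT_DEF vector via INSERT_SUBREG
// on the given subregister index.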
multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
                              ValueType ScalTy, ValueType VecTy,
                              Instruction LOADW, Instruction LOADX,
                              SubRegIndex sub> {
  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
                           sub)>;

  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
                           sub)>;
}

let AddedComplexity = 10 in {
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v8i8,  LDRBroW, LDRBroX, bsub>;
defm : ScalToVecROLoadPat<ro8,  extloadi8,  i32, v16i8, LDRBroW, LDRBroX, bsub>;

defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro16, load,       i32, v4f16, LDRHroW, LDRHroX, hsub>;
defm : ScalToVecROLoadPat<ro16, load,       i32, v8f16, LDRHroW, LDRHroX, hsub>;

defm : ScalToVecROLoadPat<ro32, load,       i32, v2i32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       i32, v4i32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro32, load,       f32, v2f32, LDRSroW, LDRSroX, ssub>;
defm : ScalToVecROLoadPat<ro32, load,       f32, v4f32, LDRSroW, LDRSroX, ssub>;

defm : ScalToVecROLoadPat<ro64, load,       i64, v2i64, LDRDroW, LDRDroX, dsub>;

defm : ScalToVecROLoadPat<ro64, load,       f64, v2f64, LDRDroW, LDRDroX, dsub>;


def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend64:$extend))))),
           (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;

def : Pat <(v1i64 (scalar_to_vector (i64
                      (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend64:$extend))))),
           (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
}

// Match all loads of 64-bit width whose type is compatible with FPR64.
multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
                        Instruction LOADW, Instruction LOADX> {

  def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
let Predicates = [IsLE] in {
  // We must do vector loads with LD1 in big-endian.
  defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v8i8,  LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
  defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
}

defm : VecROLoadPat<ro64, v1i64, LDRDroW, LDRDroX>;
defm : VecROLoadPat<ro64, v1f64, LDRDroW, LDRDroX>;

// Match all loads of 128-bit width whose type is compatible with FPR128.
let Predicates = [IsLE] in {
  // We must do vector loads with LD1 in big-endian.
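  // (On big-endian targets a whole-vector LDR gives a different in-register
  // lane order than the element-wise LD1, so these patterns are restricted
  // to little-endian.)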
  defm : VecROLoadPat<ro128, v2i64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v2f64,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4i32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v4f32,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8i16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v8f16,  LDRQroW, LDRQroX>;
  defm : VecROLoadPat<ro128, v16i8,  LDRQroW, LDRQroX>;
}
} // AddedComplexity = 10

// zextload -> i64
multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                           sub_32)>;

  def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (SUBREG_TO_REG (i64 0),
                           (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                           sub_32)>;
}

let AddedComplexity = 10 in {
  defm : ExtLoadTo64ROPat<ro8,  zextloadi8,  LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  zextloadi1,  LDRBBroW, LDRBBroX>;

  // extload -> zextload
  defm : ExtLoadTo64ROPat<ro8,  extloadi8,   LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo64ROPat<ro16, extloadi16,  LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo64ROPat<ro32, extloadi32,  LDRWroW,  LDRWroX>;

  // extloadi1 -> zextloadi8
  defm : ExtLoadTo64ROPat<ro8,  extloadi1,   LDRBBroW, LDRBBroX>;
}


// extload/zextload -> i32
multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
                            Instruction INSTW, Instruction INSTX> {
  def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
            (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;

}

let AddedComplexity = 10 in {
  // extload -> zextload
  defm : ExtLoadTo32ROPat<ro8,  extloadi8,  LDRBBroW, LDRBBroX>;
  defm : ExtLoadTo32ROPat<ro16, extloadi16, LDRHHroW, LDRHHroX>;
  defm : ExtLoadTo32ROPat<ro32, extloadi32, LDRWroW,  LDRWroX>;

  // zextloadi1 -> zextloadi8
  defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
}

//---
// (unsigned immediate)
//---
defm LDRX : LoadUI<0b11, 0, 0b01, GPR64z, uimm12s8, "ldr",
                   [(set GPR64z:$Rt,
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRW : LoadUI<0b10, 0, 0b01, GPR32z, uimm12s4, "ldr",
                   [(set GPR32z:$Rt,
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRB : LoadUI<0b00, 1, 0b01, FPR8Op, uimm12s1, "ldr",
                   [(set FPR8Op:$Rt,
                         (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
defm LDRH : LoadUI<0b01, 1, 0b01, FPR16Op, uimm12s2, "ldr",
                   [(set (f16 FPR16Op:$Rt),
                         (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
defm LDRS : LoadUI<0b10, 1, 0b01, FPR32Op, uimm12s4, "ldr",
                   [(set (f32 FPR32Op:$Rt),
                         (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
defm LDRD : LoadUI<0b11, 1, 0b01, FPR64Op, uimm12s8, "ldr",
                   [(set (f64 FPR64Op:$Rt),
                         (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128Op, uimm12s16, "ldr",
                   [(set (f128 FPR128Op:$Rt),
                         (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;

// For regular loads, we do not have any alignment requirement.
// Thus, it is safe to directly map the vector loads with interesting
// addressing modes.
// FIXME: We could do the same for bitconvert to floating point vectors.
def : Pat <(v8i8 (scalar_to_vector (i32
                    (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v16i8 (scalar_to_vector (i32
                    (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
def : Pat <(v4i16 (scalar_to_vector (i32
                    (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v8i16 (scalar_to_vector (i32
                    (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
def : Pat <(v2i32 (scalar_to_vector (i32
                    (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v4i32 (scalar_to_vector (i32
                    (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
def : Pat <(v1i64 (scalar_to_vector (i64
                    (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat <(v2i64 (scalar_to_vector (i64
                    (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
           (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                          (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;

// Match all loads of 64-bit width whose type is compatible with FPR64.
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian.
  def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
            (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
}
def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;

// Match all loads of 128-bit width whose type is compatible with FPR128.
let Predicates = [IsLE] in {
  // We must use LD1 to perform vector loads in big-endian.
  def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
            (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
}
def : Pat<(f128 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
          (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;

defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
                    [(set GPR32:$Rt,
                          (zextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                     uimm12s2:$offset)))]>;
defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
                    [(set GPR32:$Rt,
                          (zextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                   uimm12s1:$offset)))]>;
// zextload -> i64
def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
      (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
      (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;

// zextloadi1 -> zextloadi8
def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
      (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;

// extload -> zextload
def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
      (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
      (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
      (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
      (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;

// load sign-extended half-word
defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
                     [(set GPR32:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;
defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
                     [(set GPR64:$Rt,
                           (sextloadi16 (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)))]>;

// load sign-extended byte
defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
                     [(set GPR32:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;
defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
                     [(set GPR64:$Rt,
                           (sextloadi8 (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)))]>;

// load sign-extended word
defm LDRSW : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
                    [(set GPR64:$Rt,
                          (sextloadi32 (am_indexed32 GPR64sp:$Rn,
                                                     uimm12s4:$offset)))]>;

// load zero-extended word
def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
      (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;

// Pre-fetch.
def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
                        [(AArch64Prefetch imm:$Rt,
                                          (am_indexed64 GPR64sp:$Rn,
                                                        uimm12s8:$offset))]>;

def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;

//---
// (literal)

def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
  if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
    const DataLayout &DL = MF->getDataLayout();
    unsigned Align = G->getGlobal()->getPointerAlignment(DL);
    return Align >= 4 && G->getOffset() % 4 == 0;
  }
  if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
    return C->getAlignment() >= 4 && C->getOffset() % 4 == 0;
  return false;
}]>;

def LDRWl : LoadLiteral<0b00, 0, GPR32z, "ldr",
                        [(set GPR32z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRXl : LoadLiteral<0b01, 0, GPR64z, "ldr",
                        [(set GPR64z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
def LDRSl : LoadLiteral<0b00, 1, FPR32Op, "ldr",
                        [(set (f32 FPR32Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRDl : LoadLiteral<0b01, 1, FPR64Op, "ldr",
                        [(set (f64 FPR64Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
def LDRQl : LoadLiteral<0b10, 1, FPR128Op, "ldr",
                        [(set (f128 FPR128Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;

// load sign-extended word
def LDRSWl : LoadLiteral<0b10, 0, GPR64z, "ldrsw",
                         [(set GPR64z:$Rt, (sextloadi32 (AArch64adr alignedglobal:$label)))]>;

let AddedComplexity = 20 in {
def : Pat<(i64 (zextloadi32 (AArch64adr alignedglobal:$label))),
          (SUBREG_TO_REG (i64 0), (LDRWl $label), sub_32)>;
}

// prefetch
def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
//                   [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;

//---
// (unscaled immediate)
defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64z, "ldur",
                          [(set GPR64z:$Rt,
                                (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32z, "ldur",
                          [(set GPR32z:$Rt,
                                (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8Op, "ldur",
                          [(set FPR8Op:$Rt,
                                (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16Op, "ldur",
                          [(set FPR16Op:$Rt,
                                (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32Op, "ldur",
                          [(set (f32 FPR32Op:$Rt),
                                (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64Op, "ldur",
                          [(set (f64 FPR64Op:$Rt),
                                (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128Op, "ldur",
                          [(set (f128 FPR128Op:$Rt),
                                (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;

defm LDURHH
    : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
                   [(set GPR32:$Rt,
                         (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURBB
    : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
                   [(set GPR32:$Rt,
                         (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// Match all loads of 64-bit width whose type is compatible with FPR64.
let Predicates = [IsLE] in {
  def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
            (LDURDi GPR64sp:$Rn, simm9:$offset)>;
}
def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;

// Match all loads of 128-bit width whose type is compatible with FPR128.
let Predicates = [IsLE] in {
  def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
            (LDURQi GPR64sp:$Rn, simm9:$offset)>;
}

// anyext -> zext
def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
// unscaled zext
def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
    (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;


//---
// LDR mnemonics fall back to LDUR for negative or unaligned offsets.

// Define new assembler match classes as we want to only match these when
// they don't otherwise match the scaled addressing mode for LDR/STR. Don't
// associate a DiagnosticType either, as we want the diagnostic for the
// canonical form (the scaled operand) to take precedence.
class SImm9OffsetOperand<int Width> : AsmOperandClass {
  let Name = "SImm9OffsetFB" # Width;
  let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
  let RenderMethod = "addImmOperands";
}

def SImm9OffsetFB8Operand   : SImm9OffsetOperand<8>;
def SImm9OffsetFB16Operand  : SImm9OffsetOperand<16>;
def SImm9OffsetFB32Operand  : SImm9OffsetOperand<32>;
def SImm9OffsetFB64Operand  : SImm9OffsetOperand<64>;
def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;

def simm9_offset_fb8 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB8Operand;
}
def simm9_offset_fb16 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB16Operand;
}
def simm9_offset_fb32 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB32Operand;
}
def simm9_offset_fb64 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB64Operand;
}
def simm9_offset_fb128 : Operand<i64> {
  let ParserMatchClass = SImm9OffsetFB128Operand;
}

def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"ldr $Rt, [$Rn, $offset]",
                (LDURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;

// zextload -> i64
def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
  (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
  (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;

// load sign-extended half-word
defm LDURSHW
    : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
                   [(set GPR32:$Rt,
                         (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSHX
    : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
                   [(set GPR64:$Rt,
                         (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended byte
defm LDURSBW
    : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
"ldursb", 2370 [(set GPR32:$Rt, 2371 (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>; 2372defm LDURSBX 2373 : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb", 2374 [(set GPR64:$Rt, 2375 (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>; 2376 2377// load sign-extended word 2378defm LDURSW 2379 : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw", 2380 [(set GPR64:$Rt, 2381 (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>; 2382 2383// zero and sign extending aliases from generic LDR* mnemonics to LDUR*. 2384def : InstAlias<"ldrb $Rt, [$Rn, $offset]", 2385 (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>; 2386def : InstAlias<"ldrh $Rt, [$Rn, $offset]", 2387 (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>; 2388def : InstAlias<"ldrsb $Rt, [$Rn, $offset]", 2389 (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>; 2390def : InstAlias<"ldrsb $Rt, [$Rn, $offset]", 2391 (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>; 2392def : InstAlias<"ldrsh $Rt, [$Rn, $offset]", 2393 (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>; 2394def : InstAlias<"ldrsh $Rt, [$Rn, $offset]", 2395 (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>; 2396def : InstAlias<"ldrsw $Rt, [$Rn, $offset]", 2397 (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>; 2398 2399// Pre-fetch. 2400defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum", 2401 [(AArch64Prefetch imm:$Rt, 2402 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>; 2403 2404//--- 2405// (unscaled immediate, unprivileged) 2406defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">; 2407defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">; 2408 2409defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">; 2410defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">; 2411 2412// load sign-extended half-word 2413defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">; 2414defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">; 2415 2416// load sign-extended byte 2417defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">; 2418defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">; 2419 2420// load sign-extended word 2421defm LDTRSW : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">; 2422 2423//--- 2424// (immediate pre-indexed) 2425def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32z, "ldr">; 2426def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64z, "ldr">; 2427def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8Op, "ldr">; 2428def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16Op, "ldr">; 2429def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32Op, "ldr">; 2430def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64Op, "ldr">; 2431def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128Op, "ldr">; 2432 2433// load sign-extended half-word 2434def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32z, "ldrsh">; 2435def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64z, "ldrsh">; 2436 2437// load sign-extended byte 2438def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32z, "ldrsb">; 2439def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64z, "ldrsb">; 2440 2441// load zero-extended byte 2442def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32z, "ldrb">; 2443def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32z, "ldrh">; 2444 2445// load sign-extended word 2446def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64z, "ldrsw">; 2447 2448//--- 2449// (immediate post-indexed) 2450def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32z, "ldr">; 2451def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64z, "ldr">; 2452def LDRBpost 
def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8Op, "ldr">;
def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte and half-word
def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;

//===----------------------------------------------------------------------===//
// Store instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
// FIXME: Use dedicated range-checked addressing mode operand here.
defm STPW : StorePairOffset<0b00, 0, GPR32z, simm7s4, "stp">;
defm STPX : StorePairOffset<0b10, 0, GPR64z, simm7s8, "stp">;
defm STPS : StorePairOffset<0b00, 1, FPR32Op, simm7s4, "stp">;
defm STPD : StorePairOffset<0b01, 1, FPR64Op, simm7s8, "stp">;
defm STPQ : StorePairOffset<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (pre-indexed)
def STPWpre : StorePairPreIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpre : StorePairPreIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpre : StorePairPreIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpre : StorePairPreIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpre : StorePairPreIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (post-indexed)
def STPWpost : StorePairPostIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpost : StorePairPostIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpost : StorePairPostIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpost : StorePairPostIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpost : StorePairPostIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (no allocate)
defm STNPW : StorePairNoAlloc<0b00, 0, GPR32z, simm7s4, "stnp">;
defm STNPX : StorePairNoAlloc<0b10, 0, GPR64z, simm7s8, "stnp">;
defm STNPS : StorePairNoAlloc<0b00, 1, FPR32Op, simm7s4, "stnp">;
defm STNPD : StorePairNoAlloc<0b01, 1, FPR64Op, simm7s8, "stnp">;
defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128Op, simm7s16, "stnp">;

//---
// (Register offset)

// Integer
defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
defm STRW  : Store32RO<0b10, 0, 0b00, GPR32, "str", i32, store>;
defm STRX  : Store64RO<0b11, 0, 0b00, GPR64, "str", i64, store>;


// Floating-point
defm STRB : Store8RO< 0b00, 1, 0b00, FPR8Op, "str", untyped, store>;
defm STRH : Store16RO<0b01, 1, 0b00, FPR16Op, "str", f16, store>;
defm STRS : Store32RO<0b10, 1, 0b00, FPR32Op, "str", f32, store>;
defm STRD : Store64RO<0b11, 1, 0b00, FPR64Op, "str", f64, store>;
defm STRQ : Store128RO<0b00, 1, 0b10, FPR128Op, "str", f128, store>;

let Predicates = [UseSTRQro], AddedComplexity = 10 in {
  def : Pat<(store (f128 FPR128:$Rt),
                   (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
                                   ro_Wextend128:$extend)),
            (STRQroW FPR128:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend)>;
  def : Pat<(store (f128 FPR128:$Rt),
                   (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
                                   ro_Xextend128:$extend)),
            (STRQroX FPR128:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend128:$extend)>;
}

multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
                                 Instruction STRW, Instruction STRX> {

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
  // truncstore i64
  defm : TruncStoreFrom64ROPat<ro8,  truncstorei8,  STRBBroW, STRBBroX>;
  defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
  defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW,  STRWroX>;
}

multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
                         Instruction STRW, Instruction STRX> {
  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
// Match all stores of 64-bit width whose type is compatible with FPR64.
let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v8i8,  FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
}

defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;

// Match all stores of 128-bit width whose type is compatible with FPR128.
let Predicates = [IsLE, UseSTRQro] in {
  // We must use ST1 to store vectors in big-endian.
  defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
}
} // AddedComplexity = 10

// Match stores from lane 0 to the appropriate subreg's store.
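// Going through the b/h/s/d subregister lets these reuse the plain FP store
// instructions, so no explicit lane move is needed; e.g. a store of lane 0
// of a v4i32 can be selected to "str s0, [x0, x1]" instead of first moving
// the element to a GPR.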
2594multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop, 2595 ValueType VecTy, ValueType STy, 2596 SubRegIndex SubRegIdx, 2597 Instruction STRW, Instruction STRX> { 2598 2599 def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)), 2600 (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)), 2601 (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx), 2602 GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>; 2603 2604 def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)), 2605 (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)), 2606 (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx), 2607 GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>; 2608} 2609 2610let AddedComplexity = 19 in { 2611 defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>; 2612 defm : VecROStoreLane0Pat<ro16, store, v8f16, f16, hsub, STRHroW, STRHroX>; 2613 defm : VecROStoreLane0Pat<ro32, store, v4i32, i32, ssub, STRSroW, STRSroX>; 2614 defm : VecROStoreLane0Pat<ro32, store, v4f32, f32, ssub, STRSroW, STRSroX>; 2615 defm : VecROStoreLane0Pat<ro64, store, v2i64, i64, dsub, STRDroW, STRDroX>; 2616 defm : VecROStoreLane0Pat<ro64, store, v2f64, f64, dsub, STRDroW, STRDroX>; 2617} 2618 2619//--- 2620// (unsigned immediate) 2621defm STRX : StoreUIz<0b11, 0, 0b00, GPR64z, uimm12s8, "str", 2622 [(store GPR64z:$Rt, 2623 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>; 2624defm STRW : StoreUIz<0b10, 0, 0b00, GPR32z, uimm12s4, "str", 2625 [(store GPR32z:$Rt, 2626 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>; 2627defm STRB : StoreUI<0b00, 1, 0b00, FPR8Op, uimm12s1, "str", 2628 [(store FPR8Op:$Rt, 2629 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>; 2630defm STRH : StoreUI<0b01, 1, 0b00, FPR16Op, uimm12s2, "str", 2631 [(store (f16 FPR16Op:$Rt), 2632 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>; 2633defm STRS : StoreUI<0b10, 1, 0b00, FPR32Op, uimm12s4, "str", 2634 [(store (f32 FPR32Op:$Rt), 2635 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>; 2636defm STRD : StoreUI<0b11, 1, 0b00, FPR64Op, uimm12s8, "str", 2637 [(store (f64 FPR64Op:$Rt), 2638 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>; 2639defm STRQ : StoreUI<0b00, 1, 0b10, FPR128Op, uimm12s16, "str", []>; 2640 2641defm STRHH : StoreUIz<0b01, 0, 0b00, GPR32z, uimm12s2, "strh", 2642 [(truncstorei16 GPR32z:$Rt, 2643 (am_indexed16 GPR64sp:$Rn, 2644 uimm12s2:$offset))]>; 2645defm STRBB : StoreUIz<0b00, 0, 0b00, GPR32z, uimm12s1, "strb", 2646 [(truncstorei8 GPR32z:$Rt, 2647 (am_indexed8 GPR64sp:$Rn, 2648 uimm12s1:$offset))]>; 2649 2650let AddedComplexity = 10 in { 2651 2652// Match all store 64 bits width whose type is compatible with FPR64 2653def : Pat<(store (v1i64 FPR64:$Rt), 2654 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)), 2655 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>; 2656def : Pat<(store (v1f64 FPR64:$Rt), 2657 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)), 2658 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>; 2659 2660let Predicates = [IsLE] in { 2661 // We must use ST1 to store vectors in big-endian. 
2662 def : Pat<(store (v2f32 FPR64:$Rt), 2663 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)), 2664 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>; 2665 def : Pat<(store (v8i8 FPR64:$Rt), 2666 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)), 2667 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>; 2668 def : Pat<(store (v4i16 FPR64:$Rt), 2669 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)), 2670 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>; 2671 def : Pat<(store (v2i32 FPR64:$Rt), 2672 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)), 2673 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>; 2674 def : Pat<(store (v4f16 FPR64:$Rt), 2675 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)), 2676 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>; 2677} 2678 2679// Match all store 128 bits width whose type is compatible with FPR128 2680def : Pat<(store (f128 FPR128:$Rt), 2681 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)), 2682 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>; 2683 2684let Predicates = [IsLE] in { 2685 // We must use ST1 to store vectors in big-endian. 2686 def : Pat<(store (v4f32 FPR128:$Rt), 2687 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)), 2688 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>; 2689 def : Pat<(store (v2f64 FPR128:$Rt), 2690 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)), 2691 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>; 2692 def : Pat<(store (v16i8 FPR128:$Rt), 2693 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)), 2694 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>; 2695 def : Pat<(store (v8i16 FPR128:$Rt), 2696 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)), 2697 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>; 2698 def : Pat<(store (v4i32 FPR128:$Rt), 2699 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)), 2700 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>; 2701 def : Pat<(store (v2i64 FPR128:$Rt), 2702 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)), 2703 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>; 2704 def : Pat<(store (v8f16 FPR128:$Rt), 2705 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)), 2706 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>; 2707} 2708 2709// truncstore i64 2710def : Pat<(truncstorei32 GPR64:$Rt, 2711 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)), 2712 (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>; 2713def : Pat<(truncstorei16 GPR64:$Rt, 2714 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)), 2715 (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>; 2716def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)), 2717 (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>; 2718 2719} // AddedComplexity = 10 2720 2721// Match stores from lane 0 to the appropriate subreg's store. 
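// Editorial sketch: the same lane-0 trick as the register-offset patterns
// earlier, here for the scaled unsigned-immediate addressing mode. E.g.
//   (truncstorei16 (i32 (vector_extract (v8i16 V128:$Vt), 0)),
//                  (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))
// stores the hsub sub-register directly:
//   str h0, [x0, #6]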
2722multiclass VecStoreLane0Pat<Operand UIAddrMode, SDPatternOperator storeop, 2723 ValueType VTy, ValueType STy, 2724 SubRegIndex SubRegIdx, Operand IndexType, 2725 Instruction STR> { 2726 def : Pat<(storeop (STy (vector_extract (VTy VecListOne128:$Vt), 0)), 2727 (UIAddrMode GPR64sp:$Rn, IndexType:$offset)), 2728 (STR (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx), 2729 GPR64sp:$Rn, IndexType:$offset)>; 2730} 2731 2732let AddedComplexity = 19 in { 2733 defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>; 2734 defm : VecStoreLane0Pat<am_indexed16, store, v8f16, f16, hsub, uimm12s2, STRHui>; 2735 defm : VecStoreLane0Pat<am_indexed32, store, v4i32, i32, ssub, uimm12s4, STRSui>; 2736 defm : VecStoreLane0Pat<am_indexed32, store, v4f32, f32, ssub, uimm12s4, STRSui>; 2737 defm : VecStoreLane0Pat<am_indexed64, store, v2i64, i64, dsub, uimm12s8, STRDui>; 2738 defm : VecStoreLane0Pat<am_indexed64, store, v2f64, f64, dsub, uimm12s8, STRDui>; 2739} 2740 2741//--- 2742// (unscaled immediate) 2743defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64z, "stur", 2744 [(store GPR64z:$Rt, 2745 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>; 2746defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32z, "stur", 2747 [(store GPR32z:$Rt, 2748 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>; 2749defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8Op, "stur", 2750 [(store FPR8Op:$Rt, 2751 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>; 2752defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16Op, "stur", 2753 [(store (f16 FPR16Op:$Rt), 2754 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>; 2755defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32Op, "stur", 2756 [(store (f32 FPR32Op:$Rt), 2757 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>; 2758defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64Op, "stur", 2759 [(store (f64 FPR64Op:$Rt), 2760 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>; 2761defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128Op, "stur", 2762 [(store (f128 FPR128Op:$Rt), 2763 (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>; 2764defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32z, "sturh", 2765 [(truncstorei16 GPR32z:$Rt, 2766 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>; 2767defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32z, "sturb", 2768 [(truncstorei8 GPR32z:$Rt, 2769 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>; 2770 2771// Armv8.4 Weaker Release Consistency enhancements 2772// LDAPR & STLR with Immediate Offset instructions 2773let Predicates = [HasRCPC_IMMO] in { 2774defm STLURB : BaseStoreUnscaleV84<"stlurb", 0b00, 0b00, GPR32>; 2775defm STLURH : BaseStoreUnscaleV84<"stlurh", 0b01, 0b00, GPR32>; 2776defm STLURW : BaseStoreUnscaleV84<"stlur", 0b10, 0b00, GPR32>; 2777defm STLURX : BaseStoreUnscaleV84<"stlur", 0b11, 0b00, GPR64>; 2778defm LDAPURB : BaseLoadUnscaleV84<"ldapurb", 0b00, 0b01, GPR32>; 2779defm LDAPURSBW : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b11, GPR32>; 2780defm LDAPURSBX : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b10, GPR64>; 2781defm LDAPURH : BaseLoadUnscaleV84<"ldapurh", 0b01, 0b01, GPR32>; 2782defm LDAPURSHW : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b11, GPR32>; 2783defm LDAPURSHX : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b10, GPR64>; 2784defm LDAPUR : BaseLoadUnscaleV84<"ldapur", 0b10, 0b01, GPR32>; 2785defm LDAPURSW : BaseLoadUnscaleV84<"ldapursw", 0b10, 0b10, GPR64>; 2786defm LDAPURX : BaseLoadUnscaleV84<"ldapur", 0b11, 0b01, GPR64>; 2787} 2788 2789// Match all store 64 bits width whose type is compatible with FPR64 2790def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, 
                                         simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let AddedComplexity = 10 in {

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

// Match all store 128 bits width whose type is compatible with FPR128
def : Pat<(store (f128 FPR128:$Rt), (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
          (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

} // AddedComplexity = 10

// unscaled i64 truncating stores
def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
  (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
  (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
  (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;

// Match stores from lane 0 to the appropriate subreg's store.
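// Editorial sketch: the unscaled-immediate counterpart of the lane-0
// patterns above, which also covers negative offsets. E.g. storing lane 0
// of a v2f64 at offset -8:
//   (store (f64 (vector_extract (v2f64 V128:$Vt), 0)), [Xn + #-8])
// selects to
//   stur d0, [x0, #-8]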
2859multiclass VecStoreULane0Pat<SDPatternOperator StoreOp, 2860 ValueType VTy, ValueType STy, 2861 SubRegIndex SubRegIdx, Instruction STR> { 2862 defm : VecStoreLane0Pat<am_unscaled128, StoreOp, VTy, STy, SubRegIdx, simm9, STR>; 2863} 2864 2865let AddedComplexity = 19 in { 2866 defm : VecStoreULane0Pat<truncstorei16, v8i16, i32, hsub, STURHi>; 2867 defm : VecStoreULane0Pat<store, v8f16, f16, hsub, STURHi>; 2868 defm : VecStoreULane0Pat<store, v4i32, i32, ssub, STURSi>; 2869 defm : VecStoreULane0Pat<store, v4f32, f32, ssub, STURSi>; 2870 defm : VecStoreULane0Pat<store, v2i64, i64, dsub, STURDi>; 2871 defm : VecStoreULane0Pat<store, v2f64, f64, dsub, STURDi>; 2872} 2873 2874//--- 2875// STR mnemonics fall back to STUR for negative or unaligned offsets. 2876def : InstAlias<"str $Rt, [$Rn, $offset]", 2877 (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>; 2878def : InstAlias<"str $Rt, [$Rn, $offset]", 2879 (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>; 2880def : InstAlias<"str $Rt, [$Rn, $offset]", 2881 (STURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>; 2882def : InstAlias<"str $Rt, [$Rn, $offset]", 2883 (STURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>; 2884def : InstAlias<"str $Rt, [$Rn, $offset]", 2885 (STURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>; 2886def : InstAlias<"str $Rt, [$Rn, $offset]", 2887 (STURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>; 2888def : InstAlias<"str $Rt, [$Rn, $offset]", 2889 (STURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>; 2890 2891def : InstAlias<"strb $Rt, [$Rn, $offset]", 2892 (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>; 2893def : InstAlias<"strh $Rt, [$Rn, $offset]", 2894 (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>; 2895 2896//--- 2897// (unscaled immediate, unprivileged) 2898defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">; 2899defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">; 2900 2901defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">; 2902defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">; 2903 2904//--- 2905// (immediate pre-indexed) 2906def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32z, "str", pre_store, i32>; 2907def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64z, "str", pre_store, i64>; 2908def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8Op, "str", pre_store, untyped>; 2909def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16Op, "str", pre_store, f16>; 2910def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32Op, "str", pre_store, f32>; 2911def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64Op, "str", pre_store, f64>; 2912def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128Op, "str", pre_store, f128>; 2913 2914def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32z, "strb", pre_truncsti8, i32>; 2915def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32z, "strh", pre_truncsti16, i32>; 2916 2917// truncstore i64 2918def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off), 2919 (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr, 2920 simm9:$off)>; 2921def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off), 2922 (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr, 2923 simm9:$off)>; 2924def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off), 2925 (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr, 2926 simm9:$off)>; 2927 2928def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off), 2929 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>; 2930def : 
Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off), 2931 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>; 2932def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off), 2933 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>; 2934def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off), 2935 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>; 2936def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off), 2937 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>; 2938def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off), 2939 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>; 2940def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off), 2941 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>; 2942 2943def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off), 2944 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>; 2945def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off), 2946 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>; 2947def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off), 2948 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>; 2949def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off), 2950 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>; 2951def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off), 2952 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>; 2953def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off), 2954 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>; 2955def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off), 2956 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>; 2957 2958//--- 2959// (immediate post-indexed) 2960def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32z, "str", post_store, i32>; 2961def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64z, "str", post_store, i64>; 2962def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8Op, "str", post_store, untyped>; 2963def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16Op, "str", post_store, f16>; 2964def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32Op, "str", post_store, f32>; 2965def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64Op, "str", post_store, f64>; 2966def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128Op, "str", post_store, f128>; 2967 2968def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32z, "strb", post_truncsti8, i32>; 2969def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32z, "strh", post_truncsti16, i32>; 2970 2971// truncstore i64 2972def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off), 2973 (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr, 2974 simm9:$off)>; 2975def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off), 2976 (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr, 2977 simm9:$off)>; 2978def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off), 2979 (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr, 2980 simm9:$off)>; 2981 2982def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off), 2983 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>; 2984def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off), 2985 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>; 2986def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off), 2987 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>; 2988def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off), 2989 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>; 2990def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off), 2991 (STRDpost FPR64:$Rt, 
GPR64sp:$addr, simm9:$off)>; 2992def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off), 2993 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>; 2994def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off), 2995 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>; 2996 2997def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off), 2998 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>; 2999def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off), 3000 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>; 3001def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off), 3002 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>; 3003def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off), 3004 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>; 3005def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off), 3006 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>; 3007def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off), 3008 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>; 3009def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off), 3010 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>; 3011 3012//===----------------------------------------------------------------------===// 3013// Load/store exclusive instructions. 3014//===----------------------------------------------------------------------===// 3015 3016def LDARW : LoadAcquire <0b10, 1, 1, 0, 1, GPR32, "ldar">; 3017def LDARX : LoadAcquire <0b11, 1, 1, 0, 1, GPR64, "ldar">; 3018def LDARB : LoadAcquire <0b00, 1, 1, 0, 1, GPR32, "ldarb">; 3019def LDARH : LoadAcquire <0b01, 1, 1, 0, 1, GPR32, "ldarh">; 3020 3021def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">; 3022def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">; 3023def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">; 3024def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">; 3025 3026def LDXRW : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">; 3027def LDXRX : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">; 3028def LDXRB : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">; 3029def LDXRH : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">; 3030 3031def STLRW : StoreRelease <0b10, 1, 0, 0, 1, GPR32, "stlr">; 3032def STLRX : StoreRelease <0b11, 1, 0, 0, 1, GPR64, "stlr">; 3033def STLRB : StoreRelease <0b00, 1, 0, 0, 1, GPR32, "stlrb">; 3034def STLRH : StoreRelease <0b01, 1, 0, 0, 1, GPR32, "stlrh">; 3035 3036def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">; 3037def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">; 3038def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">; 3039def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">; 3040 3041def STXRW : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">; 3042def STXRX : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">; 3043def STXRB : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">; 3044def STXRH : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">; 3045 3046def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">; 3047def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">; 3048 3049def LDXPW : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">; 3050def LDXPX : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">; 3051 3052def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">; 3053def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">; 3054 3055def STXPW : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">; 3056def STXPX : 
              StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;

let Predicates = [HasLOR] in {
  // v8.1a "Limited Order Region" extension load-acquire instructions
  def LDLARW : LoadAcquire <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
  def LDLARX : LoadAcquire <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
  def LDLARB : LoadAcquire <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
  def LDLARH : LoadAcquire <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;

  // v8.1a "Limited Order Region" extension store-release instructions
  def STLLRW : StoreRelease <0b10, 1, 0, 0, 0, GPR32, "stllr">;
  def STLLRX : StoreRelease <0b11, 1, 0, 0, 0, GPR64, "stllr">;
  def STLLRB : StoreRelease <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
  def STLLRH : StoreRelease <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
}

//===----------------------------------------------------------------------===//
// Scaled floating point to integer conversion instructions.
//===----------------------------------------------------------------------===//

defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", fp_to_uint>;

multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
  def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
  def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
  def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
  def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
  def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
  def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;

  def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
  def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
  def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
}

defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;
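
// Editorial sketch of the fixed-point folding above: when the intrinsic's
// operand is a multiply by a power of two, e.g.
//   (i32 (int_aarch64_neon_fcvtzs (fmul f32:$Rn, (fpimm 16.0))))
// the fixedpoint_f32_i32 operand matches 16.0 as a scale of #4, so this
// selects to the scaled form
//   fcvtzs w0, s0, #4
// instead of a separate fmul followed by an unscaled fcvtzs.
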
multiclass FPToIntegerPats<SDNode to_int, SDNode round, string INST> {
  def : Pat<(i32 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
}

defm : FPToIntegerPats<fp_to_sint, fceil, "FCVTPS">;
defm : FPToIntegerPats<fp_to_uint, fceil, "FCVTPU">;
defm : FPToIntegerPats<fp_to_sint, ffloor, "FCVTMS">;
defm : FPToIntegerPats<fp_to_uint, ffloor, "FCVTMU">;
defm : FPToIntegerPats<fp_to_sint, ftrunc, "FCVTZS">;
defm : FPToIntegerPats<fp_to_uint, ftrunc, "FCVTZU">;
defm : FPToIntegerPats<fp_to_sint, fround, "FCVTAS">;
defm : FPToIntegerPats<fp_to_uint, fround, "FCVTAU">;

let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (lround f16:$Rn)),
            (!cast<Instruction>(FCVTASUWHr) f16:$Rn)>;
  def : Pat<(i64 (lround f16:$Rn)),
            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
  def : Pat<(i64 (llround f16:$Rn)),
            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
}
def : Pat<(i32 (lround f32:$Rn)),
          (!cast<Instruction>(FCVTASUWSr) f32:$Rn)>;
def : Pat<(i32 (lround f64:$Rn)),
          (!cast<Instruction>(FCVTASUWDr) f64:$Rn)>;
def : Pat<(i64 (lround f32:$Rn)),
          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
def : Pat<(i64 (lround f64:$Rn)),
          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
def : Pat<(i64 (llround f32:$Rn)),
          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
def : Pat<(i64 (llround f64:$Rn)),
          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
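
// Editorial note: lround/llround map directly onto FCVTAS (convert rounding
// to nearest with ties away from zero), which is exactly the rounding lround
// requires regardless of the current mode, so e.g. "long lround(float f)"
// becomes a single instruction:
//   fcvtas x0, s0
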
//===----------------------------------------------------------------------===//
// Scaled integer to floating point conversion instructions.
//===----------------------------------------------------------------------===//

defm SCVTF : IntegerToFP<0, "scvtf", sint_to_fp>;
defm UCVTF : IntegerToFP<1, "ucvtf", uint_to_fp>;

//===----------------------------------------------------------------------===//
// Unscaled integer to floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FMOV : UnscaledConversion<"fmov">;

// Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
def FMOVH0 : Pseudo<(outs FPR16:$Rd), (ins), [(set f16:$Rd, (fpimm0))]>,
             Sched<[WriteF]>, Requires<[HasFullFP16]>;
def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
             Sched<[WriteF]>;
def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
             Sched<[WriteF]>;
}
// Similarly add aliases
def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
      Requires<[HasFullFP16]>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;

//===----------------------------------------------------------------------===//
// Floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FCVT : FPConversion<"fcvt">;

//===----------------------------------------------------------------------===//
// Floating point single operand instructions.
//===----------------------------------------------------------------------===//

defm FABS : SingleOperandFPData<0b0001, "fabs", fabs>;
defm FMOV : SingleOperandFPData<0b0000, "fmov">;
defm FNEG : SingleOperandFPData<0b0010, "fneg", fneg>;
defm FRINTA : SingleOperandFPData<0b1100, "frinta", fround>;
defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
defm FRINTN : SingleOperandFPData<0b1000, "frintn", int_aarch64_neon_frintn>;
defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;

def : Pat<(v1f64 (int_aarch64_neon_frintn (v1f64 FPR64:$Rn))),
          (FRINTNDr FPR64:$Rn)>;

defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;

let SchedRW = [WriteFDiv] in {
defm FSQRT : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
}

let Predicates = [HasFRInt3264] in {
  defm FRINT32Z : FRIntNNT<0b00, "frint32z">;
  defm FRINT64Z : FRIntNNT<0b10, "frint64z">;
  defm FRINT32X : FRIntNNT<0b01, "frint32x">;
  defm FRINT64X : FRIntNNT<0b11, "frint64x">;
} // HasFRInt3264

let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (lrint f16:$Rn)),
            (FCVTZSUWHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
  def : Pat<(i64 (lrint f16:$Rn)),
            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
  def : Pat<(i64 (llrint f16:$Rn)),
            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
}
def : Pat<(i32 (lrint f32:$Rn)),
          (FCVTZSUWSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i32 (lrint f64:$Rn)),
          (FCVTZSUWDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
def : Pat<(i64 (lrint f32:$Rn)),
          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i64 (lrint f64:$Rn)),
          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
def : Pat<(i64 (llrint f32:$Rn)),
          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i64 (llrint f64:$Rn)),
          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
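
// Editorial note: unlike lround above, lrint/llrint must honour the current
// rounding mode and raise FE_INEXACT, so the patterns above deliberately use
// a two-instruction sequence: FRINTX rounds to an integral value in the
// current mode (with inexact signalling) and FCVTZS then converts the
// already-integral result exactly. For "long lrint(double d)":
//   frintx d0, d0
//   fcvtzs x0, d0
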
//===----------------------------------------------------------------------===//
// Floating point two operand instructions.
//===----------------------------------------------------------------------===//

defm FADD : TwoOperandFPData<0b0010, "fadd", fadd>;
let SchedRW = [WriteFDiv] in {
defm FDIV : TwoOperandFPData<0b0001, "fdiv", fdiv>;
}
defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", fmaxnum>;
defm FMAX : TwoOperandFPData<0b0100, "fmax", fmaximum>;
defm FMINNM : TwoOperandFPData<0b0111, "fminnm", fminnum>;
defm FMIN : TwoOperandFPData<0b0101, "fmin", fminimum>;
let SchedRW = [WriteFMul] in {
defm FMUL : TwoOperandFPData<0b0000, "fmul", fmul>;
defm FNMUL : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
}
defm FSUB : TwoOperandFPData<0b0011, "fsub", fsub>;

def : Pat<(v1f64 (fmaximum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminimum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;

//===----------------------------------------------------------------------===//
// Floating point three operand instructions.
//===----------------------------------------------------------------------===//

defm FMADD : ThreeOperandFPData<0, 0, "fmadd", fma>;
defm FMSUB : ThreeOperandFPData<0, 1, "fmsub",
     TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
     TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
     TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;

// The following def pats catch the case where the LHS of an FMA is negated.
// The TriOpFrag above catches the case where the middle operand is negated.

// N.b. FMSUB etc have the accumulator at the *end* of (ins), unlike
// the NEON variant.
def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
          (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
          (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

// We handled -(a + b*c) for FNMADD above, now it's time for "(-a) + (-b)*c" and
// "(-a) + b*(-c)".
def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

def : Pat<(f32 (fma FPR32:$Rn, (fneg FPR32:$Rm), (fneg FPR32:$Ra))),
          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma FPR64:$Rn, (fneg FPR64:$Rm), (fneg FPR64:$Ra))),
          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

//===----------------------------------------------------------------------===//
// Floating point comparison instructions.
//===----------------------------------------------------------------------===//

defm FCMPE : FPComparison<1, "fcmpe">;
defm FCMP : FPComparison<0, "fcmp", AArch64fcmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional comparison instructions.
3313//===----------------------------------------------------------------------===// 3314 3315defm FCCMPE : FPCondComparison<1, "fccmpe">; 3316defm FCCMP : FPCondComparison<0, "fccmp", AArch64fccmp>; 3317 3318//===----------------------------------------------------------------------===// 3319// Floating point conditional select instruction. 3320//===----------------------------------------------------------------------===// 3321 3322defm FCSEL : FPCondSelect<"fcsel">; 3323 3324// CSEL instructions providing f128 types need to be handled by a 3325// pseudo-instruction since the eventual code will need to introduce basic 3326// blocks and control flow. 3327def F128CSEL : Pseudo<(outs FPR128:$Rd), 3328 (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond), 3329 [(set (f128 FPR128:$Rd), 3330 (AArch64csel FPR128:$Rn, FPR128:$Rm, 3331 (i32 imm:$cond), NZCV))]> { 3332 let Uses = [NZCV]; 3333 let usesCustomInserter = 1; 3334 let hasNoSchedulingInfo = 1; 3335} 3336 3337//===----------------------------------------------------------------------===// 3338// Instructions used for emitting unwind opcodes on ARM64 Windows. 3339//===----------------------------------------------------------------------===// 3340let isPseudo = 1 in { 3341 def SEH_StackAlloc : Pseudo<(outs), (ins i32imm:$size), []>, Sched<[]>; 3342 def SEH_SaveFPLR : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>; 3343 def SEH_SaveFPLR_X : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>; 3344 def SEH_SaveReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>; 3345 def SEH_SaveReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>; 3346 def SEH_SaveRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>; 3347 def SEH_SaveRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>; 3348 def SEH_SaveFReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>; 3349 def SEH_SaveFReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>; 3350 def SEH_SaveFRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>; 3351 def SEH_SaveFRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>; 3352 def SEH_SetFP : Pseudo<(outs), (ins), []>, Sched<[]>; 3353 def SEH_AddFP : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>; 3354 def SEH_Nop : Pseudo<(outs), (ins), []>, Sched<[]>; 3355 def SEH_PrologEnd : Pseudo<(outs), (ins), []>, Sched<[]>; 3356 def SEH_EpilogStart : Pseudo<(outs), (ins), []>, Sched<[]>; 3357 def SEH_EpilogEnd : Pseudo<(outs), (ins), []>, Sched<[]>; 3358} 3359 3360// Pseudo instructions for Windows EH 3361//===----------------------------------------------------------------------===// 3362let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1, 3363 isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1, isPseudo = 1 in { 3364 def CLEANUPRET : Pseudo<(outs), (ins), [(cleanupret)]>, Sched<[]>; 3365 let usesCustomInserter = 1 in 3366 def CATCHRET : Pseudo<(outs), (ins am_brcond:$dst, am_brcond:$src), [(catchret bb:$dst, bb:$src)]>, 3367 Sched<[]>; 3368} 3369 3370let hasSideEffects = 1, hasCtrlDep = 1, isCodeGenOnly = 1, 3371 usesCustomInserter = 1 in 3372def CATCHPAD : Pseudo<(outs), (ins), [(catchpad)]>, Sched<[]>; 3373 3374//===----------------------------------------------------------------------===// 3375// Floating point immediate move. 
3376//===----------------------------------------------------------------------===// 3377 3378let isReMaterializable = 1 in { 3379defm FMOV : FPMoveImmediate<"fmov">; 3380} 3381 3382//===----------------------------------------------------------------------===// 3383// Advanced SIMD two vector instructions. 3384//===----------------------------------------------------------------------===// 3385 3386defm UABDL : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl", 3387 int_aarch64_neon_uabd>; 3388// Match UABDL in log2-shuffle patterns. 3389def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)), 3390 (zext (v8i8 V64:$opB))))), 3391 (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>; 3392def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))), 3393 (v8i16 (add (sub (zext (v8i8 V64:$opA)), 3394 (zext (v8i8 V64:$opB))), 3395 (AArch64vashr v8i16:$src, (i32 15))))), 3396 (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>; 3397def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 V128:$opA)), 3398 (zext (extract_high_v16i8 V128:$opB))))), 3399 (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>; 3400def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))), 3401 (v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)), 3402 (zext (extract_high_v16i8 V128:$opB))), 3403 (AArch64vashr v8i16:$src, (i32 15))))), 3404 (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>; 3405def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)), 3406 (zext (v4i16 V64:$opB))))), 3407 (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>; 3408def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 V128:$opA)), 3409 (zext (extract_high_v8i16 V128:$opB))))), 3410 (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>; 3411def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)), 3412 (zext (v2i32 V64:$opB))))), 3413 (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>; 3414def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 V128:$opA)), 3415 (zext (extract_high_v4i32 V128:$opB))))), 3416 (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>; 3417 3418defm ABS : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>; 3419defm CLS : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>; 3420defm CLZ : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>; 3421defm CMEQ : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>; 3422defm CMGE : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>; 3423defm CMGT : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>; 3424defm CMLE : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>; 3425defm CMLT : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>; 3426defm CNT : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>; 3427defm FABS : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>; 3428 3429defm FCMEQ : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>; 3430defm FCMGE : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>; 3431defm FCMGT : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>; 3432defm FCMLE : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>; 3433defm FCMLT : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>; 3434defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>; 3435defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>; 3436defm FCVTL : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">; 3437def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))), 3438 (FCVTLv4i16 V64:$Rn)>; 3439def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn), 3440 (i64 4)))), 3441 (FCVTLv8i16 V128:$Rn)>; 3442def : Pat<(v2f64 (fpextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>; 3443def : Pat<(v2f64 (fpextend (v2f32 
(extract_subvector (v4f32 V128:$Rn), 3444 (i64 2))))), 3445 (FCVTLv4i32 V128:$Rn)>; 3446 3447def : Pat<(v4f32 (fpextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>; 3448def : Pat<(v4f32 (fpextend (v4f16 (extract_subvector (v8f16 V128:$Rn), 3449 (i64 4))))), 3450 (FCVTLv8i16 V128:$Rn)>; 3451 3452defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>; 3453defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>; 3454defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>; 3455defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>; 3456defm FCVTN : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">; 3457def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))), 3458 (FCVTNv4i16 V128:$Rn)>; 3459def : Pat<(concat_vectors V64:$Rd, 3460 (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))), 3461 (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>; 3462def : Pat<(v2f32 (fpround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>; 3463def : Pat<(v4f16 (fpround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>; 3464def : Pat<(concat_vectors V64:$Rd, (v2f32 (fpround (v2f64 V128:$Rn)))), 3465 (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>; 3466defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>; 3467defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>; 3468defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn", 3469 int_aarch64_neon_fcvtxn>; 3470defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>; 3471defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>; 3472 3473def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>; 3474def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>; 3475def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>; 3476def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>; 3477def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>; 3478 3479def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>; 3480def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>; 3481def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>; 3482def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>; 3483def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>; 3484 3485defm FNEG : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>; 3486defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>; 3487defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", fround>; 3488defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>; 3489defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>; 3490defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", int_aarch64_neon_frintn>; 3491defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>; 3492defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>; 3493defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>; 3494 3495let Predicates = [HasFRInt3264] in { 3496 defm FRINT32Z : FRIntNNTVector<0, 0, "frint32z">; 3497 defm FRINT64Z : FRIntNNTVector<0, 1, "frint64z">; 3498 defm FRINT32X : FRIntNNTVector<1, 0, "frint32x">; 3499 defm FRINT64X : FRIntNNTVector<1, 1, "frint64x">; 3500} // HasFRInt3264 3501 3502defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>; 3503defm FSQRT : 
SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>; 3504defm NEG : SIMDTwoVectorBHSD<1, 0b01011, "neg", 3505 UnOpFrag<(sub immAllZerosV, node:$LHS)> >; 3506defm NOT : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>; 3507// Aliases for MVN -> NOT. 3508def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}", 3509 (NOTv8i8 V64:$Vd, V64:$Vn)>; 3510def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}", 3511 (NOTv16i8 V128:$Vd, V128:$Vn)>; 3512 3513def : Pat<(AArch64neg (v8i8 V64:$Rn)), (NEGv8i8 V64:$Rn)>; 3514def : Pat<(AArch64neg (v16i8 V128:$Rn)), (NEGv16i8 V128:$Rn)>; 3515def : Pat<(AArch64neg (v4i16 V64:$Rn)), (NEGv4i16 V64:$Rn)>; 3516def : Pat<(AArch64neg (v8i16 V128:$Rn)), (NEGv8i16 V128:$Rn)>; 3517def : Pat<(AArch64neg (v2i32 V64:$Rn)), (NEGv2i32 V64:$Rn)>; 3518def : Pat<(AArch64neg (v4i32 V128:$Rn)), (NEGv4i32 V128:$Rn)>; 3519def : Pat<(AArch64neg (v2i64 V128:$Rn)), (NEGv2i64 V128:$Rn)>; 3520 3521def : Pat<(AArch64not (v8i8 V64:$Rn)), (NOTv8i8 V64:$Rn)>; 3522def : Pat<(AArch64not (v16i8 V128:$Rn)), (NOTv16i8 V128:$Rn)>; 3523def : Pat<(AArch64not (v4i16 V64:$Rn)), (NOTv8i8 V64:$Rn)>; 3524def : Pat<(AArch64not (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>; 3525def : Pat<(AArch64not (v2i32 V64:$Rn)), (NOTv8i8 V64:$Rn)>; 3526def : Pat<(AArch64not (v1i64 V64:$Rn)), (NOTv8i8 V64:$Rn)>; 3527def : Pat<(AArch64not (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>; 3528def : Pat<(AArch64not (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>; 3529 3530def : Pat<(vnot (v4i16 V64:$Rn)), (NOTv8i8 V64:$Rn)>; 3531def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>; 3532def : Pat<(vnot (v2i32 V64:$Rn)), (NOTv8i8 V64:$Rn)>; 3533def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>; 3534def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>; 3535 3536defm RBIT : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", int_aarch64_neon_rbit>; 3537defm REV16 : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>; 3538defm REV32 : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>; 3539defm REV64 : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>; 3540defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp", 3541 BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >; 3542defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>; 3543defm SCVTF : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>; 3544defm SHLL : SIMDVectorLShiftLongBySizeBHS; 3545defm SQABS : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>; 3546defm SQNEG : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>; 3547defm SQXTN : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>; 3548defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>; 3549defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>; 3550defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp", 3551 BinOpFrag<(add node:$LHS, (int_aarch64_neon_uaddlp node:$RHS))> >; 3552defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp", 3553 int_aarch64_neon_uaddlp>; 3554defm UCVTF : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>; 3555defm UQXTN : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>; 3556defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>; 3557defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>; 3558defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>; 3559defm XTN : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>; 3560 3561def : Pat<(v4f16 (AArch64rev32 V64:$Rn)), (REV32v4i16 V64:$Rn)>; 
3562def : Pat<(v4f16 (AArch64rev64 V64:$Rn)), (REV64v4i16 V64:$Rn)>; 3563def : Pat<(v8f16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>; 3564def : Pat<(v8f16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>; 3565def : Pat<(v2f32 (AArch64rev64 V64:$Rn)), (REV64v2i32 V64:$Rn)>; 3566def : Pat<(v4f32 (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>; 3567 3568// Patterns for vector long shift (by element width). These need to match all 3569// three of zext, sext and anyext so it's easier to pull the patterns out of the 3570// definition. 3571multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> { 3572 def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)), 3573 (SHLLv8i8 V64:$Rn)>; 3574 def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)), 3575 (SHLLv16i8 V128:$Rn)>; 3576 def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)), 3577 (SHLLv4i16 V64:$Rn)>; 3578 def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)), 3579 (SHLLv8i16 V128:$Rn)>; 3580 def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)), 3581 (SHLLv2i32 V64:$Rn)>; 3582 def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)), 3583 (SHLLv4i32 V128:$Rn)>; 3584} 3585 3586defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>; 3587defm : SIMDVectorLShiftLongBySizeBHSPats<zext>; 3588defm : SIMDVectorLShiftLongBySizeBHSPats<sext>; 3589 3590//===----------------------------------------------------------------------===// 3591// Advanced SIMD three vector instructions. 3592//===----------------------------------------------------------------------===// 3593 3594defm ADD : SIMDThreeSameVector<0, 0b10000, "add", add>; 3595defm ADDP : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>; 3596defm CMEQ : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>; 3597defm CMGE : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>; 3598defm CMGT : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>; 3599defm CMHI : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>; 3600defm CMHS : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>; 3601defm CMTST : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>; 3602defm FABD : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>; 3603let Predicates = [HasNEON] in { 3604foreach VT = [ v2f32, v4f32, v2f64 ] in 3605def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>; 3606} 3607let Predicates = [HasNEON, HasFullFP16] in { 3608foreach VT = [ v4f16, v8f16 ] in 3609def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>; 3610} 3611defm FACGE : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>; 3612defm FACGT : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>; 3613defm FADDP : SIMDThreeSameVectorFP<1,0,0b010,"faddp",int_aarch64_neon_faddp>; 3614defm FADD : SIMDThreeSameVectorFP<0,0,0b010,"fadd", fadd>; 3615defm FCMEQ : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>; 3616defm FCMGE : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>; 3617defm FCMGT : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>; 3618defm FDIV : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", fdiv>; 3619defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>; 3620defm FMAXNM : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", fmaxnum>; 3621defm FMAXP : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>; 3622defm FMAX : 
SIMDThreeSameVectorFP<0,0,0b110,"fmax", fmaximum>; 3623defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>; 3624defm FMINNM : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", fminnum>; 3625defm FMINP : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>; 3626defm FMIN : SIMDThreeSameVectorFP<0,1,0b110,"fmin", fminimum>; 3627 3628// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the 3629// instruction expects the addend first, while the fma intrinsic puts it last. 3630defm FMLA : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla", 3631 TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >; 3632defm FMLS : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls", 3633 TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >; 3634 3635// The following def pats catch the case where the LHS of an FMA is negated. 3636// The TriOpFrag above catches the case where the middle operand is negated. 3637def : Pat<(v2f32 (fma (fneg V64:$Rn), V64:$Rm, V64:$Rd)), 3638 (FMLSv2f32 V64:$Rd, V64:$Rn, V64:$Rm)>; 3639 3640def : Pat<(v4f32 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)), 3641 (FMLSv4f32 V128:$Rd, V128:$Rn, V128:$Rm)>; 3642 3643def : Pat<(v2f64 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)), 3644 (FMLSv2f64 V128:$Rd, V128:$Rn, V128:$Rm)>; 3645 3646defm FMULX : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>; 3647defm FMUL : SIMDThreeSameVectorFP<1,0,0b011,"fmul", fmul>; 3648defm FRECPS : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>; 3649defm FRSQRTS : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>; 3650defm FSUB : SIMDThreeSameVectorFP<0,1,0b010,"fsub", fsub>; 3651defm MLA : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla", 3652 TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))> >; 3653defm MLS : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls", 3654 TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))> >; 3655defm MUL : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>; 3656defm PMUL : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>; 3657defm SABA : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba", 3658 TriOpFrag<(add node:$LHS, (int_aarch64_neon_sabd node:$MHS, node:$RHS))> >; 3659defm SABD : SIMDThreeSameVectorBHS<0,0b01110,"sabd", int_aarch64_neon_sabd>; 3660defm SHADD : SIMDThreeSameVectorBHS<0,0b00000,"shadd", int_aarch64_neon_shadd>; 3661defm SHSUB : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>; 3662defm SMAXP : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>; 3663defm SMAX : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>; 3664defm SMINP : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>; 3665defm SMIN : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>; 3666defm SQADD : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>; 3667defm SQDMULH : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>; 3668defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>; 3669defm SQRSHL : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>; 3670defm SQSHL : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>; 3671defm SQSUB : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>; 3672defm SRHADD : SIMDThreeSameVectorBHS<0,0b00010,"srhadd",int_aarch64_neon_srhadd>; 3673defm SRSHL : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>; 3674defm SSHL : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>; 3675defm SUB : 
defm FMULX    : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
defm FMUL     : SIMDThreeSameVectorFP<1,0,0b011,"fmul", fmul>;
defm FRECPS   : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
defm FRSQRTS  : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
defm FSUB     : SIMDThreeSameVectorFP<0,1,0b010,"fsub", fsub>;
defm MLA      : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla",
      TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))> >;
defm MLS      : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls",
      TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))> >;
defm MUL      : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
defm PMUL     : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
defm SABA     : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
      TriOpFrag<(add node:$LHS, (int_aarch64_neon_sabd node:$MHS, node:$RHS))> >;
defm SABD     : SIMDThreeSameVectorBHS<0,0b01110,"sabd", int_aarch64_neon_sabd>;
defm SHADD    : SIMDThreeSameVectorBHS<0,0b00000,"shadd", int_aarch64_neon_shadd>;
defm SHSUB    : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
defm SMAXP    : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
defm SMAX     : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
defm SMINP    : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
defm SMIN     : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
defm SQADD    : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
defm SRHADD   : SIMDThreeSameVectorBHS<0,0b00010,"srhadd",int_aarch64_neon_srhadd>;
defm SRSHL    : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeSameVector<1,0b10000,"sub", sub>;
defm UABA     : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
      TriOpFrag<(add node:$LHS, (int_aarch64_neon_uabd node:$MHS, node:$RHS))> >;
defm UABD     : SIMDThreeSameVectorBHS<1,0b01110,"uabd", int_aarch64_neon_uabd>;
defm UHADD    : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", int_aarch64_neon_uhadd>;
defm UHSUB    : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
defm UMAXP    : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
defm UMAX     : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
defm UMINP    : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
defm UMIN     : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
defm UQADD    : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
defm URHADD   : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", int_aarch64_neon_urhadd>;
defm URSHL    : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
                                                  int_aarch64_neon_sqadd>;
defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
                                                  int_aarch64_neon_sqsub>;

defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
                                  BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
defm BIF : SIMDLogicalThreeVector<1, 0b11, "bif">;
defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl",
    TriOpFrag<(or (and node:$LHS, node:$MHS), (and (vnot node:$LHS), node:$RHS))>>;
defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
                                  BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;

def : Pat<(AArch64bsl (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsl (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsl (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsl (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;

def : Pat<(AArch64bsl (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsl (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsl (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsl (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
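// For example (illustrative; register numbers are arbitrary): a bitwise
// select with the mask in v0,
//   %r = or (and %mask, %a), (and (not %mask), %b)
// matches the BSL TriOpFrag and selects to "bsl v0.16b, v1.16b, v2.16b",
// with the mask register tied as the destination.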
InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}", 3738 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>; 3739def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}", 3740 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>; 3741def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}", 3742 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>; 3743def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}", 3744 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>; 3745 3746def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" # 3747 "|cmls.8b\t$dst, $src1, $src2}", 3748 (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>; 3749def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" # 3750 "|cmls.16b\t$dst, $src1, $src2}", 3751 (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>; 3752def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" # 3753 "|cmls.4h\t$dst, $src1, $src2}", 3754 (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>; 3755def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" # 3756 "|cmls.8h\t$dst, $src1, $src2}", 3757 (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>; 3758def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" # 3759 "|cmls.2s\t$dst, $src1, $src2}", 3760 (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>; 3761def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" # 3762 "|cmls.4s\t$dst, $src1, $src2}", 3763 (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>; 3764def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" # 3765 "|cmls.2d\t$dst, $src1, $src2}", 3766 (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>; 3767 3768def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" # 3769 "|cmlo.8b\t$dst, $src1, $src2}", 3770 (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>; 3771def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" # 3772 "|cmlo.16b\t$dst, $src1, $src2}", 3773 (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>; 3774def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" # 3775 "|cmlo.4h\t$dst, $src1, $src2}", 3776 (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>; 3777def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" # 3778 "|cmlo.8h\t$dst, $src1, $src2}", 3779 (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>; 3780def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" # 3781 "|cmlo.2s\t$dst, $src1, $src2}", 3782 (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>; 3783def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" # 3784 "|cmlo.4s\t$dst, $src1, $src2}", 3785 (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>; 3786def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" # 3787 "|cmlo.2d\t$dst, $src1, $src2}", 3788 (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>; 3789 3790def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" # 3791 "|cmle.8b\t$dst, $src1, $src2}", 3792 (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>; 3793def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" # 3794 "|cmle.16b\t$dst, $src1, $src2}", 3795 (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>; 3796def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" # 3797 "|cmle.4h\t$dst, $src1, $src2}", 3798 (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>; 3799def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" # 3800 "|cmle.8h\t$dst, $src1, $src2}", 3801 (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>; 3802def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" # 3803 "|cmle.2s\t$dst, $src1, $src2}", 3804 (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>; 3805def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" # 3806 "|cmle.4s\t$dst, $src1, $src2}", 3807 (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>; 3808def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" # 
3809 "|cmle.2d\t$dst, $src1, $src2}", 3810 (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>; 3811 3812def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" # 3813 "|cmlt.8b\t$dst, $src1, $src2}", 3814 (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>; 3815def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" # 3816 "|cmlt.16b\t$dst, $src1, $src2}", 3817 (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>; 3818def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" # 3819 "|cmlt.4h\t$dst, $src1, $src2}", 3820 (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>; 3821def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" # 3822 "|cmlt.8h\t$dst, $src1, $src2}", 3823 (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>; 3824def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" # 3825 "|cmlt.2s\t$dst, $src1, $src2}", 3826 (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>; 3827def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" # 3828 "|cmlt.4s\t$dst, $src1, $src2}", 3829 (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>; 3830def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" # 3831 "|cmlt.2d\t$dst, $src1, $src2}", 3832 (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>; 3833 3834let Predicates = [HasNEON, HasFullFP16] in { 3835def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" # 3836 "|fcmle.4h\t$dst, $src1, $src2}", 3837 (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>; 3838def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" # 3839 "|fcmle.8h\t$dst, $src1, $src2}", 3840 (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>; 3841} 3842def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" # 3843 "|fcmle.2s\t$dst, $src1, $src2}", 3844 (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>; 3845def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" # 3846 "|fcmle.4s\t$dst, $src1, $src2}", 3847 (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>; 3848def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" # 3849 "|fcmle.2d\t$dst, $src1, $src2}", 3850 (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>; 3851 3852let Predicates = [HasNEON, HasFullFP16] in { 3853def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" # 3854 "|fcmlt.4h\t$dst, $src1, $src2}", 3855 (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>; 3856def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" # 3857 "|fcmlt.8h\t$dst, $src1, $src2}", 3858 (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>; 3859} 3860def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" # 3861 "|fcmlt.2s\t$dst, $src1, $src2}", 3862 (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>; 3863def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" # 3864 "|fcmlt.4s\t$dst, $src1, $src2}", 3865 (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>; 3866def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" # 3867 "|fcmlt.2d\t$dst, $src1, $src2}", 3868 (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>; 3869 3870let Predicates = [HasNEON, HasFullFP16] in { 3871def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" # 3872 "|facle.4h\t$dst, $src1, $src2}", 3873 (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>; 3874def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" # 3875 "|facle.8h\t$dst, $src1, $src2}", 3876 (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>; 3877} 3878def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" # 3879 "|facle.2s\t$dst, $src1, $src2}", 3880 (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>; 3881def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" # 3882 "|facle.4s\t$dst, $src1, $src2}", 3883 (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>; 3884def : InstAlias<"{facle\t$dst.2d, 
let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
                "|facle.4h\t$dst, $src1, $src2}",
                (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
                "|facle.8h\t$dst, $src1, $src2}",
                (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
                "|facle.2s\t$dst, $src1, $src2}",
                (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
                "|facle.4s\t$dst, $src1, $src2}",
                (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
                "|facle.2d\t$dst, $src1, $src2}",
                (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
                "|faclt.4h\t$dst, $src1, $src2}",
                (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
                "|faclt.8h\t$dst, $src1, $src2}",
                (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
                "|faclt.2s\t$dst, $src1, $src2}",
                (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
                "|faclt.4s\t$dst, $src1, $src2}",
                (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
                "|faclt.2d\t$dst, $src1, $src2}",
                (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions.
//===----------------------------------------------------------------------===//

defm ADD   : SIMDThreeScalarD<0, 0b10000, "add", add>;
defm CMEQ  : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE  : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT  : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI  : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS  : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD  : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FABD64 FPR64:$Rn, FPR64:$Rm)>;
let Predicates = [HasFullFP16] in {
def : Pat<(fabs (fsub f16:$Rn, f16:$Rm)), (FABD16 f16:$Rn, f16:$Rm)>;
}
def : Pat<(fabs (fsub f32:$Rn, f32:$Rm)), (FABD32 f32:$Rn, f32:$Rm)>;
def : Pat<(fabs (fsub f64:$Rn, f64:$Rm)), (FABD64 f64:$Rn, f64:$Rm)>;
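// For example (illustrative; register numbers are arbitrary): for scalars
// the IR pair
//   %d = fsub float %a, %b
//   %r = call float @llvm.fabs.f32(float %d)
// collapses to a single "fabd s0, s1, s2" through the patterns above.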
"uqadd", int_aarch64_neon_uqadd>; 3945defm UQRSHL : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>; 3946defm UQSHL : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>; 3947defm UQSUB : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>; 3948defm URSHL : SIMDThreeScalarD< 1, 0b01010, "urshl", int_aarch64_neon_urshl>; 3949defm USHL : SIMDThreeScalarD< 1, 0b01000, "ushl", int_aarch64_neon_ushl>; 3950let Predicates = [HasRDM] in { 3951 defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">; 3952 defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">; 3953 def : Pat<(i32 (int_aarch64_neon_sqadd 3954 (i32 FPR32:$Rd), 3955 (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn), 3956 (i32 FPR32:$Rm))))), 3957 (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>; 3958 def : Pat<(i32 (int_aarch64_neon_sqsub 3959 (i32 FPR32:$Rd), 3960 (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn), 3961 (i32 FPR32:$Rm))))), 3962 (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>; 3963} 3964 3965def : InstAlias<"cmls $dst, $src1, $src2", 3966 (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>; 3967def : InstAlias<"cmle $dst, $src1, $src2", 3968 (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>; 3969def : InstAlias<"cmlo $dst, $src1, $src2", 3970 (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>; 3971def : InstAlias<"cmlt $dst, $src1, $src2", 3972 (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>; 3973def : InstAlias<"fcmle $dst, $src1, $src2", 3974 (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>; 3975def : InstAlias<"fcmle $dst, $src1, $src2", 3976 (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>; 3977def : InstAlias<"fcmlt $dst, $src1, $src2", 3978 (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>; 3979def : InstAlias<"fcmlt $dst, $src1, $src2", 3980 (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>; 3981def : InstAlias<"facle $dst, $src1, $src2", 3982 (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>; 3983def : InstAlias<"facle $dst, $src1, $src2", 3984 (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>; 3985def : InstAlias<"faclt $dst, $src1, $src2", 3986 (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>; 3987def : InstAlias<"faclt $dst, $src1, $src2", 3988 (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>; 3989 3990//===----------------------------------------------------------------------===// 3991// Advanced SIMD three scalar instructions (mixed operands). 3992//===----------------------------------------------------------------------===// 3993defm SQDMULL : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull", 3994 int_aarch64_neon_sqdmulls_scalar>; 3995defm SQDMLAL : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">; 3996defm SQDMLSL : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">; 3997 3998def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd), 3999 (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn), 4000 (i32 FPR32:$Rm))))), 4001 (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>; 4002def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd), 4003 (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn), 4004 (i32 FPR32:$Rm))))), 4005 (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>; 4006 4007//===----------------------------------------------------------------------===// 4008// Advanced SIMD two scalar instructions. 
def : InstAlias<"cmls $dst, $src1, $src2",
                (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmle $dst, $src1, $src2",
                (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlo $dst, $src1, $src2",
                (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlt $dst, $src1, $src2",
                (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions (mixed operands).
//===----------------------------------------------------------------------===//
defm SQDMULL : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
                                      int_aarch64_neon_sqdmulls_scalar>;
defm SQDMLAL : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
defm SQDMLSL : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;

def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
                 (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
                 (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;

//===----------------------------------------------------------------------===//
// Advanced SIMD two scalar instructions.
//===----------------------------------------------------------------------===//

defm ABS     : SIMDTwoScalarD<    0, 0b01011, "abs", abs>;
defm CMEQ    : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE    : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT    : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE    : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT    : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
defm FCMEQ   : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE   : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT   : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE   : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT   : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS  : SIMDFPTwoScalar<   0, 0, 0b11100, "fcvtas">;
defm FCVTAU  : SIMDFPTwoScalar<   1, 0, 0b11100, "fcvtau">;
defm FCVTMS  : SIMDFPTwoScalar<   0, 0, 0b11011, "fcvtms">;
defm FCVTMU  : SIMDFPTwoScalar<   1, 0, 0b11011, "fcvtmu">;
defm FCVTNS  : SIMDFPTwoScalar<   0, 0, 0b11010, "fcvtns">;
defm FCVTNU  : SIMDFPTwoScalar<   1, 0, 0b11010, "fcvtnu">;
defm FCVTPS  : SIMDFPTwoScalar<   0, 1, 0b11010, "fcvtps">;
defm FCVTPU  : SIMDFPTwoScalar<   1, 1, 0b11010, "fcvtpu">;
def  FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
defm FCVTZS  : SIMDFPTwoScalar<   0, 1, 0b11011, "fcvtzs">;
defm FCVTZU  : SIMDFPTwoScalar<   1, 1, 0b11011, "fcvtzu">;
defm FRECPE  : SIMDFPTwoScalar<   0, 1, 0b11101, "frecpe">;
defm FRECPX  : SIMDFPTwoScalar<   0, 1, 0b11111, "frecpx">;
defm FRSQRTE : SIMDFPTwoScalar<   1, 1, 0b11101, "frsqrte">;
defm NEG     : SIMDTwoScalarD<    1, 0b01011, "neg",
                                  UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm SCVTF   : SIMDFPTwoScalarCVT<0, 0, 0b11101, "scvtf", AArch64sitof>;
defm SQABS   : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG   : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN   : SIMDTwoScalarMixedBHS<0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
defm SQXTUN  : SIMDTwoScalarMixedBHS<1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
defm SUQADD  : SIMDTwoScalarBHSDTied<0, 0b00011, "suqadd",
                                     int_aarch64_neon_suqadd>;
defm UCVTF   : SIMDFPTwoScalarCVT<1, 0, 0b11101, "ucvtf", AArch64uitof>;
defm UQXTN   : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
defm USQADD  : SIMDTwoScalarBHSDTied<1, 0b00011, "usqadd",
                                     int_aarch64_neon_usqadd>;

def : Pat<(AArch64neg (v1i64 V64:$Rn)), (NEGv1i64 V64:$Rn)>;

def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
          (FCVTASv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
          (FCVTAUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
          (FCVTMSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
          (FCVTMUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
          (FCVTNSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
          (FCVTNUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
          (FCVTPSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
          (FCVTPUv1i64 FPR64:$Rn)>;
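// Illustrative note: frecpe/frsqrte below produce initial estimates, while
// frecps/frsqrts compute the Newton-Raphson step factors (2 - a*b and
// (3 - a*b)/2 respectively), so fast-math style refinement chains select
// directly onto these instructions.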
def : Pat<(f16 (int_aarch64_neon_frecpe (f16 FPR16:$Rn))),
          (FRECPEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
          (FRECPEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
          (FRECPEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
          (FRECPEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frecps (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRECPS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frecps (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRECPSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frecps (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRECPSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frecps (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRECPS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frecps (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRECPSv2f64 FPR128:$Rn, FPR128:$Rm)>;

def : Pat<(f16 (int_aarch64_neon_frecpx (f16 FPR16:$Rn))),
          (FRECPXv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
          (FRECPXv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
          (FRECPXv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frsqrte (f16 FPR16:$Rn))),
          (FRSQRTEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
          (FRSQRTEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
          (FRSQRTEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
          (FRSQRTEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frsqrts (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRSQRTS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frsqrts (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRSQRTSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frsqrts (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRSQRTSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frsqrts (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRSQRTS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frsqrts (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRSQRTSv2f64 FPR128:$Rn, FPR128:$Rm)>;

// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// Here are the patterns for 8- and 16-bit integers to float.
// 8-bit -> float.
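// For example (illustrative; register numbers are arbitrary):
// "(float)*(uint8_t *)p" can be lowered to
//   ldr b0, [x0]      // zero-extending byte load straight into an FPR
//   ucvtf s0, s0
// avoiding an integer load followed by a GPR-to-FPR transfer.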
multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
                             SDPatternOperator loadop, Instruction UCVTF,
                             ROAddrMode ro, Instruction LDRW, Instruction LDRX,
                             SubRegIndex sub> {
  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
                                      ro.Wext:$extend))))),
            (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                  (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                                  sub))>;

  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
                                      ro.Xext:$extend))))),
            (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                  (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                                  sub))>;
}

defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
                         UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bit -> float.
defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
                         UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bit loads are handled in the target-specific DAG combine
// performIntToFpCombine.
// 64-bit integer to 32-bit floating point is not possible with UCVTF on the
// floating point registers, because source and destination must have the
// same size.

// Here are the patterns for 8-, 16-, 32- and 64-bit integers to double.
// 8-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
                         UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f64 (uint_to_fp (i32
               (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f64 (uint_to_fp (i32
               (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
                         UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f64 (uint_to_fp (i32
               (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f64 (uint_to_fp (i32
               (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bit -> double.
defm : UIntToFPROLoadPat<f64, i32, load,
                         UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
def : Pat <(f64 (uint_to_fp (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
def : Pat <(f64 (uint_to_fp (i32
               (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
// 64-bit -> double is handled in the target-specific DAG combine
// performIntToFpCombine.

//===----------------------------------------------------------------------===//
// Advanced SIMD three different-sized vector instructions.
//===----------------------------------------------------------------------===//

defm ADDHN  : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
defm SUBHN  : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
defm PMULL  : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
defm SABAL  : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
                                             int_aarch64_neon_sabd>;
defm SABDL  : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
                                         int_aarch64_neon_sabd>;
defm SADDL  : SIMDLongThreeVectorBHS<0, 0b0000, "saddl",
                BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
defm SADDW  : SIMDWideThreeVectorBHS<0, 0b0001, "saddw",
                BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
defm SMLAL  : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL  : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL  : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
                                               int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
                                               int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
                                     int_aarch64_neon_sqdmull>;
defm SSUBL  : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
                BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
defm SSUBW  : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
                BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
defm UABAL  : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
                                             int_aarch64_neon_uabd>;
defm UADDL  : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
                BinOpFrag<(add (zext node:$LHS), (zext node:$RHS))>>;
defm UADDW  : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
                BinOpFrag<(add node:$LHS, (zext node:$RHS))>>;
defm UMLAL  : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL  : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL  : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
defm USUBL  : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
                BinOpFrag<(sub (zext node:$LHS), (zext node:$RHS))>>;
defm USUBW  : SIMDWideThreeVectorBHS<1, 0b0011, "usubw",
                BinOpFrag<(sub node:$LHS, (zext node:$RHS))>>;
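// For example (illustrative; register numbers are arbitrary):
// "add (sext <8 x i8>), (sext <8 x i8>)" matches the SADDL BinOpFrag above
// and selects to one "saddl v0.8h, v1.8b, v2.8b" instead of two sshll's
// followed by an add.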
// Additional patterns for SMULL and UMULL
multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
          Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V64:$Rn, V64:$Rm)>;
}

defm : Neon_mul_widen_patterns<AArch64smull, SMULLv8i8_v8i16,
                               SMULLv4i16_v4i32, SMULLv2i32_v2i64>;
defm : Neon_mul_widen_patterns<AArch64umull, UMULLv8i8_v8i16,
                               UMULLv4i16_v4i32, UMULLv2i32_v2i64>;

// Additional patterns for SMLAL/SMLSL and UMLAL/UMLSL
multiclass Neon_mulacc_widen_patterns<SDPatternOperator opnode,
          Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V128:$Rd, V64:$Rn, V64:$Rm)>;
}

defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;

// Patterns for 64-bit pmull
def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
          (PMULLv1i64 V64:$Rn, V64:$Rm)>;
def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
                                    (extractelt (v2i64 V128:$Rm), (i64 1))),
          (PMULLv2i64 V128:$Rn, V128:$Rm)>;

// CodeGen patterns for addhn and subhn instructions, which can actually be
// written in LLVM IR without too much difficulty.
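// For example (an illustrative sketch; register numbers are arbitrary):
//   %sum  = add <8 x i16> %a, %b
//   %high = lshr <8 x i16> %sum, <i16 8, i16 8, i16 8, i16 8,
//                                 i16 8, i16 8, i16 8, i16 8>
//   %res  = trunc <8 x i16> %high to <8 x i8>
// selects to "addhn v0.8b, v1.8h, v2.8h" via the first pattern below.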
// ADDHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
          (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                             (i32 16))))),
          (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                             (i32 32))))),
          (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 8))))),
          (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 16))))),
          (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 32))))),
          (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

// SUBHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
          (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                             (i32 16))))),
          (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                             (i32 32))))),
          (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 8))))),
          (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 16))))),
          (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 32))))),
          (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;

//----------------------------------------------------------------------------
// AdvSIMD bitwise extract from vector instruction.
//----------------------------------------------------------------------------

defm EXT : SIMDBitwiseExtract<"ext">;

def AdjustExtImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 + N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
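// Illustrative note (register numbers are arbitrary):
// "ext v0.16b, v1.16b, v2.16b, #3" yields bytes 3..15 of v1 followed by
// bytes 0..2 of v2 (an extract from the concatenation of the two sources);
// AdjustExtImm above rebases a 64-bit EXT immediate by 8 so the same data
// can be picked out of a 128-bit EXT, as the patterns below exploit.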
multiclass ExtPat<ValueType VT64, ValueType VT128, int N> {
  def : Pat<(VT64 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
            (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
  def : Pat<(VT128 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
            (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
  // We use EXT to handle extract_subvector to copy the upper 64-bits of a
  // 128-bit vector.
  def : Pat<(VT64 (extract_subvector V128:$Rn, (i64 N))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
  // A 64-bit EXT of two halves of the same 128-bit register can be done as a
  // single 128-bit EXT.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 0)),
                              (extract_subvector V128:$Rn, (i64 N)),
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, imm:$imm), dsub)>;
  // A 64-bit EXT of the high half of a 128-bit register can be done using a
  // 128-bit EXT of the whole register with an adjustment to the immediate. The
  // top half of the other operand will be unset, but that doesn't matter as it
  // will not be used.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 N)),
                              V64:$Rm,
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn,
                                      (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                                      (AdjustExtImm imm:$imm)), dsub)>;
}

defm : ExtPat<v8i8, v16i8, 8>;
defm : ExtPat<v4i16, v8i16, 4>;
defm : ExtPat<v4f16, v8f16, 4>;
defm : ExtPat<v2i32, v4i32, 2>;
defm : ExtPat<v2f32, v4f32, 2>;
defm : ExtPat<v1i64, v2i64, 1>;
defm : ExtPat<v1f64, v2f64, 1>;

//----------------------------------------------------------------------------
// AdvSIMD zip vector
//----------------------------------------------------------------------------

defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;

//----------------------------------------------------------------------------
// AdvSIMD TBL/TBX instructions
//----------------------------------------------------------------------------

defm TBL : SIMDTableLookup<    0, "tbl">;
defm TBX : SIMDTableLookupTied<1, "tbx">;

def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBLv16i8One V128:$Ri, V128:$Rn)>;

def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
                  (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
                   (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;
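// For example (illustrative; register numbers are arbitrary):
// "tbl v0.8b, { v1.16b }, v2.8b" looks each byte of v2 up in the 16-byte
// table v1, producing zero for any index >= 16, while tbx instead leaves
// the corresponding destination byte unchanged -- which is why the TBX
// variants above are tied to the destination.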
//----------------------------------------------------------------------------
// AdvSIMD scalar CPY instruction
//----------------------------------------------------------------------------

defm CPY : SIMDScalarCPY<"cpy">;

//----------------------------------------------------------------------------
// AdvSIMD scalar pairwise instructions
//----------------------------------------------------------------------------

defm ADDP    : SIMDPairwiseScalarD<0, 0b11011, "addp">;
defm FADDP   : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
defm FMAXP   : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
defm FMINP   : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;
def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
          (FADDPv2i32p V64:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
          (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
          (FADDPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
          (FMAXNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
          (FMAXNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
          (FMAXPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
          (FMAXPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
          (FMINNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
          (FMINNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
          (FMINPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
          (FMINPv2i64p V128:$Rn)>;

//----------------------------------------------------------------------------
// AdvSIMD INS/DUP instructions
//----------------------------------------------------------------------------

def DUPv8i8gpr  : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;

def DUPv2i64lane : SIMDDup64FromElement;
def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
def DUPv8i8lane  : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;

// DUP from a 64-bit register to a 64-bit register is just a copy
def : Pat<(v1i64 (AArch64dup (i64 GPR64:$Rn))),
          (COPY_TO_REGCLASS GPR64:$Rn, FPR64)>;
def : Pat<(v1f64 (AArch64dup (f64 FPR64:$Rn))),
          (COPY_TO_REGCLASS FPR64:$Rn, FPR64)>;

def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
          (v2f32 (DUPv2i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
          (v4f32 (DUPv4i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
          (v2f64 (DUPv2i64lane
            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
            (i64 0)))>;
def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
          (v4f16 (DUPv4i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
          (v8f16 (DUPv8i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;

def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;

def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
          (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;
// If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
// instruction even if the types don't match: we just have to remap the lane
// carefully. N.b. this trick only applies to truncations.
def VecIndex_x2 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x4 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x8 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;

multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
                            ValueType Src128VT, ValueType ScalVT,
                            Instruction DUP, SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
                                                        imm:$idx)))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
                                                        imm:$idx)))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTruncPats<v8i8,  v4i16, v8i16, i32, DUPv8i8lane,  VecIndex_x2>;
defm : DUPWithTruncPats<v8i8,  v2i32, v4i32, i32, DUPv8i8lane,  VecIndex_x4>;
defm : DUPWithTruncPats<v4i16, v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;

defm : DUPWithTruncPats<v16i8, v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
defm : DUPWithTruncPats<v16i8, v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
defm : DUPWithTruncPats<v8i16, v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;

multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
                               SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
                                                       imm:$idx))))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
                                                       imm:$idx))))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTrunci64Pats<v8i8,  DUPv8i8lane,  VecIndex_x8>;
defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane, VecIndex_x2>;

defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;
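// For example (illustrative): duplicating the i8 truncation of lane 1 of a
// v8i16 vector reuses DUPv16i8lane with the index rescaled by VecIndex_x2 to
// byte lane 2 -- on little-endian, the low byte of h[1] is b[2].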
// SMOV and UMOV definitions, with some extra patterns for convenience
defm SMOV : SMov;
defm UMOV : UMov;

def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx), i16),
          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx), i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
          (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;

def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
                                           VectorIndexB:$idx)))), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
                                           VectorIndexH:$idx)))), i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;

// Extracting i8 or i16 elements will have the zero-extend transformed to
// an 'and' mask by type legalization since neither i8 nor i16 are legal types
// for AArch64. Match these patterns here since UMOV already zeroes out the high
// bits of the destination register.
def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
               (i32 0xff)),
          (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
               (i32 0xffff)),
          (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;

defm INS : SIMDIns;

def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
          (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                                (i32 FPR32:$Rn), ssub))>;
def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
          (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                                (i32 FPR32:$Rn), ssub))>;

def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
          (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                                (i64 FPR64:$Rn), dsub))>;

def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;

def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;

def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
                                (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
                                (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;
def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
                                (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi32lane
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
              (i64 0)),
            dsub)>;
def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
                                (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (INSvi32lane
            V128:$Rn, VectorIndexS:$imm,
            (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
            (i64 0))>;
def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
                                (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
          (INSvi64lane
            V128:$Rn, VectorIndexD:$imm,
            (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
            (i64 0))>;

// Copy an element at a constant index in one vector into a constant indexed
// element of another.
// FIXME refactor to a shared class/def parameterized on vector type, vector
// index type and INS extension
def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
                   (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
                   VectorIndexB:$idx2)),
          (v16i8 (INSvi8lane
                   V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
          )>;
def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
                   (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
                   VectorIndexH:$idx2)),
          (v8i16 (INSvi16lane
                   V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
          )>;
def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
                   (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
                   VectorIndexS:$idx2)),
          (v4i32 (INSvi32lane
                   V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
          )>;
def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
                   (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
                   VectorIndexD:$idx2)),
          (v2i64 (INSvi64lane
                   V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
          )>;

multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
                                ValueType VTScal, Instruction INS> {
  def : Pat<(VT128 (vector_insert V128:$src,
                      (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                      imm:$Immd)),
            (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;

  def : Pat<(VT128 (vector_insert V128:$src,
                      (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                      imm:$Immd)),
            (INS V128:$src, imm:$Immd,
                 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                     (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                     imm:$Immd)),
            (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
                                 imm:$Immd, V128:$Rn, imm:$Immn),
                            dsub)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                     (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                     imm:$Immd)),
            (EXTRACT_SUBREG
              (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
                   (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
              dsub)>;
}

defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;


// Floating point vector extractions are codegen'd as either a sequence of
// subregister extractions, or a MOV (aka CPY here, alias for DUP) if
// the lane number is anything other than zero.
def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
          (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
          (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
          (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;

def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
          (f64 (CPYi64 V128:$Rn, VectorIndexD:$idx))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
          (f32 (CPYi32 V128:$Rn, VectorIndexS:$idx))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
          (f16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
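// For example (illustrative; register numbers are arbitrary): extracting
// lane 0 of a v4f32 is free, since s0 is the low 32 bits of q0, while lane 2
// becomes "mov s0, v1.s[2]" via CPYi32 above.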
// All concat_vectors operations are canonicalised to act on i64 vectors for
// AArch64. In the general case we need an instruction, which might as well
// be INS.
class ConcatPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
        (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
                     (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;

def : ConcatPat<v2i64, v1i64>;
def : ConcatPat<v2f64, v1f64>;
def : ConcatPat<v4i32, v2i32>;
def : ConcatPat<v4f32, v2f32>;
def : ConcatPat<v8i16, v4i16>;
def : ConcatPat<v8f16, v4f16>;
def : ConcatPat<v16i8, v8i8>;

// If the high lanes are undef, though, we can just ignore them:
class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
        (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;

def : ConcatUndefPat<v2i64, v1i64>;
def : ConcatUndefPat<v2f64, v1f64>;
def : ConcatUndefPat<v4i32, v2i32>;
def : ConcatUndefPat<v4f32, v2f32>;
def : ConcatUndefPat<v8i16, v4i16>;
def : ConcatUndefPat<v16i8, v8i8>;
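// For example (illustrative; register numbers are arbitrary): a
// "(v4f32 (concat_vectors d0, d1))" becomes "ins v0.d[1], v1.d[0]" once
// both halves live in the low 64 bits of 128-bit registers; with an undef
// high half, no instruction is needed at all.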
def : Pat<(v8i8 (opNode V64:$Rn)),
          (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
def : Pat<(v16i8 (opNode V128:$Rn)),
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
def : Pat<(v4i16 (opNode V64:$Rn)),
          (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
def : Pat<(v8i16 (opNode V128:$Rn)),
          (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
def : Pat<(v4i32 (opNode V128:$Rn)),
          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;


// If none did, fall back to the explicit patterns, consuming the
// vector_extract.
def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
            (i32 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (insert_subvector undef,
            (v4i16 (opNode V64:$Rn)), (i32 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
            ssub), ssub)>;

}

multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
                                          SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a sign extension after this intrinsic, consume it, as SMOV
// already performed it.
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
           (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
            (i64 0)))>;
}

multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
                                            SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a masking operation keeping only what has been actually
// generated, consume it.
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), maski8_or_more)),
      (i32 (EXTRACT_SUBREG
        (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
          (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
        ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
            maski8_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
          ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), maski16_or_more)),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
            ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
            maski16_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
          ssub))>;
}

defm : SIMDAcrossLanesSignedIntrinsic<"ADDV", AArch64saddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
          (SMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
          (SMINPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
          (UMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
          (UMINPv2i32 V64:$Rn, V64:$Rn)>;

multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
        (i32 (SMOVvi16to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
          (i64 0)))>;
def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
        (i32 (SMOVvi16to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
          (i64 0)))>;

def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
            ssub))>;
def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
            ssub))>;

def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
            dsub))>;
}
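// Note (illustrative): saddlv of a v8i8 widens before accumulating, so the
// scalar result is 16 bits and lands in an h register, e.g.
//   saddlv h0, v0.8b
// which is why the byte-sized patterns above copy out via SMOVvi16to32.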
multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
                                                Intrinsic intOp> {
  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
              ssub))>;
def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
            ssub))>;

def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
            ssub))>;
def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
            ssub))>;

def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
            dsub))>;
}

defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;

// The vaddlv_s32 intrinsic gets mapped to SADDLP.
def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (SADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;
// The vaddlv_u32 intrinsic gets mapped to UADDLP.
def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (UADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;

//------------------------------------------------------------------------------
// AdvSIMD modified immediate instructions
//------------------------------------------------------------------------------

// AdvSIMD BIC
defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
// AdvSIMD ORR
defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;

def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

// AdvSIMD FMOV
def FMOVv2f64_ns :
  SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
                               "fmov", ".2d",
                               [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64, fpimm8,
                                                "fmov", ".2s",
                                                [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
                                                "fmov", ".4s",
                                                [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
let Predicates = [HasNEON, HasFullFP16] in {
def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64, fpimm8,
                                                "fmov", ".4h",
                                                [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
                                                "fmov", ".8h",
                                                [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
} // Predicates = [HasNEON, HasFullFP16]

// AdvSIMD MOVI

// EDIT byte mask: scalar
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVID : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
                                         [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 here.
def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
          (MOVID imm0_255:$shift)>;

// EDIT byte mask: 2d

// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 in the pattern
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVIv2d_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
                                              simdimmtype10,
                                              "movi", ".2d",
                                              [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;

def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;

def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;

// Set 64-bit vectors to all 0/1 by extracting from a 128-bit register as the
// extract is free and this gives better MachineCSE results.
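// For example (illustrative): a v4i32 zero and a v2i32 zero can then CSE to
// one "movi v0.2d, #0", with the 64-bit user simply reading d0.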
def : Pat<(v1i64 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v2i32 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v4i16 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v8i8  immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;

def : Pat<(v1i64 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v2i32 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v4i16 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v8i8  immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MOVI : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;

def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
// EDIT per word: 2s & 4s with MSL shifter
def MOVIv2s_msl  : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
                                          [(set (v2i32 V64:$Rd),
                                                (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MOVIv4s_msl  : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
                                          [(set (v4i32 V128:$Rd),
                                                (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;

// Per byte: 8b & 16b
def MOVIv8b_ns  : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64, imm0_255,
                                               "movi", ".8b",
                                               [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;

def MOVIv16b_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
                                               "movi", ".16b",
                                               [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
}

// AdvSIMD MVNI

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MVNI : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;

def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm,
                                      0), 0>;
def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;

// EDIT per word: 2s & 4s with MSL shifter
let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MVNIv2s_msl : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
                                         [(set (v2i32 V64:$Rd),
                                               (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MVNIv4s_msl : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
                                         [(set (v4i32 V128:$Rd),
                                               (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
}

//----------------------------------------------------------------------------
// AdvSIMD indexed element
//----------------------------------------------------------------------------

let hasSideEffects = 0 in {
  defm FMLA  : SIMDFPIndexedTied<0, 0b0001, "fmla">;
  defm FMLS  : SIMDFPIndexedTied<0, 0b0101, "fmls">;
}

// NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
// instruction expects the addend first, while the intrinsic expects it last.

// On the other hand, there are quite a few valid combinatorial options due to
// the commutativity of multiplication and the fact that (-x) * y = x * (-y).
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;

defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;

multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
  // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                             VectorIndexS:$idx))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (v2f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i32 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                               (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
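  // For example (illustrative): an fma whose third operand is a dup of a
  // negated 128-bit lane, conceptually fma(v1, dup(-v2[3]), v0), folds to
  //   fmls v0.4s, v1.4s, v2.s[3]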
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                             VectorIndexS:$idx))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (v4f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i32 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
  // (DUPLANE from 64-bit would be trivial).
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
                                             VectorIndexD:$idx))),
            (FMLSv2i64_indexed
                V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
            (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;

  // 2 variants for 32-bit scalar version: extract from .2s or from .4s
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (insert_subvector undef,
                                                    (v2f32 (fneg V64:$Rm)),
                                                    (i32 0))),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;

  // 1 variant for 64-bit scalar version: extract from .1d or from .2d
  def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
                         (vector_extract (v2f64 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
                V128:$Rm, VectorIndexS:$idx)>;
}

defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;

defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
defm FMUL  : SIMDFPIndexed<0, 0b1001, "fmul", fmul>;

def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv2i32_indexed V64:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv4i32_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
          (FMULv2i64_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
            (i64 0))>;

defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
defm MLA : SIMDVectorIndexedHSTied<1, 0b0000, "mla",
              TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))>>;
defm MLS : SIMDVectorIndexedHSTied<1, 0b0100, "mls",
              TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))>>;
defm MUL : SIMDVectorIndexedHS<0,
0b1000, "mul", mul>; 5364defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal", 5365 TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>; 5366defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl", 5367 TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>; 5368defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull", 5369 int_aarch64_neon_smull>; 5370defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal", 5371 int_aarch64_neon_sqadd>; 5372defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl", 5373 int_aarch64_neon_sqsub>; 5374defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah", 5375 int_aarch64_neon_sqadd>; 5376defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh", 5377 int_aarch64_neon_sqsub>; 5378defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>; 5379defm UMLAL : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal", 5380 TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>; 5381defm UMLSL : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl", 5382 TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>; 5383defm UMULL : SIMDVectorIndexedLongSD<1, 0b1010, "umull", 5384 int_aarch64_neon_umull>; 5385 5386// A scalar sqdmull with the second operand being a vector lane can be 5387// handled directly with the indexed instruction encoding. 5388def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn), 5389 (vector_extract (v4i32 V128:$Vm), 5390 VectorIndexS:$idx)), 5391 (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>; 5392 5393//---------------------------------------------------------------------------- 5394// AdvSIMD scalar shift instructions 5395//---------------------------------------------------------------------------- 5396defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">; 5397defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">; 5398defm SCVTF : SIMDFPScalarRShift<0, 0b11100, "scvtf">; 5399defm UCVTF : SIMDFPScalarRShift<1, 0b11100, "ucvtf">; 5400// Codegen patterns for the above. We don't put these directly on the 5401// instructions because TableGen's type inference can't handle the truth. 5402// Having the same base pattern for fp <--> int totally freaks it out. 
def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
          (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
          (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
          (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
          (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;

// Patterns for FP16 intrinsics - a register copy to/from is required, as i16
// is not a legal type.

def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
                  (and FPR32:$Rn, (i32 65535)),
                  vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR32:$imm)),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FCVTZSh FPR16:$Rn, vecshiftR32:$imm),
            hsub))>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
          (i64 (INSERT_SUBREG
            (i64 (IMPLICIT_DEF)),
            (FCVTZSh FPR16:$Rn, vecshiftR64:$imm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR32:$imm)),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FCVTZUh FPR16:$Rn, vecshiftR32:$imm),
            hsub))>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR64:$imm)),
          (i64 (INSERT_SUBREG
            (i64 (IMPLICIT_DEF)),
            (FCVTZUh FPR16:$Rn, vecshiftR64:$imm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_facge (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FACGE16 FPR16:$Rn, FPR16:$Rm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_facgt (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FACGT16 FPR16:$Rn, FPR16:$Rm),
            hsub))>;

defm SHL      : SIMDScalarLShiftD<   0, 0b01010, "shl", AArch64vshl>;
defm SLI      : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
defm SQRSHRN  : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
                                     int_aarch64_neon_sqrshrn>;
defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
                                     int_aarch64_neon_sqrshrun>;
defm SQSHLU   : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL    : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN   : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
                                     int_aarch64_neon_sqshrn>;
defm SQSHRUN  : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
                                     int_aarch64_neon_sqshrun>;
defm SRI      : SIMDScalarRShiftDTied<1, 0b01000, "sri">;
defm SRSHR    : SIMDScalarRShiftD<   0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA    : SIMDScalarRShiftDTied<0, 0b00110, "srsra",
    TriOpFrag<(add node:$LHS,
                   (AArch64srshri node:$MHS, node:$RHS))>>;
defm SSHR     : SIMDScalarRShiftD<   0, 0b00000, "sshr", AArch64vashr>;
defm SSRA     : SIMDScalarRShiftDTied<0, 0b00010, "ssra",
    TriOpFrag<(add node:$LHS,
                   (AArch64vashr node:$MHS, node:$RHS))>>;
defm UQRSHRN  : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
                                     int_aarch64_neon_uqrshrn>;
defm UQSHL    : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN   : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
                                     int_aarch64_neon_uqshrn>;
defm URSHR    : SIMDScalarRShiftD<   1, 0b00100, "urshr", AArch64urshri>;
defm URSRA    : SIMDScalarRShiftDTied<1, 0b00110, "ursra",
    TriOpFrag<(add node:$LHS,
                   (AArch64urshri node:$MHS, node:$RHS))>>;
defm USHR     : SIMDScalarRShiftD<   1, 0b00000, "ushr", AArch64vlshr>;
defm USRA     : SIMDScalarRShiftDTied<1, 0b00010, "usra",
    TriOpFrag<(add node:$LHS,
                   (AArch64vlshr node:$MHS, node:$RHS))>>;

//----------------------------------------------------------------------------
// AdvSIMD vector shift instructions
//----------------------------------------------------------------------------
defm FCVTZS : SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
defm FCVTZU : SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
defm SCVTF  : SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
                                   int_aarch64_neon_vcvtfxs2fp>;
defm RSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
                                        int_aarch64_neon_rshrn>;
defm SHL    : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
defm SHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
                 BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
defm SLI    : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", int_aarch64_neon_vsli>;
def : Pat<(v1i64 (int_aarch64_neon_vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                                        (i32 vecshiftL64:$imm))),
          (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
defm SQRSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
                                          int_aarch64_neon_sqrshrn>;
defm SQRSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
                                          int_aarch64_neon_sqrshrun>;
defm SQSHLU   : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL    : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
                                          int_aarch64_neon_sqshrn>;
defm SQSHRUN  : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
                                          int_aarch64_neon_sqshrun>;
defm SRI      : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", int_aarch64_neon_vsri>;
def : Pat<(v1i64 (int_aarch64_neon_vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                                        (i32 vecshiftR64:$imm))),
          (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
defm SRSHR    : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA    : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
                 TriOpFrag<(add node:$LHS,
                                (AArch64srshri node:$MHS, node:$RHS))> >;
defm SSHLL    : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
                 BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;

defm SSHR     : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
defm SSRA     : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
                 TriOpFrag<(add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
defm UCVTF    : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
                                     int_aarch64_neon_vcvtfxu2fp>;
defm UQRSHRN  : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
                                          int_aarch64_neon_uqrshrn>;
defm UQSHL    : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN   : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
                                          int_aarch64_neon_uqshrn>;
defm URSHR    : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
defm URSRA    : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
                 TriOpFrag<(add node:$LHS,
                                (AArch64urshri node:$MHS, node:$RHS))> >;
defm USHLL    : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
                 BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
defm USHR     : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
defm USRA     : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
                 TriOpFrag<(add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;

// SHRN patterns for when a logical right shift was used instead of arithmetic
// (the immediate guarantees no sign bits actually end up in the result so it
// doesn't matter).
def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
          (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
          (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
          (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;

def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
                                 (trunc (AArch64vlshr (v8i16 V128:$Rn),
                                                      vecshiftR16Narrow:$imm)))),
          (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
                                 (trunc (AArch64vlshr (v4i32 V128:$Rn),
                                                      vecshiftR32Narrow:$imm)))),
          (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
                                 (trunc (AArch64vlshr (v2i64 V128:$Rn),
                                                      vecshiftR64Narrow:$imm)))),
          (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR64Narrow:$imm)>;

// Vector sign and zero extensions are implemented with SSHLL and USHLL.
// Anyexts are implemented as zexts.
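// For example (illustrative): a v8i8 -> v8i16 sign extension is a
// shift-left-long by zero,
//   sshll v0.8h, v0.8b, #0
// and the zero/any-extend forms use ushll in the same way.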
def : Pat<(v8i16 (sext   (v8i8 V64:$Rn))),  (SSHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext   (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext   (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext   (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext   (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext   (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
// Also match an extend from the upper half of a 128 bit source register.
def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (sext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (SSHLLv4i32_shift V128:$Rn, (i32 0))>;

// Vector shift sxtl aliases
def : InstAlias<"sxtl.8h $dst, $src1",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.8h, $src1.8b",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.4s $dst, $src1",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.4s, $src1.4h",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.2d $dst, $src1",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.2d, $src1.2s",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift sxtl2 aliases
def : InstAlias<"sxtl2.8h $dst, $src1",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.4s $dst, $src1",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.2d $dst, $src1",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;

// Vector shift uxtl aliases
def : InstAlias<"uxtl.8h $dst, $src1",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.8h, $src1.8b",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.4s $dst, $src1",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.4s, $src1.4h",
                (USHLLv4i16_shift V128:$dst,
                                 V64:$src1, 0)>;
def : InstAlias<"uxtl.2d $dst, $src1",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.2d, $src1.2s",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift uxtl2 aliases
def : InstAlias<"uxtl2.8h $dst, $src1",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.4s $dst, $src1",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.2d $dst, $src1",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;

// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// These patterns are more complex because floating point loads do not
// support sign extension.
// The sign extension has to be explicitly added and is only supported for
// one step: byte-to-half, half-to-word, word-to-doubleword.
// SCVTF GPR -> FPR is 9 cycles.
// SCVTF FPR -> FPR is 4 cycles.
// (sign extension with lengthen) SXTL FPR -> FPR is 2 cycles.
// Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
// and still be faster.
// However, this is not good for code size.
// 8-bits -> float. 2 sizes step-up.
class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                              (f64
                                (EXTRACT_SUBREG
                                  (SSHLLv8i8_shift
                                    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                        INST,
                                        bsub),
                                    0),
                                  dsub)),
                              0),
                            ssub)))>,
    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;

def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
                          (LDRBroW GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
                          (LDRBroX GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
                          (LDURBi GPR64sp:$Rn, simm9:$offset)>;

// 16-bits -> float. 1 size step-up.
class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                  INST,
                                  hsub),
                                0),
                            ssub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;

// 32-bits to 32-bits are handled in target specific dag combine:
// performIntToFpCombine.
// 64-bits integer to 32-bits floating point, not possible with
// SCVTF on floating point registers (both source and destination
// must have the same size).

// Here are the patterns for 8, 16, 32, and 64-bits to double.
// 8-bits -> double. 3 size step-up: give up.
// 16-bits -> double. 2 size step-up.
class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
  : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
                              (SSHLLv2i32_shift
                                 (f64
                                  (EXTRACT_SUBREG
                                    (SSHLLv4i16_shift
                                     (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                        INST,
                                        hsub),
                                     0),
                                   dsub)),
                               0),
                             dsub)))>,
    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;

def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
// 32-bits -> double. 1 size step-up.
class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
  : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
           (SCVTFv1i64 (f64 (EXTRACT_SUBREG
                              (SSHLLv2i32_shift
                                 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                    INST,
                                    ssub),
                                 0),
                             dsub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
                           (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
                           (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
                           (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
                           (LDURSi GPR64sp:$Rn, simm9:$offset)>;

// 64-bits -> double are handled in target specific dag combine:
// performIntToFpCombine.
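// Putting the above together (illustrative, assuming NotForCodeSize and
// UseAlternateSExtLoadCVTF32): "double d = *(int16_t *)p" can lower to
//   ldr   h0, [x0]
//   sshll v0.4s, v0.4h, #0
//   sshll v0.2d, v0.2s, #0
//   scvtf d0, d0
// keeping the whole conversion on the FP/SIMD unit.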


//----------------------------------------------------------------------------
// AdvSIMD Load-Store Structure
//----------------------------------------------------------------------------
defm LD1 : SIMDLd1Multiple<"ld1">;
defm LD2 : SIMDLd2Multiple<"ld2">;
defm LD3 : SIMDLd3Multiple<"ld3">;
defm LD4 : SIMDLd4Multiple<"ld4">;

defm ST1 : SIMDSt1Multiple<"st1">;
defm ST2 : SIMDSt2Multiple<"st2">;
defm ST3 : SIMDSt3Multiple<"st3">;
defm ST4 : SIMDSt4Multiple<"st4">;

class Ld1Pat<ValueType ty, Instruction INST>
  : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;

def : Ld1Pat<v16i8, LD1Onev16b>;
def : Ld1Pat<v8i16, LD1Onev8h>;
def : Ld1Pat<v4i32, LD1Onev4s>;
def : Ld1Pat<v2i64, LD1Onev2d>;
def : Ld1Pat<v8i8,  LD1Onev8b>;
def : Ld1Pat<v4i16, LD1Onev4h>;
def : Ld1Pat<v2i32, LD1Onev2s>;
def : Ld1Pat<v1i64, LD1Onev1d>;

class St1Pat<ValueType ty, Instruction INST>
  : Pat<(store ty:$Vt, GPR64sp:$Rn),
        (INST ty:$Vt, GPR64sp:$Rn)>;

def : St1Pat<v16i8, ST1Onev16b>;
def : St1Pat<v8i16, ST1Onev8h>;
def : St1Pat<v4i32, ST1Onev4s>;
def : St1Pat<v2i64, ST1Onev2d>;
def : St1Pat<v8i8,  ST1Onev8b>;
def : St1Pat<v4i16, ST1Onev4h>;
def : St1Pat<v2i32, ST1Onev2s>;
def : St1Pat<v1i64, ST1Onev1d>;

//---
// Single-element
//---

defm LD1R : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
defm LD2R : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
defm LD3R : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
defm LD4R : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
let mayLoad = 1, hasSideEffects = 0 in {
defm LD1 : SIMDLdSingleBTied<0, 0b000,       "ld1", VecListOneb,   GPR64pi1>;
defm LD1 : SIMDLdSingleHTied<0, 0b010, 0,    "ld1", VecListOneh,   GPR64pi2>;
defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes,   GPR64pi4>;
defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned,   GPR64pi8>;
defm LD2 : SIMDLdSingleBTied<1, 0b000,       "ld2", VecListTwob,   GPR64pi2>;
defm LD2 : SIMDLdSingleHTied<1, 0b010, 0,    "ld2", VecListTwoh,   GPR64pi4>;
defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos,   GPR64pi8>;
defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod,   GPR64pi16>;
defm LD3 : SIMDLdSingleBTied<0, 0b001,       "ld3", VecListThreeb, GPR64pi3>;
defm LD3 : SIMDLdSingleHTied<0, 0b011, 0,    "ld3", VecListThreeh, GPR64pi6>;
defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
defm LD4 : SIMDLdSingleBTied<1, 0b001,       "ld4", VecListFourb,  GPR64pi4>;
defm LD4 : SIMDLdSingleHTied<1, 0b011, 0,    "ld4", VecListFourh,  GPR64pi8>;
defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours,  GPR64pi16>;
defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd,  GPR64pi32>;
}

def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
          (LD1Rv8b GPR64sp:$Rn)>;
def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
          (LD1Rv16b GPR64sp:$Rn)>;
def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;
def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
          (LD1Rv2s GPR64sp:$Rn)>;
def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
          (LD1Rv4s GPR64sp:$Rn)>;
def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
          (LD1Rv2d GPR64sp:$Rn)>;
def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
          (LD1Rv1d GPR64sp:$Rn)>;
// Grab the floating point version too
def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
          (LD1Rv2s GPR64sp:$Rn)>;
def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
          (LD1Rv4s GPR64sp:$Rn)>;
def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
          (LD1Rv2d GPR64sp:$Rn)>;
def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
          (LD1Rv1d GPR64sp:$Rn)>;
def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;

class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne128:$Rd),
           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;

def : Ld1Lane128Pat<extloadi8,  VectorIndexB, v16i8, i32, LD1i8>;
def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4i32, i32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4f32, f32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2i64, i64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2f64, f64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexH, v8f16, f16, LD1i16>;

class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne64:$Rd),
           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (EXTRACT_SUBREG
            (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
                 VecIndex:$idx, GPR64sp:$Rn),
            dsub)>;

def : Ld1Lane64Pat<extloadi8,  VectorIndexB, v8i8,  i32, LD1i8>;
def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2i32, i32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2f32, f32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexH, v4f16, f16, LD1i16>;


defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
defm LD4 : SIMDLdSt4SingleAliases<"ld4">;

// Stores
defm ST1 : SIMDStSingleB<0, 0b000,       "st1", VecListOneb, GPR64pi1>;
defm ST1 : SIMDStSingleH<0, 0b010, 0,    "st1", VecListOneh, GPR64pi2>;
defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;

let AddedComplexity = 19 in
class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane128Pat<truncstorei8,  VectorIndexB, v16i8, i32, ST1i8>;
def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
def : St1Lane128Pat<store,         VectorIndexS, v4i32, i32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexS, v4f32, f32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexD, v2i64, i64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexD, v2f64, f64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexH, v8f16, f16, ST1i16>;
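// For example (illustrative): storing lane 2 of a v4f32 this way is a single
//   st1 { v0.s }[2], [x0]
// with no separate lane extract.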
let AddedComplexity = 19 in
class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane64Pat<truncstorei8,  VectorIndexB, v8i8,  i32, ST1i8>;
def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
def : St1Lane64Pat<store,         VectorIndexS, v2i32, i32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexS, v2f32, f32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexH, v4f16, f16, ST1i16>;

multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                            ValueType VTy, ValueType STy, Instruction ST1,
                            int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
                        2>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;

multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                             ValueType VTy, ValueType STy, Instruction ST1,
                             int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
                         1>;
defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
                         2>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;

let mayStore = 1, hasSideEffects = 0 in {
defm ST2 : SIMDStSingleB<1, 0b000,       "st2", VecListTwob,   GPR64pi2>;
defm ST2 : SIMDStSingleH<1, 0b010, 0,    "st2", VecListTwoh,   GPR64pi4>;
defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos,   GPR64pi8>;
defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod,   GPR64pi16>;
defm ST3 : SIMDStSingleB<0, 0b001,       "st3", VecListThreeb, GPR64pi3>;
defm ST3 : SIMDStSingleH<0, 0b011, 0,    "st3",
                         VecListThreeh, GPR64pi6>;
defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
defm ST4 : SIMDStSingleB<1, 0b001,       "st4", VecListFourb,  GPR64pi4>;
defm ST4 : SIMDStSingleH<1, 0b011, 0,    "st4", VecListFourh,  GPR64pi8>;
defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours,  GPR64pi16>;
defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd,  GPR64pi32>;
}

defm ST1 : SIMDLdSt1SingleAliases<"st1">;
defm ST2 : SIMDLdSt2SingleAliases<"st2">;
defm ST3 : SIMDLdSt3SingleAliases<"st3">;
defm ST4 : SIMDLdSt4SingleAliases<"st4">;

//----------------------------------------------------------------------------
// Crypto extensions
//----------------------------------------------------------------------------

let Predicates = [HasAES] in {
def AESErr   : AESTiedInst<0b0100, "aese",   int_aarch64_crypto_aese>;
def AESDrr   : AESTiedInst<0b0101, "aesd",   int_aarch64_crypto_aesd>;
def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
}

// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
// for AES fusion on some CPUs.
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def AESMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
                        Sched<[WriteV]>;
def AESIMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
                         Sched<[WriteV]>;
}

// Only use constrained versions of AES(I)MC instructions if they are paired with
// AESE/AESD.
def : Pat<(v16i8 (int_aarch64_crypto_aesmc
            (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
                                            (v16i8 V128:$src2))))),
          (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1),
                                             (v16i8 V128:$src2)))))>,
          Requires<[HasFuseAES]>;

def : Pat<(v16i8 (int_aarch64_crypto_aesimc
            (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
                                            (v16i8 V128:$src2))))),
          (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1),
                                              (v16i8 V128:$src2)))))>,
          Requires<[HasFuseAES]>;

let Predicates = [HasSHA2] in {
def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c",   int_aarch64_crypto_sha1c>;
def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p",   int_aarch64_crypto_sha1p>;
def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m",   int_aarch64_crypto_sha1m>;
def SHA1SU0rrr   : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
def SHA256Hrrr   : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
def SHA256H2rrr  : SHATiedInstQQV<0b101, "sha256h2",int_aarch64_crypto_sha256h2>;
def SHA256SU1rrr :SHATiedInstVVV<0b110, "sha256su1",int_aarch64_crypto_sha256su1>;

def SHA1Hrr     : SHAInstSS<    0b0000, "sha1h",    int_aarch64_crypto_sha1h>;
def SHA1SU1rr   : SHATiedInstVV<0b0001, "sha1su1",  int_aarch64_crypto_sha1su1>;
def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",int_aarch64_crypto_sha256su0>;
}

//----------------------------------------------------------------------------
// Compiler-pseudos
//----------------------------------------------------------------------------
// FIXME: Like for X86, these should go in their own separate .td file.
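// On AArch64, any instruction writing a W register implicitly zeroes the
// upper 32 bits of the corresponding X register, so a 32-bit def known to
// come from such an instruction (def32 below) already acts as a zero-extend.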

let Predicates = [HasSHA2] in {
def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c",   int_aarch64_crypto_sha1c>;
def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p",   int_aarch64_crypto_sha1p>;
def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m",   int_aarch64_crypto_sha1m>;
def SHA1SU0rrr   : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
def SHA256Hrrr   : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
def SHA256H2rrr  : SHATiedInstQQV<0b101, "sha256h2",
                                  int_aarch64_crypto_sha256h2>;
def SHA256SU1rrr : SHATiedInstVVV<0b110, "sha256su1",
                                  int_aarch64_crypto_sha256su1>;

def SHA1Hrr     : SHAInstSS<    0b0000, "sha1h",    int_aarch64_crypto_sha1h>;
def SHA1SU1rr   : SHATiedInstVV<0b0001, "sha1su1",  int_aarch64_crypto_sha1su1>;
def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",
                                int_aarch64_crypto_sha256su0>;
}

//----------------------------------------------------------------------------
// Compiler-pseudos
//----------------------------------------------------------------------------
// FIXME: Like for X86, these should go in their own separate .td file.

def def32 : PatLeaf<(i32 GPR32:$src), [{
  return isDef32(*N);
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;

// For an anyext, we don't care what the high bits are, so we can perform an
// INSERT_SUBREG into an IMPLICIT_DEF.
def : Pat<(i64 (anyext GPR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;

// When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
// then assert the extension has happened.
def : Pat<(i64 (zext GPR32:$src)),
          (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;

// To sign extend, we use a signed bitfield move instruction (SBFM) on the
// containing super-reg.
def : Pat<(i64 (sext GPR32:$src)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32),
                   0, 31)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i8)),  (SBFMXri GPR64:$src, 0, 7)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i1)),  (SBFMXri GPR64:$src, 0, 0)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i8)),  (SBFMWri GPR32:$src, 0, 7)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i1)),  (SBFMWri GPR32:$src, 0, 0)>;

def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                   (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                   (i64 (i64shift_sext_i8 imm0_63:$imm)))>;

def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                   (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                   (i64 (i64shift_sext_i16 imm0_63:$imm)))>;

def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 (i64shift_a imm0_63:$imm)),
                   (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
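
// Illustration of the extension patterns above: an explicit i32 -> i64
// zero-extension selects the 32-bit ORR form (a plain "mov w0, w0"), relying
// on the architectural zeroing of the upper 32 bits, while a sign-extension
// becomes a single SBFM ("sxtw x0, w0"); a zext of a def32 value needs no
// instruction at all.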

// sra patterns have an AddedComplexity of 10, so make sure we have a higher
// AddedComplexity for the following patterns since we want to match sext + sra
// patterns before we attempt to match a single sra node.
let AddedComplexity = 20 in {
// We support all sext + sra combinations which preserve at least one bit of
// the original value which is to be sign extended, i.e. shifts of up to
// bitwidth-1 bits.
def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;

def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;

def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 imm0_31:$imm), 31)>;
} // AddedComplexity = 20

// To truncate, we can simply extract from a subregister.
def : Pat<(i32 (trunc GPR64sp:$src)),
          (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;

// __builtin_trap() uses the BRK instruction on AArch64.
def : Pat<(trap), (BRK 1)>;
def : Pat<(debugtrap), (BRK 0xF000)>, Requires<[IsWindows]>;

// Multiply-high patterns, which multiply the lower subvector using smull/umull
// and the upper subvector with smull2/umull2, then shuffle the high part of
// both results together.
def : Pat<(v16i8 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (SMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (SMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (SMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;

def : Pat<(v16i8 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (UMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (UMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (UMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
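
// Illustration: the v8i16 mulhs pattern above expands to roughly
//   smull  v2.4s, v0.4h, v1.4h    // widening multiply of the low halves
//   smull2 v3.4s, v0.8h, v1.8h    // widening multiply of the high halves
//   uzp2   v0.8h, v2.8h, v3.8h    // keep the high 16 bits of each product
// (register assignment shown is purely illustrative).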

// Conversions between AdvSIMD types of the same register size are free.
// But because we need a consistent lane ordering, in big endian many
// conversions require one or more REV instructions.
//
// Consider a simple memory load followed by a bitconvert then a store.
//   v0 = load v2i32
//   v1 = BITCAST v2i32 v0 to v4i16
//   store v4i16 v1
//
// In big endian mode every memory access has an implicit byte swap. LDR and
// STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
// is, they treat the vector as a sequence of elements to be byte-swapped.
// The two pairs of instructions are fundamentally incompatible. We've decided
// to use LD1/ST1 only to simplify compiler implementation.
//
// LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
// the original code sequence:
//   v0 = load v2i32
//   v1 = REV v2i32                  (implicit)
//   v2 = BITCAST v2i32 v1 to v4i16
//   v3 = REV v4i16 v2               (implicit)
//   store v4i16 v3
//
// But this is now broken - the value stored is different from the value
// loaded due to lane reordering. To fix this, on every BITCAST we must
// perform two other REVs:
//   v0 = load v2i32
//   v1 = REV v2i32                  (implicit)
//   v2 = REV v2i32 v1
//   v3 = BITCAST v2i32 v2 to v4i16
//   v4 = REV v4i16 v3
//   v5 = REV v4i16 v4               (implicit)
//   store v4i16 v5
//
// This means an extra two instructions, but actually in most cases the two
// REV instructions can be combined into one. For example:
//   (REV64_2s (REV64_4h X)) === (REV32_4h X)
//
// There is also no 128-bit REV instruction. This must be synthesized with an
// EXT instruction.
//
// Most bitconverts require some sort of conversion. The only exceptions are:
// a) Identity conversions - vNfX <-> vNiX
// b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
//
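// For example, on big-endian targets the v2i32 -> v4i16 bitconvert from the
// running example above selects a single lane-reordering instruction,
//   rev32 v0.4h, v0.4h
// as encoded by the REV bitconvert patterns later in this section.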

// Natural vector casts (64 bit)
def : Pat<(v8i8  (AArch64NvCast (v2i32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8  (AArch64NvCast (v4i16 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8  (AArch64NvCast (v8i8 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8  (AArch64NvCast (f64 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1f64 (AArch64NvCast (f64 FPR64:$src))), (v1f64 FPR64:$src)>;

def : Pat<(v8i8  (AArch64NvCast (v2f32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v2f32 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1f64 (AArch64NvCast (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;

// Natural vector casts (128 bit)
def : Pat<(v16i8 (AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v4f32 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v2f64 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
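
// Illustration: the GPR <-> vector bitconverts below are plain register-bank
// moves on little-endian targets, e.g. (v2i32 (bitconvert GPR64:$Xn)) becomes
// something like
//   fmov d0, x0
// whereas big-endian targets additionally need a REV64 to fix the lane order.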
let Predicates = [IsLE] in {
def : Pat<(v8i8  (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;

def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8 (bitconvert GPR64:$Xn)),
          (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
          (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
          (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
          (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
          (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;

def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
          (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
}
def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;

def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
          (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
          (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
          (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;

let Predicates = [IsLE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
          (v1i64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
          (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))),
          (v1i64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
          (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
          (v1i64 (REV64v2i32 FPR64:$src))>;
}
def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))),   (v1i64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
          (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
          (v2i32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))),
          (v2i32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))),
          (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
          (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
          (v2i32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
          (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
          (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))),
          (v4i16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))),
          (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
          (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
          (v4i16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
          (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v8i8 FPR64:$src))),
          (v4f16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (f64 FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
          (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (f64   FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))), (v8i8 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))),
          (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))),
          (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))),
          (v8i8 (REV16v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))),
          (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))),
          (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))),
          (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))),
          (v8i8 (REV16v8i8 FPR64:$src))>;
}

let Predicates = [IsLE] in {
def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v8i8  FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))), (f64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))),
          (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))),
          (f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))),
          (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))),
          (f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))),
          (f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
          (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
          (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))),
          (v1f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
          (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
          (v1f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (f64   FPR64:$src))), (v1f64 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
          (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))),
          (v2f32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
          (v2f32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;

let Predicates = [IsLE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
          (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                          (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                          (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                          (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
          (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                          (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
                          (REV64v16i8 FPR128:$src), (i32 8)))>;
}
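
// Illustration: because there is no 128-bit REV instruction, the big-endian
// f128 bitconverts above synthesize it, e.g. for v4i32 -> f128:
//   rev64 v0.4s, v0.4s
//   ext   v0.16b, v0.16b, v0.16b, #8
// reversing element order within each 64-bit half and then swapping the
// halves.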

let Predicates = [IsLE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))),
          (v2f64 (EXTv16i8 FPR128:$src,
                           FPR128:$src, (i32 8)))>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
          (v2f64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
          (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
          (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
          (v2f64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
          (v2f64 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))),
          (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                           (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
          (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
          (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
          (v4f32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
          (v4f32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
          (v4f32 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))),
          (v2i64 (EXTv16i8 FPR128:$src,
                           FPR128:$src, (i32 8)))>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
          (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
          (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
          (v2i64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
          (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
          (v2i64 (REV64v8i16 FPR128:$src))>;
}
def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))),
          (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                           (REV64v4i32 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
          (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
          (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
          (v4i32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
          (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
          (v4i32 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))),
          (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                           (REV64v8i16 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
          (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
          (v8i16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
          (v8i16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
          (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
          (v8i16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8f16 (bitconvert (f128 FPR128:$src))),
          (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                           (REV64v8i16 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
          (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
          (v8f16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
          (v8f16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
          (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
          (v8f16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;

let Predicates = [IsLE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))),
          (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
                           (REV64v16i8 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
          (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
          (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
          (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
          (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
          (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
          (v16i8 (REV16v16i8 FPR128:$src))>;
}

def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;

def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
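
// Illustration: extracting the high 64-bit half of a 128-bit vector via the
// patterns above uses DUP to move lane 1 of the v2i64 view down to lane 0,
// e.g.
//   dup v0.2d, v0.d[1]
// after which the result is read from the D sub-register.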

// A 64-bit subvector insert to the first 128-bit vector position
// is a subregister copy that needs no instruction.
multiclass InsertSubvectorUndef<ValueType Ty> {
  def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
}

defm : InsertSubvectorUndef<i32>;
defm : InsertSubvectorUndef<i64>;

// Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
// or v2f32.
def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
                    (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
          (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
                     (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
          (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
// vector_extract on 64-bit vectors gets promoted to a 128-bit vector,
// so we match on v4f32 here, not v2f32. This will also catch adding
// the low two lanes of a true v4f32 vector.
def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
                (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
          (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
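
// Illustration: with the patterns above, summing both lanes of a v2f64 in v0
// becomes a single scalar pairwise add,
//   faddp d0, v0.2d
// and the v2i64 case similarly becomes "addp d0, v0.2d".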

// Scalar 64-bit shifts in FPR64 registers.
def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;

// Patterns for nontemporal/no-allocate stores.
// We have to resort to tricks to turn a single-input store into a store pair,
// because there is no single-input nontemporal store, only STNP.
let Predicates = [IsLE] in {
let AddedComplexity = 15 in {
class NTStore128Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR128:$Rt),
        (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
      (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
              (CPYi64 FPR128:$Rt, (i64 1)),
              GPR64sp:$Rn, simm7s8:$offset)>;

def : NTStore128Pat<v2i64>;
def : NTStore128Pat<v4i32>;
def : NTStore128Pat<v8i16>;
def : NTStore128Pat<v16i8>;

class NTStore64Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR64:$Rt),
        (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
      (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
              (CPYi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
              GPR64sp:$Rn, simm7s4:$offset)>;

// FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
def : NTStore64Pat<v1f64>;
def : NTStore64Pat<v1i64>;
def : NTStore64Pat<v2i32>;
def : NTStore64Pat<v4i16>;
def : NTStore64Pat<v8i8>;

def : Pat<(nontemporalstore GPR64:$Rt,
            (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
          (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
                  GPR64sp:$Rn, simm7s4:$offset)>;
} // AddedComplexity = 15
} // Predicates = [IsLE]

// Tail call return handling. These are all compiler pseudo-instructions,
// so no encoding information or anything like that.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
  def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  // Indirect tail-call with any register allowed, used by MachineOutliner when
  // this is proven safe.
  // FIXME: If we have to add any more hacks like this, we should instead relax
  // some verifier checks for outlined functions.
  def TCRETURNriALL : Pseudo<(outs), (ins GPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
  // Indirect tail-call limited to only use registers (x16 and x17) which are
  // allowed to tail-call a "BTI c" instruction.
  def TCRETURNriBTI : Pseudo<(outs), (ins rtcGPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
}

def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>,
      Requires<[NotUseBTI]>;
def : Pat<(AArch64tcret rtcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNriBTI rtcGPR64:$dst, imm:$FPDiff)>,
      Requires<[UseBTI]>;
def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;

def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;

include "AArch64InstrAtomics.td"
include "AArch64SVEInstrInfo.td"